python310Packages.numba: Backport support for numpy 1.24.x

This is a last-ditch effort to keep numba working.
Martin Weinelt 2023-03-01 04:43:51 +01:00
parent 6d34d4fe71
commit 7b8dbe2d73
2 changed files with 652 additions and 2 deletions

pkgs/development/python-modules/numba/default.nix

@@ -37,12 +37,16 @@ in buildPythonPackage rec {
   postPatch = ''
     substituteInPlace setup.py \
-      --replace "setuptools<60" "setuptools"
+      --replace "setuptools<60" "setuptools" \
+      --replace 'max_numpy_run_version = "1.24"' 'max_numpy_run_version = "1.25"'
+    substituteInPlace numba/__init__.py \
+      --replace "elif numpy_version > (1, 23):" "elif numpy_version > (1, 24):"
   '';

   env.NIX_CFLAGS_COMPILE = lib.optionalString stdenv.isDarwin "-I${lib.getDev libcxx}/include/c++/v1";

-  nativeBuildInputs = lib.optionals cudaSupport [
+  nativeBuildInputs = [
+    numpy
+  ] ++ lib.optionals cudaSupport [
     addOpenGLRunpath
   ];
@@ -65,6 +69,8 @@ in buildPythonPackage rec {
       url = "https://github.com/numba/numba/commit/993e8c424055a7677b2755b184fc9e07549713b9.patch";
       hash = "sha256-IhIqRLmP8gazx+KWIyCxZrNLMT4jZT8CWD3KcH4KjOo=";
     })
+    # Backport numpy 1.24 support from https://github.com/numba/numba/pull/8691
+    ./numpy-1.24.patch
   ] ++ lib.optionals cudaSupport [
     (substituteAll {
       src = ./cuda_path.patch;

pkgs/development/python-modules/numba/numpy-1.24.patch (new file)

@@ -0,0 +1,644 @@
From c3e6994e07fb6ac57be5d9d33d9046c5453b2256 Mon Sep 17 00:00:00 2001
From: Graham Markall <gmarkall@nvidia.com>
Date: Thu, 24 Nov 2022 15:41:24 +0000
Subject: [PATCH 01/13] CUDA intrinsics tests: correct np.float -> np.float16
I believe this was written in error and should always have been float16.
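For illustration (this snippet is not part of the patch): the `np.float` alias was
removed in NumPy 1.24, so the old spelling now fails outright:
```python
import numpy as np  # assuming NumPy >= 1.24

np.float16(5.)   # fine: np.float16 is a concrete scalar type
np.float(5.)     # AttributeError: module 'numpy' has no attribute 'float'
```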
---
numba/cuda/tests/cudapy/test_intrinsics.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/numba/cuda/tests/cudapy/test_intrinsics.py b/numba/cuda/tests/cudapy/test_intrinsics.py
index 6e5fc0a0e..46fe8c607 100644
--- a/numba/cuda/tests/cudapy/test_intrinsics.py
+++ b/numba/cuda/tests/cudapy/test_intrinsics.py
@@ -619,7 +619,7 @@ class TestCudaIntrinsic(CUDATestCase):
arg2 = np.float16(4.)
compiled[1, 1](ary, arg1, arg2)
np.testing.assert_allclose(ary[0], arg2)
- arg1 = np.float(5.)
+ arg1 = np.float16(5.)
compiled[1, 1](ary, arg1, arg2)
np.testing.assert_allclose(ary[0], arg1)
@@ -631,7 +631,7 @@ class TestCudaIntrinsic(CUDATestCase):
arg2 = np.float16(4.)
compiled[1, 1](ary, arg1, arg2)
np.testing.assert_allclose(ary[0], arg1)
- arg1 = np.float(5.)
+ arg1 = np.float16(5.)
compiled[1, 1](ary, arg1, arg2)
np.testing.assert_allclose(ary[0], arg2)
--
2.39.1
From 550fc6a25a82f76bc1f06bdea39177df635038c2 Mon Sep 17 00:00:00 2001
From: Graham Markall <gmarkall@nvidia.com>
Date: Thu, 22 Dec 2022 13:02:22 +0000
Subject: [PATCH 02/13] TestLinalgSvd.test_no_input_mutation: use
reconstruction if necessary
This test only checked for a plain match when comparing outputs.
However, in some cases a reconstruction check can be necessary, as in
`test_linalg_svd`.
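For illustration (not part of the patch), a minimal sketch of the reconstruction
idea: two SVD implementations may return factors that differ (e.g. in sign), yet
both must reproduce the input matrix:
```python
import numpy as np

a = np.random.rand(4, 3)
u, sv, vt = np.linalg.svd(a, full_matrices=False)

# Rebuild A from the factors; the working size of s follows u and vt.
s = np.zeros((u.shape[1], vt.shape[0]))
np.fill_diagonal(s, sv)
np.testing.assert_allclose(a, u @ s @ vt, rtol=1e-10, atol=1e-12)
```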
---
numba/tests/test_linalg.py | 61 ++++++++++++++++++++------------------
1 file changed, 32 insertions(+), 29 deletions(-)
diff --git a/numba/tests/test_linalg.py b/numba/tests/test_linalg.py
index db183059d..b1d4f0a83 100644
--- a/numba/tests/test_linalg.py
+++ b/numba/tests/test_linalg.py
@@ -1122,6 +1122,32 @@ class TestLinalgSvd(TestLinalgBase):
Tests for np.linalg.svd.
"""
+ # This checks that A ~= U*S*V**H, i.e. SV decomposition ties out. This is
+ # required as NumPy uses only double precision LAPACK routines and
+ # computation of SVD is numerically sensitive. Numba uses type-specific
+ # routines and therefore sometimes comes out with a different answer to
+ # NumPy (orthonormal bases are not unique, etc.).
+
+ def check_reconstruction(self, a, got, expected):
+ u, sv, vt = got
+
+ # Check they are dimensionally correct
+ for k in range(len(expected)):
+ self.assertEqual(got[k].shape, expected[k].shape)
+
+ # Columns in u and rows in vt dictates the working size of s
+ s = np.zeros((u.shape[1], vt.shape[0]))
+ np.fill_diagonal(s, sv)
+
+ rec = np.dot(np.dot(u, s), vt)
+ resolution = np.finfo(a.dtype).resolution
+ np.testing.assert_allclose(
+ a,
+ rec,
+ rtol=10 * resolution,
+ atol=100 * resolution # zeros tend to be fuzzy
+ )
+
@needs_lapack
def test_linalg_svd(self):
"""
@@ -1150,34 +1176,8 @@ class TestLinalgSvd(TestLinalgBase):
# plain match failed, test by reconstruction
use_reconstruction = True
- # if plain match fails then reconstruction is used.
- # this checks that A ~= U*S*V**H
- # i.e. SV decomposition ties out
- # this is required as numpy uses only double precision lapack
- # routines and computation of svd is numerically
- # sensitive, numba using the type specific routines therefore
- # sometimes comes out with a different answer (orthonormal bases
- # are not unique etc.).
if use_reconstruction:
- u, sv, vt = got
-
- # check they are dimensionally correct
- for k in range(len(expected)):
- self.assertEqual(got[k].shape, expected[k].shape)
-
- # regardless of full_matrices cols in u and rows in vt
- # dictates the working size of s
- s = np.zeros((u.shape[1], vt.shape[0]))
- np.fill_diagonal(s, sv)
-
- rec = np.dot(np.dot(u, s), vt)
- resolution = np.finfo(a.dtype).resolution
- np.testing.assert_allclose(
- a,
- rec,
- rtol=10 * resolution,
- atol=100 * resolution # zeros tend to be fuzzy
- )
+ self.check_reconstruction(a, got, expected)
# Ensure proper resource management
with self.assertNoNRTLeak():
@@ -1238,8 +1238,11 @@ class TestLinalgSvd(TestLinalgBase):
got = func(X, False)
np.testing.assert_allclose(X, X_orig)
- for e_a, g_a in zip(expected, got):
- np.testing.assert_allclose(e_a, g_a)
+ try:
+ for e_a, g_a in zip(expected, got):
+ np.testing.assert_allclose(e_a, g_a)
+ except AssertionError:
+ self.check_reconstruction(X, got, expected)
class TestLinalgQr(TestLinalgBase):
--
2.39.1
From c9ca2d1ae5e09ace729cddf6fba08effcd69a0b7 Mon Sep 17 00:00:00 2001
From: Graham Markall <gmarkall@nvidia.com>
Date: Thu, 24 Nov 2022 21:39:27 +0000
Subject: [PATCH 03/13] test_comp_nest_with_dependency: skip on NumPy 1.24
Setting an array element with a sequence is removed in NumPy 1.24.
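For illustration (not part of the patch), the kind of ragged construction the
test relies on now raises on NumPy >= 1.24:
```python
import numpy as np  # assuming NumPy >= 1.24

np.array([[i * j for j in range(i + 1)] for i in range(3)])
# ValueError: setting an array element with a sequence. The requested
# array has an inhomogeneous shape ... (pass dtype=object to allow it)
```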
---
numba/tests/test_comprehension.py | 2 ++
1 file changed, 2 insertions(+)
diff --git a/numba/tests/test_comprehension.py b/numba/tests/test_comprehension.py
index 2cdd3dc25..092ed51da 100644
--- a/numba/tests/test_comprehension.py
+++ b/numba/tests/test_comprehension.py
@@ -11,6 +11,7 @@ from numba import jit, typed
from numba.core import types, utils
from numba.core.errors import TypingError, LoweringError
from numba.core.types.functions import _header_lead
+from numba.np.numpy_support import numpy_version
from numba.tests.support import tag, _32bit, captured_stdout
@@ -360,6 +361,7 @@ class TestArrayComprehension(unittest.TestCase):
self.check(comp_nest_with_array_conditional, 5,
assert_allocate_list=True)
+ @unittest.skipUnless(numpy_version < (1, 24), 'Removed in NumPy 1.24')
def test_comp_nest_with_dependency(self):
def comp_nest_with_dependency(n):
l = np.array([[i * j for j in range(i+1)] for i in range(n)])
--
2.39.1
From e69ad519352ac5a1f7714083968fcbac3ba92f95 Mon Sep 17 00:00:00 2001
From: Graham Markall <gmarkall@nvidia.com>
Date: Thu, 24 Nov 2022 16:48:37 +0000
Subject: [PATCH 04/13] Avoid use of np.bool in stencilparfor.py
---
numba/stencils/stencilparfor.py | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/numba/stencils/stencilparfor.py b/numba/stencils/stencilparfor.py
index 5f30893b3..4f23ed903 100644
--- a/numba/stencils/stencilparfor.py
+++ b/numba/stencils/stencilparfor.py
@@ -21,6 +21,7 @@ from numba.core.ir_utils import (get_call_table, mk_unique_var,
find_callname, require, find_const, GuardException)
from numba.core.errors import NumbaValueError
from numba.core.utils import OPERATORS_TO_BUILTINS
+from numba.np import numpy_support
def _compute_last_ind(dim_size, index_const):
@@ -264,7 +265,11 @@ class StencilPass(object):
dtype_g_np_assign = ir.Assign(dtype_g_np, dtype_g_np_var, loc)
init_block.body.append(dtype_g_np_assign)
- dtype_np_attr_call = ir.Expr.getattr(dtype_g_np_var, return_type.dtype.name, loc)
+ return_type_name = numpy_support.as_dtype(
+ return_type.dtype).type.__name__
+ if return_type_name == 'bool':
+ return_type_name = 'bool_'
+ dtype_np_attr_call = ir.Expr.getattr(dtype_g_np_var, return_type_name, loc)
dtype_attr_var = ir.Var(scope, mk_unique_var("$np_attr_attr"), loc)
self.typemap[dtype_attr_var.name] = types.functions.NumberClass(return_type.dtype)
dtype_attr_assign = ir.Assign(dtype_np_attr_call, dtype_attr_var, loc)
--
2.39.1
From dd96d5996abd8646443501f2bbd7d4e1a9c0eec4 Mon Sep 17 00:00:00 2001
From: Graham Markall <gmarkall@nvidia.com>
Date: Thu, 24 Nov 2022 15:46:52 +0000
Subject: [PATCH 05/13] test_hypot: Tweak regex so it matches NumPy 1.24
The modified regex matches the existing message produced by NumPy <
1.24, and the new improved message in 1.24.
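A quick check of the idea (the message texts shown here are assumed wordings,
not taken from the patch):
```python
import re

pattern = r"overflow encountered in .*scalar"
assert re.search(pattern, "overflow encountered in double_scalars")   # NumPy < 1.24
assert re.search(pattern, "overflow encountered in scalar multiply")  # NumPy >= 1.24
```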
---
numba/tests/test_mathlib.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/numba/tests/test_mathlib.py b/numba/tests/test_mathlib.py
index a3f535316..05e3d68f5 100644
--- a/numba/tests/test_mathlib.py
+++ b/numba/tests/test_mathlib.py
@@ -516,7 +516,7 @@ class TestMathLib(TestCase):
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
self.assertRaisesRegexp(RuntimeWarning,
- 'overflow encountered in .*_scalars',
+ 'overflow encountered in .*scalar',
naive_hypot, val, val)
def test_hypot_npm(self):
--
2.39.1
From b755c22caeec9e6b0e0606f0cee245648914d592 Mon Sep 17 00:00:00 2001
From: Graham Markall <gmarkall@nvidia.com>
Date: Thu, 24 Nov 2022 11:29:53 +0000
Subject: [PATCH 06/13] Don't test summing with timedelta dtype
This always produced invalid results (though they were consistent
between Numba and NumPy) but now this fails in NumPy 1.24 with an
exception:
```
TypeError: The `dtype` and `signature` arguments to ufuncs only select
the general DType and not details such as the byte order or time unit.
You can avoid this error by using the scalar types `np.float64` or the
dtype string notation.
```
Note that the exception message is misleading, and using the dtype
string notation does not provide a workaround.
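A rough repro sketch of the failure mode (the exact call in the test suite
differs; this assumes NumPy >= 1.24):
```python
import numpy as np  # assuming NumPy >= 1.24

a = np.ones(3, dtype="timedelta64[M]")
a.sum(dtype=np.dtype("timedelta64[M]"))
# TypeError: The `dtype` and `signature` arguments to ufuncs only select
# the general DType and not details such as the byte order or time unit.
```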
---
numba/tests/test_array_methods.py | 15 ++++++---------
1 file changed, 6 insertions(+), 9 deletions(-)
diff --git a/numba/tests/test_array_methods.py b/numba/tests/test_array_methods.py
index eee5cfeff..a2312adba 100644
--- a/numba/tests/test_array_methods.py
+++ b/numba/tests/test_array_methods.py
@@ -1193,7 +1193,7 @@ class TestArrayMethods(MemoryLeakMixin, TestCase):
pyfunc = array_sum_dtype_kws
cfunc = jit(nopython=True)(pyfunc)
all_dtypes = [np.float64, np.float32, np.int64, np.int32, np.uint32,
- np.uint64, np.complex64, np.complex128, TIMEDELTA_M]
+ np.uint64, np.complex64, np.complex128]
all_test_arrays = [
[np.ones((7, 6, 5, 4, 3), arr_dtype),
np.ones(1, arr_dtype),
@@ -1207,8 +1207,7 @@ class TestArrayMethods(MemoryLeakMixin, TestCase):
np.dtype('uint32'): [np.float64, np.int64, np.float32],
np.dtype('uint64'): [np.float64, np.int64],
np.dtype('complex64'): [np.complex64, np.complex128],
- np.dtype('complex128'): [np.complex128],
- np.dtype(TIMEDELTA_M): [np.dtype(TIMEDELTA_M)]}
+ np.dtype('complex128'): [np.complex128]}
for arr_list in all_test_arrays:
for arr in arr_list:
@@ -1216,15 +1215,15 @@ class TestArrayMethods(MemoryLeakMixin, TestCase):
subtest_str = ("Testing np.sum with {} input and {} output"
.format(arr.dtype, out_dtype))
with self.subTest(subtest_str):
- self.assertPreciseEqual(pyfunc(arr, dtype=out_dtype),
- cfunc(arr, dtype=out_dtype))
+ self.assertPreciseEqual(pyfunc(arr, dtype=out_dtype),
+ cfunc(arr, dtype=out_dtype))
def test_sum_axis_dtype_kws(self):
""" test sum with axis and dtype parameters over a whole range of dtypes """
pyfunc = array_sum_axis_dtype_kws
cfunc = jit(nopython=True)(pyfunc)
all_dtypes = [np.float64, np.float32, np.int64, np.int32, np.uint32,
- np.uint64, np.complex64, np.complex128, TIMEDELTA_M]
+ np.uint64, np.complex64, np.complex128]
all_test_arrays = [
[np.ones((7, 6, 5, 4, 3), arr_dtype),
np.ones(1, arr_dtype),
@@ -1238,9 +1237,7 @@ class TestArrayMethods(MemoryLeakMixin, TestCase):
np.dtype('uint32'): [np.float64, np.int64, np.float32],
np.dtype('uint64'): [np.float64, np.uint64],
np.dtype('complex64'): [np.complex64, np.complex128],
- np.dtype('complex128'): [np.complex128],
- np.dtype(TIMEDELTA_M): [np.dtype(TIMEDELTA_M)],
- np.dtype(TIMEDELTA_Y): [np.dtype(TIMEDELTA_Y)]}
+ np.dtype('complex128'): [np.complex128]}
for arr_list in all_test_arrays:
for arr in arr_list:
--
2.39.1
From 65df00379df1276b7045b44818347a119bb32361 Mon Sep 17 00:00:00 2001
From: Graham Markall <gmarkall@nvidia.com>
Date: Thu, 24 Nov 2022 10:03:54 +0000
Subject: [PATCH 07/13] Replace use of deprecated np.bool with np.bool_
np.bool was removed in NumPy 1.24.
---
numba/tests/test_np_functions.py | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/numba/tests/test_np_functions.py b/numba/tests/test_np_functions.py
index 4cdaf548b..e195ac781 100644
--- a/numba/tests/test_np_functions.py
+++ b/numba/tests/test_np_functions.py
@@ -932,11 +932,11 @@ class TestNPFunctions(MemoryLeakMixin, TestCase):
yield np.inf, None
yield np.PINF, None
yield np.asarray([-np.inf, 0., np.inf]), None
- yield np.NINF, np.zeros(1, dtype=np.bool)
- yield np.inf, np.zeros(1, dtype=np.bool)
- yield np.PINF, np.zeros(1, dtype=np.bool)
+ yield np.NINF, np.zeros(1, dtype=np.bool_)
+ yield np.inf, np.zeros(1, dtype=np.bool_)
+ yield np.PINF, np.zeros(1, dtype=np.bool_)
yield np.NINF, np.empty(12)
- yield np.asarray([-np.inf, 0., np.inf]), np.zeros(3, dtype=np.bool)
+ yield np.asarray([-np.inf, 0., np.inf]), np.zeros(3, dtype=np.bool_)
pyfuncs = [isneginf, isposinf]
for pyfunc in pyfuncs:
--
2.39.1
From 065475bd8d5f39ad0a2b0d154ca283dec10bf5d0 Mon Sep 17 00:00:00 2001
From: Graham Markall <gmarkall@nvidia.com>
Date: Thu, 24 Nov 2022 09:56:06 +0000
Subject: [PATCH 08/13] Overload np.MachAr only for NumPy < 1.24
---
numba/np/arraymath.py | 4 ++++
numba/tests/test_np_functions.py | 4 +++-
2 files changed, 7 insertions(+), 1 deletion(-)
diff --git a/numba/np/arraymath.py b/numba/np/arraymath.py
index 9885526ee..f6e5f5560 100644
--- a/numba/np/arraymath.py
+++ b/numba/np/arraymath.py
@@ -4177,6 +4177,10 @@ iinfo = namedtuple('iinfo', _iinfo_supported)
# This module is imported under the compiler lock which should deal with the
# lack of thread safety in the warning filter.
def _gen_np_machar():
+ # NumPy 1.24 removed np.MachAr
+ if numpy_version >= (1, 24):
+ return
+
np122plus = numpy_version >= (1, 22)
w = None
with warnings.catch_warnings(record=True) as w:
diff --git a/numba/tests/test_np_functions.py b/numba/tests/test_np_functions.py
index e195ac781..e8a9bccd0 100644
--- a/numba/tests/test_np_functions.py
+++ b/numba/tests/test_np_functions.py
@@ -4775,6 +4775,7 @@ def foo():
eval(compile(funcstr, '<string>', 'exec'))
return locals()['foo']
+ @unittest.skipIf(numpy_version >= (1, 24), "NumPy < 1.24 required")
def test_MachAr(self):
attrs = ('ibeta', 'it', 'machep', 'eps', 'negep', 'epsneg', 'iexp',
'minexp', 'xmin', 'maxexp', 'xmax', 'irnd', 'ngrd',
@@ -4817,7 +4818,8 @@ def foo():
cfunc = jit(nopython=True)(iinfo)
cfunc(np.float64(7))
- @unittest.skipUnless(numpy_version >= (1, 22), "Needs NumPy >= 1.22")
+ @unittest.skipUnless((1, 22) <= numpy_version < (1, 24),
+ "Needs NumPy >= 1.22, < 1.24")
@TestCase.run_test_in_subprocess
def test_np_MachAr_deprecation_np122(self):
# Tests that Numba is replaying the NumPy 1.22 deprecation warning
--
2.39.1
From 4925287144b9dca5624886ac44a27831178a7198 Mon Sep 17 00:00:00 2001
From: Graham Markall <gmarkall@nvidia.com>
Date: Fri, 25 Nov 2022 10:55:04 +0000
Subject: [PATCH 09/13] _internal.c: Remove NPY_API_VERSION checks
The API version has long since been greater than 0x7 / 0x8 for any
supported NumPy.
---
numba/np/ufunc/_internal.c | 14 --------------
1 file changed, 14 deletions(-)
diff --git a/numba/np/ufunc/_internal.c b/numba/np/ufunc/_internal.c
index 98a643788..3ab725f8f 100644
--- a/numba/np/ufunc/_internal.c
+++ b/numba/np/ufunc/_internal.c
@@ -285,9 +285,7 @@ static struct _ufunc_dispatch {
PyCFunctionWithKeywords ufunc_accumulate;
PyCFunctionWithKeywords ufunc_reduceat;
PyCFunctionWithKeywords ufunc_outer;
-#if NPY_API_VERSION >= 0x00000008
PyCFunction ufunc_at;
-#endif
} ufunc_dispatch;
static int
@@ -303,10 +301,8 @@ init_ufunc_dispatch(int *numpy_uses_fastcall)
if (strncmp(crnt_name, "accumulate", 11) == 0) {
ufunc_dispatch.ufunc_accumulate =
(PyCFunctionWithKeywords)crnt->ml_meth;
-#if NPY_API_VERSION >= 0x00000008
} else if (strncmp(crnt_name, "at", 3) == 0) {
ufunc_dispatch.ufunc_at = crnt->ml_meth;
-#endif
} else {
result = -1;
}
@@ -351,9 +347,7 @@ init_ufunc_dispatch(int *numpy_uses_fastcall)
&& (ufunc_dispatch.ufunc_accumulate != NULL)
&& (ufunc_dispatch.ufunc_reduceat != NULL)
&& (ufunc_dispatch.ufunc_outer != NULL)
-#if NPY_API_VERSION >= 0x00000008
&& (ufunc_dispatch.ufunc_at != NULL)
-#endif
);
}
return result;
@@ -425,13 +419,11 @@ dufunc_outer_fast(PyDUFuncObject * self,
}
-#if NPY_API_VERSION >= 0x00000008
static PyObject *
dufunc_at(PyDUFuncObject * self, PyObject * args)
{
return ufunc_dispatch.ufunc_at((PyObject*)self->ufunc, args);
}
-#endif
static PyObject *
dufunc__compile_for_args(PyDUFuncObject * self, PyObject * args,
@@ -609,11 +601,9 @@ static struct PyMethodDef dufunc_methods[] = {
{"outer",
(PyCFunction)dufunc_outer,
METH_VARARGS | METH_KEYWORDS, NULL},
-#if NPY_API_VERSION >= 0x00000008
{"at",
(PyCFunction)dufunc_at,
METH_VARARGS, NULL},
-#endif
{"_compile_for_args",
(PyCFunction)dufunc__compile_for_args,
METH_VARARGS | METH_KEYWORDS,
@@ -643,11 +633,9 @@ static struct PyMethodDef dufunc_methods_fast[] = {
{"outer",
(PyCFunction)dufunc_outer_fast,
METH_FASTCALL | METH_KEYWORDS, NULL},
-#if NPY_API_VERSION >= 0x00000008
{"at",
(PyCFunction)dufunc_at,
METH_VARARGS, NULL},
-#endif
{"_compile_for_args",
(PyCFunction)dufunc__compile_for_args,
METH_VARARGS | METH_KEYWORDS,
@@ -791,9 +779,7 @@ MOD_INIT(_internal)
if (PyModule_AddIntMacro(m, PyUFunc_One)
|| PyModule_AddIntMacro(m, PyUFunc_Zero)
|| PyModule_AddIntMacro(m, PyUFunc_None)
-#if NPY_API_VERSION >= 0x00000007
|| PyModule_AddIntMacro(m, PyUFunc_ReorderableNone)
-#endif
)
return MOD_ERROR_VAL;
--
2.39.1
From 783ef5a297f15d16eec61fe38d13648b876e3750 Mon Sep 17 00:00:00 2001
From: Graham Markall <gmarkall@nvidia.com>
Date: Tue, 3 Jan 2023 17:08:44 +0000
Subject: [PATCH 10/13] init_ufunc_dispatch: Handle unexpected ufunc methods
gracefully
If an unexpected ufunc method was encountered, `init_ufunc_dispatch()`
would return an error code indicating failure without setting an
exception, leading to errors like
```
SystemError: initialization of _internal failed without raising an
exception
```
as reported in Issue #8615.
This commit fixes the issue by setting an appropriate exception in this
case.
---
numba/np/ufunc/_internal.c | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/numba/np/ufunc/_internal.c b/numba/np/ufunc/_internal.c
index 3ab725f8f..6ce8989cd 100644
--- a/numba/np/ufunc/_internal.c
+++ b/numba/np/ufunc/_internal.c
@@ -337,6 +337,8 @@ init_ufunc_dispatch(int *numpy_uses_fastcall)
*numpy_uses_fastcall = crnt->ml_flags & METH_FASTCALL;
}
else if (*numpy_uses_fastcall != (crnt->ml_flags & METH_FASTCALL)) {
+ PyErr_SetString(PyExc_RuntimeError,
+ "ufunc.at() flags do not match numpy_uses_fastcall");
return -1;
}
}
@@ -349,7 +351,11 @@ init_ufunc_dispatch(int *numpy_uses_fastcall)
&& (ufunc_dispatch.ufunc_outer != NULL)
&& (ufunc_dispatch.ufunc_at != NULL)
);
+ } else {
+ char const * const fmt = "Unexpected ufunc method %s()";
+ PyErr_Format(PyExc_RuntimeError, fmt, crnt_name);
}
+
return result;
}
--
2.39.1
From 5c259e46a8e510c2b82c7ff449b167d3b430294b Mon Sep 17 00:00:00 2001
From: Graham Markall <gmarkall@nvidia.com>
Date: Tue, 3 Jan 2023 17:11:10 +0000
Subject: [PATCH 11/13] init_ufunc_dispatch: Update for NumPy 1.24
NumPy 1.24 adds a new method, `resolve_dtypes()`, and a private method
`_resolve_dtypes_and_context()`. We handle these by just ignoring them
(ignoring all private methods in general) in order to provide the same
level of functionality in Numba as for NumPy 1.23.
There is further room to build new functionality on top of this:
- Providing an implementation of `resolve_dtypes()` for `DUFunc`
objects.
- Using the `resolve_dtypes()` method in place of logic in Numba that
implements a similar dtype resolution process.
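For reference (not part of this patch), a minimal sketch of the new public API,
assuming NumPy >= 1.24:
```python
import numpy as np  # assuming NumPy >= 1.24

# Ask np.add which loop it would select for (int32, float64) inputs,
# without executing the ufunc.
np.add.resolve_dtypes((np.dtype("int32"), np.dtype("float64"), None))
# -> (dtype('float64'), dtype('float64'), dtype('float64'))
```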
---
numba/np/ufunc/_internal.c | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/numba/np/ufunc/_internal.c b/numba/np/ufunc/_internal.c
index 6ce8989cd..e860081fb 100644
--- a/numba/np/ufunc/_internal.c
+++ b/numba/np/ufunc/_internal.c
@@ -322,10 +322,15 @@ init_ufunc_dispatch(int *numpy_uses_fastcall)
} else if (strncmp(crnt_name, "reduceat", 9) == 0) {
ufunc_dispatch.ufunc_reduceat =
(PyCFunctionWithKeywords)crnt->ml_meth;
+ } else if (strncmp(crnt_name, "resolve_dtypes", 15) == 0) {
+ /* Ignored */
} else {
result = -1;
}
break;
+ case '_':
+ // We ignore private methods
+ break;
default:
result = -1; /* Unknown method */
}
--
2.39.1
From 3736714982be943eb94f4a259368b1dce525ea64 Mon Sep 17 00:00:00 2001
From: Graham Markall <gmarkall@nvidia.com>
Date: Wed, 11 Jan 2023 16:25:19 +0000
Subject: [PATCH 12/13] Update comment on skipped test
PR #8691 feedback.
---
numba/tests/test_comprehension.py | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/numba/tests/test_comprehension.py b/numba/tests/test_comprehension.py
index 092ed51da..9327b4ed3 100644
--- a/numba/tests/test_comprehension.py
+++ b/numba/tests/test_comprehension.py
@@ -361,7 +361,9 @@ class TestArrayComprehension(unittest.TestCase):
self.check(comp_nest_with_array_conditional, 5,
assert_allocate_list=True)
- @unittest.skipUnless(numpy_version < (1, 24), 'Removed in NumPy 1.24')
+ @unittest.skipUnless(numpy_version < (1, 24),
+ 'Setting an array element with a sequence is removed '
+ 'in NumPy 1.24')
def test_comp_nest_with_dependency(self):
def comp_nest_with_dependency(n):
l = np.array([[i * j for j in range(i+1)] for i in range(n)])
--
2.39.1
From 3cbab7ee436e3452e7d078f8283136671a36d944 Mon Sep 17 00:00:00 2001
From: Graham Markall <gmarkall@nvidia.com>
Date: Fri, 27 Jan 2023 12:06:57 +0000
Subject: [PATCH 13/13] Correct name of ufunc method in fastcall flags error
The name of the method should be given, which was never `at()`.
---
numba/np/ufunc/_internal.c | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/numba/np/ufunc/_internal.c b/numba/np/ufunc/_internal.c
index e860081fb..0a33de170 100644
--- a/numba/np/ufunc/_internal.c
+++ b/numba/np/ufunc/_internal.c
@@ -342,8 +342,9 @@ init_ufunc_dispatch(int *numpy_uses_fastcall)
*numpy_uses_fastcall = crnt->ml_flags & METH_FASTCALL;
}
else if (*numpy_uses_fastcall != (crnt->ml_flags & METH_FASTCALL)) {
- PyErr_SetString(PyExc_RuntimeError,
- "ufunc.at() flags do not match numpy_uses_fastcall");
+ PyErr_Format(PyExc_RuntimeError,
+ "ufunc.%s() flags do not match numpy_uses_fastcall",
+ crnt_name);
return -1;
}
}
--
2.39.1