def gpu_array_samples():
    """Build the (array, use_stream) pytest params for every importable GPU backend.

    Each entry pairs a freshly allocated device array with a flag saying whether
    the view test should exercise a stream. Returns an empty list when neither
    CuPy nor numba.cuda is available, so dependent parametrizations collect
    zero tests instead of erroring.
    """
    params = []
    if cp is not None:
        params.append(pytest.param(cp.empty(3, dtype=cp.complex64), False, id="cupy-complex64"))
        params.append(pytest.param(cp.empty((6, 6), dtype=cp.float64)[::2, ::2], True, id="cupy-float64"))
        params.append(pytest.param(cp.empty((3, 4), order="F"), True, id="cupy-fortran"))
    # Numba's device_array is the only known array container that does not
    # support DLPack (so that we get to test the CAI coverage).
    if numba_cuda is not None:
        params.append(pytest.param(numba_cuda.device_array((2,), dtype=np.int8), False, id="numba-cuda-int8"))
        params.append(pytest.param(numba_cuda.device_array((4, 2), dtype=np.float32), True, id="numba-cuda-float32"))
    return params
125125
@@ -132,7 +132,7 @@ def gpu_array_ptr(arr):
132132 raise NotImplementedError (f"{ arr = } " )
133133
134134
135- @pytest .mark .parametrize ("in_arr, use_stream" , ( * gpu_array_samples (), ))
135+ @pytest .mark .parametrize (( "in_arr" , "use_stream"), gpu_array_samples ())
136136class TestViewGPU :
137137 def test_args_viewable_as_strided_memory_gpu (self , in_arr , use_stream ):
138138 # TODO: use the device fixture?
0 commit comments