Skip to content

Commit 8c019b9

Browse files
leofang, emcastillo, and claude
committed
Fix linter errors
Co-Authored-By: Emilio Castillo <ecastillo@nvidia.com>
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
1 parent 8c20237 commit 8c019b9

File tree

2 files changed

+13
-12
lines changed

2 files changed

+13
-12
lines changed

cuda_core/cuda/core/_tensor_bridge.pyx

Lines changed: 12 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -134,26 +134,26 @@ cdef inline int check_aoti(AOTITorchError err, const char* name) except? -1:
134134

135135
cdef dict _build_dtype_map():
136136
try:
137-
from ml_dtypes import bfloat16 as _bf16
137+
from ml_dtypes import bfloat16 as _bf16 # noqa: F811
138138
has_bfloat16 = True
139139
except ImportError:
140140
has_bfloat16 = False
141141

142142
cdef dict m = {
143-
aoti_torch_dtype_float16(): numpy.dtype(numpy.float16),
144-
aoti_torch_dtype_float32(): numpy.dtype(numpy.float32),
145-
aoti_torch_dtype_float64(): numpy.dtype(numpy.float64),
146-
aoti_torch_dtype_uint8(): numpy.dtype(numpy.uint8),
147-
aoti_torch_dtype_int8(): numpy.dtype(numpy.int8),
148-
aoti_torch_dtype_int16(): numpy.dtype(numpy.int16),
149-
aoti_torch_dtype_int32(): numpy.dtype(numpy.int32),
150-
aoti_torch_dtype_int64(): numpy.dtype(numpy.int64),
151-
aoti_torch_dtype_bool(): numpy.dtype(numpy.bool_),
152-
aoti_torch_dtype_complex64(): numpy.dtype(numpy.complex64),
143+
aoti_torch_dtype_float16(): numpy.dtype(numpy.float16),
144+
aoti_torch_dtype_float32(): numpy.dtype(numpy.float32),
145+
aoti_torch_dtype_float64(): numpy.dtype(numpy.float64),
146+
aoti_torch_dtype_uint8(): numpy.dtype(numpy.uint8),
147+
aoti_torch_dtype_int8(): numpy.dtype(numpy.int8),
148+
aoti_torch_dtype_int16(): numpy.dtype(numpy.int16),
149+
aoti_torch_dtype_int32(): numpy.dtype(numpy.int32),
150+
aoti_torch_dtype_int64(): numpy.dtype(numpy.int64),
151+
aoti_torch_dtype_bool(): numpy.dtype(numpy.bool_),
152+
aoti_torch_dtype_complex64(): numpy.dtype(numpy.complex64),
153153
aoti_torch_dtype_complex128(): numpy.dtype(numpy.complex128),
154154
}
155155
if has_bfloat16:
156-
m[aoti_torch_dtype_bfloat16()] = numpy.dtype("bfloat16")
156+
m[aoti_torch_dtype_bfloat16()] = numpy.dtype(_bf16)
157157
return m
158158

159159

cuda_core/tests/test_utils.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -845,6 +845,7 @@ def test_torch_tensor_bridge_cpu(init_cuda):
845845
@_torch_skip
846846
def test_torch_tensor_bridge_decorator(init_cuda):
847847
"""Verify tensor bridge works through the args_viewable_as_strided_memory decorator."""
848+
848849
@args_viewable_as_strided_memory((0,))
849850
def fn(tensor, stream):
850851
return tensor.view(stream.handle)

0 commit comments

Comments (0)