Skip to content

Commit 6a2d455

Browse files
committed
up
1 parent bf36732 commit 6a2d455

2 files changed

Lines changed: 6 additions & 91 deletions

File tree

backends/mlx/ops.py

Lines changed: 6 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -219,6 +219,12 @@ def normalize_reduction_dim(
219219
return dim, keepdim
220220

221221

222+
@REGISTRY.register(target=["NOOP", torch.ops.aten._assert_scalar.default])
def _noop_handler(P: MLXProgramBuilder, n: Node) -> None:
    """Handler for targets that lower to nothing: emits no MLX instructions."""
227+
222228
@REGISTRY.register(target=[torch.ops.aten.addmm.default])
223229
def _addmm_handler(P: MLXProgramBuilder, n: Node) -> Slot:
224230
"""Handle addmm: self + (mat1 @ mat2).

backends/mlx/test/test_ops.py

Lines changed: 0 additions & 91 deletions
Original file line number | Diff line number | Diff line change
@@ -83,94 +83,3 @@ def create_model(self) -> nn.Module:
8383
def create_inputs(self) -> Tuple[torch.Tensor, ...]:
    """Build the single random input tensor, shaped (batch_size, n, m)."""
    shape = (self.batch_size, self.n, self.m)
    return (torch.randn(*shape),)
86-
87-
88-
class AddmmModel(nn.Module):
    """Model that performs addmm: beta * bias + alpha * (mat1 @ mat2).

    Args:
        in_features: Number of input features (columns of the input matrix).
        out_features: Number of output features (rows of the weight matrix).
        bias: If True, adds a learnable bias via ``torch.addmm``; otherwise
            the forward pass is a plain matrix multiply scaled by ``alpha``.
        alpha: Multiplier applied to the matrix product.
        beta: Multiplier applied to the bias term (used only when ``bias=True``).
    """

    def __init__(
        self,
        in_features: int,
        out_features: int,
        bias: bool = True,
        alpha: float = 1.0,
        beta: float = 1.0,
    ):
        super().__init__()
        self.weight = nn.Parameter(torch.randn(out_features, in_features))
        if bias:
            self.bias = nn.Parameter(torch.randn(out_features))
        else:
            # register_parameter records the absent bias as a proper (None)
            # module parameter rather than a plain attribute.
            self.register_parameter("bias", None)
        self.alpha = alpha
        self.beta = beta

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if self.bias is not None:
            return torch.addmm(
                self.bias, x, self.weight.t(), beta=self.beta, alpha=self.alpha
            )
        # Fix: the no-bias path previously ignored ``alpha`` entirely.
        # Apply it here; skip the multiply when alpha == 1.0 so the default
        # configuration keeps bit-identical results to the old code.
        out = torch.mm(x, self.weight.t())
        return out if self.alpha == 1.0 else out * self.alpha
116-
117-
@register_test
class AddmmTest(OpTestCase):
    """Test case exercising addmm (bias + mat1 @ mat2) configurations."""

    name = "addmm"
    rtol = 1e-4
    atol = 1e-4

    def __init__(
        self,
        batch_size: int = 2,
        in_features: int = 64,
        out_features: int = 32,
        bias: bool = True,
        alpha: float = 1.0,
        beta: float = 1.0,
    ):
        self.batch_size = batch_size
        self.in_features = in_features
        self.out_features = out_features
        self.bias = bias
        self.alpha = alpha
        self.beta = beta

        # Derive a unique per-configuration test name from the parameters.
        stem = f"addmm_{in_features}x{out_features}"
        if not bias:
            self.name = stem + "_no_bias"
        elif alpha != 1.0 or beta != 1.0:
            self.name = stem + f"_a{alpha}_b{beta}"
        else:
            self.name = stem

    @classmethod
    def get_test_configs(cls) -> List["AddmmTest"]:
        """Return the standard matrix of addmm configurations to run."""
        param_sets = [
            # with bias, default alpha/beta
            dict(batch_size=2, in_features=64, out_features=32),
            # without bias
            dict(batch_size=2, in_features=64, out_features=32, bias=False),
            # larger size
            dict(batch_size=4, in_features=128, out_features=64),
            # custom alpha/beta
            dict(batch_size=2, in_features=64, out_features=32, alpha=2.0, beta=0.5),
        ]
        return [cls(**params) for params in param_sets]

    def create_model(self) -> nn.Module:
        """Instantiate the AddmmModel for this configuration."""
        return AddmmModel(
            self.in_features,
            self.out_features,
            bias=self.bias,
            alpha=self.alpha,
            beta=self.beta,
        )

    def create_inputs(self) -> Tuple[torch.Tensor, ...]:
        """Build a random (batch_size, in_features) input tensor."""
        return (torch.randn(self.batch_size, self.in_features),)

0 commit comments

Comments (0)