Commit 4618b80

Fix nightly operator test failures across nxp_rt600 and DLA_V130 backends
Differential Revision: D100873709
Pull Request resolved: #18951
1 parent 38fdfae

4 files changed: 12 additions & 21 deletions

backends/cadence/hifi/operators/op_permute_copy.cpp

Lines changed: 2 additions & 18 deletions
@@ -73,8 +73,7 @@ Tensor& permute_copy_out(
 
   bool optimized = false;
 
-  if (out.scalar_type() == ScalarType::Float ||
-      out.scalar_type() == ScalarType::Char ||
+  if (out.scalar_type() == ScalarType::Char ||
       out.scalar_type() == ScalarType::Byte)
     optimized = true;
 
@@ -101,22 +100,7 @@ Tensor& permute_copy_out(
     p_permute_vec[i] = dims[i];
   }
 
-  if (in_type == ScalarType::Float) {
-    WORD32* p_inp = (WORD32*)in.const_data_ptr<float>();
-    WORD32* p_out = (WORD32*)out.mutable_data_ptr<float>();
-
-    WORD32 ret_val = xa_nn_transpose_32_32(
-        p_out,
-        p_out_shape,
-        p_inp,
-        p_inp_shape,
-        p_permute_vec,
-        num_out_dims,
-        num_inp_dims);
-
-    ET_KERNEL_CHECK(ctx, ret_val == 0, Internal, out);
-
-  } else if (in_type == ScalarType::Char) {
+  if (in_type == ScalarType::Char) {
     WORD8* p_inp = (WORD8*)in.const_data_ptr<char>();
     WORD8* p_out = (WORD8*)out.mutable_data_ptr<char>();
 

backends/cadence/hifi/operators/op_softmax.cpp

Lines changed: 3 additions & 0 deletions
@@ -68,6 +68,9 @@ Tensor& _softmax_out(
   if (in.dim() > kNnlibMaxDim)
     optimized = false;
 
+  if (dim < in.dim() - 1)
+    optimized = false;
+
   if (optimized) {
     int* p_inp = (int*)in.const_data_ptr<float>();
     int* out_data = (int*)out.mutable_data_ptr<float>();
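
The added guard restricts the NNLib path to softmax over the innermost dimension; any other dim now takes the portable fallback. Softmax over a non-last dim is equivalent to moving that dim innermost, applying a last-dim softmax, and moving it back; a quick PyTorch sanity check (illustrative only):

    import torch

    x = torch.randn(2, 3, 4)
    dim = 1  # not the last dim, so the optimized path is now skipped
    ref = torch.softmax(x, dim)
    # emulate a last-dim-only kernel: move `dim` to the end and back
    emulated = torch.softmax(x.movedim(dim, -1), -1).movedim(-1, dim)
    assert torch.allclose(ref, emulated)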

backends/cadence/hifi/operators/op_where.cpp

Lines changed: 3 additions & 0 deletions
@@ -81,6 +81,9 @@ Tensor& where_self_out(
   if ((broadcast == 1) && (max_dim > kNnlibMaxDim))
     optimized = 0;
 
+  if (cond_is_broadcasted)
+    optimized = 0;
+
   if (optimized) {
     const float* a_data = a.const_data_ptr<float>();
     const float* b_data = b.const_data_ptr<float>();
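
With this change the fast path also bails out whenever the condition tensor itself must be broadcast, not only when broadcasting exceeds kNnlibMaxDim. An input pattern that now takes the portable kernel (illustrative):

    import torch

    cond = torch.tensor([True, False])  # shape (2,), broadcast over dim 0
    a = torch.randn(3, 2)
    b = torch.randn(3, 2)
    # cond expands to (3, 2); this case now uses the fallback path
    out = torch.where(cond, a, b)
    assert out.shape == (3, 2)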

backends/cadence/utils/facto_util.py

Lines changed: 4 additions & 3 deletions
@@ -249,7 +249,7 @@ def random_size_constraint(deps: object, r: int, d: int) -> int:
         case "permute_copy.default":
             tensor_constraints.extend(
                 [
-                    cp.Dtype.In(lambda deps: [torch.float32, torch.int8, torch.uint8]),
+                    cp.Dtype.In(lambda deps: [torch.float32, torch.int32]),
                     cp.Rank.Le(
                         lambda deps: 5
                     ),  # xa_nn_transpose only supports up to 5D
@@ -391,12 +391,13 @@ def random_size_constraint(deps: object, r: int, d: int) -> int:
             tensor_constraints.extend(
                 [
                     cp.Dtype.In(lambda deps: [torch.float32, torch.int32]),
+                    cp.Value.Ge(lambda deps, dtype, struct: 0),
                 ]
             )
         case "div.Tensor_mode" | "minimum.default":
             if index == 0:
                 tensor_constraints = [
-                    cp.Dtype.In(lambda deps: [torch.int64, torch.int32, torch.float32]),
+                    cp.Dtype.In(lambda deps: [torch.int32, torch.float32]),
                     cp.Value.Ge(lambda deps, dtype, struct: -(2**4)),
                     cp.Value.Le(lambda deps, dtype, struct: 2**4),
                     cp.Rank.Ge(lambda deps: 1),
@@ -405,7 +406,7 @@ def random_size_constraint(deps: object, r: int, d: int) -> int:
                 ]
             else:
                 tensor_constraints = [
-                    cp.Dtype.In(lambda deps: [torch.int64, torch.int32, torch.float32]),
+                    cp.Dtype.In(lambda deps: [torch.int32, torch.float32]),
                     cp.Value.Ge(lambda deps, dtype, struct: -(2**4)),
                     cp.Value.Le(lambda deps, dtype, struct: 2**4),
                     cp.Value.Ne(
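
These constraints steer FACTO's random input generation: dtypes are narrowed to what the HiFi kernels actually handle, and value bounds keep generated elements in a kernel-safe range. A minimal sketch of the pattern, reusing only the constraint calls visible in the diff (the import path is an assumption based on facto_util.py):

    import torch
    # assumed import, matching how facto_util.py aliases FACTO's
    # ConstraintProducer; adjust if your FACTO layout differs
    from facto.inputgen.specs.model import ConstraintProducer as cp

    # narrow generated dtypes to kernel-supported ones and clamp
    # element values to a small range (pattern taken from the diff)
    tensor_constraints = [
        cp.Dtype.In(lambda deps: [torch.int32, torch.float32]),
        cp.Value.Ge(lambda deps, dtype, struct: -(2**4)),
        cp.Value.Le(lambda deps, dtype, struct: 2**4),
        cp.Rank.Ge(lambda deps: 1),
    ]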
