op_mean.cpp (forked from pytorch/executorch)
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include <c10/util/irange.h>
#include <executorch/kernels/portable/cpu/util/kernel_ops_util.h>
#include <executorch/kernels/portable/cpu/util/reduce_util.h>
#include <executorch/runtime/kernel/kernel_includes.h>
#include <executorch/runtime/platform/assert.h>

namespace torch {
namespace executor {
namespace native {

using Tensor = executorch::aten::Tensor;
using ScalarType = executorch::aten::ScalarType;
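
// Out-variant of mean over a list of dimensions: reduces `in` over `dim_list`
// (every dimension when the list is empty or absent) and writes the result,
// in the floating-point dtype of `out`, to `out`.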
Tensor& mean_dim_out(
    KernelRuntimeContext& ctx,
    const Tensor& in,
    optional<ArrayRef<int64_t>> dim_list,
    bool keepdim,
    optional<ScalarType> dtype,
    Tensor& out) {
  (void)ctx;
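
  // Validate the arguments and dtypes, check dim order, and resize `out` to
  // the reduced shape before computing anything.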
  ET_KERNEL_CHECK(
      ctx,
      check_mean_dim_args(in, dim_list, keepdim, dtype, out),
      InvalidArgument,
      out);

  ET_KERNEL_CHECK(
      ctx, tensors_have_same_dim_order(in, out), InvalidArgument, out);

  ET_KERNEL_CHECK(ctx, tensor_is_default_dim_order(in), InvalidArgument, out);

  ET_KERNEL_CHECK(
      ctx,
      resize_reduction_out(in, dim_list, keepdim, out) == Error::Ok,
      InvalidArgument,
      out);

  // Fast path: contiguous tensor, single innermost dim reduction, same dtype.
  // Bypasses generic MapReduceOverDimListPlan to use a tight vectorizable loop.
  if (in.numel() > 0 && dim_list.has_value() && dim_list.value().size() == 1 &&
      in.scalar_type() == out.scalar_type()) {
    const int64_t d = dim_list.value()[0] < 0 ? dim_list.value()[0] + in.dim()
                                              : dim_list.value()[0];
    if (d >= 0 && d < in.dim() && d == in.dim() - 1 &&
        tensor_is_contiguous(in)) {
      const int64_t reduce_size = in.size(d);
      const int64_t outer_size = in.numel() / reduce_size;
      // @lint-ignore CLANGTIDY facebook-hte-CArray
      static constexpr const char op_name[] = "mean.out";
      ET_SWITCH_FLOATHBF16_TYPES(in.scalar_type(), ctx, op_name, CTYPE, [&] {
        const CTYPE* in_data = in.const_data_ptr<CTYPE>();
        CTYPE* out_data = out.mutable_data_ptr<CTYPE>();
        const CTYPE denom = static_cast<CTYPE>(reduce_size);
        for (int64_t i = 0; i < outer_size; i++) {
          const CTYPE* row = in_data + i * reduce_size;
          CTYPE acc = 0;
          for (int64_t j = 0; j < reduce_size; j++) {
            acc += row[j];
          }
          out_data[i] = acc / denom;
        }
      });
      return out;
    }
  }
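
  // Generic path: build a reduction plan over the requested dim list and, for
  // each output element, accumulate the mapped input values in the output
  // dtype before dividing by the number of reduced elements.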
  std::optional<MapReduceOverDimListPlan> plan;
  if (in.numel() > 0) {
    plan.emplace(in, dim_list);
  }

  // @lint-ignore CLANGTIDY facebook-hte-CArray
  static constexpr const char op_name[] = "mean.out";
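  // The input may be any real/bool/half/bfloat16 dtype while the output must
  // be a floating-point dtype, hence the nested type switches.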
  ET_SWITCH_REALHBBF16_TYPES(in.scalar_type(), ctx, op_name, CTYPE_IN, [&] {
    ET_SWITCH_FLOATHBF16_TYPES(out.scalar_type(), ctx, op_name, CTYPE_OUT, [&] {
      CTYPE_OUT* out_data = out.mutable_data_ptr<CTYPE_OUT>();
      const size_t num = get_reduced_dim_product(in, dim_list);
      const bool success = parallel_for_each_reduce_over_dim_list_output_index(
          in, dim_list, out, [&](const auto begin, const auto end) {
            for (const auto out_ix : c10::irange(begin, end)) {
              CTYPE_OUT sum = 0;
              if (plan.has_value()) {
                sum = plan->execute<CTYPE_IN, CTYPE_OUT>(
                    [](CTYPE_IN v) { return static_cast<CTYPE_OUT>(v); },
                    [](CTYPE_OUT outv, CTYPE_OUT acc) { return acc + outv; },
                    out_ix);
              }
              out_data[out_ix] = sum / static_cast<float>(num);
            }
          });
      ET_KERNEL_CHECK_MSG(ctx, success, Internal, , "parallel_for failed");
    });
  });

  return out;
}
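
// Full reduction variant: mean over every dimension of `in`, implemented by
// delegating to mean_dim_out with an empty dim list and keepdim = false.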
Tensor& mean_dtype_out(
    KernelRuntimeContext& ctx,
    const Tensor& in,
    optional<ScalarType> dtype,
    Tensor& out) {
  return mean_dim_out(ctx, in, ArrayRef<int64_t>(), false, dtype, out);
}

} // namespace native
} // namespace executor
} // namespace torch