-
Notifications
You must be signed in to change notification settings - Fork 1k
Expand file tree
/
Copy pathtensor_parser_test.cpp
More file actions
287 lines (247 loc) · 10.8 KB
/
tensor_parser_test.cpp
File metadata and controls
287 lines (247 loc) · 10.8 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <executorch/runtime/executor/tensor_parser.h>
#include <cstring>
#include <executorch/extension/data_loader/file_data_loader.h>
#include <executorch/runtime/core/exec_aten/exec_aten.h>
#include <executorch/runtime/core/tensor_layout.h>
#include <executorch/runtime/executor/test/managed_memory_manager.h>
#include <executorch/schema/program_generated.h>
#include <gtest/gtest.h>
using namespace ::testing;
using executorch::aten::ScalarType;
using executorch::aten::Tensor;
using executorch::runtime::BoxedEvalueList;
using executorch::runtime::Error;
using executorch::runtime::EValue;
using executorch::runtime::FreeableBuffer;
using executorch::runtime::Program;
using executorch::runtime::Result;
using executorch::runtime::Span;
using executorch::runtime::TensorLayout;
using executorch::runtime::deserialization::parseListOptionalType;
using executorch::runtime::deserialization::parseTensor;
using executorch::runtime::deserialization::parseTensorList;
using executorch::runtime::deserialization::validateTensorLayout;
using executorch::runtime::testing::ManagedMemoryManager;
using torch::executor::util::FileDataLoader;
// Pool sizes (in bytes) for the ManagedMemoryManager instances used by these
// tests: one pool for non-constant tensor data, one for runtime structures.
constexpr size_t kDefaultNonConstMemBytes = 32 * 1024U;
constexpr size_t kDefaultRuntimeMemBytes = 32 * 1024U;
// Fixture that opens the serialized ModuleAdd (float) and ModuleAddHalf
// programs. The .pte file locations come from the ET_MODULE_ADD_PATH and
// ET_MODULE_ADD_HALF_PATH environment variables set by the test harness.
class TensorParserTest : public ::testing::Test {
 protected:
  void SetUp() override {
    // Open the serialized ModuleAdd (float) program.
    const char* add_path = std::getenv("ET_MODULE_ADD_PATH");
    Result<FileDataLoader> add_loader = FileDataLoader::from(add_path);
    ASSERT_EQ(add_loader.error(), Error::Ok);
    float_loader_ =
        std::make_unique<FileDataLoader>(std::move(add_loader.get()));
    // Open the serialized ModuleAddHalf program.
    const char* add_half_path = std::getenv("ET_MODULE_ADD_HALF_PATH");
    Result<FileDataLoader> add_half_loader =
        FileDataLoader::from(add_half_path);
    ASSERT_EQ(add_half_loader.error(), Error::Ok);
    half_loader_ =
        std::make_unique<FileDataLoader>(std::move(add_half_loader.get()));
  }
  std::unique_ptr<FileDataLoader> float_loader_;
  std::unique_ptr<FileDataLoader> half_loader_;
};
namespace executorch {
namespace runtime {
namespace testing {
// Provides access to private Program methods.
// NOTE(review): reads Program::internal_program_, a private member, so this
// relies on a friend declaration inside Program — confirm it stays in sync.
class ProgramTestFriend final {
 public:
  // Returns the raw flatbuffer-backed program underlying `program`.
  const static executorch_flatbuffer::Program* GetInternalProgram(
      const Program* program) {
    return program->internal_program_;
  }
};
} // namespace testing
} // namespace runtime
} // namespace executorch
using executorch::runtime::testing::ProgramTestFriend;
void test_module_add(
std::unique_ptr<FileDataLoader>& loader,
ScalarType scalar_type,
int type_size) {
Result<Program> program =
Program::load(loader.get(), Program::Verification::Minimal);
EXPECT_EQ(program.error(), Error::Ok);
const Program* program_ = &program.get();
ManagedMemoryManager mmm(kDefaultNonConstMemBytes, kDefaultRuntimeMemBytes);
const executorch_flatbuffer::Program* internal_program =
ProgramTestFriend::GetInternalProgram(program_);
executorch_flatbuffer::ExecutionPlan* execution_plan =
internal_program->execution_plan()->GetMutableObject(0);
auto flatbuffer_values = execution_plan->values();
int tensor_count = 0;
int double_count = 0;
for (size_t i = 0; i < flatbuffer_values->size(); ++i) {
auto serialization_value = flatbuffer_values->Get(i);
if (serialization_value->val_type() ==
executorch_flatbuffer::KernelTypes::Tensor) {
tensor_count++;
Result<Tensor> tensor = parseTensor(
program_, &mmm.get(), serialization_value->val_as_Tensor());
Tensor t = tensor.get();
ASSERT_EQ(scalar_type, t.scalar_type());
ASSERT_EQ(2, t.dim()); // [2, 2]
ASSERT_EQ(4, t.numel());
ASSERT_EQ(type_size * t.numel(), t.nbytes());
} else if (
serialization_value->val_type() ==
executorch_flatbuffer::KernelTypes::Double) {
double_count++;
ASSERT_EQ(1.0, serialization_value->val_as_Double()->double_val());
}
}
ASSERT_EQ(3, tensor_count); // input x2, output
ASSERT_EQ(2, double_count); // alpha x2
}
// Parses the float ModuleAdd program and expects Float tensors of float size.
TEST_F(TensorParserTest, TestModuleAddFloat) {
  test_module_add(float_loader_, ScalarType::Float, sizeof(float));
}
// Parses the half-precision ModuleAdd program and expects Half tensors of
// Half size.
TEST_F(TensorParserTest, TestModuleAddHalf) {
  test_module_add(
      half_loader_, ScalarType::Half, sizeof(executorch::aten::Half));
}
// Verifies memory behavior of mutable (stateful) tensors: re-parsing with the
// same MemoryManager aliases the same buffer (and reloads the serialized
// value), while parsing with a different MemoryManager yields distinct memory
// holding the freshly loaded value.
TEST_F(TensorParserTest, TestMutableState) {
  // Load the serialized ModuleSimpleTrain data.
  const char* path = std::getenv("ET_MODULE_SIMPLE_TRAIN_PATH");
  Result<FileDataLoader> train_loader = FileDataLoader::from(path);
  ASSERT_EQ(train_loader.error(), Error::Ok);
  Result<Program> program =
      Program::load(&train_loader.get(), Program::Verification::Minimal);
  // ASSERT (not EXPECT): the test dereferences `program` below, so it must
  // not continue after a failed load.
  ASSERT_EQ(program.error(), Error::Ok);
  ManagedMemoryManager mmm(kDefaultNonConstMemBytes, kDefaultRuntimeMemBytes);
  ManagedMemoryManager mmm_copy(
      kDefaultNonConstMemBytes, kDefaultRuntimeMemBytes);
  const executorch_flatbuffer::Program* internal_program =
      ProgramTestFriend::GetInternalProgram(&program.get());
  executorch_flatbuffer::ExecutionPlan* execution_plan =
      internal_program->execution_plan()->GetMutableObject(0);
  auto flatbuffer_values = execution_plan->values();
  size_t num_mutable_tensors = 0;
  for (size_t i = 0; i < flatbuffer_values->size(); ++i) {
    auto serialization_value = flatbuffer_values->Get(i);
    // Mutable state tensors carry an allocation_info and a non-zero data
    // buffer index.
    if (serialization_value->val_type() ==
            executorch_flatbuffer::KernelTypes::Tensor &&
        serialization_value->val_as_Tensor()->allocation_info() != nullptr &&
        serialization_value->val_as_Tensor()->data_buffer_idx() > 0) {
      num_mutable_tensors++;
      Result<torch::executor::Tensor> tensor = parseTensor(
          &program.get(), &mmm.get(), serialization_value->val_as_Tensor());
      ASSERT_EQ(tensor.error(), Error::Ok);
      torch::executor::Tensor t = tensor.get();
      float loaded_value = t.const_data_ptr<float>()[0];
      ASSERT_NE(nullptr, t.const_data_ptr());
      ASSERT_NE(t.mutable_data_ptr<float>()[0], 0.5);
      t.mutable_data_ptr<float>()[0] = 0.5;
      ASSERT_EQ(
          t.mutable_data_ptr<float>()[0],
          0.5); // 0.5 can be represented perfectly by float so EQ and NE work
                // fine here. Any power of 2 rational can be perfectly
                // represented. See dyadic rationals for more info.
      // Load the same tensor using the same mem manager and show the value is
      // updated again.
      Result<torch::executor::Tensor> tensor1_alias = parseTensor(
          &program.get(), &mmm.get(), serialization_value->val_as_Tensor());
      ASSERT_EQ(tensor1_alias.error(), Error::Ok);
      // BUGFIX: t2 must come from tensor1_alias (the re-parse), not from the
      // original `tensor` Result; otherwise tensor1_alias is unused and the
      // aliasing assertions below only pass by accident.
      torch::executor::Tensor t2 = tensor1_alias.get();
      ASSERT_NE(t2.mutable_data_ptr<float>()[0], 0.5);
      // Show the tensors are equivalent
      ASSERT_EQ(t.const_data_ptr(), t2.const_data_ptr());
      // Set mutable tensor value back to 0.5 since it got overwritten by second
      // parse.
      t.mutable_data_ptr<float>()[0] = 0.5;
      // Load the same tensor using a different mem manager and show the value
      // is not the same as t.
      Result<torch::executor::Tensor> tensor_new = parseTensor(
          &program.get(),
          &mmm_copy.get(),
          serialization_value->val_as_Tensor());
      ASSERT_EQ(tensor_new.error(), Error::Ok);
      torch::executor::Tensor t3 = tensor_new.get();
      ASSERT_NE(t3.mutable_data_ptr<float>()[0], 0.5);
      ASSERT_NE(t3.const_data_ptr(), t.const_data_ptr());
      ASSERT_EQ(loaded_value, t3.const_data_ptr<float>()[0]);
    }
  }
  ASSERT_EQ(num_mutable_tensors, 2);
}
// Tests that validateTensorLayout rejects tensors whose dim_order has fewer
// entries than sizes, preventing out-of-bounds reads.
TEST(ValidateTensorLayoutTest, DimOrderSizeMismatchIsRejected) {
  // Serialize a tensor with a 3-element sizes vector but only a single
  // dim_order entry.
  flatbuffers::FlatBufferBuilder fbb;
  const std::vector<int32_t> serialized_sizes = {2, 3, 4};
  const std::vector<uint8_t> truncated_dim_order = {0};
  auto sizes_offset = fbb.CreateVector(serialized_sizes);
  auto dim_order_offset = fbb.CreateVector(truncated_dim_order);
  fbb.Finish(executorch_flatbuffer::CreateTensor(
      fbb,
      executorch_flatbuffer::ScalarType::FLOAT,
      0,
      sizes_offset,
      dim_order_offset));
  const auto* s_tensor = flatbuffers::GetRoot<executorch_flatbuffer::Tensor>(
      fbb.GetBufferPointer());
  // Build the reference layout with a complete dim_order.
  const std::vector<int32_t> expected_sizes = {2, 3, 4};
  const std::vector<uint8_t> expected_dim_order = {0, 1, 2};
  auto layout = TensorLayout::create(
      Span<const int32_t>(expected_sizes.data(), expected_sizes.size()),
      Span<const uint8_t>(expected_dim_order.data(), expected_dim_order.size()),
      ScalarType::Float);
  ASSERT_TRUE(layout.ok());
  // The truncated dim_order must be reported as invalid external data.
  EXPECT_EQ(
      validateTensorLayout(s_tensor, layout.get()), Error::InvalidExternalData);
}
// Helper to construct a flatbuffers::Vector<int32_t> from raw data.
// FlatBuffer vectors are stored as [uint32_t length][T elements...].
namespace {
struct FlatVectorInt32 {
  // Serializes `elements` into `buf` in flatbuffer wire format and returns a
  // flatbuffers::Vector view over the bytes. The returned pointer aliases
  // `buf`, so `buf` must outlive all uses of it.
  static const flatbuffers::Vector<int32_t>* create(
      std::vector<uint8_t>& buf,
      const std::vector<int32_t>& elements) {
    const uint32_t count = static_cast<uint32_t>(elements.size());
    const size_t payload_bytes = elements.size() * sizeof(int32_t);
    buf.resize(sizeof(uint32_t) + payload_bytes);
    // Leading uint32_t holds the element count.
    memcpy(buf.data(), &count, sizeof(count));
    // Element payload follows immediately after the length prefix.
    if (!elements.empty()) {
      memcpy(buf.data() + sizeof(uint32_t), elements.data(), payload_bytes);
    }
    return reinterpret_cast<const flatbuffers::Vector<int32_t>*>(buf.data());
  }
};
} // namespace
// parseTensorList should return an error when the EValue at the given index
// is not a Tensor, instead of aborting.
TEST_F(TensorParserTest, ParseTensorListRejectsNonTensorEValue) {
  ManagedMemoryManager mmm(kDefaultNonConstMemBytes, kDefaultRuntimeMemBytes);
  // Index 0 deliberately points at an Int EValue rather than a Tensor.
  EValue evalues[2];
  evalues[0] = EValue(static_cast<int64_t>(42)); // Int, not Tensor
  evalues[1] = EValue(static_cast<int64_t>(7));
  // Serialize an index vector containing only {0}.
  std::vector<uint8_t> index_storage;
  const auto* index_vec = FlatVectorInt32::create(index_storage, {0});
  // The type mismatch must surface as an error, not an abort.
  auto parsed = parseTensorList(index_vec, evalues, 2, &mmm.get());
  EXPECT_EQ(parsed.error(), Error::InvalidType);
}
// parseListOptionalType should return an error when the EValue at the given
// index is neither None nor the expected type.
TEST_F(TensorParserTest, ParseListOptionalTypeRejectsWrongType) {
  ManagedMemoryManager mmm(kDefaultNonConstMemBytes, kDefaultRuntimeMemBytes);
  // Index 0 deliberately points at an Int EValue (neither Tensor nor None).
  EValue evalues[2];
  evalues[0] = EValue(static_cast<int64_t>(42)); // Int, not Tensor or None
  evalues[1] = EValue(static_cast<int64_t>(7));
  // Serialize an index vector containing only {0}.
  std::vector<uint8_t> index_storage;
  const auto* index_vec = FlatVectorInt32::create(index_storage, {0});
  // The wrong element type must surface as an error, not an abort.
  auto parsed = parseListOptionalType<Tensor>(index_vec, evalues, 2, &mmm.get());
  EXPECT_EQ(parsed.error(), Error::InvalidType);
}