Skip to content

Commit 7195314

Browse files
author
Github Executorch
committed
Fix TOB-EXECUTORCH-39, -42: validate tensor dimensions in XNNPACK compiler
Validate that dims array is non-null and num_dims matches the actual array size in defineTensor to prevent heap buffer overflows. Change flatbufferDimsToVector to return Result<> with null-check and per-dimension validation against a 16M limit to prevent unbounded memory allocation from malicious dimension values. Authored-with: Claude
1 parent 5e8a0df commit 7195314

1 file changed

Lines changed: 53 additions & 15 deletions

File tree

backends/xnnpack/runtime/XNNCompiler.cpp

Lines changed: 53 additions & 15 deletions
Original file line number | Diff line number | Diff line change
@@ -154,11 +154,15 @@ bool isQuantizedDataType(const xnn_datatype data_type) {
154154
Converts dims from uint32 to size_t. Takes in a flatbuffer vector
155155
of uint32_t and returns a std::vector of size_t. XNNPACK takes in
156156
dims of size_t* but tensor shape is serialized in flatbuffer as
157-
int32_t. As a result, we need to static cast the shapes to size_t
157+
uint32_t. As a result, we need to static cast the shapes to size_t.
158158
*/
159159
template <typename T = size_t>
160-
std::vector<T> flatbufferDimsToVector(
160+
Result<std::vector<T>> flatbufferDimsToVector(
161161
const flatbuffers::Vector<uint32_t>* fb_dims) {
162+
ET_CHECK_OR_RETURN_ERROR(
163+
fb_dims != nullptr,
164+
InvalidProgram,
165+
"flatbufferDimsToVector: dims vector is null");
162166
std::vector<T> dims_data;
163167
dims_data.reserve(fb_dims->size());
164168
for (auto fb_dim : *fb_dims) {
@@ -285,13 +289,24 @@ Error defineTensor(
285289
}
286290

287291
ET_CHECK_OR_RETURN_ERROR(
288-
tensor_value != nullptr,
289-
Internal,
290-
"Deserialized Tensor is Null, this should never happen");
292+
tensor_value != nullptr && tensor_value->dims() != nullptr,
293+
InvalidProgram,
294+
"Deserialized tensor is null, or tensor dims is null");
295+
296+
ET_CHECK_OR_RETURN_ERROR(
297+
tensor_value->num_dims() == tensor_value->dims()->size(),
298+
InvalidProgram,
299+
"Tensor num_dims %u does not match dims array size %u",
300+
tensor_value->num_dims(),
301+
tensor_value->dims()->size());
291302

292303
// Get tensor dims, here we need to use a vector in order
293304
// to properly convert the uint32_t* to size_t*
294-
std::vector<size_t> dims_data = flatbufferDimsToVector(tensor_value->dims());
305+
auto dims_result = flatbufferDimsToVector(tensor_value->dims());
306+
if (!dims_result.ok()) {
307+
return dims_result.error();
308+
}
309+
std::vector<size_t> dims_data = std::move(dims_result.get());
295310

296311
// XNNPACK Id
297312
uint32_t id = XNN_INVALID_VALUE_ID;
@@ -966,7 +981,12 @@ Error defineStaticTransposeNode(
966981
auto graph_node = node->xnode_union_as_XNNStaticTranspose();
967982

968983
// Get tensor dims, we need to convert the uint32_t* to size_t*
969-
std::vector<size_t> dims_data = flatbufferDimsToVector(graph_node->perm());
984+
auto dims_result = flatbufferDimsToVector(graph_node->perm());
985+
if (!dims_result.ok()) {
986+
return dims_result.error();
987+
}
988+
std::vector<size_t> dims_data = std::move(dims_result.get());
989+
970990
xnn_status status = xnn_define_static_transpose(
971991
subgraph_ptr,
972992
graph_node->num_dims(),
@@ -1031,10 +1051,16 @@ Error defineStaticConstantPadNode(
10311051
const fb_xnnpack::XNNStaticConstantPad* graph_node =
10321052
node->xnode_union_as_XNNStaticConstantPad();
10331053

1034-
std::vector<size_t> pre_paddings_dims =
1035-
flatbufferDimsToVector(graph_node->pre_paddings());
1036-
std::vector<size_t> post_paddings_dims =
1037-
flatbufferDimsToVector(graph_node->post_paddings());
1054+
auto pre_result = flatbufferDimsToVector(graph_node->pre_paddings());
1055+
if (!pre_result.ok()) {
1056+
return pre_result.error();
1057+
}
1058+
std::vector<size_t> pre_paddings_dims = std::move(pre_result.get());
1059+
auto post_result = flatbufferDimsToVector(graph_node->post_paddings());
1060+
if (!post_result.ok()) {
1061+
return post_result.error();
1062+
}
1063+
std::vector<size_t> post_paddings_dims = std::move(post_result.get());
10381064

10391065
xnn_status status = xnn_define_static_constant_pad(
10401066
subgraph_ptr,
@@ -1111,8 +1137,12 @@ Error defineStaticReshapeNode(
11111137
auto graph_node = node->xnode_union_as_XNNStaticReshape();
11121138

11131139
// Get tensor dims, we need to convert the uint32_t* to size_t*
1114-
std::vector<size_t> dims_data =
1115-
flatbufferDimsToVector(graph_node->new_shape());
1140+
auto dims_result = flatbufferDimsToVector(graph_node->new_shape());
1141+
if (!dims_result.ok()) {
1142+
return dims_result.error();
1143+
}
1144+
std::vector<size_t> dims_data = std::move(dims_result.get());
1145+
11161146
xnn_status status = xnn_define_static_reshape(
11171147
subgraph_ptr,
11181148
graph_node->num_dims(),
@@ -1406,8 +1436,16 @@ Error defineStaticSliceNode(
14061436

14071437
auto graph_node = node->xnode_union_as_XNNStaticSlice();
14081438

1409-
std::vector<size_t> offsets = flatbufferDimsToVector(graph_node->offsets());
1410-
std::vector<size_t> sizes = flatbufferDimsToVector(graph_node->sizes());
1439+
auto offsets_result = flatbufferDimsToVector(graph_node->offsets());
1440+
if (!offsets_result.ok()) {
1441+
return offsets_result.error();
1442+
}
1443+
std::vector<size_t> offsets = std::move(offsets_result.get());
1444+
auto sizes_result = flatbufferDimsToVector(graph_node->sizes());
1445+
if (!sizes_result.ok()) {
1446+
return sizes_result.error();
1447+
}
1448+
std::vector<size_t> sizes = std::move(sizes_result.get());
14111449

14121450
xnn_status status = xnn_define_static_slice(
14131451
subgraph_ptr,

0 commit comments

Comments (0)