@@ -8,6 +8,10 @@
 
 #include <executorch/runtime/executor/tensor_parser.h>
 
+#include <climits>
+
+#include <c10/util/safe_numerics.h>
+
 #include <executorch/runtime/core/exec_aten/exec_aten.h>
 #include <executorch/runtime/core/exec_aten/util/dim_order_util.h>
 #include <executorch/runtime/core/exec_aten/util/scalar_type_util.h>
@@ -118,17 +122,25 @@ Result<Tensor> parseTensor(
     dim_order =
         const_cast<executorch::aten::DimOrderType*>(serialized_dim_order);
   }
-  // Validate sizes before using them in case the PTE data is bad. We can't
-  // detect bad positive values, but we can reject negative values, which would
-  // otherwise panic in the TensorImpl ctor. dim_order_to_stride() will validate
-  // dim_order.
+  // Validate sizes before using them in case the PTE data is bad. Reject
+  // negative values and check that the product of all dimensions doesn't
+  // overflow ssize_t, which would otherwise abort in the TensorImpl ctor.
+  // dim_order_to_stride() will validate dim_order.
+  ssize_t numel = 1;
   for (flatbuffers::uoffset_t i = 0; i < dim; i++) {
     ET_CHECK_OR_RETURN_ERROR(
         sizes[i] >= 0,
         InvalidProgram,
         "Negative size[%zu] %" PRId32,
         static_cast<size_t>(i),
         sizes[i]);
+    ssize_t next_numel;
+    ET_CHECK_OR_RETURN_ERROR(
+        !c10::mul_overflows(numel, static_cast<ssize_t>(sizes[i]), &next_numel),
+        InvalidProgram,
+        "Overflow computing numel at dim %zu",
+        static_cast<size_t>(i));
+    numel = next_numel;
   }
 
   // We will remove strides from schema.
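For reference, here is a minimal standalone sketch of the checked-accumulation pattern this change introduces: multiply the sizes one dimension at a time and bail out as soon as a factor is negative or the running product would overflow `ssize_t`. This is not the ExecuTorch code; the `checked_numel` helper is a hypothetical name introduced only for illustration, and the GCC/Clang `__builtin_mul_overflow` intrinsic stands in for `c10::mul_overflows` so the snippet has no c10 dependency.

```cpp
// Sketch only: accumulate the element count of a tensor shape while
// rejecting negative sizes and products that overflow ssize_t, mirroring
// the validation added in this commit. checked_numel is a hypothetical
// helper, not part of ExecuTorch.
#include <sys/types.h> // ssize_t
#include <cstdint>
#include <cstdio>
#include <vector>

// Returns false if any size is negative or the product overflows ssize_t.
bool checked_numel(const std::vector<int32_t>& sizes, ssize_t* out) {
  ssize_t numel = 1;
  for (int32_t s : sizes) {
    if (s < 0) {
      return false; // corresponds to the "Negative size" check
    }
    ssize_t next_numel;
    if (__builtin_mul_overflow(numel, static_cast<ssize_t>(s), &next_numel)) {
      return false; // corresponds to the "Overflow computing numel" check
    }
    numel = next_numel;
  }
  *out = numel;
  return true;
}

int main() {
  ssize_t numel = 0;
  // An ordinary shape: 2 x 3 x 4 -> 24 elements.
  if (checked_numel({2, 3, 4}, &numel)) {
    std::printf("numel = %zd\n", numel);
  }
  // A corrupt shape whose product (2^90) overflows 64-bit ssize_t is rejected.
  if (!checked_numel({1 << 30, 1 << 30, 1 << 30}, &numel)) {
    std::printf("rejected overflowing shape\n");
  }
  return 0;
}
```

`c10::mul_overflows` from `<c10/util/safe_numerics.h>` gives the same overflow-detecting multiply in a form that also covers compilers without the builtin, which is presumably why the commit uses it rather than calling the intrinsic directly.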