 
 #include <cstdint>
 #include <cstring>
+#include <optional>
 #include <utility>
 #include <vector>
 
+#include <c10/util/safe_numerics.h>
+
 #include <executorch/backends/aoti/slim/c10/core/Contiguity.h>
 #include <executorch/backends/aoti/slim/c10/core/Device.h>
 #include <executorch/backends/aoti/slim/c10/core/ScalarType.h>
@@ -254,22 +257,113 @@ class SlimTensor {
   }
 
   /**
-   * Set sizes and strides together.
+   * Set sizes, strides, and storage offset together.
    */
-  void set_sizes_and_strides(IntArrayRef sizes, IntArrayRef strides) {
+  void set_sizes_and_strides(
+      IntArrayRef sizes,
+      IntArrayRef strides,
+      std::optional<int64_t> storage_offset = std::nullopt) {
+    const size_t new_dim = sizes.size();
     ET_CHECK_MSG(
-        sizes.size() == strides.size(),
-        "sizes (%zu) and strides (%zu) must have the same length",
-        sizes.size(),
+        new_dim == strides.size(),
+        "dimensionality of sizes (%zu) must match dimensionality of strides (%zu)",
+        new_dim,
         strides.size());
 
-    sizes_and_strides_.set_sizes(sizes);
-    sizes_and_strides_.set_strides(strides);
+    std::vector<int64_t> new_sizes = toVec(sizes);
+    std::vector<int64_t> new_strides = toVec(strides);
+
+    // Recompute strides: a non-negative input stride is kept as-is; a
+    // negative input stride marks that dimension as "unspecified" and is
+    // replaced with the contiguous stride for that position.
+    bool overflowed = false;
+    if (new_dim > 0) {
+      for (int64_t dim = static_cast<int64_t>(new_dim) - 1; dim >= 0; dim--) {
+        if (strides[dim] >= 0) {
+          new_strides[dim] = strides[dim];
+        } else {
+          // Unspecified stride: the innermost dimension gets stride 1; each
+          // outer dimension gets stride[dim + 1] * max(size[dim + 1], 1),
+          // where the max keeps size-0 dimensions from zeroing outer strides.
+          if (dim == static_cast<int64_t>(new_dim) - 1) {
+            new_strides[dim] = 1;
+          } else {
+            overflowed |= ::c10::mul_overflows(
+                new_strides[dim + 1],
+                std::max<int64_t>(new_sizes[dim + 1], 1),
+                &new_strides[dim]);
+          }
+        }
+      }
+    }
+    ET_CHECK_MSG(!overflowed, "Stride calculation overflowed");
+
+    sizes_and_strides_.set_sizes(makeArrayRef(new_sizes));
+    sizes_and_strides_.set_strides(makeArrayRef(new_strides));
+    if (storage_offset.has_value()) {
+      storage_offset_ = *storage_offset;
+    }
 
     refresh_numel();
     refresh_contiguous();
   }
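+
+  // Usage sketch (illustrative only; `t` and its shape are hypothetical).
+  // Reshape a 6-element tensor to 3x2 with explicit strides:
+  //   t.set_sizes_and_strides({3, 2}, {2, 1}, /*storage_offset=*/0);
+  // A negative stride asks for that dimension to be recomputed as contiguous:
+  //   t.set_sizes_and_strides({3, 2}, {-1, -1});  // strides become {2, 1}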
 
+  /**
+   * Set sizes to a contiguous layout (strides are computed automatically).
+   */
+  void set_sizes_contiguous(IntArrayRef sizes) {
+    std::vector<int64_t> contig_strides = compute_contiguous_strides(sizes);
+    set_sizes_and_strides(sizes, makeArrayRef(contig_strides));
+  }
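+
+  // Sketch of the expected effect (hypothetical shapes):
+  //   t.set_sizes_contiguous({4, 5});     // strides become {5, 1}
+  //   t.set_sizes_contiguous({2, 3, 4});  // strides become {12, 4, 1}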
+
+  // =========================================================================
+  // View Operations
+  // =========================================================================
+
+  /**
+   * Returns a view of the tensor with the specified sizes, strides, and
+   * storage offset. The returned tensor shares the same underlying storage.
+   *
+   * @param sizes The sizes of the view.
+   * @param strides The strides of the view.
+   * @param storage_offset Offset into the storage, in number of elements.
+   * @return A new SlimTensor that is a view of this tensor.
+   */
+  inline SlimTensor as_strided(
+      IntArrayRef sizes,
+      IntArrayRef strides,
+      int64_t storage_offset) const;
+
+  /**
+   * Overload for initializer lists.
+   */
+  inline SlimTensor as_strided(
+      std::initializer_list<int64_t> sizes,
+      std::initializer_list<int64_t> strides,
+      int64_t storage_offset) const {
+    return as_strided(
+        makeArrayRef(sizes), makeArrayRef(strides), storage_offset);
+  }
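+
+  // Usage sketch (hypothetical; `t` is a contiguous 2x3 tensor with strides
+  // {3, 1}):
+  //   SlimTensor col = t.as_strided({2}, {3}, 1);  // view of column 1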
+
+  /**
+   * Modifies this tensor in-place to have the specified sizes, strides, and
+   * storage offset. The underlying storage remains unchanged.
+   *
+   * @param sizes The new sizes.
+   * @param strides The new strides.
+   * @param storage_offset New offset into the storage, in number of elements.
+   * @return Reference to this tensor.
+   */
+  inline SlimTensor&
+  as_strided_(IntArrayRef sizes, IntArrayRef strides, int64_t storage_offset);
+
+  /**
+   * Overload for initializer lists.
+   */
+  inline SlimTensor& as_strided_(
+      std::initializer_list<int64_t> sizes,
+      std::initializer_list<int64_t> strides,
+      int64_t storage_offset) {
+    return as_strided_(
+        makeArrayRef(sizes), makeArrayRef(strides), storage_offset);
+  }
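+
+  // Usage sketch (hypothetical; `t` is a contiguous 2x3 tensor):
+  //   t.as_strided_({3, 2}, {1, 3}, 0);  // reinterpret t as its transpose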
+
   // =========================================================================
   // Copy Operation
   // =========================================================================
@@ -278,7 +372,7 @@ class SlimTensor {
    * Copy data from another tensor to this tensor.
    *
    * Both tensors must have the same numel and dtype.
-   * Supports CPU-to-CPU and cross-device copies (CPU↔CUDA, CUDA↔CUDA).
+   * Currently only supports CPU-to-CPU copy (contiguous tensors only).
    *
    * @param other The source tensor to copy from
    * @return Reference to this tensor
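+   *
+   * Usage sketch (hypothetical tensor names):
+   *   dst.copy_(src);  // same numel and dtype; contiguous CPU tensors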
@@ -371,3 +465,7 @@ class SlimTensor {
 };
 
 } // namespace executorch::backends::aoti::slim
+
+// Include the view-operation implementations (must come after the SlimTensor
+// class definition).
+#include <executorch/backends/aoti/slim/core/SlimTensorView-incl.h>