Skip to content

Commit b186ee1

Browse files
committed
Update on "rename slimtensor files to be consistent with executorch files"
This diff updates the slimtensor file names to follow ET style (use under_score instead of CamelCase). Note that we keep the c10 files in CamelCase since et/c10 files are also in CamelCase. Differential Revision: [D90143740](https://our.internmc.facebook.com/intern/diff/D90143740/) [ghstack-poisoned]
2 parents 81a06d5 + 1fc1f8f commit b186ee1

71 files changed

Lines changed: 1136 additions & 898 deletions

File tree

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

.ci/scripts/export_model_artifact.sh

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -106,10 +106,6 @@ case "$HF_MODEL" in
106106
PREPROCESSOR_OUTPUT=""
107107
;;
108108
nvidia/parakeet-tdt)
109-
if [ "$DEVICE" = "metal" ]; then
110-
echo "Error: Export for device 'metal' is not yet tested for model '$HF_MODEL'"
111-
exit 1
112-
fi
113109
MODEL_NAME="parakeet"
114110
TASK=""
115111
MAX_SEQ_LEN=""

.github/workflows/cuda.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -71,7 +71,7 @@ jobs:
7171
strategy:
7272
fail-fast: false
7373
matrix:
74-
model: [linear, add, add_mul, resnet18, conv1d, sdpa]
74+
model: [linear, add, add_mul, resnet18, conv1d, sdpa, mv2, mv3]
7575
with:
7676
timeout: 90
7777
runner: linux.g5.4xlarge.nvidia.gpu

.github/workflows/metal.yml

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -44,6 +44,8 @@ jobs:
4444
name: "whisper-small"
4545
- repo: "openai"
4646
name: "whisper-large-v3-turbo"
47+
- repo: "nvidia"
48+
name: "parakeet-tdt"
4749
quant:
4850
- "non-quantized"
4951
with:
@@ -92,6 +94,8 @@ jobs:
9294
name: "whisper-small"
9395
- repo: "openai"
9496
name: "whisper-large-v3-turbo"
97+
- repo: "nvidia"
98+
name: "parakeet-tdt"
9599
quant:
96100
- "non-quantized"
97101
with:

backends/aoti/common_shims_slim.cpp

Lines changed: 1 addition & 103 deletions
Original file line numberDiff line numberDiff line change
@@ -64,110 +64,8 @@ int32_t aoti_torch_layout_strided() {
6464
return 0;
6565
}
6666

67-
// ============================================================
68-
// Storage & Device Property Getters - Implementations
69-
// ============================================================
70-
71-
AOTITorchError aoti_torch_get_storage_offset(
72-
Tensor* tensor,
73-
int64_t* ret_storage_offset) {
74-
if (tensor == nullptr || ret_storage_offset == nullptr) {
75-
return Error::InvalidArgument;
76-
}
77-
*ret_storage_offset = tensor->storage_offset();
78-
return Error::Ok;
79-
}
80-
81-
AOTITorchError aoti_torch_get_storage_size(Tensor* tensor, int64_t* ret_size) {
82-
if (tensor == nullptr || ret_size == nullptr) {
83-
return Error::InvalidArgument;
84-
}
85-
*ret_size = static_cast<int64_t>(tensor->storage()->nbytes());
86-
return Error::Ok;
87-
}
88-
89-
AOTITorchError aoti_torch_get_device_type(
90-
Tensor* tensor,
91-
int32_t* ret_device_type) {
92-
if (tensor == nullptr || ret_device_type == nullptr) {
93-
return Error::InvalidArgument;
94-
}
95-
*ret_device_type = static_cast<int32_t>(tensor->device_type());
96-
return Error::Ok;
97-
}
98-
99-
AOTITorchError aoti_torch_get_device_index(
100-
Tensor* tensor,
101-
int32_t* ret_device_index) {
102-
if (tensor == nullptr || ret_device_index == nullptr) {
103-
return Error::InvalidArgument;
104-
}
105-
*ret_device_index = static_cast<int32_t>(tensor->device_index());
106-
return Error::Ok;
107-
}
108-
109-
// ============================================================
110-
// DType Constants - Implementations
111-
// ============================================================
112-
113-
int32_t aoti_torch_dtype_float32() {
114-
return 6; // ScalarType::Float
115-
}
116-
117-
int32_t aoti_torch_dtype_bfloat16() {
118-
return 15; // ScalarType::BFloat16
119-
}
120-
121-
int32_t aoti_torch_dtype_int64() {
122-
return 4; // ScalarType::Long
123-
}
124-
125-
int32_t aoti_torch_dtype_int32() {
126-
return 3; // ScalarType::Int
127-
}
128-
129-
int32_t aoti_torch_dtype_int16() {
130-
return 2; // ScalarType::Short
131-
}
132-
133-
int32_t aoti_torch_dtype_int8() {
134-
return 1; // ScalarType::Char
135-
}
136-
137-
int32_t aoti_torch_dtype_bool() {
138-
return 11; // ScalarType::Bool
139-
}
140-
141-
// ============================================================
142-
// Device Type Constants - Implementations
143-
// ============================================================
144-
145-
int32_t aoti_torch_device_type_cpu() {
146-
return 0; // DeviceType::CPU
147-
}
148-
149-
int32_t aoti_torch_device_type_cuda() {
150-
return 1; // DeviceType::CUDA
151-
}
152-
153-
// ============================================================
154-
// Grad Mode Functions - Implementations
155-
// ============================================================
156-
157-
bool aoti_torch_grad_mode_is_enabled() {
158-
// ExecuTorch doesn't support autograd
159-
return false;
160-
}
161-
162-
AOTITorchError aoti_torch_grad_mode_set_enabled(bool enabled) {
163-
if (enabled) {
164-
// ExecuTorch doesn't support autograd
165-
return Error::NotSupported;
166-
}
167-
return Error::Ok;
168-
}
169-
17067
} // extern "C"
68+
17169
} // namespace aoti
17270
} // namespace backends
17371
} // namespace executorch

backends/aoti/common_shims_slim.h

Lines changed: 1 addition & 42 deletions
Original file line numberDiff line numberDiff line change
@@ -50,49 +50,8 @@ aoti_torch_get_dim(Tensor* tensor, int64_t* ret_dim);
5050

5151
AOTI_SHIM_EXPORT int32_t aoti_torch_layout_strided();
5252

53-
// ============================================================
54-
// Storage & Device Property Getters - Declarations
55-
// ============================================================
56-
57-
AOTI_SHIM_EXPORT AOTITorchError
58-
aoti_torch_get_storage_offset(Tensor* tensor, int64_t* ret_storage_offset);
59-
60-
AOTI_SHIM_EXPORT AOTITorchError
61-
aoti_torch_get_storage_size(Tensor* tensor, int64_t* ret_size);
62-
63-
AOTI_SHIM_EXPORT AOTITorchError
64-
aoti_torch_get_device_type(Tensor* tensor, int32_t* ret_device_type);
65-
66-
AOTI_SHIM_EXPORT AOTITorchError
67-
aoti_torch_get_device_index(Tensor* tensor, int32_t* ret_device_index);
68-
69-
// ============================================================
70-
// DType Constants - Declarations
71-
// ============================================================
72-
73-
AOTI_SHIM_EXPORT int32_t aoti_torch_dtype_float32();
74-
AOTI_SHIM_EXPORT int32_t aoti_torch_dtype_bfloat16();
75-
AOTI_SHIM_EXPORT int32_t aoti_torch_dtype_int64();
76-
AOTI_SHIM_EXPORT int32_t aoti_torch_dtype_int32();
77-
AOTI_SHIM_EXPORT int32_t aoti_torch_dtype_int16();
78-
AOTI_SHIM_EXPORT int32_t aoti_torch_dtype_int8();
79-
AOTI_SHIM_EXPORT int32_t aoti_torch_dtype_bool();
80-
81-
// ============================================================
82-
// Device Type Constants - Declarations
83-
// ============================================================
84-
85-
AOTI_SHIM_EXPORT int32_t aoti_torch_device_type_cpu();
86-
AOTI_SHIM_EXPORT int32_t aoti_torch_device_type_cuda();
87-
88-
// ============================================================
89-
// Grad Mode Functions - Declarations
90-
// ============================================================
91-
92-
AOTI_SHIM_EXPORT bool aoti_torch_grad_mode_is_enabled();
93-
AOTI_SHIM_EXPORT AOTITorchError aoti_torch_grad_mode_set_enabled(bool enabled);
94-
9553
} // extern "C"
54+
9655
} // namespace aoti
9756
} // namespace backends
9857
} // namespace executorch

backends/aoti/slim/core/test/BUCK

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,4 @@
1+
load("@fbcode_macros//build_defs:build_file_migration.bzl", "fbcode_target", "non_fbcode_target")
2+
load("targets.bzl", "define_common_targets")
3+
4+
fbcode_target(_kind = define_common_targets,)

backends/aoti/slim/core/test/TARGETS

Lines changed: 0 additions & 3 deletions
This file was deleted.

backends/aoti/slim/core/test/test_slimtensor_copy.cpp

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,7 @@
1010

1111
#include <executorch/backends/aoti/slim/core/slim_tensor.h>
1212
#include <executorch/backends/aoti/slim/core/storage.h>
13+
#include <executorch/backends/aoti/slim/factory/empty.h>
1314

1415
namespace executorch::backends::aoti::slim {
1516

backends/aoti/slim/cuda/BUCK

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,7 @@
1+
load("@fbcode_macros//build_defs:build_file_migration.bzl", "fbcode_target", "non_fbcode_target")
2+
load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime")
3+
load(":targets.bzl", "define_common_targets")
4+
5+
oncall("executorch")
6+
7+
fbcode_target(_kind = define_common_targets,)

backends/aoti/slim/cuda/TARGETS

Lines changed: 0 additions & 6 deletions
This file was deleted.

0 commit comments

Comments
 (0)