Skip to content

Commit 102d9fa

Browse files
Yolo26 example
1 parent fa81941 commit 102d9fa

10 files changed

Lines changed: 114 additions & 156 deletions

File tree

Lines changed: 20 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,10 @@
66
# LICENSE file in the root directory of this source tree.
77

88
set -ex
9+
10+
# NOTE: Run .ci/scripts/setup-openvino.sh before this script to download and
11+
# install OpenVINO when using the openvino backend.
12+
913
# shellcheck source=/dev/null
1014
source "$(dirname "${BASH_SOURCE[0]}")/utils.sh"
1115

@@ -50,21 +54,21 @@ PT2E_QUANTIZE="${PT2E_QUANTIZE:-}"
5054
# Default CMake Build Type to release mode
5155
CMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE:-Release}
5256

53-
if [[ $# -lt 5 ]]; then # Assuming 4 mandatory args
54-
echo "Expecting atleast 5 positional arguments"
55-
echo "Usage: [...]"
56-
fi
5757
if [[ -z "${MODEL_NAME:-}" ]]; then
5858
echo "Missing model name, exiting..."
5959
exit 1
6060
fi
6161

62-
6362
if [[ -z "${MODE:-}" ]]; then
6463
echo "Missing mode, choose openvino or xnnpack, exiting..."
6564
exit 1
6665
fi
6766

67+
if [[ -z "${VIDEO_PATH:-}" ]]; then
68+
echo "Missing video path, exiting..."
69+
exit 1
70+
fi
71+
6872
if [[ -z "${PYTHON_EXECUTABLE:-}" ]]; then
6973
PYTHON_EXECUTABLE=python3
7074
fi
@@ -75,21 +79,10 @@ if [[ "${MODE}" =~ .*openvino.* ]]; then
7579
OPENVINO=ON
7680
TARGET_LIBS="$TARGET_LIBS openvino_backend "
7781

78-
git clone https://github.com/openvinotoolkit/openvino.git
79-
cd openvino && git b16b776ac119dafda51f69a80f1e6b7376d02c3b
80-
git submodule update --init --recursive
81-
sudo ./install_build_dependencies.sh
82-
mkdir build && cd build
83-
cmake .. -DCMAKE_BUILD_TYPE=Release -DENABLE_PYTHON=ON
84-
make -j$(nproc)
85-
86-
cd ..
87-
cmake --install build --prefix dist
88-
89-
source dist/setupvars.sh
90-
cd ../backends/openvino
91-
pip install -r requirements.txt
92-
cd ../../
82+
# Use existing openvino installation from the root directory
83+
# setup-openvino.sh extracts OpenVINO into openvino/
84+
source openvino/setupvars.sh
85+
pip install -r backends/openvino/requirements.txt
9386
else
9487
OPENVINO=OFF
9588
fi
@@ -104,8 +97,8 @@ fi
10497
which "${PYTHON_EXECUTABLE}"
10598

10699

107-
DIR="examples/models/yolo12"
108-
$PYTHON_EXECUTABLE -m pip install -r ${DIR}/requirements.txt
100+
DIR="examples/models/yolo26"
101+
$PYTHON_EXECUTABLE -m pip install --upgrade-strategy only-if-needed -r ${DIR}/requirements.txt
109102

110103
cmake_install_executorch_libraries() {
111104
rm -rf cmake-out
@@ -146,7 +139,7 @@ cmake_install_executorch_libraries() {
146139
}
147140

148141
cmake_build_demo() {
149-
echo "Building yolo12 runner"
142+
echo "Building yolo26 runner"
150143
retry cmake \
151144
-DCMAKE_BUILD_TYPE="$CMAKE_BUILD_TYPE" \
152145
-DUSE_OPENVINO_BACKEND="$OPENVINO" \
@@ -181,17 +174,17 @@ EXPORT_ARGS="--model_name=${MODEL_NAME} --backend=${MODE}"
181174
# Add dynamically linked library location
182175
cmake_install_executorch_libraries
183176

184-
$PYTHON_EXECUTABLE -m examples.models.yolo12.export_and_validate ${EXPORT_ARGS}
177+
$PYTHON_EXECUTABLE -m examples.models.yolo26.export_and_validate ${EXPORT_ARGS}
185178

186179

187180
RUNTIME_ARGS="--model_path=${EXPORTED_MODEL_NAME} --input_path=${VIDEO_PATH}"
188181
# Check build tool.
189182
cmake_build_demo
190-
# Run yolo12 runner
183+
# Run yolo26 runner
191184
NOW=$(date +"%H:%M:%S")
192-
echo "Starting to run yolo12 runner at ${NOW}"
185+
echo "Starting to run yolo26 runner at ${NOW}"
193186
# shellcheck source=/dev/null
194-
cmake-out/examples/models/yolo12/Yolo12DetectionDemo ${RUNTIME_ARGS} > result.txt
187+
cmake-out/examples/models/yolo26/Yolo26DetectionDemo ${RUNTIME_ARGS} > result.txt
195188
NOW=$(date +"%H:%M:%S")
196189
echo "Finished at ${NOW}"
197190

README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -204,7 +204,7 @@ ExecuTorch powers on-device AI at scale across Meta's family of apps, VR/AR devi
204204

205205
**Multimodal:** [Llava](examples/models/llava/README.md) (vision-language), [Voxtral](examples/models/voxtral/README.md) (audio-language), [Gemma](examples/models/gemma3) (vision-language)
206206

207-
**Vision/Speech:** [MobileNetV2](https://github.com/meta-pytorch/executorch-examples/tree/main/mv2), [DeepLabV3](https://github.com/meta-pytorch/executorch-examples/tree/main/dl3), [Whisper](examples/models/whisper/README.md) <!-- @lint-ignore -->
207+
**Vision/Speech:** [MobileNetV2](https://github.com/meta-pytorch/executorch-examples/tree/main/mv2), [DeepLabV3](https://github.com/meta-pytorch/executorch-examples/tree/main/dl3), [YOLO26](examples/models/yolo26/README.md), [Whisper](examples/models/whisper/README.md) <!-- @lint-ignore -->
208208

209209
**Resources:** [`examples/`](examples/) directory • [executorch-examples](https://github.com/meta-pytorch/executorch-examples) out-of-tree demos • [Optimum-ExecuTorch](https://github.com/huggingface/optimum-executorch) for HuggingFace models • [Unsloth](https://docs.unsloth.ai/new/deploy-llms-phone) for fine-tuned LLM deployment <!-- @lint-ignore -->
210210

backends/openvino/quantizer/quantizer.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -192,6 +192,8 @@ def set_ignored_scope(
192192
:param validate: If set to True, then a RuntimeError will be raised if any ignored scope does not match
193193
in the model graph.
194194
"""
195+
if subgraphs:
196+
subgraphs = [nncf.Subgraph(inputs=subgraph[0], outputs=subgraph[1]) for subgraph in subgraphs]
195197
self._algo.set_ignored_scope(
196198
nncf.IgnoredScope(
197199
names=names or [],

examples/models/yolo12/requirements.txt

Lines changed: 0 additions & 2 deletions
This file was deleted.
Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
cmake_minimum_required(VERSION 3.5)
22

3-
project(Yolo12DetectionDemo VERSION 0.1)
3+
project(Yolo26DetectionDemo VERSION 0.1)
44

55
option(USE_OPENVINO_BACKEND "Build the tutorial with the OPENVINO backend" OFF)
66
option(USE_XNNPACK_BACKEND "Build the tutorial with the XNNPACK backend" OFF)
@@ -63,14 +63,14 @@ set(PROJECT_SOURCES
6363
${EXECUTORCH_ROOT}/extension/runner_util/inputs_portable.cpp
6464
)
6565

66-
add_executable(Yolo12DetectionDemo ${PROJECT_SOURCES})
66+
add_executable(Yolo26DetectionDemo ${PROJECT_SOURCES})
6767
target_link_libraries(
68-
Yolo12DetectionDemo PUBLIC ${link_libraries} ${OpenCV_LIBS} executorch_core
68+
Yolo26DetectionDemo PUBLIC ${link_libraries} ${OpenCV_LIBS} executorch_core
6969
extension_module extension_tensor
7070
)
7171

7272
find_package(Threads REQUIRED)
73-
target_link_libraries(Yolo12DetectionDemo PRIVATE Threads::Threads)
73+
target_link_libraries(Yolo26DetectionDemo PRIVATE Threads::Threads)
7474
target_include_directories(
75-
Yolo12DetectionDemo PUBLIC ${_common_include_directories}
75+
Yolo26DetectionDemo PUBLIC ${_common_include_directories}
7676
)
Lines changed: 11 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -1,22 +1,10 @@
1-
# YOLO12 Detection C++ Inference with ExecuTorch
1+
# YOLO26 Detection C++ Inference with ExecuTorch
22

3-
This example demonstrates how to perform inference of [YOLO12 family](https://docs.ultralytics.com/models/yolo12/) detection models in C++ leveraging the Executorch backends:
3+
This example demonstrates how to perform inference of [YOLO26 family](https://docs.ultralytics.com/models/yolo26/) detection models in C++ leveraging the Executorch backends:
44

55
- [OpenVINO](../../../backends/openvino/README.md)
66
- [XNNPACK](../../../backends/xnnpack/README.md)
77

8-
## Performance Evaluation
9-
10-
| CPU | Model | Backend | Device | Precision | Average Latency, ms |
11-
|--------------------------------|---------|----------|--------|-----------|---------------------|
12-
| Intel(R) Core(TM) Ultra 7 155H | yolo12s | openvino | CPU | FP32 | 88.3549 |
13-
| Intel(R) Core(TM) Ultra 7 155H | yolo12s | openvino | CPU | INT8 | 53.066 |
14-
| Intel(R) Core(TM) Ultra 7 155H | yolo12l | openvino | CPU | FP32 | 317.953 |
15-
| Intel(R) Core(TM) Ultra 7 155H | yolo12l | openvino | CPU | INT8 | 150.846 |
16-
| Intel(R) Core(TM) Ultra 7 155H | yolo12s | openvino | GPU | FP32 | 32.71 |
17-
| Intel(R) Core(TM) Ultra 7 155H | yolo12l | openvino | GPU | FP32 | 70.885 |
18-
| Intel(R) Core(TM) Ultra 7 155H | yolo12s | xnnpack | CPU | FP32 | 169.36 |
19-
| Intel(R) Core(TM) Ultra 7 155H | yolo12l | xnnpack | CPU | FP32 | 436.876 |
208

219
## Instructions
2210

@@ -38,32 +26,32 @@ The demo requires the `ultralytics` package, which depends on `torch` and `torch
3826
python -m pip install --upgrade-strategy only-if-needed -r requirements.txt
3927
```
4028

41-
### Step 4: Export the YOLO12 model to the ExecuTorch
29+
### Step 4: Export the YOLO26 model to the ExecuTorch
4230

4331
OpenVINO:
4432

4533
```bash
46-
python export_and_validate.py --model_name yolo12s --input_dims=[1920,1080] --backend openvino --device CPU
34+
python export_and_validate.py --model_name yolo26s --input_dims=[1920,1080] --backend openvino --device CPU
4735
```
4836

4937
OpenVINO quantized model:
5038

5139
```bash
52-
python export_and_validate.py --model_name yolo12s --input_dims=[1920,1080] --backend openvino --quantize --video_path /path/to/calibration/video --device CPU
40+
python export_and_validate.py --model_name yolo26s --input_dims=[1920,1080] --backend openvino --quantize --video_path /path/to/calibration/video --device CPU
5341
```
5442

5543
XNNPACK:
5644

5745
```bash
58-
python export_and_validate.py --model_name yolo12s --input_dims=[1920,1080] --backend xnnpack
46+
python export_and_validate.py --model_name yolo26s --input_dims=[1920,1080] --backend xnnpack
5947
```
6048

6149
> **_NOTE:_** Quantization for XNNPACK backend is WIP. Please refer to <https://github.com/pytorch/executorch/issues/11523> for more details.
6250
6351
Exported model could be validated using the `--validate` key:
6452

6553
```bash
66-
python export_and_validate.py --model_name yolo12s --backend ... --validate dataset_name.yaml
54+
python export_and_validate.py --model_name yolo26s --backend ... --validate dataset_name.yaml
6755
```
6856

6957
A list of available datasets and instructions on how to use a custom dataset can be found [here](https://docs.ultralytics.com/datasets/detect/).
@@ -80,7 +68,7 @@ python export_and_validate.py --help
8068
OpenVINO:
8169

8270
```bash
83-
cd examples/models/yolo12
71+
cd examples/models/yolo26
8472
mkdir build && cd build
8573
cmake -DCMAKE_BUILD_TYPE=Release -DUSE_OPENVINO_BACKEND=ON ..
8674
make -j$(nproc)
@@ -89,7 +77,7 @@ make -j$(nproc)
8977
XNNPACK:
9078

9179
```bash
92-
cd examples/models/yolo12
80+
cd examples/models/yolo26
9381
mkdir build && cd build
9482
cmake -DCMAKE_BUILD_TYPE=Release -DUSE_XNNPACK_BACKEND=ON ..
9583
make -j$(nproc)
@@ -98,13 +86,13 @@ make -j$(nproc)
9886
### Step 6: Run the demo
9987

10088
```bash
101-
./build/Yolo12DetectionDemo -model_path /path/to/exported/model -input_path /path/to/video/file -output_path /path/to/output/annotated/video
89+
./build/Yolo26DetectionDemo -model_path /path/to/exported/model -input_path /path/to/video/file -output_path /path/to/output/annotated/video
10290
```
10391

10492
To get a full parameters description please use the following command:
10593

10694
```bash
107-
./build/Yolo12DetectionDemo --help
95+
./build/Yolo26DetectionDemo --help
10896
```
10997

11098
## Credits

examples/models/yolo12/export_and_validate.py renamed to examples/models/yolo26/export_and_validate.py

Lines changed: 42 additions & 41 deletions
Original file line numberDiff line numberDiff line change
@@ -76,52 +76,50 @@ def lower_to_openvino(
7676
subset_size: int,
7777
quantize: bool,
7878
) -> ExecutorchProgramManager:
79-
# Import openvino locally to avoid nncf side-effects
80-
import nncf.torch
8179
from executorch.backends.openvino.partitioner import OpenvinoPartitioner
8280
from executorch.backends.openvino.quantizer import OpenVINOQuantizer
8381
from executorch.backends.openvino.quantizer.quantizer import QuantizationMode
82+
import nncf
8483
from nncf.experimental.torch.fx import quantize_pt2e
8584

86-
with nncf.torch.disable_patching():
87-
if quantize:
88-
target_input_dims = tuple(example_args[0].shape[2:])
89-
90-
def ext_transform_fn(sample):
91-
sample = transform_fn(sample)
92-
return pad_to_target(sample, target_input_dims)
85+
if quantize:
86+
target_input_dims = tuple(example_args[0].shape[2:])
9387

94-
quantizer = OpenVINOQuantizer(mode=QuantizationMode.INT8_TRANSFORMER)
95-
quantizer.set_ignored_scope(
96-
types=["mul", "sub", "sigmoid", "__getitem__"],
97-
)
98-
quantized_model = quantize_pt2e(
99-
aten_dialect.module(),
100-
quantizer,
101-
nncf.Dataset(calibration_dataset, ext_transform_fn),
102-
subset_size=subset_size,
103-
smooth_quant=True,
104-
fold_quantize=False,
105-
)
88+
def ext_transform_fn(sample):
89+
sample = transform_fn(sample)
90+
return pad_to_target(sample, target_input_dims)
10691

107-
aten_dialect = torch.export.export(quantized_model, example_args)
108-
# Convert to edge dialect and lower the module to the backend with a custom partitioner
109-
compile_spec = [CompileSpec("device", device.encode())]
110-
lowered_module: EdgeProgramManager = to_edge_transform_and_lower(
111-
aten_dialect,
112-
partitioner=[
113-
OpenvinoPartitioner(compile_spec),
114-
],
115-
compile_config=EdgeCompileConfig(
116-
_skip_dim_order=True,
117-
),
92+
quantizer = OpenVINOQuantizer(mode=QuantizationMode.INT8_MIXED)
93+
quantizer.set_ignored_scope(
94+
subgraphs=[(["detach", "detach_1", "detach_2"], ["output"])]
11895
)
119-
120-
# Apply backend-specific passes
121-
return lowered_module.to_executorch(
122-
config=executorch.exir.ExecutorchBackendConfig()
96+
quantized_model = quantize_pt2e(
97+
aten_dialect.module(),
98+
quantizer,
99+
nncf.Dataset(calibration_dataset, ext_transform_fn),
100+
subset_size=subset_size,
101+
fast_bias_correction=True,
102+
fold_quantize=False,
123103
)
124104

105+
aten_dialect = torch.export.export(quantized_model, example_args)
106+
# Convert to edge dialect and lower the module to the backend with a custom partitioner
107+
compile_spec = [CompileSpec("device", device.encode())]
108+
lowered_module: EdgeProgramManager = to_edge_transform_and_lower(
109+
aten_dialect,
110+
partitioner=[
111+
OpenvinoPartitioner(compile_spec),
112+
],
113+
compile_config=EdgeCompileConfig(
114+
_skip_dim_order=True,
115+
),
116+
)
117+
118+
# Apply backend-specific passes
119+
return lowered_module.to_executorch(
120+
config=executorch.exir.ExecutorchBackendConfig()
121+
)
122+
125123

126124
def lower_to_xnnpack(
127125
aten_dialect: ExportedProgram,
@@ -208,7 +206,7 @@ def main(
208206
Main function to load, quantize, and export a YOLO model.
209207
210208
:param model_name: The name of the YOLO model to load.
211-
:param input_dims: Input dims to use for the export of a YOLO12 model.
209+
:param input_dims: Input dims to use for the export of a YOLO26 model.
212210
:param quantize: Whether to quantize the model.
213211
:param video_path: Path to the video to use for the calibration
214212
:param subset_size: Subset size for the quantized model calibration. The default value is 300.
@@ -272,7 +270,7 @@ def transform_fn(frame):
272270
if input_dims != [640, 640]:
273271
raise NotImplementedError(
274272
f"Validation with the custom input shape {input_dims} is not implmenented."
275-
" Please use the default --input_dims=[640, 640] for the validation."
273+
" Please use the default --input_dims=[640,640] for the validation."
276274
)
277275
stats = validate_yolo(model, exec_prog, val_dataset_yaml_path)
278276
for stat, value in stats.items():
@@ -290,10 +288,13 @@ def _prepare_validation(
290288
} # highest priority args on the right
291289

292290
validator = model._smart_load("validator")(args=args, _callbacks=model.callbacks)
291+
validator.device = torch.device("cpu")
293292
stride = 32 # default stride
294293
validator.stride = stride # used in get_dataloader() for padding
295294
validator.data = check_det_dataset(dataset_yaml_path)
296295
validator.init_metrics(unwrap_model(model))
296+
validator.device = torch.device("cpu")
297+
validator.end2end = False
297298

298299
data_loader = validator.get_dataloader(
299300
validator.data.get(validator.args.split), validator.args.batch
@@ -337,9 +338,9 @@ def validate_yolo(
337338
parser.add_argument(
338339
"--model_name",
339340
type=str,
340-
default="yolo12s",
341-
choices=["yolo12n", "yolo12s", "yolo12m", "yolo12l", "yolo12x"],
342-
help="Ultralytics yolo12 model name.",
341+
default="yolo26s",
342+
choices=["yolo26n", "yolo26s", "yolo26m", "yolo26l", "yolo26x"],
343+
help="Ultralytics yolo26 model name.",
343344
)
344345
parser.add_argument(
345346
"--input_dims",

0 commit comments

Comments
 (0)