|
34 | 34 | QcomChipset, |
35 | 35 | QnnExecuTorchBackendType, |
36 | 36 | QnnExecuTorchHtpPerformanceMode, |
| 37 | + QnnExecuTorchLpaiTargetEnv, |
37 | 38 | QnnExecuTorchOpPackageOptions, |
38 | 39 | ) |
39 | 40 | from executorch.backends.qualcomm.utils.constants import ( |
40 | | - DSP_VERSION, |
41 | 41 | HEXAGON_SDK_ROOT, |
42 | 42 | HEXAGON_TOOLS_ROOT, |
43 | 43 | ) |
@@ -76,7 +76,7 @@ class QnnConfig: |
76 | 76 | backend (str): The target backend, such as htp, gpu, etc. QnnConfig will then parse this to type QnnExecuTorchBackendType. |
77 | 77 | soc_model (QcomChipset): The target Qualcomm System on Chip (SoC) model. |
78 | 78 | build_folder (str): Path to cmake binary directory for target platform, e.g., /path/to/build-android. |
79 | | - direct_build_folder (str): Path to cmake binary directory for direct_mode. E.g., path/to/build-hexagon. |
| 79 | + direct_build_folder (str): Path to cmake binary directory for direct_mode. E.g., path/to/build-direct. |
80 | 80 | target (str): Target platform for deployment. |
81 | 81 | online_prepare (bool): Compose QNN graph on device if set to True. |
82 | 82 | shared_buffer (bool): Enables usage of shared buffer(zero-copy mechanism) between application and backend for graph I/O during runtime. |
@@ -235,16 +235,14 @@ def __init__( |
235 | 235 | ) |
236 | 236 | self.runner = runner |
237 | 237 | if qnn_config.direct_build_folder: |
238 | | - required_env = [HEXAGON_SDK_ROOT, HEXAGON_TOOLS_ROOT, DSP_VERSION] |
| 238 | + required_env = [HEXAGON_SDK_ROOT, HEXAGON_TOOLS_ROOT] |
239 | 239 | assert all( |
240 | 240 | var in os.environ for var in required_env |
241 | 241 | ), f"Please ensure the following environment variables are set: {required_env}" |
242 | 242 | self.hexagon_sdk_root = os.getenv(HEXAGON_SDK_ROOT) |
243 | 243 | self.hexagon_tools_root = os.getenv(HEXAGON_TOOLS_ROOT) |
244 | | - self.dsp_arch = os.getenv(DSP_VERSION) |
245 | 244 | logging.info(f"{HEXAGON_SDK_ROOT}={self.hexagon_sdk_root}") |
246 | 245 | logging.info(f"{HEXAGON_TOOLS_ROOT}={self.hexagon_tools_root}") |
247 | | - logging.info(f"{DSP_VERSION}={self.dsp_arch}") |
248 | 246 | self.qnn_config = qnn_config |
249 | 247 | self.qnn_sdk = os.getenv("QNN_SDK_ROOT") |
250 | 248 | self.build_path = qnn_config.build_folder |
@@ -287,17 +285,25 @@ def __init__( |
287 | 285 | if self.direct_build_folder: |
288 | 286 | direct_general_artifacts = [ |
289 | 287 | f"{self.build_path}/examples/qualcomm/direct_executor_runner/libqnn_executorch_stub.so", |
290 | | - f"{self.direct_build_folder}/backends/qualcomm/libqnn_executorch_backend.so", |
291 | | - f"{self.direct_build_folder}/backends/qualcomm/qnn_executorch/direct_mode/libqnn_executorch_skel.so", |
292 | 288 | ] |
293 | 289 | self.backend_library_paths.update( |
294 | 290 | { |
295 | 291 | QnnExecuTorchBackendType.kHtpBackend: [ |
| 292 | + f"{self.direct_build_folder}/backends/qualcomm/libqnn_executorch_backend.so", |
| 293 | + f"{self.direct_build_folder}/backends/qualcomm/qnn_executorch/direct_mode/libqnn_executorch_skel.so", |
296 | 294 | f"{self.qnn_sdk}/lib/hexagon-v{self.htp_arch}/unsigned/libQnnHtpV{self.htp_arch}.so", |
297 | 295 | f"{self.qnn_sdk}/lib/hexagon-v{self.htp_arch}/unsigned/libQnnSystem.so", |
298 | 296 | f"{self.hexagon_tools_root}/Tools/target/hexagon/lib/v{self.htp_arch}/G0/pic/libc++abi.so.1", |
299 | 297 | f"{self.hexagon_tools_root}/Tools/target/hexagon/lib/v{self.htp_arch}/G0/pic/libc++.so.1", |
300 | | - ] |
| 298 | + ], |
| 299 | + QnnExecuTorchBackendType.kLpaiBackend: [ |
| 300 | + f"{self.qnn_sdk}/lib/lpai-v{self.lpai_hw_ver}/signed/libqnn_executorch_backend.so", |
| 301 | + f"{self.qnn_sdk}/lib/lpai-v{self.lpai_hw_ver}/signed/libqnn_executorch_skel.so", |
| 302 | + f"{self.qnn_sdk}/lib/lpai-v{self.lpai_hw_ver}/signed/libQnnLpai.so", |
| 303 | + f"{self.qnn_sdk}/lib/lpai-v{self.lpai_hw_ver}/signed/libQnnSystem.so", |
| 304 | + f"{self.qnn_sdk}/lib/lpai-v{self.lpai_hw_ver}/signed/libc++abi.so.1", |
| 305 | + f"{self.qnn_sdk}/lib/lpai-v{self.lpai_hw_ver}/signed/libc++.so.1", |
| 306 | + ], |
301 | 307 | } |
302 | 308 | ) |
303 | 309 | for _, library_paths in self.backend_library_paths.items(): |
@@ -378,6 +384,12 @@ def push( # noqa: C901 |
378 | 384 | # backend libraries |
379 | 385 | for backend in backends: |
380 | 386 | artifacts.extend(self.backend_library_paths[backend]) |
| 387 | + |
| 388 | + # Ensure that all necessary library artifacts exist. |
| 389 | + missing = [path for path in artifacts if not os.path.exists(path)] |
| 390 | + assert not missing, "Missing the following libraries:\n" + "\n".join( |
| 391 | + f" {p}" for p in missing |
| 392 | + ) |
381 | 393 | with tempfile.TemporaryDirectory() as tmp_dir: |
382 | 394 | input_list_file, input_files = generate_inputs( |
383 | 395 | tmp_dir, self.input_list_filename, inputs |
@@ -440,6 +452,13 @@ def execute( |
440 | 452 | ) |
441 | 453 | + self.extra_cmds |
442 | 454 | ) |
| 455 | + if self.qnn_config.direct_build_folder: |
| 456 | + qnn_executor_runner_args = " ".join( |
| 457 | + [ |
| 458 | + qnn_executor_runner_args, |
| 459 | + f"--domain_id {get_dsp_id(self.qnn_config.backend)}", |
| 460 | + ] |
| 461 | + ) |
443 | 462 | qnn_executor_runner_cmds = " ".join( |
444 | 463 | [ |
445 | 464 | f"cd {self.workspace} &&", |
@@ -526,7 +545,9 @@ def build_executorch_binary( |
526 | 545 | ): |
527 | 546 | raise RuntimeError("Currently LPAI backend only supports offline_prepare.") |
528 | 547 | backend_options = { |
529 | | - QnnExecuTorchBackendType.kLpaiBackend: generate_lpai_compiler_spec(), |
| 548 | + QnnExecuTorchBackendType.kLpaiBackend: generate_lpai_compiler_spec( |
| 549 | + target_env=get_lpai_target_env(qnn_config) |
| 550 | + ), |
530 | 551 | QnnExecuTorchBackendType.kGpuBackend: generate_gpu_compiler_spec(), |
531 | 552 | QnnExecuTorchBackendType.kHtpBackend: generate_htp_compiler_spec( |
532 | 553 | use_fp16=False if quant_dtype is not None else True, |
@@ -652,10 +673,31 @@ def make_quantizer( |
652 | 673 | return quantizer |
653 | 674 |
|
654 | 675 |
|
| 676 | +def get_lpai_target_env(qnn_config: QnnConfig): |
| 677 | + if qnn_config.enable_x86_64: |
| 678 | + return QnnExecuTorchLpaiTargetEnv.kX86 |
| 679 | + elif qnn_config.direct_build_folder: |
| 680 | + return QnnExecuTorchLpaiTargetEnv.kAdsp |
| 681 | + return QnnExecuTorchLpaiTargetEnv.kArm |
| 682 | + |
| 683 | + |
655 | 684 | def get_backend_type(backend: str): |
656 | 685 | return getattr(QnnExecuTorchBackendType, f"k{backend.title()}Backend") |
657 | 686 |
|
658 | 687 |
|
| 688 | +def get_dsp_id(backend): |
| 689 | + dsp_id_map = { |
| 690 | + QnnExecuTorchBackendType.kLpaiBackend: 0, |
| 691 | + QnnExecuTorchBackendType.kHtpBackend: 3, |
| 692 | + } |
| 693 | + if backend not in dsp_id_map: |
| 694 | + raise ValueError( |
| 695 | + f"Unsupported backend {backend} for direct mode. " |
| 696 | + f"Supported: {list(dsp_id_map.keys())}" |
| 697 | + ) |
| 698 | + return dsp_id_map[backend] |
| 699 | + |
| 700 | + |
659 | 701 | def setup_common_args_and_variables(): |
660 | 702 | parser = argparse.ArgumentParser() |
661 | 703 |
|
@@ -822,7 +864,7 @@ def setup_common_args_and_variables(): |
822 | 864 |
|
823 | 865 | parser.add_argument( |
824 | 866 | "--direct_build_folder", |
825 | | - help="Path to cmake binary directory for direct_mode. E.g., path/to/build-hexagon." |
| 867 | + help="Path to cmake binary directory for direct_mode. E.g., path/to/build-direct." |
826 | 868 | "If enabled, run self-defined protocol to control fastrpc communication.", |
827 | 869 | type=str, |
828 | 870 | ) |
|
0 commit comments