forked from pytorch/executorch
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathCMakeLists.txt
More file actions
263 lines (226 loc) · 7.66 KB
/
CMakeLists.txt
File metadata and controls
263 lines (226 loc) · 7.66 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
#
# Simple CMake build system for selective build demo.
#
# ### Editing this file ###
#
# This file should be formatted with
# ~~~
# cmake-format -i CMakeLists.txt
# ~~~
# It should also be cmake-lint clean.
#
cmake_minimum_required(VERSION 3.24) # 3.24 is required for WHOLE_ARCHIVE
project(llama_runner)

# Duplicating options as root CMakeLists.txt
option(EXECUTORCH_BUILD_KERNELS_OPTIMIZED "Build the optimized kernels" OFF)

include(CMakeDependentOption)
#
# pthreadpool: build pthreadpool library. Disable on unsupported platforms
#
cmake_dependent_option(
  EXECUTORCH_BUILD_PTHREADPOOL "Build pthreadpool library." ON
  "NOT EXECUTORCH_BUILD_ARM_BAREMETAL" OFF
)
#
# cpuinfo: build cpuinfo library. Disable on unsupported platforms
#
cmake_dependent_option(
  EXECUTORCH_BUILD_CPUINFO "Build cpuinfo library." ON
  "NOT EXECUTORCH_BUILD_ARM_BAREMETAL" OFF
)
option(EXECUTORCH_BUILD_KERNELS_TORCHAO_MPS "Build the torchao MPS kernels" OFF)

# Default the Python interpreter when the caller did not provide one.
if(NOT PYTHON_EXECUTABLE)
  set(PYTHON_EXECUTABLE python3)
endif()

# Project roots, resolved relative to this directory (examples/.../llama).
set(EXECUTORCH_ROOT ${CMAKE_CURRENT_SOURCE_DIR}/../../..)
set(TORCH_ROOT ${EXECUTORCH_ROOT}/third-party/pytorch)

include(${EXECUTORCH_ROOT}/tools/cmake/Utils.cmake)
# NOTE(review): a second `if(NOT PYTHON_EXECUTABLE) resolve_python_executable()`
# guard used to follow the include above, but it was unreachable: the guard
# before it always sets PYTHON_EXECUTABLE to `python3`. Removed as dead code.
# If the Utils.cmake helper is the intended resolution path, delete the
# hard-coded `python3` fallback above instead — confirm with the root build.
# Default to C++17 unless the parent build already chose a standard.
if(NOT CMAKE_CXX_STANDARD)
  set(CMAKE_CXX_STANDARD 17)
  # Can't set to 11 due to executor_runner.cpp make_unique
endif()

# Detect an iOS cross-build from the toolchain file name. In a quoted CMake
# argument `\.` collapses to a plain `.` before MATCHES sees it, so the
# original pattern matched ANY character where a literal dot was intended;
# `\\.` survives argument parsing as a real escaped dot in the regex.
if(CMAKE_TOOLCHAIN_FILE MATCHES ".*(iOS|ios\\.toolchain)\\.cmake$")
  set(CMAKE_TOOLCHAIN_IOS ON)
else()
  set(CMAKE_TOOLCHAIN_IOS OFF)
endif()

# Compile options shared by every target defined in this file.
set(_common_compile_options -Wno-deprecated-declarations -fPIC)
# Let files say "include <executorch/path/to/header.h>".
set(_common_include_directories ${EXECUTORCH_ROOT}/..)
# For some reason android build is not able to find where gflags is and hence
# cannot find corresponding .cmake file
set(gflags_DIR ${CMAKE_CURRENT_BINARY_DIR}/../../../third-party/gflags)
find_package(gflags REQUIRED)
#
# llama_main: test binary to run llama, with tokenizer and sampler integrated
#
# find `executorch` libraries. CMAKE_PREFIX_PATH would work for host
# compilation, but CMAKE_FIND_ROOT_PATH appears to be necessary for
# cross-compiling (e.g., to Android) to work as well.
list(APPEND CMAKE_FIND_ROOT_PATH ${CMAKE_CURRENT_BINARY_DIR}/../../..)
find_package(executorch CONFIG REQUIRED FIND_ROOT_PATH_BOTH)
# Presumably applies WHOLE_ARCHIVE-style link options (see the 3.24
# requirement at the top of this file) so statically registered symbols are
# kept — helper comes from the executorch package/Utils.cmake.
executorch_target_link_options_shared_lib(executorch)
# llama_runner library
add_subdirectory(runner)
# Accumulator for everything llama_main links against; appended to by the
# backend/kernel sections below.
set(link_libraries executorch gflags)
set(_srcs main.cpp)
# Prefer the optimized CPU kernels when that target exists in this build;
# otherwise fall back to the portable reference kernels.
if(TARGET optimized_native_cpu_ops_lib)
  list(
    APPEND
    link_libraries
    optimized_native_cpu_ops_lib
    optimized_kernels
    portable_kernels
    cpublas
    eigen_blas
  )
  executorch_target_link_options_shared_lib(optimized_native_cpu_ops_lib)
else()
  list(APPEND link_libraries portable_ops_lib portable_kernels)
  executorch_target_link_options_shared_lib(portable_ops_lib)
endif()
# quantized_ops_lib: Register quantized op kernels into the runtime
if(TARGET quantized_ops_lib)
  list(APPEND link_libraries quantized_kernels quantized_ops_lib)
  # Only apply the shared-lib (whole-archive) link options when the target was
  # built in this tree; an IMPORTED prebuilt cannot take them here.
  get_target_property(_quantized_imported quantized_ops_lib IMPORTED)
  if(NOT _quantized_imported)
    executorch_target_link_options_shared_lib(quantized_ops_lib)
  endif()
endif()
# custom_ops: project-specific operators, registered the same way.
if(TARGET custom_ops)
  executorch_target_link_options_shared_lib(custom_ops)
  list(APPEND link_libraries custom_ops)
endif()
# torchao CPU kernels (target defined elsewhere in the build).
if(TARGET torchao_ops_executorch)
  executorch_target_link_options_shared_lib(torchao_ops_executorch)
  list(APPEND link_libraries torchao_ops_executorch)
endif()
# torchao MPS kernels: opt-in via option, and additionally gated on an
# arm64 macOS host plus the MPS backend being enabled.
if(EXECUTORCH_BUILD_KERNELS_TORCHAO_MPS)
  # Currently only enable this on Arm-based Macs
  if(CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND CMAKE_SYSTEM_PROCESSOR STREQUAL
                                             "arm64"
  )
    if(EXECUTORCH_BUILD_MPS)
      # Out-of-tree subdirectory: source dir and a mirrored binary dir.
      add_subdirectory(
        ${CMAKE_CURRENT_SOURCE_DIR}/../../../third-party/ao/torchao/experimental/ops/mps
        ${CMAKE_CURRENT_BINARY_DIR}/../../../third-party/ao/torchao/experimental/ops/mps
      )
      executorch_target_link_options_shared_lib(torchao_ops_mps_executorch)
      list(APPEND link_libraries torchao_ops_mps_executorch)
    endif()
  endif()
endif()
# XNNPACK checkout also vendors pthreadpool and cpuinfo headers used below.
set(XNNPACK_ROOT ${CMAKE_CURRENT_SOURCE_DIR}/../../../backends/xnnpack)
# Extra compile option and include dir for pthreadpool
if(EXECUTORCH_BUILD_PTHREADPOOL)
  list(APPEND link_libraries extension_threadpool pthreadpool)
  list(APPEND _common_include_directories
       ${XNNPACK_ROOT}/third-party/pthreadpool/include
  )
endif()
# Extra sources for cpuinfo
if(EXECUTORCH_BUILD_CPUINFO)
  # extension_threadpool may already be in the list from the pthreadpool
  # branch above; the duplicate is harmless for linking.
  list(APPEND link_libraries extension_threadpool cpuinfo)
  list(APPEND _common_include_directories
       ${XNNPACK_ROOT}/third-party/cpuinfo/include
  )
endif()
# XNNPACK
if(TARGET xnnpack_backend)
  set(xnnpack_backend_libs xnnpack_backend XNNPACK xnnpack-microkernels-prod)
  # KleidiAI micro-kernels are linked only when that target exists.
  if(TARGET kleidiai)
    list(APPEND xnnpack_backend_libs kleidiai)
  endif()
  list(APPEND link_libraries ${xnnpack_backend_libs})
  executorch_target_link_options_shared_lib(xnnpack_backend)
endif()
# CUDA backend
if(EXECUTORCH_BUILD_CUDA)
  find_package(CUDAToolkit REQUIRED)
  list(APPEND link_libraries aoti_cuda_backend)
  # NOTE(review): the shared-lib link options are skipped under MSVC —
  # presumably they are not supported by the MSVC linker; confirm.
  if(NOT MSVC)
    executorch_target_link_options_shared_lib(aoti_cuda_backend)
  endif()
endif()
# Vulkan backend
if(TARGET vulkan_backend)
  list(APPEND link_libraries vulkan_backend)
  executorch_target_link_options_shared_lib(vulkan_backend)
endif()
# Qnn backend
if(TARGET qnn_executorch_backend)
  list(APPEND link_libraries qnn_executorch_backend)
  executorch_target_link_options_shared_lib(qnn_executorch_backend)
endif()
# MPS backend
if(TARGET mpsdelegate)
  list(
    APPEND
    link_libraries
    mpsdelegate
    "-framework Foundation"
    # weak_framework: don't fail at load time on OS versions where the
    # Metal frameworks are unavailable.
    "-weak_framework MetalPerformanceShaders"
    "-weak_framework MetalPerformanceShadersGraph"
    "-weak_framework Metal"
  )
  executorch_target_link_options_shared_lib(mpsdelegate)
endif()
# MLX backend
if(TARGET mlxdelegate)
  list(APPEND link_libraries mlxdelegate mlx)
  executorch_target_link_options_shared_lib(mlxdelegate)
endif()
# Openvino backend
if(TARGET openvino_backend)
  find_package(OpenVINO REQUIRED)
  list(APPEND link_libraries openvino_backend)
  executorch_target_link_options_shared_lib(openvino_backend)
endif()
# CoreML delegate: needs sqlite plus Apple system frameworks.
if(TARGET coremldelegate)
  # Link the library find_library actually located instead of the bare
  # `sqlite3` name the original passed (which discarded SQLITE_LIBRARY and
  # relied on the linker's default search path). REQUIRED (CMake >= 3.18,
  # min here is 3.24) fails at configure time with a clear message when
  # sqlite3 is absent, rather than an opaque link error.
  find_library(SQLITE_LIBRARY sqlite3 REQUIRED)
  list(
    APPEND
    link_libraries
    coremldelegate
    ${SQLITE_LIBRARY}
    "-framework Foundation"
    "-framework CoreML"
    "-framework Accelerate"
  )
  executorch_target_link_options_shared_lib(coremldelegate)
endif()
# This one is needed for cpuinfo where it uses android specific log lib
if(ANDROID)
  list(APPEND link_libraries log)
endif()
# Final executable: main.cpp plus everything accumulated in link_libraries.
add_executable(llama_main ${_srcs})
# Copy MLX metallib for runtime if MLX delegate is enabled
if(TARGET mlxdelegate)
  executorch_target_copy_mlx_metallib(llama_main)
endif()
# Only strip symbols for Release and MinSizeRel builds.
# NOTE(review): CMAKE_BUILD_TYPE is empty under multi-config generators
# (Xcode, VS, Ninja Multi-Config), so stripping silently never happens
# there — confirm single-config generators are the only supported flow.
if(CMAKE_BUILD_TYPE STREQUAL "Release" OR CMAKE_BUILD_TYPE STREQUAL
                                          "MinSizeRel"
)
  target_link_options_gc_sections(llama_main)
  if(NOT APPLE)
    # LINKER:-s strips all symbols; Apple's ld does not accept -s this way.
    target_link_options(llama_main PRIVATE "LINKER:-s")
  endif()
endif()
target_include_directories(llama_main PUBLIC ${_common_include_directories})
target_link_libraries(llama_main PUBLIC llama_runner ${link_libraries})
target_compile_options(llama_main PUBLIC ${_common_compile_options})
# Embed an rpath so the binary finds shared libraries next to itself.
if(APPLE)
  target_link_options(llama_main PRIVATE -Wl,-rpath,@loader_path)
elseif(UNIX)
  set_target_properties(llama_main PROPERTIES LINK_FLAGS "-Wl,-rpath='$ORIGIN'")
endif()
# Windows doesn't need rpath - DLLs are found via standard Windows search order