@@ -38,7 +38,6 @@ BUILD_DIR := build
 DIST_DIR := dist
 LLAMA_DIR := modules/llama.cpp
 LLAMA_BUILD := $(LLAMA_DIR)/build
-GGML_PREFIX := $(BUILD_DIR)/ggml
 TEST_DIR := test
 
 # Version from header
@@ -153,6 +152,7 @@ else ifeq ($(PLATFORM),ios-sim)
 	LDFLAGS := -dynamiclib -isysroot $(SDK) -arch arm64 -arch x86_64 -miphonesimulator-version-min=14.0 -framework Security
 endif
 
+# Base llama.cpp cmake options (minimal build - no curl, httplib, server, rpc)
 LLAMA_OPTIONS := $(LLAMA) \
 	-DCMAKE_BUILD_TYPE=Release \
 	-DBUILD_SHARED_LIBS=OFF \
@@ -167,30 +167,22 @@ LLAMA_OPTIONS := $(LLAMA) \
 # Conditional: Local embedding engine (llama.cpp)
 ifeq ($(OMIT_LOCAL_ENGINE),0)
   # Include llama.cpp
-  INCLUDES += -I$(GGML_PREFIX)/include
+  INCLUDES += -I$(LLAMA_DIR)/include -I$(LLAMA_DIR)/ggml/include
   C_SOURCES += $(SRC_DIR)/dbmem-lembed.c
 
-  # llama.cpp static libraries (Windows cmake doesn't add lib prefix to ggml libs)
-  ifeq ($(PLATFORM),windows)
-    LLAMA_LIBS := $(GGML_PREFIX)/lib/libllama.a \
-      $(GGML_PREFIX)/lib/ggml.a \
-      $(GGML_PREFIX)/lib/ggml-base.a \
-      $(GGML_PREFIX)/lib/ggml-cpu.a \
-      $(LLAMA_BUILD)/common/libcommon.a
-  else
-    LLAMA_LIBS := $(GGML_PREFIX)/lib/libllama.a \
-      $(GGML_PREFIX)/lib/libggml.a \
-      $(GGML_PREFIX)/lib/libggml-base.a \
-      $(GGML_PREFIX)/lib/libggml-cpu.a \
-      $(LLAMA_BUILD)/common/libcommon.a
-  endif
+  # llama.cpp static libraries (base set)
+  LLAMA_LIBS := $(LLAMA_BUILD)/src/libllama.a \
+    $(LLAMA_BUILD)/ggml/src/libggml.a \
+    $(LLAMA_BUILD)/ggml/src/libggml-base.a \
+    $(LLAMA_BUILD)/ggml/src/libggml-cpu.a \
+    $(LLAMA_BUILD)/common/libcommon.a
 
   # Platform-specific llama.cpp settings
   ifeq ($(PLATFORM),macos)
     LLAMA_OPTIONS += -DGGML_NATIVE=OFF -DGGML_OPENMP=OFF -DCMAKE_OSX_DEPLOYMENT_TARGET=11.0
     # Add Metal and BLAS libraries for macOS (cmake auto-detects and builds these)
-    LLAMA_LIBS += $(GGML_PREFIX)/lib/libggml-metal.a
-    LLAMA_LIBS += $(GGML_PREFIX)/lib/libggml-blas.a
+    LLAMA_LIBS += $(LLAMA_BUILD)/ggml/src/ggml-metal/libggml-metal.a
+    LLAMA_LIBS += $(LLAMA_BUILD)/ggml/src/ggml-blas/libggml-blas.a
     ifeq ($(ARCH),x86_64)
       LLAMA_OPTIONS += -DCMAKE_OSX_ARCHITECTURES="x86_64"
     else ifeq ($(ARCH),arm64)
@@ -228,24 +220,24 @@ ifeq ($(OMIT_LOCAL_ENGINE),0)
   else ifeq ($(PLATFORM),ios)
     LLAMA_OPTIONS += -DGGML_NATIVE=OFF -DGGML_OPENMP=OFF -DCMAKE_SYSTEM_NAME=iOS -DCMAKE_OSX_DEPLOYMENT_TARGET=14.0
     # Add Metal and BLAS libraries for iOS
-    LLAMA_LIBS += $(GGML_PREFIX)/lib/libggml-metal.a
-    LLAMA_LIBS += $(GGML_PREFIX)/lib/libggml-blas.a
+    LLAMA_LIBS += $(LLAMA_BUILD)/ggml/src/ggml-metal/libggml-metal.a
+    LLAMA_LIBS += $(LLAMA_BUILD)/ggml/src/ggml-blas/libggml-blas.a
     LDFLAGS := -dynamiclib -isysroot $(SDK) -arch arm64 -miphoneos-version-min=14.0 \
       -framework Metal -framework Foundation -framework Accelerate -framework CoreFoundation -framework Security \
       -ldl -lpthread -lm -headerpad_max_install_names
   else ifeq ($(PLATFORM),ios-sim)
     LLAMA_OPTIONS += -DGGML_NATIVE=OFF -DGGML_OPENMP=OFF -DCMAKE_SYSTEM_NAME=iOS -DCMAKE_OSX_SYSROOT=iphonesimulator -DCMAKE_OSX_DEPLOYMENT_TARGET=14.0 '-DCMAKE_OSX_ARCHITECTURES=x86_64;arm64'
     # Add Metal and BLAS libraries for iOS simulator
-    LLAMA_LIBS += $(GGML_PREFIX)/lib/libggml-metal.a
-    LLAMA_LIBS += $(GGML_PREFIX)/lib/libggml-blas.a
+    LLAMA_LIBS += $(LLAMA_BUILD)/ggml/src/ggml-metal/libggml-metal.a
+    LLAMA_LIBS += $(LLAMA_BUILD)/ggml/src/ggml-blas/libggml-blas.a
     LDFLAGS := -dynamiclib -isysroot $(SDK) -arch arm64 -arch x86_64 -miphonesimulator-version-min=14.0 \
       -framework Metal -framework Foundation -framework Accelerate -framework CoreFoundation -framework Security \
       -ldl -lpthread -lm -headerpad_max_install_names
   endif
 
   # Backend-specific libraries (detected from LLAMA cmake flags for explicit overrides)
   ifneq (,$(findstring GGML_VULKAN=ON,$(LLAMA)))
-    LLAMA_LIBS += $(GGML_PREFIX)/lib/libggml-vulkan.a
+    LLAMA_LIBS += $(LLAMA_BUILD)/ggml/src/ggml-vulkan/libggml-vulkan.a
     ifeq ($(PLATFORM),windows)
       ifdef VULKAN_SDK
         LDFLAGS += -L$(VULKAN_SDK)/lib -lvulkan-1
@@ -261,7 +253,7 @@ ifeq ($(OMIT_LOCAL_ENGINE),0)
     endif
   endif
   ifneq (,$(findstring GGML_OPENCL=ON,$(LLAMA)))
-    LLAMA_LIBS += $(GGML_PREFIX)/lib/libggml-opencl.a
+    LLAMA_LIBS += $(LLAMA_BUILD)/ggml/src/ggml-opencl/libggml-opencl.a
     LDFLAGS += -lOpenCL
   endif
 
@@ -303,24 +295,17 @@ extension: $(BUILD_DEPS) $(TARGET)
 
 # Build llama.cpp (only if not omitted)
 .PHONY: llama
-llama: $(GGML_PREFIX)/lib/libllama.a
+llama: $(LLAMA_BUILD)/src/libllama.a
 
-$(GGML_PREFIX)/lib/libllama.a:
+$(LLAMA_BUILD)/src/libllama.a:
 	@echo "Building llama.cpp with options: $(LLAMA_OPTIONS)"
-	@mkdir -p $(LLAMA_BUILD) $(GGML_PREFIX)
+	@mkdir -p $(LLAMA_BUILD)
 	cmake -B $(LLAMA_BUILD) $(LLAMA_OPTIONS) $(LLAMA_DIR)
 	cmake --build $(LLAMA_BUILD) --config Release -j$(CPUS)
-	cmake --install $(LLAMA_BUILD) --prefix $(GGML_PREFIX)
 	@echo "llama.cpp build complete"
 
-# All LLAMA_LIBS are installed by cmake --install (with or without lib prefix depending on platform)
-$(GGML_PREFIX)/lib/libggml.a $(GGML_PREFIX)/lib/libggml-base.a $(GGML_PREFIX)/lib/libggml-cpu.a $(GGML_PREFIX)/lib/libggml-metal.a $(GGML_PREFIX)/lib/libggml-blas.a $(GGML_PREFIX)/lib/libggml-vulkan.a $(GGML_PREFIX)/lib/libggml-opencl.a: $(GGML_PREFIX)/lib/libllama.a
-	@:
-$(GGML_PREFIX)/lib/ggml.a $(GGML_PREFIX)/lib/ggml-base.a $(GGML_PREFIX)/lib/ggml-cpu.a: $(GGML_PREFIX)/lib/libllama.a
-	@:
-
-# libcommon.a is not installed, reference it from build dir
-$(LLAMA_BUILD)/common/libcommon.a: $(GGML_PREFIX)/lib/libllama.a
+# All LLAMA_LIBS are built by the same cmake command as libllama.a
+$(LLAMA_BUILD)/ggml/src/libggml.a $(LLAMA_BUILD)/ggml/src/libggml-base.a $(LLAMA_BUILD)/ggml/src/libggml-cpu.a $(LLAMA_BUILD)/common/libcommon.a $(LLAMA_BUILD)/ggml/src/ggml-metal/libggml-metal.a $(LLAMA_BUILD)/ggml/src/ggml-blas/libggml-blas.a $(LLAMA_BUILD)/ggml/src/ggml-vulkan/libggml-vulkan.a $(LLAMA_BUILD)/ggml/src/ggml-opencl/libggml-opencl.a: $(LLAMA_BUILD)/src/libllama.a
 	@:
 
 # Create directories
0 commit comments