@@ -170,12 +170,20 @@ ifeq ($(OMIT_LOCAL_ENGINE),0)
170170 INCLUDES += -I$(GGML_PREFIX)/include
171171 C_SOURCES += $(SRC_DIR)/dbmem-lembed.c
172172
173- # llama.cpp static libraries
174- LLAMA_LIBS := $(GGML_PREFIX)/lib/libllama.a \
175- $(GGML_PREFIX)/lib/libggml.a \
176- $(GGML_PREFIX)/lib/libggml-base.a \
177- $(GGML_PREFIX)/lib/libggml-cpu.a \
178- $(LLAMA_BUILD)/common/libcommon.a
173+ # llama.cpp static libraries (Windows cmake doesn't add lib prefix to ggml libs)
174+ ifeq ($(PLATFORM),windows)
175+ LLAMA_LIBS := $(GGML_PREFIX)/lib/libllama.a \
176+ $(GGML_PREFIX)/lib/ggml.a \
177+ $(GGML_PREFIX)/lib/ggml-base.a \
178+ $(GGML_PREFIX)/lib/ggml-cpu.a \
179+ $(LLAMA_BUILD)/common/libcommon.a
180+ else
181+ LLAMA_LIBS := $(GGML_PREFIX)/lib/libllama.a \
182+ $(GGML_PREFIX)/lib/libggml.a \
183+ $(GGML_PREFIX)/lib/libggml-base.a \
184+ $(GGML_PREFIX)/lib/libggml-cpu.a \
185+ $(LLAMA_BUILD)/common/libcommon.a
186+ endif
179187
180188 # Platform-specific llama.cpp settings
181189 ifeq ($(PLATFORM),macos)
@@ -305,9 +313,11 @@ $(GGML_PREFIX)/lib/libllama.a:
305313 cmake --install $(LLAMA_BUILD) --prefix $(GGML_PREFIX)
306314 @echo " llama.cpp build complete"
307315
308- # All LLAMA_LIBS are installed by cmake --install
316+ # All LLAMA_LIBS are installed by cmake --install (with or without lib prefix depending on platform)
309317 $(GGML_PREFIX)/lib/libggml.a $(GGML_PREFIX)/lib/libggml-base.a $(GGML_PREFIX)/lib/libggml-cpu.a $(GGML_PREFIX)/lib/libggml-metal.a $(GGML_PREFIX)/lib/libggml-blas.a $(GGML_PREFIX)/lib/libggml-vulkan.a $(GGML_PREFIX)/lib/libggml-opencl.a: $(GGML_PREFIX)/lib/libllama.a
310318 @:
319+ $(GGML_PREFIX)/lib/ggml.a $(GGML_PREFIX)/lib/ggml-base.a $(GGML_PREFIX)/lib/ggml-cpu.a: $(GGML_PREFIX)/lib/libllama.a
320+ @:
311321
312322# libcommon.a is not installed, reference it from build dir
313323 $(LLAMA_BUILD)/common/libcommon.a: $(GGML_PREFIX)/lib/libllama.a
0 commit comments