Skip to content

Commit 4cd00d9

Browse files
committed
fix(build): update llama.cpp build targets for improved compatibility
1 parent 257d796 commit 4cd00d9

1 file changed

Lines changed: 4 additions & 3 deletions

File tree

.github/workflows/main.yml

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -309,10 +309,11 @@ jobs:
309309
run: |
310310
call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat" intel64 --force
311311
call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvarsall.bat" x64
312-
make build/llama.cpp.stamp ${{ matrix.make }} LLAMA_ARGS="--target common"
313-
make build/llama.cpp.stamp ${{ matrix.make }} LLAMA_ARGS="--target llama"
312+
set CUDAHOSTCXX=cl.exe
314313
make build/llama.cpp.stamp ${{ matrix.make }} LLAMA_ARGS="--target ggml"
315-
make build/llama.cpp.stamp ${{ matrix.make }} LLAMA_ARGS="--target ggml-base"
314+
#make build/llama.cpp.stamp ${{ matrix.make }} LLAMA_ARGS="--target common"
315+
#make build/llama.cpp.stamp ${{ matrix.make }} LLAMA_ARGS="--target llama"
316+
#make build/llama.cpp.stamp ${{ matrix.make }} LLAMA_ARGS="--target ggml-base"
316317
317318
- name: copy backend modules to dist
318319
if: false #matrix.name == 'windows-gpu'

0 commit comments

Comments (0)