Skip to content

Commit 94841bf

Browse files
committed
fix(build): update llama.cpp build commands for windows-gpu to include multiple targets
1 parent 0398836 commit 94841bf

1 file changed

Lines changed: 6 additions & 2 deletions

File tree

.github/workflows/main.yml

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -386,10 +386,14 @@ jobs:
386386
- name: build llama.cpp main library (mingw)
387387
if: matrix.name == 'windows-gpu' && steps.cache-llama.outputs.cache-hit != 'true'
388388
shell: msys2 {0}
389-
run: make build/llama.cpp.stamp ${{ matrix.make }}
389+
run: |
390+
make build/llama.cpp.stamp ${{ matrix.make }} LLAMA_ARGS="--target common"
391+
make build/llama.cpp.stamp ${{ matrix.make }} LLAMA_ARGS="--target llama"
392+
make build/llama.cpp.stamp ${{ matrix.make }} LLAMA_ARGS="--target ggml"
393+
make build/llama.cpp.stamp ${{ matrix.make }} LLAMA_ARGS="--target ggml-base"
390394
391395
- name: copy backend modules to dist
392-
if: matrix.name == 'windows-gpu'
396+
if: false #matrix.name == 'windows-gpu'
393397
shell: msys2 {0}
394398
run: |
395399
mkdir -p dist

0 commit comments

Comments (0)