@@ -104,8 +104,8 @@ jobs:
104104 - os : windows-latest
105105 arch : x86_64
106106 name : windows-gpu
107- make : LLAMA="-DGGML_BACKEND_DL=ON -DGGML_NATIVE=OFF -DGGML_CPU=OFF -DGGML_CUDA=ON"
108- make-cuda : LLAMA_ARGS="--target ggml-cuda" LLAMA="-G \"Ninja Multi-Config\" -DGGML_BACKEND_DL=ON -DGGML_NATIVE=OFF -DGGML_CPU=OFF -DGGML_CUDA=ON -DGGML_HIP=ON -DGGML_SYCL=ON"
107+ make : LLAMA="-G \"Ninja Multi-Config\" -DGGML_BACKEND_DL=ON -DGGML_NATIVE=OFF -DGGML_CPU=OFF -DGGML_CUDA=ON -DGGML_HIP=ON -DGGML_SYCL=ON"
108+ make-cuda : LLAMA_ARGS="--target ggml-cuda" LLAMA="-G \"Ninja Multi-Config\" -DGGML_NATIVE=OFF -DGGML_CPU=OFF -DGGML_CUDA=ON"
109109 make-hip : LLAMA_ARGS="--target ggml-hip" LLAMA="-G \"Ninja\" -DGGML_BACKEND_DL=ON -DGGML_NATIVE=OFF -DGGML_CPU=OFF -DGGML_HIP=ON -DGGML_HIP_ROCWMMA_FATTN=ON -DAMDGPU_TARGETS=\"gfx1100;gfx1101;gfx1102;gfx1030;gfx1031;gfx1032\" -DCMAKE_C_COMPILER=\"%HIP_PATH%/bin/clang.exe\" -DCMAKE_CXX_COMPILER=\"%HIP_PATH%/bin/clang++.exe\" -DCMAKE_CXX_FLAGS=\"-I../../rocwmma/library/include/ -Wno-ignored-attributes -Wno-nested-anon-types\""
110110 make-sycl : LLAMA_ARGS="--target ggml-sycl" LLAMA="-G \"Ninja\" -DGGML_BACKEND_DL=ON -DCMAKE_C_COMPILER=cl -DCMAKE_CXX_COMPILER=icx -DGGML_NATIVE=OFF -DGGML_CPU=OFF -DGGML_SYCL=ON"
111111
@@ -338,14 +338,14 @@ jobs:
338338 key : miniaudio-${{ matrix.name }}-${{ matrix.os }}-${{ matrix.arch }}-${{ steps.submodule-hashes.outputs.miniaudio }}-${{ hashFiles('modules/miniaudio/**') }}-${{ steps.submodule-hashes.outputs.make }}
339339
340340 - name : build ggml-sycl
341- if : matrix.name == 'windows-gpu' && steps.cache-llama.outputs.cache-hit != 'true'
341+ if : false # matrix.name == 'windows-gpu' && steps.cache-llama.outputs.cache-hit != 'true'
342342 shell : cmd
343343 run : |
344344 call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat" intel64 --force
345345 make build/llama.cpp.stamp ${{ matrix.make-sycl }}
346346
347347 - name : build ggml-hip (msvc)
348- if : matrix.name == 'windows-gpu' && steps.cache-llama.outputs.cache-hit != 'true'
348+ if : false # matrix.name == 'windows-gpu' && steps.cache-llama.outputs.cache-hit != 'true'
349349 shell : cmd
350350 run : |
351351 call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvarsall.bat" x64
@@ -355,7 +355,7 @@ jobs:
355355 make build/llama.cpp.stamp ${{ matrix.make-hip }}
356356
357357 - name : build ggml-cuda (msvc)
358- if : matrix.name == 'windows-gpu' && steps.cache-llama.outputs.cache-hit != 'true'
358+ if : false # matrix.name == 'windows-gpu' && steps.cache-llama.outputs.cache-hit != 'true'
359359 shell : cmd
360360 run : |
361361 call "C:\Program Files\Microsoft Visual Studio\2022\Enterprise\VC\Auxiliary\Build\vcvarsall.bat" x64
0 commit comments