---
# CI workflow: builds the sqlite-ai extension across a desktop/mobile matrix,
# tests where an emulator/host run is possible, and releases from main.
name: build, test and release sqlite-ai

on:
  push:
  workflow_dispatch:

# contents: write is required by softprops/action-gh-release to create releases.
permissions:
  contents: write
jobs:
  build:
    runs-on: ${{ matrix.os }}
    # Job name advertises "+ test" only for configurations that actually run tests
    # (android arm64-v8a, ios and isim are build-only — no emulator/simulator run here).
    name: ${{ matrix.name }}${{ matrix.arch && format('-{0}', matrix.arch) || '' }} build${{ matrix.arch != 'arm64-v8a' && matrix.name != 'isim' && matrix.name != 'ios' && ' + test' || '' }}
    timeout-minutes: 120
    strategy:
      fail-fast: false
      matrix:
        include:
          - os: ubuntu-latest
            arch: x86_64
            name: linux
            make: LLAMA="-DGGML_NATIVE=OFF -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS -DGGML_VULKAN=ON -DGGML_OPENCL=ON -DGGML_LLAMAFILE=ON" WHISPER="-DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS -DGGML_VULKAN=ON -DGGML_OPENCL=ON -DGGML_LLAMAFILE=ON"
          - os: LinuxARM64
            arch: arm64
            name: linux
            make: LLAMA="-DGGML_NATIVE=OFF -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS -DGGML_VULKAN=ON -DGGML_OPENCL=ON -DGGML_LLAMAFILE=ON" WHISPER="-DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS -DGGML_VULKAN=ON -DGGML_OPENCL=ON -DGGML_LLAMAFILE=ON"
          - os: macos-latest
            name: macos
            make: LLAMA="-DGGML_NATIVE=ON -DGGML_METAL=ON -DGGML_ACCELERATE=ON -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=Apple -DGGML_LLAMAFILE=ON" WHISPER="-DGGML_METAL=ON -DGGML_ACCELERATE=ON -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=Apple -DWHISPER_COREML=ON -DWHISPER_COREML_ALLOW_FALLBACK=ON"
          - os: windows-latest
            arch: x86_64
            name: windows
            make: LLAMA="-DGGML_NATIVE=OFF -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS -DGGML_VULKAN=ON -DGGML_OPENCL=ON -DGGML_LLAMAFILE=ON" WHISPER="-DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS -DGGML_VULKAN=ON -DGGML_OPENCL=ON -DGGML_LLAMAFILE=ON"
          - os: windows-latest
            arch: x86_64
            name: windows-gpu
            make: LLAMA="-DGGML_BACKEND_DL=ON -DGGML_NATIVE=OFF -DGGML_CPU=ON -DGGML_STATIC=ON"
          # Android builds
          - os: ubuntu-latest
            arch: x86_64
            name: android
            make: PLATFORM=android ARCH=x86_64
            # Only the x86_64 android entry carries this: it is consumed by the
            # emulator test-setup step, which is skipped for arm64-v8a.
            sqlite-amalgamation-zip: https://sqlite.org/2025/sqlite-amalgamation-3490100.zip
          - os: ubuntu-latest
            arch: arm64-v8a
            name: android
            make: PLATFORM=android ARCH=arm64-v8a
          - os: macos-latest
            name: ios
            make: PLATFORM=ios LLAMA="-DGGML_METAL=ON -DGGML_ACCELERATE=ON -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=Apple" WHISPER="-DGGML_METAL=ON -DGGML_ACCELERATE=ON -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=Apple -DWHISPER_COREML=ON -DWHISPER_COREML_ALLOW_FALLBACK=ON"
          - os: macos-latest
            name: isim
            make: PLATFORM=isim LLAMA="-DGGML_METAL=ON -DGGML_ACCELERATE=ON -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=Apple" WHISPER="-DGGML_METAL=ON -DGGML_ACCELERATE=ON -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=Apple -DWHISPER_COREML=ON -DWHISPER_COREML_ALLOW_FALLBACK=ON"
    defaults:
      run:
        shell: bash
    env:
      WINDOWS_BASEKIT_URL: https://registrationcenter-download.intel.com/akdlm/IRC_NAS/7cd9bba0-7aab-4e30-b3ae-2221006a4a05/intel-oneapi-base-toolkit-2025.1.1.34_offline.exe
      WINDOWS_DPCPP_MKL: intel.oneapi.win.cpp-dpcpp-common:intel.oneapi.win.mkl.devel:intel.oneapi.win.dnnl:intel.oneapi.win.tbb.devel
      ONEAPI_ROOT: "C:/Program Files (x86)/Intel/oneAPI"
      # Quoted so the version-looking values stay strings (1.2 float / date traps).
      CUDA_VERSION: '12.4'
      ROCWMMA_VERSION: '6.2.4'
      HIP_TOOLKIT_VERSION: '24.Q3'
    steps:
      - uses: actions/checkout@v4.2.2
        with:
          submodules: true
      - name: calculate cache version hashes for modules and dependencies
        id: submodule-hashes
        env:
          MATRIX_MAKE: ${{ matrix.make && matrix.make || '' }}
        run: |
          LLAMA_HASH=$(git -C modules/llama.cpp rev-parse HEAD)
          WHISPER_HASH=$(git -C modules/whisper.cpp rev-parse HEAD)
          MINIAUDIO_HASH=$(git -C modules/miniaudio rev-parse HEAD)
          # Pick whichever SHA-256 tool the runner OS provides (Linux: sha256sum,
          # macOS: shasum, fallback: openssl whose output field layout differs).
          if command -v sha256sum >/dev/null 2>&1; then
            MAKE_HASH=$(echo "$MATRIX_MAKE" | sha256sum | cut -d' ' -f1)
            DPCPP_MKL_HASH=$(echo "$WINDOWS_DPCPP_MKL" | sha256sum | cut -d' ' -f1)
            BASEKIT_URL_HASH=$(echo "$WINDOWS_BASEKIT_URL" | sha256sum | cut -d' ' -f1)
          elif command -v shasum >/dev/null 2>&1; then
            MAKE_HASH=$(echo "$MATRIX_MAKE" | shasum -a 256 | cut -d' ' -f1)
            DPCPP_MKL_HASH=$(echo "$WINDOWS_DPCPP_MKL" | shasum -a 256 | cut -d' ' -f1)
            BASEKIT_URL_HASH=$(echo "$WINDOWS_BASEKIT_URL" | shasum -a 256 | cut -d' ' -f1)
          else
            MAKE_HASH=$(echo "$MATRIX_MAKE" | openssl dgst -sha256 | cut -d' ' -f2)
            DPCPP_MKL_HASH=$(echo "$WINDOWS_DPCPP_MKL" | openssl dgst -sha256 | cut -d' ' -f2)
            BASEKIT_URL_HASH=$(echo "$WINDOWS_BASEKIT_URL" | openssl dgst -sha256 | cut -d' ' -f2)
          fi
          echo "llama=$LLAMA_HASH" >> $GITHUB_OUTPUT
          echo "whisper=$WHISPER_HASH" >> $GITHUB_OUTPUT
          echo "miniaudio=$MINIAUDIO_HASH" >> $GITHUB_OUTPUT
          echo "make=$MAKE_HASH" >> $GITHUB_OUTPUT
          echo "dpcpp-mkl=$DPCPP_MKL_HASH" >> $GITHUB_OUTPUT
          echo "basekit-url=$BASEKIT_URL_HASH" >> $GITHUB_OUTPUT
      - uses: msys2/setup-msys2@v2.27.0
        if: matrix.os == 'windows-latest'
        with:
          msystem: mingw64
          install: >-
            git
            make
            sqlite
            mingw-w64-x86_64-cc
            mingw-w64-x86_64-cmake
            mingw-w64-x86_64-vulkan-headers
            mingw-w64-x86_64-vulkan-loader
            mingw-w64-x86_64-shaderc
            mingw-w64-x86_64-openblas
            mingw-w64-x86_64-opencl-headers
            mingw-w64-x86_64-opencl-icd
      - name: linux install openblas and opencl
        if: matrix.name == 'linux'
        run: sudo apt-get install -y libopenblas-dev opencl-headers ocl-icd-opencl-dev
      - name: linux-x86_64 install vulkan
        if: matrix.name == 'linux' && matrix.arch == 'x86_64'
        run: |
          wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | sudo apt-key add -
          sudo wget -qO /etc/apt/sources.list.d/lunarg-vulkan-jammy.list https://packages.lunarg.com/vulkan/lunarg-vulkan-jammy.list
          sudo apt-get update -y
          sudo apt-get install -y mesa-vulkan-drivers
          # Vulkan is no longer packed for Ubuntu
          wget https://sdk.lunarg.com/sdk/download/latest/linux/vulkan-sdk.tar.xz?Human=true -O vulkan-sdk.tar.xz
          tar -xf vulkan-sdk.tar.xz
          cd $(ls -d 1.* | head -n1)
          source setup-env.sh
          # Persist the SDK environment for subsequent steps.
          # NOTE(review): PATH appended via GITHUB_ENV rather than GITHUB_PATH — confirm
          # later steps pick it up as intended.
          echo "VULKAN_SDK=$VULKAN_SDK" >> $GITHUB_ENV
          echo "PATH=$PATH" >> $GITHUB_ENV
          echo "LD_LIBRARY_PATH=$LD_LIBRARY_PATH" >> $GITHUB_ENV
          echo "VK_ADD_LAYER_PATH=$VK_ADD_LAYER_PATH" >> $GITHUB_ENV
      - name: linux-arm64 install vulkan
        if: matrix.name == 'linux' && matrix.arch == 'arm64'
        run: |
          sudo dpkg --add-architecture arm64
          # Add arch-specific repositories for non-amd64 architectures
          cat << EOF | sudo tee /etc/apt/sources.list.d/arm64-ports.list
          deb [arch=arm64] http://ports.ubuntu.com/ubuntu-ports/ noble main universe
          deb [arch=arm64] http://ports.ubuntu.com/ubuntu-ports/ noble-updates main universe
          deb [arch=arm64] http://ports.ubuntu.com/ubuntu-ports/ noble-security main universe
          deb [arch=arm64] http://ports.ubuntu.com/ubuntu-ports/ noble-backports main universe
          EOF
          sudo apt-get update || true  # Prevent failure due to missing URLs.
          sudo apt-get install -y --no-install-recommends build-essential glslc crossbuild-essential-arm64 libvulkan-dev:arm64
      # The windows-gpu CUDA/HIP/oneAPI steps below are deliberately disabled
      # (if: false); the original matrix condition is kept in the trailing comment.
      - name: cache cuda toolkit
        if: false #matrix.name == 'windows-gpu'
        id: cache-cuda
        uses: actions/cache@v4
        with:
          path: |
            C:\Program Files\NVIDIA GPU Computing Toolkit
          key: cuda-${{ matrix.name }}-${{ matrix.os }}-${{ matrix.arch }}-${{ env.CUDA_VERSION }}
      - name: windows install cuda
        if: false #matrix.name == 'windows-gpu' && steps.cache-cuda.outputs.cache-hit != 'true'
        uses: ./modules/llama.cpp/.github/actions/windows-setup-cuda
        with:
          cuda_version: ${{ env.CUDA_VERSION }}
      - name: windows setup cuda toolkit
        if: false #matrix.name == 'windows-gpu'
        run: |
          echo "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v${{ env.CUDA_VERSION }}\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
          echo "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v${{ env.CUDA_VERSION }}\libnvvp" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
          echo "CUDA_PATH=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v${{ env.CUDA_VERSION }}" | Out-File -FilePath $env:GITHUB_ENV -Append -Encoding utf8
          echo "CUDA_PATH_V12_4=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v${{ env.CUDA_VERSION }}" | Out-File -FilePath $env:GITHUB_ENV -Append -Encoding utf8
        shell: pwsh
      - name: cache hip toolkit
        if: false #matrix.name == 'windows-gpu'
        id: cache-hip
        uses: actions/cache@v4
        with:
          path: |
            C:\Program Files\AMD\ROCm
            rocwmma
          key: hip-toolkit-${{ env.HIP_TOOLKIT_VERSION }}-rocwmma-${{ env.ROCWMMA_VERSION }}
      - name: windows install hip toolkit
        if: false #matrix.name == 'windows-gpu' && steps.cache-hip.outputs.cache-hit != 'true'
        run: |
          git clone https://github.com/rocm/rocwmma --branch rocm-${{ env.ROCWMMA_VERSION }} --depth 1
          $ErrorActionPreference = "Stop"
          write-host "Downloading AMD HIP SDK Installer"
          Invoke-WebRequest -Uri "https://download.amd.com/developer/eula/rocm-hub/AMD-Software-PRO-Edition-${{ env.HIP_TOOLKIT_VERSION }}-WinSvr2022-For-HIP.exe" -OutFile "${env:RUNNER_TEMP}\rocm-install.exe"
          write-host "Installing AMD HIP SDK"
          Start-Process "${env:RUNNER_TEMP}\rocm-install.exe" -ArgumentList '-install' -NoNewWindow -Wait
          write-host "Completed AMD HIP SDK installation"
        shell: pwsh
      - name: setup hip environment
        if: false #matrix.name == 'windows-gpu'
        run: |
          & 'C:\Program Files\AMD\ROCm\*\bin\clang.exe' --version
          $HIP_PWSH = "$(Resolve-Path 'C:\Program Files\AMD\ROCm\*\bin\clang.exe' | split-path | split-path)"
          "HIP_PATH=$($HIP_PWSH.Replace('\', '/'))" | Out-File -FilePath $env:GITHUB_ENV -Append
          "CMAKE_PREFIX_PATH=$($HIP_PWSH.Replace('\', '/'))" | Out-File -FilePath $env:GITHUB_ENV -Append
          "HIP_PLATFORM=amd" | Out-File -FilePath $env:GITHUB_ENV -Append
          # Ensure hip-config.cmake can find hipconfig
          "PATH=$env:PATH;$($HIP_PWSH.Replace('\', '/'))/bin" | Out-File -FilePath $env:GITHUB_ENV -Append
        shell: pwsh
      - name: cache intel oneapi toolkit
        if: false #matrix.name == 'windows-gpu'
        id: cache-oneapi
        uses: actions/cache@v4
        with:
          path: |
            C:\Program Files (x86)\Intel\oneAPI
          key: oneapi-${{ matrix.name }}-${{ matrix.os }}-${{ matrix.arch }}-${{ steps.submodule-hashes.outputs.dpcpp-mkl }}-${{ steps.submodule-hashes.outputs.basekit-url }}
      - name: windows install intel oneapi toolkit for sycl
        if: false #matrix.name == 'windows-gpu' && steps.cache-oneapi.outputs.cache-hit != 'true'
        run: modules/llama.cpp/scripts/install-oneapi.bat $WINDOWS_BASEKIT_URL $WINDOWS_DPCPP_MKL
      - name: windows-gpu install ninja and sqlite
        if: matrix.name == 'windows-gpu'
        run: choco install ninja sqlite -y
        shell: pwsh
      # Submodule builds are cached on (submodule HEAD, tree hash, make flags) so a
      # change to any of them invalidates the cache.
      - name: cache llama.cpp build
        id: cache-llama
        uses: actions/cache@v4
        with:
          path: |
            build/llama.cpp
            build/llama.cpp.stamp
          key: llama-${{ matrix.name }}-${{ matrix.os }}-${{ matrix.arch }}-${{ steps.submodule-hashes.outputs.llama }}-${{ hashFiles('modules/llama.cpp/**') }}-${{ steps.submodule-hashes.outputs.make }}
      - name: cache whisper.cpp build
        id: cache-whisper
        uses: actions/cache@v4
        with:
          path: |
            build/whisper.cpp
            build/whisper.cpp.stamp
          key: whisper-${{ matrix.name }}-${{ matrix.os }}-${{ matrix.arch }}-${{ steps.submodule-hashes.outputs.whisper }}-${{ hashFiles('modules/whisper.cpp/**') }}-${{ steps.submodule-hashes.outputs.make }}
      - name: cache miniaudio build
        id: cache-miniaudio
        uses: actions/cache@v4
        with:
          path: |
            build/miniaudio
            build/miniaudio.stamp
          key: miniaudio-${{ matrix.name }}-${{ matrix.os }}-${{ matrix.arch }}-${{ steps.submodule-hashes.outputs.miniaudio }}-${{ hashFiles('modules/miniaudio/**') }}-${{ steps.submodule-hashes.outputs.make }}
      - name: download backend modules from llama.cpp releases
        if: false #matrix.name == 'windows-gpu'
        shell: bash
        run: |
          # Get the latest llama.cpp release tag
          LLAMA_VERSION=$(git -C modules/llama.cpp describe --tags --abbrev=0)
          echo "Downloading backend modules for llama.cpp version: $LLAMA_VERSION"
          # Create directories
          mkdir -p build/llama.cpp/bin
          # llama.cpp releases use this naming pattern for Windows builds
          RELEASE_URL="https://github.com/ggerganov/llama.cpp/releases/download/${LLAMA_VERSION}"
          # Download the main Windows release which contains backend DLLs
          echo "Attempting to download llama.cpp Windows release..."
          if curl -fsSL "${RELEASE_URL}/llama-${LLAMA_VERSION}-bin-win-x64.zip" -o llama-win.zip 2>/dev/null; then
            echo "Downloaded llama.cpp Windows release"
            # Extract only DLL files to the bin directory
            unzip -j llama-win.zip "*.dll" -d build/llama.cpp/bin/ 2>/dev/null || echo "No DLLs found in main archive"
            rm llama-win.zip
          else
            echo "Main Windows release not found, trying alternative download methods..."
          fi
          # Try alternative naming patterns for GPU-specific builds
          for gpu_variant in "cuda-cu12.2.0-x64" "hip-rocm-win-x64" "sycl-x64"; do
            filename="llama-${LLAMA_VERSION}-bin-win-${gpu_variant}.zip"
            echo "Trying to download ${filename}..."
            if curl -fsSL "${RELEASE_URL}/${filename}" -o "${gpu_variant}.zip" 2>/dev/null; then
              echo "Downloaded ${filename}"
              unzip -j "${gpu_variant}.zip" "*.dll" -d build/llama.cpp/bin/ 2>/dev/null || echo "No DLLs in ${filename}"
              rm "${gpu_variant}.zip"
            fi
          done
          # If no DLLs found, create dummy files to indicate we should build locally
          if [ ! -d "build/llama.cpp/bin" ] || [ -z "$(ls -A build/llama.cpp/bin/ 2>/dev/null)" ]; then
            echo "No backend modules downloaded, will build locally if needed"
            mkdir -p build/llama.cpp/bin
            touch build/llama.cpp/bin/.empty
          else
            echo "Downloaded backend modules:"
            ls -la build/llama.cpp/bin/
          fi
      - name: copy backend modules to dist
        if: false #matrix.name == 'windows-gpu'
        shell: msys2 {0}
        run: |
          mkdir -p dist
          cp build/llama.cpp/bin/*.dll dist/ 2>/dev/null || echo "No backend DLLs found, continuing..."
      - name: windows build llama.cpp
        if: matrix.os == 'windows-latest' && steps.cache-llama.outputs.cache-hit != 'true'
        shell: msys2 {0}
        run: make build/llama.cpp.stamp ${{ matrix.make && matrix.make || '' }}
        env:
          VULKAN_SDK: "C:/msys64/mingw64"
      - name: unix build llama.cpp
        if: matrix.os != 'windows-latest' && steps.cache-llama.outputs.cache-hit != 'true'
        run: make build/llama.cpp.stamp ${{ matrix.make && matrix.make || '' }}
      - name: windows build whisper.cpp
        if: matrix.os == 'windows-latest' && steps.cache-whisper.outputs.cache-hit != 'true'
        shell: msys2 {0}
        run: make build/whisper.cpp.stamp ${{ matrix.make && matrix.make || '' }}
        env:
          VULKAN_SDK: "C:/msys64/mingw64"
      - name: unix build whisper.cpp
        if: matrix.os != 'windows-latest' && steps.cache-whisper.outputs.cache-hit != 'true'
        run: make build/whisper.cpp.stamp ${{ matrix.make && matrix.make || '' }}
      - name: windows build miniaudio
        if: matrix.os == 'windows-latest' && steps.cache-miniaudio.outputs.cache-hit != 'true'
        shell: msys2 {0}
        run: make build/miniaudio.stamp ${{ matrix.make && matrix.make || '' }}
      - name: unix build miniaudio
        if: matrix.os != 'windows-latest' && steps.cache-miniaudio.outputs.cache-hit != 'true'
        run: make build/miniaudio.stamp ${{ matrix.make && matrix.make || '' }}
      - name: windows build sqlite-ai
        if: matrix.os == 'windows-latest'
        run: make extension ${{ matrix.make && matrix.make || '' }}
        shell: msys2 {0}
        env:
          VULKAN_SDK: "C:/msys64/mingw64"
      - name: unix build sqlite-ai
        if: matrix.os != 'windows-latest'
        run: make extension ${{ matrix.make && matrix.make || '' }}
      - name: macos install sqlite3 without SQLITE_OMIT_LOAD_EXTENSION
        if: matrix.name == 'macos'
        run: brew link sqlite --force
      - name: android setup test environment
        if: matrix.name == 'android' && matrix.arch != 'arm64-v8a'
        run: |
          echo "::group::enable kvm group perms"
          echo 'KERNEL=="kvm", GROUP="kvm", MODE="0666", OPTIONS+="static_node=kvm"' | sudo tee /etc/udev/rules.d/99-kvm4all.rules
          sudo udevadm control --reload-rules
          sudo udevadm trigger --name-match=kvm
          echo "::endgroup::"
          echo "::group::download and build sqlite3 without SQLITE_OMIT_LOAD_EXTENSION"
          curl -O ${{ matrix.sqlite-amalgamation-zip }}
          unzip sqlite-amalgamation-*.zip
          export ${{ matrix.make }}
          $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/${{ matrix.arch }}-linux-android26-clang sqlite-amalgamation-*/shell.c sqlite-amalgamation-*/sqlite3.c -o sqlite3 -ldl
          # remove unused folders to save up space
          rm -rf sqlite-amalgamation-*.zip sqlite-amalgamation-*
          echo "::endgroup::"
          echo "::group::prepare the test script"
          # 'make test -n' dry-run emits the test commands, which are replayed
          # inside the emulator by commands.sh.
          make test PLATFORM=$PLATFORM ARCH=$ARCH || echo "It should fail. Running remaining commands in the emulator"
          cat > commands.sh << EOF
          mv -f /data/local/tmp/sqlite3 /system/xbin
          cd /data/local/tmp
          $(make test PLATFORM=$PLATFORM ARCH=$ARCH -n)
          EOF
          # remove big unused folders to avoid emulator errors
          echo "::endgroup::"
      - name: android test sqlite-ai
        if: matrix.name == 'android' && matrix.arch != 'arm64-v8a'
        uses: reactivecircus/android-emulator-runner@v2.34.0
        with:
          api-level: 26
          arch: ${{ matrix.arch }}
          script: |
            adb root
            adb remount
            adb push ${{ github.workspace }}/commands.sh /data/local/tmp/
            adb push ${{ github.workspace }}/sqlite3 /data/local/tmp/
            adb push ${{ github.workspace }}/dist /data/local/tmp/
            adb push ${{ github.workspace }}/Makefile /data/local/tmp/
            adb shell "sh /data/local/tmp/commands.sh"
      - name: windows test sqlite-ai
        if: matrix.os == 'windows-latest'
        run: make test ${{ matrix.make && matrix.make || '' }}
        shell: msys2 {0}
        env:
          VULKAN_SDK: "C:/msys64/mingw64"
      - name: unix test sqlite-ai
        if: matrix.name == 'linux' || matrix.name == 'macos'
        run: make test
      - uses: actions/upload-artifact@v4.6.2
        if: always()
        with:
          name: ai-${{ matrix.name }}${{ matrix.arch && format('-{0}', matrix.arch) || '' }}
          path: dist/ai.*
          if-no-files-found: error

  release:
    runs-on: ubuntu-latest
    name: release
    needs: build
    if: github.ref == 'refs/heads/main'
    steps:
      - uses: actions/checkout@v4.2.2
      - uses: actions/download-artifact@v4.2.1
        with:
          path: artifacts
      - name: release tag version from sqlite-ai.h
        id: tag
        run: |
          VERSION=$(make version)
          if [[ "$VERSION" =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
            # Release only when the version changed, or on a manual dispatch.
            LATEST=$(curl -s -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" https://api.github.com/repos/${{ github.repository }}/releases/latest | jq -r '.name')
            if [[ "$VERSION" != "$LATEST" || "$GITHUB_EVENT_NAME" == "workflow_dispatch" ]]; then
              echo "version=$VERSION" >> $GITHUB_OUTPUT
            else
              echo "::warning file=src/sqlite-ai.h::To release a new version, please update the SQLITE_AI_VERSION in src/sqlite-ai.h to be different than the latest $LATEST"
            fi
            exit 0
          fi
          echo "❌ SQLITE_AI_VERSION not found in sqlite-ai.h"
          exit 1
      # NOTE(review): this step also runs when steps.tag.outputs.version is empty,
      # producing archives with an empty version segment; only the release step
      # below is guarded. Confirm whether a guard is wanted here too.
      - name: zip artifacts
        run: |
          for folder in "artifacts"/*; do
            if [ -d "$folder" ]; then
              name=$(basename "$folder")
              zip -jq "${name}-${{ steps.tag.outputs.version }}.zip" "$folder"/*
              tar -cJf "${name}-${{ steps.tag.outputs.version }}.tar.xz" -C "$folder" .
              tar -czf "${name}-${{ steps.tag.outputs.version }}.tar.gz" -C "$folder" .
            fi
          done
      - uses: softprops/action-gh-release@v2.2.1
        if: steps.tag.outputs.version != ''
        with:
          generate_release_notes: true
          tag_name: ${{ steps.tag.outputs.version }}
          files: |
            ai-*-${{ steps.tag.outputs.version }}.zip
            ai-*-${{ steps.tag.outputs.version }}.tar.xz
            ai-*-${{ steps.tag.outputs.version }}.tar.gz
          make_latest: true