GitHub Actions workflow for: "Add ARM64 Linux builder with correct runner label" (#14)

Workflow file for this run:

# CI workflow: smoke-tests Docker and (where available) NVIDIA GPU access
# across a matrix of GitHub-hosted and self-hosted runners.
name: CI

on:
  push:
    branches:
      - main
      # Quoted: contains characters YAML/glob tooling could misread otherwise.
      - "pull-request/[0-9]+"
  # Allow manual runs from the Actions tab.
  workflow_dispatch:

jobs:
  test:
    name: Test ${{ matrix.name }}
    runs-on: ${{ matrix.runner }}
    timeout-minutes: ${{ matrix.timeout }}
    strategy:
      # Run every matrix leg to completion even if one fails.
      fail-fast: false
      matrix:
        include:
          # `gpu` gates the GPU-specific steps; `timeout` is per-job minutes.
          - name: "GPU Linux x86_64"
            runner: linux-amd64-gpu-rtxpro6000-latest-1
            os: linux
            gpu: true
            timeout: 30
          - name: "CPU Linux x86_64"
            runner: ubuntu-latest
            os: linux
            gpu: false
            timeout: 15
          - name: "CPU Linux ARM64"
            runner: ubuntu-24.04-arm
            os: linux
            gpu: false
            timeout: 15
            # `arch` is only set on this leg; it expands to "" elsewhere.
            arch: arm64
          - name: "CPU Windows x86_64"
            runner: windows-latest
            os: windows
            gpu: false
            timeout: 15
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        timeout-minutes: 5

      # Sanity check that the Docker daemon works on every runner.
      - name: Test Docker
        timeout-minutes: 5
        run: |
          docker --version
          docker run --rm hello-world

      - name: Check Hardware (Linux)
        if: matrix.os == 'linux'
        timeout-minutes: 5
        run: |
          echo "=== Host Hardware Information ==="
          echo "Runner: ${{ matrix.runner }}"
          echo "Architecture: $(uname -m)"
          if [ "${{ matrix.arch }}" = "arm64" ]; then
            echo "ARM64 architecture detected"
          fi
          if [ "${{ matrix.gpu }}" = "true" ]; then
            lspci | grep -i nvidia || echo "No NVIDIA GPU found via lspci"
            nvidia-smi || echo "nvidia-smi not available on host"
          else
            echo "CPU-only runner (no GPU access)"
            lscpu | head -10 || echo "CPU info not available"
          fi

      - name: Check Hardware (Windows)
        if: matrix.os == 'windows'
        timeout-minutes: 5
        shell: cmd
        run: |
          echo === Host Hardware Information ===
          echo Runner: ${{ matrix.runner }}
          if "${{ matrix.gpu }}"=="true" (
            echo Checking PATH for nvidia-smi:
            where nvidia-smi
            echo Trying nvidia-smi from PATH:
            nvidia-smi || echo nvidia-smi failed from PATH
            echo Trying common NVIDIA locations:
            "C:\Program Files\NVIDIA Corporation\NVSMI\nvidia-smi.exe" || echo Not in NVSMI folder
            "C:\Windows\System32\nvidia-smi.exe" || echo Not in System32
            echo Checking GPU via wmic:
            wmic path win32_VideoController get name
          ) else (
            rem Parentheses inside a cmd ( ) block must be escaped with ^,
            rem otherwise the ")" of "(no GPU access)" closes the block early.
            echo CPU-only runner ^(no GPU access^)
            systeminfo | findstr /C:"Processor"
          )

      - name: Test CUDA Image with GPU (Linux)
        if: matrix.gpu == true && matrix.os == 'linux'
        timeout-minutes: 10
        run: |
          docker pull nvidia/cuda:13.0.2-base-ubuntu24.04
          echo "=== Testing with GPU access ==="
          docker run --rm --gpus all nvidia/cuda:13.0.2-base-ubuntu24.04 bash -c "
          echo 'Container with GPU access:'
          nvidia-smi || echo 'nvidia-smi not available in container'
          cat /etc/os-release
          echo 'CUDA Runtime version:'
          cat /usr/local/cuda/version.txt 2>/dev/null || echo 'CUDA version file not found'
          " || echo "GPU access failed - may not be available on this runner"

      - name: Test CUDA Image CPU Only (Linux)
        if: matrix.gpu == false && matrix.os == 'linux'
        timeout-minutes: 10
        run: |
          docker pull nvidia/cuda:13.0.2-base-ubuntu24.04
          echo "=== Testing without GPU access (CPU only) ==="
          docker run --rm nvidia/cuda:13.0.2-base-ubuntu24.04 bash -c "
          echo 'CUDA container test successful (CPU only - no GPU access)'
          cat /etc/os-release
          which nvcc || echo 'nvcc not in PATH'
          nvidia-smi 2>/dev/null || echo 'nvidia-smi not available without --gpus flag (expected)'
          "

      - name: Test Windows Container with GPU
        if: matrix.gpu == true && matrix.os == 'windows'
        timeout-minutes: 10
        shell: cmd
        run: |
          echo === Testing with GPU access ===
          echo Trying nvidia-smi from different locations:
          nvidia-smi || "C:\Program Files\NVIDIA Corporation\NVSMI\nvidia-smi.exe" || "C:\Windows\System32\nvidia-smi.exe" || echo All nvidia-smi attempts failed
          echo Checking Docker GPU support:
          docker run --rm --gpus all hello-world || echo Docker GPU support not available
          echo Windows GPU runner test completed

      - name: Test Windows Container CPU Only
        if: matrix.gpu == false && matrix.os == 'windows'
        timeout-minutes: 10
        shell: cmd
        run: |
          echo === Testing without GPU access (CPU only) ===
          echo Windows CPU-only runner test successful
          docker --version