Skip to content

Commit bf62c2b

Browse files
leofang authored and claude committed
Match CUDA_VER to TORCH_CUDA in nightly pytorch matrix
CUDA_VER in the test environment should match TORCH_CUDA in major.minor. BUILD_CUDA_VER (from build-ctk-ver input) is used for artifact names, so CUDA_VER can differ. - cu126 → CUDA_VER: 12.6.3 (was 12.9.1) - cu130 → CUDA_VER: 13.0.2 (was 13.2.1) For CUDA 12 entries, USE_BACKPORT_BINDINGS kicks in automatically since BUILD_CUDA_MAJOR (13) != TEST_CUDA_MAJOR (12), pulling bindings from the backport branch. Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
1 parent 0b7cc50 commit bf62c2b

4 files changed

Lines changed: 21 additions & 31 deletions

File tree

.github/workflows/test-wheel-linux.yml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -284,6 +284,7 @@ jobs:
284284
run: setup-sanitizer
285285

286286
- name: Set up test repetition on nightly runs
287+
if: ${{ inputs.test-mode == 'standard' }}
287288
run: echo "PYTEST_ADDOPTS=\"--count=${{ inputs.nruns }}\"" >> "$GITHUB_ENV"
288289

289290
# ── Standard test steps (skipped for nightly modes) ──

.github/workflows/test-wheel-windows.yml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -265,6 +265,7 @@ jobs:
265265
cuda-version: ${{ matrix.CUDA_VER }}
266266

267267
- name: Set up test repetition on nightly runs
268+
if: ${{ inputs.test-mode == 'standard' }}
268269
shell: bash --noprofile --norc -xeuo pipefail {0}
269270
run: echo "PYTEST_ADDOPTS=\"--count=${{ inputs.nruns }}\"" >> "$GITHUB_ENV"
270271

ci/test-matrix.yml

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -64,10 +64,10 @@ linux:
6464
- { ARCH: 'amd64', PY_VER: '3.12', CUDA_VER: '13.2.1', LOCAL_CTK: '0', GPU: 'rtx4090', GPU_COUNT: '1', DRIVER: 'latest', FLAVOR: 'wsl' }
6565
nightly:
6666
# nightly-pytorch (amd64 only — PyTorch does not ship arm64 GPU wheels)
67-
- { MODE: 'nightly-pytorch', ARCH: 'amd64', PY_VER: '3.12', CUDA_VER: '12.9.1', LOCAL_CTK: '0', GPU: 'l4', GPU_COUNT: '1', DRIVER: 'latest', TORCH_VER: 'latest', TORCH_CUDA: 'cu126' }
68-
- { MODE: 'nightly-pytorch', ARCH: 'amd64', PY_VER: '3.12', CUDA_VER: '13.2.1', LOCAL_CTK: '0', GPU: 'l4', GPU_COUNT: '1', DRIVER: 'latest', TORCH_VER: 'latest', TORCH_CUDA: 'cu130' }
69-
- { MODE: 'nightly-pytorch', ARCH: 'amd64', PY_VER: '3.12', CUDA_VER: '12.9.1', LOCAL_CTK: '0', GPU: 'l4', GPU_COUNT: '1', DRIVER: 'latest', TORCH_VER: '2.9.1', TORCH_CUDA: 'cu126' }
70-
- { MODE: 'nightly-pytorch', ARCH: 'amd64', PY_VER: '3.12', CUDA_VER: '13.2.1', LOCAL_CTK: '0', GPU: 'l4', GPU_COUNT: '1', DRIVER: 'latest', TORCH_VER: '2.9.1', TORCH_CUDA: 'cu130' }
67+
- { MODE: 'nightly-pytorch', ARCH: 'amd64', PY_VER: '3.12', CUDA_VER: '12.6.3', LOCAL_CTK: '0', GPU: 'l4', GPU_COUNT: '1', DRIVER: 'latest', TORCH_VER: 'latest', TORCH_CUDA: 'cu126' }
68+
- { MODE: 'nightly-pytorch', ARCH: 'amd64', PY_VER: '3.12', CUDA_VER: '13.0.2', LOCAL_CTK: '0', GPU: 'l4', GPU_COUNT: '1', DRIVER: 'latest', TORCH_VER: 'latest', TORCH_CUDA: 'cu130' }
69+
- { MODE: 'nightly-pytorch', ARCH: 'amd64', PY_VER: '3.12', CUDA_VER: '12.6.3', LOCAL_CTK: '0', GPU: 'l4', GPU_COUNT: '1', DRIVER: 'latest', TORCH_VER: '2.9.1', TORCH_CUDA: 'cu126' }
70+
- { MODE: 'nightly-pytorch', ARCH: 'amd64', PY_VER: '3.12', CUDA_VER: '13.0.2', LOCAL_CTK: '0', GPU: 'l4', GPU_COUNT: '1', DRIVER: 'latest', TORCH_VER: '2.9.1', TORCH_CUDA: 'cu130' }
7171
# nightly-numba-cuda
7272
- { MODE: 'nightly-numba-cuda', ARCH: 'amd64', PY_VER: '3.12', CUDA_VER: '12.9.1', LOCAL_CTK: '0', GPU: 'l4', GPU_COUNT: '1', DRIVER: 'latest' }
7373
- { MODE: 'nightly-numba-cuda', ARCH: 'amd64', PY_VER: '3.12', CUDA_VER: '13.2.1', LOCAL_CTK: '0', GPU: 'l4', GPU_COUNT: '1', DRIVER: 'latest' }
@@ -97,10 +97,10 @@ windows:
9797
- { ARCH: 'amd64', PY_VER: '3.14t', CUDA_VER: '13.2.1', LOCAL_CTK: '0', GPU: 'a100', GPU_COUNT: '1', DRIVER: 'latest', DRIVER_MODE: 'MCDM' }
9898
nightly:
9999
# nightly-pytorch
100-
- { MODE: 'nightly-pytorch', ARCH: 'amd64', PY_VER: '3.12', CUDA_VER: '12.9.1', LOCAL_CTK: '0', GPU: 'l4', GPU_COUNT: '1', DRIVER: 'latest', DRIVER_MODE: 'TCC', TORCH_VER: 'latest', TORCH_CUDA: 'cu126' }
101-
- { MODE: 'nightly-pytorch', ARCH: 'amd64', PY_VER: '3.12', CUDA_VER: '13.2.1', LOCAL_CTK: '0', GPU: 'l4', GPU_COUNT: '1', DRIVER: 'latest', DRIVER_MODE: 'TCC', TORCH_VER: 'latest', TORCH_CUDA: 'cu130' }
102-
- { MODE: 'nightly-pytorch', ARCH: 'amd64', PY_VER: '3.12', CUDA_VER: '12.9.1', LOCAL_CTK: '0', GPU: 'l4', GPU_COUNT: '1', DRIVER: 'latest', DRIVER_MODE: 'TCC', TORCH_VER: '2.9.1', TORCH_CUDA: 'cu126' }
103-
- { MODE: 'nightly-pytorch', ARCH: 'amd64', PY_VER: '3.12', CUDA_VER: '13.2.1', LOCAL_CTK: '0', GPU: 'l4', GPU_COUNT: '1', DRIVER: 'latest', DRIVER_MODE: 'TCC', TORCH_VER: '2.9.1', TORCH_CUDA: 'cu130' }
100+
- { MODE: 'nightly-pytorch', ARCH: 'amd64', PY_VER: '3.12', CUDA_VER: '12.6.3', LOCAL_CTK: '0', GPU: 'l4', GPU_COUNT: '1', DRIVER: 'latest', DRIVER_MODE: 'TCC', TORCH_VER: 'latest', TORCH_CUDA: 'cu126' }
101+
- { MODE: 'nightly-pytorch', ARCH: 'amd64', PY_VER: '3.12', CUDA_VER: '13.0.2', LOCAL_CTK: '0', GPU: 'l4', GPU_COUNT: '1', DRIVER: 'latest', DRIVER_MODE: 'TCC', TORCH_VER: 'latest', TORCH_CUDA: 'cu130' }
102+
- { MODE: 'nightly-pytorch', ARCH: 'amd64', PY_VER: '3.12', CUDA_VER: '12.6.3', LOCAL_CTK: '0', GPU: 'l4', GPU_COUNT: '1', DRIVER: 'latest', DRIVER_MODE: 'TCC', TORCH_VER: '2.9.1', TORCH_CUDA: 'cu126' }
103+
- { MODE: 'nightly-pytorch', ARCH: 'amd64', PY_VER: '3.12', CUDA_VER: '13.0.2', LOCAL_CTK: '0', GPU: 'l4', GPU_COUNT: '1', DRIVER: 'latest', DRIVER_MODE: 'TCC', TORCH_VER: '2.9.1', TORCH_CUDA: 'cu130' }
104104
# nightly-numba-cuda
105105
- { MODE: 'nightly-numba-cuda', ARCH: 'amd64', PY_VER: '3.12', CUDA_VER: '12.9.1', LOCAL_CTK: '0', GPU: 'l4', GPU_COUNT: '1', DRIVER: 'latest', DRIVER_MODE: 'TCC' }
106106
- { MODE: 'nightly-numba-cuda', ARCH: 'amd64', PY_VER: '3.12', CUDA_VER: '13.2.1', LOCAL_CTK: '0', GPU: 'l4', GPU_COUNT: '1', DRIVER: 'latest', DRIVER_MODE: 'TCC' }

ci/tools/run-tests

Lines changed: 11 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -99,27 +99,12 @@ elif [[ "${test_module}" == "nightly-pytorch" || "${test_module}" == "nightly-nu
9999
# Install ALL wheels (pathfinder + bindings + core) and the optional dep
100100
# in a single pip call so pip resolves version constraints in one shot
101101
# and avoids costly uninstall/reinstall cycles.
102-
#
103-
# We pushd into cuda_core/ so that --group reads test dependency groups
104-
# from cuda_core/pyproject.toml (needed for numpy, cupy, ml_dtypes, etc.).
105-
# All other wheel paths use ../ to reach the repo root.
106102

107103
TEST_CUDA_MAJOR="$(cut -d '.' -f 1 <<< ${CUDA_VER})"
108104
CUDA_VER_MINOR="$(cut -d '.' -f 1-2 <<< "${CUDA_VER}")"
109105

110-
FREE_THREADING=""
111-
if python -c 'import sys; assert not sys._is_gil_enabled()' 2> /dev/null; then
112-
FREE_THREADING+="-ft"
113-
fi
114-
115-
# Resolve the pathfinder wheel path before pushd (it's relative to repo root).
116-
# CUDA_BINDINGS_ARTIFACTS_DIR and CUDA_CORE_ARTIFACTS_DIR are already absolute
106+
# CUDA_BINDINGS_ARTIFACTS_DIR and CUDA_CORE_ARTIFACTS_DIR are absolute
117107
# (set via realpath in env-vars).
118-
PATHFINDER_WHL=($(realpath ./cuda_pathfinder/*.whl))
119-
120-
pushd ./cuda_core
121-
122-
# Build wheel specs (paths are absolute, so pushd doesn't affect them)
123108
BINDINGS_WHL=("${CUDA_BINDINGS_ARTIFACTS_DIR}"/*.whl)
124109
if [[ "${LOCAL_CTK}" != 1 ]]; then
125110
BINDINGS_WHL=("${BINDINGS_WHL[0]}[all]")
@@ -130,31 +115,34 @@ elif [[ "${test_module}" == "nightly-pytorch" || "${test_module}" == "nightly-nu
130115
CORE_WHL=("${CORE_WHL[0]}[cu${TEST_CUDA_MAJOR}]")
131116
fi
132117

133-
# All packages in one pip call: pathfinder + bindings + core + test deps + optional dep
118+
# All packages in one pip call: pathfinder + bindings + core + deps + optional dep.
119+
# We list test dependencies explicitly rather than using --group to avoid
120+
# the group's looser cuda-toolkit constraint (==12.*) overriding our
121+
# tighter pin (==12.6.*).
134122
PIP_ARGS=(
135-
"${PATHFINDER_WHL[@]}"
123+
./cuda_pathfinder/*.whl
136124
"${BINDINGS_WHL[@]}"
137125
"${CORE_WHL[@]}"
138-
--group "test-cu${TEST_CUDA_MAJOR}${FREE_THREADING}"
126+
pytest
139127
"cuda-toolkit==${CUDA_VER_MINOR}.*"
140128
)
141129

142130
if [[ "${test_module}" == "nightly-pytorch" ]]; then
143131
# TORCH_VER and TORCH_CUDA must be set by the caller.
144-
echo "Installing pathfinder + bindings + core + test deps + PyTorch ${TORCH_VER} (${TORCH_CUDA})"
132+
echo "Installing pathfinder + bindings + core + PyTorch ${TORCH_VER} (${TORCH_CUDA})"
133+
PIP_ARGS+=(numpy ml_dtypes)
145134
if [[ "${TORCH_VER}" == "latest" ]]; then
146135
PIP_ARGS+=(torch)
147136
else
148137
PIP_ARGS+=("torch==${TORCH_VER}")
149138
fi
150139
PIP_ARGS+=(--extra-index-url "https://download.pytorch.org/whl/${TORCH_CUDA}")
151140
elif [[ "${test_module}" == "nightly-numba-cuda" ]]; then
152-
echo "Installing pathfinder + bindings + core + test deps + numba-cuda"
153-
PIP_ARGS+=("numba-cuda[cu${TEST_CUDA_MAJOR}]")
141+
echo "Installing pathfinder + bindings + core + numba-cuda"
142+
PIP_ARGS+=("numba-cuda[cu${TEST_CUDA_MAJOR}]" filecheck)
154143
fi
155144

156145
pip install "${PIP_ARGS[@]}"
157-
popd
158146

159147
echo "Nightly install complete — installed packages:"
160148
pip list

0 commit comments

Comments (0)