forked from NVIDIA/cuda-python
-
Notifications
You must be signed in to change notification settings - Fork 0
462 lines (414 loc) · 19.5 KB
/
build-wheel.yml
File metadata and controls
462 lines (414 loc) · 19.5 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
# SPDX-FileCopyrightText: Copyright (c) 2024-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
# Reusable (workflow_call) entry point: callers supply the host platform and
# the current + previous CUDA versions to build wheels against.
on:  # yamllint disable-line rule:truthy
  workflow_call:
    inputs:
      host-platform:
        required: true
        type: string
      cuda-version:
        required: true
        type: string
      prev-cuda-version:
        required: true
        type: string

# Strict bash for every `run:` step: fail on errors, unset vars, and pipe failures.
defaults:
  run:
    shell: bash --noprofile --norc -xeuo pipefail {0}

permissions:
  contents: read  # This is required for actions/checkout
jobs:
  build:
    strategy:
      fail-fast: false
      matrix:
        python-version:
          - "3.10"
          - "3.11"
          - "3.12"
          - "3.13"
          - "3.14"
          - "3.14t"
    name: py${{ matrix.python-version }}
    # Map host platform to a concrete runner label; an unmatched platform
    # evaluates to an empty string and the job would fail to schedule.
    runs-on: ${{ (inputs.host-platform == 'linux-64' && 'linux-amd64-cpu8') ||
      (inputs.host-platform == 'linux-aarch64' && 'linux-arm64-cpu8') ||
      (inputs.host-platform == 'win-64' && 'windows-2022') }}
    steps:
      - name: Checkout ${{ github.event.repository.name }}
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
        with:
          fetch-depth: 0

      # The env vars ACTIONS_CACHE_SERVICE_V2, ACTIONS_RESULTS_URL, and ACTIONS_RUNTIME_TOKEN
      # are exposed by this action.
      - name: Enable sccache
        uses: mozilla-actions/sccache-action@7d986dd989559c6ecdb630a3fd2557667be217ad # 0.0.9

      # xref: https://github.com/orgs/community/discussions/42856#discussioncomment-7678867
      - name: Adding additional GHA cache-related env vars
        # NOTE(review): unpinned major tag; every other third-party action here
        # is SHA-pinned — consider pinning this one too.
        uses: actions/github-script@v8
        with:
          script: |
            core.exportVariable('ACTIONS_CACHE_URL', process.env['ACTIONS_CACHE_URL'])
            core.exportVariable('ACTIONS_RUNTIME_URL', process.env['ACTIONS_RUNTIME_URL'])

      - name: Setup proxy cache
        uses: nv-gha-runners/setup-proxy-cache@main
        continue-on-error: true
        # Skip cache on GitHub-hosted Windows runners.
        if: ${{ inputs.host-platform != 'win-64' }}
        with:
          enable-apt: true

      - name: Set up Python
        id: setup-python1
        uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0
        with:
          # WAR: setup-python is not relocatable, and cibuildwheel hard-wires to 3.12...
          # see https://github.com/actions/setup-python/issues/871
          python-version: "3.12"

      - name: Set up MSVC
        if: ${{ startsWith(inputs.host-platform, 'win') }}
        uses: ilammy/msvc-dev-cmd@v1  # TODO: ask admin to allow pinning commits

      - name: Set up yq
        # GitHub made an unprofessional decision to not provide it in their Windows VMs,
        # see https://github.com/actions/runner-images/issues/7443.
        if: ${{ startsWith(inputs.host-platform, 'win') }}
        env:
          # doesn't seem there's an easy way to avoid hard-coding it?
          YQ_URL: https://github.com/mikefarah/yq/releases/latest/download/yq_windows_amd64.exe
          YQ_DIR: yq_latest
        shell: pwsh -command ". '{0}'"
        run: |
          mkdir -Force -ErrorAction SilentlyContinue "${env:YQ_DIR}" | Out-Null
          Invoke-WebRequest -UseBasicParsing -OutFile "${env:YQ_DIR}/yq.exe" -Uri "$env:YQ_URL"
          ls -l $env:YQ_DIR
          echo "$((Get-Location).Path)\\$env:YQ_DIR" >> $env:GITHUB_PATH
          $env:Path += ";$((Get-Location).Path)\\$env:YQ_DIR"
          yq --version

      # Exports CUDA_BINDINGS_ARTIFACTS_DIR, CUDA_CORE_ARTIFACTS_DIR, CIBW_BUILD,
      # BUILD_CUDA_MAJOR, etc. consumed by the steps below (see ci/tools/env-vars).
      - name: Set environment variables
        env:
          CUDA_VER: ${{ inputs.cuda-version }}
          HOST_PLATFORM: ${{ inputs.host-platform }}
          PY_VER: ${{ matrix.python-version }}
          SHA: ${{ github.sha }}
        run: ./ci/tools/env-vars build

      - name: Dump environment
        run: |
          env

      - name: Install twine
        run: |
          pip install twine

      # To keep the build workflow simple, all matrix jobs will build a wheel for later use within this workflow.
      - name: Build and check cuda.pathfinder wheel
        run: |
          pushd cuda_pathfinder
          pip wheel -v --no-deps .
          popd

      - name: List the cuda.pathfinder artifacts directory
        run: |
          if [[ "${{ inputs.host-platform }}" == win* ]]; then
            export CHOWN=chown
          else
            export CHOWN="sudo chown"
          fi
          $CHOWN -R $(whoami) cuda_pathfinder/*.whl
          ls -lahR cuda_pathfinder

      # We only need/want a single pure python wheel, pick linux-64 index 0.
      # This is what we will use for testing & releasing.
      - name: Check cuda.pathfinder wheel
        if: ${{ strategy.job-index == 0 && inputs.host-platform == 'linux-64' }}
        run: |
          twine check --strict cuda_pathfinder/*.whl

      - name: Upload cuda.pathfinder build artifacts
        if: ${{ strategy.job-index == 0 && inputs.host-platform == 'linux-64' }}
        uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
        with:
          name: cuda-pathfinder-wheel
          path: cuda_pathfinder/*.whl
          if-no-files-found: error

      - name: Set up mini CTK
        uses: ./.github/actions/fetch_ctk
        continue-on-error: false
        with:
          host-platform: ${{ inputs.host-platform }}
          cuda-version: ${{ inputs.cuda-version }}

      - name: Build cuda.bindings wheel
        uses: pypa/cibuildwheel@ee02a1537ce3071a004a6b08c41e72f0fdc42d9a # v3.4.0
        with:
          package-dir: ./cuda_bindings/
          output-dir: ${{ env.CUDA_BINDINGS_ARTIFACTS_DIR }}
        env:
          CIBW_BUILD: ${{ env.CIBW_BUILD }}
          # CIBW mounts the host filesystem under /host
          CIBW_ENVIRONMENT_LINUX: >
            CUDA_PATH=/host/${{ env.CUDA_PATH }}
            CUDA_PYTHON_PARALLEL_LEVEL=${{ env.CUDA_PYTHON_PARALLEL_LEVEL }}
            CC="/host/${{ env.SCCACHE_PATH }} cc"
            CXX="/host/${{ env.SCCACHE_PATH }} c++"
            SCCACHE_GHA_ENABLED=true
            ACTIONS_RUNTIME_TOKEN=${{ env.ACTIONS_RUNTIME_TOKEN }}
            ACTIONS_RUNTIME_URL=${{ env.ACTIONS_RUNTIME_URL }}
            ACTIONS_RESULTS_URL=${{ env.ACTIONS_RESULTS_URL }}
            ACTIONS_CACHE_URL=${{ env.ACTIONS_CACHE_URL }}
            ACTIONS_CACHE_SERVICE_V2=${{ env.ACTIONS_CACHE_SERVICE_V2 }}
            SCCACHE_DIR=/host/${{ env.SCCACHE_DIR }}
            SCCACHE_CACHE_SIZE=${{ env.SCCACHE_CACHE_SIZE }}
          CIBW_ENVIRONMENT_WINDOWS: >
            CUDA_PATH="$(cygpath -w ${{ env.CUDA_PATH }})"
            CUDA_PYTHON_PARALLEL_LEVEL=${{ env.CUDA_PYTHON_PARALLEL_LEVEL }}
          # check cache stats before leaving cibuildwheel
          CIBW_BEFORE_TEST_LINUX: >
            "/host/${{ env.SCCACHE_PATH }}" --show-stats
          # force the test stage to be run (so that before-test is not skipped)
          # TODO: we might want to think twice on adding this, it does a lot of
          # things before reaching this command.
          CIBW_TEST_COMMAND: >
            echo "ok!"

      - name: List the cuda.bindings artifacts directory
        run: |
          if [[ "${{ inputs.host-platform }}" == win* ]]; then
            export CHOWN=chown
          else
            export CHOWN="sudo chown"
          fi
          $CHOWN -R $(whoami) ${{ env.CUDA_BINDINGS_ARTIFACTS_DIR }}
          ls -lahR ${{ env.CUDA_BINDINGS_ARTIFACTS_DIR }}

      - name: Check cuda.bindings wheel
        run: |
          twine check --strict ${{ env.CUDA_BINDINGS_ARTIFACTS_DIR }}/*.whl

      - name: Upload cuda.bindings build artifacts
        uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
        with:
          name: ${{ env.CUDA_BINDINGS_ARTIFACT_NAME }}
          path: ${{ env.CUDA_BINDINGS_ARTIFACTS_DIR }}/*.whl
          if-no-files-found: error

      - name: Build cuda.core wheel
        uses: pypa/cibuildwheel@ee02a1537ce3071a004a6b08c41e72f0fdc42d9a # v3.4.0
        with:
          package-dir: ./cuda_core/
          output-dir: ${{ env.CUDA_CORE_ARTIFACTS_DIR }}
        env:
          CIBW_BUILD: ${{ env.CIBW_BUILD }}
          # CIBW mounts the host filesystem under /host
          CIBW_ENVIRONMENT_LINUX: >
            CUDA_PATH=/host/${{ env.CUDA_PATH }}
            CUDA_PYTHON_PARALLEL_LEVEL=${{ env.CUDA_PYTHON_PARALLEL_LEVEL }}
            CUDA_CORE_BUILD_MAJOR=${{ env.BUILD_CUDA_MAJOR }}
            PIP_FIND_LINKS=/host/${{ env.CUDA_BINDINGS_ARTIFACTS_DIR }}
            CC="/host/${{ env.SCCACHE_PATH }} cc"
            CXX="/host/${{ env.SCCACHE_PATH }} c++"
            SCCACHE_GHA_ENABLED=true
            ACTIONS_RUNTIME_TOKEN=${{ env.ACTIONS_RUNTIME_TOKEN }}
            ACTIONS_RUNTIME_URL=${{ env.ACTIONS_RUNTIME_URL }}
            ACTIONS_RESULTS_URL=${{ env.ACTIONS_RESULTS_URL }}
            ACTIONS_CACHE_URL=${{ env.ACTIONS_CACHE_URL }}
            ACTIONS_CACHE_SERVICE_V2=${{ env.ACTIONS_CACHE_SERVICE_V2 }}
            SCCACHE_DIR=/host/${{ env.SCCACHE_DIR }}
            SCCACHE_CACHE_SIZE=${{ env.SCCACHE_CACHE_SIZE }}
          CIBW_ENVIRONMENT_WINDOWS: >
            CUDA_PATH="$(cygpath -w ${{ env.CUDA_PATH }})"
            CUDA_PYTHON_PARALLEL_LEVEL=${{ env.CUDA_PYTHON_PARALLEL_LEVEL }}
            CUDA_CORE_BUILD_MAJOR=${{ env.BUILD_CUDA_MAJOR }}
            PIP_FIND_LINKS="$(cygpath -w ${{ env.CUDA_BINDINGS_ARTIFACTS_DIR }})"
          # check cache stats before leaving cibuildwheel
          # NOTE(review): normalized to /host/ (was /host) to match the
          # CC/CXX wrappers above; a doubled slash is harmless on Linux.
          CIBW_BEFORE_TEST_LINUX: >
            "/host/${{ env.SCCACHE_PATH }}" --show-stats
          # force the test stage to be run (so that before-test is not skipped)
          # TODO: we might want to think twice on adding this, it does a lot of
          # things before reaching this command.
          CIBW_TEST_COMMAND: >
            echo "ok!"

      - name: List the cuda.core artifacts directory and rename
        run: |
          if [[ "${{ inputs.host-platform }}" == win* ]]; then
            export CHOWN=chown
          else
            export CHOWN="sudo chown"
          fi
          $CHOWN -R $(whoami) ${{ env.CUDA_CORE_ARTIFACTS_DIR }}
          # Rename wheel to include CUDA version suffix
          mkdir -p "${{ env.CUDA_CORE_ARTIFACTS_DIR }}/cu${BUILD_CUDA_MAJOR}"
          for wheel in ${{ env.CUDA_CORE_ARTIFACTS_DIR }}/*.whl; do
            if [[ -f "${wheel}" ]]; then
              base_name=$(basename "${wheel}" .whl)
              new_name="${base_name}.cu${BUILD_CUDA_MAJOR}.whl"
              mv "${wheel}" "${{ env.CUDA_CORE_ARTIFACTS_DIR }}/cu${BUILD_CUDA_MAJOR}/${new_name}"
              echo "Renamed wheel to: ${new_name}"
            fi
          done
          ls -lahR ${{ env.CUDA_CORE_ARTIFACTS_DIR }}

      # We only need/want a single pure python wheel, pick linux-64 index 0.
      - name: Build and check cuda-python wheel
        if: ${{ strategy.job-index == 0 && inputs.host-platform == 'linux-64' }}
        run: |
          pushd cuda_python
          pip wheel -v --no-deps .
          twine check --strict *.whl
          popd

      - name: List the cuda-python artifacts directory
        if: ${{ strategy.job-index == 0 && inputs.host-platform == 'linux-64' }}
        run: |
          if [[ "${{ inputs.host-platform }}" == win* ]]; then
            export CHOWN=chown
          else
            export CHOWN="sudo chown"
          fi
          $CHOWN -R $(whoami) cuda_python/*.whl
          ls -lahR cuda_python

      - name: Upload cuda-python build artifacts
        if: ${{ strategy.job-index == 0 && inputs.host-platform == 'linux-64' }}
        uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
        with:
          name: cuda-python-wheel
          path: cuda_python/*.whl
          if-no-files-found: error

      # Switch to the matrix Python for building/running the Cython tests.
      - name: Set up Python
        id: setup-python2
        uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0
        with:
          python-version: ${{ matrix.python-version }}

      - name: verify free-threaded build
        if: endsWith(matrix.python-version, 't')
        run: python -c 'import sys; assert not sys._is_gil_enabled()'

      - name: Set up Python include paths
        run: |
          if [[ "${{ inputs.host-platform }}" == linux* ]]; then
            echo "CPLUS_INCLUDE_PATH=${Python3_ROOT_DIR}/include/python${{ matrix.python-version }}" >> $GITHUB_ENV
          elif [[ "${{ inputs.host-platform }}" == win* ]]; then
            echo "CL=/I\"${Python3_ROOT_DIR}\include\python${{ matrix.python-version }}\"" >> $GITHUB_ENV
          fi
          # For caching
          echo "PY_EXT_SUFFIX=$(python -c "import sysconfig; print(sysconfig.get_config_var('EXT_SUFFIX'))")" >> $GITHUB_ENV

      - name: Install cuda.pathfinder (required for next step)
        run: |
          pip install cuda_pathfinder/*.whl

      - name: Build cuda.bindings Cython tests
        run: |
          pip install ${{ env.CUDA_BINDINGS_ARTIFACTS_DIR }}/*.whl --group ./cuda_bindings/pyproject.toml:test
          pushd ${{ env.CUDA_BINDINGS_CYTHON_TESTS_DIR }}
          bash build_tests.sh
          popd

      - name: Upload cuda.bindings Cython tests
        uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
        with:
          name: ${{ env.CUDA_BINDINGS_ARTIFACT_NAME }}-tests
          path: ${{ env.CUDA_BINDINGS_CYTHON_TESTS_DIR }}/test_*${{ env.PY_EXT_SUFFIX }}
          if-no-files-found: error

      - name: Build cuda.core Cython tests
        run: |
          pip install ${{ env.CUDA_CORE_ARTIFACTS_DIR }}/"cu${BUILD_CUDA_MAJOR}"/*.whl --group ./cuda_core/pyproject.toml:test
          pushd ${{ env.CUDA_CORE_CYTHON_TESTS_DIR }}
          bash build_tests.sh
          popd

      - name: Upload cuda.core Cython tests
        uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
        with:
          name: ${{ env.CUDA_CORE_ARTIFACT_NAME }}-tests
          path: ${{ env.CUDA_CORE_CYTHON_TESTS_DIR }}/test_*${{ env.PY_EXT_SUFFIX }}
          if-no-files-found: error

      # Note: This overwrites CUDA_PATH etc
      - name: Set up mini CTK
        uses: ./.github/actions/fetch_ctk
        continue-on-error: false
        with:
          host-platform: ${{ inputs.host-platform }}
          cuda-version: ${{ inputs.prev-cuda-version }}
          cuda-path: "./cuda_toolkit_prev"

      - name: Download cuda.bindings build artifacts from the prior branch
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          if ! (command -v gh 2>&1 >/dev/null); then
            # See https://github.com/cli/cli/blob/trunk/docs/install_linux.md#debian-ubuntu-linux-raspberry-pi-os-apt.
            # gh is needed for artifact fetching.
            mkdir -p -m 755 /etc/apt/keyrings \
              && out=$(mktemp) && wget -nv -O$out https://cli.github.com/packages/githubcli-archive-keyring.gpg \
              && cat $out | tee /etc/apt/keyrings/githubcli-archive-keyring.gpg > /dev/null \
              && chmod go+r /etc/apt/keyrings/githubcli-archive-keyring.gpg \
              && echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" | tee /etc/apt/sources.list.d/github-cli.list > /dev/null \
              && apt update \
              && apt install gh -y
          fi
          OLD_BRANCH=$(yq '.backport_branch' ci/versions.yml)
          OLD_BASENAME="cuda-bindings-python${PYTHON_VERSION_FORMATTED}-cuda*-${{ inputs.host-platform }}*"
          LATEST_PRIOR_RUN_ID=$(gh run list -b ${OLD_BRANCH} -L 1 -w "ci.yml" -s success -R NVIDIA/cuda-python --json databaseId | jq '.[]| .databaseId')
          if [[ "$LATEST_PRIOR_RUN_ID" == "" ]]; then
            echo "LATEST_PRIOR_RUN_ID not found!"
            exit 1
          fi
          gh run download $LATEST_PRIOR_RUN_ID -p ${OLD_BASENAME} -R NVIDIA/cuda-python
          rm -rf ${OLD_BASENAME}-tests # exclude cython test artifacts
          ls -al $OLD_BASENAME
          mkdir -p "${{ env.CUDA_BINDINGS_ARTIFACTS_DIR }}"
          mv $OLD_BASENAME/*.whl "${{ env.CUDA_BINDINGS_ARTIFACTS_DIR }}"
          rmdir $OLD_BASENAME

      # Second cuda.core build, against the *previous* CUDA major.
      - name: Build cuda.core wheel
        uses: pypa/cibuildwheel@ee02a1537ce3071a004a6b08c41e72f0fdc42d9a # v3.4.0
        with:
          package-dir: ./cuda_core/
          output-dir: ${{ env.CUDA_CORE_ARTIFACTS_DIR }}
        env:
          CIBW_BUILD: ${{ env.CIBW_BUILD }}
          # CIBW mounts the host filesystem under /host
          CIBW_ENVIRONMENT_LINUX: >
            CUDA_PATH=/host/${{ env.CUDA_PATH }}
            CUDA_PYTHON_PARALLEL_LEVEL=${{ env.CUDA_PYTHON_PARALLEL_LEVEL }}
            CUDA_CORE_BUILD_MAJOR=${{ env.BUILD_PREV_CUDA_MAJOR }}
            PIP_FIND_LINKS=/host/${{ env.CUDA_BINDINGS_ARTIFACTS_DIR }}
            CC="/host/${{ env.SCCACHE_PATH }} cc"
            CXX="/host/${{ env.SCCACHE_PATH }} c++"
            SCCACHE_GHA_ENABLED=true
            ACTIONS_RUNTIME_TOKEN=${{ env.ACTIONS_RUNTIME_TOKEN }}
            ACTIONS_RUNTIME_URL=${{ env.ACTIONS_RUNTIME_URL }}
            ACTIONS_RESULTS_URL=${{ env.ACTIONS_RESULTS_URL }}
            ACTIONS_CACHE_URL=${{ env.ACTIONS_CACHE_URL }}
            ACTIONS_CACHE_SERVICE_V2=${{ env.ACTIONS_CACHE_SERVICE_V2 }}
            SCCACHE_DIR=/host/${{ env.SCCACHE_DIR }}
            SCCACHE_CACHE_SIZE=${{ env.SCCACHE_CACHE_SIZE }}
          CIBW_ENVIRONMENT_WINDOWS: >
            CUDA_PATH="$(cygpath -w ${{ env.CUDA_PATH }})"
            CUDA_PYTHON_PARALLEL_LEVEL=${{ env.CUDA_PYTHON_PARALLEL_LEVEL }}
            CUDA_CORE_BUILD_MAJOR=${{ env.BUILD_PREV_CUDA_MAJOR }}
            PIP_FIND_LINKS="$(cygpath -w ${{ env.CUDA_BINDINGS_ARTIFACTS_DIR }})"
          # check cache stats before leaving cibuildwheel
          # NOTE(review): normalized to /host/ (was /host) to match the
          # CC/CXX wrappers above; a doubled slash is harmless on Linux.
          CIBW_BEFORE_TEST_LINUX: >
            "/host/${{ env.SCCACHE_PATH }}" --show-stats
          # force the test stage to be run (so that before-test is not skipped)
          # TODO: we might want to think twice on adding this, it does a lot of
          # things before reaching this command.
          CIBW_TEST_COMMAND: >
            echo "ok!"

      - name: List the cuda.core artifacts directory and rename
        run: |
          if [[ "${{ inputs.host-platform }}" == win* ]]; then
            export CHOWN=chown
          else
            export CHOWN="sudo chown"
          fi
          $CHOWN -R $(whoami) ${{ env.CUDA_CORE_ARTIFACTS_DIR }}
          ls -lahR ${{ env.CUDA_CORE_ARTIFACTS_DIR }}
          # Rename wheel to include CUDA version suffix
          mkdir -p "${{ env.CUDA_CORE_ARTIFACTS_DIR }}/cu${BUILD_PREV_CUDA_MAJOR}"
          for wheel in ${{ env.CUDA_CORE_ARTIFACTS_DIR }}/*.whl; do
            if [[ -f "${wheel}" ]]; then
              base_name=$(basename "${wheel}" .whl)
              new_name="${base_name}.cu${BUILD_PREV_CUDA_MAJOR}.whl"
              mv "${wheel}" "${{ env.CUDA_CORE_ARTIFACTS_DIR }}/cu${BUILD_PREV_CUDA_MAJOR}/${new_name}"
              echo "Renamed wheel to: ${new_name}"
            fi
          done
          ls -lahR ${{ env.CUDA_CORE_ARTIFACTS_DIR }}

      # Combine the cuN and cu(N-1) builds into the single published wheel.
      - name: Merge cuda.core wheels
        run: |
          pip install wheel
          python ci/tools/merge_cuda_core_wheels.py \
            "${{ env.CUDA_CORE_ARTIFACTS_DIR }}"/cu"${BUILD_CUDA_MAJOR}"/cuda_core*.whl \
            "${{ env.CUDA_CORE_ARTIFACTS_DIR }}"/cu"${BUILD_PREV_CUDA_MAJOR}"/cuda_core*.whl \
            --output-dir "${{ env.CUDA_CORE_ARTIFACTS_DIR }}"

      - name: Check cuda.core wheel
        run: |
          twine check --strict ${{ env.CUDA_CORE_ARTIFACTS_DIR }}/*.whl

      - name: Upload cuda.core build artifacts
        uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
        with:
          name: ${{ env.CUDA_CORE_ARTIFACT_NAME }}
          path: ${{ env.CUDA_CORE_ARTIFACTS_DIR }}/*.whl
          if-no-files-found: error