Skip to content

Commit 33e2561

Browse files
[CI] Test against PyTorch 2.10 (#1876)
* [CI] Test against PyTorch 2.10
* xfail tests with regressions for torch 2.10
1 parent 9dd8b70 commit 33e2561

File tree

4 files changed

+22
-7
lines changed

4 files changed

+22
-7
lines changed

.github/workflows/tests-nightly.yml

Lines changed: 4 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -20,7 +20,7 @@ jobs:
2020
platform: [linux-x64, linux-aarch64, macos, windows]
2121
# default runners don't have AVX-512 support, but icelake does
2222
cpu_type: ["", icelake]
23-
torch_version: ["2.3.1", "2.8.0", "2.9.1"]
23+
torch_version: ["2.3.1", "2.9.1", "2.10.0"]
2424

2525
exclude:
2626
# aarch64 minimum torch version is 2.5.1
@@ -65,13 +65,13 @@ jobs:
6565
torch_version: "2.3.1"
6666
pypi_index: "https://download.pytorch.org/whl/cu118"
6767
- cuda_version: "12.6.3"
68-
torch_version: "2.7.1"
68+
torch_version: "2.8.0"
6969
pypi_index: "https://download.pytorch.org/whl/cu126"
7070
- cuda_version: "12.8.1"
71-
torch_version: "2.8.0"
71+
torch_version: "2.9.1"
7272
pypi_index: "https://download.pytorch.org/whl/cu128"
7373
- cuda_version: "13.0.2"
74-
torch_version: "2.9.1"
74+
torch_version: "2.10.0"
7575
pypi_index: "https://download.pytorch.org/whl/cu130"
7676

7777
# Windows CUDA Tests - T4 GPU (CUDA 11.8 only, multiple torch versions)

.github/workflows/tests-pr.yml

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -31,7 +31,7 @@ jobs:
3131
platform: [linux-x64, linux-aarch64, macos]
3232
# default runners don't have AVX-512 support, but icelake does
3333
cpu_type: ["", icelake]
34-
torch_version: ["2.3.1", "2.9.1"]
34+
torch_version: ["2.3.1", "2.10.0"]
3535

3636
exclude:
3737
# aarch64 minimum torch version is 2.5.1
@@ -73,10 +73,10 @@ jobs:
7373
torch_version: "2.3.1"
7474
pypi_index: "https://download.pytorch.org/whl/cu118"
7575
- cuda_version: "12.8.1"
76-
torch_version: "2.8.0"
76+
torch_version: "2.9.1"
7777
pypi_index: "https://download.pytorch.org/whl/cu128"
7878
- cuda_version: "13.0.2"
79-
torch_version: "2.9.1"
79+
torch_version: "2.10.0"
8080
pypi_index: "https://download.pytorch.org/whl/cu130"
8181

8282
# Windows CUDA test - single configuration

tests/test_functional.py

Lines changed: 12 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -430,6 +430,15 @@ def test_approx_igemm(self, dim1, dim2, quant_methods, batched):
430430
@pytest.mark.parametrize("seq_dim", [16, 256], ids=id_formatter("seq_dim"))
431431
@pytest.mark.parametrize("transpose", BOOLEAN_TUPLES, ids=id_formatter("transpose"))
432432
def test_igemm(self, hidden_dim, batch_dim, transpose, seq_dim):
433+
if (
434+
torch.version.cuda == "13.0"
435+
and torch.__version__ >= (2, 10)
436+
and not any(transpose)
437+
and batch_dim == 256
438+
and seq_dim == 256
439+
):
440+
pytest.xfail("Failure due to regression in cuBLAS for CUDA Toolkit 13.0.2.")
441+
433442
hidden_dim = hidden_dim - (hidden_dim % 32)
434443
batch_dim = batch_dim - (batch_dim % 16)
435444
seq_dim = seq_dim - (seq_dim % 16)
@@ -570,6 +579,9 @@ def min_max(x):
570579
@pytest.mark.parametrize("dim4", [32, 256], ids=id_formatter("dim4"))
571580
@pytest.mark.parametrize("transpose", BOOLEAN_TUPLES, ids=id_formatter("transpose"))
572581
def test_ibmm(self, dim1, dim2, dim3, dim4, transpose):
582+
if torch.version.cuda == "13.0" and torch.__version__ >= (2, 10) and dim1 == 64:
583+
pytest.xfail("Failure due to regression in cuBLAS for CUDA Toolkit 13.0.2.")
584+
573585
dim2 = dim2 - (dim2 % 16)
574586
dim3 = dim3 - (dim3 % 16)
575587
dim4 = dim4 - (dim4 % 16)

tests/test_linear8bitlt.py

Lines changed: 3 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -243,6 +243,9 @@ def test_linear8bitlt_torch_compile(device, threshold, bias, fullgraph, mode):
243243
if device == "cuda" and platform.system() == "Windows":
244244
pytest.skip("Triton is not officially supported on Windows")
245245

246+
if device == "cuda" and mode == "reduce-overhead" and fullgraph and threshold > 0 and torch.__version__ >= (2, 10):
247+
pytest.xfail("Failure due to regression in torch 2.10 related to reduced overhead mode and CUDA.")
248+
246249
dim = 256
247250
batch_size = 16
248251

0 commit comments

Comments (0)