 # file, You can obtain one at https://mozilla.org/MPL/2.0/.
 
 import importlib.machinery
+import json
 import os
+import re
 import struct
 import subprocess
 import sys
@@ -318,5 +320,179 @@ def assertPythonWorks(path: Path, argv0: str = None):
         assertPythonWorks(sys.executable, argv0="/dev/null")
 
 
+def run_us() -> int:
+    # Run this module's own unittest suite in-process (unittest.TestProgram
+    # defaults to tests defined in __main__); fail if any test failed or if
+    # no tests were collected at all.
+    t = unittest.TestProgram(exit=False)
+    if not t.result.wasSuccessful():
+        return 1
+    elif t.result.testsRun == 0:
+        return 1
+    else:
+        return 0
+
+
+def load_test_annotations():
+    here = Path(__file__).parent
+    p = here / "test-annotations.json"
+    with p.open("rb") as fh:
+        return json.load(fh)
+
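+# A sketch of the expected test-annotations.json shape, inferred from the
+# keys consumed in run_stdlib() below; the values here are hypothetical and
+# every key except "name" and "reason" is optional:
+#
+# {
+#   "expected-failures": [
+#     {
+#       "name": "test.test_foo",
+#       "reason": "why this test is known to fail",
+#       "targets": ["aarch64-apple-.*"],
+#       "ignore-targets": ["x86_64-unknown-linux-.*"],
+#       "minimum-python-version": "3.11",
+#       "maximum-python-version": "3.13",
+#       "build-option": "debug",
+#       "no-build-option": "lto",
+#       "exclude": true,
+#       "dont-verify": true
+#     }
+#   ]
+# }
+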
+
+def python_version_compare(a: str, b: str) -> int:
+    a_parts = a.split(".")
+    b_parts = b.split(".")
+
+    a_ver = (int(a_parts[0]), int(a_parts[1]))
+    b_ver = (int(b_parts[0]), int(b_parts[1]))
+
+    if a_ver < b_ver:
+        return -1
+    elif a_ver == b_ver:
+        return 0
+    else:
+        return 1
+
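+# For example (hypothetical values): python_version_compare("3.9", "3.13")
+# returns -1, since (3, 9) < (3, 13) numerically, whereas a naive string
+# comparison would order "3.13" before "3.9".
+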
+
+def run_stdlib() -> int:
+    target_triple = os.environ["TARGET_TRIPLE"]
+    build_options = os.environ["BUILD_OPTIONS"]
+    build_options_set = set(build_options.split("+"))
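+    # e.g. (hypothetical) BUILD_OPTIONS="debug+lto" -> {"debug", "lto"}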
+    python_version = f"{sys.version_info.major}.{sys.version_info.minor}"
+
+    # musl debug builds fail to run the test harness for some reason. Skip them.
+    if target_triple.endswith("-unknown-linux-musl") and "debug" in build_options_set:
+        print("skipping stdlib tests: the test harness is broken on musl debug builds")
+        return 0
+
+    annotations = load_test_annotations()
+
+    expect_failures = []
+    excludes = []
+    dont_verify = set()
+
+    for test in annotations["expected-failures"]:
+        name = test["name"]
+
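+        # Note re.match() anchors at the start of target_triple, so a pattern
+        # like "aarch64-.*" (hypothetical) matches every aarch64 target.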
+        if targets := test.get("targets"):
+            matches_target = any(re.match(p, target_triple) for p in targets)
+        else:
+            matches_target = True
+
+        for m in test.get("ignore-targets", []):
+            if re.match(m, target_triple):
+                matches_target = False
+
+        if not matches_target:
+            print(f"ignoring rule (target doesn't match): {name}")
+            continue
+
+        python_minimum_version = test.get("minimum-python-version", "1.0")
+        python_maximum_version = test.get("maximum-python-version", "100.0")
+
+        if python_version_compare(python_version, python_minimum_version) < 0:
+            print(
+                f"ignoring rule ({python_version} < {python_minimum_version}): {name}"
+            )
+            continue
+
+        if python_version_compare(python_version, python_maximum_version) > 0:
+            print(
+                f"ignoring rule ({python_version} > {python_maximum_version}): {name}"
+            )
+            continue
+
+        if option := test.get("build-option"):
+            if option not in build_options_set:
+                print(f"ignoring rule (build option {option} not present): {name}")
+                continue
+
+        if option := test.get("no-build-option"):
+            if option in build_options_set:
+                print(f"ignoring rule (build option {option} is present): {name}")
+                continue
+
+        print(f"expected test failure {name}: {test['reason']}")
+        expect_failures.append(name)
+
+        if test.get("exclude"):
+            excludes.append(name)
+
+        if test.get("dont-verify"):
+            dont_verify.add(name)
+
+    sys.stdout.flush()
+
+    # Spawn the test harness as its own process; running it inside this
+    # process confuses the harness.
+    base_args = [
+        sys.executable,
+        "-u",  # Unbuffered stdout/stderr.
+        "-W",
+        "default",  # Enable default warning behavior (warn once per location).
+        "-bb",  # Raise errors (not just warnings) on bytes/str comparisons.
+        "-E",  # Ignore PYTHON* environment variables.
+        "-m",
+        "test",  # CPython's stdlib test runner (regrtest).
+    ]
+
+    args = list(base_args)
+
+    args.extend(
+        [
+            "-w",  # Re-run failed tests in verbose mode to aid debugging.
+            "-j",
+            "0",  # Run tests in parallel using all available CPUs.
+        ]
+    )
+
+    for t in excludes:
+        args.extend(["--exclude", t])
+    for i in expect_failures:
+        args.extend(["--ignore", i])
+
+    code = subprocess.run(args).returncode
+    if code != 0:
+        print("main test harness failed")
+    sys.stdout.flush()
+
+    # Run ignored / expected-failure tests and verify they actually fail.
+    for i, t in enumerate(expect_failures):
+        args = list(base_args)
+
+        args.append("-v")
+
+        # Always provide the test_* name as a positional argument to limit
+        # which tests are loaded. Otherwise we load all test files and incur
+        # substantial overhead.
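+        # For example (hypothetical): t = "test.test_ssl.BasicTests.test_foo"
+        # yields ["-m", t, "test_ssl"], so only test_ssl is loaded and -m
+        # narrows the run to the single matching test.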
+        parts = t.split(".")
+        if parts[0] == "test":
+            args.extend(["-m", t, parts[1]])
+        else:
+            args.append(t)
+
+        print(f"[{i + 1}/{len(expect_failures)}] verifying {t} fails... ", end="")
+        res = subprocess.run(args, capture_output=True)
+
+        unexpected = False
+        if res.returncode != 0:
+            status = "yes"
+        elif t in dont_verify:
+            status = "no (ignored)"
+        else:
+            status = "no (unexpected pass)"
+            unexpected = True
+
+        print(status)
+
+        if unexpected:
+            code = 1
+            print(res.stdout.decode("utf-8", errors="ignore"))
+
+        sys.stdout.flush()
+
+    return code
+
+
 if __name__ == "__main__":
-    unittest.main()
+    # Run our own tests first, then the stdlib suite; both always run so we
+    # see every failure, and a nonzero result from either fails the process.
+    if any([run_us(), run_stdlib()]):
+        sys.exit(1)