Commit 3776823

verify: run stdlib tests

1 parent: 5207d8b

5 files changed: 1400 additions & 1 deletion

Cargo.lock

Lines changed: 1 addition & 0 deletions

(Generated file; diff not rendered.)

Cargo.toml

Lines changed: 1 addition & 0 deletions

@@ -30,6 +30,7 @@ rustls = { version = "0.23" , default-features = false, features = ["aws_lc_rs"]
 semver = "1.0.27"
 serde = { version = "1.0.228", features = ["derive"] }
 serde_json = "1.0.145"
+serde_yaml = "0.9.33"
 sha2 = "0.10.9"
 tar = "0.4.44"
 tempfile = "3.23.0"

src/validation.rs

Lines changed: 7 additions & 0 deletions

@@ -855,6 +855,7 @@ const SHARED_LIBRARY_EXTENSIONS: &[&str] = &[
 ];

 const PYTHON_VERIFICATIONS: &str = include_str!("verify_distribution.py");
+const TEST_ANNOTATIONS: &str = include_str!("../stdlib-test-annotations.yml");

 fn allowed_dylibs_for_triple(triple: &str) -> Vec<MachOAllowedDylib> {
     match triple {
@@ -2171,6 +2172,12 @@ fn verify_distribution_behavior(dist_path: &Path) -> Result<Vec<String>> {
     let test_file = temp_dir.path().join("verify.py");
     std::fs::write(&test_file, PYTHON_VERIFICATIONS.as_bytes())?;

+    // Normalize annotations to JSON so the Python stdlib can read it.
+    let annotations_json: serde_json::Value = serde_yaml::from_str(TEST_ANNOTATIONS)?;
+    let annotations_pretty = serde_json::to_vec_pretty(&annotations_json)?;
+    let test_annotations_file = temp_dir.path().join("test-annotations.json");
+    std::fs::write(&test_annotations_file, annotations_pretty)?;
+
     eprintln!(" running interpreter tests (output should follow)");
     let output = duct::cmd(&python_exe, [test_file.display().to_string()])
         .stdout_to_stderr()
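
Note: the Rust side converts the YAML annotations to JSON before handing them to the distribution's Python, so the verify script only needs the stdlib json module rather than a third-party YAML parser. The annotations file itself (referenced via include_str!) is not rendered above, so the sketch below is a rough Python equivalent of the normalization step, assuming PyYAML; the sample entry and its field names are inferred from how run_stdlib() consumes them, not copied from the real stdlib-test-annotations.yml:

# Illustrative YAML -> JSON normalization (PyYAML assumed); the entry below
# is hypothetical, with field names taken from what run_stdlib() reads.
import json

import yaml

SAMPLE = """
expected-failures:
  - name: test_example              # hypothetical test name
    reason: placeholder reason
    targets:
      - "x86_64-unknown-linux-.*"   # regex applied to TARGET_TRIPLE via re.match
    minimum-python-version: "3.13"
    build-option: debug             # rule applies only when this option is set
    exclude: true                   # skip the test entirely, not just ignore it
"""

annotations = yaml.safe_load(SAMPLE)
with open("test-annotations.json", "w") as fh:
    json.dump(annotations, fh, indent=2)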

src/verify_distribution.py

Lines changed: 177 additions & 1 deletion

@@ -3,7 +3,9 @@
 # file, You can obtain one at https://mozilla.org/MPL/2.0/.

 import importlib.machinery
+import json
 import os
+import re
 import struct
 import subprocess
 import sys
@@ -318,5 +320,179 @@ def assertPythonWorks(path: Path, argv0: str = None):
 assertPythonWorks(sys.executable, argv0="/dev/null")


+def run_us() -> int:
+    t = unittest.TestProgram(exit=False)
+    if not t.result.wasSuccessful():
+        return 1
+    elif t.result.testsRun == 0:
+        return 1
+    else:
+        return 0
+
+
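Note: run_us() drives the module's own unittest classes. unittest.TestProgram is the class behind unittest.main(); with exit=False it returns instead of calling sys.exit(), leaving the TestResult on .result so the caller can fold it into an exit code. Treating testsRun == 0 as a failure guards against silently collecting nothing. A standalone sketch of the pattern:

# Minimal sketch of the TestProgram(exit=False) pattern: run the tests
# defined in this module, then inspect the result instead of exiting.
import unittest


class ExampleTest(unittest.TestCase):  # hypothetical test case
    def test_truth(self):
        self.assertTrue(True)


if __name__ == "__main__":
    prog = unittest.TestProgram(exit=False)
    ok = prog.result.wasSuccessful() and prog.result.testsRun > 0
    raise SystemExit(0 if ok else 1)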
+def load_test_annotations():
+    here = Path(__file__).parent
+    p = here / "test-annotations.json"
+    with p.open("rb") as fh:
+        return json.load(fh)
+
+
+def python_version_compare(a: str, b: str) -> int:
+    a_parts = a.split(".")
+    b_parts = b.split(".")
+
+    a_ver = (int(a_parts[0]), int(a_parts[1]))
+    b_ver = (int(b_parts[0]), int(b_parts[1]))
+
+    if a_ver < b_ver:
+        return -1
+    elif a_ver == b_ver:
+        return 0
+    else:
+        return 1
+
+
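Note: the helper compares integer tuples rather than the version strings themselves because lexicographic string comparison misorders double-digit minor versions, which would invert the minimum/maximum checks below:

# The pitfall the tuple comparison avoids: as strings, "3.13" sorts
# before "3.9" because '1' < '9' at the third character.
assert "3.13" < "3.9"    # lexicographic order, wrong for versions
assert (3, 13) > (3, 9)  # integer tuples compare numerically, correct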
+def run_stdlib() -> int:
+    target_triple = os.environ["TARGET_TRIPLE"]
+    build_options = os.environ["BUILD_OPTIONS"]
+    build_options_set = set(build_options.split("+"))
+    python_version = f"{sys.version_info.major}.{sys.version_info.minor}"
+
+    # musl debug builds fail to run the test harness for some reason. Skip them.
+    if target_triple.endswith("-unknown-linux-musl") and "debug" in build_options_set:
+        print("stdlib tests being skipped for musl debug build because it doesn't work")
+        return 0
+
+    annotations = load_test_annotations()
+
+    expect_failures = []
+    excludes = []
+    dont_verify = set()
+
+    for test in annotations["expected-failures"]:
+        name = test["name"]
+
+        if targets := test.get("targets"):
+            matches_target = any(re.match(p, target_triple) for p in targets)
+        else:
+            matches_target = True
+
+        for m in test.get("ignore-targets", []):
+            if re.match(m, target_triple):
+                matches_target = False
+
+        if not matches_target:
+            print(f"ignoring rule (target doesn't match): {name}")
+            continue
+
+        python_minimum_version = test.get("minimum-python-version", "1.0")
+        python_maximum_version = test.get("maximum-python-version", "100.0")
+
+        if python_version_compare(python_version, python_minimum_version) < 0:
+            print(
+                f"ignoring rule ({python_version} < {python_minimum_version}): {name}"
+            )
+            continue
+
+        if python_version_compare(python_version, python_maximum_version) > 0:
+            print(
+                f"ignoring rule ({python_version} > {python_maximum_version}): {name}"
+            )
+            continue
+
+        if option := test.get("build-option"):
+            if option not in build_options_set:
+                print(f"ignoring rule (build option {option} not present): {name}")
+                continue
+
+        if option := test.get("no-build-option"):
+            if option in build_options_set:
+                print(f"ignoring rule (build option {option} is present): {name}")
+                continue
+
+        print(f"expected test failure {name}: {test['reason']}")
+        expect_failures.append(name)
+
+        if test.get("exclude"):
+            excludes.append(name)
+
+        if test.get("dont-verify"):
+            dont_verify.add(name)
+
+    sys.stdout.flush()
+
+    # Spawn the test harness as its own process. Otherwise it gets confused.
+    base_args = [
+        sys.executable,
+        "-u",
+        "-W",
+        "default",
+        "-bb",
+        "-E",
+        "-m",
+        "test",
+    ]
+
+    args = list(base_args)
+
+    args.extend(
+        [
+            "-w",  # Re-run failed tests in verbose mode to aid debugging.
+            "-j",
+            "0",  # Run tests in parallel using all available CPUs.
+        ]
+    )
+
+    for t in excludes:
+        args.extend(["--exclude", t])
+    for i in expect_failures:
+        args.extend(["--ignore", i])
+
+    code = subprocess.run(args).returncode
+    if code != 0:
+        print("main test harness failed")
+    sys.stdout.flush()
+
+    # Run ignored / expected failures tests and verify they actually fail.
+    for i, t in enumerate(expect_failures):
+        args = list(base_args)
+
+        args.append("-v")
+
+        # Always provide the test_* name as a positional argument to limit
+        # which tests are loaded. Otherwise we load all test files and incur
+        # substantial overhead.
+        parts = t.split(".")
+        if parts[0] == "test":
+            args.extend(["-m", t, parts[1]])
+        else:
+            args.append(t)
+
+        print(f"[{i + 1}/{len(expect_failures)}] verifying {t} fails... ", end="")
+        res = subprocess.run(args, capture_output=True)
+
+        unexpected = False
+        if res.returncode != 0:
+            status = "yes"
+        elif t in dont_verify:
+            status = "no (ignored)"
+        else:
+            status = "no (unexpected pass)"
+            unexpected = True
+
+        print(status)
+
+        if unexpected:
+            code = 1
+            print(res.stdout.decode("utf-8", errors="ignore"))
+
+        sys.stdout.flush()
+
+    return code
+
+
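Note on the second loop above: expected failures are only trusted after being re-run individually, so a test that starts passing surfaces as an error instead of rotting in the annotations file. Distilled to its core, with a stand-in command rather than the real `python -m test` invocation:

# Distilled "expected failures must actually fail" check. The command is a
# hypothetical stand-in; run_stdlib() shells out to `python -m test` here.
import subprocess
import sys

expected_to_fail = [["python3", "-c", "raise SystemExit(1)"]]  # hypothetical

code = 0
for argv in expected_to_fail:
    res = subprocess.run(argv, capture_output=True)
    if res.returncode == 0:
        print(f"unexpected pass: {argv}")
        code = 1

sys.exit(code)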
 if __name__ == "__main__":
-    unittest.main()
+    res = [run_us(), run_stdlib()]
+    for code in res:
+        if code:
+            sys.exit(1)
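
Note: to run the script outside verify_distribution_behavior(), it expects TARGET_TRIPLE and BUILD_OPTIONS in the environment and test-annotations.json next to itself, which the Rust harness arranges in a temp dir. A hypothetical manual invocation; the paths and values below are assumptions, not taken from the commit:

# Hypothetical manual run of the verify script with a built distribution.
import os
import subprocess

env = dict(os.environ)
env["TARGET_TRIPLE"] = "x86_64-unknown-linux-gnu"  # assumed example triple
env["BUILD_OPTIONS"] = "pgo+lto"                   # assumed example options

# The script and test-annotations.json must sit in the same directory.
subprocess.run(
    ["/tmp/verify/python/bin/python3", "/tmp/verify/verify.py"],  # assumed paths
    env=env,
    check=True,
)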
