Skip to content

Commit 0b07147

Browse files
Bump pytest version to 9 (#420)
Ref: https://docs.pytest.org/en/stable/changelog.html#pytest-9-0-0-2025-11-05 --------- Co-authored-by: Adrien <avannson@buf.build>
1 parent 5c93556 commit 0b07147

3 files changed

Lines changed: 79 additions & 76 deletions

File tree

pyproject.toml

Lines changed: 50 additions & 49 deletions
Original file line numberDiff line numberDiff line change
@@ -22,17 +22,17 @@ classifiers = [
2222
]
2323
dynamic = ["version"]
2424
dependencies = [
25-
"protobuf>=5",
26-
"cel-python==0.2.*",
27-
# We need at least this version, which started publishing wheels for Python 3.14.
28-
# Ref: https://github.com/google/re2/issues/580
29-
"google-re2>=1.1.20251105; python_version == '3.14'",
30-
# We need at least this version, which started publishing wheels for Python 3.13.
31-
# Ref: https://github.com/google/re2/issues/516
32-
"google-re2>=1.1.20250722; python_version == '3.13'",
33-
# 1.1 started supporting 3.12.
34-
"google-re2>=1.1; python_version == '3.12'",
35-
"google-re2>=1",
25+
"protobuf>=5",
26+
"cel-python==0.2.*",
27+
# We need at least this version, which started publishing wheels for Python 3.14.
28+
# Ref: https://github.com/google/re2/issues/580
29+
"google-re2>=1.1.20251105; python_version == '3.14'",
30+
# We need at least this version, which started publishing wheels for Python 3.13.
31+
# Ref: https://github.com/google/re2/issues/516
32+
"google-re2>=1.1.20250722; python_version == '3.13'",
33+
# 1.1 started supporting 3.12.
34+
"google-re2>=1.1; python_version == '3.12'",
35+
"google-re2>=1",
3636
]
3737

3838
[project.urls]
@@ -42,11 +42,11 @@ Issues = "https://github.com/bufbuild/protovalidate-python/issues"
4242

4343
[dependency-groups]
4444
dev = [
45-
"google-re2-stubs>=0.1.1",
46-
"mypy>=1.17.1",
47-
"pytest>=8.4.1",
48-
"ruff>=0.12.0",
49-
"types-protobuf>=5.29.1.20250315",
45+
"google-re2-stubs>=0.1.1",
46+
"mypy>=1.17.1",
47+
"pytest>=9.0.2",
48+
"ruff>=0.12.0",
49+
"types-protobuf>=5.29.1.20250315",
5050
]
5151

5252
[tool.uv]
@@ -59,40 +59,40 @@ raw-options = { fallback_version = "0.0.0" }
5959
[tool.ruff]
6060
line-length = 120
6161
lint.select = [
62-
"A",
63-
"ARG",
64-
"B",
65-
"C",
66-
"DTZ",
67-
"E",
68-
"EM",
69-
"F",
70-
"FBT",
71-
"I",
72-
"ICN",
73-
"N",
74-
"PLC",
75-
"PLE",
76-
"PLR",
77-
"PLW",
78-
"Q",
79-
"RUF",
80-
"S",
81-
"T",
82-
"TID",
83-
"UP",
84-
"W",
85-
"YTT",
62+
"A",
63+
"ARG",
64+
"B",
65+
"C",
66+
"DTZ",
67+
"E",
68+
"EM",
69+
"F",
70+
"FBT",
71+
"I",
72+
"ICN",
73+
"N",
74+
"PLC",
75+
"PLE",
76+
"PLR",
77+
"PLW",
78+
"Q",
79+
"RUF",
80+
"S",
81+
"T",
82+
"TID",
83+
"UP",
84+
"W",
85+
"YTT",
8686
]
8787
lint.ignore = [
88-
# Ignore complexity
89-
"C901",
90-
"PLR0911",
91-
"PLR0912",
92-
"PLR0913",
93-
"PLR0915",
94-
# Ignore magic values - in this library, most are obvious in context.
95-
"PLR2004",
88+
# Ignore complexity
89+
"C901",
90+
"PLR0911",
91+
"PLR0912",
92+
"PLR0913",
93+
"PLR0915",
94+
# Ignore magic values - in this library, most are obvious in context.
95+
"PLR2004",
9696
]
9797

9898
[tool.ruff.lint.isort]
@@ -105,7 +105,8 @@ ban-relative-imports = "all"
105105
# Tests can use assertions.
106106
"test/**/*" = ["S101"]
107107

108-
[tool.pytest.ini_options]
108+
[tool.pytest]
109+
strict = true
109110
# restrict testpaths to speed up test discovery
110111
testpaths = ["test"]
111112

test/test_format.py

Lines changed: 28 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -101,34 +101,36 @@ def get_eval_error_message(test: simple_pb2.SimpleTest) -> str | None:
101101
env = celpy.Environment(runner_class=InterpretedRunner)
102102

103103

104-
@pytest.mark.parametrize("format_test", _format_tests)
105-
def test_format_successes(format_test):
104+
def test_format_successes(subtests: pytest.Subtests):
106105
"""Tests success scenarios for string.format"""
107-
if format_test.name in skipped_tests:
108-
pytest.skip(f"skipped test: {format_test.name}")
109-
ast = env.compile(format_test.expr)
110-
prog = env.program(ast, functions=extra_func.make_extra_funcs())
106+
for format_test in _format_tests:
107+
with subtests.test(msg=format_test.name):
108+
if format_test.name in skipped_tests:
109+
pytest.skip(f"skipped test: {format_test.name}")
110+
ast = env.compile(format_test.expr)
111+
prog = env.program(ast, functions=extra_func.make_extra_funcs())
111112

112-
bindings = build_variables(format_test.bindings)
113-
result = prog.evaluate(bindings)
114-
expected = get_expected_result(format_test)
115-
assert expected is not None, f"[{format_test.name}]: expected a success result to be defined"
116-
assert result == expected
113+
bindings = build_variables(format_test.bindings)
114+
result = prog.evaluate(bindings)
115+
expected = get_expected_result(format_test)
116+
assert expected is not None, f"[{format_test.name}]: expected a success result to be defined"
117+
assert result == expected
117118

118119

119-
@pytest.mark.parametrize("format_error_test", _format_error_tests)
120-
def test_format_errors(format_error_test):
120+
def test_format_errors(subtests: pytest.Subtests):
121121
"""Tests error scenarios for string.format"""
122-
if format_error_test.name in skipped_error_tests:
123-
pytest.skip(f"skipped test: {format_error_test.name}")
124-
ast = env.compile(format_error_test.expr)
125-
prog = env.program(ast, functions=extra_func.make_extra_funcs())
126-
127-
bindings = build_variables(format_error_test.bindings)
128-
try:
129-
prog.evaluate(bindings)
130-
pytest.fail(f"[{format_error_test.name}]: expected an error to be raised during evaluation")
131-
except celpy.CELEvalError as e:
132-
msg = get_eval_error_message(format_error_test)
133-
assert msg is not None, f"[{format_error_test.name}]: expected an eval error to be defined"
134-
assert str(e) == msg
122+
for format_error_test in _format_error_tests:
123+
with subtests.test(msg=format_error_test.name):
124+
if format_error_test.name in skipped_error_tests:
125+
pytest.skip(f"skipped test: {format_error_test.name}")
126+
ast = env.compile(format_error_test.expr)
127+
prog = env.program(ast, functions=extra_func.make_extra_funcs())
128+
129+
bindings = build_variables(format_error_test.bindings)
130+
try:
131+
prog.evaluate(bindings)
132+
pytest.fail(f"[{format_error_test.name}]: expected an error to be raised during evaluation")
133+
except celpy.CELEvalError as e:
134+
msg = get_eval_error_message(format_error_test)
135+
assert msg is not None, f"[{format_error_test.name}]: expected an eval error to be defined"
136+
assert str(e) == msg

uv.lock

Lines changed: 1 addition & 1 deletion
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

0 commit comments

Comments (0)