forked from microsoft/vscode-python
-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathtest_execution.py
More file actions
474 lines (419 loc) · 16.1 KB
/
test_execution.py
File metadata and controls
474 lines (419 loc) · 16.1 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
import pathlib
import sys
from typing import TYPE_CHECKING, Any, Dict, List, Optional
from unittest.mock import patch
import pytest
sys.path.append(os.fspath(pathlib.Path(__file__).parent))
python_files_path = pathlib.Path(__file__).parent.parent.parent
sys.path.insert(0, os.fspath(python_files_path))
sys.path.insert(0, os.fspath(python_files_path / "lib" / "python"))
from tests.pytestadapter import helpers # noqa: E402
from unittestadapter.execution import run_tests # noqa: E402
if TYPE_CHECKING:
from unittestadapter.pvsc_utils import ExecutionPayloadDict
TEST_DATA_PATH = pathlib.Path(__file__).parent / ".data"
def test_no_ids_run() -> None:
    """Running with an empty list of test ids should yield a successful, empty result dict."""
    start_dir: str = os.fspath(TEST_DATA_PATH)
    empty_ids: List[str] = []
    payload = run_tests(start_dir, empty_ids, "discovery_simple*", None, 1, None)

    assert payload
    for key in ("cwd", "status"):
        assert key in payload
    assert payload["status"] == "success"
    assert payload["cwd"] == start_dir
    # Guard clause instead of if/else: a missing result dict is a failure.
    result = payload["result"]
    if result is None:
        raise AssertionError("actual['result'] is None")
    assert len(result) == 0
@pytest.fixture
def mock_send_run_data():
    """Patch send_run_data so test runs don't attempt to write to a real results pipe."""
    patcher = patch("unittestadapter.execution.send_run_data")
    mock = patcher.start()
    try:
        yield mock
    finally:
        # Ensure the patch is removed even if the test body raises.
        patcher.stop()
def test_single_ids_run(mock_send_run_data):
    """This test runs on a single test_id, therefore it should return a dict with a single key-value pair for the result.

    This single test passes so the outcome should be 'success'.
    """
    id_ = "discovery_simple.DiscoverySimple.test_one"
    os.environ["TEST_RUN_PIPE"] = "fake"
    actual: ExecutionPayloadDict = run_tests(
        os.fspath(TEST_DATA_PATH),
        [id_],
        "discovery_simple*",
        None,
        1,
        None,
    )
    # Access the arguments passed to the (mocked) send_run_data call.
    args, _ = mock_send_run_data.call_args
    test_actual = args[0]  # first argument is the result
    assert test_actual

    actual_result: Optional[Dict[str, Dict[str, Optional[str]]]] = actual["result"]
    # Guard clauses instead of nested if/else.
    if actual_result is None:
        raise AssertionError("actual_result is None")
    # isinstance must check against the builtin `dict`; typing.Dict as an
    # isinstance target is deprecated.
    if not isinstance(actual_result, dict):
        raise AssertionError("actual_result is not a Dict")
    assert len(actual_result) == 1
    assert id_ in actual_result
    id_result = actual_result[id_]
    assert id_result is not None
    assert "outcome" in id_result
    assert id_result["outcome"] == "success"
def test_subtest_run(mock_send_run_data) -> None:  # noqa: ARG001
    """This test runs on the test_subtest file, which has a single method, test_even, that uses unittest subtest.

    The actual result of run should return a dict payload with 6 entries for the 6 subtests.
    """
    id_ = "test_subtest.NumbersTest.test_even"
    os.environ["TEST_RUN_PIPE"] = "fake"
    actual = run_tests(
        os.fspath(TEST_DATA_PATH),
        [id_],
        "test_subtest.py",
        None,
        1,
        None,
    )
    subtests_ids = [
        "test_subtest.NumbersTest.test_even (i=0)",
        "test_subtest.NumbersTest.test_even (i=1)",
        "test_subtest.NumbersTest.test_even (i=2)",
        "test_subtest.NumbersTest.test_even (i=3)",
        "test_subtest.NumbersTest.test_even (i=4)",
        "test_subtest.NumbersTest.test_even (i=5)",
    ]
    assert actual
    assert all(item in actual for item in ("cwd", "status"))
    assert actual["status"] == "success"
    assert actual["cwd"] == os.fspath(TEST_DATA_PATH)
    assert actual["result"] is not None
    result = actual["result"]
    assert len(result) == 6
    # Use a distinct loop variable so the parent test id `id_` isn't shadowed.
    for subtest_id in subtests_ids:
        assert subtest_id in result
@pytest.mark.parametrize(
    ("test_ids", "pattern", "cwd", "expected_outcome"),
    [
        (
            [
                "test_add.TestAddFunction.test_add_negative_numbers",
                "test_add.TestAddFunction.test_add_positive_numbers",
            ],
            "test_add.py",
            os.fspath(TEST_DATA_PATH / "unittest_folder"),
            "success",
        ),
        (
            [
                "test_add.TestAddFunction.test_add_negative_numbers",
                "test_add.TestAddFunction.test_add_positive_numbers",
                "test_subtract.TestSubtractFunction.test_subtract_negative_numbers",
                "test_subtract.TestSubtractFunction.test_subtract_positive_numbers",
            ],
            "test*",
            os.fspath(TEST_DATA_PATH / "unittest_folder"),
            "success",
        ),
        (
            [
                "pattern_a_test.DiscoveryA.test_one_a",
                "pattern_a_test.DiscoveryA.test_two_a",
            ],
            "*test.py",
            os.fspath(TEST_DATA_PATH / "two_patterns"),
            "success",
        ),
        (
            [
                "test_pattern_b.DiscoveryB.test_one_b",
                "test_pattern_b.DiscoveryB.test_two_b",
            ],
            "test_*",
            os.fspath(TEST_DATA_PATH / "two_patterns"),
            "success",
        ),
        (
            [
                "file_one.CaseTwoFileOne.test_one",
                "file_one.CaseTwoFileOne.test_two",
                "folder.file_two.CaseTwoFileTwo.test_one",
                "folder.file_two.CaseTwoFileTwo.test_two",
            ],
            "*",
            os.fspath(TEST_DATA_PATH / "utils_nested_cases"),
            "success",
        ),
        (
            [
                "test_two_classes.ClassOne.test_one",
                "test_two_classes.ClassTwo.test_two",
            ],
            "test_two_classes.py",
            os.fspath(TEST_DATA_PATH),
            "success",
        ),
        (
            [
                "test_scene.TestMathOperations.test_operations(add)",
                "test_scene.TestMathOperations.test_operations(subtract)",
                "test_scene.TestMathOperations.test_operations(multiply)",
            ],
            "*",
            os.fspath(TEST_DATA_PATH / "test_scenarios" / "tests"),
            "success",
        ),
    ],
)
def test_multiple_ids_run(mock_send_run_data, test_ids, pattern, cwd, expected_outcome) -> None:  # noqa: ARG001
    """
    The following are all successful tests of different formats.
    # 1. Two tests with the `pattern` specified as a file
    # 2. Two test files in the same folder called `unittest_folder`
    # 3. A folder with two different test file patterns, this test gathers pattern `*test`
    # 4. A folder with two different test file patterns, this test gathers pattern `test_*`
    # 5. A nested structure where a test file is on the same level as a folder containing a test file
    # 6. Test file with two test classes
    # 7. Parameterized test cases run within a single test method (`test_scene`)
    All tests should have the outcome of `success`.
    """
    os.environ["TEST_RUN_PIPE"] = "fake"
    actual = run_tests(cwd, test_ids, pattern, None, 1, None)
    assert actual
    assert all(item in actual for item in ("cwd", "status"))
    assert actual["status"] == "success"
    assert actual["cwd"] == cwd
    assert actual["result"] is not None
    result = actual["result"]
    # Every requested id should appear exactly once with the expected outcome.
    assert len(result) == len(test_ids)
    for test_id in test_ids:
        assert test_id in result
        id_result = result[test_id]
        assert id_result is not None
        assert "outcome" in id_result
        assert id_result["outcome"] == expected_outcome
def test_failed_tests(mock_send_run_data):  # noqa: ARG001
    """This test runs on a single file `test_fail` with two tests that fail."""
    os.environ["TEST_RUN_PIPE"] = "fake"
    test_ids = [
        "test_fail_simple.RunFailSimple.test_one_fail",
        "test_fail_simple.RunFailSimple.test_two_fail",
    ]
    actual = run_tests(
        os.fspath(TEST_DATA_PATH),
        test_ids,
        "test_fail_simple*",
        None,
        1,
        None,
    )
    assert actual
    assert all(item in actual for item in ("cwd", "status"))
    # The run itself succeeds even though the individual tests fail.
    assert actual["status"] == "success"
    assert actual["cwd"] == os.fspath(TEST_DATA_PATH)
    assert actual["result"] is not None
    result = actual["result"]
    assert len(result) == len(test_ids)
    for test_id in test_ids:
        assert test_id in result
        id_result = result[test_id]
        assert id_result is not None
        assert "outcome" in id_result
        assert id_result["outcome"] == "failure"
        assert "message" in id_result
        assert "traceback" in id_result
        # Each failing test should surface its assertion text in message/traceback.
        assert "2 not greater than 3" in str(id_result["message"]) or "1 == 1" in str(
            id_result["traceback"]
        )
def test_unknown_id(mock_send_run_data):  # noqa: ARG001
    """An unknown test id should surface an 'error' outcome under unittest's _FailedTest id."""
    os.environ["TEST_RUN_PIPE"] = "fake"
    requested_ids = ["unknown_id"]
    payload = run_tests(
        os.fspath(TEST_DATA_PATH),
        requested_ids,
        "test_fail_simple*",
        None,
        1,
        None,
    )
    assert payload
    for key in ("cwd", "status"):
        assert key in payload
    assert payload["status"] == "success"
    assert payload["cwd"] == os.fspath(TEST_DATA_PATH)
    result = payload["result"]
    assert result is not None
    assert len(result) == len(requested_ids)
    # unittest reports the unresolved id under the loader's _FailedTest class.
    failed_id = "unittest.loader._FailedTest.unknown_id"
    assert failed_id in result
    entry = result[failed_id]
    assert entry is not None
    assert "outcome" in entry
    assert entry["outcome"] == "error"
    assert "message" in entry
    assert "traceback" in entry
def test_incorrect_path():
    """A nonexistent start directory should produce an 'error' status payload."""
    os.environ["TEST_RUN_PIPE"] = "fake"
    missing_dir = os.fspath(TEST_DATA_PATH / "unknown_folder")
    payload = run_tests(
        missing_dir,
        ["unknown_id"],
        "test_fail_simple*",
        None,
        1,
        None,
    )
    assert payload
    for key in ("cwd", "status", "error"):
        assert key in payload
    assert payload["status"] == "error"
    assert payload["cwd"] == missing_dir
def test_basic_run_django():
    """Run a simple django project with three tests: two pass and one fails."""
    data_path: pathlib.Path = TEST_DATA_PATH / "simple_django"
    manage_py = os.fsdecode(data_path / "manage.py")
    script = os.fsdecode(pathlib.Path(__file__).parent / "django_test_execution_script.py")
    test_ids = [
        "polls.tests.QuestionModelTests.test_was_published_recently_with_future_question",
        "polls.tests.QuestionModelTests.test_was_published_recently_with_future_question_2",
        "polls.tests.QuestionModelTests.test_question_creation_and_retrieval",
    ]
    actual = helpers.runner_with_cwd_env(
        [script, manage_py, *test_ids],
        data_path,
        {"MANAGE_PY_PATH": manage_py},
    )
    assert actual
    payloads: List[Dict[str, Dict[str, Any]]] = actual
    assert len(payloads) == 3
    # Merge the per-payload result dicts so we can check each id once.
    combined: Dict[str, Dict[str, Any]] = {}
    for payload in payloads:
        for key in ("status", "cwd", "result"):
            assert key in payload
        assert payload.get("cwd") == os.fspath(data_path)
        combined.update(payload["result"])
    failing_id = (
        "polls.tests.QuestionModelTests.test_was_published_recently_with_future_question_2"
    )
    for test_id in test_ids:
        assert test_id in combined
        entry = combined[test_id]
        assert entry is not None
        assert "outcome" in entry
        expected = "failure" if test_id == failing_id else "success"
        assert entry["outcome"] == expected
def test_project_root_path_with_cwd_override(mock_send_run_data) -> None:  # noqa: ARG001
    """Execute with cwd_override supplied, simulating project-based testing.

    When cwd_override (the PROJECT_ROOT_PATH stand-in) is passed, the cwd in
    the response payload must echo it, and execution must still succeed from
    start_dir.
    """
    project_path = TEST_DATA_PATH / "unittest_folder"
    project_dir = os.fsdecode(project_path)
    os.environ["TEST_RUN_PIPE"] = "fake"
    target = "test_add.TestAddFunction.test_add_positive_numbers"
    payload = run_tests(
        project_dir,
        [target],
        "test_add*",
        None,
        1,
        None,
        cwd_override=project_dir,
    )
    assert payload["status"] == "success"
    # cwd in response should match the cwd_override (project root).
    assert payload["cwd"] == project_dir, (
        f"Expected cwd '{project_dir}', got '{payload['cwd']}'"
    )
    result = payload["result"]
    assert result is not None
    assert target in result
    assert result[target]["outcome"] == "success"
def test_project_root_path_with_different_cwd_and_start_dir() -> None:
    """Execute with cwd_override distinct from start_dir.

    Simulates start_dir pointing at the folder that holds the tests while
    cwd_override (PROJECT_ROOT_PATH) points at the project root: the payload's
    cwd must echo the override while execution still runs from start_dir.
    """
    project_path = TEST_DATA_PATH / "utils_nested_cases"
    project_dir = os.fsdecode(project_path)
    os.environ["TEST_RUN_PIPE"] = "fake"
    target = "file_one.CaseTwoFileOne.test_one"
    payload = run_tests(
        project_dir,
        [target],
        "*",
        None,
        1,
        None,
        cwd_override=project_dir,
    )
    assert payload["status"] == "success"
    # cwd should be the project root (cwd_override).
    assert payload["cwd"] == project_dir, (
        f"Expected cwd '{project_dir}', got '{payload['cwd']}'"
    )
    result = payload["result"]
    assert result is not None
    assert target in result
@pytest.mark.skipif(
    sys.platform == "win32",
    reason="Symlinks require elevated privileges on Windows",
)
def test_symlink_with_project_root_path(mock_send_run_data) -> None:  # noqa: ARG001
    """Execute with a symlinked test directory AND cwd_override set to the symlink.

    Combines a symlinked project root with cwd_override (the PROJECT_ROOT_PATH
    stand-in) to verify execution payloads report the symlink path as cwd.
    """
    with helpers.create_symlink(TEST_DATA_PATH, "unittest_folder", "symlink_unittest_exec") as (
        _source,
        destination,
    ):
        assert destination.is_symlink()
        # start_dir and cwd_override both point at the symlink destination.
        symlink_dir = os.fsdecode(destination)
        os.environ["TEST_RUN_PIPE"] = "fake"
        target = "test_add.TestAddFunction.test_add_positive_numbers"
        payload = run_tests(
            symlink_dir,
            [target],
            "test_add*",
            None,
            1,
            None,
            cwd_override=symlink_dir,
        )
        assert payload["status"] == "success", (
            f"Status is not 'success', error is: {payload.get('error')}"
        )
        # cwd should be the symlink path (cwd_override).
        assert payload["cwd"] == symlink_dir, (
            f"CWD does not match symlink path: expected {symlink_dir}, got {payload['cwd']}"
        )