Commit a2fc9ad

Detect skipped individual test cases, add basic unit test of test parser
1 parent 89fe2b4 commit a2fc9ad

File tree

12 files changed: +14060 -17144 lines changed

Test-Result-Evaluator/.gitignore

Lines changed: 1 addition & 0 deletions
@@ -1,3 +1,4 @@
 venv
 generated_site
+generated-site
 mbed_tests.db

Test-Result-Evaluator/.idea/Test-Result-Evaluator.iml

Lines changed: 1 addition & 0 deletions
Some generated files are not rendered by default.

Test-Result-Evaluator/demo-test-configs/ci-shield-tests-SFE_ARTEMIS.xml

Lines changed: 1731 additions & 0 deletions
Large diffs are not rendered by default.

Test-Result-Evaluator/demo-test-configs/mbed-tests-SFE_ARTEMIS.xml

Lines changed: 11863 additions & 0 deletions
Large diffs are not rendered by default.

Test-Result-Evaluator/demo-test-configs/results.xml

Lines changed: 0 additions & 17025 deletions
This file was deleted.
Test-Result-Evaluator/requirements.txt

Lines changed: 1 addition & 1 deletion
@@ -1,3 +1,3 @@
 junitparser~=3.0.0
 graphviz~=0.20
--r ../CI-Shield-Tests/mbed-os/tools/requirements.txt
+pytest
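
With the mbed-os tooling requirement replaced by pytest, the new unit tests under Test-Result-Evaluator/tests/ can presumably be run with a plain "python -m pytest" from the Test-Result-Evaluator directory; the exact invocation is not shown in this commit.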

Test-Result-Evaluator/test_result_evaluator/mbed_test_database.py

Lines changed: 77 additions & 54 deletions
Large diffs are not rendered by default.

Test-Result-Evaluator/test_result_evaluator/result_page_generator.py

Lines changed: 4 additions & 3 deletions
@@ -411,10 +411,11 @@ def generate_test_page(database: MbedTestDatabase, test_name: str, out_path: pat
             elif target_test_results[target] == TestResult.PRIOR_TEST_CASE_CRASHED:
                 row_content.append('<div class="prior-crashed-marker">Prior Case Crashed</div>')
             else: # skipped
-                row_content.append('<div class="skipped-marker">Skipped</div>')
+                row_content.append(
+                    f'<div class="skipped-marker"><a href="{str(get_test_case_run_path(test_name, test_case_name, target))}">Skipped</a></div>')
         else:
             # Test case does not exist for this target, e.g. due to an ifdef
-            row_content.append('<div class="skipped-marker">Skipped</div>')
+            row_content.append('<div class="skipped-marker">Not Run</div>')
         test_table.add_row(row_content)
 
     # Write the table to the page.
@@ -493,7 +494,7 @@ def generate_tests_and_targets_website(database: MbedTestDatabase, gen_path: pat
 
     for test_case_name in test_details.keys():
         for target_name, result in test_details[test_case_name].items():
-            if result == TestResult.PASSED or result == TestResult.FAILED:
+            if result != TestResult.PRIOR_TEST_CASE_CRASHED:
                 run_path = tests_dir / get_test_case_run_path(test_name, test_case_name, target_name)
                 run_path.parent.mkdir(exist_ok=True, parents=True)
                 generate_test_case_run_page(database, test_name, test_case_name, target_name, run_path)
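
To make the behaviour change concrete, here is a minimal runnable sketch (not the repository's code; the get_test_case_run_path stub below assumes a hypothetical page layout) of how the skipped-marker cell is now built: a case that ran but was skipped links to its run page, while a case that does not exist for the target reads "Not Run".

    import pathlib

    def get_test_case_run_path(test_name: str, test_case_name: str, target: str) -> pathlib.PurePosixPath:
        # Hypothetical stand-in for the real helper: assume one HTML page per
        # (test, test case, target) combination.
        return pathlib.PurePosixPath("test-case-runs", test_name, test_case_name, f"{target}.html")

    def skipped_cell(test_name: str, test_case_name: str, target: str, case_exists_for_target: bool) -> str:
        # Mirrors the hunk above: link skipped-but-run cases, plain "Not Run" otherwise.
        if case_exists_for_target:
            run_path = get_test_case_run_path(test_name, test_case_name, target)
            return f'<div class="skipped-marker"><a href="{run_path}">Skipped</a></div>'
        return '<div class="skipped-marker">Not Run</div>'

    print(skipped_cell("test-example", "example-case", "SFE_ARTEMIS", True))
    print(skipped_cell("test-example", "example-case", "SFE_ARTEMIS", False))

The second hunk complements this: run pages are now generated for every result except PRIOR_TEST_CASE_CRASHED (rather than only for passes and failures), so the new "Skipped" links have a page to point at.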

Test-Result-Evaluator/test_result_evaluator/test_run_parser.py

Lines changed: 84 additions & 61 deletions
@@ -20,6 +20,89 @@
 # Matches a test case which completes (either successfully or not) and allows extracting the output
 GREENTEA_TESTCASE_OUTPUT_RE = re.compile(r"(\{\{__testcase_start;[^|]+?}}.+?\{\{__testcase_finish;[^|]+?;(\d);\d}})", re.DOTALL)
 
+# Matches if a test was marked as 'skipped' via TEST_SKIP() or TEST_SKIP_MESSAGE()
+TEST_SKIPPED_RE = re.compile(r"<greentea test suite>:[0-9]+::SKIP")
+
+
+def _parse_test_suite(database: mbed_test_database.MbedTestDatabase, mbed_target: str,
+                      test_suite_name: str, test_suite_output: str, test_suite_result: TestResult):
+    """
+    Parse the output of one Greentea test suite and add it to the database.
+    """
+
+    # First use a regex to extract the list of test cases...
+    test_case_names = re.findall(GREENTEA_TESTCASE_NAME_RE, test_suite_output)
+
+    # Next, we need some special handling for tests which reset. These tests print out the list of
+    # test cases multiple times, which causes the previous operation to return duplicate results. Remove those
+    # while preserving the test case order.
+    test_case_names = list(dict.fromkeys(test_case_names))
+
+    test_case_records: List[Tuple[str, str]]
+
+    if len(test_case_names) > 0:
+        # This is a "normal" test with test cases. Parse them.
+        # Regex returns tuple of (output, passed/failed indicator)
+        test_case_records = re.findall(GREENTEA_TESTCASE_OUTPUT_RE, test_suite_output)
+
+        if len(test_case_records) < len(test_case_names):
+            # Did one test case crash the test?
+            # See if we can find the start of this test case but no end.
+            crashing_test_name = test_case_names[len(test_case_records)]
+            crash_re = re.compile(r"\{\{__testcase_start;" + crashing_test_name + r"}}(.+?)teardown\(\) finished",
+                                  re.DOTALL)
+            test_case_crash_output = re.search(crash_re, test_suite_output)
+
+            if test_case_crash_output is not None:
+                print(
+                    f"Note: Test case '{crashing_test_name}' in test {test_suite_name} for target {mbed_target} appears to have crashed and prevented {len(test_case_names) - len(test_case_records) - 1} subsequent tests from running")
+                test_case_records.append((test_case_crash_output.group(0), "0"))
+            else:
+                # Otherwise the test simply didn't run the remaining test cases.
+                pass
+
+        for test_case_idx, test_case_name in enumerate(test_case_names):
+
+            # If the test actually was run, save its output
+            if test_case_idx < len(test_case_records):
+
+                test_case_output = test_case_records[test_case_idx][0]
+                if len(re.findall(TEST_SKIPPED_RE, test_case_output)) > 0:
+                    result = TestResult.SKIPPED
+                elif test_case_records[test_case_idx][1] == "1":
+                    result = TestResult.PASSED
+                else:
+                    result = TestResult.FAILED
+
+                database.add_test_case_record(test_suite_name,
+                                              test_case_name,
+                                              test_case_idx,
+                                              mbed_target,
+                                              result,
+                                              test_case_output)
+
+            # Otherwise, mark it as prior crashed
+            else:
+                database.add_test_case_record(test_suite_name,
+                                              test_case_name,
+                                              test_case_idx,
+                                              mbed_target,
+                                              TestResult.PRIOR_TEST_CASE_CRASHED,
+                                              "")
+    # However, there are some tests (e.g. test-mbed-drivers-dev-null) which don't use the greentea
+    # system in a standard way and therefore can't be divided evenly into test cases. These tests need special
+    # handling.
+    else:
+        # print(f"This test has non-standard output. Treating the entire test as one test case")
+
+        is_skipped = len(re.findall(TEST_SKIPPED_RE, test_suite_output)) > 0
+
+        database.add_test_case_record(test_suite_name,
+                                      test_suite_name,
+                                      0,
+                                      mbed_target,
+                                      TestResult.SKIPPED if is_skipped else test_suite_result,
+                                      test_suite_output)
 
 def parse_test_run(database: mbed_test_database.MbedTestDatabase, mbed_target: str, junit_xml_path: pathlib.Path):
     """
@@ -45,65 +128,5 @@ def parse_test_run(database: mbed_test_database.MbedTestDatabase, mbed_target: s
         if test_suite_result != TestResult.SKIPPED:
             # Now things get a bit more complicated as we have to parse Greentea's output directly to determine
             # the list of tests.
-
-            # First use a regex to extract the list of test cases...
-            test_case_names = re.findall(GREENTEA_TESTCASE_NAME_RE, test_report.system_out)
-
-            # Next, we need some special handling for tests which reset. These tests print out the list of
-            # test cases multiple times, which causes the previous operation to return duplicate results. Remove those
-            # while preserving the test case order.
-            test_case_names = list(dict.fromkeys(test_case_names))
-
-            test_case_records: List[Tuple[str, str]]
-
-            if len(test_case_names) > 0:
-                # This is a "normal" test with test cases. Parse them.
-                # Regex returns tuple of (output, passed/failed indicator)
-                test_case_records = re.findall(GREENTEA_TESTCASE_OUTPUT_RE, test_report.system_out)
-
-                if len(test_case_records) < len(test_case_names):
-                    # Did one test case crash the test?
-                    # See if we can find the start of this test case but no end.
-                    crashing_test_name = test_case_names[len(test_case_records)]
-                    crash_re = re.compile(r"\{\{__testcase_start;" + crashing_test_name + r"}}(.+?)teardown\(\) finished", re.DOTALL)
-                    test_case_crash_output = re.search(crash_re, test_report.system_out)
-
-                    if test_case_crash_output is not None:
-                        print(f"Note: Test case '{crashing_test_name}' in test {test_report.classname} for target {mbed_target} appears to have crashed and prevented {len(test_case_names) - len(test_case_records) - 1} subsequent tests from running")
-                        test_case_records.append((test_case_crash_output.group(0), "0"))
-                    else:
-                        # Otherwise the test simply didn't run the remaining test cases.
-                        pass
-
-                for test_case_idx, test_case_name in enumerate(test_case_names):
-
-                    # If the test actually was run, save its output
-                    if test_case_idx < len(test_case_records):
-                        database.add_test_case_record(test_report.classname,
-                                                      test_case_name,
-                                                      test_case_idx,
-                                                      mbed_target,
-                                                      TestResult.PASSED if test_case_records[test_case_idx][1] == "1" else TestResult.FAILED,
-                                                      test_case_records[test_case_idx][0])
-
-                    # Otherwise, mark it as prior crashed
-                    else:
-                        database.add_test_case_record(test_report.classname,
-                                                      test_case_name,
-                                                      test_case_idx,
-                                                      mbed_target,
-                                                      TestResult.PRIOR_TEST_CASE_CRASHED,
-                                                      "")
-                # However, there are some tests (e.g. test-mbed-drivers-dev-null) which don't use the greentea
-                # system in a standard way and therefore can't be divided evenly into test cases. These tests need special
-                # handling.
-            else:
-                # print(f"This test has non-standard output. Treating the entire test as one test case")
-                database.add_test_case_record(test_report.classname,
-                                              test_report.classname,
-                                              0,
-                                              mbed_target,
-                                              test_suite_result,
-                                              test_report.system_out)
-
+            _parse_test_suite(database, mbed_target, test_report.classname, test_report.system_out, test_suite_result)
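
As a sketch of how the new skip detection can be unit-tested (the commit message mentions a basic unit test of the test parser, but the test file itself is not rendered above, so the names and transcripts here are fabricated), the following pytest-style tests exercise TEST_SKIPPED_RE from test_run_parser.py against synthetic Greentea output:

    import re

    # Same pattern as TEST_SKIPPED_RE in test_run_parser.py above: matches the
    # line emitted when a test handler calls TEST_SKIP() / TEST_SKIP_MESSAGE()
    TEST_SKIPPED_RE = re.compile(r"<greentea test suite>:[0-9]+::SKIP")

    def test_detects_skipped_test_case():
        # Fabricated transcript of a test case that skipped itself
        output = (
            "{{__testcase_start;SD card init}}\n"
            "<greentea test suite>:42::SKIP: no SD card present\n"
            "{{__testcase_finish;SD card init;1;0}}\n"
        )
        assert TEST_SKIPPED_RE.search(output) is not None

    def test_passing_case_is_not_marked_skipped():
        output = (
            "{{__testcase_start;SD card init}}\n"
            "{{__testcase_finish;SD card init;1;0}}\n"
        )
        assert TEST_SKIPPED_RE.search(output) is None

Because _parse_test_suite checks this pattern before looking at the pass/fail flag in the {{__testcase_finish;...}} line, a case that calls TEST_SKIP() is recorded as TestResult.SKIPPED even though Greentea would otherwise count it as passed.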

Test-Result-Evaluator/tests/__init__.py

Whitespace-only changes.
