diff --git a/tools/test_xml_generator/BUILD b/tools/test_xml_generator/BUILD
new file mode 100644
index 000000000..685838c12
--- /dev/null
+++ b/tools/test_xml_generator/BUILD
@@ -0,0 +1,25 @@
+load("@rules_python//python:py_binary.bzl", "py_binary")
+load("@rules_python//python:py_library.bzl", "py_library")
+load("//tools:binary_env.bzl", "binary_env")
+load(":test_runners.bzl", "xml_test_runners")
+
+licenses(["notice"])
+
+py_binary(
+    name = "generate_test_xml",
+    srcs = ["generate_test_xml.py"],
+    env = binary_env.common_python_utf8_env(),
+    python_version = "PY3",
+    srcs_version = "PY3",
+    visibility = ["//visibility:public"],
+)
+
+py_library(
+    name = "generate_test_xml_lib",
+    srcs = [
+        "generate_test_xml.py",
+    ],
+    srcs_version = "PY3",
+    visibility = ["//visibility:public"],
+)
+
+# Instantiates the pre-configured *_with_xml runner targets (see test_runners.bzl).
+xml_test_runners()
diff --git a/tools/test_xml_generator/README.md b/tools/test_xml_generator/README.md
new file mode 100644
index 000000000..c86c06158
--- /dev/null
+++ b/tools/test_xml_generator/README.md
@@ -0,0 +1,191 @@
+# Test XML Generator
+
+This directory contains tools for generating proper JUnit XML output from iOS and macOS test runs.
+
+## Overview
+
+When running iOS/macOS unit tests with Bazel, the default test output does not always produce the JUnit XML format that CI systems expect. This tool provides a post-action script that:
+
+1. Parses the test log output from xcodebuild
+2. Extracts test results (passes, failures, timing)
+3. Generates properly formatted JUnit XML
+4. Writes the XML to the location Bazel expects (`$XML_OUTPUT_FILE`)
+
+## Components
+
+- **`generate_test_xml.py`**: Python script that parses test logs and generates JUnit XML
+- **`test_runners.bzl`**: Pre-configured test runners with XML generation enabled
+- **`BUILD`**: Bazel build definitions
+
+## Usage
+
+### Option 1: Use Pre-configured Test Runners
+
+The easiest way to use this tool is to pick one of the pre-configured test runners:
+
+```python
+load("@build_bazel_rules_apple//apple:ios.bzl", "ios_unit_test")
+
+ios_unit_test(
+    name = "MyAppTests",
+    minimum_os_version = "15.0",
+    deps = [":MyAppTestsLib"],
+    runner = "//tools/test_xml_generator:ios_xctestrun_runner_with_xml",
+)
+```
+
+Available runners:
+
+- `//tools/test_xml_generator:ios_xctestrun_runner_with_xml` - for iOS tests using xcodebuild
+- `//tools/test_xml_generator:ios_test_runner_with_xml` - for iOS tests using the custom test runner
+- `//tools/test_xml_generator:macos_test_runner_with_xml` - for macOS tests
+
+### Option 2: Create Your Own Custom Runner
+
+You can create your own test runner with custom configuration:
+
+```python
+# In your BUILD file
+load(
+    "@build_bazel_rules_apple//apple/testing/default_runner:ios_xctestrun_runner.bzl",
+    "ios_xctestrun_runner",
+)
+
+ios_xctestrun_runner(
+    name = "my_custom_ios_runner",
+    post_action = "//tools/test_xml_generator:generate_test_xml",
+    post_action_determines_exit_code = False,  # Don't fail the build if XML generation fails
+    # Add other custom configuration here
+    device_type = "iPhone 14",
+    os_version = "16.0",
+)
+
+# Use in your test
+load("@build_bazel_rules_apple//apple:ios.bzl", "ios_unit_test")
+
+ios_unit_test(
+    name = "MyAppTests",
+    minimum_os_version = "15.0",
+    deps = [":MyAppTestsLib"],
+    runner = ":my_custom_ios_runner",
+)
+```
+
+### Option 3: Add Post-Action to Existing Runner
+
+If you already have a custom runner, you can add the post-action to it:
+
+```python
+ios_xctestrun_runner(
+    name = "my_existing_runner",
+    post_action = "//tools/test_xml_generator:generate_test_xml",
+    post_action_determines_exit_code = False,
+    # ... your existing configuration ...
+)
+```
+
+## How It Works
+
+1. **Test Execution**: The test runner executes your tests normally
+2. **Log Capture**: Test output is captured to `$TEST_LOG_FILE` (typically in `/tmp`)
+3. **Post-Action**: After tests complete, the `generate_test_xml.py` script runs with these environment variables:
+   - `TEST_LOG_FILE`: Path to the test log
+   - `XML_OUTPUT_FILE`: Path where Bazel expects the XML output
+   - `TEST_EXIT_CODE`: Exit code from the test run
+   - `TEST_XCRESULT_BUNDLE_PATH`: Path to the XCResult bundle (if available)
+   - `SIMULATOR_UDID`: Simulator ID (for iOS tests)
+4. **XML Generation**: The script parses the log and generates JUnit XML
+5. **Output**: XML is written to the correct location for Bazel and CI systems
+
+## Supported Test Formats
+
+The parser supports multiple test output formats:
+
+- **XCTest (Objective-C/Swift)**: standard XCTest output
+
+  ```
+  Test Case '-[MyTests testExample]' started.
+  Test Case '-[MyTests testExample]' passed (0.001 seconds).
+  ```
+
+- **Swift Testing (Xcode 16+)**: the new Swift Testing framework
+
+  ```
+  ◇ Suite MyTests started.
+  ◇ Test testExample() started.
+  ✔ Test testExample() passed after 0.001 seconds.
+  ```
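+
+Both formats are matched with regular expressions that are precompiled in `generate_test_xml.py`. As a quick, illustrative check (a standalone sketch, not part of the tool), the XCTest "passed" pattern can be exercised on its own:
+
+```python
+import re
+
+TEST_PASS_PATTERN = re.compile(
+    r"Test Case '-\[([^\s]+) ([^\]]+)\]' passed \(([\d\.]+) seconds\)\."
+)
+
+line = "Test Case '-[MyTests testExample]' passed (0.001 seconds)."
+print(TEST_PASS_PATTERN.search(line).groups())
+# -> ('MyTests', 'testExample', '0.001')
+```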
+
+## Output Format
+
+The generated XML follows the JUnit XML format:
+
+```xml
+<?xml version="1.0" encoding="utf-8"?>
+<testsuites name="iOS/macOS Tests" tests="2" failures="1" errors="0" time="0.003">
+  <testsuite name="MyAppTests" tests="2" failures="1" errors="0" time="0.003">
+    <testcase classname="MyAppTests" name="testExample" time="0.001" />
+    <testcase classname="MyAppTests" name="testFailure" time="0.002">
+      <failure message="XCTAssertEqual failed" type="XCTestFailure">
+        MyAppTests.swift:42: XCTAssertEqual failed: ("expected") is not equal to ("actual")
+      </failure>
+      <system-err>
+        Test output and error messages...
+      </system-err>
+    </testcase>
+  </testsuite>
+</testsuites>
+```
+
+## Troubleshooting
+
+### No test cases found in log output
+
+If you see this warning, the parser couldn't find test results in the log. This could be because:
+
+1. Tests didn't run at all (build failure, configuration issue)
+2. The test output format is different than expected
+3. The log file is empty or corrupted
+
+Check the test log manually to see what format the output is in.
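+
+To narrow it down, you can also run the parser directly against a saved log (a minimal sketch; the import assumes you run it from inside `tools/test_xml_generator/`, and the log path is illustrative):
+
+```python
+from generate_test_xml import TestLogParser
+
+with open('/tmp/test.log', encoding='utf-8', errors='replace') as f:
+    cases = TestLogParser(f.read()).parse()
+print(f"parsed {len(cases)} test case(s)")
+```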
+
+### XML file not created
+
+Make sure:
+
+1. The runner has the post-action configured correctly
+2. The `//tools/test_xml_generator:generate_test_xml` target is accessible
+3. The test output doesn't contain error messages from the post-action script
+
+### Tests fail but XML shows all passed
+
+The post-action runs after tests complete but doesn't change the test exit code (unless `post_action_determines_exit_code = True`). The XML should reflect the actual test results from the log.
+
+## Development
+
+To test the XML generator locally:
+
+```bash
+# Run tests and generate XML
+bazel test //path/to:test --test_output=all
+
+# Check the generated XML
+cat bazel-testlogs/path/to/test/test.xml
+
+# Run the generator manually against a saved log
+TEST_LOG_FILE=/path/to/test.log \
+XML_OUTPUT_FILE=/tmp/test.xml \
+TEST_EXIT_CODE=0 \
+python3 tools/test_xml_generator/generate_test_xml.py
+```
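+
+To sanity-check the generated file, you can parse it back with the standard library (a minimal sketch; the path matches the `XML_OUTPUT_FILE` used above):
+
+```python
+import xml.etree.ElementTree as ET
+
+root = ET.parse('/tmp/test.xml').getroot()
+print(root.tag, root.get('tests'), root.get('failures'))
+```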
+
+## Configuration Options
+
+### post_action_determines_exit_code
+
+By default this is `False`, meaning XML generation failures won't fail your test. This is the recommended setting: a problem in XML generation should never mask the actual result of your tests.
+
+Set it to `True` if you want XML generation failures to fail the build:
+
+```python
+ios_xctestrun_runner(
+    name = "strict_runner",
+    post_action = "//tools/test_xml_generator:generate_test_xml",
+    post_action_determines_exit_code = True,  # Fail the build if XML generation fails
+)
+```
diff --git a/tools/test_xml_generator/generate_test_xml.py b/tools/test_xml_generator/generate_test_xml.py
new file mode 100644
index 000000000..3d98ecfb9
--- /dev/null
+++ b/tools/test_xml_generator/generate_test_xml.py
@@ -0,0 +1,612 @@
+#!/usr/bin/env python3
+
+# Copyright 2024 The Bazel Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Post-action script to generate proper JUnit XML from iOS/macOS test output.
+
+This script runs after test execution and parses the test log to create
+a properly formatted JUnit XML file that can be consumed by CI systems.
+
+Environment Variables:
+    TEST_LOG_FILE: Path to the test log file (contains xcodebuild output)
+    XML_OUTPUT_FILE: Path where the JUnit XML should be written
+    TEST_EXIT_CODE: Exit code from the test run
+    TEST_XCRESULT_BUNDLE_PATH: Optional path to an XCResult bundle
+    SIMULATOR_UDID: Optional simulator ID
+"""
+
+import os
+import re
+import sys
+import xml.etree.ElementTree as ET
+from datetime import datetime, timezone
+from typing import Dict, List
+
+
+class TestCase:
+    """Represents a single test case."""
+
+    def __init__(self, classname: str, name: str):
+        self.classname = classname
+        self.name = name
+        self.time = 0.0
+        self.status = 'unknown'
+        self.failure_message = None
+        self.failure_type = None
+        self.failure_details = None
+        self.system_out = []
+        self.system_err = []
+
+
+class TestLogParser:
+    """Parser for XCTest output logs."""
+
+    # Regex patterns for parsing XCTest output. They handle method names with
+    # underscores and other unusual characters, and are precompiled once at
+    # class level.
+    TEST_START_PATTERN = re.compile(
+        r"Test Case '-\[([^\s]+) ([^\]]+)\]' started"
+    )
+    TEST_PASS_PATTERN = re.compile(
+        r"Test Case '-\[([^\s]+) ([^\]]+)\]' passed \(([\d\.]+) seconds\)\."
+    )
+    TEST_FAIL_PATTERN = re.compile(
+        r"Test Case '-\[([^\s]+) ([^\]]+)\]' failed \(([\d\.]+) seconds\)\."
+    )
+    FAILURE_PATTERN = re.compile(
+        r"(.*):(\d+): error: -\[([^\s]+) ([^\]]+)\] : (.+)"
+    )
+    SUITE_START_PATTERN = re.compile(
+        r"Test Suite '([^']+)' started at (.+)"
+    )
+    SUITE_FINISH_PATTERN = re.compile(
+        r"Test Suite '([^']+)' (passed|failed) at (.+)"
+    )
+
+    # Patterns for Swift Testing (Xcode 16+)
+    # Format: ◇ Test test_methodName() started.
+    # Format: ✔ Test test_methodName() passed after 6.500 seconds.
+    # Format: ✘ Test test_methodName() failed after 6.500 seconds with 1 issue.
+    # Format: ✘ Test test_methodName() recorded an issue at File.swift:81:17: Issue recorded
+    SWIFT_TESTING_START_PATTERN = re.compile(
+        r"^[◇◆] Test ([^\s\(]+)\(\) started\."
+    )
+    SWIFT_TESTING_PASS_PATTERN = re.compile(
+        r"^✔ Test ([^\s\(]+)\(\) passed after ([\d\.]+) seconds\."
+    )
+    SWIFT_TESTING_FAIL_PATTERN = re.compile(
+        r"^[✘✗×] Test ([^\s\(]+)\(\) failed after ([\d\.]+) seconds"
+    )
+    SWIFT_TESTING_ISSUE_PATTERN = re.compile(
+        r"^[✘✗×] Test ([^\s\(]+)\(\) recorded an issue at ([^:]+):(\d+):(\d+): (.+)"
+    )
+
+    # Pattern for suite information (to extract class names)
+    # Format: ◇ Suite ClassName started.
+    SWIFT_TESTING_SUITE_PATTERN = re.compile(
+        r"^◇ Suite ([^\s]+) started\."
+    )
+
+    def __init__(self, log_content: str):
+        self.log_content = log_content
+        # Store lines as a list for efficient iteration
+        self.lines = log_content.split('\n') if log_content else []
+
+    def parse(self) -> List[TestCase]:
+        """Parse the log content and extract test cases."""
+        test_cases = []
+        current_suite = None  # Track the current suite for Swift Testing
+
+        # Swift Testing runs tests in parallel, so several may be in flight:
+        # Dictionary: method_name -> TestCase
+        active_tests = {}
+        # Dictionary: method_name -> list of issues
+        test_issues = {}
+        # Dictionary: method_name -> list of context lines
+        test_contexts = {}
+
+        # XCTest runs tests sequentially
+        current_xctest = None
+        xctest_context = []
+
+        # Cheap substring checks guard the (precompiled) regex searches below
+        for line in self.lines:
+            # Check for Swift Testing suite marker
+            if line.startswith('◇ Suite'):
+                suite_match = self.SWIFT_TESTING_SUITE_PATTERN.search(line)
+                if suite_match:
+                    current_suite = suite_match.group(1)
+                    continue
+
+            # Check for Swift Testing issue recording (happens before the final failure line)
+            if line.startswith(('✘', '✗', '×')):
+                issue_match = self.SWIFT_TESTING_ISSUE_PATTERN.search(line)
+                if issue_match:
+                    method, file, line_num, col_num, message = issue_match.groups()
+                    if method not in test_issues:
+                        test_issues[method] = []
+                    test_issues[method].append({
+                        'file': file,
+                        'line': line_num,
+                        'column': col_num,
+                        'message': message.strip()
+                    })
+                    # Also add to context
+                    if method in test_contexts:
+                        test_contexts[method].append(line)
+                    continue
+
+            # Check for test start (XCTest format)
+            if "Test Case '-[" in line and 'started' in line:
+                start_match = self.TEST_START_PATTERN.search(line)
+                if start_match:
+                    classname, method = start_match.groups()
+                    current_xctest = TestCase(classname, method)
+                    xctest_context = []
+                    continue
+
+            # Check for test start (Swift Testing format)
+            if line.startswith(('◇', '◆')) and 'Test ' in line and 'started' in line:
+                swift_start_match = self.SWIFT_TESTING_START_PATTERN.search(line)
+                if swift_start_match:
+                    method = swift_start_match.group(1)
+                    # Use the current suite as class name, or a default if unavailable
+                    classname = current_suite if current_suite else 'SwiftTestingSuite'
+                    test = TestCase(classname, method)
+                    active_tests[method] = test
+                    test_contexts[method] = []
+                    if method not in test_issues:
+                        test_issues[method] = []
+                    continue
+
+            # Check for test pass (XCTest)
+            if "Test Case '-[" in line and 'passed' in line:
+                pass_match = self.TEST_PASS_PATTERN.search(line)
+                if pass_match:
+                    classname, method, time = pass_match.groups()
+                    if current_xctest and current_xctest.classname == classname and current_xctest.name == method:
+                        current_xctest.time = float(time)
+                        current_xctest.status = 'passed'
+                        test_cases.append(current_xctest)
+                        current_xctest = None
+                        xctest_context = []
+                    continue
+
+            # Check for test pass (Swift Testing)
+            if line.startswith('✔'):
+                swift_pass_match = self.SWIFT_TESTING_PASS_PATTERN.search(line)
+                if swift_pass_match:
+                    method, time = swift_pass_match.groups()
+                    if method in active_tests:
+                        test = active_tests[method]
+                        test.time = float(time)
+                        test.status = 'passed'
+                        test_cases.append(test)
+                        # Clean up
+                        del active_tests[method]
+                        if method in test_contexts:
+                            del test_contexts[method]
+                        if method in test_issues:
+                            del test_issues[method]
+                    continue
+
+            # Check for test failure (XCTest)
+            if "Test Case '-[" in line and 'failed' in line:
+                fail_match = self.TEST_FAIL_PATTERN.search(line)
+                if fail_match:
+                    classname, method, time = fail_match.groups()
+                    if current_xctest and current_xctest.classname == classname and current_xctest.name == method:
+                        current_xctest.time = float(time)
+                        current_xctest.status = 'failed'
+
+                        # Look for failure details in context
+                        self._extract_failure_details(current_xctest, xctest_context)
+
+                        test_cases.append(current_xctest)
+                        current_xctest = None
+                        xctest_context = []
+                    continue
+
+            # Check for test failure (Swift Testing)
+            if line.startswith(('✘', '✗', '×')):
+                swift_fail_match = self.SWIFT_TESTING_FAIL_PATTERN.search(line)
+                if swift_fail_match:
+                    method, time = swift_fail_match.groups()
+                    if method in active_tests:
+                        test = active_tests[method]
+                        test.time = float(time)
+                        test.status = 'failed'
+
+                        # Extract failure details from Swift Testing issues
+                        issues = test_issues.get(method, [])
+                        context = test_contexts.get(method, [])
+                        self._extract_swift_testing_failure_details(test, issues, context)
+
+                        test_cases.append(test)
+                        # Clean up
+                        del active_tests[method]
+                        if method in test_contexts:
+                            del test_contexts[method]
+                        if method in test_issues:
+                            del test_issues[method]
+                    continue
+
+            # Collect context for all active Swift Testing tests and the current XCTest
+            for method in list(active_tests.keys()):
+                if method in test_contexts:
+                    test_contexts[method].append(line)
+                    # Keep only the last 30 lines of context
+                    if len(test_contexts[method]) > 30:
+                        test_contexts[method].pop(0)
+
+            if current_xctest:
+                xctest_context.append(line)
+                if len(xctest_context) > 30:
+                    xctest_context.pop(0)
+
+        return test_cases
+
+    def _extract_failure_details(self, test_case: TestCase, context_lines: List[str]):
+        """Extract failure details from context lines."""
+        for i, line in enumerate(context_lines):
+            failure_match = self.FAILURE_PATTERN.search(line)
+            if failure_match:
+                file, line_num, _classname, _method, message = failure_match.groups()
+
+                # Clean up the file path to make it more readable
+                clean_path = self._clean_file_path(file)
+
+                # Build a comprehensive failure message with context
+                message_parts = [message.strip()]
+
+                # Look ahead for additional context (Received/Expected values, etc.)
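+                # Illustrative example of the context being scanned (exact
+                # wording depends on the assertion library in use):
+                #   XCTAssertEqual failed: ("1") is not equal to ("2")
+                #   Expected: 2
+                #   Received: 1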
+                for j in range(i + 1, min(i + 15, len(context_lines))):
+                    next_line = context_lines[j].strip()
+
+                    # Stop at the next test case
+                    if next_line.startswith('Test Case'):
+                        break
+
+                    # Include important context lines
+                    if any(keyword in next_line for keyword in ['Received:', 'Expected:', 'Actual:', 'but was:', 'but got:']):
+                        message_parts.append(next_line)
+                    # Include assertion failure details
+                    elif next_line and not next_line.startswith('Test '):
+                        # Check if it's a continuation of error details
+                        if any(char in next_line for char in [':', '=']) or 'failed' in next_line.lower():
+                            # Limit length to avoid too much noise
+                            if len('\n'.join(message_parts)) < 500:
+                                message_parts.append(next_line)
+
+                # Build the full failure message
+                full_message = '\n'.join(message_parts)
+
+                test_case.failure_message = full_message
+                test_case.failure_type = 'XCTestFailure'
+                test_case.failure_details = f"{clean_path}:{line_num}: {full_message}"
+
+                # Include broader context in system-err
+                start_idx = max(0, i - 5)
+                end_idx = min(len(context_lines), i + 15)
+                test_case.system_err = context_lines[start_idx:end_idx]
+                return
+
+        # If no specific failure pattern was found, use a generic failure
+        if not test_case.failure_message:
+            test_case.failure_message = "Test failed (see system-err for details)"
+            test_case.failure_type = 'TestFailure'
+            test_case.failure_details = '\n'.join(context_lines[-10:])
+            test_case.system_err = context_lines[-10:]
+
+    def _extract_swift_testing_failure_details(
+        self, test_case: TestCase, issues: List[Dict], context_lines: List[str]
+    ):
+        """Extract failure details from Swift Testing issues and context.
+
+        Args:
+            test_case: The TestCase to populate with failure details
+            issues: List of recorded issues from Swift Testing
+            context_lines: Context lines surrounding the failure
+        """
+        if issues:
+            # Use the first issue as the primary failure location
+            first_issue = issues[0]
+            clean_path = self._clean_file_path(first_issue['file'])
+
+            # Build the failure message from all issues
+            message_parts = [issue['message'] for issue in issues]
+
+            # Look for additional context in the context lines.
+            # Swift Testing often provides detailed error information after the issue line.
+            for line in context_lines:
+                stripped = line.strip()
+                # Lines starting with ↳ are continuations of the issue
+                if stripped.startswith('↳'):
+                    message_parts.append(stripped[1:].strip())
+
+            full_message = '\n'.join(message_parts)
+
+            test_case.failure_message = full_message
+            test_case.failure_type = 'SwiftTestingIssue'
+            test_case.failure_details = f"{clean_path}:{first_issue['line']}: {full_message}"
+
+            # Include context in system-err
+            test_case.system_err = context_lines[-20:] if context_lines else []
+        else:
+            # No specific issues found, use a generic failure
+            test_case.failure_message = "Test failed (see system-err for details)"
+            test_case.failure_type = 'SwiftTestingFailure'
+            test_case.failure_details = '\n'.join(context_lines[-10:])
+            test_case.system_err = context_lines[-10:]
+
+    def _clean_file_path(self, file_path: str) -> str:
+        """Clean up a file path to make it more readable.
+
+        Removes simulator device paths and other Bazel/system noise.
+        """
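+        # Illustrative example: a simulator path such as
+        #   /Users/me/Library/Developer/CoreSimulator/Devices/<UDID>/data/MyApp/Tests/FooTests.swift
+        # is reduced to
+        #   MyApp/Tests/FooTests.swift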
+ """ + # Remove simulator device paths + # Format: /Users/.../CoreSimulator/Devices/{UUID}/data/{workspace_path} + if '/CoreSimulator/Devices/' in file_path: + parts = file_path.split('/data/', 1) + if len(parts) == 2: + return parts[1] + + # Remove Bazel execroot paths + if '/execroot/_main/' in file_path: + parts = file_path.split('/execroot/_main/', 1) + if len(parts) == 2: + return parts[1] + + if '/execroot/__main__/' in file_path: + parts = file_path.split('/execroot/__main__/', 1) + if len(parts) == 2: + return parts[1] + + # Return last few meaningful components + parts = file_path.split('/') + if len(parts) > 3: + # Find first meaningful component (skip UUID-like paths) + for i in range(len(parts) - 1, max(0, len(parts) - 5), -1): + if parts[i] and not parts[i].startswith('.') and '-' not in parts[i][:8]: + return '/'.join(parts[i:]) + + return file_path + + +class JUnitXMLGenerator: + """Generator for JUnit XML format.""" + + def __init__(self, test_cases: List[TestCase], suite_name: str = 'iOS/macOS Tests'): + self.test_cases = test_cases + self.suite_name = suite_name + + def generate(self) -> str: + """Generate JUnit XML string.""" + # Group tests by class + test_suites = self._group_by_class() + + # Calculate totals once + total_tests = len(self.test_cases) + total_failures = sum(1 for t in self.test_cases if t.status == 'failed') + total_time = sum(t.time for t in self.test_cases) + + # Create XML structure + testsuites = ET.Element('testsuites') + testsuites.set('name', self.suite_name) + testsuites.set('tests', str(total_tests)) + testsuites.set('failures', str(total_failures)) + testsuites.set('errors', '0') + testsuites.set('time', str(total_time)) + testsuites.set('timestamp', datetime.now(timezone.utc).isoformat().replace('+00:00', 'Z')) + + # Create test suites + for suite_name, tests in test_suites.items(): + testsuite = ET.SubElement(testsuites, 'testsuite') + testsuite.set('name', suite_name) + testsuite.set('tests', str(len(tests))) + + suite_failures = sum(1 for t in tests if t.status == 'failed') + testsuite.set('failures', str(suite_failures)) + testsuite.set('errors', '0') + testsuite.set('time', str(sum(t.time for t in tests))) + + for test in tests: + self._add_test_case(testsuite, test) + + # Use faster XML serialization - avoid minidom pretty printing + # which is very slow for large XML documents + xml_str = ET.tostring(testsuites, encoding='unicode') + + # Use simple string formatting for indentation (much faster than minidom) + return self._pretty_print_fast(xml_str) + + def _pretty_print_fast(self, xml_str: str) -> str: + """Fast pretty-print using simple string manipulation.""" + # Add newlines and indentation + result = [''] + depth = 0 + i = 0 + + while i < len(xml_str): + # Find next tag + if xml_str[i] == '<': + # Find end of tag + end = xml_str.find('>', i) + if end == -1: + break + + tag = xml_str[i:end+1] + + # Adjust depth for closing tags before adding indentation + if tag.startswith(''): + result.append(' ' * depth + tag) + else: + result.append(' ' * depth + tag) + + # Adjust depth for opening tags + if not tag.startswith('') and not tag.startswith(' Dict[str, List[TestCase]]: + """Group test cases by class name.""" + suites = {} + for test in self.test_cases: + classname = test.classname + if classname not in suites: + suites[classname] = [] + suites[classname].append(test) + return suites + + def _add_test_case(self, parent: ET.Element, test: TestCase): + """Add a test case element to the parent.""" + testcase = ET.SubElement(parent, 
+        testcase.set('classname', test.classname)
+        testcase.set('name', test.name)
+        testcase.set('time', str(test.time))
+
+        if test.status == 'failed':
+            failure = ET.SubElement(testcase, 'failure')
+            failure.set('message', test.failure_message or 'Test failed')
+            failure.set('type', test.failure_type or 'TestFailure')
+            if test.failure_details:
+                failure.text = test.failure_details
+
+        if test.system_out:
+            system_out = ET.SubElement(testcase, 'system-out')
+            system_out.text = '\n'.join(test.system_out)
+
+        if test.system_err:
+            system_err = ET.SubElement(testcase, 'system-err')
+            system_err.text = '\n'.join(test.system_err)
+
+
+def main():
+    """Main entry point."""
+    # Get the environment variables
+    test_log_file = os.environ.get('TEST_LOG_FILE')
+    xml_output_file = os.environ.get('XML_OUTPUT_FILE')
+    test_exit_code = os.environ.get('TEST_EXIT_CODE', '0')
+
+    print("=" * 80)
+    print("Test XML Generator - Post Action")
+    print("=" * 80)
+
+    # Validate inputs
+    if not test_log_file:
+        print("Warning: TEST_LOG_FILE environment variable not set", file=sys.stderr)
+        print("Skipping XML generation")
+        sys.exit(0)
+
+    if not os.path.exists(test_log_file):
+        print(f"Warning: Test log file not found: {test_log_file}", file=sys.stderr)
+        print("Skipping XML generation")
+        sys.exit(0)
+
+    if not xml_output_file:
+        print("Warning: XML_OUTPUT_FILE environment variable not set", file=sys.stderr)
+        print("Skipping XML generation")
+        sys.exit(0)
+
+    print(f"Test Log File: {test_log_file}")
+    print(f"XML Output File: {xml_output_file}")
+    print(f"Test Exit Code: {test_exit_code}")
+    print()
+
+    # Read the test log
+    try:
+        with open(test_log_file, 'r', encoding='utf-8', errors='replace') as f:
+            log_content = f.read()
+    except Exception as e:
+        print(f"Error reading test log: {e}", file=sys.stderr)
+        sys.exit(0)  # Don't fail the build
+
+    # Parse the test results
+    print("Parsing test log...")
+    parser = TestLogParser(log_content)
+    test_cases = parser.parse()
+
+    if not test_cases:
+        print("Warning: No test cases found in log output")
+        print("This might be expected if tests didn't run or the output format is different")
+        # Create a minimal XML document
+        testsuites = ET.Element('testsuites')
+        testsuites.set('name', 'Tests')
+        testsuites.set('tests', '0')
+        testsuites.set('failures', '0')
+        testsuites.set('errors', '0')
+        xml_content = ('<?xml version="1.0" encoding="utf-8"?>\n'
+                       + ET.tostring(testsuites, encoding='unicode'))
+    else:
+        print(f"Found {len(test_cases)} test cases")
+
+        passed = sum(1 for t in test_cases if t.status == 'passed')
+        failed = sum(1 for t in test_cases if t.status == 'failed')
+        print(f"  Passed: {passed}")
+        print(f"  Failed: {failed}")
+        print()
+
+        # Generate the JUnit XML
+        print("Generating JUnit XML...")
+        generator = JUnitXMLGenerator(test_cases)
+        xml_content = generator.generate()
+
+    # Write the XML file
+    try:
+        # Ensure the output directory exists ('.' guards against a bare filename)
+        os.makedirs(os.path.dirname(xml_output_file) or '.', exist_ok=True)
+
+        with open(xml_output_file, 'w', encoding='utf-8') as f:
+            f.write(xml_content)
+
+        print(f"✓ Successfully wrote JUnit XML to: {xml_output_file}")
+    except Exception as e:
+        print(f"Error writing XML file: {e}", file=sys.stderr)
+        sys.exit(0)  # Don't fail the build
+
+    print("=" * 80)
+
+    # Always exit successfully - we don't want XML generation issues to fail the test
+    sys.exit(0)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/tools/test_xml_generator/test_runners.bzl b/tools/test_xml_generator/test_runners.bzl
new file mode 100644
index 000000000..ab5557146
--- /dev/null
+++ b/tools/test_xml_generator/test_runners.bzl
@@ -0,0 +1,53 @@
+# Copyright 2024 The Bazel Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Custom test runners with XML generation post-action."""
+
+load(
+    "//apple/testing/default_runner:ios_test_runner.bzl",
+    "ios_test_runner",
+)
+load(
+    "//apple/testing/default_runner:ios_xctestrun_runner.bzl",
+    "ios_xctestrun_runner",
+)
+load(
+    "//apple/testing/default_runner:macos_test_runner.bzl",
+    "macos_test_runner",
+)
+
+def xml_test_runners():
+    """Instantiates the pre-configured test runners with XML generation.
+
+    Rules can only be instantiated while a BUILD file is being evaluated, so
+    this macro must be called from this package's BUILD file; it defines the
+    `*_with_xml` runner targets documented in the README.
+    """
+
+    # iOS XCTestRun Runner with XML generation
+    ios_xctestrun_runner(
+        name = "ios_xctestrun_runner_with_xml",
+        post_action = "//tools/test_xml_generator:generate_test_xml",
+        post_action_determines_exit_code = False,
+        visibility = ["//visibility:public"],
+    )
+
+    # iOS Test Runner with XML generation
+    ios_test_runner(
+        name = "ios_test_runner_with_xml",
+        post_action = "//tools/test_xml_generator:generate_test_xml",
+        post_action_determines_exit_code = False,
+        visibility = ["//visibility:public"],
+    )
+
+    # macOS Test Runner with XML generation
+    macos_test_runner(
+        name = "macos_test_runner_with_xml",
+        post_action = "//tools/test_xml_generator:generate_test_xml",
+        post_action_determines_exit_code = False,
+        visibility = ["//visibility:public"],
+    )
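+
+# Minimal usage sketch (from this package's BUILD file):
+#
+#     load(":test_runners.bzl", "xml_test_runners")
+#
+#     xml_test_runners()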