diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 6b4d27979700b41cb82969afe6973097dd450da8..28b6a18d8eac03d506b9b98c0310534af9a280ce 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -60,12 +60,17 @@ test-s3-image:
       if [ ! -e /tmp/exit_code ]; then
         echo "Job creation failed, log below."
         cat openqa.log
-    - fi
+      fi
+    - |
+      ./utils/openqa_junit_report.py $(cat /tmp/job_ids) > ./junit.xml
   artifacts:
     when: always
     paths:
+      - junit.xml
       - openqa.log
       - worker.log
+    reports:
+      junit: junit.xml
     expire_in: '1 week'
   tags:
     - x86_64
diff --git a/utils/openqa_junit_report.py b/utils/openqa_junit_report.py
new file mode 100755
--- /dev/null
+++ b/utils/openqa_junit_report.py
@@ -0,0 +1,177 @@
+#!/usr/bin/env python3
+
+"""
+Pipeline report JUnit
+
+This script provides a report on an OpenQA test run, in JUnit XML format. This
+format is used by Gitlab to show test results for a specific merge request.
+
+See: https://docs.gitlab.com/ee/ci/testing/unit_test_reports.html
+
+"""
+
+from datetime import datetime
+from typing import Optional, Tuple
+from xml.etree import ElementTree as ET
+import argparse
+import json
+import logging
+import sys
+import urllib.request
+
+
+log = logging.getLogger()
+
+
+def argument_parser():
+    """Build the commandline parser for this script."""
+    parser = argparse.ArgumentParser(
+        description="Pipeline report JUnit")
+    parser.add_argument('--debug', dest='debug', action='store_true',
+                        help="Enable detailed logging to stderr")
+    parser.add_argument('job_id', type=int, nargs='+',
+                        help="OpenQA job IDs to report on")
+    return parser
+
+
+class JUnitXMLGenerator:
+    """Serialize per-suite test results into JUnit XML."""
+
+    def generate_xml(self, fd, test_suites):
+        """Write a <testsuites> document for `test_suites` to file object `fd`.
+
+        `test_suites` maps suite name -> list of testcase dicts, as
+        produced by openqa_job_details_to_junit_testsuite_report().
+        """
+        testsuites = ET.Element('testsuites')
+
+        for suite_name, test_results in test_suites.items():
+            failures = [t for t in test_results if "failure" in t]
+            skipped_tests = [t for t in test_results if "skipped" in t]
+
+            testsuite = ET.SubElement(testsuites, 'testsuite')
+            testsuite.set('name', suite_name)
+            testsuite.set('tests', str(len(test_results)))
+            testsuite.set('failures', str(len(failures)))
+            testsuite.set('errors', '0')
+            testsuite.set('skipped', str(len(skipped_tests)))
+
+            for result in test_results:
+                testcase = ET.SubElement(testsuite, 'testcase')
+                testcase.set('name', result['name'])
+                testcase.set('classname', result['classname'])
+                if "time" in result:
+                    testcase.set('time', str(result['time']))
+
+                if 'failure' in result:
+                    failure = ET.SubElement(testcase, 'failure')
+                    failure.set('message', result['failure']['message'])
+                    failure.set('type', result['failure']['type'])
+                    failure.text = result['failure']['stack_trace']
+                elif 'skipped' in result:
+                    skipped_element = ET.SubElement(testcase, 'skipped')
+                    skipped_element.set('message', result['skipped']['message'])
+
+        tree = ET.ElementTree(testsuites)
+        tree.write(fd, encoding='unicode')
+
+
+class OpenqaAPIHelper():
+    """Minimal client for the public OpenQA REST API."""
+
+    def get_job_details(self, job_id):
+        """Return the decoded 'details' JSON document for one OpenQA job."""
+        url = f"https://openqa.gnome.org/api/v1/jobs/{job_id}/details"
+        with urllib.request.urlopen(url) as response:
+            body = response.read()
+        return json.loads(body)
+
+
+def parse_openqa_test_execution_time(execution: Optional[str]) -> Optional[float]:
+    """Convert an OpenQA execution time such as "1m 2s" into seconds.
+
+    Returns None when no execution time was recorded.
+    """
+    if execution is None:
+        return None
+
+    # NOTE: jobs running for an hour or more ("1h 2m 3s") are not handled
+    # here and will raise ValueError.
+    parts = execution.split(" ")
+    if len(parts) == 2:
+        time_format = "%Mm %Ss"  # Example: "1m 2s"
+        parsed_time = datetime.strptime(execution, time_format)
+    elif len(parts) == 1:
+        time_format = "%Ss"  # Example: "2s"
+        parsed_time = datetime.strptime(execution, time_format)
+    else:
+        raise ValueError(f"Unsupported time format: {execution}")
+
+    return parsed_time.minute * 60 + parsed_time.second
+
+
+def find_failed_test_message(test_details: dict) -> str:
+    """Return the text of the 'Failed' step of a test, or "" if absent."""
+    for step in test_details["details"]:
+        if step.get("title") == "Failed":
+            return step["text_data"]
+    return ""
+
+
+def openqa_job_details_to_junit_testsuite_report(job_details) -> Tuple[str, dict]:
+    """Convert one OpenQA job 'details' document to (suite_name, testcases)."""
+    testsuite_name = job_details["job"]["test"]
+    testsuite = []
+    for details in job_details["job"]["testresults"]:
+        test_name = details["name"]
+        test_result = details["result"]
+        test = {
+            "name": test_name,
+            "classname": testsuite_name,
+        }
+
+        execution_time = parse_openqa_test_execution_time(details["execution_time"])
+        if execution_time:
+            test["time"] = execution_time
+
+        if test_result == "none":
+            test["skipped"] = dict(message="Earlier test failed")
+        elif test_result == "failed":
+            # JUnitXMLGenerator reads 'type' and 'stack_trace'
+            # unconditionally, so all three keys must be present.
+            message = find_failed_test_message(details)
+            test["failure"] = dict(
+                message=message,
+                type="failure",
+                stack_trace=message,
+            )
+        testsuite.append(test)
+
+    return testsuite_name, testsuite
+
+
+def main():
+    """Entry point: fetch job details and print JUnit XML to stdout."""
+    args = argument_parser().parse_args()
+
+    if args.debug:
+        logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
+
+    api = OpenqaAPIHelper()
+    junit_test_results = {}
+    for job_id in args.job_id:
+        details = api.get_job_details(job_id)
+
+        testsuite_name, testsuite = openqa_job_details_to_junit_testsuite_report(details)
+        junit_test_results[testsuite_name] = testsuite
+
+    writer = JUnitXMLGenerator()
+    writer.generate_xml(sys.stdout, junit_test_results)
+
+
+if __name__ == '__main__':
+    try:
+        main()
+    except RuntimeError as e:
+        sys.stderr.write("ERROR: {}\n".format(e))
+        sys.exit(1)
diff --git a/utils/pipeline_report.py b/utils/pipeline_report.py
index 120a2ec7ef23d432d610a949b7974daf853940f6..0c4f61feb0d9bafa7ad70f879b2c7f61481cba87 100755
--- a/utils/pipeline_report.py
+++ b/utils/pipeline_report.py
@@ -131,15 +131,15 @@ def find_in_list(l, predicate, error_text):
 
 TEMPLATE_GITLAB = """
 Project:
-  * Repo: {gitlab_repo_name}
+  * Repo: {gitlab_project}
   * Commit: {gitlab_repo_commit_id}
   * Commit date: {gitlab_repo_commit_date}
   * Commit title: {gitlab_repo_commit_title}
 
 Integration tests status (Gitlab):
-  * Pipeline: https://gitlab.gnome.org/gnome/gnome-build-meta/-/pipelines/{gitlab_pipeline_id}
-  * test-s3-image job: https://gitlab.gnome.org/gnome/gnome-build-meta/-/jobs/{gitlab_job_id}
+  * Pipeline: https://gitlab.gnome.org/{gitlab_project}/-/pipelines/{gitlab_pipeline_id}
+  * test-s3-image job: https://gitlab.gnome.org/{gitlab_project}/-/jobs/{gitlab_job_id}
   * test-s3-image job status: {gitlab_job_status}
   * test-s3-image job finished at: {gitlab_job_finished_at}"""
 
 
@@ -151,13 +151,13 @@ Integration tests status (OpenQA):
 
 
 class ScriptHelper:
-    def find_pipeline(self, api, pipeline_id=None, earlier=None, ref='master'):
+    def find_pipeline(self, api, project, pipeline_id=None, earlier=None, ref='master'):
         """Find the right pipeline based on the commandline options."""
         if pipeline_id:
             pipeline = api.query_pipeline(pipeline_id)
         else:
             pipeline = api.query_latest_pipeline()
-            print(f"Latest gnome-build-meta pipeline on default branch is {pipeline['id']}. Pipeline status: {pipeline['status']}")
+            print(f"Latest {project} pipeline on default branch is {pipeline['id']}. Pipeline status: {pipeline['status']}")
         if earlier:
             earlier_pipelines = api.list_pipelines(ref=ref, updated_before=pipeline["updated_at"])
             log.info(
@@ -186,10 +186,10 @@ class ScriptHelper:
         log.debug("Found test-s3-image job with ID %s", test_s3_image_job_id)
         return test_s3_image_job
 
-    def generate_gitlab_report(self, api: GitlabAPIHelper, project_name: str, pipeline: dict, job: dict) -> dict:
+    def generate_gitlab_report(self, api: GitlabAPIHelper, project: str, pipeline: dict, job: dict) -> dict:
         """Generate the report for a specific Gitlab pipeline."""
         return dict(
-            gitlab_repo_name=project_name,
+            gitlab_project=project,
             gitlab_repo_ref=pipeline["ref"],
             gitlab_repo_commit_id=pipeline["sha"],
             gitlab_repo_commit_date=job["commit"]["created_at"],
@@ -283,7 +283,7 @@ def main():
     api = GitlabAPIHelper(args.project)
 
     script = ScriptHelper()
-    pipeline = script.find_pipeline(api, args.pipeline, earlier=args.earlier)
+    pipeline = script.find_pipeline(api, args.project, args.pipeline, earlier=args.earlier)
     if pipeline["status"] == "running":
         raise RuntimeError("Cannot generate report for a running pipeline.")
     log.debug("Generate pipeline report for pipeline ID %s", pipeline["id"])