Neels Hofmeyr | 6a99bf1 | 2020-05-14 21:00:17 +0200 | [diff] [blame] | 1 | #!/usr/bin/env python3 |
Neels Hofmeyr | b26196b | 2019-10-22 01:54:43 +0200 | [diff] [blame] | 2 | # Copyright 2018 sysmocom - s.f.m.c. GmbH |
| 3 | # |
| 4 | # Licensed under the Apache License, Version 2.0 (the "License"); |
| 5 | # you may not use this file except in compliance with the License. |
| 6 | # You may obtain a copy of the License at |
| 7 | # |
| 8 | # http://www.apache.org/licenses/LICENSE-2.0 |
| 9 | # |
| 10 | # Unless required by applicable law or agreed to in writing, software |
| 11 | # distributed under the License is distributed on an "AS IS" BASIS, |
| 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 13 | # See the License for the specific language governing permissions and |
| 14 | # limitations under the License. |
| 15 | import argparse |
| 16 | import re |
| 17 | |
# Module summary, reused as the argparse --help description below.
doc = "Compare TTCN3 test run results with expected results by junit logs."

# The nicest would be to use an XML library, but I don't want to introduce dependencies on the build slaves.
# Opening <testcase ...> tag; captures (classname, name) of the test.
re_testcase = re.compile(r'''<testcase classname=['"]([^'"]+)['"].* name=['"]([^'"]+)['"].*>''')
# End of a testcase: a closing tag, or a self-closing <testcase .../> tag.
re_testcase_end = re.compile(r'''(</testcase>|<testcase [^>]*/>)''')
# A <failure> or <error> element within a testcase marks it as failed.
re_failure = re.compile(r'''(<failure\b|<error\b)''')
| 24 | |
Neels Hofmeyr | b05d16f | 2020-05-14 22:51:30 +0200 | [diff] [blame] | 25 | RED = "\033[1;31m" |
| 26 | GREEN = "\033[1;32m" |
| 27 | YELLOW = "\033[1;33m" |
| 28 | BLUE = "\033[1;34m" |
| 29 | NOCOLOR = "\033[0;m" |
| 30 | |
def col(color, text):
    """Return text wrapped in the given ANSI color code, with a reset appended."""
    return '%s%s%s' % (color, text, NOCOLOR)
| 33 | |
# Colored labels for each comparison verdict.
RESULT_PASS = col(GREEN, 'pass')           # expected pass, still passes
RESULT_FAIL = col(RED, 'pass->FAIL')       # expected pass, now fails
RESULT_SKIP = col(BLUE, 'skip')            # expected, but absent from the current run
RESULT_XFAIL = col(YELLOW, 'xfail')        # expected failure, still fails
RESULT_FIXED = col(GREEN, 'xfail->PASS')   # expected failure, now passes
RESULT_NEW_PASS = col(GREEN, 'NEW: PASS')  # not in expected results, passes
RESULT_NEW_FAIL = col(RED, 'NEW: FAIL')    # not in expected results, fails

# Order in which verdicts appear in the summary (most alarming first).
RESULTS = (
    RESULT_FAIL,
    RESULT_NEW_FAIL,
    RESULT_XFAIL,
    RESULT_FIXED,
    RESULT_PASS,
    RESULT_NEW_PASS,
    RESULT_SKIP,
)
| 51 | |
def count(counter, name, result):
    """Tally one test verdict in counter and echo it per-test.

    counter maps a verdict label (one of RESULTS) to its occurrence count.
    Skipped tests are tallied but not printed, keeping the per-test output
    focused on tests that actually ran.
    """
    # dict.get with a default is the direct idiom; the previous
    # `counter.get(result) or 0` conflates a missing key with any falsy value.
    counter[result] = counter.get(result, 0) + 1
    if result != RESULT_SKIP:
        print('%s %s' % (result, name))
| 58 | |
def compare_one(name, expect, result, counter):
    """Classify one test against its expected outcome and tally the verdict.

    expect and result are RESULT_PASS, RESULT_FAIL or None; None means the
    test is absent from the expected / current results respectively.
    """
    # A test missing from the current run counts as skipped.
    if result is None:
        count(counter, name, RESULT_SKIP)
        return

    # Map (current result -> expected result) to the reported verdict.
    transitions = {
        RESULT_PASS: {
            RESULT_PASS: RESULT_PASS,
            RESULT_FAIL: RESULT_FIXED,
            None: RESULT_NEW_PASS,
        },
        RESULT_FAIL: {
            RESULT_PASS: RESULT_FAIL,
            RESULT_FAIL: RESULT_XFAIL,
            None: RESULT_NEW_FAIL,
        },
    }
    by_expect = transitions.get(result)
    if by_expect is None:
        return
    verdict = by_expect.get(expect)
    if verdict is not None:
        count(counter, name, verdict)
| 76 | |
def compare(cmdline, f_expected, f_current):
    """Diff two junit logs, printing per-test verdicts and a summary.

    cmdline is currently unused here but kept for interface stability.
    """
    expected = parse_results(f_expected)
    current = parse_results(f_current)

    expected_by_name = dict(expected)
    current_by_name = dict(current)

    counter = {}

    # Walk the expected list first so the output follows the expected order.
    for name, expected_result in expected:
        compare_one(name, expected_result, current_by_name.get(name), counter)

    # Tests appearing only in the current run are new passes/failures.
    for name, current_result in current:
        if name not in expected_by_name:
            compare_one(name, None, current_result, counter)

    print('\nSummary:')
    for label in RESULTS:
        occurrences = counter.get(label)
        if occurrences:
            print(' %s: %d' % (label, occurrences))
    print('\n')
| 103 | |
def parse_results(f):
    """Scan a junit XML file and return a list of (name, result) tuples.

    name is 'classname.testname'; result is RESULT_PASS or RESULT_FAIL.
    Parses line by line with regexes rather than an XML library so the
    script stays dependency-free on the build machines.
    """
    found = []
    cur_name = None
    cur_result = None
    for line in f:
        start = re_testcase.search(line)
        if start:
            # groups() is (classname, testname).
            cur_name = '%s.%s' % start.groups()

        if re_failure.search(line):
            cur_result = RESULT_FAIL

        # Only close out a testcase once we have seen its opening tag;
        # no failure/error element seen means the test passed.
        if re_testcase_end.search(line) and cur_name:
            found.append((cur_name, cur_result or RESULT_PASS))
            cur_name = None
            cur_result = None

    return found
| 130 | |
def main(cmdline):
    """Open both junit files named on the command line and compare them."""
    with open(cmdline.expected_results, 'r') as f_expected, \
            open(cmdline.current_results, 'r') as f_current:
        print('\nComparing expected results %r against results in %r\n--------------------'
              % (cmdline.expected_results, cmdline.current_results))
        compare(cmdline, f_expected, f_current)
| 137 | |
if __name__ == '__main__':
    # Positional args: expected junit XML first, current junit XML second.
    arg_parser = argparse.ArgumentParser(description=doc)
    arg_parser.add_argument('expected_results', metavar='expected.junit-xml',
                            help='junit XML file listing the expected test results.')
    arg_parser.add_argument('current_results', metavar='current.junit-xml',
                            help='junit XML file listing the current test results.')
    main(arg_parser.parse_args())