#!/usr/bin/env python3
# Copyright 2018 sysmocom - s.f.m.c. GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import re

# Short description, also used as the argparse help text below.
doc = "Compare TTCN3 test run results with expected results by junit logs."

# The nicest would be to use an XML library, but I don't want to introduce dependencies on the build slaves.
# Matches an opening <testcase ...> tag; captures (classname, name).
re_testcase = re.compile(r'''<testcase classname=['"]([^'"]+)['"].* name=['"]([^'"]+)['"].*>''')
# Matches the end of a testcase: either a closing tag or a self-closing tag.
re_testcase_end = re.compile(r'''(</testcase>|<testcase [^>]*/>)''')
# A <failure> or <error> element inside a testcase marks it as failed.
re_failure = re.compile(r'''(<failure\b|<error\b)''')

# Comparison categories: how a current result relates to the expected one
# (xfail = expected failure; FIXED = expected failure that now passes; ...).
RESULT_PASS = 'pass'
RESULT_FAIL = 'pass->FAIL'
RESULT_SKIP = 'skip'
RESULT_XFAIL = 'xfail'
RESULT_FIXED = 'xfail->PASS'
RESULT_NEW_PASS = 'NEW: PASS'
RESULT_NEW_FAIL = 'NEW: FAIL'

# Order in which categories are printed in the summary.
RESULTS = (
    RESULT_FAIL,
    RESULT_NEW_FAIL,
    RESULT_XFAIL,
    RESULT_FIXED,
    RESULT_PASS,
    RESULT_NEW_PASS,
    RESULT_SKIP,
    )
42
def count(counter, name, result):
    """Tally one occurrence of `result` in `counter` and report the test.

    Skipped tests are not printed individually; they only show up in the
    summary figures.
    """
    counter[result] = counter.get(result, 0) + 1
    if result != RESULT_SKIP:
        print('%s %s' % (result, name))
49
def compare_one(name, expect, result, counter):
    """Classify a single test's current result against its expected result.

    A `result` of None means the test did not run at all (counted as skip).
    Otherwise the (expected, current) pair selects the category; pairs not
    listed below are silently ignored, matching the original branch logic.
    """
    if result is None:
        count(counter, name, RESULT_SKIP)
        return
    outcome_by_pair = {
        (RESULT_PASS, RESULT_PASS): RESULT_PASS,
        (RESULT_FAIL, RESULT_PASS): RESULT_FIXED,
        (None, RESULT_PASS): RESULT_NEW_PASS,
        (RESULT_PASS, RESULT_FAIL): RESULT_FAIL,
        (RESULT_FAIL, RESULT_FAIL): RESULT_XFAIL,
        (None, RESULT_FAIL): RESULT_NEW_FAIL,
    }
    outcome = outcome_by_pair.get((expect, result))
    if outcome is not None:
        count(counter, name, outcome)
67
def compare(cmdline, f_expected, f_current):
    """Compare two junit result files and print per-test lines plus a summary."""
    expected_list = parse_results(f_expected)
    current_list = parse_results(f_current)

    current_by_name = dict(current_list)
    expected_names = set(name for name, _result in expected_list)

    counter = {}

    # Every expected test, whether it ran this time or not.
    for name, expected_result in expected_list:
        compare_one(name, expected_result, current_by_name.get(name), counter)

    # Tests that appear only in the current run are new.
    for name, current_result in current_list:
        if name not in expected_names:
            compare_one(name, None, current_result, counter)

    print('\nSummary:')
    for category in RESULTS:
        total = counter.get(category)
        if total:
            print(' %s: %d' % (category, total))
    print('\n')
94
def parse_results(f):
    """Scan a junit XML file line by line; return a list of (name, result).

    `name` is 'classname.testname'; `result` is RESULT_FAIL if a
    <failure>/<error> appeared before the testcase's end tag, RESULT_PASS
    otherwise.  Regex-based on purpose, to avoid XML library dependencies.
    """
    tests = []
    name = None
    result = None
    for line in f:
        start = re_testcase.search(line)
        if start:
            name = '%s.%s' % start.groups()

        if re_failure.search(line):
            result = RESULT_FAIL

        # Only record a testcase once its end tag is seen and we know its name.
        if re_testcase_end.search(line) and name:
            tests.append((name, result if result is not None else RESULT_PASS))
            name = None
            result = None

    return tests
121
def main(cmdline):
    """Open both junit files and run the comparison between them."""
    with open(cmdline.expected_results, 'r') as f_expected, \
            open(cmdline.current_results, 'r') as f_current:
        print('\nComparing expected results %r against results in %r\n--------------------'
              % (cmdline.expected_results, cmdline.current_results))
        compare(cmdline, f_expected, f_current)
128
if __name__ == '__main__':
    # Command line: two positional junit XML files, expected then current.
    argparser = argparse.ArgumentParser(description=doc)
    argparser.add_argument('expected_results', metavar='expected.junit-xml',
                           help='junit XML file listing the expected test results.')
    argparser.add_argument('current_results', metavar='current.junit-xml',
                           help='junit XML file listing the current test results.')
    main(argparser.parse_args())