Add JUnit XML reports; refactor test reporting
* Add JUnit output file support
* Differentiate between an expected failure test and an error in the
test, as described in JUnit.
* In case of an error/exception during test, record and attach it to the
Test object and continue running the tests, and show it at the end
during the trial report.
Change-Id: Iedf6d912b3cce3333a187a4ac6d5c6b70fe9d5c5
diff --git a/.gitignore b/.gitignore
index 4119e7f..1ab84d9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -8,3 +8,4 @@
test_work
state
*.pyc
+selftest/trial_test/
diff --git a/selftest/suite_test.ok b/selftest/suite_test.ok
index fda77dc..30c6915 100644
--- a/selftest/suite_test.ok
+++ b/selftest/suite_test.ok
@@ -59,15 +59,30 @@
tst hello_world.py:[LINENR]: two [test_suite↪hello_world.py:[LINENR]]
tst hello_world.py:[LINENR]: three [test_suite↪hello_world.py:[LINENR]]
tst hello_world.py:[LINENR] PASS [test_suite↪hello_world.py]
-pass: all 1 tests passed.
+tst test_suite: PASS
+pass: all 6 tests passed (5 skipped).
- a test with an error
tst test_suite: Suite run start [suite.py:[LINENR]]
tst test_error.py:[LINENR] START [test_suite↪test_error.py] [suite.py:[LINENR]]
tst test_error.py:[LINENR]: I am 'test_suite' / 'test_error.py:[LINENR]' [test_suite↪test_error.py:[LINENR]] [test_error.py:[LINENR]]
-tst test_error.py:[LINENR]: FAIL [test_suite↪test_error.py:[LINENR]] [suite.py:[LINENR]]
-tst test_error.py:[LINENR]: ERR: AssertionError: [test_suite↪test_error.py:[LINENR]] [test_error.py:[LINENR]: assert False]
-FAIL: 1 of 1 tests failed:
- test_error.py
+tst test_error.py:[LINENR]: ERR: AssertionError: [test_error.py:[LINENR]: assert False]
+tst test_error.py:[LINENR] FAIL (AssertionError) [test_suite↪test_error.py] [suite.py:[LINENR]]
+tst test_suite: FAIL [suite.py:[LINENR]]
+
+- a test with a failure
+tst test_suite: Suite run start [suite.py:[LINENR]]
+tst test_fail.py:[LINENR] START [test_suite↪test_fail.py] [suite.py:[LINENR]]
+tst test_fail.py:[LINENR]: I am 'test_suite' / 'test_fail.py:[LINENR]' [test_suite↪test_fail.py:[LINENR]] [test_fail.py:[LINENR]]
+tst test_fail.py:[LINENR] FAIL (EpicFail) [test_suite↪test_fail.py] [suite.py:[LINENR]]
+tst test_suite: FAIL [suite.py:[LINENR]]
+
+- a test with a raised failure
+tst test_suite: Suite run start [suite.py:[LINENR]]
+tst test_fail_raise.py:[LINENR] START [test_suite↪test_fail_raise.py] [suite.py:[LINENR]]
+tst test_fail_raise.py:[LINENR]: I am 'test_suite' / 'test_fail_raise.py:[LINENR]' [test_suite↪test_fail_raise.py:[LINENR]] [test_fail_raise.py:[LINENR]]
+tst test_fail_raise.py:[LINENR]: ERR: Failure: ('EpicFail', 'This failure is expected') [test_fail_raise.py:[LINENR]: raise Failure('EpicFail', 'This failure is expected')]
+tst test_fail_raise.py:[LINENR] FAIL (EpicFail) [test_suite↪test_fail_raise.py] [suite.py:[LINENR]]
+tst test_suite: FAIL [suite.py:[LINENR]]
- graceful exit.
diff --git a/selftest/suite_test.py b/selftest/suite_test.py
index 315c683..16342c5 100755
--- a/selftest/suite_test.py
+++ b/selftest/suite_test.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python3
import os
import _prep
-from osmo_gsm_tester import log, suite, config
+from osmo_gsm_tester import log, suite, config, report
config.ENV_CONF = './suite_test'
@@ -22,13 +22,33 @@
print('- run hello world test')
s = suite.SuiteRun(None, 'test_suite', s_def)
results = s.run_tests('hello_world.py')
-print(str(results))
+print(report.suite_to_text(s))
log.style_change(src=True)
#log.style_change(trace=True)
print('\n- a test with an error')
results = s.run_tests('test_error.py')
-print(str(results))
+output = report.suite_to_text(s)
+assert 'FAIL: [test_suite] 1 failed ' in output
+assert 'FAIL: [test_error.py]' in output
+assert "type:'AssertionError' message: AssertionError()" in output
+assert 'assert False' in output
+
+print('\n- a test with a failure')
+results = s.run_tests('test_fail.py')
+output = report.suite_to_text(s)
+assert 'FAIL: [test_suite] 1 failed ' in output
+assert 'FAIL: [test_fail.py]' in output
+assert "type:'EpicFail' message: This failure is expected" in output
+assert "test.set_fail('EpicFail', 'This failure is expected')" in output
+
+print('\n- a test with a raised failure')
+results = s.run_tests('test_fail_raise.py')
+output = report.suite_to_text(s)
+assert 'FAIL: [test_suite] 1 failed ' in output
+assert 'FAIL: [test_fail_raise.py]' in output
+assert "type:'EpicFail' message: This failure is expected" in output
+assert "raise Failure('EpicFail', 'This failure is expected')" in output
print('\n- graceful exit.')
# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/selftest/suite_test/test_suite/test_fail.py b/selftest/suite_test/test_suite/test_fail.py
new file mode 100755
index 0000000..6880c81
--- /dev/null
+++ b/selftest/suite_test/test_suite/test_fail.py
@@ -0,0 +1,6 @@
+#!/usr/bin/env python3
+from osmo_gsm_tester.test import *
+
+print('I am %r / %r' % (suite.name(), test.name()))
+
+test.set_fail('EpicFail', 'This failure is expected')
diff --git a/selftest/suite_test/test_suite/test_fail_raise.py b/selftest/suite_test/test_suite/test_fail_raise.py
new file mode 100755
index 0000000..a7b0b61
--- /dev/null
+++ b/selftest/suite_test/test_suite/test_fail_raise.py
@@ -0,0 +1,6 @@
+#!/usr/bin/env python3
+from osmo_gsm_tester.test import *
+
+print('I am %r / %r' % (suite.name(), test.name()))
+
+raise Failure('EpicFail', 'This failure is expected')
diff --git a/selftest/trial_test.ok b/selftest/trial_test.ok
index 6ad39a9..7434a10 100644
--- a/selftest/trial_test.ok
+++ b/selftest/trial_test.ok
@@ -4,7 +4,7 @@
[TMP]/third
- fetch trial dirs in order
first
-['taken']
+['last_run', 'run.[TIMESTAMP]', 'taken']
second
third
- no more trial dirs left
diff --git a/src/osmo-gsm-tester.py b/src/osmo-gsm-tester.py
index 0a04708..504c6a9 100755
--- a/src/osmo-gsm-tester.py
+++ b/src/osmo-gsm-tester.py
@@ -169,44 +169,32 @@
t.verify()
trials.append(t)
- trials_passed = []
- trials_failed = []
+ trials_run = []
+ any_failed = False
for current_trial in trials:
try:
with current_trial:
- suites_passed = []
- suites_failed = []
for suite_scenario_str, suite_def, scenarios in suite_scenarios:
log.large_separator(current_trial.name(), suite_scenario_str)
suite_run = suite.SuiteRun(current_trial, suite_scenario_str, suite_def, scenarios)
- result = suite_run.run_tests(test_names)
- if result.all_passed:
- suites_passed.append(suite_scenario_str)
- suite_run.log('PASS')
- else:
- suites_failed.append(suite_scenario_str)
- suite_run.err('FAIL')
- if not suites_failed:
- current_trial.log('PASS')
- trials_passed.append(current_trial.name())
- else:
- current_trial.err('FAIL')
- trials_failed.append((current_trial.name(), suites_passed, suites_failed))
+ current_trial.add_suite(suite_run)
+
+ status = current_trial.run_suites(test_names)
+ if status == trial.Trial.FAIL:
+ any_failed = True
+ trials_run.append(current_trial)
except:
current_trial.log_exn()
sys.stderr.flush()
sys.stdout.flush()
- log.large_separator()
- if trials_passed:
- print('Trials passed:\n ' + ('\n '.join(trials_passed)))
- if trials_failed:
- print('Trials failed:')
- for trial_name, suites_passed, suites_failed in trials_failed:
- print(' %s (%d of %d suite runs failed)' % (trial_name, len(suites_failed), len(suites_failed) + len(suites_passed)))
- for suite_failed in suites_failed:
- print(' FAIL:', suite_failed)
+ if not any_failed:
+ log.large_separator('All trials passed:\n ' + ('\n '.join(mytrial.name() for mytrial in trials_run)))
+ else:
+ for mytrial in trials_run:
+ log.large_separator('Trial Report for %s' % mytrial.name())
+ mytrial.log_report()
exit(1)
if __name__ == '__main__':
diff --git a/src/osmo_gsm_tester/report.py b/src/osmo_gsm_tester/report.py
new file mode 100644
index 0000000..da0a004
--- /dev/null
+++ b/src/osmo_gsm_tester/report.py
@@ -0,0 +1,84 @@
+# osmo_gsm_tester: report: generate test reports in text and JUnit XML format
+#
+# Copyright (C) 2016-2017 by sysmocom - s.f.m.c. GmbH
+#
+# Author: Pau Espin Pedrol <pespin@sysmocom.de>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import math
+from datetime import datetime
+import xml.etree.ElementTree as et
+from . import log, suite
+
+def trial_to_junit_write(trial, junit_path):
+ elements = et.ElementTree(element=trial_to_junit(trial))
+ elements.write(junit_path)
+
+def trial_to_junit(trial):
+ testsuites = et.Element('testsuites')
+ for suite in trial.suites:
+ testsuite = suite_to_junit(suite)
+ testsuites.append(testsuite)
+ return testsuites
+
+def suite_to_junit(suite):
+ testsuite = et.Element('testsuite')
+ testsuite.set('name', suite.name())
+ testsuite.set('hostname', 'localhost')
+ testsuite.set('timestamp', datetime.fromtimestamp(round(suite.start_timestamp)).isoformat())
+ testsuite.set('time', str(math.ceil(suite.duration)))
+ testsuite.set('tests', str(len(suite.tests)))
+ testsuite.set('failures', str(suite.test_failed_ctr))
+ for test in suite.tests:
+ testcase = test_to_junit(test)
+ testsuite.append(testcase)
+ return testsuite
+
+def test_to_junit(test):
+ testcase = et.Element('testcase')
+ testcase.set('name', test.name())
+ testcase.set('time', str(math.ceil(test.duration)))
+ if test.status == suite.Test.SKIP:
+ skip = et.SubElement(testcase, 'skipped')
+ elif test.status == suite.Test.FAIL:
+ failure = et.SubElement(testcase, 'failure')
+ failure.set('type', test.fail_type)
+ failure.text = test.fail_message
+ return testcase
+
+def trial_to_text(trial):
+ msg = '\n%s [%s]\n ' % (trial.status, trial.name())
+ msg += '\n '.join(suite_to_text(result) for result in trial.suites)
+ return msg
+
+def suite_to_text(suite):
+ if suite.test_failed_ctr:
+ return 'FAIL: [%s] %d failed out of %d tests run (%d skipped):\n %s' % (
+ suite.name(), suite.test_failed_ctr, len(suite.tests), suite.test_skipped_ctr,
+ '\n '.join([test_to_text(t) for t in suite.tests]))
+ if not suite.tests:
+ return 'no tests were run.'
+ return 'pass: all %d tests passed (%d skipped).' % (len(suite.tests), suite.test_skipped_ctr)
+
+def test_to_text(test):
+ ret = "%s: [%s]" % (test.status, test.name())
+ if test.status != suite.Test.SKIP:
+ ret += " (%s, %d sec)" % (datetime.fromtimestamp(round(test.start_timestamp)).isoformat(), test.duration)
+ if test.status == suite.Test.FAIL:
+ ret += " type:'%s' message: %s" % (test.fail_type, test.fail_message.replace('\n', '\n '))
+ return ret
+
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/src/osmo_gsm_tester/suite.py b/src/osmo_gsm_tester/suite.py
index 43e55af..e05f0d7 100644
--- a/src/osmo_gsm_tester/suite.py
+++ b/src/osmo_gsm_tester/suite.py
@@ -21,12 +21,23 @@
import sys
import time
import copy
+import traceback
from . import config, log, template, util, resource, schema, ofono_client, osmo_nitb
from . import test
class Timeout(Exception):
pass
+class Failure(Exception):
+ '''Test failure exception, provided to be raised by tests. fail_type is
+ usually a keyword used to quickly identify the type of failure that
+ occurred. fail_msg is a more extensive text containing information about
+ the issue.'''
+
+ def __init__(self, fail_type, fail_msg):
+ self.fail_type = fail_type
+ self.fail_msg = fail_msg
+
class SuiteDefinition(log.Origin):
'''A test suite reserves resources for a number of tests.
Each test requires a specific number of modems, BTSs etc., which are
@@ -78,9 +89,11 @@
raise ValueError('add_test(): test already belongs to another suite')
self.tests.append(test)
-
-
class Test(log.Origin):
+ UNKNOWN = 'UNKNOWN'
+ SKIP = 'SKIP'
+ PASS = 'PASS'
+ FAIL = 'FAIL'
def __init__(self, suite, test_basename):
self.suite = suite
@@ -89,26 +102,43 @@
super().__init__(self.path)
self.set_name(self.basename)
self.set_log_category(log.C_TST)
+ self.status = Test.UNKNOWN
+ self.start_timestamp = 0
+ self.duration = 0
+ self.fail_type = None
+ self.fail_message = None
def run(self, suite_run):
assert self.suite is suite_run.definition
- with self:
- test.setup(suite_run, self, ofono_client, sys.modules[__name__])
- success = False
- try:
+ try:
+ with self:
+ self.status = Test.UNKNOWN
+ self.start_timestamp = time.time()
+ test.setup(suite_run, self, ofono_client, sys.modules[__name__])
self.log('START')
with self.redirect_stdout():
util.run_python_file('%s.%s' % (self.suite.name(), self.name()),
self.path)
- success = True
- except resource.NoResourceExn:
- self.err('Current resource state:\n', repr(suite_run.reserved_resources))
- raise
- finally:
- if success:
- self.log('PASS')
- else:
- self.log('FAIL')
+ if self.status == Test.UNKNOWN:
+ self.set_pass()
+ except Exception as e:
+ self.log_exn()
+ if isinstance(e, Failure):
+ ftype = e.fail_type
+ fmsg = e.fail_msg + '\n' + traceback.format_exc().rstrip()
+ else:
+ ftype = type(e).__name__
+ fmsg = repr(e) + '\n' + traceback.format_exc().rstrip()
+ if isinstance(e, resource.NoResourceExn):
+                fmsg += '\n' + 'Current resource state:\n' + repr(suite_run.reserved_resources)
+ self.set_fail(ftype, fmsg, False)
+
+ finally:
+ if self.status == Test.PASS or self.status == Test.SKIP:
+ self.log(self.status)
+ else:
+ self.log('%s (%s)' % (self.status, self.fail_type))
+ return self.status
def name(self):
l = log.get_line_for_src(self.path)
@@ -116,7 +146,26 @@
return '%s:%s' % (self._name, l)
return super().name()
+ def set_fail(self, fail_type, fail_message, tb=True):
+ self.status = Test.FAIL
+ self.duration = time.time() - self.start_timestamp
+ self.fail_type = fail_type
+ self.fail_message = fail_message
+ if tb:
+ self.fail_message += '\n' + ''.join(traceback.format_stack()[:-1]).rstrip()
+
+ def set_pass(self):
+ self.status = Test.PASS
+ self.duration = time.time() - self.start_timestamp
+
+ def set_skip(self):
+ self.status = Test.SKIP
+ self.duration = 0
+
class SuiteRun(log.Origin):
+ UNKNOWN = 'UNKNOWN'
+ PASS = 'PASS'
+ FAIL = 'FAIL'
trial = None
resources_pool = None
@@ -133,6 +182,14 @@
self.set_log_category(log.C_TST)
self.resources_pool = resource.ResourcesPool()
+ def mark_start(self):
+ self.tests = []
+ self.start_timestamp = time.time()
+ self.duration = 0
+ self.test_failed_ctr = 0
+ self.test_skipped_ctr = 0
+ self.status = SuiteRun.UNKNOWN
+
def combined(self, conf_name):
self.dbg(combining=conf_name)
with log.Origin(combining_scenarios=conf_name):
@@ -157,32 +214,6 @@
self._config = self.combined('config')
return self._config
- class Results:
- def __init__(self):
- self.passed = []
- self.failed = []
- self.all_passed = None
-
- def add_pass(self, test):
- self.passed.append(test)
-
- def add_fail(self, test):
- self.failed.append(test)
-
- def conclude(self):
- self.all_passed = bool(self.passed) and not bool(self.failed)
- return self
-
- def __str__(self):
- if self.failed:
- return 'FAIL: %d of %d tests failed:\n %s' % (
- len(self.failed),
- len(self.failed) + len(self.passed),
- '\n '.join([t.name() for t in self.failed]))
- if not self.passed:
- return 'no tests were run.'
- return 'pass: all %d tests passed.' % len(self.passed)
-
def reserve_resources(self):
if self.reserved_resources:
raise RuntimeError('Attempt to reserve resources twice for a SuiteRun')
@@ -192,24 +223,28 @@
def run_tests(self, names=None):
self.log('Suite run start')
+ self.mark_start()
if not self.reserved_resources:
self.reserve_resources()
- results = SuiteRun.Results()
for test in self.definition.tests:
if names and not test.name() in names:
+ test.set_skip()
+ self.test_skipped_ctr += 1
+ self.tests.append(test)
continue
- self._run_test(test, results)
- self.stop_processes()
- return results.conclude()
-
- def _run_test(self, test, results):
- try:
with self:
- test.run(self)
- results.add_pass(test)
- except:
- results.add_fail(test)
- self.log_exn()
+ st = test.run(self)
+ if st == Test.FAIL:
+ self.test_failed_ctr += 1
+ self.tests.append(test)
+ self.stop_processes()
+ self.duration = time.time() - self.start_timestamp
+ if self.test_failed_ctr:
+ self.status = SuiteRun.FAIL
+ else:
+ self.status = SuiteRun.PASS
+ self.log(self.status)
+ return self.status
def remember_to_stop(self, process):
if self._processes is None:
diff --git a/src/osmo_gsm_tester/test.py b/src/osmo_gsm_tester/test.py
index 871e3ae..f584c92 100644
--- a/src/osmo_gsm_tester/test.py
+++ b/src/osmo_gsm_tester/test.py
@@ -32,9 +32,10 @@
poll = None
prompt = None
Timeout = None
+Failure = None
def setup(suite_run, _test, ofono_client, suite_module):
- global trial, suite, test, resources, log, dbg, err, wait, sleep, poll, prompt, Timeout
+ global trial, suite, test, resources, log, dbg, err, wait, sleep, poll, prompt, Failure, Timeout
trial = suite_run.trial
suite = suite_run
test = _test
@@ -46,6 +47,7 @@
sleep = suite_run.sleep
poll = suite_run.poll
prompt = suite_run.prompt
+ Failure = suite_module.Failure
Timeout = suite_module.Timeout
# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/src/osmo_gsm_tester/trial.py b/src/osmo_gsm_tester/trial.py
index 171061d..67746f7 100644
--- a/src/osmo_gsm_tester/trial.py
+++ b/src/osmo_gsm_tester/trial.py
@@ -22,7 +22,7 @@
import shutil
import tarfile
-from . import log, util
+from . import log, util, suite, report
FILE_MARK_TAKEN = 'taken'
FILE_CHECKSUMS = 'checksums.md5'
@@ -32,6 +32,10 @@
FILE_LOG_BRIEF = 'log_brief'
class Trial(log.Origin):
+ UNKNOWN = 'UNKNOWN'
+ PASS = 'PASS'
+ FAIL = 'FAIL'
+
path = None
dir = None
_run_dir = None
@@ -58,6 +62,9 @@
self.dir = util.Dir(self.path)
self.inst_dir = util.Dir(self.dir.child('inst'))
self.bin_tars = []
+ self.suites = []
+ self.junit_path = self.get_run_dir().new_file(self.name()+'.xml')
+ self.status = Trial.UNKNOWN
def __repr__(self):
return self.name()
@@ -176,4 +183,24 @@
except:
pass
+ def add_suite(self, suite_run):
+ self.suites.append(suite_run)
+
+ def run_suites(self, names=None):
+ self.status = Trial.UNKNOWN
+ for suite_run in self.suites:
+ st = suite_run.run_tests(names)
+ if st == suite.SuiteRun.FAIL:
+ self.status = Trial.FAIL
+ elif self.status == Trial.UNKNOWN:
+ self.status = Trial.PASS
+ self.log(self.status)
+        junit_path = self.junit_path
+ self.log('Storing JUnit report in', junit_path)
+ report.trial_to_junit_write(self, junit_path)
+ return self.status
+
+ def log_report(self):
+ self.log(report.trial_to_text(self))
+
# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/suites/debug/error.py b/suites/debug/error.py
new file mode 100644
index 0000000..8e146fa
--- /dev/null
+++ b/suites/debug/error.py
@@ -0,0 +1,5 @@
+#!/usr/bin/env python3
+from osmo_gsm_tester.test import *
+
+# This can be used to verify that a test error is reported properly.
+assert False
diff --git a/suites/debug/fail.py b/suites/debug/fail.py
index 1b412b5..fcd56e0 100644
--- a/suites/debug/fail.py
+++ b/suites/debug/fail.py
@@ -2,4 +2,4 @@
from osmo_gsm_tester.test import *
# This can be used to verify that a test failure is reported properly.
-assert False
+test.set_fail('EpicFail', 'This failure is expected')
diff --git a/suites/debug/fail_raise.py b/suites/debug/fail_raise.py
new file mode 100644
index 0000000..22fb940
--- /dev/null
+++ b/suites/debug/fail_raise.py
@@ -0,0 +1,5 @@
+#!/usr/bin/env python3
+from osmo_gsm_tester.test import *
+
+# This can be used to verify that a test failure is reported properly.
+raise Failure('EpicFail', 'This failure is expected')
diff --git a/suites/debug/pass.py b/suites/debug/pass.py
new file mode 100644
index 0000000..42c1f25
--- /dev/null
+++ b/suites/debug/pass.py
@@ -0,0 +1,5 @@
+#!/usr/bin/env python3
+from osmo_gsm_tester.test import *
+
+# This can be used to verify that a test passes correctly.
+pass