core implementation

code bomb implementing the bulk of the osmo-gsm-tester

Change-Id: I53610becbf643ed51b90cfd9debc6992fe211ec9
diff --git a/src/osmo_gsm_tester/bts_model.py b/src/osmo_gsm_tester/bts_model.py
new file mode 100644
index 0000000..e5f9682
--- /dev/null
+++ b/src/osmo_gsm_tester/bts_model.py
@@ -0,0 +1,29 @@
+# osmo_gsm_tester: bts model specifics
+#
+# Copyright (C) 2016-2017 by sysmocom - s.f.m.c. GmbH
+#
+# Author: Neels Hofmeyr <neels@hofmeyr.de>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+from . import log, schema, util
+
+class TestContext(log.Origin):
+    '''
+    API to allow testing various BTS models.
+    '''
+
+    def __init__(self, suite_run, conf):
+        pass  # placeholder; signature assumed to mirror the concrete BTS classes
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/src/osmo_gsm_tester/bts_osmotrx.py b/src/osmo_gsm_tester/bts_osmotrx.py
new file mode 100644
index 0000000..5880870
--- /dev/null
+++ b/src/osmo_gsm_tester/bts_osmotrx.py
@@ -0,0 +1,104 @@
+# osmo_gsm_tester: specifics for running an osmo-bts-trx (with osmo-trx)
+#
+# Copyright (C) 2016-2017 by sysmocom - s.f.m.c. GmbH
+#
+# Author: Neels Hofmeyr <neels@hofmeyr.de>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+import os
+from . import log, config, util, template, process
+
+class OsmoBtsTrx(log.Origin):
+    suite_run = None
+    nitb = None
+    run_dir = None
+    processes = None
+    inst = None
+    env = None
+
+    BIN_TRX = 'osmo-trx'
+    BIN_BTS_TRX = 'osmo-bts-trx'
+    BIN_PCU = 'osmo-pcu'
+
+    def __init__(self, suite_run, conf):
+        self.suite_run = suite_run
+        self.conf = conf
+        self.set_name('osmo-bts-trx')
+        self.set_log_category(log.C_RUN)
+        self.processes = {}
+        self.inst = None
+        self.env = {}
+
+    def start(self):
+        if self.nitb is None:
+            raise RuntimeError('BTS needs to be added to a NITB before it can be started')
+        self.suite_run.poll()
+
+        self.log('Starting osmo-bts-trx to connect to', self.nitb)
+        self.run_dir = util.Dir(self.suite_run.trial.get_run_dir().new_dir(self.name()))
+        self.configure()
+
+        self.inst = util.Dir(os.path.abspath(self.suite_run.trial.get_inst('osmo-bts-trx')))
+        self.env = { 'LD_LIBRARY_PATH': str(self.inst) }
+
+        self.launch_process(OsmoBtsTrx.BIN_TRX)
+        self.launch_process(OsmoBtsTrx.BIN_BTS_TRX, '-r', '1', '-c', os.path.abspath(self.config_file))
+        #self.launch_process(OsmoBtsTrx.BIN_PCU, '-r', '1')
+        self.suite_run.poll()
+
+    def launch_process(self, binary_name, *args):
+        if self.processes.get(binary_name) is not None:
+            raise RuntimeError('Attempt to launch twice: %r' % binary_name)
+
+        binary = os.path.abspath(self.inst.child('bin', binary_name))
+        run_dir = self.run_dir.new_dir(binary_name)
+        if not os.path.isfile(binary):
+            raise RuntimeError('Binary missing: %r' % binary)
+        proc = process.Process(binary_name, run_dir,
+                               (binary,) + args,
+                               env=self.env)
+        self.processes[binary_name] = proc
+        self.suite_run.remember_to_stop(proc)
+        proc.launch()
+
+    def configure(self):
+        if self.nitb is None:
+            raise RuntimeError('BTS needs to be added to a NITB before it can be configured')
+        self.config_file = self.run_dir.new_file('osmo-bts-trx.cfg')
+        self.dbg(config_file=self.config_file)
+
+        values = dict(osmo_bts_trx=config.get_defaults('osmo_bts_trx'))
+        config.overlay(values, self.suite_run.config())
+        config.overlay(values, dict(osmo_bts_trx=dict(oml_remote_ip=self.nitb.addr())))
+        config.overlay(values, dict(osmo_bts_trx=self.conf))
+        self.dbg(conf=values)
+
+        with open(self.config_file, 'w') as f:
+            r = template.render('osmo-bts-trx.cfg', values)
+            self.dbg(r)
+            f.write(r)
+
+    def conf_for_nitb(self):
+        values = config.get_defaults('nitb_bts')
+        config.overlay(values, config.get_defaults('osmo_bts_trx'))
+        config.overlay(values, self.conf)
+        config.overlay(values, { 'type': 'sysmobts' })
+        self.dbg(conf=values)
+        return values
+
+    def set_nitb(self, nitb):
+        self.nitb = nitb
+
+# vim: expandtab tabstop=4 shiftwidth=4
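
For orientation, this is roughly how the classes above are meant to be wired
together; suite_run, nitb_iface and bts_conf are placeholders for objects that
the (not yet shown) suite framework provides, so this is a sketch rather than
runnable test code:

    from osmo_gsm_tester import osmo_nitb, bts_osmotrx

    def start_network(suite_run, nitb_iface, bts_conf):
        nitb = osmo_nitb.OsmoNitb(suite_run, nitb_iface)
        bts = bts_osmotrx.OsmoBtsTrx(suite_run, bts_conf)
        nitb.add_bts(bts)   # calls bts.set_nitb(nitb) so the BTS knows the OML address
        nitb.start()        # renders osmo-nitb.cfg and launches the osmo-nitb process
        bts.start()         # renders osmo-bts-trx.cfg, launches osmo-trx and osmo-bts-trx
        return nitb, bts
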
diff --git a/src/osmo_gsm_tester/bts_sysmo.py b/src/osmo_gsm_tester/bts_sysmo.py
new file mode 100644
index 0000000..de79f65
--- /dev/null
+++ b/src/osmo_gsm_tester/bts_sysmo.py
@@ -0,0 +1,69 @@
+# osmo_gsm_tester: specifics for running a sysmoBTS
+#
+# Copyright (C) 2016-2017 by sysmocom - s.f.m.c. GmbH
+#
+# Author: Neels Hofmeyr <neels@hofmeyr.de>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+from . import log, config, util, template
+
+class SysmoBts(log.Origin):
+    suite_run = None
+    nitb = None
+    run_dir = None
+
+    def __init__(self, suite_run, conf):
+        self.suite_run = suite_run
+        self.conf = conf
+        self.set_name('osmo-bts-sysmo')
+        self.set_log_category(log.C_RUN)
+
+    def start(self):
+        if self.nitb is None:
+            raise RuntimeError('BTS needs to be added to a NITB before it can be started')
+        self.log('Starting sysmoBTS to connect to', self.nitb)
+        self.run_dir = util.Dir(self.suite_run.trial.get_run_dir().new_dir(self.name()))
+        self.configure()
+        self.err('SysmoBts is not yet implemented')
+
+    def configure(self):
+        if self.nitb is None:
+            raise RuntimeError('BTS needs to be added to a NITB before it can be configured')
+        self.config_file = self.run_dir.new_file('osmo-bts-sysmo.cfg')
+        self.dbg(config_file=self.config_file)
+
+        values = { 'osmo_bts_sysmo': config.get_defaults('osmo_bts_sysmo') }
+        config.overlay(values, self.suite_run.config())
+        config.overlay(values, { 'osmo_bts_sysmo': { 'oml_remote_ip': self.nitb.addr() } })
+        config.overlay(values, { 'osmo_bts_sysmo': self.conf })
+        self.dbg(conf=values)
+
+        with open(self.config_file, 'w') as f:
+            r = template.render('osmo-bts-sysmo.cfg', values)
+            self.dbg(r)
+            f.write(r)
+
+    def conf_for_nitb(self):
+        values = config.get_defaults('nitb_bts')
+        config.overlay(values, config.get_defaults('osmo_bts_sysmo'))
+        config.overlay(values, self.conf)
+        config.overlay(values, { 'type': 'sysmobts' })
+        self.dbg(conf=values)
+        return values
+
+    def set_nitb(self, nitb):
+        self.nitb = nitb
+
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/src/osmo_gsm_tester/config.py b/src/osmo_gsm_tester/config.py
index 18b209e..0c820c3 100644
--- a/src/osmo_gsm_tester/config.py
+++ b/src/osmo_gsm_tester/config.py
@@ -1,4 +1,4 @@
-# osmo_gsm_tester: read and validate config files
+# osmo_gsm_tester: read and manage config files and global config
 #
 # Copyright (C) 2016-2017 by sysmocom - s.f.m.c. GmbH
 #
@@ -28,35 +28,124 @@
 #
 # JSON has too many braces and quotes to be easy to type
 #
-# YAML formatting is lean, but too powerful. The normal load() allows arbitrary
-# code execution. There is safe_load(). But YAML also allows several
-# alternative ways of formatting, better to have just one authoritative style.
-# Also it would be better to receive every setting as simple string rather than
-# e.g. an IMSI as an integer.
+# YAML formatting is lean, but:
+# - too powerful. The normal load() allows arbitrary code execution. There is
+#   safe_load().
+# - allows several alternative ways of formatting, better to have just one
+#   authoritative style.
+# - tries to detect types. It would be better to receive every setting as
+#   a simple string rather than e.g. an IMSI as an integer.
+# - e.g. an IMSI starting with a zero is interpreted as an octal value, resulting
+#   in super confusing error messages if the user merely forgets to quote it.
+# - does not tell me which line a config item came from, so no detailed error
+#   message is possible.
 #
-# The Python ConfigParserShootout page has numerous contestants, but it we want
-# to use widely used, standardized parsing code without re-inventing the wheel.
+# The Python ConfigParserShootout page has numerous contestants, but many of
+#   them do not seem to be widely used, standardized or even well tested.
 # https://wiki.python.org/moin/ConfigParserShootout
 #
 # The optimum would be a stripped down YAML format.
 # Lacking that, we shall go with yaml.safe_load() + a round trip
 # (feeding back to itself), converting keys to lowercase and values to string.
+# There is no solution for octal interpretations nor config file source lines
+# unless, apparently, we implement our own config parser.
 
 import yaml
-import re
 import os
 
-from . import log
+from . import log, schema, util
+from .util import is_dict, is_list, Dir, get_tempdir
 
-def read(path, schema=None):
+ENV_PREFIX = 'OSMO_GSM_TESTER_'
+ENV_CONF = os.getenv(ENV_PREFIX + 'CONF')
+
+DEFAULT_CONFIG_LOCATIONS = [
+    '.',
+    os.path.join(os.getenv('HOME'), '.config', 'osmo_gsm_tester'),
+    '/usr/local/etc/osmo_gsm_tester',
+    '/etc/osmo_gsm_tester'
+    ]
+
+PATHS_CONF = 'paths.conf'
+PATH_STATE_DIR = 'state_dir'
+PATH_SUITES_DIR = 'suites_dir'
+PATH_SCENARIOS_DIR = 'scenarios_dir'
+PATHS_SCHEMA = {
+        PATH_STATE_DIR: schema.STR,
+        PATH_SUITES_DIR: schema.STR,
+        PATH_SCENARIOS_DIR: schema.STR,
+    }
+
+PATHS_TEMPDIR_STR = '$TEMPDIR'
+
+PATHS = None
+
+def get_config_file(basename, fail_if_missing=True):
+    if ENV_CONF:
+        locations = [ ENV_CONF ]
+    else:
+        locations = DEFAULT_CONFIG_LOCATIONS
+
+    for l in locations:
+        p = os.path.join(l, basename)
+        if os.path.isfile(p):
+            return p
+    if not fail_if_missing:
+        return None
+    raise RuntimeError('configuration file not found: %r in %r' % (basename,
+        [os.path.abspath(p) for p in locations]))
+
+def read_config_file(basename, validation_schema=None, if_missing_return=False):
+    fail_if_missing = True
+    if if_missing_return is not False:
+        fail_if_missing = False
+    path = get_config_file(basename, fail_if_missing=fail_if_missing)
+    return read(path, validation_schema=validation_schema, if_missing_return=if_missing_return)
+
+def get_configured_path(label, allow_unset=False):
+    global PATHS
+
+    env_name = ENV_PREFIX + label.upper()
+    env_path = os.getenv(env_name)
+    if env_path:
+        return env_path
+
+    if PATHS is None:
+        paths_file = get_config_file(PATHS_CONF)
+        PATHS = read(paths_file, PATHS_SCHEMA)
+    p = PATHS.get(label)
+    if p is None and not allow_unset:
+        raise RuntimeError('missing configuration in %s: %r' % (PATHS_CONF, label))
+
+    if p is not None and p.startswith(PATHS_TEMPDIR_STR):
+        p = os.path.join(get_tempdir(), p[len(PATHS_TEMPDIR_STR):])
+    return p
+
+def get_state_dir():
+    return Dir(get_configured_path(PATH_STATE_DIR))
+
+def get_suites_dir():
+    return Dir(get_configured_path(PATH_SUITES_DIR))
+
+def get_scenarios_dir():
+    return Dir(get_configured_path(PATH_SCENARIOS_DIR))
+
+def read(path, validation_schema=None, if_missing_return=False):
     with log.Origin(path):
+        if not os.path.isfile(path) and if_missing_return is not False:
+            return if_missing_return
         with open(path, 'r') as f:
             config = yaml.safe_load(f)
         config = _standardize(config)
-        if schema:
-            validate(config, schema)
+        if validation_schema:
+            schema.validate(config, validation_schema)
         return config
 
+def write(path, config):
+    with log.Origin(path):
+        with open(path, 'w') as f:
+            f.write(tostr(config))
+
 def tostr(config):
     return _tostr(_standardize(config))
 
@@ -74,88 +163,84 @@
     config = yaml.safe_load(_tostr(_standardize_item(config)))
     return config
 
+def get_defaults(for_kind):
+    defaults = read_config_file('default.conf', if_missing_return={})
+    return defaults.get(for_kind, {})
 
-KEY_RE = re.compile('[a-zA-Z][a-zA-Z0-9_]*')
+def get_scenario(name, validation_schema=None):
+    scenarios_dir = get_scenarios_dir()
+    if not name.endswith('.conf'):
+        name = name + '.conf'
+    path = scenarios_dir.child(name)
+    if not os.path.isfile(path):
+        raise RuntimeError('No such scenario file: %r' % path)
+    return read(path, validation_schema=validation_schema)
 
-def band(val):
-    if val in ('GSM-1800', 'GSM-1900'):
+def add(dest, src):
+    if is_dict(dest):
+        if not is_dict(src):
+            raise ValueError('cannot add to dict a value of type: %r' % type(src))
+
+        for key, val in src.items():
+            dest_val = dest.get(key)
+            if dest_val is None:
+                dest[key] = val
+            else:
+                with log.Origin(key=key):
+                    add(dest_val, val)
         return
-    raise ValueError('Unknown GSM band: %r' % val)
+    if is_list(dest):
+        if not is_list(src):
+            raise ValueError('cannot add to list a value of type: %r' % type(src))
+        dest.extend(src)
+        return
+    if dest == src:
+        return
+    raise ValueError('cannot add dicts, conflicting items (values %r and %r)'
+                     % (dest, src))
 
-INT = 'int'
-STR = 'str'
-BAND = 'band'
-SCHEMA_TYPES = {
-        INT: int,
-        STR: str,
-        BAND: band,
-    }
+def combine(dest, src):
+    if is_dict(dest):
+        if not is_dict(src):
+            raise ValueError('cannot combine dict with a value of type: %r' % type(src))
 
-def is_dict(l):
-    return isinstance(l, dict)
+        for key, val in src.items():
+            dest_val = dest.get(key)
+            if dest_val is None:
+                dest[key] = val
+            else:
+                with log.Origin(key=key):
+                    combine(dest_val, val)
+        return
+    if is_list(dest):
+        if not is_list(src):
+            raise ValueError('cannot combine list with a value of type: %r' % type(src))
+        for i in range(len(src)):
+            with log.Origin(idx=i):
+                combine(dest[i], src[i])
+        return
+    if dest == src:
+        return
+    raise ValueError('cannot combine dicts, conflicting items (values %r and %r)'
+                     % (dest, src))
 
-def is_list(l):
-    return isinstance(l, (list, tuple))
+def overlay(dest, src):
+    if is_dict(dest):
+        if not is_dict(src):
+            raise ValueError('cannot overlay dict with a value of type: %r' % type(src))
 
-def validate(config, schema):
-    '''Make sure the given config dict adheres to the schema.
-       The schema is a dict of 'dict paths' in dot-notation with permitted
-       value type. All leaf nodes are validated, nesting dicts are implicit.
-
-       validate( { 'a': 123, 'b': { 'b1': 'foo', 'b2': [ 1, 2, 3 ] } },
-                 { 'a': int,
-                   'b.b1': str,
-                   'b.b2[]': int } )
-
-       Raise a ValueError in case the schema is violated.
-    '''
-
-    def validate_item(path, value, schema):
-        want_type = schema.get(path)
-
-        if is_list(value):
-            if want_type:
-                raise ValueError('config item is a list, should be %r: %r' % (want_type, path))
-            path = path + '[]'
-            want_type = schema.get(path)
-
-        if not want_type:
-            if is_dict(value):
-                nest(path, value, schema)
-                return
-            if is_list(value) and value:
-                for list_v in value:
-                    validate_item(path, list_v, schema)
-                return
-            raise ValueError('config item not known: %r' % path)
-
-        if want_type not in SCHEMA_TYPES:
-            raise ValueError('unknown type %r at %r' % (want_type, path))
-
-        if is_dict(value):
-            raise ValueError('config item is dict but should be a leaf node of type %r: %r'
-                             % (want_type, path))
-
-        if is_list(value):
-            for list_v in value:
-                validate_item(path, list_v, schema)
-            return
-
-        with log.Origin(item=path):
-            type_validator = SCHEMA_TYPES.get(want_type)
-            type_validator(value)
-
-    def nest(parent_path, config, schema):
-        if parent_path:
-            parent_path = parent_path + '.'
-        else:
-            parent_path = ''
-        for k,v in config.items():
-            if not KEY_RE.fullmatch(k):
-                raise ValueError('invalid config key: %r' % k)
-            path = parent_path + k
-            validate_item(path, v, schema)
-
-    nest(None, config, schema)
+        for key, val in src.items():
+            dest_val = dest.get(key)
+            with log.Origin(key=key):
+                dest[key] = overlay(dest_val, val)
+        return dest
+    if is_list(dest):
+        if not is_list(src):
+            raise ValueError('cannot overlay list with a value of type: %r' % type(src))
+        for i in range(len(src)):
+            with log.Origin(idx=i):
+                dest[i] = overlay(dest[i], src[i])
+        return dest
+    return src
 
 # vim: expandtab tabstop=4 shiftwidth=4
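
The three merge helpers differ mainly in how they treat keys present on both
sides: add() only fills in what is missing, combine() recurses and rejects
conflicting leaf values, and overlay() recurses and lets the new value win. A
small illustration of overlay() and add(), using made-up config values:

    import copy
    from osmo_gsm_tester import config

    defaults = {'nitb': {'net': {'mcc': '001', 'mnc': '01'}, 'encryption': 'a5_0'}}
    scenario = {'nitb': {'net': {'mnc': '02'}}}

    values = copy.deepcopy(defaults)
    config.overlay(values, scenario)
    # where both sides define a leaf, the overlaid value wins:
    # values == {'nitb': {'net': {'mcc': '001', 'mnc': '02'}, 'encryption': 'a5_0'}}

    config.add(values, {'extra_items': ['x']})
    # add() only fills in missing keys and extends lists; a conflicting
    # scalar value raises ValueError.
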
diff --git a/src/osmo_gsm_tester/log.py b/src/osmo_gsm_tester/log.py
index 27194a9..2ad82aa 100644
--- a/src/osmo_gsm_tester/log.py
+++ b/src/osmo_gsm_tester/log.py
@@ -29,15 +29,25 @@
 L_DBG = 10
 L_TRACEBACK = 'TRACEBACK'
 
+LEVEL_STRS = {
+            'err': L_ERR,
+            'log': L_LOG,
+            'dbg': L_DBG,
+        }
+
 C_NET = 'net'
 C_RUN = 'run'
 C_TST = 'tst'
 C_CNF = 'cnf'
+C_BUS = 'bus'
 C_DEFAULT = '---'
 
 LONG_DATEFMT = '%Y-%m-%d_%H:%M:%S'
 DATEFMT = '%H:%M:%S'
 
+# may be overridden by regression tests
+get_process_id = lambda: '%d-%d' % (os.getpid(), time.time())
+
 class LogTarget:
     do_log_time = None
     do_log_category = None
@@ -47,6 +57,7 @@
     do_log_src = None
     origin_width = None
     origin_fmt = None
+    all_levels = None
 
     # redirected by logging test
     get_time_str = lambda self: time.strftime(self.log_time_fmt)
@@ -101,10 +112,16 @@
         'set global logging log.L_* level for a given log.C_* category'
         self.category_levels[category] = level
 
+    def set_all_levels(self, level):
+        self.all_levels = level
+
     def is_enabled(self, category, level):
         if level == L_TRACEBACK:
             return self.do_log_traceback
-        is_level = self.category_levels.get(category)
+        if self.all_levels is not None:
+            is_level = self.all_levels
+        else:
+            is_level = self.category_levels.get(category)
         if is_level is None:
             is_level = L_LOG
         if level < is_level:
@@ -128,19 +145,26 @@
         if self.do_log_category:
             log_pre.append(category)
 
+        deeper_origins = ''
         if self.do_log_origin:
             if origin is None:
                 name = '-'
+            elif isinstance(origin, Origins):
+                name = origin[-1]
+                if len(origin) > 1:
+                    deeper_origins = str(origin)
             elif isinstance(origin, str):
                 name = origin or None
-            elif hasattr(origin, '_name'):
-                name = origin._name
+            elif hasattr(origin, 'name'):
+                name = origin.name()
             if not name:
                 name = str(origin.__class__.__name__)
             log_pre.append(self.origin_fmt.format(name))
 
         if self.do_log_level and level != L_LOG:
-            log_pre.append(level_str(level) or ('loglevel=' + str(level)) )
+            loglevel = '%s: ' % (level_str(level) or ('loglevel=' + str(level)))
+        else:
+            loglevel = ''
 
         log_line = [str(m) for m in messages]
 
@@ -150,11 +174,15 @@
                             (', '.join(['%s=%r' % (k,v)
                              for k,v in sorted(named_items.items())])))
 
+        if deeper_origins:
+            log_line.append(' [%s]' % deeper_origins)
+
         if self.do_log_src and src:
             log_line.append(' [%s]' % str(src))
 
-        log_str = '%s%s%s' % (' '.join(log_pre),
+        log_str = '%s%s%s%s' % (' '.join(log_pre),
                               ': ' if log_pre else '',
+                              loglevel,
                               ' '.join(log_line))
 
         self.log_sink(log_str.strip() + '\n')
@@ -173,6 +201,9 @@
 
 def _log_all_targets(origin, category, level, src, messages, named_items=None):
     global targets
+
+    if origin is None:
+        origin = Origin._global_current_origin
     if isinstance(src, int):
         src = get_src_from_caller(src + 1)
     for target in targets:
@@ -188,6 +219,20 @@
     f = os.path.basename(f)
     return '%s:%s: %s' % (f, l, c)
 
+def get_line_for_src(src_path):
+    etype, exception, tb = sys.exc_info()
+    if tb:
+        ftb = traceback.extract_tb(tb)
+        for f,l,m,c in ftb:
+            if f.endswith(src_path):
+                return l
+
+    for frame in stack():
+        caller = getframeinfo(frame[0])
+        if caller.filename.endswith(src_path):
+            return caller.lineno
+    return None
+
 
 class Origin:
     '''
@@ -198,13 +243,14 @@
     This will log 'my name' as an origin for the Problem.
     '''
 
+    _global_current_origin = None
+    _global_id = None
+
     _log_category = None
     _src = None
     _name = None
-    _log_line_buf = None
-    _prev_stdout = None
+    _origin_id = None
 
-    _global_current_origin = None
     _parent_origin = None
 
     def __init__(self, *name_items, category=None, **detail_items):
@@ -226,7 +272,17 @@
         self._name = name + details
 
     def name(self):
-        return self._name
+        return self._name or self.__class__.__name__
+
+    __str__ = name
+    __repr__ = name
+
+    def origin_id(self):
+        if not self._origin_id:
+            if not Origin._global_id:
+                Origin._global_id = get_process_id()
+            self._origin_id = '%s-%s' % (self.name(), Origin._global_id)
+        return self._origin_id
 
     def set_log_category(self, category):
         self._log_category = category
@@ -249,11 +305,9 @@
         log_exn(self, self._log_category, exc_info)
 
     def __enter__(self):
-        if self._parent_origin is not None:
+        if not self.set_child_of(Origin._global_current_origin):
             return
-        if Origin._global_current_origin == self:
-            return
-        self._parent_origin, Origin._global_current_origin = Origin._global_current_origin, self
+        Origin._global_current_origin = self
 
     def __exit__(self, *exc_info):
         rc = None
@@ -263,10 +317,54 @@
         return rc
 
     def redirect_stdout(self):
-        return contextlib.redirect_stdout(self)
+        return contextlib.redirect_stdout(SafeRedirectStdout(self))
+
+    def gather_origins(self):
+        origins = Origins()
+        origins.add(self)
+        origin = self._parent_origin
+        if origin is None and Origin._global_current_origin is not None:
+            origin = Origin._global_current_origin
+        while origin is not None:
+            origins.add(origin)
+            origin = origin._parent_origin
+        return origins
+
+    def set_child_of(self, parent_origin):
+        # avoid loops
+        if self._parent_origin is not None:
+            return False
+        if parent_origin == self:
+            return False
+        self._parent_origin = parent_origin
+        return True
+
+class LineInfo(Origin):
+    def __init__(self, src_file, *name_items, **detail_items):
+        self.src_file = src_file
+        self.set_name(*name_items, **detail_items)
+
+    def name(self):
+        l = get_line_for_src(self.src_file)
+        if l is not None:
+            return '%s:%s' % (self._name, l)
+        return super().name()
+
+class SafeRedirectStdout:
+    '''
+    To be able to use 'print' in test scripts, this is used to redirect stdout
+    to a test class' log() function. However, it turns out doing that breaks
+    python debugger sessions -- it uses extended features of stdout, and will
+    fail dismally if it finds this wrapper in sys.stdout. Luckily, overriding
+    __getattr__() to return the original sys.__stdout__ attributes for anything
+    other than write() makes the debugger session work nicely again!
+    '''
+    _log_line_buf = None
+
+    def __init__(self, origin):
+        self._origin = origin
 
     def write(self, message):
-        'to redirect stdout to the log'
         lines = message.splitlines()
         if not lines:
             return
@@ -276,21 +374,12 @@
         if not message.endswith('\n'):
             self._log_line_buf = lines[-1]
             lines = lines[:-1]
-        origins = self.gather_origins()
+        origins = self._origin.gather_origins()
         for line in lines:
-            self._log(L_LOG, (line,), origins=origins)
+            self._origin._log(L_LOG, (line,), origins=origins)
 
-    def flush(self):
-        pass
-
-    def gather_origins(self):
-        origins = Origins()
-        origin = self
-        while origin:
-            origins.add(origin)
-            origin = origin._parent_origin
-        return str(origins)
-
+    def __getattr__(self, name):
+        return sys.__stdout__.__getattribute__(name)
 
 
 def dbg(origin, category, *messages, **named_items):
@@ -337,7 +426,7 @@
 
     # if there are origins recorded with the Exception, prefer that
     if hasattr(exception, 'origins'):
-        origin = str(exception.origins)
+        origin = exception.origins
 
     # if there is a category recorded with the Exception, prefer that
     if hasattr(exception, 'category'):
@@ -363,16 +452,23 @@
         if origin is not None:
             self.add(origin)
     def add(self, origin):
-        if hasattr(origin, '_name'):
-            origin_str = origin._name
+        if hasattr(origin, 'name'):
+            origin_str = origin.name()
         else:
-            origin_str = str(origin)
+            origin_str = repr(origin)
+        if origin_str is None:
+            raise RuntimeError('origin_str is None for %r' % origin)
         self.insert(0, origin_str)
     def __str__(self):
-        return '->'.join(self)
+        return '↪'.join(self)
 
 
 
+def set_all_levels(level):
+    global targets
+    for target in targets:
+        target.set_all_levels(level)
+
 def set_level(category, level):
     global targets
     for target in targets:
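
To make the Origin bookkeeping concrete: entering an Origin as a context
manager records the currently active Origin as its parent, and
gather_origins() later walks that chain into an Origins list. When such a
chain reaches LogTarget.log(), the innermost name is printed as the origin and,
if there is more than one, the full chain is appended in brackets. A minimal
sketch (names are made up):

    from osmo_gsm_tester import log

    suite = log.Origin('suite-sms')
    test = log.Origin('test-mo-mt')

    with suite:
        with test:
            chain = test.gather_origins()
            print(str(chain))   # 'suite-sms↪test-mo-mt'
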
diff --git a/src/osmo_gsm_tester/ofono_client.py b/src/osmo_gsm_tester/ofono_client.py
new file mode 100644
index 0000000..622a18f
--- /dev/null
+++ b/src/osmo_gsm_tester/ofono_client.py
@@ -0,0 +1,117 @@
+# osmo_gsm_tester: DBUS client to talk to ofono
+#
+# Copyright (C) 2016-2017 by sysmocom - s.f.m.c. GmbH
+#
+# Author: Neels Hofmeyr <neels@hofmeyr.de>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+from . import log
+
+from pydbus import SystemBus, Variant
+import time
+import pprint
+
+from gi.repository import GLib
+glib_main_loop = GLib.MainLoop()
+glib_main_ctx = glib_main_loop.get_context()
+bus = SystemBus()
+
+def poll():
+    global glib_main_ctx
+    while glib_main_ctx.pending():
+        glib_main_ctx.iteration()
+
+def get(path):
+    global bus
+    return bus.get('org.ofono', path)
+
+def list_modems():
+    root = get('/')
+    return sorted(root.GetModems())
+
+
+class Modem(log.Origin):
+    'convenience for ofono Modem interaction'
+    msisdn = None
+
+    def __init__(self, conf):
+        self.conf = conf
+        self.path = conf.get('path')
+        self.set_name(self.path)
+        self.set_log_category(log.C_BUS)
+        self._dbus_obj = None
+        self._interfaces_was = set()
+        poll()
+
+    def set_msisdn(self, msisdn):
+        self.msisdn = msisdn
+
+    def imsi(self):
+        return self.conf.get('imsi')
+
+    def ki(self):
+        return self.conf.get('ki')
+
+    def set_powered(self, on=True):
+        self.dbus_obj().SetProperty('Powered', Variant('b', on))
+
+    def dbus_obj(self):
+        if self._dbus_obj is None:
+            self._dbus_obj = get(self.path)
+            self._dbus_obj.PropertyChanged.connect(self._on_property_change)
+            self._on_interfaces_change(self._dbus_obj.GetProperties().get('Interfaces'))
+        return self._dbus_obj
+
+    def properties(self):
+        return self.dbus_obj().GetProperties()
+
+    def _on_property_change(self, name, value):
+        if name == 'Interfaces':
+            self._on_interfaces_change(value)
+
+    def _on_interfaces_change(self, interfaces_now):
+        now = set(interfaces_now)
+        additions = now - self._interfaces_was
+        removals = self._interfaces_was - now
+        self._interfaces_was = now
+        for iface in removals:
+            with log.Origin('modem.disable(%s)' % iface):
+                try:
+                    self._on_interface_disabled(iface)
+                except:
+                    self.log_exn()
+        for iface in additions:
+            with log.Origin('modem.enable(%s)' % iface):
+                try:
+                    self._on_interface_enabled(iface)
+                except:
+                    self.log_exn()
+
+    def _on_interface_enabled(self, interface_name):
+        self.dbg('Interface enabled:', interface_name)
+        # todo: when the messages service comes up, connect a message reception signal
+
+    def _on_interface_disabled(self, interface_name):
+        self.dbg('Interface disabled:', interface_name)
+
+    def connect(self, nitb):
+        'set the modem up to connect to MCC+MNC from NITB config'
+        self.log('connect to', nitb)
+
+    def sms_send(self, msisdn):
+        self.log('send sms to MSISDN', msisdn)
+        return 'todo'
+
+# vim: expandtab tabstop=4 shiftwidth=4
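
A rough sketch of how this module is meant to be driven; it assumes a running
ofono daemon reachable over the system DBus, and the modem path, IMSI and KI
values below are made up:

    from osmo_gsm_tester import ofono_client

    for path, properties in ofono_client.list_modems():
        print('found modem:', path)

    conf = {'path': '/sim7100_0',
            'imsi': '901700000000001',
            'ki': '00112233445566778899aabbccddeeff'}
    modem = ofono_client.Modem(conf)
    modem.set_powered(True)   # sets the ofono 'Powered' property via DBus
    ofono_client.poll()       # drive the GLib main context so signals are delivered
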
diff --git a/src/osmo_gsm_tester/osmo_ctrl.py b/src/osmo_gsm_tester/osmo_ctrl.py
new file mode 100644
index 0000000..c3a09db
--- /dev/null
+++ b/src/osmo_gsm_tester/osmo_ctrl.py
@@ -0,0 +1,88 @@
+
+# osmo_gsm_tester: client for the Osmocom CTRL interface protocol
+#
+# Copyright (C) 2016-2017 by sysmocom - s.f.m.c. GmbH
+#
+# Author: Neels Hofmeyr <neels@hofmeyr.de>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+import socket
+import struct
+
+from . import log
+
+class CtrlInterfaceExn(Exception):
+    pass
+
+class OsmoCtrl(log.Origin):
+
+    def __init__(self, host, port):
+        self.set_name('Ctrl', host=host, port=port)
+        self.set_log_category(log.C_BUS)
+        self.host = host
+        self.port = port
+        self.sck = None
+
+    def prefix_ipa_ctrl_header(self, data):
+        if isinstance(data, str):
+            data = data.encode('utf-8')
+        s = struct.pack(">HBB", len(data)+1, 0xee, 0)
+        return s + data
+
+    def remove_ipa_ctrl_header(self, data):
+        if (len(data) < 4):
+            raise CtrlInterfaceExn("Answer too short!")
+        (plen, ipa_proto, osmo_proto) = struct.unpack(">HBB", data[:4])
+        if (plen + 3 > len(data)):
+            self.err('Warning: Wrong payload length', expected=plen, got=len(data)-3)
+        if (ipa_proto != 0xee or osmo_proto != 0):
+            raise CtrlInterfaceExn("Wrong protocol in answer!")
+        return data[4:plen+3], data[plen+3:]
+
+    def connect(self):
+        self.dbg('Connecting')
+        self.sck = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        self.sck.connect((self.host, self.port))
+        self.sck.setblocking(1)
+
+    def disconnect(self):
+        self.dbg('Disconnecting')
+        if self.sck is not None:
+            self.sck.close()
+
+    def _send(self, data):
+        self.dbg('Sending', data=data)
+        data = self.prefix_ipa_ctrl_header(data)
+        self.sck.send(data)
+
+    def receive(self, length = 1024):
+        return self.sck.recv(length)
+
+    def do_set(self, var, value, id=0):
+        setmsg = "SET %s %s %s" %(id, var, value)
+        self._send(setmsg)
+
+    def do_get(self, var, id=0):
+        getmsg = "GET %s %s" %(id, var)
+        self._send(getmsg)
+
+    def __enter__(self):
+        self.connect()
+        return self
+
+    def __exit__(self, *exc_info):
+        self.disconnect()
+
+# vim: expandtab tabstop=4 shiftwidth=4
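
The CTRL socket speaks the IPA multiplex: each message gets a 4-byte header
with a 16-bit length (payload length plus one, counting the extension protocol
byte), the IPA stream ID 0xee (Osmocom extension) and the extension protocol
number 0 used here for CTRL. A small framing and usage sketch; the host and
the 'mnc' variable are illustrative, 4249 is the osmo-nitb CTRL port used
elsewhere in this patch:

    import struct
    from osmo_gsm_tester import osmo_ctrl

    msg = b'GET 1 mnc'
    hdr = struct.pack('>HBB', len(msg) + 1, 0xee, 0)
    frame = hdr + msg   # b'\x00\x0a\xee\x00GET 1 mnc'

    with osmo_ctrl.OsmoCtrl('127.0.0.1', 4249) as ctrl:
        ctrl.do_get('mnc', id=1)
        answer, remainder = ctrl.remove_ipa_ctrl_header(ctrl.receive())
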
diff --git a/src/osmo_gsm_tester/osmo_nitb.py b/src/osmo_gsm_tester/osmo_nitb.py
new file mode 100644
index 0000000..3d5fc6a
--- /dev/null
+++ b/src/osmo_gsm_tester/osmo_nitb.py
@@ -0,0 +1,155 @@
+# osmo_gsm_tester: specifics for running an osmo-nitb
+#
+# Copyright (C) 2016-2017 by sysmocom - s.f.m.c. GmbH
+#
+# Author: Neels Hofmeyr <neels@hofmeyr.de>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import random
+import re
+import socket
+
+from . import log, util, config, template, process, osmo_ctrl
+
+class OsmoNitb(log.Origin):
+    suite_run = None
+    nitb_iface = None
+    run_dir = None
+    config_file = None
+    process = None
+    bts = None
+
+    def __init__(self, suite_run, nitb_iface):
+        self.suite_run = suite_run
+        self.nitb_iface = nitb_iface
+        self.set_log_category(log.C_RUN)
+        self.set_name('osmo-nitb_%s' % nitb_iface.get('addr'))
+        self.bts = []
+
+    def start(self):
+        self.log('Starting osmo-nitb')
+        self.run_dir = util.Dir(self.suite_run.trial.get_run_dir().new_dir(self.name()))
+        self.configure()
+        inst = util.Dir(self.suite_run.trial.get_inst('openbsc'))
+        binary = os.path.abspath(inst.child('bin', 'osmo-nitb'))
+        if not os.path.isfile(binary):
+            raise RuntimeError('Binary missing: %r' % binary)
+        env = { 'LD_LIBRARY_PATH': os.path.abspath(str(inst)) }
+        self.dbg(run_dir=self.run_dir, binary=binary, env=env)
+        self.process = process.Process(self.name(), self.run_dir,
+                                       (binary, '-c',
+                                       os.path.abspath(self.config_file)),
+                                       env=env)
+        self.suite_run.remember_to_stop(self.process)
+        self.process.launch()
+
+    def configure(self):
+        self.config_file = self.run_dir.new_file('osmo-nitb.cfg')
+        self.dbg(config_file=self.config_file)
+
+        values = dict(nitb=config.get_defaults('nitb'))
+        config.overlay(values, self.suite_run.config())
+        config.overlay(values, dict(nitb_iface=self.nitb_iface))
+
+        bts_list = []
+        for bts in self.bts:
+            bts_list.append(bts.conf_for_nitb())
+        config.overlay(values, dict(nitb=dict(net=dict(bts_list=bts_list))))
+
+        self.dbg(conf=values)
+
+        with open(self.config_file, 'w') as f:
+            r = template.render('osmo-nitb.cfg', values)
+            self.dbg(r)
+            f.write(r)
+
+    def addr(self):
+        return self.nitb_iface.get('addr')
+
+    def add_bts(self, bts):
+        self.bts.append(bts)
+        bts.set_nitb(self)
+
+    def add_subscriber(self, modem, msisdn=None):
+        if msisdn is None:
+            msisdn = self.suite_run.resources_pool.next_msisdn(modem)
+        modem.set_msisdn(msisdn)
+        self.log('Add subscriber', msisdn=msisdn, imsi=modem.imsi())
+        with self:
+            OsmoNitbCtrl(self).add_subscriber(modem.imsi(), msisdn, modem.ki())
+
+    def subscriber_attached(self, *modems):
+        return all([self.imsi_attached(m.imsi()) for m in modems])
+
+    def imsi_attached(self, imsi):
+        return random.choice((True, False))  # placeholder, not actually checked yet
+
+    def sms_received(self, sms):
+        return random.choice((True, False))  # placeholder, not actually checked yet
+
+    def running(self):
+        return not self.process.terminated()
+
+
+class OsmoNitbCtrl(log.Origin):
+    PORT = 4249
+    SUBSCR_MODIFY_VAR = 'subscriber-modify-v1'
+    SUBSCR_MODIFY_REPLY_RE = re.compile(r"SET_REPLY (\d+) %s OK" % SUBSCR_MODIFY_VAR)
+    SUBSCR_LIST_ACTIVE_VAR = 'subscriber-list-active-v1'
+
+    def __init__(self, nitb):
+        self.nitb = nitb
+        self.set_name('CTRL(%s:%d)' % (self.nitb.addr(), OsmoNitbCtrl.PORT))
+        self.set_child_of(nitb)
+
+    def ctrl(self):
+        return osmo_ctrl.OsmoCtrl(self.nitb.addr(), OsmoNitbCtrl.PORT)
+
+    def add_subscriber(self, imsi, msisdn, ki=None, algo=None):
+        created = False
+        if ki and not algo:
+            algo = 'comp128v1'
+
+        if algo:
+            value = '%s,%s,%s,%s' % (imsi,msisdn,algo,ki)
+        else:
+            value = '%s,%s' % (imsi, msisdn)
+
+        with osmo_ctrl.OsmoCtrl(self.nitb.addr(), OsmoNitbCtrl.PORT) as ctrl:
+            ctrl.do_set(OsmoNitbCtrl.SUBSCR_MODIFY_VAR, value)
+            data = ctrl.receive()
+            (answer, data) = ctrl.remove_ipa_ctrl_header(data)
+            answer_str = answer.decode('utf-8')
+            res = OsmoNitbCtrl.SUBSCR_MODIFY_REPLY_RE.match(answer_str)
+            if not res:
+                raise RuntimeError('Cannot create subscriber %r (answer=%r)' % (imsi, answer_str))
+            self.dbg('Created subscriber', imsi=imsi, msisdn=msisdn)
+            return True
+
+    def subscriber_list_active(self):
+        aslist_str = ""
+        with osmo_ctrl.OsmoCtrl(self.nitb.addr(), OsmoNitbCtrl.PORT) as ctrl:
+            ctrl.do_get(OsmoNitbCtrl.SUBSCR_LIST_ACTIVE_VAR)
+            # this looks like it doesn't work for long data. It's legacy code from the old osmo-gsm-tester.
+            data = ctrl.receive()
+            while (len(data) > 0):
+                (answer, data) = ctrl.remove_ipa_ctrl_header(data)
+                # the CTRL payload is bytes, decode before use
+                answer = answer.decode('utf-8').replace('\n', ' ')
+                aslist_str = answer
+            return aslist_str
+
+# vim: expandtab tabstop=4 shiftwidth=4
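
A sketch of the subscriber-registration flow these classes aim at; suite_run,
nitb and modem are placeholders created elsewhere by the suite framework, and
the polling loop is only illustrative (subscriber_attached() is still a random
stub above):

    def register_and_attach(suite_run, nitb, modem):
        # allocates the next MSISDN, then creates the subscriber on the NITB
        # via OsmoNitbCtrl.add_subscriber() on the CTRL interface
        nitb.add_subscriber(modem)
        # ofono_client.Modem stub: instruct the modem to register to the network
        modem.connect(nitb)
        while not nitb.subscriber_attached(modem):
            suite_run.poll()
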
diff --git a/src/osmo_gsm_tester/process.py b/src/osmo_gsm_tester/process.py
index 2e0ff52..4cf1b8d 100644
--- a/src/osmo_gsm_tester/process.py
+++ b/src/osmo_gsm_tester/process.py
@@ -17,7 +17,190 @@
 # You should have received a copy of the GNU Affero General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
+import os
+import time
+import subprocess
+import signal
+
+from . import log
+from .util import Dir
+
+class Process(log.Origin):
+
+    process_obj = None
+    outputs = None
+    result = None
+    killed = None
+
+    def __init__(self, name, run_dir, popen_args, **popen_kwargs):
+        self.name_str = name
+        self.set_name(name)
+        self.set_log_category(log.C_RUN)
+        self.run_dir = run_dir
+        self.popen_args = popen_args
+        self.popen_kwargs = popen_kwargs
+        self.outputs = {}
+        if not isinstance(self.run_dir, Dir):
+            self.run_dir = Dir(os.path.abspath(str(self.run_dir)))
+
+    def set_env(self, key, value):
+        env = self.popen_kwargs.get('env') or {}
+        env[key] = value
+        self.popen_kwargs['env'] = env
+
+    def make_output_log(self, name):
+        '''
+        create a non-existing log output file in run_dir to pipe stdout and
+        stderr from this process to.
+        '''
+        path = self.run_dir.new_child(name)
+        f = open(path, 'w')
+        self.dbg(path)
+        f.write('(launched: %s)\n' % time.strftime(log.LONG_DATEFMT))
+        f.flush()
+        self.outputs[name] = (path, f)
+        return f
+
+    def launch(self):
+        with self:
+
+            self.dbg('cd %r; %s %s' % (
+                    os.path.abspath(str(self.run_dir)),
+                    ' '.join(['%s=%r'%(k,v) for k,v in self.popen_kwargs.get('env', {}).items()]),
+                    ' '.join(self.popen_args)))
+
+            self.process_obj = subprocess.Popen(
+                self.popen_args,
+                stdout=self.make_output_log('stdout'),
+                stderr=self.make_output_log('stderr'),
+                shell=False,
+                cwd=self.run_dir.path,
+                **self.popen_kwargs)
+            self.set_name(self.name_str, pid=self.process_obj.pid)
+            self.log('Launched')
+
+    def _poll_termination(self, time_to_wait_for_term=5):
+        wait_step = 0.001
+        waited_time = 0
+        while True:
+            # poll returns None if proc is still running
+            self.result = self.process_obj.poll()
+            if self.result is not None:
+                return True
+            waited_time += wait_step
+            # make wait_step approach 1.0
+            wait_step = (1. + 5. * wait_step) / 6.
+            if waited_time >= time_to_wait_for_term:
+                break
+            time.sleep(wait_step)
+        return False
+
+    def terminate(self):
+        if self.process_obj is None:
+            return
+        if self.result is not None:
+            return
+
+        while True:
+            # first try SIGINT to allow stdout+stderr flushing
+            self.log('Terminating (SIGINT)')
+            os.kill(self.process_obj.pid, signal.SIGINT)
+            self.killed = signal.SIGINT
+            if self._poll_termination():
+                break
+
+            # SIGTERM maybe?
+            self.log('Terminating (SIGTERM)')
+            self.process_obj.terminate()
+            self.killed = signal.SIGTERM
+            if self._poll_termination():
+                break
+
+            # out of patience
+            self.log('Terminating (SIGKILL)')
+            self.process_obj.kill()
+            self.killed = signal.SIGKILL
+            break
+
+        self.process_obj.wait()
+        self.cleanup()
+
+    def cleanup(self):
+        self.close_output_logs()
+        if self.result == 0:
+            self.log('Terminated: ok', rc=self.result)
+        elif self.killed:
+            self.log('Terminated', rc=self.result)
+        else:
+            self.err('Terminated: ERROR', rc=self.result)
+            #self.err('stdout:\n', self.get_stdout_tail(prefix='| '), '\n')
+            self.err('stderr:\n', self.get_stderr_tail(prefix='| '), '\n')
+
+    def close_output_logs(self):
+        self.dbg('Cleanup')
+        for k, v in self.outputs.items():
+            path, f = v
+            if f:
+                f.flush()
+                f.close()
+            self.outputs[k] = (path, None)
+
+    def poll(self):
+        if self.process_obj is None:
+            return
+        if self.result is not None:
+            return
+        self.result = self.process_obj.poll()
+        if self.result is not None:
+            self.cleanup()
+
+    def get_output(self, which):
+        v = self.outputs.get(which)
+        if not v:
+            return None
+        path, f = v
+        with open(path, 'r') as f2:
+            return f2.read()
+
+    def get_output_tail(self, which, tail=10, prefix=''):
+        out = self.get_output(which).splitlines()
+        tail = min(len(out), tail)
+        return ('\n' + prefix).join(out[-tail:])
+
+    def get_stdout(self):
+        return self.get_output('stdout')
+
+    def get_stderr(self):
+        return self.get_output('stderr')
+
+    def get_stdout_tail(self, tail=10, prefix=''):
+        return self.get_output_tail('stdout', tail, prefix)
+
+    def get_stderr_tail(self, tail=10, prefix=''):
+        return self.get_output_tail('stderr', tail, prefix)
+
+    def terminated(self):
+        self.poll()
+        return self.result is not None
+
+    def wait(self):
+        self.process_obj.wait()
+        self.poll()
 
 
+class RemoteProcess(Process):
+
+    def __init__(self, remote_host, remote_cwd, *process_args, **process_kwargs):
+        super().__init__(*process_args, **process_kwargs)
+        self.remote_host = remote_host
+        self.remote_cwd = remote_cwd
+
+        # hacky: instead of just prepending ssh, i.e. piping stdout and stderr
+        # over the ssh link, we should probably run on the remote side,
+        # monitoring the process remotely.
+        self.popen_args = ['ssh', '-t', self.remote_host,
+                           'cd "%s"; %s' % (self.remote_cwd,
+                                            ' '.join(['"%s"' % arg for arg in self.popen_args]))]
+        self.dbg(self.popen_args, dir=self.run_dir, conf=self.popen_kwargs)
 
 # vim: expandtab tabstop=4 shiftwidth=4
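
A minimal sketch of the Process lifecycle, assuming util.Dir accepts a plain
path and the run directory already exists (the path and binary are examples):

    from osmo_gsm_tester import process

    proc = process.Process('sleeper', '/tmp/osmo-gsm-tester-example',
                           ('/bin/sleep', '10'), env={})
    proc.launch()               # stdout/stderr are piped to files in the run dir
    if not proc.terminated():   # polls the child without blocking
        proc.terminate()        # escalates SIGINT -> SIGTERM -> SIGKILL
    print(proc.get_stderr_tail(prefix='| '))
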
diff --git a/src/osmo_gsm_tester/resource.py b/src/osmo_gsm_tester/resource.py
index bebc82d..dc8435e 100644
--- a/src/osmo_gsm_tester/resource.py
+++ b/src/osmo_gsm_tester/resource.py
@@ -18,34 +18,443 @@
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 import os
+import time
+import copy
+import atexit
+import pprint
 
 from . import log
 from . import config
-from .utils import listdict, FileLock
+from . import util
+from . import schema
+from . import ofono_client
+from . import osmo_nitb
+from . import bts_sysmo, bts_osmotrx
 
-class Resources(log.Origin):
+from .util import is_dict, is_list
 
-    def __init__(self, config_path, lock_dir):
-        self.config_path = config_path
-        self.lock_dir = lock_dir
-        self.set_name(conf=self.config_path, lock=self.lock_dir)
+HASH_KEY = '_hash'
+RESERVED_KEY = '_reserved_by'
+USED_KEY = '_used'
 
-    def ensure_lock_dir_exists(self):
-        if not os.path.isdir(self.lock_dir):
-            os.makedirs(self.lock_dir)
+RESOURCES_CONF = 'resources.conf'
+LAST_USED_MSISDN_FILE = 'last_used_msisdn.state'
+RESERVED_RESOURCES_FILE = 'reserved_resources.state'
+
+R_NITB_IFACE = 'nitb_iface'
+R_BTS = 'bts'
+R_ARFCN = 'arfcn'
+R_MODEM = 'modem'
+R_ALL = (R_NITB_IFACE, R_BTS, R_ARFCN, R_MODEM)
+
+RESOURCES_SCHEMA = {
+        'nitb_iface[].addr': schema.IPV4,
+        'bts[].label': schema.STR,
+        'bts[].type': schema.STR,
+        'bts[].unit_id': schema.INT,
+        'bts[].addr': schema.IPV4,
+        'bts[].band': schema.BAND,
+        'bts[].trx[].hwaddr': schema.HWADDR,
+        'arfcn[].arfcn': schema.INT,
+        'arfcn[].band': schema.BAND,
+        'modem[].label': schema.STR,
+        'modem[].path': schema.STR,
+        'modem[].imsi': schema.IMSI,
+        'modem[].ki': schema.KI,
+    }
+
+WANT_SCHEMA = util.dict_add(
+    dict([('%s[].times' % r, schema.INT) for r in R_ALL]),
+    RESOURCES_SCHEMA)
+
+KNOWN_BTS_TYPES = {
+        'sysmo': bts_sysmo.SysmoBts,
+        'osmotrx': bts_osmotrx.OsmoBtsTrx,
+    }
+
+def register_bts_type(name, clazz):
+    KNOWN_BTS_TYPES[name] = clazz
+
+class ResourcesPool(log.Origin):
+    _remember_to_free = None
+    _registered_exit_handler = False
+
+    def __init__(self):
+        self.config_path = config.get_config_file(RESOURCES_CONF)
+        self.state_dir = config.get_state_dir()
+        self.set_name(conf=self.config_path, state=self.state_dir.path)
+        self.read_conf()
+
+    def read_conf(self):
+        self.all_resources = Resources(config.read(self.config_path, RESOURCES_SCHEMA))
+        self.all_resources.set_hashes()
+
+    def reserve(self, origin, want):
+        '''
+        attempt to reserve the resources specified in the dict 'want' for
+        'origin'. Obtain a lock on the resources lock dir, verify that all
+        wanted resources are available, and if yes mark them as reserved.
+
+        On success, return a reservation object which can be used to release
+        the reservation. The reservation will be freed automatically on program
+        exit, if not yet done manually.
+
+        'origin' should be an Origin() instance.
+
+        'want' is a dict matching WANT_SCHEMA, which is the same as
+        the RESOURCES_SCHEMA, except each entity that can be reserved has a 'times'
+        field added, to indicate how many of those should be reserved.
+
+        If an entry has only a 'times' set, any of the resources may be
+        reserved without further limitations.
+
+        Resources may also be requested with narrowed-down constraints.
+        This would reserve one NITB IP address, two modems, one BTS of type
+        sysmo and one of type oct, plus 2 ARFCNs in the 1800 band:
+
+         {
+           'nitb_iface': [ { 'times': 1 } ],
+           'bts': [ { 'type': 'sysmo', 'times': 1 }, { 'type': 'oct', 'times': 1 } ],
+           'arfcn': [ { 'band': 'GSM-1800', 'times': 2 } ],
+           'modem': [ { 'times': 2 } ],
+         }
+
+        A times=1 value is implicit, so the above is equivalent to:
+
+         {
+           'nitb_iface': [ {} ],
+           'bts': [ { 'type': 'sysmo' }, { 'type': 'oct' } ],
+           'arfcn': [ { 'band': 'GSM-1800', 'times': 2 } ],
+           'modem': [ { 'times': 2 } ],
+         }
+        '''
+        schema.validate(want, WANT_SCHEMA)
+
+        # replicate items that have a 'times' > 1
+        want = copy.deepcopy(want)
+        for key, item_list in want.items():
+            more_items = []
+            for item in item_list:
+                times = int(item.pop('times', 1))  # a missing 'times' means 1
+                if times and times > 1:
+                    for i in range(times - 1):
+                        more_items.append(copy.deepcopy(item))
+            item_list.extend(more_items)
+
+        origin_id = origin.origin_id()
+
+        with self.state_dir.lock(origin_id):
+            rrfile_path = self.state_dir.mk_parentdir(RESERVED_RESOURCES_FILE)
+            reserved = Resources(config.read(rrfile_path, if_missing_return={}))
+            to_be_reserved = self.all_resources.without(reserved).find(want)
+
+            to_be_reserved.mark_reserved_by(origin_id)
+
+            reserved.add(to_be_reserved)
+            config.write(rrfile_path, reserved)
+
+            self.remember_to_free(to_be_reserved)
+            return ReservedResources(self, origin, to_be_reserved)
+
+    def free(self, origin, to_be_freed):
+        with self.state_dir.lock(origin.origin_id()):
+            rrfile_path = self.state_dir.mk_parentdir(RESERVED_RESOURCES_FILE)
+            reserved = Resources(config.read(rrfile_path, if_missing_return={}))
+            reserved.drop(to_be_freed)
+            config.write(rrfile_path, reserved)
+            self.forget_freed(to_be_freed)
+
+    def register_exit_handler(self):
+        if self._registered_exit_handler:
+            return
+        atexit.register(self.clean_up_registered_resources)
+        self._registered_exit_handler = True
+
+    def unregister_exit_handler(self):
+        if not self._registered_exit_handler:
+            return
+        atexit.unregister(self.clean_up_registered_resources)
+        self._registered_exit_handler = False
+
+    def clean_up_registered_resources(self):
+        if not self._remember_to_free:
+            return
+        self.free(log.Origin('atexit.clean_up_registered_resources()'),
+                  self._remember_to_free)
+
+    def remember_to_free(self, to_be_reserved):
+        self.register_exit_handler()
+        if not self._remember_to_free:
+            self._remember_to_free = Resources()
+        self._remember_to_free.add(to_be_reserved)
+
+    def forget_freed(self, freed):
+        if freed is self._remember_to_free:
+            self._remember_to_free.clear()
+        else:
+            self._remember_to_free.drop(freed)
+        if not self._remember_to_free:
+            self.unregister_exit_handler()
+
+    def next_msisdn(self, origin):
+        origin_id = origin.origin_id()
+
+        with self.state_dir.lock(origin_id):
+            msisdn_path = self.state_dir.child(LAST_USED_MSISDN_FILE)
+            with log.Origin(msisdn_path):
+                last_msisdn = '1'
+                if os.path.exists(msisdn_path):
+                    if not os.path.isfile(msisdn_path):
+                        raise RuntimeError('path should be a file but is not: %r' % msisdn_path)
+                    with open(msisdn_path, 'r') as f:
+                        last_msisdn = f.read().strip()
+                    schema.msisdn(last_msisdn)
+
+                next_msisdn = util.msisdn_inc(last_msisdn)
+                with open(msisdn_path, 'w') as f:
+                    f.write(next_msisdn)
+                return next_msisdn
 
 
-global_resources = listdict()
+class NoResourceExn(Exception):
+    pass
 
-def register(kind, instance):
-    global global_resources
-    global_resources.add(kind, instance)
+class Resources(dict):
 
-def reserve(user, config):
-    asdf
+    def __init__(self, all_resources={}, do_copy=True):
+        if do_copy:
+            all_resources = copy.deepcopy(all_resources)
+        self.update(all_resources)
 
-def read_conf(path):
-    with open(path, 'r') as f:
-        conf = f.read()
+    def drop(self, reserved, fail_if_not_found=True):
+        # protect from modifying reserved because we're the same object
+        if reserved is self:
+            raise RuntimeError('Refusing to drop a list of resources from itself.'
+                               ' This is probably a bug where a list of Resources()'
+                               ' should have been copied but is passed as-is.'
+                               ' use Resources.clear() instead.')
+
+        for key, reserved_list in reserved.items():
+            my_list = self.get(key) or []
+
+            if my_list is reserved_list:
+                self.pop(key)
+                continue
+
+            for reserved_item in reserved_list:
+                found = False
+                reserved_hash = reserved_item.get(HASH_KEY)
+                if not reserved_hash:
+                    raise RuntimeError('Resources.drop() only works with hashed items')
+
+                for i in range(len(my_list)):
+                    my_item = my_list[i]
+                    my_hash = my_item.get(HASH_KEY)
+                    if not my_hash:
+                        raise RuntimeError('Resources.drop() only works with hashed items')
+                    if my_hash == reserved_hash:
+                        found = True
+                        my_list.pop(i)
+                        break
+
+                if fail_if_not_found and not found:
+                    raise RuntimeError('Asked to drop resource from a pool, but the'
+                                       ' resource was not found: %s = %r' % (key, reserved_item))
+
+            if not my_list:
+                self.pop(key)
+        return self
+
+    def without(self, reserved):
+        return Resources(self).drop(reserved)
+
+    def find(self, want, skip_if_marked=None, do_copy=True):
+        matches = {}
+        for key, want_list in want.items():
+          with log.Origin(want=key):
+            my_list = self.get(key) or []
+
+            log.dbg(None, None, 'Looking for', len(want_list), 'x', key, ', candidates:', len(my_list))
+
+            # Try to avoid a less constrained item snatching away a resource
+            # from a more tightly constrained requirement.
+
+            # first record all matches
+            all_matches = []
+            for want_item in want_list:
+                item_match_list = []
+                for i in range(len(my_list)):
+                    my_item = my_list[i]
+                    if skip_if_marked and my_item.get(skip_if_marked):
+                        continue
+                    if item_matches(my_item, want_item, ignore_keys=('times',)):
+                        item_match_list.append(i)
+                if not item_match_list:
+                    raise NoResourceExn('No matching resource available for %s = %r'
+                                        % (key, want_item))
+                all_matches.append( item_match_list )
+
+            if not all_matches:
+                raise NoResourceExn('No matching resource available for %s = %r'
+                                    % (key, want_list))
+
+            # figure out who gets what
+            solution = solve(all_matches)
+            picked = [ my_list[i] for i in solution if i is not None ]
+            log.dbg(None, None, 'Picked', pprint.pformat(picked))
+            matches[key] = picked
+
+        return Resources(matches, do_copy=do_copy)
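+    # Usage sketch (the kind name 'bts' and the constraint are illustrative):
+    #   pool.find({ 'bts': [{ 'type': 'osmo-bts-trx' }] })
+    # returns a Resources() containing one matching 'bts' item per requirement,
+    # or raises NoResourceExn if a requirement cannot be satisfied.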
+
+    def set_hashes(self):
+        for key, item_list in self.items():
+            for item in item_list:
+                item[HASH_KEY] = util.hash_obj(item, HASH_KEY, RESERVED_KEY, USED_KEY)
+
+    def add(self, more):
+        if more is self:
+            raise RuntimeError('adding a list of resources to itself?')
+        config.add(self, copy.deepcopy(more))
+
+    def combine(self, more_rules):
+        if more_rules is self:
+            raise RuntimeError('combining a list of resource rules with itself?')
+        config.combine(self, copy.deepcopy(more_rules))
+
+    def mark_reserved_by(self, origin_id):
+        for key, item_list in self.items():
+            for item in item_list:
+                item[RESERVED_KEY] = origin_id
+
+
+def solve(all_matches):
+    '''
+    all_matches shall be a list of index-lists.
+    all_matches[i] is the list of indexes that item i can use.
+    Return a solution so that each i gets a different index.
+    solve([ [0, 1, 2],
+            [0],
+            [0, 2] ]) == [1, 0, 2]
+    '''
+
+    def all_differ(l):
+        return len(set(l)) == len(l)
+
+    def search_in_permutations(fixed=[]):
+        idx = len(fixed)
+        for i in range(len(all_matches[idx])):
+            val = all_matches[idx][i]
+            # don't add a val that's already in the list
+            if val in fixed:
+                continue
+            l = list(fixed)
+            l.append(val)
+            if len(l) == len(all_matches):
+                # found a solution
+                return l
+            # not at the end yet, add next digit
+            r = search_in_permutations(l)
+            if r:
+                # nested search_in_permutations() call found a solution
+                return r
+        # this entire branch yielded no solution
+        return None
+
+    if not all_matches:
+        raise RuntimeError('Cannot solve: no candidates')
+
+    solution = search_in_permutations()
+    if not solution:
+        raise NoResourceExn('The requested resource requirements are not solvable %r'
+                            % all_matches)
+    return solution
+
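+# Note (illustration only): an unsolvable request raises NoResourceExn, e.g.
+#   solve([ [0], [0] ])   # both items need index 0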
+
+def contains_hash(list_of_dicts, a_hash):
+    for d in list_of_dicts:
+        if d.get(HASH_KEY) == a_hash:
+            return True
+    return False
+
+def item_matches(item, wanted_item, ignore_keys=None):
+    if is_dict(wanted_item):
+        # match up two dicts
+        if not isinstance(item, dict):
+            return False
+        for key, wanted_val in wanted_item.items():
+            if ignore_keys and key in ignore_keys:
+                continue
+            if not item_matches(item.get(key), wanted_val, ignore_keys=ignore_keys):
+                return False
+        return True
+
+    if is_list(wanted_item):
+        # multiple possible values
+        if item not in wanted_item:
+            return False
+        return True
+
+    return item == wanted_item
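+# Illustrative matching examples (sketch, not part of the API):
+#   item_matches({'type': 'osmo-bts-trx', 'band': 'GSM-1800'},
+#                {'type': 'osmo-bts-trx'})                      --> True
+#   item_matches('GSM-1800', ['GSM-1800', 'GSM-1900'])          --> True
+#   item_matches({'type': 'sysmo'}, {'type': 'osmo-bts-trx'})   --> False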
+
+
+class ReservedResources(log.Origin):
+    '''
+    After all resources have been figured out, this is the API through which a
+    test case interacts with resources. From the resources that have been
+    reserved for it, it can pick some and mark them as currently in use.
+    Functions like nitb() provide a resource by automatically picking its
+    dependencies from the so far unused (but reserved) resources.
+    '''
+
+    def __init__(self, resources_pool, origin, reserved):
+        self.resources_pool = resources_pool
+        self.origin = origin
+        self.reserved = reserved
+
+    def __repr__(self):
+        return 'resources(%s)=%s' % (self.origin.name(), pprint.pformat(self.reserved))
+
+    def get(self, kind, specifics=None):
+        if specifics is None:
+            specifics = {}
+        self.dbg('requesting use of', kind, specifics=specifics)
+        want = { kind: [specifics] }
+        available_dict = self.reserved.find(want, skip_if_marked=USED_KEY, do_copy=False)
+        available = available_dict.get(kind)
+        self.dbg(available=len(available))
+        if not available:
+            raise NoResourceExn('No unused resource found: %r%s' %
+                                (kind,
+                                 (' matching %r' % specifics) if specifics else '')
+                               )
+        pick = available[0]
+        self.dbg(using=pick)
+        assert not pick.get(USED_KEY)
+        pick[USED_KEY] = True
+        return copy.deepcopy(pick)
+
+    def put(self, item):
+        if not item.get(USED_KEY):
+            raise RuntimeError('Can only put() a resource that is used: %r' % item)
+        hash_to_put = item.get(HASH_KEY)
+        if not hash_to_put:
+            raise RuntimeError('Can only put() a resource that has a hash marker: %r' % item)
+        for key, item_list in self.reserved.items():
+            # iterate the reserved items directly; calling self.get() here
+            # would wrongly mark yet another resource as used
+            for my_item in item_list:
+                if hash_to_put == my_item.get(HASH_KEY):
+                    my_item.pop(USED_KEY)
+
+    def put_all(self):
+        for key, item_list in self.reserved.items():
+            for my_item in item_list:
+                if my_item.get(USED_KEY):
+                    my_item.pop(USED_KEY)
+
+    def free(self):
+        self.resources_pool.free(self.origin, self.reserved)
+        self.reserved = None
+
 
 # vim: expandtab tabstop=4 shiftwidth=4
diff --git a/src/osmo_gsm_tester/schema.py b/src/osmo_gsm_tester/schema.py
new file mode 100644
index 0000000..a10ddd1
--- /dev/null
+++ b/src/osmo_gsm_tester/schema.py
@@ -0,0 +1,144 @@
+# osmo_gsm_tester: validate dict structures
+#
+# Copyright (C) 2016-2017 by sysmocom - s.f.m.c. GmbH
+#
+# Author: Neels Hofmeyr <neels@hofmeyr.de>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+import re
+
+from . import log
+from .util import is_dict, is_list
+
+KEY_RE = re.compile('[a-zA-Z][a-zA-Z0-9_]*')
+IPV4_RE = re.compile(r'([0-9]{1,3}\.){3}[0-9]{1,3}')
+HWADDR_RE = re.compile('([0-9a-fA-F]{2}:){5}[0-9a-fA-F]{2}')
+IMSI_RE = re.compile('[0-9]{6,15}')
+KI_RE = re.compile('[0-9a-fA-F]{32}')
+MSISDN_RE = re.compile('[0-9]{1,15}')
+
+def match_re(name, regex, val):
+    if isinstance(val, str) and regex.fullmatch(val):
+        return
+    raise ValueError('Invalid %s: %r' % (name, val))
+
+def band(val):
+    if val in ('GSM-1800', 'GSM-1900'):
+        return
+    raise ValueError('Unknown GSM band: %r' % val)
+
+def ipv4(val):
+    match_re('IPv4 address', IPV4_RE, val)
+    els = [int(el) for el in val.split('.')]
+    if not all([el >= 0 and el <= 255 for el in els]):
+        raise ValueError('Invalid IPv4 address: %r' % val)
+
+def hwaddr(val):
+    match_re('hardware address', HWADDR_RE, val)
+
+def imsi(val):
+    match_re('IMSI', IMSI_RE, val)
+
+def ki(val):
+    match_re('KI', KI_RE, val)
+
+def msisdn(val):
+    match_re('MSISDN', MSISDN_RE, val)
+
+INT = 'int'
+STR = 'str'
+BAND = 'band'
+IPV4 = 'ipv4'
+HWADDR = 'hwaddr'
+IMSI = 'imsi'
+KI = 'ki'
+MSISDN = 'msisdn'
+SCHEMA_TYPES = {
+        INT: int,
+        STR: str,
+        BAND: band,
+        IPV4: ipv4,
+        HWADDR: hwaddr,
+        IMSI: imsi,
+        KI: ki,
+        MSISDN: msisdn,
+    }
+
+def validate(config, schema):
+    '''Make sure the given config dict adheres to the schema.
+       The schema is a dict of 'dict paths' in dot-notation mapping to a
+       permitted value type (a key of SCHEMA_TYPES). All leaf nodes are
+       validated; nesting dicts are implicit.
+
+       validate( { 'a': 123, 'b': { 'b1': 'foo', 'b2': [ 1, 2, 3 ] } },
+                 { 'a': INT,
+                   'b.b1': STR,
+                   'b.b2[]': INT } )
+
+       Raise a ValueError in case the schema is violated.
+    '''
+
+    def validate_item(path, value, schema):
+        want_type = schema.get(path)
+
+        if is_list(value):
+            if want_type:
+                raise ValueError('config item is a list, should be %r: %r' % (want_type, path))
+            path = path + '[]'
+            want_type = schema.get(path)
+
+        if not want_type:
+            if is_dict(value):
+                nest(path, value, schema)
+                return
+            if is_list(value) and value:
+                for list_v in value:
+                    validate_item(path, list_v, schema)
+                return
+            raise ValueError('config item not known: %r' % path)
+
+        if want_type not in SCHEMA_TYPES:
+            raise ValueError('unknown type %r at %r' % (want_type, path))
+
+        if is_dict(value):
+            raise ValueError('config item is dict but should be a leaf node of type %r: %r'
+                             % (want_type, path))
+
+        if is_list(value):
+            for list_v in value:
+                validate_item(path, list_v, schema)
+            return
+
+        with log.Origin(item=path):
+            type_validator = SCHEMA_TYPES.get(want_type)
+            type_validator(value)
+
+    def nest(parent_path, config, schema):
+        if parent_path:
+            parent_path = parent_path + '.'
+        else:
+            parent_path = ''
+        for k,v in config.items():
+            if not KEY_RE.fullmatch(k):
+                raise ValueError('invalid config key: %r' % k)
+            path = parent_path + k
+            validate_item(path, v, schema)
+
+    nest(None, config, schema)
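+
+# Usage sketch (key names and values are illustrative); a violation raises
+# ValueError:
+#
+#   validate({'nitb_iface': [{'addr': '10.42.42.1'}]},
+#            {'nitb_iface[].addr': IPV4})
+#   validate({'nitb_iface': [{'addr': 'not-an-ip'}]},
+#            {'nitb_iface[].addr': IPV4})   # --> ValueError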
+
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/src/osmo_gsm_tester/suite.py b/src/osmo_gsm_tester/suite.py
index fb7c34d..0b8927f 100644
--- a/src/osmo_gsm_tester/suite.py
+++ b/src/osmo_gsm_tester/suite.py
@@ -18,9 +18,12 @@
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 import os
-from . import config, log, template, utils
+import sys
+import time
+from . import config, log, template, util, resource, schema, ofono_client, osmo_nitb
+from . import test
 
-class Suite(log.Origin):
+class SuiteDefinition(log.Origin):
     '''A test suite reserves resources for a number of tests.
        Each test requires a specific number of modems, BTSs etc., which are
        reserved beforehand by a test suite. This way several test suites can be
@@ -29,14 +32,122 @@
 
     CONF_FILENAME = 'suite.conf'
 
-    CONF_SCHEMA = {
-            'resources.nitb_iface': config.INT,
-            'resources.nitb': config.INT,
-            'resources.bts': config.INT,
-            'resources.msisdn': config.INT,
-            'resources.modem': config.INT,
-            'defaults.timeout': config.STR,
-        }
+    CONF_SCHEMA = util.dict_add(
+        {
+            'defaults.timeout': schema.STR,
+        },
+        dict([('resources.%s' % k, t) for k,t in resource.WANT_SCHEMA.items()])
+        )
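+    # Roughly (assuming resource.WANT_SCHEMA maps each resource kind to its
+    # permitted count type, as the previously hard-coded schema did):
+    #   { 'defaults.timeout': schema.STR,
+    #     'resources.nitb': ..., 'resources.bts': ..., 'resources.modem': ... }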
+
+
+    def __init__(self, suite_dir):
+        self.set_log_category(log.C_CNF)
+        self.suite_dir = suite_dir
+        self.set_name(os.path.basename(self.suite_dir))
+        self.read_conf()
+
+    def read_conf(self):
+        with self:
+            self.dbg('reading %s' % SuiteDefinition.CONF_FILENAME)
+            if not os.path.isdir(self.suite_dir):
+                raise RuntimeError('No such directory: %r' % self.suite_dir)
+            self.conf = config.read(os.path.join(self.suite_dir,
+                                                 SuiteDefinition.CONF_FILENAME),
+                                    SuiteDefinition.CONF_SCHEMA)
+            self.load_tests()
+
+
+    def load_tests(self):
+        with self:
+            self.tests = []
+            for basename in sorted(os.listdir(self.suite_dir)):
+                if not basename.endswith('.py'):
+                    continue
+                self.tests.append(Test(self, basename))
+
+    def add_test(self, test):
+        with self:
+            if not isinstance(test, Test):
+                raise ValueError('add_test(): pass a Test() instance, not %s' % type(test))
+            if test.suite is None:
+                test.suite = self
+            if test.suite is not self:
+                raise ValueError('add_test(): test already belongs to another suite')
+            self.tests.append(test)
+
+
+
+class Test(log.Origin):
+
+    def __init__(self, suite, test_basename):
+        self.suite = suite
+        self.basename = test_basename
+        self.path = os.path.join(self.suite.suite_dir, self.basename)
+        super().__init__(self.path)
+        self.set_name(self.basename)
+        self.set_log_category(log.C_TST)
+
+    def run(self, suite_run):
+        assert self.suite is suite_run.definition
+        with self:
+            test.setup(suite_run, self, ofono_client)
+            success = False
+            try:
+                self.log('START')
+                with self.redirect_stdout():
+                    util.run_python_file('%s.%s' % (self.suite.name(), self.name()),
+                                         self.path)
+                    success = True
+            except resource.NoResourceExn:
+                self.err('Current resource state:\n', repr(suite_run.reserved_resources))
+                raise
+            finally:
+                if success:
+                    self.log('PASS')
+                else:
+                    self.log('FAIL')
+
+    def name(self):
+        l = log.get_line_for_src(self.path)
+        if l is not None:
+            return '%s:%s' % (self._name, l)
+        return super().name()
+
+class SuiteRun(log.Origin):
+
+    trial = None
+    resources_pool = None
+    reserved_resources = None
+    _resource_requirements = None
+    _config = None
+    _processes = None
+
+    def __init__(self, current_trial, suite_definition, scenarios=[]):
+        self.trial = current_trial
+        self.definition = suite_definition
+        self.scenarios = scenarios
+        self.set_name(suite_definition.name())
+        self.set_log_category(log.C_TST)
+        self.resources_pool = resource.ResourcesPool()
+
+    def combined(self, conf_name):
+        combination = self.definition.conf.get(conf_name) or {}
+        for scenario in self.scenarios:
+            c = scenario.get(conf_name)
+            if c is None:
+                continue
+            config.combine(combination, c)
+        return combination
+
+    def resource_requirements(self):
+        if self._resource_requirements is None:
+            self._resource_requirements = self.combined('resources')
+        return self._resource_requirements
+
+    def config(self):
+        if self._config is None:
+            self._config = self.combined('config')
+        return self._config
 
     class Results:
         def __init__(self):
@@ -54,97 +165,162 @@
             self.all_passed = bool(self.passed) and not bool(self.failed)
             return self
 
-    def __init__(self, suite_dir):
-        self.set_log_category(log.C_CNF)
-        self.suite_dir = suite_dir
-        self.set_name(os.path.basename(self.suite_dir))
-        self.read_conf()
+        def __str__(self):
+            if self.failed:
+                return 'FAIL: %d of %d tests failed:\n  %s' % (
+                       len(self.failed),
+                       len(self.failed) + len(self.passed),
+                       '\n  '.join([t.name() for t in self.failed]))
+            if not self.passed:
+                return 'no tests were run.'
+            return 'pass: all %d tests passed.' % len(self.passed)
 
-    def read_conf(self):
+    def reserve_resources(self):
+        if self.reserved_resources:
+            raise RuntimeError('Attempt to reserve resources twice for a SuiteRun')
+        self.log('reserving resources...')
         with self:
-            if not os.path.isdir(self.suite_dir):
-                raise RuntimeError('No such directory: %r' % self.suite_dir)
-            self.conf = config.read(os.path.join(self.suite_dir,
-                                                 Suite.CONF_FILENAME),
-                                    Suite.CONF_SCHEMA)
-            self.load_tests()
+            self.reserved_resources = self.resources_pool.reserve(self, self.resource_requirements())
 
-    def load_tests(self):
-        with self:
-            self.tests = []
-            for basename in os.listdir(self.suite_dir):
-                if not basename.endswith('.py'):
-                    continue
-                self.tests.append(Test(self, basename))
-
-    def add_test(self, test):
-        with self:
-            if not isinstance(test, Test):
-                raise ValueError('add_test(): pass a Test() instance, not %s' % type(test))
-            if test.suite is None:
-                test.suite = self
-            if test.suite is not self:
-                raise ValueError('add_test(): test already belongs to another suite')
-            self.tests.append(test)
-
-    def run_tests(self):
-        results = Suite.Results()
-        for test in self.tests:
+    def run_tests(self, names=None):
+        if not self.reserved_resources:
+            self.reserve_resources()
+        results = SuiteRun.Results()
+        for test in self.definition.tests:
+            if names and not test.name() in names:
+                continue
             self._run_test(test, results)
-        return results.conclude()
-
-    def run_tests_by_name(self, *names):
-        results = Suite.Results()
-        for name in names:
-            basename = name
-            if not basename.endswith('.py'):
-                basename = name + '.py'
-            for test in self.tests:
-                if basename == test.basename:
-                    self._run_test(test, results)
-                    break
+        self.stop_processes()
         return results.conclude()
 
     def _run_test(self, test, results):
         try:
             with self:
-                test.run()
+                test.run(self)
             results.add_pass(test)
         except:
             results.add_fail(test)
             self.log_exn()
 
-class Test(log.Origin):
+    def remember_to_stop(self, process):
+        if self._processes is None:
+            self._processes = []
+        self._processes.append(process)
 
-    def __init__(self, suite, test_basename):
-        self.suite = suite
-        self.basename = test_basename
-        self.set_name(self.basename)
-        self.set_log_category(log.C_TST)
-        self.path = os.path.join(self.suite.suite_dir, self.basename)
-        with self:
-            with open(self.path, 'r') as f:
-                self.script = f.read()
+    def stop_processes(self):
+        if not self._processes:
+            return
+        for process in self._processes:
+            process.terminate()
 
-    def run(self):
-        with self:
-            self.code = compile(self.script, self.path, 'exec')
-            with self.redirect_stdout():
-                exec(self.code, self.test_globals())
-                self._success = True
+    def nitb_iface(self):
+        return self.reserved_resources.get(resource.R_NITB_IFACE)
 
-    def test_globals(self):
-        test_globals = {
-            'this': utils.dict2obj({
-                    'suite': self.suite.suite_dir,
-                    'test': self.basename,
-                }),
-            'resources': utils.dict2obj({
-                }),
-        }
-        return test_globals
+    def nitb(self, nitb_iface=None):
+        if nitb_iface is None:
+            nitb_iface = self.nitb_iface()
+        return osmo_nitb.OsmoNitb(self, nitb_iface)
 
-def load(suite_dir):
-    return Suite(suite_dir)
+    def bts(self):
+        return bts_obj(self, self.reserved_resources.get(resource.R_BTS))
+
+    def modem(self):
+        return modem_obj(self.reserved_resources.get(resource.R_MODEM))
+
+    def msisdn(self):
+        msisdn = self.resources_pool.next_msisdn(self)
+        self.log('using MSISDN', msisdn)
+        return msisdn
+
+    def wait(self, condition, *condition_args, timeout=300, **condition_kwargs):
+        if not timeout or timeout < 0:
+            raise RuntimeError('wait() *must* time out at some point. timeout=%r' % timeout)
+
+        started = time.time()
+        while True:
+            self.poll()
+            if condition(*condition_args, **condition_kwargs):
+                return True
+            waited = time.time() - started
+            if waited > timeout:
+                return False
+            time.sleep(.1)
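+    # Usage sketch (condition and timeout are illustrative): poll processes
+    # while waiting for an arbitrary condition; returns False on timeout.
+    #   ok = suite_run.wait(lambda: os.path.exists('/tmp/ready'), timeout=10)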
+
+    def sleep(self, seconds):
+        self.wait(lambda: False, timeout=seconds)
+
+    def poll(self):
+        if self._processes:
+            for process in self._processes:
+                process.poll()
+        ofono_client.poll()
+
+    def prompt(self, *msgs, **msg_details):
+        'ask for user interaction. Do not use in tests that should run automatically!'
+        if msg_details:
+            msgs = list(msgs)
+            msgs.append('{%s}' %
+                        (', '.join(['%s=%r' % (k,v)
+                                    for k,v in sorted(msg_details.items())])))
+        msg = ' '.join(msgs) or 'Hit Enter to continue'
+        self.log('prompt:', msg)
+        sys.__stdout__.write(msg)
+        sys.__stdout__.write('\n> ')
+        sys.__stdout__.flush()
+        entered = util.input_polling(self.poll)
+        self.log('prompt entered:', entered)
+        return entered
+
+
+loaded_suite_definitions = {}
+
+def load(suite_name):
+    global loaded_suite_definitions
+
+    suite = loaded_suite_definitions.get(suite_name)
+    if suite is not None:
+        return suite
+
+    suites_dir = config.get_suites_dir()
+    suite_dir = suites_dir.child(suite_name)
+    if not suites_dir.exists(suite_name):
+        raise RuntimeError('Suite not found: %r in %r' % (suite_name, suites_dir))
+    if not suites_dir.isdir(suite_name):
+        raise RuntimeError('Suite name found, but not a directory: %r' % (suite_dir))
+
+    suite_def = SuiteDefinition(suite_dir)
+    loaded_suite_definitions[suite_name] = suite_def
+    return suite_def
+
+def parse_suite_scenario_str(suite_scenario_str):
+    tokens = suite_scenario_str.split(':')
+    if len(tokens) > 2:
+        raise RuntimeError('invalid combination string: %r' % suite_scenario_str)
+
+    suite_name = tokens[0]
+    if len(tokens) <= 1:
+        scenario_names = []
+    else:
+        scenario_names = tokens[1].split('+')
+
+    return suite_name, scenario_names
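+# Illustrative example, matching the combination syntax shown in run_once.py:
+#   parse_suite_scenario_str('sms_tests:dyn_ts+bts_sysmo')
+#   --> ('sms_tests', ['dyn_ts', 'bts_sysmo'])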
+
+def load_suite_scenario_str(suite_scenario_str):
+    suite_name, scenario_names = parse_suite_scenario_str(suite_scenario_str)
+    suite = load(suite_name)
+    scenarios = [config.get_scenario(scenario_name) for scenario_name in scenario_names]
+    return (suite, scenarios)
+
+def bts_obj(suite_run, conf):
+    bts_type = conf.get('type')
+    log.dbg(None, None, 'create BTS object', type=bts_type)
+    bts_class = resource.KNOWN_BTS_TYPES.get(bts_type)
+    if bts_class is None:
+        raise RuntimeError('No such BTS type is defined: %r' % bts_type)
+    return bts_class(suite_run, conf)
+
+def modem_obj(conf):
+    log.dbg(None, None, 'create Modem object', conf=conf)
+    return ofono_client.Modem(conf)
 
 # vim: expandtab tabstop=4 shiftwidth=4
diff --git a/src/osmo_gsm_tester/template.py b/src/osmo_gsm_tester/template.py
index 434ab62..c00bdc8 100644
--- a/src/osmo_gsm_tester/template.py
+++ b/src/osmo_gsm_tester/template.py
@@ -23,7 +23,7 @@
 from mako.lookup import TemplateLookup
 
 from . import log
-from .utils import dict2obj
+from .util import dict2obj
 
 _lookup = None
 _logger = log.Origin('no templates dir set')
@@ -47,10 +47,12 @@
     global _lookup
     if _lookup is None:
         set_templates_dir()
-    with _logger:
-        tmpl_name = name + '.tmpl'
+    tmpl_name = name + '.tmpl'
+    with log.Origin(tmpl_name):
         template = _lookup.get_template(tmpl_name)
         _logger.dbg('rendering', tmpl_name)
+
+        line_info_name = tmpl_name.replace('-', '_').replace('.', '_')
         return template.render(**dict2obj(values))
 
 # vim: expandtab tabstop=4 shiftwidth=4
diff --git a/src/osmo_gsm_tester/templates/osmo-bts-sysmo.cfg.tmpl b/src/osmo_gsm_tester/templates/osmo-bts-sysmo.cfg.tmpl
new file mode 100644
index 0000000..724bba8
--- /dev/null
+++ b/src/osmo_gsm_tester/templates/osmo-bts-sysmo.cfg.tmpl
@@ -0,0 +1,18 @@
+! Configuration rendered by osmo-gsm-tester
+log stderr
+  logging color 1
+  logging timestamp 1
+  logging print extended-timestamp 1
+  logging print category 1
+  logging level all debug
+  logging level l1c info
+  logging level linp info
+!
+phy 0
+ instance 0
+bts 0
+ band ${osmo_bts_sysmo.band}
+ ipa unit-id ${osmo_bts_sysmo.ipa_unit_id} 0
+ oml remote-ip ${osmo_bts_sysmo.oml_remote_ip}
+ trx 0
+  phy 0 instance 0
diff --git a/src/osmo_gsm_tester/templates/osmo-bts-trx.cfg.tmpl b/src/osmo_gsm_tester/templates/osmo-bts-trx.cfg.tmpl
new file mode 100644
index 0000000..d416361
--- /dev/null
+++ b/src/osmo_gsm_tester/templates/osmo-bts-trx.cfg.tmpl
@@ -0,0 +1,22 @@
+! Configuration rendered by osmo-gsm-tester
+log stderr
+  logging color 1
+  logging timestamp 1
+  logging print extended-timestamp 1
+  logging print category 1
+  logging level all debug
+  logging level l1c info
+  logging level linp info
+!
+phy 0
+ instance 0
+ osmotrx rx-gain 25
+bts 0
+ band ${osmo_bts_trx.band}
+ ipa unit-id ${osmo_bts_trx.ipa_unit_id} 0
+ oml remote-ip ${osmo_bts_trx.oml_remote_ip}
+ settsc
+ gsmtap-sapi ccch
+ gsmtap-sapi pdtch
+ trx 0
+  phy 0 instance 0
diff --git a/src/osmo_gsm_tester/templates/osmo-bts.cfg.tmpl b/src/osmo_gsm_tester/templates/osmo-bts.cfg.tmpl
deleted file mode 100644
index 20fa57f..0000000
--- a/src/osmo_gsm_tester/templates/osmo-bts.cfg.tmpl
+++ /dev/null
@@ -1,21 +0,0 @@
-!
-! OsmoBTS () configuration saved from vty
-!!
-!
-log stderr
-  logging color 1
-  logging timestamp 1
-  logging print extended-timestamp 1
-  logging print category 1
-  logging level all debug
-  logging level l1c info
-  logging level linp info
-!
-phy 0
- instance 0
-bts 0
- band {band}
- ipa unit-id {ipa_unit_id} 0
- oml remote-ip {oml_remote_ip}
- trx 0
-  phy 0 instance 0
diff --git a/src/osmo_gsm_tester/templates/osmo-nitb.cfg.tmpl b/src/osmo_gsm_tester/templates/osmo-nitb.cfg.tmpl
index 3404b7f..e7dc119 100644
--- a/src/osmo_gsm_tester/templates/osmo-nitb.cfg.tmpl
+++ b/src/osmo_gsm_tester/templates/osmo-nitb.cfg.tmpl
@@ -1,6 +1,4 @@
-!
-! OpenBSC configuration saved from vty
-!
+! Configuration rendered by osmo-gsm-tester
 password foo
 !
 log stderr
@@ -12,19 +10,19 @@
 !
 line vty
  no login
- bind ${vty_bind_ip}
+ bind ${nitb_iface.addr}
 !
 e1_input
  e1_line 0 driver ipa
- ipa bind ${abis_bind_ip}
+ ipa bind ${nitb_iface.addr}
 network
- network country code ${mcc}
- mobile network code ${mnc}
- short name ${net_name_short}
- long name ${net_name_long}
- auth policy ${net_auth_policy}
+ network country code ${nitb.net.mcc}
+ mobile network code ${nitb.net.mnc}
+ short name ${nitb.net.short_name}
+ long name ${nitb.net.long_name}
+ auth policy ${nitb.net.auth_policy}
  location updating reject cause 13
- encryption a5 ${encryption}
+ encryption ${nitb.net.encryption}
  neci 1
  rrlp mode none
  mm info 1
@@ -46,16 +44,7 @@
  timer t3117 0
  timer t3119 0
  timer t3141 0
-smpp
- local-tcp-ip ${smpp_bind_ip} 2775
- system-id test
- policy closed
- esme test
-  password test
-  default-route
-ctrl
- bind ${ctrl_bind_ip}
-%for bts in bts_list:
+%for bts in nitb.net.bts_list:
  bts ${loop.index}
   type ${bts.type}
   band ${bts.band}
@@ -69,7 +58,7 @@
   channel allocator ascending
   rach tx integer 9
   rach max transmission 7
-  ip.access unit_id ${bts.unit_id} 0
+  ip.access unit_id ${bts.ipa_unit_id} 0
   oml ip.access stream_id ${bts.stream_id} line 0
   gprs mode none
 % for trx in bts.trx_list:
@@ -85,3 +74,12 @@
 %  endfor
 % endfor
 %endfor
+smpp
+ local-tcp-ip ${nitb_iface.addr} 2775
+ system-id test
+ policy closed
+ esme test
+  password test
+  default-route
+ctrl
+ bind ${nitb_iface.addr}
diff --git a/src/osmo_gsm_tester/test.py b/src/osmo_gsm_tester/test.py
index fd5a640..e52b545 100644
--- a/src/osmo_gsm_tester/test.py
+++ b/src/osmo_gsm_tester/test.py
@@ -1,4 +1,4 @@
-# osmo_gsm_tester: prepare a test run and provide test API
+# osmo_gsm_tester: context for individual test runs
 #
 # Copyright (C) 2016-2017 by sysmocom - s.f.m.c. GmbH
 #
@@ -17,27 +17,33 @@
 # You should have received a copy of the GNU Affero General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-import sys, os
-import pprint
-import inspect
+# These will be initialized before each test run.
+# A test script can thus establish its context by doing:
+# from osmo_gsm_tester.test import *
+trial = None
+suite = None
+test = None
+resources = None
+log = None
+dbg = None
+err = None
+wait = None
+sleep = None
+poll = None
+prompt = None
 
-from . import suite as _suite
-from . import log
-from . import resource
+def setup(suite_run, _test, ofono_client):
+    global trial, suite, test, resources, log, dbg, err, wait, sleep, poll, prompt
+    trial = suite_run.trial
+    suite = suite_run
+    test = _test
+    resources = suite_run.reserved_resources
+    log = test.log
+    dbg = test.dbg
+    err = test.err
+    wait = suite_run.wait
+    sleep = suite_run.sleep
+    poll = suite_run.poll
+    prompt = suite_run.prompt
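+
+# A test script can then look roughly like this sketch (suite.nitb(), suite.bts()
+# and log() are provided by SuiteRun and this module; the test body itself is
+# purely illustrative):
+#
+#   from osmo_gsm_tester.test import *
+#   log('starting my test')
+#   nitb = suite.nitb()
+#   bts = suite.bts()
+#   ...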
 
-# load the configuration for the test
-suite = _suite.Suite(sys.path[0])
-test = _suite.Test(suite, os.path.basename(inspect.stack()[-1][1]))
-
-def test_except_hook(*exc_info):
-    log.exn_add_info(exc_info, test)
-    log.exn_add_info(exc_info, suite)
-    log.log_exn(exc_info=exc_info)
-
-sys.excepthook = test_except_hook
-
-orig_stdout, sys.stdout = sys.stdout, test
-
-resources = {}
-	
 # vim: expandtab tabstop=4 shiftwidth=4
diff --git a/src/osmo_gsm_tester/trial.py b/src/osmo_gsm_tester/trial.py
new file mode 100644
index 0000000..a938971
--- /dev/null
+++ b/src/osmo_gsm_tester/trial.py
@@ -0,0 +1,160 @@
+# osmo_gsm_tester: trial: directory of binaries to be tested
+#
+# Copyright (C) 2016-2017 by sysmocom - s.f.m.c. GmbH
+#
+# Author: Neels Hofmeyr <neels@hofmeyr.de>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import time
+import shutil
+import tarfile
+
+from . import log, util
+
+FILE_MARK_TAKEN = 'taken'
+FILE_CHECKSUMS = 'checksums.md5'
+TIMESTAMP_FMT = '%Y-%m-%d_%H-%M-%S'
+FILE_LAST_RUN = 'last_run'
+
+class Trial(log.Origin):
+    path = None
+    dir = None
+    _run_dir = None
+    bin_tars = None
+
+    @staticmethod
+    def next(trials_dir):
+
+        with trials_dir.lock('Trial.next'):
+            trials = [e for e in trials_dir.children()
+                      if trials_dir.isdir(e) and not trials_dir.exists(e, FILE_MARK_TAKEN)]
+            if not trials:
+                return None
+            # sort by time to get the one that waited longest
+            trials.sort(key=lambda e: os.path.getmtime(trials_dir.child(e)))
+            next_trial = trials[0]
+            return Trial(trials_dir.child(next_trial)).take()
+
+    def __init__(self, trial_dir):
+        self.path = trial_dir
+        self.set_name(self.path)
+        self.set_log_category(log.C_TST)
+        self.dir = util.Dir(self.path)
+        self.inst_dir = util.Dir(self.dir.child('inst'))
+        self.bin_tars = []
+
+    def __repr__(self):
+        return self.name()
+
+    def __enter__(self):
+        self.log('Trial start')
+        super().__enter__()
+
+    def __exit__(self, *exc_info):
+        super().__exit__(*exc_info)
+        self.log('Trial end')
+
+    def take(self):
+        self.dir.touch(FILE_MARK_TAKEN)
+        return self
+
+    def get_run_dir(self):
+        if self._run_dir is not None:
+            return self._run_dir
+        self._run_dir = util.Dir(self.dir.new_child('run.%s' % time.strftime(TIMESTAMP_FMT)))
+        self._run_dir.mkdir()
+
+        last_run = self.dir.child(FILE_LAST_RUN)
+        if os.path.islink(last_run):
+            os.remove(last_run)
+        if not os.path.exists(last_run):
+            os.symlink(self.dir.rel_path(self._run_dir.path), last_run)
+        return self._run_dir
+
+    def verify(self):
+        "verify checksums"
+
+        if not self.dir.exists():
+            raise RuntimeError('Trial dir does not exist: %r' % self.dir)
+        if not self.dir.isdir():
+            raise RuntimeError('Trial dir is not a dir: %r' % self.dir)
+
+        checksums = self.dir.child(FILE_CHECKSUMS)
+        if not self.dir.isfile(FILE_CHECKSUMS):
+            raise RuntimeError('No checksums file in trial dir: %r' % checksums)
+
+        with open(checksums, 'r') as f:
+            line_nr = 0
+            for line in [l.strip() for l in f.readlines()]:
+                line_nr += 1
+                if not line:
+                    continue
+                md5, filename = line.split('  ')
+                file_path = self.dir.child(filename)
+
+                if not self.dir.isfile(filename):
+                    raise RuntimeError('File listed in checksums file but missing in trial dir:'
+                                       ' %r vs. %r line %d' % (file_path, checksums, line_nr))
+
+                if md5 != util.md5_of_file(file_path):
+                    raise RuntimeError('Checksum mismatch for %r vs. %r line %d'
+                                       % (file_path, checksums, line_nr))
+
+                if filename.endswith('.tgz'):
+                    self.bin_tars.append(filename)
+
+    def has_bin_tar(self, bin_name):
+        bin_tar_start = '%s.' % bin_name
+        matches = [t for t in self.bin_tars if t.startswith(bin_tar_start)]
+        self.dbg(bin_name=bin_name, matches=matches)
+        if not matches:
+            return None
+        if len(matches) > 1:
+            raise RuntimeError('More than one match for bin name %r: %r' % (bin_name, matches))
+        bin_tar = matches[0]
+        bin_tar_path = self.dir.child(bin_tar)
+        if not os.path.isfile(bin_tar_path):
+            raise RuntimeError('Not a file or missing: %r' % bin_tar_path)
+        return bin_tar_path
+
+    def get_inst(self, bin_name):
+        bin_tar = self.has_bin_tar(bin_name)
+        if not bin_tar:
+            return None
+        inst_dir = self.inst_dir.child(bin_name)
+
+        if os.path.isdir(inst_dir):
+            # already unpacked
+            return inst_dir
+
+        t = None
+        try:
+            os.makedirs(inst_dir)
+            t = tarfile.open(bin_tar)
+            t.extractall(inst_dir)
+            return inst_dir
+
+        except:
+            shutil.rmtree(inst_dir)
+            raise
+        finally:
+            if t:
+                try:
+                    t.close()
+                except:
+                    pass
+
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/src/osmo_gsm_tester/util.py b/src/osmo_gsm_tester/util.py
new file mode 100644
index 0000000..61d0f6e
--- /dev/null
+++ b/src/osmo_gsm_tester/util.py
@@ -0,0 +1,332 @@
+# osmo_gsm_tester: language snippets
+#
+# Copyright (C) 2016-2017 by sysmocom - s.f.m.c. GmbH
+#
+# Author: Neels Hofmeyr <neels@hofmeyr.de>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import sys
+import time
+import fcntl
+import hashlib
+import tempfile
+import shutil
+import atexit
+import threading
+import importlib.util
+import tty
+import termios
+
+
+class listdict:
+    'a dict of lists { "a": [1, 2, 3],  "b": [1, 2] }'
+    def __getattr__(ld, name):
+        if name == 'add':
+            return ld.__getattribute__(name)
+        return ld.__dict__.__getattribute__(name)
+
+    def add(ld, name, item):
+        l = ld.__dict__.get(name)
+        if not l:
+            l = []
+            ld.__dict__[name] = l
+        l.append(item)
+        return l
+
+    def add_dict(ld, d):
+        for k,v in d.items():
+            ld.add(k, v)
+
+    def __setitem__(ld, name, val):
+        return ld.__dict__.__setitem__(name, val)
+
+    def __getitem__(ld, name):
+        return ld.__dict__.__getitem__(name)
+
+    def __str__(ld):
+        return ld.__dict__.__str__()
+
+
+class DictProxy:
+    '''
+    allow accessing dict entries like object members
+    syntactical sugar, adapted from http://stackoverflow.com/a/31569634
+    so that e.g. templates can do ${bts.member} instead of ${bts['member']}
+    '''
+    def __init__(self, obj):
+        self.obj = obj
+
+    def __getitem__(self, key):
+        return dict2obj(self.obj[key])
+
+    def __getattr__(self, key):
+        try:
+            return dict2obj(getattr(self.obj, key))
+        except AttributeError:
+            try:
+                return self[key]
+            except KeyError:
+                raise AttributeError(key)
+
+class ListProxy:
+    'allow nesting for DictProxy'
+    def __init__(self, obj):
+        self.obj = obj
+
+    def __getitem__(self, key):
+        return dict2obj(self.obj[key])
+
+def dict2obj(value):
+    if isinstance(value, dict):
+        return DictProxy(value)
+    if isinstance(value, (tuple, list)):
+        return ListProxy(value)
+    return value
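+# Example (illustrative): templates can write ${bts.band} because
+#   dict2obj({'bts': {'band': 'GSM-1800'}}).bts.band == 'GSM-1800'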
+
+
+class FileLock:
+    def __init__(self, path, owner):
+        self.path = path
+        self.owner = owner
+        self.fd = None
+
+    def __enter__(self):
+        if self.fd is not None:
+            return
+        self.fd = os.open(self.path, os.O_CREAT | os.O_WRONLY | os.O_TRUNC)
+        fcntl.flock(self.fd, fcntl.LOCK_EX)
+        os.truncate(self.fd, 0)
+        os.write(self.fd, str(self.owner).encode('utf-8'))
+        os.fsync(self.fd)
+
+    def __exit__(self, *exc_info):
+        #fcntl.flock(self.fd, fcntl.LOCK_UN)
+        os.truncate(self.fd, 0)
+        os.fsync(self.fd)
+        os.close(self.fd)
+        self.fd = None
+
+    def lock(self):
+        self.__enter__()
+
+    def unlock(self):
+        self.__exit__()
+
+
+class Dir():
+    LOCK_FILE = 'lock'
+
+    def __init__(self, path):
+        self.path = path
+        self.lock_path = os.path.join(self.path, Dir.LOCK_FILE)
+
+    def lock(self, origin_id):
+        '''
+        return lock context, usage:
+
+          with my_dir.lock(origin):
+              read_from(my_dir.child('foo.txt'))
+              write_to(my_dir.child('bar.txt'))
+        '''
+        self.mkdir()
+        return FileLock(self.lock_path, origin_id)
+
+    @staticmethod
+    def ensure_abs_dir_exists(*path_elements):
+        l = len(path_elements)
+        if l < 1:
+            raise RuntimeError('Cannot create empty path')
+        if l == 1:
+            path = path_elements[0]
+        else:
+            path = os.path.join(*path_elements)
+        if not os.path.isdir(path):
+            os.makedirs(path)
+
+    def child(self, *rel_path):
+        if not rel_path:
+            return self.path
+        return os.path.join(self.path, *rel_path)
+
+    def mk_parentdir(self, *rel_path):
+        child = self.child(*rel_path)
+        child_parent = os.path.dirname(child)
+        Dir.ensure_abs_dir_exists(child_parent)
+        return child
+
+    def mkdir(self, *rel_path):
+        child = self.child(*rel_path)
+        Dir.ensure_abs_dir_exists(child)
+        return child
+
+    def children(self):
+        return os.listdir(self.path)
+
+    def exists(self, *rel_path):
+        return os.path.exists(self.child(*rel_path))
+
+    def isdir(self, *rel_path):
+        return os.path.isdir(self.child(*rel_path))
+
+    def isfile(self, *rel_path):
+        return os.path.isfile(self.child(*rel_path))
+
+    def new_child(self, *rel_path):
+        attempt = 1
+        prefix, suffix = os.path.splitext(self.child(*rel_path))
+        rel_path_fmt = '%s%%s%s' % (prefix, suffix)
+        while True:
+            path = rel_path_fmt % (('_%d'%attempt) if attempt > 1 else '')
+            if not os.path.exists(path):
+                break
+            attempt += 1
+            continue
+        Dir.ensure_abs_dir_exists(os.path.dirname(path))
+        return path
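+    # e.g. if 'run.log' already exists, new_child('run.log') yields a path
+    # ending in 'run_2.log' (the attempt number goes before the suffix).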
+
+    def rel_path(self, path):
+        return os.path.relpath(path, self.path)
+
+    def touch(self, *rel_path):
+        touch_file(self.child(*rel_path))
+
+    def new_file(self, *rel_path):
+        path = self.new_child(*rel_path)
+        touch_file(path)
+        return path
+
+    def new_dir(self, *rel_path):
+        path = self.new_child(*rel_path)
+        Dir.ensure_abs_dir_exists(path)
+        return path
+
+    def __str__(self):
+        return self.path
+    def __repr__(self):
+        return self.path
+
+def touch_file(path):
+    with open(path, 'a'):
+        pass
+
+def is_dict(l):
+    return isinstance(l, dict)
+
+def is_list(l):
+    return isinstance(l, (list, tuple))
+
+
+def dict_add(a, *b, **c):
+    for bb in b:
+        a.update(bb)
+    a.update(c)
+    return a
+
+def _hash_recurse(acc, obj, ignore_keys):
+    if is_dict(obj):
+        for key, val in sorted(obj.items()):
+            if key in ignore_keys:
+                continue
+            _hash_recurse(acc, val, ignore_keys)
+        return
+
+    if is_list(obj):
+        for item in obj:
+            _hash_recurse(acc, item, ignore_keys)
+        return
+
+    acc.update(str(obj).encode('utf-8'))
+
+def hash_obj(obj, *ignore_keys):
+    acc = hashlib.sha1()
+    _hash_recurse(acc, obj, ignore_keys)
+    return acc.hexdigest()
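+# Resources.set_hashes() in resource.py uses this as
+#   util.hash_obj(item, HASH_KEY, RESERVED_KEY, USED_KEY)
+# so that reservation/usage markers do not affect an item's identity hash.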
+
+
+def md5(of_content):
+    if isinstance(of_content, str):
+        of_content = of_content.encode('utf-8')
+    return hashlib.md5(of_content).hexdigest()
+
+def md5_of_file(path):
+    with open(path, 'rb') as f:
+        return md5(f.read())
+
+_tempdir = None
+
+def get_tempdir(remove_on_exit=True):
+    global _tempdir
+    if _tempdir is not None:
+        return _tempdir
+    _tempdir = tempfile.mkdtemp()
+    if remove_on_exit:
+        atexit.register(lambda: shutil.rmtree(_tempdir))
+    return _tempdir
+
+
+if hasattr(importlib.util, 'module_from_spec'):
+    def run_python_file(module_name, path):
+        spec = importlib.util.spec_from_file_location(module_name, path)
+        spec.loader.exec_module( importlib.util.module_from_spec(spec) )
+else:
+    from importlib.machinery import SourceFileLoader
+    def run_python_file(module_name, path):
+        SourceFileLoader(module_name, path).load_module()
+
+def msisdn_inc(msisdn_str):
+    'add 1 and preserve leading zeros'
+    return ('%%0%dd' % len(msisdn_str)) % (int(msisdn_str) + 1)
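+# e.g. msisdn_inc('0009') == '0010';  msisdn_inc('999') == '1000'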
+
+class polling_stdin:
+    def __init__(self, stream):
+        self.stream = stream
+        self.fd = self.stream.fileno()
+    def __enter__(self):
+        self.original_stty = termios.tcgetattr(self.stream)
+        tty.setcbreak(self.stream)
+        self.orig_fl = fcntl.fcntl(self.fd, fcntl.F_GETFL)
+        fcntl.fcntl(self.fd, fcntl.F_SETFL, self.orig_fl | os.O_NONBLOCK)
+    def __exit__(self, *args):
+        fcntl.fcntl(self.fd, fcntl.F_SETFL, self.orig_fl)
+        termios.tcsetattr(self.stream, termios.TCSANOW, self.original_stty)
+
+def input_polling(poll_func, stream=None):
+    if stream is None:
+        stream = sys.stdin
+    unbuffered_stdin = os.fdopen(stream.fileno(), 'rb', buffering=0)
+    try:
+        with polling_stdin(unbuffered_stdin):
+            acc = []
+            while True:
+                poll_func()
+                got = unbuffered_stdin.read(1)
+                if got and len(got):
+                    try:
+                        # this is hacky: can't deal with multibyte sequences
+                        got_str = got.decode('utf-8')
+                    except:
+                        got_str = '?'
+                    acc.append(got_str)
+                    sys.__stdout__.write(got_str)
+                    sys.__stdout__.flush()
+                    if '\n' in got_str:
+                        return ''.join(acc)
+                time.sleep(.1)
+    finally:
+        unbuffered_stdin.close()
+
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/src/osmo_gsm_tester/utils.py b/src/osmo_gsm_tester/utils.py
deleted file mode 100644
index 9992d44..0000000
--- a/src/osmo_gsm_tester/utils.py
+++ /dev/null
@@ -1,118 +0,0 @@
-# osmo_gsm_tester: language snippets
-#
-# Copyright (C) 2016-2017 by sysmocom - s.f.m.c. GmbH
-#
-# Author: Neels Hofmeyr <neels@hofmeyr.de>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as
-# published by the Free Software Foundation, either version 3 of the
-# License, or (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU Affero General Public License for more details.
-#
-# You should have received a copy of the GNU Affero General Public License
-# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import fcntl
-
-class listdict:
-    'a dict of lists { "a": [1, 2, 3],  "b": [1, 2] }'
-    def __getattr__(ld, name):
-        if name == 'add':
-            return ld.__getattribute__(name)
-        return ld.__dict__.__getattribute__(name)
-
-    def add(ld, name, item):
-        l = ld.__dict__.get(name)
-        if not l:
-            l = []
-            ld.__dict__[name] = l
-        l.append(item)
-        return l
-
-    def add_dict(ld, d):
-        for k,v in d.items():
-            ld.add(k, v)
-
-    def __setitem__(ld, name, val):
-        return ld.__dict__.__setitem__(name, val)
-
-    def __getitem__(ld, name):
-        return ld.__dict__.__getitem__(name)
-
-    def __str__(ld):
-        return ld.__dict__.__str__()
-
-
-class DictProxy:
-    '''
-    allow accessing dict entries like object members
-    syntactical sugar, adapted from http://stackoverflow.com/a/31569634
-    so that e.g. templates can do ${bts.member} instead of ${bts['member']}
-    '''
-    def __init__(self, obj):
-        self.obj = obj
-
-    def __getitem__(self, key):
-        return dict2obj(self.obj[key])
-
-    def __getattr__(self, key):
-        try:
-            return dict2obj(getattr(self.obj, key))
-        except AttributeError:
-            try:
-                return self[key]
-            except KeyError:
-                raise AttributeError(key)
-
-class ListProxy:
-    'allow nesting for DictProxy'
-    def __init__(self, obj):
-        self.obj = obj
-
-    def __getitem__(self, key):
-        return dict2obj(self.obj[key])
-
-def dict2obj(value):
-    if isinstance(value, dict):
-        return DictProxy(value)
-    if isinstance(value, (tuple, list)):
-        return ListProxy(value)
-    return value
-
-
-class FileLock:
-    def __init__(self, path, owner):
-        self.path = path
-        self.owner = owner
-        self.f = None
-
-    def __enter__(self):
-        if self.f is not None:
-            return
-        self.fd = os.open(self.path, os.O_CREAT | os.O_WRONLY | os.O_TRUNC)
-        fcntl.flock(self.fd, fcntl.LOCK_EX)
-        os.truncate(self.fd, 0)
-        os.write(self.fd, str(self.owner).encode('utf-8'))
-        os.fsync(self.fd)
-
-    def __exit__(self, *exc_info):
-        #fcntl.flock(self.fd, fcntl.LOCK_UN)
-        os.truncate(self.fd, 0)
-        os.fsync(self.fd)
-        os.close(self.fd)
-        self.fd = -1
-
-    def lock(self):
-        self.__enter__()
-
-    def unlock(self):
-        self.__exit__()
-
-
-# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/src/run_once.py b/src/run_once.py
index ff15204..88eed28 100755
--- a/src/run_once.py
+++ b/src/run_once.py
@@ -21,28 +21,124 @@
 
 '''osmo_gsm_tester: invoke a single test run.
 
-./run_once.py ~/path/to/test_package/
+Examples:
 
-Upon launch, a 'test_package/run-<date>' directory will be created.
-When complete, a symbolic link 'test_package/last_run' will point at this dir.
-The run dir then contains logs and test results.
+./run_once.py ~/my_trial_package/ -s osmo_trx
+./run_once.py ~/my_trial_package/ -c sms_tests:dyn_ts+eu_band+bts_sysmo
+./run_once.py ~/my_trial_package/ -c sms_tests/mo_mt_sms:bts_trx
+
+(The test suite, scenario and series names used in these examples must be
+defined in the osmo-gsm-tester configuration.)
+
+A trial package contains binaries (usually built by a jenkins job) of GSM
+software, including the core network programs as well as binaries for the
+various BTS models.
+
+A test suite defines specific actions to be taken and verifies their outcome.
+Such a test suite may leave certain aspects of a setup undefined, e.g. it may
+be BTS model agnostic or may not care which voice codecs are chosen.
+
+A test scenario completes the picture in that it defines which specific choices
+shall be made to run a test suite. Any one test suite may thus run on any
+number of different scenarios, e.g. to test various voice codecs.
+
+Test scenarios may be combined. For example, one scenario may define a timeslot
+configuration to use, while another scenario may define the voice codec
+configuration.
+
+There may still be aspects that are neither required by a test suite nor
+strictly defined by a scenario, which will be resolved automatically, e.g. by
+choosing the first available item that matches the other constraints.
+
+A test run thus needs to define: a trial package containing built binaries, a
+combination of scenarios to run a suite in, and a test suite to launch in the
+given scenario with the given binaries.
+
+The osmo-gsm-tester configuration may define one or more series as a number of
+suite:scenario combinations. So instead of a specific suite:scenario
+combination, the name of such a series can be passed.
+
+If neither a combination nor a series is specified, the default series will be
+run as defined in the osmo-gsm-tester configuration.
+
+The scenarios and suites run for a given trial will be recorded in the trial
+package's directory: upon launch, a 'trial_package/run.<date>' directory will
+be created, which collects logs and reports.
 '''
 
-import osmo_gsm_tester
+from osmo_gsm_tester import trial, suite, log, __version__
 
 if __name__ == '__main__':
     import argparse
 
-    parser = argparse.ArgumentParser()
+    parser = argparse.ArgumentParser(epilog=__doc__, formatter_class=argparse.RawTextHelpFormatter)
     parser.add_argument('-V', '--version', action='store_true',
             help='Show version')
-    parser.add_argument('test_package', nargs='*',
+    parser.add_argument('trial_package', nargs='+',
             help='Directory containing binaries to test')
+    parser.add_argument('-s', '--suite-scenario', dest='suite_scenario', action='append',
+            help='A suite-scenarios combination like suite:scenario+scenario')
+    parser.add_argument('-S', '--series', dest='series', action='append',
+            help='A series of suite-scenarios combinations as defined in the'
+                 ' osmo-gsm-tester configuration')
+    parser.add_argument('-t', '--test', dest='test', action='append',
+            help='Run only tests matching this name')
+    parser.add_argument('-l', '--log-level', dest='log_level', choices=log.LEVEL_STRS.keys(),
+            default=None,
+            help='Set logging level for all categories (on stdout)')
+    parser.add_argument('-T', '--traceback', dest='trace', action='store_true',
+            help='Enable logging of tracebacks')
     args = parser.parse_args()
 
     if args.version:
-        print(osmo_gsm_tester.__version__)
+        print(__version__)
         exit(0)
 
+    print('combinations:', repr(args.suite_scenario))
+    print('series:', repr(args.series))
+    print('trials:', repr(args.trial_package))
+    print('tests:', repr(args.test))
+
+    if args.log_level:
+        log.set_all_levels(log.LEVEL_STRS.get(args.log_level))
+    log.style_change(origin_width=32)
+    if args.trace:
+        log.style_change(trace=True)
+
+    combination_strs = list(args.suite_scenario or [])
+    # for series in args.series:
+    #     combination_strs.extend(config.get_series(series))
+
+    if not combination_strs:
+        raise RuntimeError('Need at least one suite:scenario or series to run')
+
+    suite_scenarios = []
+    for combination_str in combination_strs:
+        suite_scenarios.append(suite.load_suite_scenario_str(combination_str))
+
+    test_names = []
+    for test_name in (args.test or []):
+        found = False
+        for suite_def, scenarios in suite_scenarios:
+            for test in suite_def.tests:
+                if test_name in test.name():
+                    found = True
+                    test_names.append(test.name())
+        if not found:
+            raise RuntimeError('No test found for %r' % test_name)
+    if test_names:
+        print(repr(test_names))
+
+    trials = []
+    for trial_package in args.trial_package:
+        t = trial.Trial(trial_package)
+        t.verify()
+        trials.append(t)
+
+    for current_trial in trials:
+        with current_trial:
+            for suite_def, scenarios in suite_scenarios:
+                suite_run = suite.SuiteRun(current_trial, suite_def, scenarios)
+                suite_run.run_tests(test_names)
 
 # vim: expandtab tabstop=4 shiftwidth=4