Merge history from osmo-gsm-manuals.git
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..a85b41a
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,13 @@
+__pycache__
+pid
+.*.sw?
+version
+_version.py
+tags
+set_pythonpath
+test_work
+state
+*.pyc
+selftest/trial_test/
+example/resources.conf
+ttcn3/resources.conf
diff --git a/.gitreview b/.gitreview
new file mode 100644
index 0000000..88a6443
--- /dev/null
+++ b/.gitreview
@@ -0,0 +1,4 @@
+[gerrit]
+host=gerrit.osmocom.org
+project=osmo-gsm-tester
+
diff --git a/COPYING b/COPYING
new file mode 100644
index 0000000..94a9ed0
--- /dev/null
+++ b/COPYING
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..eb2f8d8
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,15 @@
+all: deps version check
+
+.PHONY: version check
+
+deps:
+ ./check_dependencies.py
+
+version:
+ ./update_version.sh
+
+check:
+ $(MAKE) -C selftest check
+ @echo "make check: success"
+
+# vim: noexpandtab tabstop=8 shiftwidth=8
diff --git a/check_dependencies.py b/check_dependencies.py
new file mode 100755
index 0000000..83df7a9
--- /dev/null
+++ b/check_dependencies.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python3
+
+# just import all python3 modules used by osmo-gsm-tester to make sure they are
+# installed.
+
+from inspect import getframeinfo, stack
+from mako.lookup import TemplateLookup
+from mako.template import Template
+import argparse
+import contextlib
+import copy
+import difflib
+import fcntl
+import inspect
+import io
+import os
+import pprint
+import re
+import subprocess
+import sys
+import tempfile
+import time
+import traceback
+import yaml
+import pydbus
+import sqlite3
+import sispm
+import smpplib
+
+print('dependencies ok')
diff --git a/contrib/jenkins-build-common.sh b/contrib/jenkins-build-common.sh
new file mode 100644
index 0000000..a9eaf7a
--- /dev/null
+++ b/contrib/jenkins-build-common.sh
@@ -0,0 +1,224 @@
+#!source_this_file
+
+# Common parts for osmo-gsm-tester jenkins build scripts. Use like in below example:
+#
+#--------------
+# #!/bin/sh
+# set -e -x
+# base="$PWD"
+# name="osmo-name"
+# . "$(dirname "$0")/jenkins-build-common.sh"
+#
+# build_repo libosmocore --configure --opts
+# build_repo libosmo-foo special_branch --configure --opts
+# build_repo osmo-bar
+# build_repo_dir openbsc ./openbsc
+#
+# create_bin_tgz
+#--------------
+#
+# Some explanations:
+#
+# To allow calling from arbitrary working directories, other scripts should
+# source this file like shown above.
+#
+# Sourcing scripts must provide some variables/functions, see above.
+# In addition, these values can optionally be passed to override:
+# git_url, prefix, prefix_real, BUILD_NUMBER
+#
+# CONFIGURE_FLAGS may contain flags that should be passed to all builds'
+# ./configure steps (useful e.g. for building in the sysmobts SDK).
+#
+# For each built repository, a specific git branch or hash can be provided by
+# environment variable: OSMO_GSM_TESTER_BUILD_$repo="<git-hash>"
+# NOTE: convert $repo's dashes to underscore. For example:
+# OSMO_GSM_TESTER_BUILD_osmo_hlr="f001234abc"
+# OSMO_GSM_TESTER_BUILD_libosmocore="my/branch"
+# ("origin/" is prepended to branch names automatically)
+
+if [ -z "$name" -o -z "$base" ]; then
+ set +x
+ echo "Some environment variables are not provided as required by jenkins-build-common.sh. Error."
+ exit 1
+fi
+
+git_url="${git_url-"git://git.osmocom.org"}"
+prefix="${prefix-"$base/inst-$name"}"
+# prefix_real is usually identical with prefix, except when installing to a
+# different $DESTDIR than /, which is the case for example when building
+# osmo-bts within the sysmoBTS SDK
+prefix_real="${prefix_real-"$prefix"}"
+
+# Flag to be used to enable ASAN in builds. Defaults to enable ASAN builds and
+# it can be disabled by passing SANITIZE_FLAGS="" to the build.
+SANITIZE_FLAGS="${SANITIZE_FLAGS---enable-sanitize}"
+
+export PKG_CONFIG_PATH="$prefix_real/lib/pkgconfig:$PKG_CONFIG_PATH"
+export LD_LIBRARY_PATH="$prefix_real/lib:$LD_LIBRARY_PATH"
+
+# Show current environment. Sometimes the LESS_ vars have ansi colors making a
+# mess, so exclude those.
+env | grep -v "^LESS" | sort
+
+# clean the workspace
+rm -f "$base"/*.build-*.tgz
+rm -f "$base"/*.build-*.md5
+rm -rf "$prefix_real"
+mkdir -p "$prefix_real"
+
+have_repo() {
+ repo="$1"
+ branch="${2-master}"
+
+ # Evaluate environment for instructions to build a specific git hash.
+ # Using a hash as $branch above unfortunately doesn't work.
+ branch_override_var="$(echo "OSMO_GSM_TESTER_BUILD_$repo" | sed 's/-/_/g')"
+ branch_override="$(eval "echo \$$branch_override_var")"
+ if [ -n "$branch_override" ]; then
+ branch="$branch_override"
+ fi
+
+ cd "$base"
+ if [ -d "$repo" ]; then
+ cd "$repo"
+ git fetch
+ else
+ git clone "$git_url/$repo" "$repo"
+ cd "$repo"
+ fi
+
+
+ # Figure out whether we need to prepend origin/ to find branches in upstream.
+ # Doing this allows using git hashes instead of a branch name.
+ if git rev-parse "origin/$branch"; then
+ branch="origin/$branch"
+ fi
+
+ git checkout -B build_branch "$branch"
+ rm -rf *
+ git reset --hard "$branch"
+
+ git rev-parse HEAD
+
+ echo "$(git rev-parse HEAD) $repo" >> "$prefix_real/${name}_git_hashes.txt"
+
+ cd "$base"
+}
+
+build_repo() {
+ # usage: build_repo <name> [<branch>] [--configure-opts [...]]
+ dir="$1"
+ shift
+ build_repo_dir "${dir}" "./" $@
+}
+
+build_repo_dir() {
+ # usage: build_repo_dir <name> <dir> [<branch>] [--configure-opts [...]]
+ dep="$1"
+ dir="$2"
+ branch="master"
+ if [ -z "$(echo "$3" | grep '^-')" ]; then
+ # second arg does not start with a dash, it's empty or a branch
+ branch="$3"
+ if [ -n "$branch" ]; then
+ # we had a branch arg, need to shift once more to get config options
+ shift
+ else
+ branch="master"
+ fi
+ fi
+ shift
+ shift
+ configure_opts="$@"
+
+ set +x; echo "
+
+====================== $dep
+
+"; set -x
+
+
+ have_repo "$dep" "$branch"
+
+ cd "$dep/${dir}"
+
+ set +x; echo; echo; set -x
+ autoreconf -fi
+ set +x; echo; echo; set -x
+ ./configure --prefix="$prefix" --with-systemdsystemunitdir=no $CONFIGURE_FLAGS $configure_opts
+ set +x; echo; echo; set -x
+ make -j8 || make # libsmpp34 can't build in parallel
+ set +x; echo; echo; set -x
+ make install
+}
+
+prune_files() {
+ bindir="$1"
+ wanted_binaries="$2"
+
+ if [ ! -d "$prefix_real/$bindir" ]; then return; fi
+ # remove binaries not intended to originate from this build
+ cd "$prefix_real/$bindir"
+ for f in * ; do
+ if [ -z "$(echo "_ $wanted_binaries _" | grep " $f ")" ]; then
+ rm "$f"
+ fi
+ done
+
+ # ensure requested binaries indeed exist
+ for b in $wanted_binaries ; do
+ if [ ! -f "$b" ]; then
+ set +x; echo "ERROR: no such binary: $b in $prefix_real/$bindir/"; set -x
+ ls -1 "$prefix_real/$bindir"
+ exit 1
+ fi
+ done
+}
+
+add_rpath() {
+ # Adds an RPATH to executables in bin/ or sbin/ to search for the
+ # (Osmocom) libraries in `dirname /proc/self/exe`/../lib/. Adds an
+ # RPATH to a library to search in the same directory as the library.
+ #
+ # NOTE: Binaries should not have the SUID bit set and should run as the
+ # user executing the binary.
+ #
+ # NOTE: $ORIGIN is not a shell variable but a feature of the dynamic
+ # linker that will be expanded at runtime. For details see:
+ # http://man7.org/linux/man-pages/man8/ld.so.8.html
+ #
+ # Add an rpath relative to the binary and library if the directory
+ # exists.
+ if [ -d bin/ ]; then
+ find bin -depth -type f -exec patchelf --set-rpath '$ORIGIN/../lib/' {} \;
+ fi
+ if [ -d sbin/ ]; then
+ find sbin -depth -type f -exec patchelf --set-rpath '$ORIGIN/../lib/' {} \;
+ fi
+ if [ -d lib/ ]; then
+ find lib -depth -type f -name "lib*.so.*" -exec patchelf --set-rpath '$ORIGIN/' {} \;
+ fi
+}
+
+create_bin_tgz() {
+ # build the archive that is going to be copied to the tester
+
+ wanted_binaries_bin="$1"
+ wanted_binaries_sbin="$2"
+
+ if [ -z "$wanted_binaries_bin" ] && [ -z "$wanted_binaries_sbin" ]; then
+ set +x; echo "ERROR: create_bin_tgz needs a list of permitted binaries"; set -x
+ exit 1
+ fi
+
+ prune_files bin "$wanted_binaries_bin"
+ prune_files sbin "$wanted_binaries_sbin"
+
+ cd "$prefix_real"
+ add_rpath
+ this="$name.build-${BUILD_NUMBER-$(date +%Y-%m-%d_%H_%M_%S)}"
+ tar="${this}.tgz"
+ tar czf "$base/$tar" *
+ cd "$base"
+ md5sum "$tar" > "${this}.md5"
+}
diff --git a/contrib/jenkins-build-osmo-bsc.sh b/contrib/jenkins-build-osmo-bsc.sh
new file mode 100755
index 0000000..5d4d333
--- /dev/null
+++ b/contrib/jenkins-build-osmo-bsc.sh
@@ -0,0 +1,14 @@
+#!/bin/sh
+set -e -x
+base="$PWD"
+name="osmo-bsc"
+. "$(dirname "$0")/jenkins-build-common.sh"
+
+build_repo libosmocore ${SANITIZE_FLAGS} --disable-doxygen
+build_repo libosmo-abis ${SANITIZE_FLAGS}
+build_repo libosmo-netif ${SANITIZE_FLAGS} --disable-doxygen
+build_repo libosmo-sccp ${SANITIZE_FLAGS}
+build_repo osmo-mgw ${SANITIZE_FLAGS}
+build_repo osmo-bsc ${SANITIZE_FLAGS}
+
+create_bin_tgz "osmo-bsc abisip-find ipaccess-config"
diff --git a/contrib/jenkins-build-osmo-bts-sysmo.sh b/contrib/jenkins-build-osmo-bts-sysmo.sh
new file mode 100755
index 0000000..afce771
--- /dev/null
+++ b/contrib/jenkins-build-osmo-bts-sysmo.sh
@@ -0,0 +1,25 @@
+#!/bin/sh
+set -e -x
+
+_poky_version="$POKY_VERSION"
+_poky_path="$POKY_PATH"
+
+[ -z "$_poky_version" ] && _poky_version="2.3.2"
+[ -z "$_poky_path" ] && _poky_path="/opt/poky/$_poky_version"
+
+. "$_poky_path/environment-setup-armv5te-poky-linux-gnueabi"
+
+# Cross-compilation: all installations need to be put in the sysmo SDK sysroot
+export DESTDIR="$_poky_path/sysroots/armv5te-poky-linux-gnueabi"
+
+base="$PWD"
+name="osmo-bts-sysmo"
+prefix="/usr/local/jenkins-build/inst-$name"
+prefix_real="$DESTDIR$prefix"
+. "$(dirname "$0")/jenkins-build-common.sh"
+
+build_repo libosmocore --disable-pcsc --disable-doxygen --disable-gnutls
+build_repo libosmo-abis
+build_repo osmo-bts --enable-sysmocom-bts --with-openbsc=$base/openbsc/openbsc/include
+
+create_bin_tgz osmo-bts-sysmo
diff --git a/contrib/jenkins-build-osmo-bts.sh b/contrib/jenkins-build-osmo-bts.sh
new file mode 100755
index 0000000..46a1c11
--- /dev/null
+++ b/contrib/jenkins-build-osmo-bts.sh
@@ -0,0 +1,13 @@
+#!/bin/sh
+set -e -x
+base="$PWD"
+name="osmo-bts"
+. "$(dirname "$0")/jenkins-build-common.sh"
+
+have_repo octphy-2g-headers
+
+build_repo libosmocore ${SANITIZE_FLAGS} --disable-doxygen
+build_repo libosmo-abis ${SANITIZE_FLAGS}
+build_repo osmo-bts ${SANITIZE_FLAGS} --enable-trx --with-openbsc=$base/openbsc/openbsc/include --enable-octphy --with-octsdr-2g=$base/octphy-2g-headers
+
+create_bin_tgz "osmo-bts-trx osmo-bts-octphy"
diff --git a/contrib/jenkins-build-osmo-ggsn.sh b/contrib/jenkins-build-osmo-ggsn.sh
new file mode 100755
index 0000000..fec2633
--- /dev/null
+++ b/contrib/jenkins-build-osmo-ggsn.sh
@@ -0,0 +1,10 @@
+#!/bin/sh
+set -e -x
+base="$PWD"
+name="osmo-ggsn"
+. "$(dirname "$0")/jenkins-build-common.sh"
+
+build_repo libosmocore ${SANITIZE_FLAGS} --disable-doxygen
+build_repo osmo-ggsn ${SANITIZE_FLAGS}
+
+create_bin_tgz osmo-ggsn
diff --git a/contrib/jenkins-build-osmo-hlr.sh b/contrib/jenkins-build-osmo-hlr.sh
new file mode 100755
index 0000000..0b60955
--- /dev/null
+++ b/contrib/jenkins-build-osmo-hlr.sh
@@ -0,0 +1,11 @@
+#!/bin/sh
+set -e -x
+base="$PWD"
+name="osmo-hlr"
+. "$(dirname "$0")/jenkins-build-common.sh"
+
+build_repo libosmocore ${SANITIZE_FLAGS} --disable-doxygen
+build_repo libosmo-abis ${SANITIZE_FLAGS}
+build_repo osmo-hlr ${SANITIZE_FLAGS}
+
+create_bin_tgz osmo-hlr
diff --git a/contrib/jenkins-build-osmo-iuh.sh b/contrib/jenkins-build-osmo-iuh.sh
new file mode 100755
index 0000000..e8e7fa8
--- /dev/null
+++ b/contrib/jenkins-build-osmo-iuh.sh
@@ -0,0 +1,15 @@
+#!/bin/sh
+set -e -x
+base="$PWD"
+name="osmo-iuh"
+. "$(dirname "$0")/jenkins-build-common.sh"
+
+build_repo libosmocore ${SANITIZE_FLAGS} --disable-doxygen
+build_repo libosmo-abis ${SANITIZE_FLAGS}
+build_repo libosmo-netif ${SANITIZE_FLAGS} --disable-doxygen
+build_repo libosmo-sccp ${SANITIZE_FLAGS}
+build_repo libasn1c ${SANITIZE_FLAGS}
+
+build_repo osmo-iuh ${SANITIZE_FLAGS}
+
+create_bin_tgz "osmo-hnbgw"
diff --git a/contrib/jenkins-build-osmo-mgw.sh b/contrib/jenkins-build-osmo-mgw.sh
new file mode 100755
index 0000000..a04e0ca
--- /dev/null
+++ b/contrib/jenkins-build-osmo-mgw.sh
@@ -0,0 +1,12 @@
+#!/bin/sh
+set -e -x
+base="$PWD"
+name="osmo-mgw"
+. "$(dirname "$0")/jenkins-build-common.sh"
+
+build_repo libosmocore ${SANITIZE_FLAGS} --disable-doxygen
+build_repo libosmo-abis ${SANITIZE_FLAGS}
+build_repo libosmo-netif ${SANITIZE_FLAGS} --disable-doxygen
+build_repo osmo-mgw ${SANITIZE_FLAGS}
+
+create_bin_tgz "osmo-mgw"
diff --git a/contrib/jenkins-build-osmo-msc.sh b/contrib/jenkins-build-osmo-msc.sh
new file mode 100755
index 0000000..c64f94d
--- /dev/null
+++ b/contrib/jenkins-build-osmo-msc.sh
@@ -0,0 +1,18 @@
+#!/bin/sh
+set -e -x
+base="$PWD"
+name="osmo-msc"
+. "$(dirname "$0")/jenkins-build-common.sh"
+
+build_repo libosmocore ${SANITIZE_FLAGS} --disable-doxygen
+build_repo libosmo-abis ${SANITIZE_FLAGS}
+build_repo libosmo-netif ${SANITIZE_FLAGS} --disable-doxygen
+build_repo libsmpp34 ${SANITIZE_FLAGS}
+build_repo libosmo-sccp ${SANITIZE_FLAGS}
+build_repo osmo-mgw ${SANITIZE_FLAGS}
+build_repo osmo-hlr ${SANITIZE_FLAGS}
+build_repo libasn1c ${SANITIZE_FLAGS}
+build_repo osmo-iuh ${SANITIZE_FLAGS}
+build_repo osmo-msc ${SANITIZE_FLAGS} --enable-smpp --enable-iu
+
+create_bin_tgz osmo-msc
diff --git a/contrib/jenkins-build-osmo-nitb.sh b/contrib/jenkins-build-osmo-nitb.sh
new file mode 100755
index 0000000..4f81fa8
--- /dev/null
+++ b/contrib/jenkins-build-osmo-nitb.sh
@@ -0,0 +1,15 @@
+#!/bin/sh
+set -e -x
+base="$PWD"
+name="osmo-nitb"
+. "$(dirname "$0")/jenkins-build-common.sh"
+
+build_repo libosmocore --disable-doxygen
+build_repo libosmo-abis
+build_repo libosmo-netif --disable-doxygen
+build_repo openggsn
+build_repo libsmpp34
+build_repo libosmo-sccp
+build_repo_dir openbsc openbsc --enable-smpp --enable-osmo-bsc --enable-nat
+
+create_bin_tgz "osmo-nitb osmo-bsc_mgcp"
diff --git a/contrib/jenkins-build-osmo-pcu-sysmo.sh b/contrib/jenkins-build-osmo-pcu-sysmo.sh
new file mode 100755
index 0000000..4dc7704
--- /dev/null
+++ b/contrib/jenkins-build-osmo-pcu-sysmo.sh
@@ -0,0 +1,24 @@
+#!/bin/sh
+set -e -x
+
+_poky_version="$POKY_VERSION"
+_poky_path="$POKY_PATH"
+
+[ -z "$_poky_version" ] && _poky_version="2.3.2"
+[ -z "$_poky_path" ] && _poky_path="/opt/poky/$_poky_version"
+
+. "$_poky_path/environment-setup-armv5te-poky-linux-gnueabi"
+
+# Cross-compilation: all installations need to be put in the sysmo SDK sysroot
+export DESTDIR="$_poky_path/sysroots/armv5te-poky-linux-gnueabi"
+
+base="$PWD"
+name="osmo-pcu-sysmo"
+prefix="/usr/local/jenkins-build/inst-$name"
+prefix_real="$DESTDIR$prefix"
+. "$(dirname "$0")/jenkins-build-common.sh"
+
+build_repo libosmocore --disable-pcsc --disable-doxygen --disable-gnutls
+build_repo osmo-pcu --enable-sysmocom-dsp
+
+create_bin_tgz osmo-pcu
diff --git a/contrib/jenkins-build-osmo-pcu.sh b/contrib/jenkins-build-osmo-pcu.sh
new file mode 100755
index 0000000..1877f9c
--- /dev/null
+++ b/contrib/jenkins-build-osmo-pcu.sh
@@ -0,0 +1,10 @@
+#!/bin/sh
+set -e -x
+base="$PWD"
+name="osmo-pcu"
+. "$(dirname "$0")/jenkins-build-common.sh"
+
+build_repo libosmocore ${SANITIZE_FLAGS} --disable-pcsc --disable-doxygen
+build_repo osmo-pcu ${SANITIZE_FLAGS}
+
+create_bin_tgz osmo-pcu
diff --git a/contrib/jenkins-build-osmo-sgsn.sh b/contrib/jenkins-build-osmo-sgsn.sh
new file mode 100755
index 0000000..f296f8b
--- /dev/null
+++ b/contrib/jenkins-build-osmo-sgsn.sh
@@ -0,0 +1,17 @@
+#!/bin/sh
+set -e -x
+base="$PWD"
+name="osmo-sgsn"
+. "$(dirname "$0")/jenkins-build-common.sh"
+
+build_repo libosmocore ${SANITIZE_FLAGS} --disable-doxygen
+build_repo libosmo-abis ${SANITIZE_FLAGS}
+build_repo libosmo-netif ${SANITIZE_FLAGS} --disable-doxygen
+build_repo libosmo-sccp ${SANITIZE_FLAGS}
+build_repo osmo-ggsn ${SANITIZE_FLAGS}
+build_repo osmo-hlr ${SANITIZE_FLAGS}
+build_repo libasn1c ${SANITIZE_FLAGS}
+build_repo osmo-iuh ${SANITIZE_FLAGS}
+build_repo osmo-sgsn ${SANITIZE_FLAGS} --enable-iu
+
+create_bin_tgz osmo-sgsn
diff --git a/contrib/jenkins-build-osmo-stp.sh b/contrib/jenkins-build-osmo-stp.sh
new file mode 100755
index 0000000..5f9c421
--- /dev/null
+++ b/contrib/jenkins-build-osmo-stp.sh
@@ -0,0 +1,12 @@
+#!/bin/sh
+set -e -x
+base="$PWD"
+name="osmo-stp"
+. "$(dirname "$0")/jenkins-build-common.sh"
+
+build_repo libosmocore ${SANITIZE_FLAGS} --disable-doxygen
+build_repo libosmo-abis ${SANITIZE_FLAGS}
+build_repo libosmo-netif ${SANITIZE_FLAGS} --disable-doxygen
+build_repo libosmo-sccp ${SANITIZE_FLAGS}
+
+create_bin_tgz osmo-stp
diff --git a/contrib/jenkins-build-osmo-trx.sh b/contrib/jenkins-build-osmo-trx.sh
new file mode 100755
index 0000000..f15a9e0
--- /dev/null
+++ b/contrib/jenkins-build-osmo-trx.sh
@@ -0,0 +1,37 @@
+#!/bin/sh
+set -e -x
+base="$PWD"
+name="osmo-trx"
+. "$(dirname "$0")/jenkins-build-common.sh"
+
+build_repo_limesuite() {
+set +x; echo "
+
+====================== $dep
+
+"; set -x
+
+prev_git_url="${git_url}"
+git_url="https://github.com/myriadrf/"
+have_repo "LimeSuite" "master"
+git_url="${prev_git_url}"
+cd "LimeSuite"
+
+set +x; echo; echo; set -x
+mkdir -p builddir && cd builddir
+set +x; echo; echo; set -x
+cmake -DCMAKE_INSTALL_PREFIX:PATH=$prefix ../
+set +x; echo; echo; set -x
+make -j5
+set +x; echo; echo; set -x
+make install
+}
+
+# We want to use LimeSuite installed by debian repos
+# build_repo_limesuite
+
+# AddressSanitizer is not enabled on purpose since overhead affects the clocking.
+build_repo libosmocore --disable-doxygen
+build_repo osmo-trx --without-sse --with-uhd --with-lms
+
+create_bin_tgz "osmo-trx-uhd osmo-trx-lms"
diff --git a/contrib/jenkins-build-osmocom-bb.sh b/contrib/jenkins-build-osmocom-bb.sh
new file mode 100755
index 0000000..827fc94
--- /dev/null
+++ b/contrib/jenkins-build-osmocom-bb.sh
@@ -0,0 +1,23 @@
+#!/bin/sh
+set -e -x
+
+base="$PWD"
+name="osmocom-bb"
+. "$(dirname "$0")/jenkins-build-common.sh"
+
+FW_RPM_URL="http://download.opensuse.org/repositories/home:/mnhauke:/osmocom:/nightly/SLE_15/x86_64/"
+
+build_repo libosmocore --disable-doxygen
+build_repo_dir osmocom-bb src/host/virt_phy ${SANITIZE_FLAGS}
+build_repo_dir osmocom-bb src/host/osmocon ${SANITIZE_FLAGS}
+build_repo_dir osmocom-bb src/host/layer23 ${SANITIZE_FLAGS}
+
+mkdir -p "$prefix"
+cd "$prefix"
+FW_RPM="$(wget -q -O - "$FW_RPM_URL" | grep -o 'osmocom-bb-firmware.*rpm' | sed 's#\"#\n#g' | head -1)"
+echo "Downloading RPM package $FW_RPM"
+wget -q "$FW_RPM_URL/$FW_RPM" -O osmocom-bb-firmware.rpm
+rpm2cpio osmocom-bb-firmware.rpm | cpio -idmv
+rm osmocom-bb-firmware.rpm
+
+create_bin_tgz "virtphy mobile" "osmocon"
diff --git a/contrib/jenkins-make-check-and-run.sh b/contrib/jenkins-make-check-and-run.sh
new file mode 100755
index 0000000..3017fd4
--- /dev/null
+++ b/contrib/jenkins-make-check-and-run.sh
@@ -0,0 +1,14 @@
+#!/bin/sh
+set -e -x
+base="$PWD"
+
+run_script="osmo-gsm-tester/contrib/jenkins-run.sh"
+test -x "$run_script"
+
+cd osmo-gsm-tester
+make deps
+make check
+cd "$base"
+
+PATH="$base/osmo-gsm-tester/src:$PATH" \
+ "$run_script"
diff --git a/contrib/jenkins-run.sh b/contrib/jenkins-run.sh
new file mode 100755
index 0000000..716899e
--- /dev/null
+++ b/contrib/jenkins-run.sh
@@ -0,0 +1,37 @@
+#!/bin/sh
+set -e -x
+base="$PWD"
+
+time_start="$(date '+%F %T')"
+
+# remove older trial dirs and *-run.tgz, if any
+trial_dir_prefix="trial-"
+rm -rf "$trial_dir_prefix"* || true
+
+# Expecting *.tgz artifacts to be copied to this workspace from the various
+# jenkins-*.sh runs, via jenkins job configuration. Compose a trial dir:
+trial_dir="${trial_dir_prefix}$BUILD_NUMBER"
+mkdir -p "$trial_dir"
+
+mv *.tgz "$trial_dir"
+cat *.md5 >> "$trial_dir/checksums.md5"
+rm *.md5
+
+# OSMO_GSM_TESTER_OPTS is a way to pass in e.g. logging preferences from the
+# jenkins build job.
+# On failure, first clean up below and then return the exit code.
+exit_code="1"
+if python3 -u "$(which osmo-gsm-tester.py)" "$trial_dir" $OSMO_GSM_TESTER_OPTS ; then
+ exit_code="0"
+fi
+
+# no need to keep extracted binaries
+rm -rf "$trial_dir/inst" || true
+
+# tar up all results for archiving (optional)
+cd "$trial_dir"
+journalctl -u ofono -o short-precise --since "${time_start}" > "$(readlink last_run)/ofono.log"
+tar czf "$base/${trial_dir}-run.tgz" "$(readlink last_run)"
+tar czf "$base/${trial_dir}-bin.tgz" *.md5 *.tgz
+
+exit $exit_code
diff --git a/contrib/trials-cleanup.sh b/contrib/trials-cleanup.sh
new file mode 100755
index 0000000..830f4e7
--- /dev/null
+++ b/contrib/trials-cleanup.sh
@@ -0,0 +1,30 @@
+#!/bin/sh
+# Remove all but the N newest test run dirs (that have been started)
+
+trial_rx_dir="$1"
+trial_prep_dir="$2"
+if [ -z "$trial_rx_dir" ]; then
+ trial_rx_dir="/var/tmp/osmo-gsm-tester/trials"
+fi
+if [ -z "$trial_prep_dir" ]; then
+ trial_prep_dir="/var/tmp/osmo-gsm-tester/.prep-trials"
+fi
+
+mkdir -p "$trial_prep_dir"
+
+rm_trial() {
+ trial_dir="$1"
+ trial_name="$(basename "$trial_dir")"
+ echo "Removing: $(ls -ld "$trial_dir")"
+ # ensure atomic removal, so that the gsm-tester doesn't take it as a
+ # newly added dir (can happen when the 'taken' marker is removed first).
+ mv "$trial_dir" "$trial_prep_dir/"
+ rm -rf "$trial_prep_dir/$trial_name"
+}
+
+# keep the N newest test session dirs that have been started: find all that
+# have been started sorted by time, then discard all but the N newest ones.
+
+for seen in $(ls -1t "$trial_rx_dir"/*/taken | tail -n +31); do
+ rm_trial "$(dirname "$seen")"
+done
diff --git a/doc/README-sysmobts.txt b/doc/README-sysmobts.txt
new file mode 100644
index 0000000..a1fffc0
--- /dev/null
+++ b/doc/README-sysmobts.txt
@@ -0,0 +1,63 @@
+SETTING UP sysmobts
+
+PACKAGE VERSIONS
+
+Depending on the code to be tested, select the stable, testing or nightly opkg
+feed:
+
+To change the feed and packages installed on the sysmobts edit the
+following files in /etc/opkg/
+
+* all-feed.conf
+* armv5te-feed.conf
+* sysmobts-v2-feed.conf
+
+and adjust the URL. For example, to move to the testing feeds:
+
+ sed -i 's/201310/201310-testing/g' /etc/opkg/*.conf
+
+Then run 'opkg update', 'opkg upgrade' and finally 'reboot'.
+
+
+DISABLE SERVICES
+
+To use the sysmobts together with the tester, the following systemd services must be disabled
+but using the mask and not using the disable option. You can use the following lines:
+
+systemctl mask osmo-nitb
+systemctl mask sysmobts
+systemctl mask sysmobts-mgr
+
+
+SSH ACCESS
+
+Copy the SSH public key from the system/user that runs the tester to the BTS
+authorized keys file so the tester will be able to deploy binaries.
+
+ scp my_key.pub $sysmobts:
+ ssh $sysmobts
+ cat my_key.pub >> ~/.ssh/authorized_keys
+
+It is also advisable to configure the eth0 network interface of the BTS to a
+static IP address instead of using DHCP. To do so adjust /etc/network/interfaces
+and change the line
+
+ iface eth0 inet dhcp
+
+to
+
+ iface eth0 inet static
+ address 10.42.42.114
+ netmask 255.255.255.0
+ gateway 10.42.42.1
+
+Set the name server in /etc/resolv.conf (most likely to the IP of the
+gateway).
+
+
+ALLOW CORE FILES
+
+In case a binary run for the test crashes, we allow it to write a core file, to
+be able to analyze the crash later. This requires a limit rule:
+
+ scp install/osmo-gsm-tester-limits.conf sysmobts:/etc/security/limits.d/
diff --git a/doc/README.txt b/doc/README.txt
new file mode 100644
index 0000000..c00cd3e
--- /dev/null
+++ b/doc/README.txt
@@ -0,0 +1,114 @@
+DOCUMENTATION
+
+For the complete documentation, please refer to the osmo-gsm-manuals:
+http://git.osmocom.org/osmo-gsm-manuals/
+http://jenkins.osmocom.org/jenkins/job/Osmocom_Manuals/ws/
+
+
+INSTALLATION
+
+So far the osmo-gsm-tester directory is manually placed in /usr/local/src
+
+
+DEPENDENCIES
+
+Packages required to run the osmo-gsm-tester:
+
+ apt-get install \
+ dbus \
+ tcpdump \
+ sqlite3 \
+ python3 \
+ python3-yaml \
+ python3-mako \
+ python3-gi \
+ ofono \
+ python3-pip
+ pip3 install git+git://github.com/podshumok/python-smpplib.git
+ pip3 install pydbus
+
+To build ofono:
+ apt-get install libglib2.0-dev \
+ libdbus-1-dev \
+ libudev-dev \
+ mobile-broadband-provider-info
+
+
+INSTALLATION
+
+Place a copy of the osmo-gsm-tester repository in /usr/local/src/
+
+ cp install/osmo-gsm-tester-limits.conf /etc/security/limits.d/
+ cp install/*.service /lib/systemd/system/
+ cp install/org.ofono.conf /etc/dbus-1/system.d/
+ systemctl daemon-reload
+
+To run:
+
+ systemctl enable ofono
+ systemctl start ofono
+ systemctl status ofono
+
+ systemctl enable osmo-gsm-tester
+ systemctl start osmo-gsm-tester
+ systemctl status osmo-gsm-tester
+
+
+To stop:
+
+ systemctl stop osmo-gsm-tester
+
+After ofonod has been started and modems have been connected to the system,
+you can run the 'list-modems' script located in /usr/local/src/ofono/test to get
+a list of the modems that have been detected by ofono.
+
+
+CONFIGURATION
+
+Host System configuration
+
+Create the /var/tmp/osmo-gsm-tester directory. It will be used to accept new test jobs.
+
+Test resources (NITB, BTS and modems) are currently configured in the test_manager.py.
+
+For every nitb resource that can be allocated, one alias IP address needs
+to be set up in /etc/network/interfaces on the interface that is connected to the BTSes.
+Add the following lines for each nitb instance that can be allocated (while making
+sure each interface alias and IP is unique)
+
+ auto eth1:0
+ allow-hotplug eth1:0
+ iface eth1:0 inet static
+ address 10.42.42.2
+ netmask 255.255.255.0
+
+Also make sure, the user executing the tester is allowed to run tcpdump. If
+the user is not root, we have used the following line to get proper permissions:
+
+ groupadd pcap
+ addgroup <your-user-name> pcap
+ setcap cap_net_raw,cap_net_admin=eip /usr/sbin/tcpdump
+    chgrp pcap /usr/sbin/tcpdump
+ chmod 0750 /usr/sbin/tcpdump
+
+The tester main unit must be able to ssh without password to the sysmobts (and
+possibly other) hardware: place the main unit's public SSH key on the sysmoBTS.
+Log in via SSH at least once to accept the BTS' host key.
+
+
+Jenkins Configuration
+
+(TODO: jenkins build slave details)
+
+When adding an entry to jenkins' known_hosts file, be aware that you need to
+add an actual RSA host key. Using 'ssh' to access the main unit may work, but
+jenkins will still fail to access it in the absence of a full RSA host key:
+
+ ssh-keyscan -H $my_main_unit_ip_addr >> ~jenkins/.ssh/known_hosts
+
+
+LAUNCHING A TEST RUN
+
+osmo-gsm-tester watches /var/tmp/osmo-gsm-tester for instructions to launch
+test runs. A test run is triggered by a subdirectory containing binaries and a
+checksums file, typically created by jenkins using the scripts in contrib/.
diff --git a/example/README.txt b/example/README.txt
new file mode 100644
index 0000000..76567f2
--- /dev/null
+++ b/example/README.txt
@@ -0,0 +1,30 @@
+This is a real gsm test suite configured and ready to use.
+The only thing missing is a trial dir containing binaries.
+
+You can point osmo-gsm-tester.py at this config using the OSMO_GSM_TESTER_CONF
+environment variable:
+
+ export OSMO_GSM_TESTER_CONF="$PWD"
+
+When there is no OSMO_GSM_TESTER_CONF set, osmo-gsm-tester will instead look
+for conf files in several locations like ~/.config/osmo-gsm-tester,
+/usr/local/etc/osmo-gsm-tester, /etc/osmo-gsm-tester.
+
+If you have your trial with binary tar archives in ~/my_trial
+you can run the suite for example like this:
+
+ osmo-gsm-tester.py ~/my_trial
+
+Specifically, from this dir:
+
+ OSMO_GSM_TESTER_CONF="$PWD" ../src/osmo-gsm-tester.py ~/my_trial
+
+Alternatively you can setup this example as permanent config using something
+like:
+
+ mkdir -p ~/.config
+ ln -s "$PWD" ~/.config/osmo-gsm-tester
+
+A ./state dir will be created to store the current osmo-gsm-tester state. If
+you prefer not to write to $PWD, set up your own configuration pointing at a
+different path (see paths.conf: 'state_dir').
diff --git a/example/default-suites.conf b/example/default-suites.conf
new file mode 100644
index 0000000..07b8a5f
--- /dev/null
+++ b/example/default-suites.conf
@@ -0,0 +1,128 @@
+- nitb_sms:sysmo
+- sms:sysmo
+- nitb_ussd:sysmo
+- ussd:sysmo
+- voice:sysmo+mod-bts0-ts-tchf+cfg-codec-fr1
+- voice:sysmo+mod-bts0-ts-tchf+cfg-codec-fr2
+- voice:sysmo+mod-bts0-ts-tchf+cfg-codec-fr3
+- voice:sysmo+mod-bts0-ts-tchh+cfg-codec-hr1
+- voice:sysmo+mod-bts0-ts-tchh+cfg-codec-hr3
+- voice:sysmo+mod-bts0-dynts-ipa+cfg-codec-fr-any
+- voice:sysmo+mod-bts0-dynts-osmo
+- gprs:sysmo
+- gprs:sysmo+mod-bts0-egprs
+- gprs:sysmo+mod-bts0-dynts-ipa
+- gprs:sysmo+mod-bts0-dynts-osmo
+- dynts:sysmo+mod-bts0-dynts67-ipa+cfg-codec-fr-any
+- dynts:sysmo+mod-bts0-dynts67-osmo
+
+- nitb_sms:trx-b200
+- sms:trx-b200
+- nitb_ussd:trx-b200
+- ussd:trx-b200
+- voice:trx-b200+mod-bts0-ts-tchf+cfg-codec-fr1
+- voice:trx-b200+mod-bts0-ts-tchf+cfg-codec-fr2
+- voice:trx-b200+mod-bts0-ts-tchf+cfg-codec-fr3
+- voice:trx-b200+mod-bts0-ts-tchh+cfg-codec-hr1
+- voice:trx-b200+mod-bts0-ts-tchh+cfg-codec-hr3
+- voice:trx-b200+mod-bts0-dynts-ipa+cfg-codec-fr-any
+- voice:trx-b200+mod-bts0-dynts-osmo
+- voice:trx-b200+mod-bts0-numtrx2+mod-bts0-chanallocdescend
+- gprs:trx-b200
+- gprs:trx-b200+mod-bts0-egprs
+- gprs:trx-b200+mod-bts0-dynts-ipa
+- gprs:trx-b200+mod-bts0-dynts-osmo
+- gprs:trx-b200+mod-bts0-numtrx2+mod-bts0-chanallocdescend
+- dynts:trx-b200+mod-bts0-dynts67-ipa+cfg-codec-fr-any
+- dynts:trx-b200+mod-bts0-dynts67-osmo
+
+- nitb_sms:trx-sysmocell5000
+- sms:trx-sysmocell5000
+- nitb_ussd:trx-sysmocell5000
+- ussd:trx-sysmocell5000
+- voice:trx-sysmocell5000+mod-bts0-ts-tchf+cfg-codec-fr1
+- voice:trx-sysmocell5000+mod-bts0-ts-tchf+cfg-codec-fr2
+- voice:trx-sysmocell5000+mod-bts0-ts-tchf+cfg-codec-fr3
+- voice:trx-sysmocell5000+mod-bts0-ts-tchh+cfg-codec-hr1
+- voice:trx-sysmocell5000+mod-bts0-ts-tchh+cfg-codec-hr3
+- voice:trx-sysmocell5000+mod-bts0-dynts-ipa+cfg-codec-fr-any
+- voice:trx-sysmocell5000+mod-bts0-dynts-osmo
+- gprs:trx-sysmocell5000
+- gprs:trx-sysmocell5000+mod-bts0-egprs
+- gprs:trx-sysmocell5000+mod-bts0-dynts-ipa
+- gprs:trx-sysmocell5000+mod-bts0-dynts-osmo
+- dynts:trx-sysmocell5000+mod-bts0-dynts67-ipa+cfg-codec-fr-any
+- dynts:trx-sysmocell5000+mod-bts0-dynts67-osmo
+
+- nitb_sms:trx-umtrx
+- sms:trx-umtrx
+- nitb_ussd:trx-umtrx
+- ussd:trx-umtrx
+- voice:trx-umtrx+mod-bts0-ts-tchf+cfg-codec-fr1
+- voice:trx-umtrx+mod-bts0-ts-tchf+cfg-codec-fr2
+- voice:trx-umtrx+mod-bts0-ts-tchf+cfg-codec-fr3
+- voice:trx-umtrx+mod-bts0-ts-tchh+cfg-codec-hr1
+- voice:trx-umtrx+mod-bts0-ts-tchh+cfg-codec-hr3
+- voice:trx-umtrx+mod-bts0-dynts-ipa+cfg-codec-fr-any
+- voice:trx-umtrx+mod-bts0-dynts-osmo
+- voice:trx-umtrx+mod-bts0-numtrx2+mod-bts0-chanallocdescend
+- gprs:trx-umtrx
+- gprs:trx-umtrx+mod-bts0-egprs
+- gprs:trx-umtrx+mod-bts0-dynts-ipa
+- gprs:trx-umtrx+mod-bts0-dynts-osmo
+- gprs:trx-umtrx+mod-bts0-numtrx2+mod-bts0-chanallocdescend
+- dynts:trx-umtrx+mod-bts0-dynts67-ipa+cfg-codec-fr-any
+- dynts:trx-umtrx+mod-bts0-dynts67-osmo
+
+- nitb_sms:trx-lms
+- sms:trx-lms
+- nitb_ussd:trx-lms
+- ussd:trx-lms
+- voice:trx-lms+mod-bts0-ts-tchf+cfg-codec-fr1
+- voice:trx-lms+mod-bts0-ts-tchf+cfg-codec-fr2
+- voice:trx-lms+mod-bts0-ts-tchf+cfg-codec-fr3
+- voice:trx-lms+mod-bts0-ts-tchh+cfg-codec-hr1
+- voice:trx-lms+mod-bts0-ts-tchh+cfg-codec-hr3
+- voice:trx-lms+mod-bts0-dynts-ipa+cfg-codec-fr-any
+- voice:trx-lms+mod-bts0-dynts-osmo
+- voice:trx-lms+mod-bts0-numtrx2+mod-bts0-chanallocdescend
+- gprs:trx-lms
+- gprs:trx-lms+mod-bts0-egprs
+- gprs:trx-lms+mod-bts0-dynts-ipa
+- gprs:trx-lms+mod-bts0-dynts-osmo
+- gprs:trx-lms+mod-bts0-numtrx2+mod-bts0-chanallocdescend
+- dynts:trx-lms+mod-bts0-dynts67-ipa+cfg-codec-fr-any
+- dynts:trx-lms+mod-bts0-dynts67-osmo
+
+- nitb_sms:nanobts
+- sms:nanobts+band-1900
+- nitb_ussd:nanobts+band-1900
+- ussd:nanobts+band-1900
+- voice:nanobts+band-1900+mod-bts0-ts-tchf+cfg-codec-fr1
+- voice:nanobts+band-1900+mod-bts0-ts-tchf+cfg-codec-fr2
+- voice:nanobts+band-1900+mod-bts0-ts-tchf+cfg-codec-fr3
+- voice:nanobts+band-1900+mod-bts0-ts-tchh+cfg-codec-hr3
+- voice:nanobts+band-1900+mod-bts0-dynts-ipa+cfg-codec-fr-any
+- gprs:nanobts+band-1900
+- gprs:nanobts+band-1900+mod-bts0-egprs
+- gprs:nanobts+band-1900+mod-bts0-dynts-ipa
+- dynts:nanobts+band-1900+mod-bts0-dynts67-ipa+cfg-codec-fr-any
+
+- sms:nanobts+band-900
+- nitb_ussd:nanobts+band-900
+- ussd:nanobts+band-900
+- voice:nanobts+band-900+mod-bts0-ts-tchf+cfg-codec-fr1
+- voice:nanobts+band-900+mod-bts0-ts-tchf+cfg-codec-fr2
+- voice:nanobts+band-900+mod-bts0-ts-tchf+cfg-codec-fr3
+- voice:nanobts+band-900+mod-bts0-ts-tchh+cfg-codec-hr3
+- voice:nanobts+band-900+mod-bts0-dynts-ipa+cfg-codec-fr-any
+- voice:nanobts+band-900+mod-bts0-numtrx2+mod-bts0-chanallocdescend
+- gprs:nanobts+band-900
+- gprs:nanobts+band-900+mod-bts0-egprs
+- gprs:nanobts+band-900+mod-bts0-dynts-ipa
+- gprs:nanobts+band-900+mod-bts0-numtrx2+mod-bts0-chanallocdescend
+- dynts:nanobts+band-900+mod-bts0-dynts67-ipa+cfg-codec-fr-any
+
+- nitb_smpp
+- smpp
+- encryption
diff --git a/example/defaults.conf b/example/defaults.conf
new file mode 100644
index 0000000..eee17b1
--- /dev/null
+++ b/example/defaults.conf
@@ -0,0 +1,86 @@
+nitb:
+ net:
+ mcc: 901
+ mnc: 70
+ short_name: osmo-gsm-tester-nitb
+ long_name: osmo-gsm-tester-nitb
+ auth_policy: closed
+ encryption: a5_0
+
+bsc:
+ net:
+ mcc: 901
+ mnc: 70
+ short_name: osmo-gsm-tester-msc
+ long_name: osmo-gsm-tester-msc
+ encryption: a5_0
+ codec_list:
+ - fr1
+
+msc:
+ net:
+ mcc: 901
+ mnc: 70
+ short_name: osmo-gsm-tester-msc
+ long_name: osmo-gsm-tester-msc
+ encryption: a5_0
+ authentication: optional
+
+bsc_bts:
+ base_station_id_code: 63
+ stream_id: 255
+ osmobsc_bts_type: sysmobts
+ channel_allocator: ascending
+ gprs_mode: gprs
+ num_trx: 1
+ max_trx: 1
+ trx_list:
+ - nominal_power: 23
+ max_power_red: 0
+ arfcn: 868
+ timeslot_list:
+ - phys_chan_config: CCCH+SDCCH4
+ - phys_chan_config: SDCCH8
+ - phys_chan_config: TCH/F
+ - phys_chan_config: TCH/F
+ - phys_chan_config: TCH/F
+ - phys_chan_config: TCH/F
+ - phys_chan_config: PDCH
+ - phys_chan_config: PDCH
+ - nominal_power: 23
+ max_power_red: 0
+ arfcn: 872
+ timeslot_list:
+ - phys_chan_config: TCH/F
+ - phys_chan_config: SDCCH8
+ - phys_chan_config: TCH/F
+ - phys_chan_config: TCH/F
+ - phys_chan_config: TCH/F
+ - phys_chan_config: TCH/F
+ - phys_chan_config: PDCH
+ - phys_chan_config: PDCH
+
+osmo_bts_octphy:
+ max_trx: 2
+
+nanobts:
+ max_trx: 2
+ trx_list:
+ - {}
+ - timeslot_list:
+ - {}
+ - {}
+ - {}
+ - {}
+ - {}
+ - {}
+ - phys_chan_config: TCH/F # nanobts only supports PDCH in TRX0.
+ - phys_chan_config: TCH/F
+
+osmo_bts_trx:
+ max_trx: 2
+
+osmo_trx:
+ type: uhd
+ launch_trx: true
+ clock_reference: internal
diff --git a/example/paths.conf b/example/paths.conf
new file mode 100644
index 0000000..554d942
--- /dev/null
+++ b/example/paths.conf
@@ -0,0 +1,3 @@
+state_dir: '/var/tmp/osmo-gsm-tester/state'
+suites_dir: '../suites'
+scenarios_dir: './scenarios'
diff --git a/example/resources.conf.prod b/example/resources.conf.prod
new file mode 100644
index 0000000..e786836
--- /dev/null
+++ b/example/resources.conf.prod
@@ -0,0 +1,193 @@
+# all hardware and interfaces available to this osmo-gsm-tester
+
+ip_address:
+- addr: 10.42.42.2
+- addr: 10.42.42.3
+- addr: 10.42.42.4
+- addr: 10.42.42.5
+- addr: 10.42.42.6
+- addr: 10.42.42.7
+- addr: 10.42.42.8
+- addr: 10.42.42.9
+- addr: 10.42.42.10
+
+bts:
+- label: sysmoBTS 1002
+ type: osmo-bts-sysmo
+ ipa_unit_id: 1
+ addr: 10.42.42.114
+ band: GSM-1800
+ direct_pcu: true
+ ciphers: [a5_0, a5_1, a5_3]
+
+- label: Ettus B200
+ type: osmo-bts-trx
+ ipa_unit_id: 6
+ addr: 10.42.42.50
+ band: GSM-1800
+ ciphers: [a5_0, a5_1]
+ osmo_trx:
+ type: uhd
+ launch_trx: true
+ remote_user: jenkins
+ trx_ip: 10.42.42.116
+ dev_args: "type=b200,serial=306BD11"
+ clock_reference: external
+ multi_arfcn: true
+
+- label: UmTRX
+ type: osmo-bts-trx
+ ipa_unit_id: 12
+ addr: 10.42.42.54
+ band: GSM-1800
+ ciphers: [a5_0, a5_1]
+ osmo_trx:
+ type: uhd
+ launch_trx: true
+ remote_user: jenkins
+ trx_ip: 10.42.42.118
+ dev_args: "addr=10.42.42.130,pa=NONE,pa_power_max_dbm=23,fifo_ctrl_window=0,status_port=12345"
+ clock_reference: internal
+
+- label: LimeSDR-USB
+ type: osmo-bts-trx
+ ipa_unit_id: 11
+ addr: 10.42.42.53
+ band: GSM-1800
+ ciphers: [a5_0, a5_1]
+ osmo_trx:
+ type: lms
+ launch_trx: true
+ remote_user: jenkins
+ trx_ip: 10.42.42.117
+ clock_reference: external
+
+- label: sysmoCell 5000
+ type: osmo-bts-trx
+ ipa_unit_id: 7
+ addr: 10.42.42.51
+ band: GSM-1800
+ ciphers: [a5_0, a5_1]
+ osmo_trx:
+ launch_trx: false
+ clock_reference: external
+ trx_ip: 10.42.42.112
+
+- label: OCTBTS 3500
+ type: osmo-bts-octphy
+ ipa_unit_id: 8
+ addr: 10.42.42.52
+ band: GSM-1800
+ trx_list:
+ - hw_addr: 00:0c:90:2e:80:1e
+ net_device: enp2s0
+
+- label: NanoBTS-ONW-1900
+ type: nanobts
+ ipa_unit_id: 9
+ band: GSM-1900
+ trx_list:
+ - addr: 10.42.42.120
+ power_supply:
+ type: 'sispm'
+ device: '01:01:4d:98:24'
+ port: '1'
+
+- label: NanoBTS-ONW-900
+ type: nanobts
+ ipa_unit_id: 10
+ band: GSM-900
+ num_trx: 1
+ trx_list:
+ - addr: 10.42.42.121
+ power_supply:
+ type: 'sispm'
+ device: '01:01:4d:98:24'
+ port: '2'
+ - addr: 10.42.42.122
+ power_supply:
+ type: 'sispm'
+ device: '01:01:4d:98:24'
+ port: '3'
+
+arfcn:
+ - arfcn: 512
+ band: GSM-1800
+ - arfcn: 514
+ band: GSM-1800
+ - arfcn: 516
+ band: GSM-1800
+ - arfcn: 518
+ band: GSM-1800
+ - arfcn: 520
+ band: GSM-1800
+ - arfcn: 540
+ band: GSM-1900
+ - arfcn: 542
+ band: GSM-1900
+ - arfcn: 544
+ band: GSM-1900
+ - arfcn: 546
+ band: GSM-1900
+ - arfcn: 548
+ band: GSM-1900
+
+modem:
+- label: sierra_1st
+ path: '/sys/devices/pci0000:00/0000:00:12.2/usb1/1-5/1-5.4/1-5.4.1/1-5.4.1.2'
+ ki: 'EBAB63D06C3F546A16C977CB40E57C68'
+ auth_algo: 'comp128v1'
+ ciphers: [a5_0, a5_1]
+ features: ['sms', 'voice', 'ussd', 'sim']
+
+- label: sierra_2nd
+ path: '/sys/devices/pci0000:00/0000:00:12.2/usb1/1-5/1-5.4/1-5.4.1/1-5.4.1.3'
+ ki: 'EBD2B5F6CF3374106D0A66C11F922001'
+ auth_algo: 'comp128v1'
+ ciphers: [a5_0, a5_1]
+ features: ['sms', 'voice', 'ussd', 'sim']
+
+- label: gobi2k
+ path: '/sys/devices/pci0000:00/0000:00:12.2/usb1/1-5/1-5.4/1-5.4.1/1-5.4.1.5'
+ ki: '5752B3F43277C35D2D1D957007DF74E2'
+ auth_algo: 'comp128v1'
+ ciphers: [a5_0, a5_1]
+ features: ['sim']
+
+- label: ec20_1
+ path: '/sys/devices/pci0000:00/0000:00:12.2/usb1/1-5/1-5.4/1-5.4.1/1-5.4.1.6'
+ ki: '07F35D0A9476646169669401215580E0'
+ auth_algo: 'comp128v1'
+ ciphers: [a5_0, a5_1]
+ features: ['sms', 'voice', 'ussd', 'gprs', 'sim']
+
+- label: ec20_2
+ path: '/sys/devices/pci0000:00/0000:00:12.2/usb1/1-5/1-5.3/1-5.3.2'
+ ki: '278B899D126C31F3B24D21E3EB556530'
+ auth_algo: 'comp128v1'
+ ciphers: [a5_0, a5_1]
+ features: ['sms', 'voice', 'ussd', 'gprs', 'sim']
+
+- label: ec20_3
+ path: '/sys/devices/pci0000:00/0000:00:12.2/usb1/1-5/1-5.3/1-5.3.3'
+ ki: 'B43BBCD8DE4D594F4146DE3D8ADC589D'
+ auth_algo: 'comp128v1'
+ ciphers: [a5_0, a5_1]
+ features: ['sms', 'voice', 'ussd', 'gprs', 'sim']
+
+- label: ec20_4
+ path: '/sys/devices/pci0000:00/0000:00:12.2/usb1/1-5/1-5.3/1-5.3.5'
+ ki: 'A41F06F39DE3B0295C178A674D3E3636'
+ auth_algo: 'comp128v1'
+ ciphers: [a5_0, a5_1]
+ features: ['sms', 'voice', 'ussd', 'gprs', 'sim']
+
+- label: ec20_5
+ path: '/sys/devices/pci0000:00/0000:00:12.2/usb1/1-5/1-5.3/1-5.3.6'
+ ki: '8CCBE85A62C6DC7AC2A9886F4BBC3146'
+ auth_algo: 'comp128v1'
+ ciphers: [a5_0, a5_1]
+ features: ['sms', 'voice', 'ussd', 'gprs', 'sim']
+
+osmocon_phone:
+ - serial_device: '/dev/serial/by-id/usb-Silicon_Labs_CP2104_USB_to_UART_Bridge_Controller_00897B41-if00-port0'
diff --git a/example/resources.conf.rnd b/example/resources.conf.rnd
new file mode 100644
index 0000000..0fad50f
--- /dev/null
+++ b/example/resources.conf.rnd
@@ -0,0 +1,101 @@
+# all hardware and interfaces available to this osmo-gsm-tester
+
+ip_address:
+- addr: 10.42.42.2
+- addr: 10.42.42.3
+- addr: 10.42.42.4
+- addr: 10.42.42.5
+- addr: 10.42.42.6
+- addr: 10.42.42.7
+- addr: 10.42.42.8
+- addr: 10.42.42.9
+- addr: 10.42.42.10
+
+bts:
+- label: sysmoBTS 1002
+ type: osmo-bts-sysmo
+ ipa_unit_id: 1
+ addr: 10.42.42.114
+ band: GSM-1800
+ direct_pcu: true
+ ciphers: [a5_0, a5_1, a5_3]
+
+- label: Ettus B200
+ type: osmo-bts-trx
+ ipa_unit_id: 6
+ addr: 10.42.42.50
+ band: GSM-1800
+ osmo_trx:
+ type: uhd
+ launch_trx: true
+ clock_reference: external
+ multi_arfcn: true
+ remote_user: jenkins
+ trx_ip: 127.0.0.1
+ dev_args: "type=b200,serial=30A9FFB"
+ ciphers: [a5_0, a5_1]
+
+- label: NanoBTS-ONW-1900
+ type: nanobts
+ ipa_unit_id: 9
+ band: GSM-1900
+ trx_list:
+ - addr: 10.42.42.120
+ power_supply:
+ type: 'sispm'
+ device: '01:01:55:2e:b6'
+ port: '1'
+
+arfcn:
+ - arfcn: 512
+ band: GSM-1800
+ - arfcn: 514
+ band: GSM-1800
+ - arfcn: 516
+ band: GSM-1800
+ - arfcn: 518
+ band: GSM-1800
+ - arfcn: 520
+ band: GSM-1800
+ - arfcn: 540
+ band: GSM-1900
+ - arfcn: 542
+ band: GSM-1900
+ - arfcn: 544
+ band: GSM-1900
+ - arfcn: 546
+ band: GSM-1900
+ - arfcn: 548
+ band: GSM-1900
+
+modem:
+- label: sierra_1st
+ path: '/sys/devices/pci0000:00/0000:00:12.2/usb1/1-1/1-1.2'
+ ki: '80A37E6FDEA931EAC92FFA5F671EFEAD'
+ auth_algo: 'comp128v1'
+ ciphers: [a5_0, a5_1]
+ features: ['sms', 'voice', 'ussd', 'sim']
+
+- label: sierra_2nd
+ path: '/sys/devices/pci0000:00/0000:00:12.2/usb1/1-1/1-1.3'
+ ki: '00969E283349D354A8239E877F2E0866'
+ auth_algo: 'comp128v1'
+ ciphers: [a5_0, a5_1]
+ features: ['sms', 'voice', 'ussd', 'sim']
+
+- label: ec20_1
+ path: '/sys/devices/pci0000:00/0000:00:12.2/usb1/1-1/1-1.6'
+ ki: 'BB70807226393CDBAC8DD3439FF54252'
+ auth_algo: 'comp128v1'
+ ciphers: [a5_0, a5_1]
+ features: ['sms', 'voice', 'ussd', 'gprs', 'sim']
+
+- label: ec20_2
+ path: '/sys/devices/pci0000:00/0000:00:12.2/usb1/1-1/1-1.5'
+ ki: '2F70DCA43C45ACB97E947FDD0C7CA30A'
+ auth_algo: 'comp128v1'
+ ciphers: [a5_0, a5_1]
+ features: ['sms', 'voice', 'ussd', 'gprs', 'sim']
+
+osmocon_phone:
+- serial_device: '/dev/serial/by-id/usb-Silicon_Labs_CP2104_USB_to_UART_Bridge_Controller_0089279D-if00-port0'
diff --git a/example/scenarios/band-1800.conf b/example/scenarios/band-1800.conf
new file mode 100644
index 0000000..b12c4c0
--- /dev/null
+++ b/example/scenarios/band-1800.conf
@@ -0,0 +1,3 @@
+resources:
+ bts:
+ - band: GSM-1800
diff --git a/example/scenarios/band-1900.conf b/example/scenarios/band-1900.conf
new file mode 100644
index 0000000..ef648ef
--- /dev/null
+++ b/example/scenarios/band-1900.conf
@@ -0,0 +1,3 @@
+resources:
+ bts:
+ - band: GSM-1900
diff --git a/example/scenarios/band-900.conf b/example/scenarios/band-900.conf
new file mode 100644
index 0000000..a5f8980
--- /dev/null
+++ b/example/scenarios/band-900.conf
@@ -0,0 +1,3 @@
+resources:
+ bts:
+ - band: GSM-900
diff --git a/example/scenarios/cfg-codec-fr-any.conf b/example/scenarios/cfg-codec-fr-any.conf
new file mode 100644
index 0000000..9cab6cc
--- /dev/null
+++ b/example/scenarios/cfg-codec-fr-any.conf
@@ -0,0 +1,7 @@
+config:
+ bsc:
+ net:
+ codec_list:
+ - fr1
+ - fr2
+ - fr3
diff --git a/example/scenarios/cfg-codec-fr1.conf b/example/scenarios/cfg-codec-fr1.conf
new file mode 100644
index 0000000..f58a82d
--- /dev/null
+++ b/example/scenarios/cfg-codec-fr1.conf
@@ -0,0 +1,5 @@
+config:
+ bsc:
+ net:
+ codec_list:
+ - fr1
diff --git a/example/scenarios/cfg-codec-fr2.conf b/example/scenarios/cfg-codec-fr2.conf
new file mode 100644
index 0000000..1ad63de
--- /dev/null
+++ b/example/scenarios/cfg-codec-fr2.conf
@@ -0,0 +1,5 @@
+config:
+ bsc:
+ net:
+ codec_list:
+ - fr2
diff --git a/example/scenarios/cfg-codec-fr3.conf b/example/scenarios/cfg-codec-fr3.conf
new file mode 100644
index 0000000..fcc47cc
--- /dev/null
+++ b/example/scenarios/cfg-codec-fr3.conf
@@ -0,0 +1,5 @@
+config:
+ bsc:
+ net:
+ codec_list:
+ - fr3
diff --git a/example/scenarios/cfg-codec-hr1.conf b/example/scenarios/cfg-codec-hr1.conf
new file mode 100644
index 0000000..50a092c
--- /dev/null
+++ b/example/scenarios/cfg-codec-hr1.conf
@@ -0,0 +1,5 @@
+config:
+ bsc:
+ net:
+ codec_list:
+ - hr1
diff --git a/example/scenarios/cfg-codec-hr3.conf b/example/scenarios/cfg-codec-hr3.conf
new file mode 100644
index 0000000..c54b924
--- /dev/null
+++ b/example/scenarios/cfg-codec-hr3.conf
@@ -0,0 +1,5 @@
+config:
+ bsc:
+ net:
+ codec_list:
+ - hr3
diff --git a/example/scenarios/cipher-a50.conf b/example/scenarios/cipher-a50.conf
new file mode 100644
index 0000000..107172b
--- /dev/null
+++ b/example/scenarios/cipher-a50.conf
@@ -0,0 +1,7 @@
+resources:
+ bts:
+ - ciphers:
+ - a5_0
+ modem:
+ - ciphers:
+ - a5_0
diff --git a/example/scenarios/cipher-a51.conf b/example/scenarios/cipher-a51.conf
new file mode 100644
index 0000000..8539867
--- /dev/null
+++ b/example/scenarios/cipher-a51.conf
@@ -0,0 +1,7 @@
+resources:
+ bts:
+ - ciphers:
+ - a5_1
+ modem:
+ - ciphers:
+ - a5_1
diff --git a/example/scenarios/mfeature-gprs.conf b/example/scenarios/mfeature-gprs.conf
new file mode 100644
index 0000000..c520a90
--- /dev/null
+++ b/example/scenarios/mfeature-gprs.conf
@@ -0,0 +1,4 @@
+resources:
+ modem:
+ - features:
+ - 'gprs'
diff --git a/example/scenarios/mod-bts0-chanallocdescend.conf b/example/scenarios/mod-bts0-chanallocdescend.conf
new file mode 100644
index 0000000..78cd02c
--- /dev/null
+++ b/example/scenarios/mod-bts0-chanallocdescend.conf
@@ -0,0 +1,3 @@
+modifiers:
+ bts:
+ - channel_allocator: descending
diff --git a/example/scenarios/mod-bts0-dynts-ipa.conf b/example/scenarios/mod-bts0-dynts-ipa.conf
new file mode 100644
index 0000000..fcad2c7
--- /dev/null
+++ b/example/scenarios/mod-bts0-dynts-ipa.conf
@@ -0,0 +1,13 @@
+modifiers:
+ bts:
+ - num_trx: 1
+ trx_list:
+ - timeslot_list:
+ - phys_chan_config: 'CCCH+SDCCH4'
+ - phys_chan_config: 'SDCCH8'
+ - phys_chan_config: 'TCH/F_PDCH'
+ - phys_chan_config: 'TCH/F_PDCH'
+ - phys_chan_config: 'TCH/F_PDCH'
+ - phys_chan_config: 'TCH/F_PDCH'
+ - phys_chan_config: 'TCH/F_PDCH'
+ - phys_chan_config: 'TCH/F_PDCH'
diff --git a/example/scenarios/mod-bts0-dynts-osmo.conf b/example/scenarios/mod-bts0-dynts-osmo.conf
new file mode 100644
index 0000000..26345bd
--- /dev/null
+++ b/example/scenarios/mod-bts0-dynts-osmo.conf
@@ -0,0 +1,13 @@
+modifiers:
+ bts:
+ - num_trx: 1
+ trx_list:
+ - timeslot_list:
+ - phys_chan_config: 'CCCH+SDCCH4'
+ - phys_chan_config: 'SDCCH8'
+ - phys_chan_config: 'TCH/F_TCH/H_PDCH'
+ - phys_chan_config: 'TCH/F_TCH/H_PDCH'
+ - phys_chan_config: 'TCH/F_TCH/H_PDCH'
+ - phys_chan_config: 'TCH/F_TCH/H_PDCH'
+ - phys_chan_config: 'TCH/F_TCH/H_PDCH'
+ - phys_chan_config: 'TCH/F_TCH/H_PDCH'
diff --git a/example/scenarios/mod-bts0-dynts67-ipa.conf b/example/scenarios/mod-bts0-dynts67-ipa.conf
new file mode 100644
index 0000000..087b3b0
--- /dev/null
+++ b/example/scenarios/mod-bts0-dynts67-ipa.conf
@@ -0,0 +1,13 @@
+modifiers:
+ bts:
+ - num_trx: 1
+ trx_list:
+ - timeslot_list:
+ - phys_chan_config: 'CCCH+SDCCH4'
+ - phys_chan_config: 'SDCCH8'
+ - phys_chan_config: 'SDCCH8'
+ - phys_chan_config: 'SDCCH8'
+ - phys_chan_config: 'SDCCH8'
+ - phys_chan_config: 'SDCCH8'
+ - phys_chan_config: 'TCH/F_PDCH'
+ - phys_chan_config: 'TCH/F_PDCH'
diff --git a/example/scenarios/mod-bts0-dynts67-osmo.conf b/example/scenarios/mod-bts0-dynts67-osmo.conf
new file mode 100644
index 0000000..3153120
--- /dev/null
+++ b/example/scenarios/mod-bts0-dynts67-osmo.conf
@@ -0,0 +1,13 @@
+modifiers:
+ bts:
+ - num_trx: 1
+ trx_list:
+ - timeslot_list:
+ - phys_chan_config: 'CCCH+SDCCH4'
+ - phys_chan_config: 'SDCCH8'
+ - phys_chan_config: 'SDCCH8'
+ - phys_chan_config: 'SDCCH8'
+ - phys_chan_config: 'SDCCH8'
+ - phys_chan_config: 'SDCCH8'
+ - phys_chan_config: 'TCH/F_TCH/H_PDCH'
+ - phys_chan_config: 'TCH/F_TCH/H_PDCH'
diff --git a/example/scenarios/mod-bts0-egprs.conf b/example/scenarios/mod-bts0-egprs.conf
new file mode 100644
index 0000000..542dd9d
--- /dev/null
+++ b/example/scenarios/mod-bts0-egprs.conf
@@ -0,0 +1,3 @@
+modifiers:
+ bts:
+ - gprs_mode: egprs
diff --git a/example/scenarios/mod-bts0-numtrx2.conf b/example/scenarios/mod-bts0-numtrx2.conf
new file mode 100644
index 0000000..eb24fd3
--- /dev/null
+++ b/example/scenarios/mod-bts0-numtrx2.conf
@@ -0,0 +1,3 @@
+modifiers:
+ bts:
+ - num_trx: 2
diff --git a/example/scenarios/mod-bts0-ts-tchf.conf b/example/scenarios/mod-bts0-ts-tchf.conf
new file mode 100644
index 0000000..527b620
--- /dev/null
+++ b/example/scenarios/mod-bts0-ts-tchf.conf
@@ -0,0 +1,13 @@
+modifiers:
+ bts:
+ - num_trx: 1
+ trx_list:
+ - timeslot_list:
+ - phys_chan_config: 'CCCH+SDCCH4'
+ - phys_chan_config: 'SDCCH8'
+ - phys_chan_config: 'TCH/F'
+ - phys_chan_config: 'TCH/F'
+ - phys_chan_config: 'TCH/F'
+ - phys_chan_config: 'TCH/F'
+ - phys_chan_config: 'TCH/F'
+ - phys_chan_config: 'TCH/F'
diff --git a/example/scenarios/mod-bts0-ts-tchh.conf b/example/scenarios/mod-bts0-ts-tchh.conf
new file mode 100644
index 0000000..0709bc4
--- /dev/null
+++ b/example/scenarios/mod-bts0-ts-tchh.conf
@@ -0,0 +1,13 @@
+modifiers:
+ bts:
+ - num_trx: 1
+ trx_list:
+ - timeslot_list:
+ - phys_chan_config: 'CCCH+SDCCH4'
+ - phys_chan_config: 'SDCCH8'
+ - phys_chan_config: 'TCH/H'
+ - phys_chan_config: 'TCH/H'
+ - phys_chan_config: 'TCH/H'
+ - phys_chan_config: 'TCH/H'
+ - phys_chan_config: 'TCH/H'
+ - phys_chan_config: 'TCH/H'
diff --git a/example/scenarios/nanobts.conf b/example/scenarios/nanobts.conf
new file mode 100644
index 0000000..7322288
--- /dev/null
+++ b/example/scenarios/nanobts.conf
@@ -0,0 +1,3 @@
+resources:
+ bts:
+ - type: nanobts
diff --git a/example/scenarios/octphy.conf b/example/scenarios/octphy.conf
new file mode 100644
index 0000000..3a419e8
--- /dev/null
+++ b/example/scenarios/octphy.conf
@@ -0,0 +1,3 @@
+resources:
+ bts:
+ - type: osmo-bts-octphy
diff --git a/example/scenarios/sysmo.conf b/example/scenarios/sysmo.conf
new file mode 100644
index 0000000..624758b
--- /dev/null
+++ b/example/scenarios/sysmo.conf
@@ -0,0 +1,3 @@
+resources:
+ bts:
+ - type: osmo-bts-sysmo
diff --git a/example/scenarios/trx-b200.conf b/example/scenarios/trx-b200.conf
new file mode 100644
index 0000000..2bad7e8
--- /dev/null
+++ b/example/scenarios/trx-b200.conf
@@ -0,0 +1,4 @@
+resources:
+ bts:
+ - label: Ettus B200
+ type: osmo-bts-trx
diff --git a/example/scenarios/trx-lms.conf b/example/scenarios/trx-lms.conf
new file mode 100644
index 0000000..15802db
--- /dev/null
+++ b/example/scenarios/trx-lms.conf
@@ -0,0 +1,4 @@
+resources:
+ bts:
+ - label: LimeSDR-USB
+ type: osmo-bts-trx
diff --git a/example/scenarios/trx-sysmocell5000.conf b/example/scenarios/trx-sysmocell5000.conf
new file mode 100644
index 0000000..62e9a3c
--- /dev/null
+++ b/example/scenarios/trx-sysmocell5000.conf
@@ -0,0 +1,4 @@
+resources:
+ bts:
+ - label: sysmoCell 5000
+ type: osmo-bts-trx
diff --git a/example/scenarios/trx-umtrx.conf b/example/scenarios/trx-umtrx.conf
new file mode 100644
index 0000000..c13fa49
--- /dev/null
+++ b/example/scenarios/trx-umtrx.conf
@@ -0,0 +1,4 @@
+resources:
+ bts:
+ - label: UmTRX
+ type: osmo-bts-trx
diff --git a/example/scenarios/trx.conf b/example/scenarios/trx.conf
new file mode 100644
index 0000000..f1d6d13
--- /dev/null
+++ b/example/scenarios/trx.conf
@@ -0,0 +1,3 @@
+resources:
+ bts:
+ - type: osmo-bts-trx
diff --git a/install/ofono.service b/install/ofono.service
new file mode 100644
index 0000000..0aa9fbe
--- /dev/null
+++ b/install/ofono.service
@@ -0,0 +1,11 @@
+# systemd service file for the ofono daemon
+[Unit]
+Description=oFono
+
+[Service]
+ExecStart=/usr/local/src/ofono/src/ofonod -n
+Restart=always
+StartLimitInterval=0
+
+[Install]
+WantedBy=multi-user.target
diff --git a/install/org.ofono.conf b/install/org.ofono.conf
new file mode 100644
index 0000000..8b13c75
--- /dev/null
+++ b/install/org.ofono.conf
@@ -0,0 +1,28 @@
+<!-- This configuration file specifies the required security policies
+ for oFono core daemon to work. It lives in /etc/dbus-1/system.d/ -->
+
+<!DOCTYPE busconfig PUBLIC "-//freedesktop//DTD D-BUS Bus Configuration 1.0//EN"
+ "http://www.freedesktop.org/standards/dbus/1.0/busconfig.dtd">
+<busconfig>
+
+ <!-- ../system.conf has denied everything, so we just punch some holes -->
+
+ <policy user="root">
+ <allow own="org.ofono"/>
+ <allow send_destination="org.ofono"/>
+ <allow send_interface="org.ofono.SimToolkitAgent"/>
+ <allow send_interface="org.ofono.PushNotificationAgent"/>
+ <allow send_interface="org.ofono.SmartMessagingAgent"/>
+ <allow send_interface="org.ofono.PositioningRequestAgent"/>
+ <allow send_interface="org.ofono.HandsfreeAudioAgent"/>
+ </policy>
+
+ <policy at_console="true">
+ <allow send_destination="org.ofono"/>
+ </policy>
+
+ <policy context="default">
+ <deny send_destination="org.ofono"/>
+ </policy>
+
+</busconfig>
diff --git a/install/osmo-gsm-tester-limits.conf b/install/osmo-gsm-tester-limits.conf
new file mode 100644
index 0000000..1fb0738
--- /dev/null
+++ b/install/osmo-gsm-tester-limits.conf
@@ -0,0 +1,4 @@
+# place this file in /etc/security/limits.d to allow core files when a program
+# crashes; for osmo-gsm-tester.
+root - core unlimited
+* - core unlimited
diff --git a/install/osmo-gsm-tester.service b/install/osmo-gsm-tester.service
new file mode 100644
index 0000000..02225d7
--- /dev/null
+++ b/install/osmo-gsm-tester.service
@@ -0,0 +1,11 @@
+# systemd service file for the osmo-gsm-tester daemon
+[Unit]
+Description=Osmocom GSM Tester
+
+[Service]
+ExecStart=/usr/local/src/osmo-gsm-tester/osmo-gsm-tester
+Restart=on-abort
+StartLimitInterval=0
+
+[Install]
+WantedBy=multi-user.target
diff --git a/selftest/Makefile b/selftest/Makefile
new file mode 100644
index 0000000..f0c8c69
--- /dev/null
+++ b/selftest/Makefile
@@ -0,0 +1,12 @@
+.PHONY: check update
+
+check: set_pythonpath
+ ./all_tests.py
+
+update:
+ ./all_tests.py -u
+
+set_pythonpath:
+ echo "export PYTHONPATH=\"$(PWD)/../src\"" > set_pythonpath
+
+# vim: noexpandtab tabstop=8 shiftwidth=8
diff --git a/selftest/_prep.py b/selftest/_prep.py
new file mode 100644
index 0000000..3ef6e81
--- /dev/null
+++ b/selftest/_prep.py
@@ -0,0 +1,17 @@
+import sys, os
+
+script_dir = sys.path[0]
+top_dir = os.path.join(script_dir, '..')
+src_dir = os.path.join(top_dir, 'src')
+
+# to find the osmo_gsm_tester py module
+sys.path.append(src_dir)
+
+from osmo_gsm_tester import log
+
+log.TestsTarget()
+log.set_all_levels(log.L_DBG)
+
+if '-v' in sys.argv:
+ log.style_change(trace=True)
+
diff --git a/selftest/all_tests.py b/selftest/all_tests.py
new file mode 100755
index 0000000..5c1ce59
--- /dev/null
+++ b/selftest/all_tests.py
@@ -0,0 +1,123 @@
+#!/usr/bin/env python3
+
+import os
+import sys
+import subprocess
+import time
+import difflib
+import argparse
+import re
+
+parser = argparse.ArgumentParser()
+parser.add_argument('testdir_or_test', nargs='*',
+ help='subdir name or test script name')
+parser.add_argument('-u', '--update', action='store_true',
+ help='Update test expectations instead of verifying them')
+args = parser.parse_args()
+
+def run_test(path):
+ print(path)
+ p = subprocess.Popen(path, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ o,e = p.communicate()
+ while True:
+ retval = p.poll()
+ if retval is not None:
+ break;
+ p.kill()
+ time.sleep(.1)
+ return retval, o.decode('utf-8'), e.decode('utf-8')
+
+def udiff(expect, got, expect_path):
+ expect = expect.splitlines(1)
+ got = got.splitlines(1)
+ for line in difflib.unified_diff(expect, got,
+ fromfile=expect_path, tofile='got'):
+ sys.stderr.write(line)
+ if not line.endswith('\n'):
+ sys.stderr.write('[no-newline]\n')
+
+def verify_output(got, expect_file, update=False):
+ if os.path.isfile(expect_file):
+ ign_file = expect_file + '.ign'
+ if os.path.isfile(ign_file):
+ with open(ign_file, 'r') as f:
+ ign_rules = f.readlines()
+ for ign_rule in ign_rules:
+ if not ign_rule:
+ continue
+ if '\t' in ign_rule:
+ ign_rule, repl = ign_rule.split('\t')
+ repl = repl.strip()
+ else:
+ repl = '*'
+ ir = re.compile(ign_rule)
+ got = repl.join(ir.split(got))
+
+ if update:
+ with open(expect_file, 'w') as f:
+ f.write(got)
+ return True
+
+ with open(expect_file, 'r') as f:
+ expect = f.read()
+
+ if expect != got:
+ udiff(expect, got, expect_file)
+ sys.stderr.write('output mismatch: %r\n'
+ % os.path.basename(expect_file))
+ return False
+ return True
+
+
+script_dir = sys.path[0]
+
+tests = []
+for f in os.listdir(script_dir):
+ file_path = os.path.join(script_dir, f)
+ if not os.path.isfile(file_path):
+ continue
+
+ if not (file_path.endswith('_test.py') or file_path.endswith('_test.sh')):
+ continue
+ tests.append(file_path)
+
+ran = []
+errors = []
+
+for test in sorted(tests):
+
+ if args.testdir_or_test:
+ if not any([t in test for t in args.testdir_or_test]):
+ continue
+
+ ran.append(test)
+
+ success = True
+
+ name, ext = os.path.splitext(test)
+ ok_file = name + '.ok'
+ err_file = name + '.err'
+
+ rc, out, err = run_test(test)
+
+ if rc != 0:
+ sys.stderr.write('%r: returned %d\n' % (os.path.basename(test), rc))
+ success = False
+
+ if not verify_output(out, ok_file, args.update):
+ success = False
+ if not verify_output(err, err_file, args.update):
+ success = False
+
+ if not success:
+ sys.stderr.write('\nTest failed: %r\n\n' % os.path.basename(test))
+ errors.append(test)
+
+if errors:
+ print('%d of %d TESTS FAILED:\n %s' % (len(errors), len(ran), '\n '.join(errors)))
+ exit(1)
+
+print('%d tests ok' % len(ran))
+exit(0)
+
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/selftest/cdf_test.ok b/selftest/cdf_test.ok
new file mode 100644
index 0000000..aa753e4
--- /dev/null
+++ b/selftest/cdf_test.ok
@@ -0,0 +1,57 @@
+Testing the immediate CDF
+Done True
+1 1.0 False
+Testing linear with duration
+Done False
+0.0 0.0 True
+Done False
+0.2 0.2 True
+Done False
+0.4 0.4 True
+Done False
+0.6 0.6 True
+Done False
+0.8 0.8 True
+Done True
+1.0 1.0 True
+Testing linear with duration scaled
+Done False
+0.0 0.0 True
+0.0 0.0 True
+Done False
+0.2 0.2 True
+200 200 True
+Done False
+0.4 0.4 True
+400 400 True
+Done False
+0.6 0.6 True
+600 600 True
+Done False
+0.8 0.8 True
+800 800 True
+Done True
+1.0 1.0 True
+100 100 True
+Testing in_out
+0.5 0.5 True
+0.87 0.87 True
+0.9 0.9 True
+0.95 0.95 True
+1.0 1.0 True
+Testing ease In and Out
+Done False
+0.0 0.0 True
+0.0 0.0 True
+Done False
+5.0 5.0 True
+0.1 0.1 True
+Done False
+10.0 10.0 True
+0.5 0.5 True
+Done False
+15.0 15.0 True
+0.8 0.8 True
+Done True
+20.0 20 True
+1.0 1.0 True
diff --git a/selftest/cdf_test.py b/selftest/cdf_test.py
new file mode 100755
index 0000000..8d837c1
--- /dev/null
+++ b/selftest/cdf_test.py
@@ -0,0 +1,75 @@
+#!/usr/bin/env python3
+
+import _prep
+
+from osmo_ms_driver import cdf
+from datetime import timedelta
+
+def print_fuzzy_compare(want, expe, len=3):
+ want_str = str(want)[0:len]
+ expe_str = str(expe)[0:len]
+ print(want_str, expe_str, want_str == expe_str)
+
+
+def check_steps(a, steps, fun):
+ print("Done", a.is_done())
+ for step in steps:
+ # Verify we can step
+
+ # Compare and step once
+ fun(a, step)
+ if a.is_done():
+ break
+ a.step_once()
+ print("Done", a.is_done())
+
+def compare_value(a, step):
+ print_fuzzy_compare(a.current_value(), step)
+
+def compare_scaled_value(a, val):
+ (step, scale) = val
+ print_fuzzy_compare(a.current_value(), step)
+ print_fuzzy_compare(a.current_scaled_value(), scale)
+
+def compare_x_value(a, val):
+ (x, step) = val
+ print(a._x, x, x == a._x)
+ print_fuzzy_compare(a.current_value(), step)
+
+def testImmediate():
+ print("Testing the immediate CDF")
+ a = cdf.immediate()
+ print("Done", a.is_done())
+ print_fuzzy_compare(a.current_value(), 1.0)
+
+
+def testLinearWithDuration():
+ print("Testing linear with duration")
+ a = cdf.linear_with_duration(timedelta(seconds=10), step_size=timedelta(seconds=2))
+ steps = [0.0, 0.2, 0.4, 0.6, 0.8, 1.0]
+ check_steps(a, steps, compare_value)
+
+ print("Testing linear with duration scaled")
+ a = cdf.linear_with_duration(timedelta(seconds=10), step_size=timedelta(seconds=2))
+ a.set_target(1000)
+ steps = [(0.0, 0.0), (0.2, 200), (0.4, 400), (0.6, 600), (0.8, 800), (1.0, 10000)]
+ check_steps(a, steps, compare_scaled_value)
+
+def testInOut():
+ print("Testing in_out")
+ print_fuzzy_compare(cdf._in_out(0.5), 0.5, 3)
+ print_fuzzy_compare(cdf._in_out(0.75), 0.875, 4)
+ print_fuzzy_compare(cdf._in_out(0.8), 0.92, 3)
+ print_fuzzy_compare(cdf._in_out(0.85), 0.955, 4)
+ print_fuzzy_compare(cdf._in_out(1.0), 1.0, 3)
+
+def testEaseInOutDuration():
+ print("Testing ease In and Out")
+ a = cdf.ease_in_out_duration(duration=timedelta(seconds=20), step_size=timedelta(seconds=5))
+ steps = [(0.0, 0.0), (5.0, 0.125), (10.0, 0.5), (15.0, 0.875), (20, 1.0)]
+ check_steps(a, steps, compare_x_value)
+
+testImmediate()
+testLinearWithDuration()
+testInOut()
+testEaseInOutDuration()
diff --git a/selftest/conf/paths.conf b/selftest/conf/paths.conf
new file mode 100644
index 0000000..0b2d035
--- /dev/null
+++ b/selftest/conf/paths.conf
@@ -0,0 +1,2 @@
+state_dir: ./test_work/state_dir
+suites_dir: ./suite_test
diff --git a/selftest/conf/resources.conf b/selftest/conf/resources.conf
new file mode 100644
index 0000000..84d0ab9
--- /dev/null
+++ b/selftest/conf/resources.conf
@@ -0,0 +1,103 @@
+# all hardware and interfaces available to this osmo-gsm-tester
+
+ip_address:
+- addr: 10.42.42.2
+- addr: 10.42.42.3
+- addr: 10.42.42.4
+- addr: 10.42.42.5
+- addr: 10.42.42.6
+
+bts:
+- label: sysmoBTS 1002
+ type: osmo-bts-sysmo
+ ipa_unit_id: 1
+ addr: 10.42.42.114
+ band: GSM-1800
+ direct_pcu: true
+ ciphers:
+ - 'a5_0'
+ - 'a5_1'
+
+- label: Ettus B200
+ type: osmo-bts-trx
+ ipa_unit_id: 6
+ addr: 10.42.42.50
+ band: GSM-1800
+ osmo_trx:
+ launch_trx: true
+ clock_reference: external
+ ciphers:
+ - 'a5_0'
+ - 'a5_1'
+
+- label: sysmoCell 5000
+ type: osmo-bts-trx
+ ipa_unit_id: 7
+ addr: 10.42.42.51
+ band: GSM-1800
+ osmo_trx:
+ launch_trx: false
+ clock_reference: external
+ trx_ip: 10.42.42.112
+ ciphers:
+ - 'a5_0'
+ - 'a5_1'
+
+arfcn:
+ - arfcn: 512
+ band: GSM-1800
+ - arfcn: 514
+ band: GSM-1800
+ - arfcn: 516
+ band: GSM-1800
+ - arfcn: 518
+ band: GSM-1800
+ - arfcn: 520
+ band: GSM-1800
+ - arfcn: 540
+ band: GSM-1900
+ - arfcn: 542
+ band: GSM-1900
+ - arfcn: 544
+ band: GSM-1900
+ - arfcn: 546
+ band: GSM-1900
+ - arfcn: 548
+ band: GSM-1900
+
+modem:
+- label: sierra_1
+ path: '/sierra_1'
+ imsi: '901700000009031'
+ ki: '80A37E6FDEA931EAC92FFA5F671EFEAD'
+ auth_algo: 'comp128v1'
+ ciphers:
+ - 'a5_0'
+ - 'a5_1'
+
+- label: sierra_2
+ path: '/sierra_2'
+ imsi: '901700000009029'
+ ki: '00969E283349D354A8239E877F2E0866'
+ auth_algo: 'comp128v1'
+ ciphers:
+ - 'a5_0'
+ - 'a5_1'
+
+- label: gobi_0
+ path: '/gobi_0'
+ imsi: '901700000009030'
+ ki: 'BB70807226393CDBAC8DD3439FF54252'
+ auth_algo: 'comp128v1'
+ ciphers:
+ - 'a5_0'
+ - 'a5_1'
+
+- label: gobi_3
+ path: '/gobi_3'
+ imsi: '901700000009032'
+ ki: '2F70DCA43C45ACB97E947FDD0C7CA30A'
+ auth_algo: 'comp128v1'
+ ciphers:
+ - 'a5_0'
+ - 'a5_1'
diff --git a/selftest/config_test.err b/selftest/config_test.err
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/selftest/config_test.err
diff --git a/selftest/config_test.ok b/selftest/config_test.ok
new file mode 100644
index 0000000..e1fb87d
--- /dev/null
+++ b/selftest/config_test.ok
@@ -0,0 +1,112 @@
+{'addr': ['0.0.0.0',
+ '255.255.255.255',
+ '10.11.12.13',
+ '10.0.99.1',
+ '192.168.0.14'],
+ 'bts': [{'addr': '10.42.42.114',
+ 'name': 'sysmoBTS 1002',
+ 'trx': [{'band': 'GSM-1800',
+ 'timeslots': ['CCCH+SDCCH4',
+ 'SDCCH8',
+ 'TCH/F_TCH/H_PDCH',
+ 'TCH/F_TCH/H_PDCH',
+ 'TCH/F_TCH/H_PDCH',
+ 'TCH/F_TCH/H_PDCH',
+ 'TCH/F_TCH/H_PDCH',
+ 'TCH/F_TCH/H_PDCH']},
+ {'band': 'GSM-1900',
+ 'timeslots': ['SDCCH8',
+ 'PDCH',
+ 'PDCH',
+ 'PDCH',
+ 'PDCH',
+ 'PDCH',
+ 'PDCH',
+ 'PDCH']}],
+ 'type': 'sysmobts'}],
+ 'hwaddr': ['ca:ff:ee:ba:aa:be',
+ '00:00:00:00:00:00',
+ 'CA:FF:EE:BA:AA:BE',
+ 'cA:Ff:eE:Ba:aA:Be',
+ 'ff:ff:ff:ff:ff:ff'],
+ 'imsi': ['012345', '012345678', '012345678912345'],
+ 'ki': ['000102030405060708090a0b0c0d0e0f', '000102030405060708090a0b0c0d0e0f'],
+ 'modems': [{'dbus_path': '/sierra_0',
+ 'imsi': '901700000009001',
+ 'ki': 'D620F48487B1B782DA55DF6717F08FF9',
+ 'msisdn': '7801'},
+ {'dbus_path': '/sierra_1',
+ 'imsi': '901700000009002',
+ 'ki': 'D620F48487B1B782DA55DF6717F08FF9',
+ 'msisdn': '7802'}]}
+- expect validation success:
+Validation: OK
+- unknown item:
+--- -: ERR: ValueError: config item not known: 'bts[].unknown_item'
+Validation: Error
+- wrong type modems[].imsi:
+--- -: ERR: ValueError: config item is dict but should be a leaf node of type 'imsi': 'modems[].imsi'
+Validation: Error
+- invalid key with space:
+--- -: ERR: ValueError: invalid config key: 'imsi '
+Validation: Error
+- list instead of dict:
+--- -: ERR: ValueError: config item not known: 'a_dict[]'
+Validation: Error
+- unknown band:
+--- bts[].trx[].band: ERR: ValueError: Unknown GSM band: 'what'
+Validation: Error
+- invalid v4 addrs:
+--- addr[]: ERR: ValueError: Invalid IPv4 address: '1.2.3'
+Validation: Error
+--- addr[]: ERR: ValueError: Invalid IPv4 address: '1.2.3 .4'
+Validation: Error
+--- addr[]: ERR: ValueError: Invalid IPv4 address: '91.2.3'
+Validation: Error
+--- addr[]: ERR: ValueError: Invalid IPv4 address: 'go away'
+Validation: Error
+--- addr[]: ERR: ValueError: Invalid IPv4 address: ''
+Validation: Error
+--- addr[]: ERR: ValueError: Invalid IPv4 address: None
+Validation: Error
+- invalid hw addrs:
+--- hwaddr[]: ERR: ValueError: Invalid hardware address: '1.2.3'
+Validation: Error
+--- hwaddr[]: ERR: ValueError: Invalid hardware address: '0b:0c:0d:0e:0f:0g'
+Validation: Error
+--- hwaddr[]: ERR: ValueError: Invalid hardware address: '0b:0c:0d:0e : 0f:0f'
+Validation: Error
+--- hwaddr[]: ERR: ValueError: Invalid hardware address: 'go away'
+Validation: Error
+--- hwaddr[]: ERR: ValueError: Invalid hardware address: ''
+Validation: Error
+--- hwaddr[]: ERR: ValueError: Invalid hardware address: None
+Validation: Error
+- invalid imsis:
+--- imsi[]: ERR: ValueError: Invalid IMSI: '99999999x9'
+Validation: Error
+--- imsi[]: ERR: ValueError: Invalid IMSI: '123 456 789 123'
+Validation: Error
+--- imsi[]: ERR: ValueError: Invalid IMSI: 'go away'
+Validation: Error
+--- imsi[]: ERR: ValueError: Invalid IMSI: ''
+Validation: Error
+--- imsi[]: ERR: ValueError: Invalid IMSI: None
+Validation: Error
+- Combine dicts:
+- Combine dicts 2:
+- Combine lists:
+- Combine lists 2:
+- Combine lists 3:
+ValueError expected
+- Combine lists 4:
+ValueError expected
+- Combine lists 5:
+ValueError expected
+- Combine lists 6:
+- Combine lists 7:
+- Combine lists 8:
+- Combine lists 9:
+- Combine lists 10:
+- Combine lists 13:
+- Combine lists 14:
diff --git a/selftest/config_test.py b/selftest/config_test.py
new file mode 100755
index 0000000..fa86adc
--- /dev/null
+++ b/selftest/config_test.py
@@ -0,0 +1,216 @@
+#!/usr/bin/env python3
+
+import _prep
+
+import sys
+import os
+import io
+import pprint
+import copy
+
+from osmo_gsm_tester import config, log, schema
+
+example_config_file = 'test.cfg'
+example_config = os.path.join(_prep.script_dir, 'config_test', example_config_file)
+cfg = config.read(example_config)
+
+pprint.pprint(cfg, width=81)
+
+test_schema = {
+ 'modems[].dbus_path': schema.STR,
+ 'modems[].msisdn': schema.STR,
+ 'modems[].imsi': schema.IMSI,
+ 'modems[].ki': schema.STR,
+ 'bts[].name' : schema.STR,
+ 'bts[].type' : schema.STR,
+ 'bts[].addr' : schema.STR,
+ 'bts[].trx[].timeslots[]' : schema.STR,
+ 'bts[].trx[].band' : schema.BAND,
+ 'a_dict.foo' : schema.INT,
+ 'addr[]' : schema.IPV4,
+ 'hwaddr[]' : schema.HWADDR,
+ 'imsi[]' : schema.IMSI,
+ 'ki[]' : schema.KI,
+ }
+
+def val(which):
+ try:
+ schema.validate(which, test_schema)
+ print('Validation: OK')
+ except ValueError:
+ log.log_exn()
+ print('Validation: Error')
+
+print('- expect validation success:')
+val(cfg)
+
+print('- unknown item:')
+c = copy.deepcopy(cfg)
+c['bts'][0]['unknown_item'] = 'no'
+val(c)
+
+print('- wrong type modems[].imsi:')
+c = copy.deepcopy(cfg)
+c['modems'][0]['imsi'] = {'no':'no'}
+val(c)
+
+print('- invalid key with space:')
+c = copy.deepcopy(cfg)
+c['modems'][0]['imsi '] = '12345'
+val(c)
+
+print('- list instead of dict:')
+c = copy.deepcopy(cfg)
+c['a_dict'] = [ 1, 2, 3 ]
+val(c)
+
+print('- unknown band:')
+c = copy.deepcopy(cfg)
+c['bts'][0]['trx'][0]['band'] = 'what'
+val(c)
+
+print('- invalid v4 addrs:')
+c = copy.deepcopy(cfg)
+c['addr'][3] = '1.2.3'
+val(c)
+c['addr'][3] = '1.2.3 .4'
+val(c)
+c['addr'][3] = '91.2.3'
+val(c)
+c['addr'][3] = 'go away'
+val(c)
+c['addr'][3] = ''
+val(c)
+c['addr'][3] = None
+val(c)
+
+print('- invalid hw addrs:')
+c = copy.deepcopy(cfg)
+c['hwaddr'][3] = '1.2.3'
+val(c)
+c['hwaddr'][3] = '0b:0c:0d:0e:0f:0g'
+val(c)
+c['hwaddr'][3] = '0b:0c:0d:0e : 0f:0f'
+val(c)
+c['hwaddr'][3] = 'go away'
+val(c)
+c['hwaddr'][3] = ''
+val(c)
+c['hwaddr'][3] = None
+val(c)
+
+print('- invalid imsis:')
+c = copy.deepcopy(cfg)
+c['imsi'][2] = '99999999x9'
+val(c)
+c['imsi'][2] = '123 456 789 123'
+val(c)
+c['imsi'][2] = 'go away'
+val(c)
+c['imsi'][2] = ''
+val(c)
+c['imsi'][2] = None
+val(c)
+
+print('- Combine dicts:')
+a = {'times': '2'}
+b = {'type': 'osmo-bts-trx'}
+res = {'times': '2', 'type': 'osmo-bts-trx'}
+config.combine(a, b)
+assert a == res
+
+print('- Combine dicts 2:')
+a = {'times': '1', 'label': 'foo', 'type': 'osmo-bts-trx'}
+b = {'type': 'osmo-bts-trx'}
+res = {'times': '1', 'label': 'foo', 'type': 'osmo-bts-trx'}
+config.combine(a, b)
+assert a == res
+
+print('- Combine lists:')
+a = { 'a_list': ['x', 'y', 'z'] }
+b = { 'a_list': ['y'] }
+res = {'a_list': ['x', 'y', 'z']}
+config.combine(a, b)
+assert a == res
+
+print('- Combine lists 2:')
+a = { 'a_list': ['x'] }
+b = { 'a_list': ['w', 'u', 'x', 'y', 'z'] }
+res = {'a_list': ['x', 'w', 'u', 'y', 'z']}
+config.combine(a, b)
+assert a == res
+
+print('- Combine lists 3:')
+a = { 'a_list': ['x', 3] }
+b = { 'a_list': ['y', 'z'] }
+try:
+ config.combine(a, b)
+except ValueError:
+ print("ValueError expected")
+
+print('- Combine lists 4:')
+a = { 'a_list': [2, 3] }
+b = { 'a_list': ['y', 'z'] }
+try:
+ config.combine(a, b)
+except ValueError:
+ print("ValueError expected")
+
+print('- Combine lists 5:')
+a = { 'a_list': [{}, {}] }
+b = { 'a_list': ['y', 'z'] }
+try:
+ config.combine(a, b)
+except ValueError:
+ print("ValueError expected")
+
+print('- Combine lists 6:')
+a = { 'a_list': [{}, {}] }
+b = { 'a_list': [{}] }
+res = {'a_list': [{}, {}]}
+config.combine(a, b)
+assert a == res
+
+print('- Combine lists 7:')
+a = { 'times': '1', 'label': 'foo', 'trx': [{'nominal power': '10'}, {'nominal power': '12'}] }
+b = { 'type': 'osmo-bts-trx', 'trx': [{'nominal power': '10'}, {'nominal power': '12'}] }
+res = {'times': '1', 'label': 'foo', 'trx': [{'nominal power': '10'}, {'nominal power': '12'}], 'type': 'osmo-bts-trx'}
+config.combine(a, b)
+assert a == res
+
+print('- Combine lists 8:')
+a = { 'times': '1', 'label': 'foo', 'trx': [{'nominal power': '10'}] }
+b = { 'type': 'osmo-bts-trx', 'trx': [{'nominal power': '10'}, {'nominal power': '12'}] }
+res = {'times': '1', 'label': 'foo', 'trx': [{'nominal power': '10'}, {'nominal power': '12'}], 'type': 'osmo-bts-trx'}
+config.combine(a, b)
+assert a == res
+
+print('- Combine lists 9:')
+a = { 'times': '1', 'label': 'foo', 'trx': [{'nominal power': '10'}, {'nominal power': '12'}] }
+b = { 'type': 'osmo-bts-trx', 'trx': [{'nominal power': '10'}] }
+res = {'times': '1', 'label': 'foo', 'trx': [{'nominal power': '10'}, {'nominal power': '12'}], 'type': 'osmo-bts-trx'}
+config.combine(a, b)
+assert a == res
+
+print('- Combine lists 10:')
+a = { 'times': '1', 'label': 'foo', 'trx': [{'nominal power': '10'}, {'nominal power': '12'}] }
+b = { 'type': 'osmo-bts-trx', 'trx': [{}, {'nominal power': '12'}] }
+res = {'times': '1', 'label': 'foo', 'trx': [{'nominal power': '10'}, {'nominal power': '12'}], 'type': 'osmo-bts-trx'}
+config.combine(a, b)
+assert a == res
+
+print('- Combine lists 13:')
+a = { 'times': '1', 'label': 'foo', 'trx': [{}, {'nominal power': '12'}] }
+b = { 'type': 'osmo-bts-trx', 'trx': [{'nominal power': '10'}, {'nominal power': '12'}] }
+res = {'times': '1', 'label': 'foo', 'trx': [{'nominal power': '10'}, {'nominal power': '12'}], 'type': 'osmo-bts-trx'}
+config.combine(a, b)
+assert a == res
+
+print('- Combine lists 14:')
+a = { 'times': '1', 'label': 'foo', 'trx': [] }
+b = { 'type': 'osmo-bts-trx', 'trx': [] }
+res = {'times': '1', 'label': 'foo', 'trx': [], 'type': 'osmo-bts-trx'}
+config.combine(a, b)
+assert a == res
+
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/selftest/config_test/test.cfg b/selftest/config_test/test.cfg
new file mode 100644
index 0000000..cc62182
--- /dev/null
+++ b/selftest/config_test/test.cfg
@@ -0,0 +1,59 @@
+modems:
+
+- dbus_path: /sierra_0
+ msisdn: 7801
+ imsi: 901700000009001
+ ki: D620F48487B1B782DA55DF6717F08FF9
+
+- dbus_path: /sierra_1
+ msisdn: '7802'
+ imsi: '901700000009002'
+ ki: D620F48487B1B782DA55DF6717F08FF9
+
+# comment
+BTS:
+
+- name: sysmoBTS 1002
+ TYPE: sysmobts
+ addr: 10.42.42.114
+ trx:
+ - timeslots:
+ - CCCH+SDCCH4
+ - SDCCH8
+ - TCH/F_TCH/H_PDCH
+ - TCH/F_TCH/H_PDCH
+ - TCH/F_TCH/H_PDCH
+ - TCH/F_TCH/H_PDCH
+ - TCH/F_TCH/H_PDCH
+ - TCH/F_TCH/H_PDCH
+ band: GSM-1800
+ - timeslots:
+ - SDCCH8
+ - PDCH
+ - PDCH
+ - PDCH
+ - PDCH
+ - PDCH
+ - PDCH
+ - PDCH
+ band: GSM-1900
+
+addr:
+- 0.0.0.0
+- 255.255.255.255
+- 10.11.12.13
+- 10.0.99.1
+- 192.168.0.14
+hwaddr:
+- ca:ff:ee:ba:aa:be
+- 00:00:00:00:00:00
+- CA:FF:EE:BA:AA:BE
+- cA:Ff:eE:Ba:aA:Be
+- ff:ff:ff:ff:ff:ff
+imsi:
+- '012345'
+- '012345678'
+- '012345678912345'
+ki:
+- 000102030405060708090a0b0c0d0e0f
+- 000102030405060708090a0b0c0d0e0f
diff --git a/selftest/dbus_test/dbus_server.py b/selftest/dbus_test/dbus_server.py
new file mode 100755
index 0000000..222b28b
--- /dev/null
+++ b/selftest/dbus_test/dbus_server.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python3
+
+# Based on http://stackoverflow.com/questions/22390064/use-dbus-to-just-send-a-message-in-python
+
+# Python DBUS Test Server
+# runs until the Quit() method is called via DBUS
+
+from gi.repository import GLib
+from pydbus import SessionBus
+
+loop = GLib.MainLoop()
+
+class MyDBUSService(object):
+ """
+ <node>
+ <interface name='net.lew21.pydbus.ClientServerExample'>
+ <method name='Hello'>
+ <arg type='s' name='response' direction='out'/>
+ </method>
+ <method name='EchoString'>
+ <arg type='s' name='a' direction='in'/>
+ <arg type='s' name='response' direction='out'/>
+ </method>
+ <method name='Quit'/>
+ </interface>
+ </node>
+ """
+
+ def Hello(self):
+ """returns the string 'Hello, World!'"""
+ return "Hello, World!"
+
+ def EchoString(self, s):
+ """returns whatever is passed to it"""
+ return s
+
+ def Quit(self):
+ """removes this object from the DBUS connection and exits"""
+ loop.quit()
+
+bus = SessionBus()
+bus.publish("net.lew21.pydbus.ClientServerExample", MyDBUSService())
+loop.run()
+
diff --git a/selftest/dbus_test/ofono_client.py b/selftest/dbus_test/ofono_client.py
new file mode 100755
index 0000000..6b60f98
--- /dev/null
+++ b/selftest/dbus_test/ofono_client.py
@@ -0,0 +1,57 @@
+#!/usr/bin/env python3
+
+'''
+Power on and off some modem on ofono, while running the glib main loop in a
+thread and receiving modem state changes by dbus signals.
+'''
+
+from pydbus import SystemBus, Variant
+import time
+import threading
+import pprint
+
+from gi.repository import GLib
+loop = GLib.MainLoop()
+
+def propchanged(*args, **kwargs):
+ print('-> PROP CHANGED: %r %r' % (args, kwargs))
+
+class GlibMainloop(threading.Thread):
+ def run(self):
+ loop.run()
+
+ml = GlibMainloop()
+ml.start()
+
+try:
+ bus = SystemBus()
+
+ print('\n- list modems')
+ root = bus.get("org.ofono", '/')
+ print(root.Introspect())
+ modems = sorted(root.GetModems())
+ pprint.pprint(modems)
+
+ first_modem_path = modems[0][0]
+ print('\n- first modem %r' % first_modem_path)
+ modem = bus.get("org.ofono", first_modem_path)
+ modem.PropertyChanged.connect(propchanged)
+
+ print(modem.Introspect())
+ print(modem.GetProperties())
+
+ print('\n- set Powered = True')
+ modem.SetProperty('Powered', Variant('b', True))
+ print('call returned')
+ print(modem.GetProperties())
+
+ time.sleep(1)
+
+ print('\n- set Powered = False')
+ modem.SetProperty('Powered', Variant('b', False))
+ print('call returned')
+
+ print(modem.GetProperties())
+finally:
+ loop.quit()
+ml.join()
diff --git a/selftest/dbus_test/ofono_client_one_thread.py b/selftest/dbus_test/ofono_client_one_thread.py
new file mode 100644
index 0000000..96d54bc
--- /dev/null
+++ b/selftest/dbus_test/ofono_client_one_thread.py
@@ -0,0 +1,71 @@
+#!/usr/bin/env python3
+
+'''
+Power on and off some modem on ofono, pumping pending glib main context
+events in the main thread and receiving modem state changes by dbus signals.
+'''
+
+from pydbus import SystemBus, Variant
+import time
+import pprint
+
+from gi.repository import GLib
+glib_main_loop = GLib.MainLoop()
+glib_main_ctx = glib_main_loop.get_context()
+
+def propchanged(*args, **kwargs):
+ print('-> PROP CHANGED: %r %r' % (args, kwargs))
+
+
+def pump():
+ global glib_main_ctx
+ print('pump?')
+ while glib_main_ctx.pending():
+ print('* pump')
+ glib_main_ctx.iteration()
+
+def wait(condition):
+ pump()
+ while not condition():
+ time.sleep(.1)
+ pump()
+
+bus = SystemBus()
+
+print('\n- list modems')
+root = bus.get("org.ofono", '/')
+print(root.Introspect())
+modems = sorted(root.GetModems())
+pprint.pprint(modems)
+pump()
+
+first_modem_path = modems[0][0]
+print('\n- first modem %r' % first_modem_path)
+modem = bus.get("org.ofono", first_modem_path)
+modem.PropertyChanged.connect(propchanged)
+
+print(modem.Introspect())
+print(modem.GetProperties())
+
+print('\n- set Powered = True')
+modem.SetProperty('Powered', Variant('b', True))
+print('call returned')
+print('- pump dbus events')
+pump()
+pump()
+print('sleep 1')
+time.sleep(1)
+pump()
+
+
+print('- modem properties:')
+print(modem.GetProperties())
+
+
+print('\n- set Powered = False')
+modem.SetProperty('Powered', Variant('b', False))
+print('call returned')
+
+print(modem.GetProperties())
+
+# vim: tabstop=4 shiftwidth=4 expandtab
diff --git a/selftest/lock_test.err b/selftest/lock_test.err
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/selftest/lock_test.err
diff --git a/selftest/lock_test.ok b/selftest/lock_test.ok
new file mode 100644
index 0000000..011a89c
--- /dev/null
+++ b/selftest/lock_test.ok
@@ -0,0 +1,14 @@
+creating files
+launch a program that locks a given file, it will create $dir/lock_test
+wait until this lock_test lock file was created by program
+expecting the lock file to reflect "long name"
+launched first, locked by: 'long name'
+launching second program, should find the lock intact and wait
+launched second, locked by: 'long name'
+drop the first lock, $f1 removal signals the first process to stop locking
+wait for first program to carry out the lock release
+now expecting second program to lock
+waited, locked by: 'shorter'
+release the second program also
+expecting the lock to be gone
+waited more, locked by: ''
diff --git a/selftest/lock_test.sh b/selftest/lock_test.sh
new file mode 100755
index 0000000..4f3f7ad
--- /dev/null
+++ b/selftest/lock_test.sh
@@ -0,0 +1,53 @@
+#!/bin/sh
+
+echo 'creating files'
+dir="$(mktemp -d)"
+n1="long name"
+f1="$dir/$n1"
+touch "$f1"
+n2="shorter"
+f2="$dir/$n2"
+touch "$f2"
+sync
+
+echo 'launch a program that locks a given file, it will create $dir/lock_test'
+python3 ./lock_test_help.py "$dir" "$n1" &
+
+echo 'wait until this lock_test lock file was created by program'
+while [ ! -f "$dir/lock_test" ]; do
+ sleep .1
+done
+sync
+
+echo 'expecting the lock file to reflect "long name"'
+echo "launched first, locked by: '$(cat "$dir/lock_test")'"
+
+echo 'launching second program, should find the lock intact and wait'
+python3 ./lock_test_help.py "$dir" "$n2" &
+while [ ! -f "$f2.ready" ]; do
+ sleep .1
+done
+sleep 1
+sync
+echo "launched second, locked by: '$(cat "$dir/lock_test")'"
+
+echo 'drop the first lock, $f1 removal signals the first process to stop locking'
+rm "$f1"
+
+echo 'wait for first program to carry out the lock release'
+while [ ! -f "$f1.done" ]; do
+ sleep .1
+done
+
+echo 'now expecting second program to lock'
+echo "waited, locked by: '$(cat "$dir/lock_test")'"
+
+echo 'release the second program also'
+rm "$f2"
+while [ ! -f "$f2.done" ]; do
+ sleep .1
+done
+
+echo 'expecting the lock to be gone'
+echo "waited more, locked by: '$(cat "$dir/lock_test")'"
+rm -rf "$dir"
diff --git a/selftest/lock_test_help.py b/selftest/lock_test_help.py
new file mode 100644
index 0000000..f4e1f9b
--- /dev/null
+++ b/selftest/lock_test_help.py
@@ -0,0 +1,25 @@
+import sys
+import time
+import os
+
+import _prep
+
+from osmo_gsm_tester.util import FileLock, touch_file
+
+testdir, name = sys.argv[1:]
+stop_signalling_file = os.path.join(testdir, name)
+if not os.path.isfile(stop_signalling_file):
+ print('expected a stop-file %r' % stop_signalling_file)
+ exit(1)
+
+lockfile_path = os.path.join(testdir, 'lock_test')
+fl = FileLock(lockfile_path, name)
+
+touch_file(stop_signalling_file + '.ready')
+
+with fl:
+ while os.path.exists(stop_signalling_file):
+ time.sleep(.1)
+touch_file(stop_signalling_file + '.done')
+
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/selftest/log_test.err b/selftest/log_test.err
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/selftest/log_test.err
diff --git a/selftest/log_test.ok b/selftest/log_test.ok
new file mode 100644
index 0000000..c9d2dd2
--- /dev/null
+++ b/selftest/log_test.ok
@@ -0,0 +1,40 @@
+- Testing global log functions
+01:02:03 tst <origin>: from log.log()
+01:02:03 tst <origin>: DBG: from log.dbg()
+01:02:03 tst <origin>: ERR: from log.err()
+- Testing log.Origin functions
+01:02:03 tst some-name(some='detail'): hello log
+01:02:03 tst some-name(some='detail'): ERR: hello err
+01:02:03 tst some-name(some='detail'): message {int=3, none=None, str='str\n', tuple=('foo', 42)}
+01:02:03 tst some-name(some='detail'): DBG: hello dbg
+- Testing log.style()
+01:02:03: only time
+tst: only category
+DBG: only level
+ some-name(some='detail'): only origin
+only src [log_test.py:69]
+- Testing log.style_change()
+no log format
+01:02:03: add time
+but no time format
+01:02:03: DBG: add level
+01:02:03 tst: DBG: add category
+01:02:03 tst: DBG: add src [log_test.py:84]
+01:02:03 tst some-name(some='detail'): DBG: add origin [log_test.py:86]
+- Testing origin_width
+01:02:03 tst shortname: origin str set to 23 chars [log_test.py:91]
+01:02:03 tst very long name(and_some=(3, 'things', 'in a tuple'), some='details'): long origin str [log_test.py:93]
+01:02:03 tst very long name(and_some=(3, 'things', 'in a tuple'), some='details'): DBG: long origin str dbg [log_test.py:94]
+01:02:03 tst very long name(and_some=(3, 'things', 'in a tuple'), some='details'): ERR: long origin str err [log_test.py:95]
+- Testing log.Origin with omitted info
+01:02:03 tst LogTest: hello log, name implicit from class name [log_test.py:99]
+01:02:03 --- explicit_name: hello log, no category set [log_test.py:103]
+01:02:03 --- LogTest: hello log, no category nor name set [log_test.py:107]
+01:02:03 --- LogTest: DBG: debug message, no category nor name set [log_test.py:110]
+- Testing logging of Exceptions, tracing origins
+nested print just prints
+01:02:03 tst level3: nested log() [level1↪level2↪level3] [log_test.py:132]
+01:02:03 tst level2: nested l2 log() from within l3 scope [level1↪level2] [log_test.py:133]
+01:02:03 tst level3: ERR: ValueError: bork [level1↪level2↪level3] [log_test.py:134: raise ValueError('bork')]
+- Disallow origin loops
+disallowed successfully
diff --git a/selftest/log_test.py b/selftest/log_test.py
new file mode 100755
index 0000000..889a8a5
--- /dev/null
+++ b/selftest/log_test.py
@@ -0,0 +1,151 @@
+#!/usr/bin/env python3
+
+# osmo_gsm_tester: logging tests
+#
+# Copyright (C) 2016-2017 by sysmocom - s.f.m.c. GmbH
+#
+# Author: Neels Hofmeyr <neels@hofmeyr.de>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import _prep
+
+import sys
+import os
+
+from osmo_gsm_tester import log
+
+#log.targets[0].get_time_str = lambda: '01:02:03'
+fake_time = '01:02:03'
+log.style_change(time=True, time_fmt=fake_time)
+log.set_all_levels(None)
+
+print('- Testing global log functions')
+log.log('from log.log()', _origin='<origin>', _category=log.C_TST)
+log.dbg('from log.dbg(), not seen', _origin='<origin>', _category=log.C_TST)
+log.set_level(log.C_TST, log.L_DBG)
+log.dbg('from log.dbg()', _origin='<origin>', _category=log.C_TST)
+log.set_level(log.C_TST, log.L_LOG)
+log.err('from log.err()', _origin='<origin>', _category=log.C_TST)
+
+print('- Testing log.Origin functions')
+class LogTest(log.Origin):
+ def __init__(self, *name_items, **detail_items):
+ super().__init__(log.C_TST, *name_items, **detail_items)
+
+t = LogTest('some', 'name', some="detail")
+
+t.log("hello log")
+t.err("hello err")
+t.dbg("hello dbg not visible")
+
+t.log("message", int=3, tuple=('foo', 42), none=None, str='str\n')
+
+log.set_level(log.C_TST, log.L_DBG)
+t.dbg("hello dbg")
+
+print('- Testing log.style()')
+
+log.style(time=True, category=False, level=False, origin=False, src=False, time_fmt=fake_time)
+t.dbg("only time")
+log.style(time=False, category=True, level=False, origin=False, src=False, time_fmt=fake_time)
+t.dbg("only category")
+log.style(time=False, category=False, level=True, origin=False, src=False, time_fmt=fake_time)
+t.dbg("only level")
+log.style(time=False, category=False, level=False, origin=True, src=False, time_fmt=fake_time)
+t.dbg("only origin")
+log.style(time=False, category=False, level=False, origin=False, src=True, time_fmt=fake_time)
+t.dbg("only src")
+
+print('- Testing log.style_change()')
+log.style(time=False, category=False, level=False, origin=False, src=False, time_fmt=fake_time)
+t.dbg("no log format")
+log.style_change(time=True)
+t.dbg("add time")
+log.style_change(time=True, time_fmt=0)
+t.dbg("but no time format")
+log.style_change(time=True, time_fmt=fake_time)
+log.style_change(level=True)
+t.dbg("add level")
+log.style_change(category=True)
+t.dbg("add category")
+log.style_change(src=True)
+t.dbg("add src")
+log.style_change(origin=True)
+t.dbg("add origin")
+
+print('- Testing origin_width')
+t = LogTest('shortname')
+log.style(origin_width=23, time_fmt=fake_time)
+t.log("origin str set to 23 chars")
+t.set_name('very long name', some='details', and_some=(3, 'things', 'in a tuple'))
+t.log("long origin str")
+t.dbg("long origin str dbg")
+t.err("long origin str err")
+
+print('- Testing log.Origin with omitted info')
+t = LogTest()
+t.log("hello log, name implicit from class name")
+
+t = LogTest('explicit_name')
+t._set_log_category(None)
+t.log("hello log, no category set")
+
+t = LogTest()
+t._set_log_category(None)
+t.log("hello log, no category nor name set")
+t.dbg("hello log, no category nor name set, not seen")
+log.set_level(log.C_DEFAULT, log.L_DBG)
+t.dbg("debug message, no category nor name set")
+
+print('- Testing logging of Exceptions, tracing origins')
+log.style(time_fmt=fake_time, origin_width=0)
+
+class Thing(log.Origin):
+ def __init__(self, some_path):
+ super().__init__(log.C_TST, some_path)
+
+ def say(self, msg):
+ print(msg)
+
+ def l1(self):
+ level2 = Thing('level2')
+ level2.l2()
+
+ def l2(self):
+ level3 = Thing('level3')
+ level3.l3(self)
+
+ def l3(self, level2):
+ print('nested print just prints')
+ self.log('nested log()')
+ level2.log('nested l2 log() from within l3 scope')
+ raise ValueError('bork')
+
+try:
+ level1 = Thing('level1')
+ level1.l1()
+except Exception:
+ log.log_exn()
+
+print('- Disallow origin loops')
+try:
+ t = Thing('foo')
+ t._set_parent(t)
+ raise RuntimeError('this should not be reached')
+except log.OriginLoopError:
+ print('disallowed successfully')
+ pass
+
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/selftest/misc.py b/selftest/misc.py
new file mode 100755
index 0000000..e57a48c
--- /dev/null
+++ b/selftest/misc.py
@@ -0,0 +1,7 @@
+#!/usr/bin/env python3
+
+msisdn = '0000'
+
+l = len(msisdn)
+next_msisdn = ('%%0%dd' % l) % (int(msisdn) + 1)
+print(next_msisdn)
diff --git a/selftest/process_test.err b/selftest/process_test.err
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/selftest/process_test.err
diff --git a/selftest/process_test.ok b/selftest/process_test.ok
new file mode 100644
index 0000000..f168ee3
--- /dev/null
+++ b/selftest/process_test.ok
@@ -0,0 +1,33 @@
+run foo: DBG: cd '[TMP]'; PATH=[$PATH] foo.py arg1 arg2
+run foo: DBG: [TMP]/stdout
+run foo: DBG: [TMP]/stderr
+run foo(pid=[PID]): Launched
+stdout:
+(launched: [DATETIME])
+foo stdout
+[[$0], 'arg1', 'arg2']
+
+stderr:
+(launched: [DATETIME])
+foo stderr
+
+run foo(pid=[PID]): Terminating (SIGINT)
+run foo(pid=[PID]): DBG: Cleanup
+run foo(pid=[PID]): Terminated {rc=1}
+result: 1
+stdout:
+(launched: [DATETIME])
+foo stdout
+[[$0], 'arg1', 'arg2']
+Exiting (stdout)
+
+stderr:
+(launched: [DATETIME])
+foo stderr
+Traceback (most recent call last):
+ File [$0], line [LINE], in <module>
+ time.sleep(1)
+KeyboardInterrupt
+Exiting (stderr)
+
+done.
diff --git a/selftest/process_test.ok.ign b/selftest/process_test.ok.ign
new file mode 100644
index 0000000..b817f5a
--- /dev/null
+++ b/selftest/process_test.ok.ign
@@ -0,0 +1,7 @@
+PATH='[^']*' PATH=[$PATH]
+/tmp/[^/ '"]* [TMP]
+pid=[0-9]* pid=[PID]
+....-..-.._..:..:..\....... [DATETIME]
+'[^']*/selftest/process_test/foo.py' [$0]
+"[^"]*/selftest/process_test/foo.py" [$0]
+, line [0-9]* , line [LINE]
diff --git a/selftest/process_test.py b/selftest/process_test.py
new file mode 100755
index 0000000..71523c9
--- /dev/null
+++ b/selftest/process_test.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python3
+
+import _prep
+import time
+import os
+
+from osmo_gsm_tester import process, util, log
+
+tmpdir = util.Dir(util.get_tempdir())
+
+dollar_path = '%s:%s' % (
+ os.path.join(os.getcwd(), 'process_test'),
+ os.getenv('PATH'))
+
+p = process.Process('foo', tmpdir, ('foo.py', 'arg1', 'arg2'),
+ env={'PATH': dollar_path})
+
+p.launch()
+time.sleep(.5)
+p.poll()
+print('stdout:')
+print(p.get_stdout())
+print('stderr:')
+print(p.get_stderr())
+
+assert not p.terminated()
+p.terminate()
+assert p.terminated()
+print('result: %r' % p.result)
+
+print('stdout:')
+print(p.get_stdout())
+print('stderr:')
+print(p.get_stderr())
+print('done.')
+
+test_ssh = True
+test_ssh = False
+if test_ssh:
+ # this part of the test requires ability to ssh to localhost
+ p = process.RemoteProcess('ssh-test', '/tmp', os.getenv('USER'), 'localhost', tmpdir,
+ ('ls', '-al'))
+ p.launch()
+ p.wait()
+ assert p.terminated()
+ print('stdout:')
+ print(p.get_stdout())
+ print('stderr:')
+ print(p.get_stderr())
+
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/selftest/process_test/foo.py b/selftest/process_test/foo.py
new file mode 100755
index 0000000..4abe887
--- /dev/null
+++ b/selftest/process_test/foo.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python3
+
+import sys
+import atexit
+import time
+
+
+sys.stdout.write('foo stdout\n')
+sys.stderr.write('foo stderr\n')
+
+print(repr(sys.argv))
+sys.stdout.flush()
+sys.stderr.flush()
+
+def x():
+ sys.stdout.write('Exiting (stdout)\n')
+ sys.stdout.flush()
+ sys.stderr.write('Exiting (stderr)\n')
+ sys.stderr.flush()
+atexit.register(x)
+
+while True:
+ time.sleep(1)
+
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/selftest/py_import_test/invocation.py b/selftest/py_import_test/invocation.py
new file mode 100755
index 0000000..ad58b80
--- /dev/null
+++ b/selftest/py_import_test/invocation.py
@@ -0,0 +1,24 @@
+#!/usr/bin/env python3
+
+import support
+import importlib.util
+
+if hasattr(importlib.util, 'module_from_spec'):
+ def run_test(path):
+ print('py 3.5+')
+ spec = importlib.util.spec_from_file_location("tests.script", path)
+ spec.loader.exec_module( importlib.util.module_from_spec(spec) )
+else:
+ def run_test(path):
+ print('py 3.4-')
+ from importlib.machinery import SourceFileLoader
+ SourceFileLoader("tests.script", path).load_module()
+
+path = './subdir/script.py'
+
+support.config = 'specifics'
+run_test(path)
+
+support.config = 'specifics2'
+run_test(path)
+
diff --git a/selftest/py_import_test/subdir/script.py b/selftest/py_import_test/subdir/script.py
new file mode 100644
index 0000000..1b57c20
--- /dev/null
+++ b/selftest/py_import_test/subdir/script.py
@@ -0,0 +1,9 @@
+from support import *
+
+print('hello')
+
+def run(what):
+ print(what)
+ print(what)
+
+run(config)
diff --git a/selftest/py_import_test/support.py b/selftest/py_import_test/support.py
new file mode 100644
index 0000000..aceedb8
--- /dev/null
+++ b/selftest/py_import_test/support.py
@@ -0,0 +1,2 @@
+
+config = None
diff --git a/selftest/resource_test.err b/selftest/resource_test.err
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/selftest/resource_test.err
diff --git a/selftest/resource_test.ok b/selftest/resource_test.ok
new file mode 100644
index 0000000..08989a2
--- /dev/null
+++ b/selftest/resource_test.ok
@@ -0,0 +1,289 @@
+- expect solutions:
+[0, 1, 2]
+[0, 1, 2]
+[1, 0, 2]
+[1, 2, 0]
+- expect failure to solve:
+The requested resource requirements are not solvable [[0, 2], [2], [0, 2]]
+- test removing a Resources list from itself
+ok, caused exception: RuntimeError('Refusing to drop a list of resources from itself. This is probably a bug where a list of Resources() should have been copied but is passed as-is. use Resources.clear() instead.',)
+- test removing a Resources list from one with the same list in it
+- test resources config and state dir:
+cnf -: DBG: Found config file paths.conf as [PATH]/selftest/conf/paths.conf in ./conf which is [PATH]/selftest/conf
+cnf -: DBG: [PATH]/selftest/conf/paths.conf: relative path ./test_work/state_dir is [PATH]/selftest/conf/test_work/state_dir
+cnf -: DBG: [PATH]/selftest/conf/paths.conf: relative path ./suite_test is [PATH]/selftest/conf/suite_test
+cnf -: DBG: Found path state_dir as [PATH]/selftest/conf/test_work/state_dir
+cnf ResourcesPool: DBG: Found config file resources.conf as [PATH]/selftest/conf/resources.conf in ./conf which is [PATH]/selftest/conf
+cnf ResourcesPool: DBG: Found path state_dir as [PATH]/selftest/conf/test_work/state_dir
+*** all resources:
+{'arfcn': [{'_hash': 'e620569450f8259b3f0212ec19c285dd07df063c',
+ 'arfcn': '512',
+ 'band': 'GSM-1800'},
+ {'_hash': '022621e513c5a5bf33b77430a1e9c886be676fa1',
+ 'arfcn': '514',
+ 'band': 'GSM-1800'},
+ {'_hash': '3199abf375a1dd899e554e9d63a552e06d7f38bf',
+ 'arfcn': '516',
+ 'band': 'GSM-1800'},
+ {'_hash': '57aa7bd1da62495f2857ae6b859193dd592a0a02',
+ 'arfcn': '518',
+ 'band': 'GSM-1800'},
+ {'_hash': '53dd2e2682b736f427abd2ce59a9a50ca8130678',
+ 'arfcn': '520',
+ 'band': 'GSM-1800'},
+ {'_hash': '31687a5e6d5140a4b3877606ca5f18244f11d706',
+ 'arfcn': '540',
+ 'band': 'GSM-1900'},
+ {'_hash': '1def43a5c88a83cdb21279eacab0679ea08ffaf3',
+ 'arfcn': '542',
+ 'band': 'GSM-1900'},
+ {'_hash': '1d6e3b08a3861fd4d748f111295ec5a93ecd3d23',
+ 'arfcn': '544',
+ 'band': 'GSM-1900'},
+ {'_hash': '8fb36927de15466fcdbee01f7f65704c312cb36c',
+ 'arfcn': '546',
+ 'band': 'GSM-1900'},
+ {'_hash': 'dc9ce027a257da087f31a5bc1ee6b4abd2637369',
+ 'arfcn': '548',
+ 'band': 'GSM-1900'}],
+ 'bts': [{'_hash': 'd2aa7c1124943de352351b650ca0c751784da6b6',
+ 'addr': '10.42.42.114',
+ 'band': 'GSM-1800',
+ 'ciphers': ['a5_0', 'a5_1'],
+ 'direct_pcu': 'True',
+ 'ipa_unit_id': '1',
+ 'label': 'sysmoBTS 1002',
+ 'type': 'osmo-bts-sysmo'},
+ {'_hash': '2158317d5e0055070e7174c2498dedf53a2957e9',
+ 'addr': '10.42.42.50',
+ 'band': 'GSM-1800',
+ 'ciphers': ['a5_0', 'a5_1'],
+ 'ipa_unit_id': '6',
+ 'label': 'Ettus B200',
+ 'osmo_trx': {'clock_reference': 'external', 'launch_trx': 'True'},
+ 'type': 'osmo-bts-trx'},
+ {'_hash': 'f964ba5fe7a37b97ec3e0c4ef21c9231a19de45d',
+ 'addr': '10.42.42.51',
+ 'band': 'GSM-1800',
+ 'ciphers': ['a5_0', 'a5_1'],
+ 'ipa_unit_id': '7',
+ 'label': 'sysmoCell 5000',
+ 'osmo_trx': {'clock_reference': 'external',
+ 'launch_trx': 'False',
+ 'trx_ip': '10.42.42.112'},
+ 'type': 'osmo-bts-trx'}],
+ 'ip_address': [{'_hash': 'fd103b22c7cf2480d609150e06f4bbd92ac78d8c',
+ 'addr': '10.42.42.2'},
+ {'_hash': '1c614d6210c551d142aadca8f25e1534ebb2a70f',
+ 'addr': '10.42.42.3'},
+ {'_hash': '862b529c701adf302477bc126a8032cfc2ec4753',
+ 'addr': '10.42.42.4'},
+ {'_hash': '05feb6e1e24ca4235889eb28d0a8d5cedac6e5d1',
+ 'addr': '10.42.42.5'},
+ {'_hash': 'e780ac7581ad29f8f46e637b61d0c38898c4c52c',
+ 'addr': '10.42.42.6'}],
+ 'modem': [{'_hash': '0b538cb6ad799fbd7c2953fd3b4463a76c7cc9c0',
+ 'auth_algo': 'comp128v1',
+ 'ciphers': ['a5_0', 'a5_1'],
+ 'imsi': '901700000009031',
+ 'ki': '80A37E6FDEA931EAC92FFA5F671EFEAD',
+ 'label': 'sierra_1',
+ 'path': '/sierra_1'},
+ {'_hash': '3a6e7747dfe7dfdf817bd3351031bd08051605c3',
+ 'auth_algo': 'comp128v1',
+ 'ciphers': ['a5_0', 'a5_1'],
+ 'imsi': '901700000009029',
+ 'ki': '00969E283349D354A8239E877F2E0866',
+ 'label': 'sierra_2',
+ 'path': '/sierra_2'},
+ {'_hash': 'f6ba17db2ad13da5ba5c54b5385a774f5351bb5a',
+ 'auth_algo': 'comp128v1',
+ 'ciphers': ['a5_0', 'a5_1'],
+ 'imsi': '901700000009030',
+ 'ki': 'BB70807226393CDBAC8DD3439FF54252',
+ 'label': 'gobi_0',
+ 'path': '/gobi_0'},
+ {'_hash': 'fbff2e4f06b727fc8a70da23e1d134f9cd763919',
+ 'auth_algo': 'comp128v1',
+ 'ciphers': ['a5_0', 'a5_1'],
+ 'imsi': '901700000009032',
+ 'ki': '2F70DCA43C45ACB97E947FDD0C7CA30A',
+ 'label': 'gobi_3',
+ 'path': '/gobi_3'}]}
+*** end: all resources
+
+- request some resources
+--- testowner: Reserving 2 x arfcn (candidates: 10)
+--- testowner: DBG: Picked - _hash: e620569450f8259b3f0212ec19c285dd07df063c
+ arfcn: '512'
+ band: GSM-1800
+- _hash: 022621e513c5a5bf33b77430a1e9c886be676fa1
+ arfcn: '514'
+ band: GSM-1800
+--- testowner: Reserving 2 x bts (candidates: 3)
+--- testowner: DBG: Picked - _hash: d2aa7c1124943de352351b650ca0c751784da6b6
+ addr: 10.42.42.114
+ band: GSM-1800
+ ciphers:
+ - a5_0
+ - a5_1
+ direct_pcu: 'True'
+ ipa_unit_id: '1'
+ label: sysmoBTS 1002
+ type: osmo-bts-sysmo
+- _hash: 2158317d5e0055070e7174c2498dedf53a2957e9
+ addr: 10.42.42.50
+ band: GSM-1800
+ ciphers:
+ - a5_0
+ - a5_1
+ ipa_unit_id: '6'
+ label: Ettus B200
+ osmo_trx:
+ clock_reference: external
+ launch_trx: 'True'
+ type: osmo-bts-trx
+--- testowner: Reserving 1 x ip_address (candidates: 5)
+--- testowner: DBG: Picked - _hash: fd103b22c7cf2480d609150e06f4bbd92ac78d8c
+ addr: 10.42.42.2
+--- testowner: Reserving 2 x modem (candidates: 4)
+--- testowner: DBG: Picked - _hash: 0b538cb6ad799fbd7c2953fd3b4463a76c7cc9c0
+ auth_algo: comp128v1
+ ciphers:
+ - a5_0
+ - a5_1
+ imsi: '901700000009031'
+ ki: 80A37E6FDEA931EAC92FFA5F671EFEAD
+ label: sierra_1
+ path: /sierra_1
+- _hash: 3a6e7747dfe7dfdf817bd3351031bd08051605c3
+ auth_algo: comp128v1
+ ciphers:
+ - a5_0
+ - a5_1
+ imsi: '901700000009029'
+ ki: 00969E283349D354A8239E877F2E0866
+ label: sierra_2
+ path: /sierra_2
+~~~ currently reserved:
+arfcn:
+- _hash: e620569450f8259b3f0212ec19c285dd07df063c
+ _reserved_by: testowner-123-1490837279
+ arfcn: '512'
+ band: GSM-1800
+- _hash: 022621e513c5a5bf33b77430a1e9c886be676fa1
+ _reserved_by: testowner-123-1490837279
+ arfcn: '514'
+ band: GSM-1800
+bts:
+- _hash: d2aa7c1124943de352351b650ca0c751784da6b6
+ _reserved_by: testowner-123-1490837279
+ addr: 10.42.42.114
+ band: GSM-1800
+ ciphers:
+ - a5_0
+ - a5_1
+ direct_pcu: 'True'
+ ipa_unit_id: '1'
+ label: sysmoBTS 1002
+ type: osmo-bts-sysmo
+- _hash: 2158317d5e0055070e7174c2498dedf53a2957e9
+ _reserved_by: testowner-123-1490837279
+ addr: 10.42.42.50
+ band: GSM-1800
+ ciphers:
+ - a5_0
+ - a5_1
+ ipa_unit_id: '6'
+ label: Ettus B200
+ osmo_trx:
+ clock_reference: external
+ launch_trx: 'True'
+ type: osmo-bts-trx
+ip_address:
+- _hash: fd103b22c7cf2480d609150e06f4bbd92ac78d8c
+ _reserved_by: testowner-123-1490837279
+ addr: 10.42.42.2
+modem:
+- _hash: 0b538cb6ad799fbd7c2953fd3b4463a76c7cc9c0
+ _reserved_by: testowner-123-1490837279
+ auth_algo: comp128v1
+ ciphers:
+ - a5_0
+ - a5_1
+ imsi: '901700000009031'
+ ki: 80A37E6FDEA931EAC92FFA5F671EFEAD
+ label: sierra_1
+ path: /sierra_1
+- _hash: 3a6e7747dfe7dfdf817bd3351031bd08051605c3
+ _reserved_by: testowner-123-1490837279
+ auth_algo: comp128v1
+ ciphers:
+ - a5_0
+ - a5_1
+ imsi: '901700000009029'
+ ki: 00969E283349D354A8239E877F2E0866
+ label: sierra_2
+ path: /sierra_2
+
+~~~ end: currently reserved
+
+~~~ with modifiers:
+resources(testowner)={'arfcn': [{'_hash': 'e620569450f8259b3f0212ec19c285dd07df063c',
+ '_reserved_by': 'testowner-123-1490837279',
+ 'arfcn': '512',
+ 'band': 'GSM-1800'},
+ {'_hash': '022621e513c5a5bf33b77430a1e9c886be676fa1',
+ '_reserved_by': 'testowner-123-1490837279',
+ 'arfcn': '514',
+ 'band': 'GSM-1800'}],
+ 'bts': [{'_hash': 'd2aa7c1124943de352351b650ca0c751784da6b6',
+ '_reserved_by': 'testowner-123-1490837279',
+ 'addr': '10.42.42.114',
+ 'band': 'GSM-1800',
+ 'ciphers': ['a5_0', 'a5_1'],
+ 'direct_pcu': 'True',
+ 'ipa_unit_id': '1',
+ 'label': 'sysmoBTS 1002',
+ 'type': 'osmo-bts-sysmo'},
+ {'_hash': '2158317d5e0055070e7174c2498dedf53a2957e9',
+ '_reserved_by': 'testowner-123-1490837279',
+ 'addr': '10.42.42.50',
+ 'band': 'GSM-1800',
+ 'ciphers': ['a5_0', 'a5_1'],
+ 'ipa_unit_id': '6',
+ 'label': 'Ettus B200',
+ 'num_trx': 2,
+ 'osmo_trx': {'clock_reference': 'external', 'launch_trx': 'True'},
+ 'type': 'osmo-bts-trx'}],
+ 'ip_address': [{'_hash': 'fd103b22c7cf2480d609150e06f4bbd92ac78d8c',
+ '_reserved_by': 'testowner-123-1490837279',
+ 'addr': '10.42.42.2'}],
+ 'modem': [{'_hash': '0b538cb6ad799fbd7c2953fd3b4463a76c7cc9c0',
+ '_reserved_by': 'testowner-123-1490837279',
+ 'auth_algo': 'comp128v1',
+ 'ciphers': ['a5_0', 'a5_1'],
+ 'imsi': '901700000009031',
+ 'ki': '80A37E6FDEA931EAC92FFA5F671EFEAD',
+ 'label': 'sierra_1',
+ 'path': '/sierra_1'},
+ {'_hash': '3a6e7747dfe7dfdf817bd3351031bd08051605c3',
+ '_reserved_by': 'testowner-123-1490837279',
+ 'auth_algo': 'comp128v1',
+ 'ciphers': ['a5_0', 'a5_1'],
+ 'imsi': '901700000009029',
+ 'ki': '00969E283349D354A8239E877F2E0866',
+ 'label': 'sierra_2',
+ 'path': '/sierra_2'}]}
+~~~ end: with modifiers:
+~~~ currently reserved:
+{}
+
+~~~ end: currently reserved
+
+- item_matches:
+1st subset matches correctly, pass
+2nd subset matches correctly, pass
+3rd subset should not match, pass
+3rd subset should not match, pass
+4th subset should not match, pass
diff --git a/selftest/resource_test.ok.ign b/selftest/resource_test.ok.ign
new file mode 100644
index 0000000..393ce95
--- /dev/null
+++ b/selftest/resource_test.ok.ign
@@ -0,0 +1 @@
+/[^ ]*/selftest/ [PATH]/selftest/
diff --git a/selftest/resource_test.py b/selftest/resource_test.py
new file mode 100755
index 0000000..cdfe021
--- /dev/null
+++ b/selftest/resource_test.py
@@ -0,0 +1,126 @@
+#!/usr/bin/env python3
+
+import tempfile
+import os
+import pprint
+import shutil
+import atexit
+import _prep
+from osmo_gsm_tester import config, log, resource, util
+
+workdir = util.get_tempdir()
+
+# override config locations to make sure we use only the test conf
+config.ENV_CONF = './conf'
+
+log.get_process_id = lambda: '123-1490837279'
+
+print('- expect solutions:')
+pprint.pprint(
+ resource.solve([ [0, 1, 2],
+ [0, 1, 2],
+ [0, 1, 2] ]) )
+pprint.pprint(
+ resource.solve([ [0, 1, 2],
+ [0, 1],
+ [0, 2] ]) ) # == [0, 1, 2]
+pprint.pprint(
+ resource.solve([ [0, 1, 2],
+ [0],
+ [0, 2] ]) ) # == [1, 0, 2]
+pprint.pprint(
+ resource.solve([ [0, 1, 2],
+ [2],
+ [0, 2] ]) ) # == [1, 2, 0]
+
+print('- expect failure to solve:')
+try:
+ resource.solve([ [0, 2],
+ [2],
+ [0, 2] ])
+ assert False
+except resource.NotSolvable as e:
+ print(e)
+
+print('- test removing a Resources list from itself')
+try:
+ r = resource.Resources({ 'k': [ {'a': 1, 'b': 2}, {'a': 3, 'b': 4}, ],
+ 'i': [ {'c': 1, 'd': 2}, {'c': 3, 'd': 4}, ] })
+ r.drop(r)
+ assert False
+except RuntimeError as e:
+ print('ok, caused exception: %r' % e)
+
+print('- test removing a Resources list from one with the same list in it')
+r = resource.Resources({ 'k': [ {'a': 1, 'b': 2}, {'a': 3, 'b': 4}, ],
+ 'i': [ {'c': 1, 'd': 2}, {'c': 3, 'd': 4}, ] })
+r.drop({ 'k': r.get('k'), 'i': r.get('i') })
+assert not r
+
+print('- test resources config and state dir:')
+resources_conf = os.path.join(_prep.script_dir, 'resource_test', 'etc',
+ 'resources.conf')
+
+state_dir = config.get_state_dir()
+rrfile = state_dir.child(resource.RESERVED_RESOURCES_FILE)
+
+pool = resource.ResourcesPool()
+
+print('*** all resources:')
+pprint.pprint(pool.all_resources)
+print('*** end: all resources\n')
+
+print('- request some resources')
+want = {
+ 'ip_address': [ { 'times': 1 } ],
+ 'bts': [ { 'type': 'osmo-bts-sysmo', 'times': 1 , 'ciphers': ['a5_1']}, { 'type': 'osmo-bts-trx', 'times': 1 } ],
+ 'arfcn': [ { 'band': 'GSM-1800', 'times': 2 } ],
+ 'modem': [ { 'times': 2 , 'ciphers': ['a5_0', 'a5_1']} ],
+ }
+modifiers = {
+ 'bts': [ {}, {'num_trx': 2 }],
+}
+origin = log.Origin(None, 'testowner')
+
+resources = pool.reserve(origin, config.replicate_times(want), config.replicate_times(modifiers))
+
+print('~~~ currently reserved:')
+with open(rrfile, 'r') as f:
+ print(f.read())
+print('~~~ end: currently reserved\n')
+
+print('~~~ with modifiers:')
+print(repr(resources))
+print('~~~ end: with modifiers:')
+
+resources.free()
+
+print('~~~ currently reserved:')
+with open(rrfile, 'r') as f:
+ print(f.read())
+print('~~~ end: currently reserved\n')
+
+print('- item_matches:')
+superset = { 'hello': 'world', 'foo': 'bar', 'ordered_list': [{'xkey': 'xvalue'},{'ykey': 'yvalue'}], 'unordered_list_set': [1, 2, 3]}
+
+subset = { 'foo': 'bar', 'ordered_list': [{'xkey': 'xvalue'},{'ykey': 'yvalue'}], 'unordered_list_set': [2, 1] }
+if resource.item_matches(superset, subset):
+ print('1st subset matches correctly, pass')
+
+subset = { 'ordered_list': [{},{'ykey': 'yvalue'}], 'unordered_list_set': [] }
+if resource.item_matches(superset, subset):
+ print('2nd subset matches correctly, pass')
+
+subset = { 'ordered_list': [{'ykey': 'yvalue'}, {'xkey': 'xvalue'}] }
+if not resource.item_matches(superset, subset):
+ print('3rd subset should not match, pass')
+
+subset = { 'ordered_list': [{'xkey': 'xvalue'}, {'ykey': 'yvalue'}, {'zkey': 'zvalue'}] }
+if not resource.item_matches(superset, subset):
+ print('3rd subset should not match, pass')
+
+subset = { 'unordered_list_set': [4] }
+if not resource.item_matches(superset, subset):
+ print('4th subset should not match, pass')
+
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/selftest/sms_test.err b/selftest/sms_test.err
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/selftest/sms_test.err
diff --git a/selftest/sms_test.ok b/selftest/sms_test.ok
new file mode 100644
index 0000000..125b5c0
--- /dev/null
+++ b/selftest/sms_test.ok
@@ -0,0 +1,6 @@
+message nr. 1
+message nr. 2
+message nr. 3
+message nr. 4, from 123, to 456
+message nr. 5, from 123, to 456
+message nr. 4, from 123, to 456
diff --git a/selftest/sms_test.py b/selftest/sms_test.py
new file mode 100755
index 0000000..01cecc2
--- /dev/null
+++ b/selftest/sms_test.py
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+import _prep
+from osmo_gsm_tester import sms
+
+print(sms.Sms())
+print(sms.Sms())
+print(sms.Sms())
+msg = sms.Sms('123', '456')
+print(str(msg))
+
+msg2 = sms.Sms('123', '456')
+print(str(msg2))
+assert msg != msg2
+
+msg2.msg = str(msg.msg)
+print(str(msg2))
+assert msg == msg2
+
+assert msg == str(msg.msg)
+
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/selftest/suite_test.err b/selftest/suite_test.err
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/selftest/suite_test.err
diff --git a/selftest/suite_test.ok b/selftest/suite_test.ok
new file mode 100644
index 0000000..908f24f
--- /dev/null
+++ b/selftest/suite_test.ok
@@ -0,0 +1,485 @@
+- non-existing suite dir
+cnf -: DBG: Found config file paths.conf as [PATH]/selftest/suite_test/paths.conf in ./suite_test which is [PATH]/selftest/suite_test
+cnf -: DBG: [PATH]/selftest/suite_test/paths.conf: relative path ./test_work/state_dir is [PATH]/selftest/suite_test/test_work/state_dir
+cnf -: DBG: [PATH]/selftest/suite_test/paths.conf: relative path . is [PATH]/selftest/suite_test
+cnf -: DBG: Found path suites_dir as [PATH]/selftest/suite_test
+--- -: ERR: RuntimeError: Suite not found: 'does_not_exist' in [PATH]/selftest/suite_test
+- no suite.conf
+cnf -: DBG: Found path suites_dir as [PATH]/selftest/suite_test
+cnf empty_dir: DBG: reading suite.conf
+cnf [PATH]/selftest/suite_test/empty_dir/suite.conf: ERR: FileNotFoundError: [Errno 2] No such file or directory: '[PATH]/selftest/suite_test/empty_dir/suite.conf' [empty_dir↪[PATH]/selftest/suite_test/empty_dir/suite.conf]
+- valid suite dir
+cnf -: DBG: Found path suites_dir as [PATH]/selftest/suite_test
+cnf test_suite: DBG: reading suite.conf
+defaults:
+ timeout: 60s
+resources:
+ bts:
+ - label: sysmoCell 5000
+ times: '2'
+ - times: '1'
+ type: sysmo
+ ip_address:
+ - times: '1'
+ modem:
+ - times: '2'
+
+- run hello world test
+cnf ResourcesPool: DBG: Found config file resources.conf as [PATH]/selftest/suite_test/resources.conf in ./suite_test which is [PATH]/selftest/suite_test
+cnf ResourcesPool: DBG: Found path state_dir as [PATH]/selftest/suite_test/test_work/state_dir
+
+---------------------------------------------------------------------
+trial test_suite
+---------------------------------------------------------------------
+tst test_suite: reserving resources in [PATH]/selftest/suite_test/test_work/state_dir ...
+tst test_suite: DBG: {combining='resources'}
+tst {combining_scenarios='resources'}: DBG: {definition_conf={bts=[{'label': 'sysmoCell 5000'}, {'label': 'sysmoCell 5000'}, {'type': 'sysmo'}], ip_address=[{}], modem=[{}, {}]}} [test_suite↪{combining_scenarios='resources'}]
+tst test_suite: DBG: {combining='modifiers'}
+tst {combining_scenarios='modifiers'}: DBG: {definition_conf={}} [test_suite↪{combining_scenarios='modifiers'}]
+tst test_suite: Reserving 3 x bts (candidates: 6)
+tst test_suite: DBG: Picked - _hash: a59640b8ba6a373552b24a6f9f65cadd2347bace
+ addr: 10.42.42.53
+ band: GSM-1800
+ ipa_unit_id: '7'
+ label: sysmoCell 5000
+ osmo_trx:
+ clock_reference: external
+ launch_trx: 'False'
+ trx_ip: 10.42.42.112
+ trx_list:
+ - max_power_red: '3'
+ nominal_power: '10'
+ - max_power_red: '0'
+ nominal_power: '12'
+ type: osmo-bts-trx
+- _hash: c2feabd082c36a1cdeccb9a5237dfff7dbadb009
+ addr: 10.42.42.53
+ band: GSM-1800
+ ipa_unit_id: '7'
+ label: sysmoCell 5000
+ osmo_trx:
+ clock_reference: external
+ launch_trx: 'False'
+ trx_ip: 10.42.42.112
+ trx_list:
+ - nominal_power: '10'
+ - max_power_red: '1'
+ nominal_power: '12'
+ type: osmo-bts-trx
+- _hash: 07d9c8aaa940b674efcbbabdd69f58a6ce4e94f9
+ addr: 10.42.42.114
+ band: GSM-1800
+ ipa_unit_id: '1'
+ label: sysmoBTS 1002
+ type: sysmo
+tst test_suite: Reserving 1 x ip_address (candidates: 3)
+tst test_suite: DBG: Picked - _hash: cde1debf28f07f94f92c761b4b7c6bf35785ced4
+ addr: 10.42.42.1
+tst test_suite: Reserving 2 x modem (candidates: 16)
+tst test_suite: DBG: Picked - _hash: 19c69e45aa090fb511446bd00797690aa82ff52f
+ imsi: '901700000007801'
+ ki: D620F48487B1B782DA55DF6717F08FF9
+ label: m7801
+ path: /wavecom_0
+- _hash: e1a46516a1fb493b2617ab14fc1693a9a45ec254
+ imsi: '901700000007802'
+ ki: 47FDB2D55CE6A10A85ABDAD034A5B7B3
+ label: m7802
+ path: /wavecom_1
+
+----------------------------------------------
+trial test_suite hello_world.py
+----------------------------------------------
+tst hello_world.py:[LINENR]: hello world [test_suite↪hello_world.py:[LINENR]]
+tst hello_world.py:[LINENR]: I am 'test_suite' / 'hello_world.py:[LINENR]' [test_suite↪hello_world.py:[LINENR]]
+tst hello_world.py:[LINENR]: one [test_suite↪hello_world.py:[LINENR]]
+tst hello_world.py:[LINENR]: two [test_suite↪hello_world.py:[LINENR]]
+tst hello_world.py:[LINENR]: three [test_suite↪hello_world.py:[LINENR]]
+tst hello_world.py:[LINENR] Test passed (N.N sec) [test_suite↪hello_world.py]
+---------------------------------------------------------------------
+trial test_suite PASS
+---------------------------------------------------------------------
+PASS: test_suite (pass: 1, skip: 5)
+ pass: hello_world.py (N.N sec)
+ skip: mo_mt_sms.py
+ skip: mo_sms.py
+ skip: test_error.py
+ skip: test_fail.py
+ skip: test_fail_raise.py
+
+- a test with an error
+
+---------------------------------------------------------------------
+trial test_suite
+---------------------------------------------------------------------
+
+----------------------------------------------
+trial test_suite test_error.py
+----------------------------------------------
+tst test_error.py:[LINENR]: I am 'test_suite' / 'test_error.py:[LINENR]' [test_suite↪test_error.py:[LINENR]] [test_error.py:[LINENR]]
+tst test_error.py:[LINENR]: ERR: AssertionError: test_error.py:[LINENR]: assert False [test_suite↪test_error.py:[LINENR]] [test_error.py:[LINENR]: assert False]
+tst test_error.py:[LINENR]: Test FAILED (N.N sec) [test_suite↪test_error.py:[LINENR]] [test.py:[LINENR]]
+---------------------------------------------------------------------
+trial test_suite FAIL
+---------------------------------------------------------------------
+FAIL: test_suite (fail: 1, skip: 5)
+ skip: hello_world.py (N.N sec)
+ skip: mo_mt_sms.py
+ skip: mo_sms.py
+ FAIL: test_error.py (N.N sec) AssertionError: test_error.py:[LINENR]: assert False
+ skip: test_fail.py
+ skip: test_fail_raise.py
+
+- a test with a failure
+
+---------------------------------------------------------------------
+trial test_suite
+---------------------------------------------------------------------
+
+----------------------------------------------
+trial test_suite test_fail.py
+----------------------------------------------
+tst test_fail.py:[LINENR]: I am 'test_suite' / 'test_fail.py:[LINENR]' [test_suite↪test_fail.py:[LINENR]] [test_fail.py:[LINENR]]
+tst test_fail.py:[LINENR]: ERR: EpicFail: This failure is expected [test_suite↪test_fail.py:[LINENR]] [test_fail.py:[LINENR]]
+tst test_fail.py:[LINENR]: Test FAILED (N.N sec) [test_suite↪test_fail.py:[LINENR]] [test.py:[LINENR]]
+---------------------------------------------------------------------
+trial test_suite FAIL
+---------------------------------------------------------------------
+FAIL: test_suite (fail: 1, skip: 5)
+ skip: hello_world.py (N.N sec)
+ skip: mo_mt_sms.py
+ skip: mo_sms.py
+ skip: test_error.py (N.N sec)
+ FAIL: test_fail.py (N.N sec) EpicFail: This failure is expected
+ skip: test_fail_raise.py
+
+- a test with a raised failure
+
+---------------------------------------------------------------------
+trial test_suite
+---------------------------------------------------------------------
+
+----------------------------------------------
+trial test_suite test_fail_raise.py
+----------------------------------------------
+tst test_fail_raise.py:[LINENR]: ERR: ExpectedFail: This failure is expected [test_suite↪test_fail_raise.py:[LINENR]] [test_fail_raise.py:[LINENR]: raise ExpectedFail('This failure is expected')]
+tst test_fail_raise.py:[LINENR]: Test FAILED (N.N sec) [test_suite↪test_fail_raise.py:[LINENR]] [test.py:[LINENR]]
+---------------------------------------------------------------------
+trial test_suite FAIL
+---------------------------------------------------------------------
+FAIL: test_suite (fail: 1, skip: 5)
+ skip: hello_world.py (N.N sec)
+ skip: mo_mt_sms.py
+ skip: mo_sms.py
+ skip: test_error.py (N.N sec)
+ skip: test_fail.py (N.N sec)
+ FAIL: test_fail_raise.py (N.N sec) ExpectedFail: This failure is expected
+- test with half empty scenario
+cnf ResourcesPool: DBG: Found config file resources.conf as [PATH]/selftest/suite_test/resources.conf in ./suite_test which is [PATH]/selftest/suite_test [config.py:[LINENR]]
+cnf ResourcesPool: DBG: Found path state_dir as [PATH]/selftest/suite_test/test_work/state_dir [config.py:[LINENR]]
+
+---------------------------------------------------------------------
+trial test_suite
+---------------------------------------------------------------------
+tst test_suite: reserving resources in [PATH]/selftest/suite_test/test_work/state_dir ... [suite.py:[LINENR]]
+tst test_suite: DBG: {combining='resources'} [suite.py:[LINENR]]
+tst {combining_scenarios='resources'}: DBG: {definition_conf={bts=[{'label': 'sysmoCell 5000'}, {'label': 'sysmoCell 5000'}, {'type': 'sysmo'}], ip_address=[{}], modem=[{}, {}]}} [test_suite↪{combining_scenarios='resources'}] [suite.py:[LINENR]]
+tst {combining_scenarios='resources', scenario='foo'}: [RESOURCE_DICT]
+tst test_suite: DBG: {combining='modifiers'} [suite.py:[LINENR]]
+tst {combining_scenarios='modifiers'}: DBG: {definition_conf={}} [test_suite↪{combining_scenarios='modifiers'}] [suite.py:[LINENR]]
+tst {combining_scenarios='modifiers', scenario='foo'}: DBG: {conf={}, scenario='foo'} [test_suite↪{combining_scenarios='modifiers', scenario='foo'}] [suite.py:[LINENR]]
+tst test_suite: Reserving 3 x bts (candidates: 6) [resource.py:[LINENR]]
+tst test_suite: DBG: Picked - _hash: a59640b8ba6a373552b24a6f9f65cadd2347bace
+ addr: 10.42.42.53
+ band: GSM-1800
+ ipa_unit_id: '7'
+ label: sysmoCell 5000
+ osmo_trx:
+ clock_reference: external
+ launch_trx: 'False'
+ trx_ip: 10.42.42.112
+ trx_list:
+ - max_power_red: '3'
+ nominal_power: '10'
+ - max_power_red: '0'
+ nominal_power: '12'
+ type: osmo-bts-trx
+- _hash: c2feabd082c36a1cdeccb9a5237dfff7dbadb009
+ addr: 10.42.42.53
+ band: GSM-1800
+ ipa_unit_id: '7'
+ label: sysmoCell 5000
+ osmo_trx:
+ clock_reference: external
+ launch_trx: 'False'
+ trx_ip: 10.42.42.112
+ trx_list:
+ - nominal_power: '10'
+ - max_power_red: '1'
+ nominal_power: '12'
+ type: osmo-bts-trx
+- _hash: 07d9c8aaa940b674efcbbabdd69f58a6ce4e94f9
+ addr: 10.42.42.114
+ band: GSM-1800
+ ipa_unit_id: '1'
+ label: sysmoBTS 1002
+ type: sysmo
+ [resource.py:[LINENR]]
+tst test_suite: Reserving 1 x ip_address (candidates: 3) [resource.py:[LINENR]]
+tst test_suite: DBG: Picked - _hash: cde1debf28f07f94f92c761b4b7c6bf35785ced4
+ addr: 10.42.42.1
+ [resource.py:[LINENR]]
+tst test_suite: Reserving 2 x modem (candidates: 16) [resource.py:[LINENR]]
+tst test_suite: DBG: Picked - _hash: 19c69e45aa090fb511446bd00797690aa82ff52f
+ imsi: '901700000007801'
+ ki: D620F48487B1B782DA55DF6717F08FF9
+ label: m7801
+ path: /wavecom_0
+- _hash: e1a46516a1fb493b2617ab14fc1693a9a45ec254
+ imsi: '901700000007802'
+ ki: 47FDB2D55CE6A10A85ABDAD034A5B7B3
+ label: m7802
+ path: /wavecom_1
+ [resource.py:[LINENR]]
+
+----------------------------------------------
+trial test_suite hello_world.py
+----------------------------------------------
+tst hello_world.py:[LINENR]: hello world [test_suite↪hello_world.py:[LINENR]] [hello_world.py:[LINENR]]
+tst hello_world.py:[LINENR]: I am 'test_suite' / 'hello_world.py:[LINENR]' [test_suite↪hello_world.py:[LINENR]] [hello_world.py:[LINENR]]
+tst hello_world.py:[LINENR]: one [test_suite↪hello_world.py:[LINENR]] [hello_world.py:[LINENR]]
+tst hello_world.py:[LINENR]: two [test_suite↪hello_world.py:[LINENR]] [hello_world.py:[LINENR]]
+tst hello_world.py:[LINENR]: three [test_suite↪hello_world.py:[LINENR]] [hello_world.py:[LINENR]]
+tst hello_world.py:[LINENR] Test passed (N.N sec) [test_suite↪hello_world.py] [test.py:[LINENR]]
+---------------------------------------------------------------------
+trial test_suite PASS
+---------------------------------------------------------------------
+PASS: test_suite (pass: 1, skip: 5)
+ pass: hello_world.py (N.N sec)
+ skip: mo_mt_sms.py
+ skip: mo_sms.py
+ skip: test_error.py
+ skip: test_fail.py
+ skip: test_fail_raise.py
+- test with scenario
+cnf ResourcesPool: DBG: Found config file resources.conf as [PATH]/selftest/suite_test/resources.conf in ./suite_test which is [PATH]/selftest/suite_test [config.py:[LINENR]]
+cnf ResourcesPool: DBG: Found path state_dir as [PATH]/selftest/suite_test/test_work/state_dir [config.py:[LINENR]]
+
+---------------------------------------------------------------------
+trial test_suite
+---------------------------------------------------------------------
+tst test_suite: reserving resources in [PATH]/selftest/suite_test/test_work/state_dir ... [suite.py:[LINENR]]
+tst test_suite: DBG: {combining='resources'} [suite.py:[LINENR]]
+tst {combining_scenarios='resources'}: DBG: {definition_conf={bts=[{'label': 'sysmoCell 5000'}, {'label': 'sysmoCell 5000'}, {'type': 'sysmo'}], ip_address=[{}], modem=[{}, {}]}} [test_suite↪{combining_scenarios='resources'}] [suite.py:[LINENR]]
+tst {combining_scenarios='resources', scenario='foo'}: [RESOURCE_DICT]
+tst test_suite: DBG: {combining='modifiers'} [suite.py:[LINENR]]
+tst {combining_scenarios='modifiers'}: DBG: {definition_conf={}} [test_suite↪{combining_scenarios='modifiers'}] [suite.py:[LINENR]]
+tst {combining_scenarios='modifiers', scenario='foo'}: DBG: {conf={}, scenario='foo'} [test_suite↪{combining_scenarios='modifiers', scenario='foo'}] [suite.py:[LINENR]]
+tst test_suite: Reserving 3 x bts (candidates: 6) [resource.py:[LINENR]]
+tst test_suite: DBG: Picked - _hash: a59640b8ba6a373552b24a6f9f65cadd2347bace
+ addr: 10.42.42.53
+ band: GSM-1800
+ ipa_unit_id: '7'
+ label: sysmoCell 5000
+ osmo_trx:
+ clock_reference: external
+ launch_trx: 'False'
+ trx_ip: 10.42.42.112
+ trx_list:
+ - max_power_red: '3'
+ nominal_power: '10'
+ - max_power_red: '0'
+ nominal_power: '12'
+ type: osmo-bts-trx
+- _hash: c2feabd082c36a1cdeccb9a5237dfff7dbadb009
+ addr: 10.42.42.53
+ band: GSM-1800
+ ipa_unit_id: '7'
+ label: sysmoCell 5000
+ osmo_trx:
+ clock_reference: external
+ launch_trx: 'False'
+ trx_ip: 10.42.42.112
+ trx_list:
+ - nominal_power: '10'
+ - max_power_red: '1'
+ nominal_power: '12'
+ type: osmo-bts-trx
+- _hash: 07d9c8aaa940b674efcbbabdd69f58a6ce4e94f9
+ addr: 10.42.42.114
+ band: GSM-1800
+ ipa_unit_id: '1'
+ label: sysmoBTS 1002
+ type: sysmo
+ [resource.py:[LINENR]]
+tst test_suite: Reserving 1 x ip_address (candidates: 3) [resource.py:[LINENR]]
+tst test_suite: DBG: Picked - _hash: cde1debf28f07f94f92c761b4b7c6bf35785ced4
+ addr: 10.42.42.1
+ [resource.py:[LINENR]]
+tst test_suite: Reserving 2 x modem (candidates: 16) [resource.py:[LINENR]]
+tst test_suite: DBG: Picked - _hash: 19c69e45aa090fb511446bd00797690aa82ff52f
+ imsi: '901700000007801'
+ ki: D620F48487B1B782DA55DF6717F08FF9
+ label: m7801
+ path: /wavecom_0
+- _hash: e1a46516a1fb493b2617ab14fc1693a9a45ec254
+ imsi: '901700000007802'
+ ki: 47FDB2D55CE6A10A85ABDAD034A5B7B3
+ label: m7802
+ path: /wavecom_1
+ [resource.py:[LINENR]]
+
+----------------------------------------------
+trial test_suite hello_world.py
+----------------------------------------------
+tst hello_world.py:[LINENR]: hello world [test_suite↪hello_world.py:[LINENR]] [hello_world.py:[LINENR]]
+tst hello_world.py:[LINENR]: I am 'test_suite' / 'hello_world.py:[LINENR]' [test_suite↪hello_world.py:[LINENR]] [hello_world.py:[LINENR]]
+tst hello_world.py:[LINENR]: one [test_suite↪hello_world.py:[LINENR]] [hello_world.py:[LINENR]]
+tst hello_world.py:[LINENR]: two [test_suite↪hello_world.py:[LINENR]] [hello_world.py:[LINENR]]
+tst hello_world.py:[LINENR]: three [test_suite↪hello_world.py:[LINENR]] [hello_world.py:[LINENR]]
+tst hello_world.py:[LINENR] Test passed (N.N sec) [test_suite↪hello_world.py] [test.py:[LINENR]]
+---------------------------------------------------------------------
+trial test_suite PASS
+---------------------------------------------------------------------
+PASS: test_suite (pass: 1, skip: 5)
+ pass: hello_world.py (N.N sec)
+ skip: mo_mt_sms.py
+ skip: mo_sms.py
+ skip: test_error.py
+ skip: test_fail.py
+ skip: test_fail_raise.py
+- test with scenario and modifiers
+cnf ResourcesPool: DBG: Found config file resources.conf as [PATH]/selftest/suite_test/resources.conf in ./suite_test which is [PATH]/selftest/suite_test [config.py:[LINENR]]
+cnf ResourcesPool: DBG: Found path state_dir as [PATH]/selftest/suite_test/test_work/state_dir [config.py:[LINENR]]
+tst test_suite: reserving resources in [PATH]/selftest/suite_test/test_work/state_dir ... [suite.py:[LINENR]]
+tst test_suite: DBG: {combining='resources'} [suite.py:[LINENR]]
+tst {combining_scenarios='resources'}: DBG: {definition_conf={bts=[{'label': 'sysmoCell 5000'}, {'label': 'sysmoCell 5000'}, {'type': 'sysmo'}], ip_address=[{}], modem=[{}, {}]}} [test_suite↪{combining_scenarios='resources'}] [suite.py:[LINENR]]
+tst {combining_scenarios='resources', scenario='foo'}: [RESOURCE_DICT]
+tst test_suite: DBG: {combining='modifiers'} [suite.py:[LINENR]]
+tst {combining_scenarios='modifiers'}: DBG: {definition_conf={}} [test_suite↪{combining_scenarios='modifiers'}] [suite.py:[LINENR]]
+tst {combining_scenarios='modifiers', scenario='foo'}: DBG: {conf={bts=[{'trx_list': [{'nominal_power': '20'}, {'nominal_power': '20'}]}, {'trx_list': [{'nominal_power': '20'}, {'nominal_power': '20'}]}, {'type': 'sysmo'}]}, scenario='foo'} [test_suite↪{combining_scenarios='modifiers', scenario='foo'}] [suite.py:[LINENR]]
+tst test_suite: Reserving 3 x bts (candidates: 6) [resource.py:[LINENR]]
+tst test_suite: DBG: Picked - _hash: a59640b8ba6a373552b24a6f9f65cadd2347bace
+ addr: 10.42.42.53
+ band: GSM-1800
+ ipa_unit_id: '7'
+ label: sysmoCell 5000
+ osmo_trx:
+ clock_reference: external
+ launch_trx: 'False'
+ trx_ip: 10.42.42.112
+ trx_list:
+ - max_power_red: '3'
+ nominal_power: '10'
+ - max_power_red: '0'
+ nominal_power: '12'
+ type: osmo-bts-trx
+- _hash: c2feabd082c36a1cdeccb9a5237dfff7dbadb009
+ addr: 10.42.42.53
+ band: GSM-1800
+ ipa_unit_id: '7'
+ label: sysmoCell 5000
+ osmo_trx:
+ clock_reference: external
+ launch_trx: 'False'
+ trx_ip: 10.42.42.112
+ trx_list:
+ - nominal_power: '10'
+ - max_power_red: '1'
+ nominal_power: '12'
+ type: osmo-bts-trx
+- _hash: 07d9c8aaa940b674efcbbabdd69f58a6ce4e94f9
+ addr: 10.42.42.114
+ band: GSM-1800
+ ipa_unit_id: '1'
+ label: sysmoBTS 1002
+ type: sysmo
+ [resource.py:[LINENR]]
+tst test_suite: Reserving 1 x ip_address (candidates: 3) [resource.py:[LINENR]]
+tst test_suite: DBG: Picked - _hash: cde1debf28f07f94f92c761b4b7c6bf35785ced4
+ addr: 10.42.42.1
+ [resource.py:[LINENR]]
+tst test_suite: Reserving 2 x modem (candidates: 16) [resource.py:[LINENR]]
+tst test_suite: DBG: Picked - _hash: 19c69e45aa090fb511446bd00797690aa82ff52f
+ imsi: '901700000007801'
+ ki: D620F48487B1B782DA55DF6717F08FF9
+ label: m7801
+ path: /wavecom_0
+- _hash: e1a46516a1fb493b2617ab14fc1693a9a45ec254
+ imsi: '901700000007802'
+ ki: 47FDB2D55CE6A10A85ABDAD034A5B7B3
+ label: m7802
+ path: /wavecom_1
+ [resource.py:[LINENR]]
+resources(test_suite)={'bts': [{'_hash': 'a59640b8ba6a373552b24a6f9f65cadd2347bace',
+ '_reserved_by': 'test_suite-[ID_NUM]-[ID_NUM]',
+ 'addr': '10.42.42.53',
+ 'band': 'GSM-1800',
+ 'ipa_unit_id': '7',
+ 'label': 'sysmoCell 5000',
+ 'osmo_trx': {'clock_reference': 'external',
+ 'launch_trx': 'False',
+ 'trx_ip': '10.42.42.112'},
+ 'trx_list': [{'max_power_red': '3', 'nominal_power': '20'},
+ {'max_power_red': '0', 'nominal_power': '20'}],
+ 'type': 'osmo-bts-trx'},
+ {'_hash': 'c2feabd082c36a1cdeccb9a5237dfff7dbadb009',
+ '_reserved_by': 'test_suite-[ID_NUM]-[ID_NUM]',
+ 'addr': '10.42.42.53',
+ 'band': 'GSM-1800',
+ 'ipa_unit_id': '7',
+ 'label': 'sysmoCell 5000',
+ 'osmo_trx': {'clock_reference': 'external',
+ 'launch_trx': 'False',
+ 'trx_ip': '10.42.42.112'},
+ 'trx_list': [{'nominal_power': '20'},
+ {'max_power_red': '1', 'nominal_power': '20'}],
+ 'type': 'osmo-bts-trx'},
+ {'_hash': '07d9c8aaa940b674efcbbabdd69f58a6ce4e94f9',
+ '_reserved_by': 'test_suite-[ID_NUM]-[ID_NUM]',
+ 'addr': '10.42.42.114',
+ 'band': 'GSM-1800',
+ 'ipa_unit_id': '1',
+ 'label': 'sysmoBTS 1002',
+ 'type': 'sysmo'}],
+ 'ip_address': [{'_hash': 'cde1debf28f07f94f92c761b4b7c6bf35785ced4',
+ '_reserved_by': 'test_suite-[ID_NUM]-[ID_NUM]',
+ 'addr': '10.42.42.1'}],
+ 'modem': [{'_hash': '19c69e45aa090fb511446bd00797690aa82ff52f',
+ '_reserved_by': 'test_suite-[ID_NUM]-[ID_NUM]',
+ 'imsi': '901700000007801',
+ 'ki': 'D620F48487B1B782DA55DF6717F08FF9',
+ 'label': 'm7801',
+ 'path': '/wavecom_0'},
+ {'_hash': 'e1a46516a1fb493b2617ab14fc1693a9a45ec254',
+ '_reserved_by': 'test_suite-[ID_NUM]-[ID_NUM]',
+ 'imsi': '901700000007802',
+ 'ki': '47FDB2D55CE6A10A85ABDAD034A5B7B3',
+ 'label': 'm7802',
+ 'path': '/wavecom_1'}]}
+
+---------------------------------------------------------------------
+trial test_suite
+---------------------------------------------------------------------
+
+----------------------------------------------
+trial test_suite hello_world.py
+----------------------------------------------
+tst hello_world.py:[LINENR]: hello world [test_suite↪hello_world.py:[LINENR]] [hello_world.py:[LINENR]]
+tst hello_world.py:[LINENR]: I am 'test_suite' / 'hello_world.py:[LINENR]' [test_suite↪hello_world.py:[LINENR]] [hello_world.py:[LINENR]]
+tst hello_world.py:[LINENR]: one [test_suite↪hello_world.py:[LINENR]] [hello_world.py:[LINENR]]
+tst hello_world.py:[LINENR]: two [test_suite↪hello_world.py:[LINENR]] [hello_world.py:[LINENR]]
+tst hello_world.py:[LINENR]: three [test_suite↪hello_world.py:[LINENR]] [hello_world.py:[LINENR]]
+tst hello_world.py:[LINENR] Test passed (N.N sec) [test_suite↪hello_world.py] [test.py:[LINENR]]
+---------------------------------------------------------------------
+trial test_suite PASS
+---------------------------------------------------------------------
+PASS: test_suite (pass: 1, skip: 5)
+ pass: hello_world.py (N.N sec)
+ skip: mo_mt_sms.py
+ skip: mo_sms.py
+ skip: test_error.py
+ skip: test_fail.py
+ skip: test_fail_raise.py
+
+- graceful exit.
diff --git a/selftest/suite_test.ok.ign b/selftest/suite_test.ok.ign
new file mode 100644
index 0000000..9bd168f
--- /dev/null
+++ b/selftest/suite_test.ok.ign
@@ -0,0 +1,5 @@
+/[^ ]*/selftest/ [PATH]/selftest/
+\.py:[0-9]* .py:[LINENR]
+\([0-9.]+ sec\) (N.N sec)
+{combining_scenarios='resources', scenario='foo'}:.* {combining_scenarios='resources', scenario='foo'}: [RESOURCE_DICT]
+test_suite-[0-9]*-[0-9]* test_suite-[ID_NUM]-[ID_NUM]
diff --git a/selftest/suite_test.py b/selftest/suite_test.py
new file mode 100755
index 0000000..db19ccc
--- /dev/null
+++ b/selftest/suite_test.py
@@ -0,0 +1,73 @@
+#!/usr/bin/env python3
+import os
+import _prep
+from osmo_gsm_tester import log, suite, config, report
+
+config.ENV_CONF = './suite_test'
+
+#log.style_change(trace=True)
+
+print('- non-existing suite dir')
+assert(log.run_logging_exceptions(suite.load, 'does_not_exist') == None)
+
+print('- no suite.conf')
+assert(log.run_logging_exceptions(suite.load, 'empty_dir') == None)
+
+print('- valid suite dir')
+example_suite_dir = os.path.join('test_suite')
+s_def = suite.load(example_suite_dir)
+assert(isinstance(s_def, suite.SuiteDefinition))
+print(config.tostr(s_def.conf))
+
+print('- run hello world test')
+trial = log.Origin(log.C_TST, 'trial')
+s = suite.SuiteRun(trial, 'test_suite', s_def)
+results = s.run_tests('hello_world.py')
+print(report.suite_to_text(s))
+
+log.style_change(src=True)
+#log.style_change(trace=True)
+print('\n- a test with an error')
+results = s.run_tests('test_error.py')
+output = report.suite_to_text(s)
+print(output)
+
+print('\n- a test with a failure')
+results = s.run_tests('test_fail.py')
+output = report.suite_to_text(s)
+print(output)
+
+print('\n- a test with a raised failure')
+results = s.run_tests('test_fail_raise.py')
+output = report.suite_to_text(s)
+print(output)
+
+print('- test with half empty scenario')
+trial = log.Origin(log.C_TST, 'trial')
+scenario = config.Scenario('foo', 'bar')
+scenario['resources'] = { 'bts': [{'type': 'osmo-bts-trx'}] }
+s = suite.SuiteRun(trial, 'test_suite', s_def, [scenario])
+results = s.run_tests('hello_world.py')
+print(report.suite_to_text(s))
+
+print('- test with scenario')
+trial = log.Origin(log.C_TST, 'trial')
+scenario = config.Scenario('foo', 'bar')
+scenario['resources'] = { 'bts': [{ 'times': '2', 'type': 'osmo-bts-trx', 'trx_list': [{'nominal_power': '10'}, {'nominal_power': '12'}]}, {'type': 'sysmo'}] }
+s = suite.SuiteRun(trial, 'test_suite', s_def, [scenario])
+results = s.run_tests('hello_world.py')
+print(report.suite_to_text(s))
+
+print('- test with scenario and modifiers')
+trial = log.Origin(log.C_TST, 'trial')
+scenario = config.Scenario('foo', 'bar')
+scenario['resources'] = { 'bts': [{ 'times': '2', 'type': 'osmo-bts-trx', 'trx_list': [{'nominal_power': '10'}, {'nominal_power': '12'}]}, {'type': 'sysmo'}] }
+scenario['modifiers'] = { 'bts': [{ 'times': '2', 'trx_list': [{'nominal_power': '20'}, {'nominal_power': '20'}]}, {'type': 'sysmo'}] }
+s = suite.SuiteRun(trial, 'test_suite', s_def, [scenario])
+s.reserve_resources()
+print(repr(s.reserved_resources))
+results = s.run_tests('hello_world.py')
+print(report.suite_to_text(s))
+
+print('\n- graceful exit.')
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/selftest/suite_test/empty_dir/.unrelated_file b/selftest/suite_test/empty_dir/.unrelated_file
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/selftest/suite_test/empty_dir/.unrelated_file
diff --git a/selftest/suite_test/paths.conf b/selftest/suite_test/paths.conf
new file mode 100644
index 0000000..2b0a274
--- /dev/null
+++ b/selftest/suite_test/paths.conf
@@ -0,0 +1,2 @@
+state_dir: ./test_work/state_dir
+suites_dir: .
diff --git a/selftest/suite_test/resources.conf b/selftest/suite_test/resources.conf
new file mode 100644
index 0000000..bd121cb
--- /dev/null
+++ b/selftest/suite_test/resources.conf
@@ -0,0 +1,175 @@
+# all hardware and interfaces available to this osmo-gsm-tester
+
+ip_address:
+- addr: 10.42.42.1
+- addr: 10.42.42.2
+- addr: 10.42.42.3
+
+bts:
+- label: sysmoBTS 1002
+ type: sysmo
+ ipa_unit_id: 1
+ addr: 10.42.42.114
+ band: GSM-1800
+
+- label: octBTS 3000
+ type: oct
+ ipa_unit_id: 5
+ addr: 10.42.42.115
+ band: GSM-1800
+ trx_list:
+ - hw_addr: 00:0c:90:32:b5:8a
+
+- label: nanoBTS 1900
+ type: nanobts
+ ipa_unit_id: 1902
+ addr: 10.42.42.190
+ band: GSM-1900
+ trx_list:
+ - hw_addr: 00:02:95:00:41:b3
+
+- label: Ettus B200
+ type: osmo-bts-trx
+ ipa_unit_id: 6
+ addr: 10.42.42.52
+ band: GSM-1800
+ osmo_trx:
+ launch_trx: true
+ clock_reference: external
+ trx_list:
+ - nominal_power: 10
+ max_power_red: 2
+ - nominal_power: 12
+
+- label: sysmoCell 5000
+ type: osmo-bts-trx
+ ipa_unit_id: 7
+ addr: 10.42.42.53
+ band: GSM-1800
+ osmo_trx:
+ launch_trx: false
+ clock_reference: external
+ trx_ip: 10.42.42.112
+ trx_list:
+ - nominal_power: 10
+ max_power_red: 3
+ - nominal_power: 12
+ max_power_red: 0
+
+- label: sysmoCell 5000
+ type: osmo-bts-trx
+ ipa_unit_id: 7
+ addr: 10.42.42.53
+ band: GSM-1800
+ osmo_trx:
+ launch_trx: false
+ clock_reference: external
+ trx_ip: 10.42.42.112
+ trx_list:
+ - nominal_power: 10
+ - nominal_power: 12
+ max_power_red: 1
+
+arfcn:
+ - arfcn: 512
+ band: GSM-1800
+ - arfcn: 514
+ band: GSM-1800
+ - arfcn: 516
+ band: GSM-1800
+ - arfcn: 518
+ band: GSM-1800
+ - arfcn: 520
+ band: GSM-1800
+
+ - arfcn: 540
+ band: GSM-1900
+ - arfcn: 542
+ band: GSM-1900
+ - arfcn: 544
+ band: GSM-1900
+ - arfcn: 546
+ band: GSM-1900
+ - arfcn: 548
+ band: GSM-1900
+
+modem:
+- label: m7801
+ path: '/wavecom_0'
+ imsi: 901700000007801
+ ki: D620F48487B1B782DA55DF6717F08FF9
+
+- label: m7802
+ path: '/wavecom_1'
+ imsi: 901700000007802
+ ki: 47FDB2D55CE6A10A85ABDAD034A5B7B3
+
+- label: m7803
+ path: '/wavecom_2'
+ imsi: 901700000007803
+ ki: ABBED4C91417DF710F60675B6EE2C8D2
+
+- label: m7804
+ path: '/wavecom_3'
+ imsi: 901700000007804
+ ki: 8BA541179156F2BF0918CA3CFF9351B0
+
+- label: m7805
+ path: '/wavecom_4'
+ imsi: 901700000007805
+ ki: 82BEC24B5B50C9FAA69D17DEC0883A23
+
+- label: m7806
+ path: '/wavecom_5'
+ imsi: 901700000007806
+ ki: DAF6BD6A188F7A4F09866030BF0F723D
+
+- label: m7807
+ path: '/wavecom_6'
+ imsi: 901700000007807
+ ki: AEB411CFE39681A6352A1EAE4DDC9DBA
+
+- label: m7808
+ path: '/wavecom_7'
+ imsi: 901700000007808
+ ki: F5DEF8692B305D7A65C677CA9EEE09C4
+
+- label: m7809
+ path: '/wavecom_8'
+ imsi: 901700000007809
+ ki: A644F4503E812FD75329B1C8D625DA44
+
+- label: m7810
+ path: '/wavecom_9'
+ imsi: 901700000007810
+ ki: EF663BDF3477DCD18D3D2293A2BAED67
+
+- label: m7811
+ path: '/wavecom_10'
+ imsi: 901700000007811
+ ki: E88F37F048A86A9BC4D652539228C039
+
+- label: m7812
+ path: '/wavecom_11'
+ imsi: 901700000007812
+ ki: E8D940DD66FCF6F1CD2C0F8F8C45633D
+
+- label: m7813
+ path: '/wavecom_12'
+ imsi: 901700000007813
+ ki: DBF534700C10141C49F699B0419107E3
+
+- label: m7814
+ path: '/wavecom_13'
+ imsi: 901700000007814
+ ki: B36021DEB90C4EA607E408A92F3B024D
+
+- label: m7815
+ path: '/wavecom_14'
+ imsi: 901700000007815
+ ki: 1E209F6F839F9195778C4F96BE281A24
+
+- label: m7816
+ path: '/wavecom_15'
+ imsi: 901700000007816
+ ki: BF827D219E739DD189F6F59E60D6455C
diff --git a/selftest/suite_test/test_suite/hello_world.py b/selftest/suite_test/test_suite/hello_world.py
new file mode 100644
index 0000000..073d07f
--- /dev/null
+++ b/selftest/suite_test/test_suite/hello_world.py
@@ -0,0 +1,5 @@
+from osmo_gsm_tester.testenv import *
+
+print('hello world')
+print('I am %r / %r' % (suite.name(), test.name()))
+print('one\ntwo\nthree')
diff --git a/selftest/suite_test/test_suite/mo_mt_sms.py b/selftest/suite_test/test_suite/mo_mt_sms.py
new file mode 100644
index 0000000..c9558d8
--- /dev/null
+++ b/selftest/suite_test/test_suite/mo_mt_sms.py
@@ -0,0 +1,18 @@
+ip_address = resources.ip_address()
+nitb = resources.nitb()
+bts = resources.bts()
+ms_mo = resources.modem()
+ms_mt = resources.modem()
+
+nitb.start(ip_address)
+bts.start(nitb)
+
+nitb.add_subscriber(ms_mo, resources.msisdn())
+nitb.add_subscriber(ms_mt, resources.msisdn())
+
+ms_mo.start()
+ms_mt.start()
+wait(nitb.subscriber_attached, ms_mo, ms_mt)
+
+sms = ms_mo.sms_send(ms_mt)
+wait(nitb.sms_received, sms)
diff --git a/selftest/suite_test/test_suite/mo_sms.py b/selftest/suite_test/test_suite/mo_sms.py
new file mode 100644
index 0000000..768a99c
--- /dev/null
+++ b/selftest/suite_test/test_suite/mo_sms.py
@@ -0,0 +1,20 @@
+ip_address = resources.ip_address()
+nitb = resources.nitb()
+bts = resources.bts()
+ms_ext = resources.msisdn()
+fake_ext = resources.msisdn()
+ms = resources.modem()
+
+nitb.configure(ip_address, bts)
+bts.configure(nitb)
+
+nitb.start()
+bts.start()
+
+nitb.add_fake_ext(fake_ext)
+nitb.add_subscriber(ms, ms_ext)
+
+ms.start()
+wait(nitb.subscriber_attached, ms)
+sms = ms.sms_send(fake_ext)
+wait(nitb.sms_received, sms)
diff --git a/selftest/suite_test/test_suite/suite.conf b/selftest/suite_test/test_suite/suite.conf
new file mode 100644
index 0000000..925dedb
--- /dev/null
+++ b/selftest/suite_test/test_suite/suite.conf
@@ -0,0 +1,13 @@
+resources:
+ ip_address:
+ - times: 1
+ bts:
+ - times: 2
+ label: sysmoCell 5000
+ - times: 1
+ type: sysmo
+ modem:
+ - times: 2
+
+defaults:
+ timeout: 60s
diff --git a/selftest/suite_test/test_suite/test_error.py b/selftest/suite_test/test_suite/test_error.py
new file mode 100755
index 0000000..c0583ff
--- /dev/null
+++ b/selftest/suite_test/test_suite/test_error.py
@@ -0,0 +1,5 @@
+from osmo_gsm_tester.testenv import *
+
+print('I am %r / %r' % (suite.name(), test.name()))
+
+assert False
diff --git a/selftest/suite_test/test_suite/test_fail.py b/selftest/suite_test/test_suite/test_fail.py
new file mode 100755
index 0000000..cbaeded
--- /dev/null
+++ b/selftest/suite_test/test_suite/test_fail.py
@@ -0,0 +1,6 @@
+#!/usr/bin/env python3
+from osmo_gsm_tester.testenv import *
+
+print('I am %r / %r' % (suite.name(), test.name()))
+
+test.set_fail('EpicFail', 'This failure is expected')
diff --git a/selftest/suite_test/test_suite/test_fail_raise.py b/selftest/suite_test/test_suite/test_fail_raise.py
new file mode 100755
index 0000000..4e5eddb
--- /dev/null
+++ b/selftest/suite_test/test_suite/test_fail_raise.py
@@ -0,0 +1,3 @@
+class ExpectedFail(Exception):
+ pass
+raise ExpectedFail('This failure is expected')
diff --git a/selftest/template_test.err b/selftest/template_test.err
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/selftest/template_test.err
diff --git a/selftest/template_test.ok b/selftest/template_test.ok
new file mode 100644
index 0000000..88b77b0
--- /dev/null
+++ b/selftest/template_test.ok
@@ -0,0 +1,193 @@
+- Testing: fill a config file with values
+cnf Templates: DBG: rendering osmo-nitb.cfg.tmpl
+! Configuration rendered by osmo-gsm-tester
+password foo
+!
+log stderr
+ logging filter all 1
+ logging color 1
+ logging print category 1
+ logging print extended-timestamp 1
+ logging level set-all debug
+!
+line vty
+ no login
+ bind val_ip_address
+!
+e1_input
+ e1_line 0 driver ipa
+ ipa bind val_ip_address
+network
+ network country code val_mcc
+ mobile network code val_mnc
+ short name val_short_name
+ long name val_long_name
+ auth policy val_auth_policy
+ location updating reject cause 13
+ encryption val_encryption
+ neci 1
+ rrlp mode none
+ mm info 1
+ handover 0
+ handover window rxlev averaging 10
+ handover window rxqual averaging 1
+ handover window rxlev neighbor averaging 10
+ handover power budget interval 6
+ handover power budget hysteresis 3
+ handover maximum distance 9999
+ bts 0
+ type val_type_bts0
+ band val_band_bts0
+ cell_identity val_bts.cell_identity_bts0
+ location_area_code val_bts.location_area_code_bts0
+ training_sequence_code 7
+ base_station_id_code val_bts.base_station_id_code_bts0
+ ms max power 33
+ cell reselection hysteresis 4
+ rxlev access min 0
+ channel allocator ascending
+ rach tx integer 9
+ rach max transmission 7
+ ip.access unit_id val_bts.unit_id_bts0 0
+ oml ip.access stream_id val_bts.stream_id_bts0 line 0
+ gprs mode gprs
+ gprs routing area val_bts.routing_area_code_bts0
+ gprs network-control-order nc1
+ gprs cell bvci val_bts.bvci_bts0
+ gprs cell timer blocking-timer 3
+ gprs cell timer blocking-retries 3
+ gprs cell timer unblocking-retries 3
+ gprs cell timer reset-timer 3
+ gprs cell timer reset-retries 3
+ gprs cell timer suspend-timer 10
+ gprs cell timer suspend-retries 3
+ gprs cell timer resume-timer 10
+ gprs cell timer resume-retries 3
+ gprs cell timer capability-update-timer 10
+ gprs cell timer capability-update-retries 3
+ gprs nsei val_bts.bvci_bts0
+ gprs ns timer tns-block 3
+ gprs ns timer tns-block-retries 3
+ gprs ns timer tns-reset 3
+ gprs ns timer tns-reset-retries 3
+ gprs ns timer tns-test 30
+ gprs ns timer tns-alive 3
+ gprs ns timer tns-alive-retries 10
+ gprs nsvc 0 nsvci val_bts.bvci_bts0
+ gprs nsvc 0 local udp port 23020
+ gprs nsvc 0 remote udp port 23000
+ gprs nsvc 0 remote ip val_bts.sgsn_ip_addr_bts0
+ trx 0
+ rf_locked 0
+ arfcn val_trx_arfcn_trx0
+ nominal power val_trx_nominal_power_trx0
+ max_power_red val_trx_max_power_red_trx0
+ rsl e1 tei 0
+ timeslot 0
+ phys_chan_config val_phys_chan_config_0
+ timeslot 1
+ phys_chan_config val_phys_chan_config_1
+ timeslot 2
+ phys_chan_config val_phys_chan_config_2
+ timeslot 3
+ phys_chan_config val_phys_chan_config_3
+ trx 1
+ rf_locked 0
+ arfcn val_trx_arfcn_trx1
+ nominal power val_trx_nominal_power_trx1
+ max_power_red val_trx_max_power_red_trx1
+ rsl e1 tei 0
+ timeslot 0
+ phys_chan_config val_phys_chan_config_0
+ timeslot 1
+ phys_chan_config val_phys_chan_config_1
+ timeslot 2
+ phys_chan_config val_phys_chan_config_2
+ timeslot 3
+ phys_chan_config val_phys_chan_config_3
+ bts 1
+ type val_type_bts1
+ band val_band_bts1
+ cell_identity val_bts.cell_identity_bts1
+ location_area_code val_bts.location_area_code_bts1
+ training_sequence_code 7
+ base_station_id_code val_bts.base_station_id_code_bts1
+ ms max power 33
+ cell reselection hysteresis 4
+ rxlev access min 0
+ channel allocator ascending
+ rach tx integer 9
+ rach max transmission 7
+ ip.access unit_id val_bts.unit_id_bts1 0
+ oml ip.access stream_id val_bts.stream_id_bts1 line 0
+ gprs mode gprs
+ gprs routing area val_bts.routing_area_code_bts1
+ gprs network-control-order nc1
+ gprs cell bvci val_bts.bvci_bts1
+ gprs cell timer blocking-timer 3
+ gprs cell timer blocking-retries 3
+ gprs cell timer unblocking-retries 3
+ gprs cell timer reset-timer 3
+ gprs cell timer reset-retries 3
+ gprs cell timer suspend-timer 10
+ gprs cell timer suspend-retries 3
+ gprs cell timer resume-timer 10
+ gprs cell timer resume-retries 3
+ gprs cell timer capability-update-timer 10
+ gprs cell timer capability-update-retries 3
+ gprs nsei val_bts.bvci_bts1
+ gprs ns timer tns-block 3
+ gprs ns timer tns-block-retries 3
+ gprs ns timer tns-reset 3
+ gprs ns timer tns-reset-retries 3
+ gprs ns timer tns-test 30
+ gprs ns timer tns-alive 3
+ gprs ns timer tns-alive-retries 10
+ gprs nsvc 0 nsvci val_bts.bvci_bts1
+ gprs nsvc 0 local udp port 23020
+ gprs nsvc 0 remote udp port 23000
+ gprs nsvc 0 remote ip val_bts.sgsn_ip_addr_bts1
+ trx 0
+ rf_locked 0
+ arfcn val_trx_arfcn_trx0
+ nominal power val_trx_nominal_power_trx0
+ max_power_red val_trx_max_power_red_trx0
+ rsl e1 tei 0
+ timeslot 0
+ phys_chan_config val_phys_chan_config_0
+ timeslot 1
+ phys_chan_config val_phys_chan_config_1
+ timeslot 2
+ phys_chan_config val_phys_chan_config_2
+ timeslot 3
+ phys_chan_config val_phys_chan_config_3
+ trx 1
+ rf_locked 0
+ arfcn val_trx_arfcn_trx1
+ nominal power val_trx_nominal_power_trx1
+ max_power_red val_trx_max_power_red_trx1
+ rsl e1 tei 0
+ timeslot 0
+ phys_chan_config val_phys_chan_config_0
+ timeslot 1
+ phys_chan_config val_phys_chan_config_1
+ timeslot 2
+ phys_chan_config val_phys_chan_config_2
+ timeslot 3
+ phys_chan_config val_phys_chan_config_3
+smpp
+ local-tcp-ip val_ip_address 2775
+ system-id test-nitb
+ policy val_smsc_policy
+ esme val_system_id_esme0
+ password val_password_esme0
+ default-route
+ esme val_system_id_esme1
+ no password
+ default-route
+ctrl
+ bind val_ip_address
+
+- Testing: expect to fail on invalid templates dir
+sucess: setting non-existing templates dir raised RuntimeError
+
diff --git a/selftest/template_test.py b/selftest/template_test.py
new file mode 100755
index 0000000..f4f1bd5
--- /dev/null
+++ b/selftest/template_test.py
@@ -0,0 +1,95 @@
+#!/usr/bin/env python3
+
+import _prep
+
+import sys
+import os
+
+from osmo_gsm_tester import template, log
+
+log.set_level(log.C_CNF, log.L_DBG)
+
+print('- Testing: fill a config file with values')
+
+mock_timeslot_list=(
+ { 'phys_chan_config': 'val_phys_chan_config_0' },
+ { 'phys_chan_config': 'val_phys_chan_config_1' },
+ { 'phys_chan_config': 'val_phys_chan_config_2' },
+ { 'phys_chan_config': 'val_phys_chan_config_3' },
+ )
+
+mock_bts = {
+ 'osmobsc_bts_type': 'val_type',
+ 'band': 'val_band',
+ 'location_area_code': 'val_bts.location_area_code',
+ 'routing_area_code': 'val_bts.routing_area_code',
+ 'cell_identity': 'val_bts.cell_identity',
+ 'bvci': 'val_bts.bvci',
+ 'base_station_id_code': 'val_bts.base_station_id_code',
+ 'ipa_unit_id': 'val_bts.unit_id',
+ 'stream_id': 'val_bts.stream_id',
+ 'sgsn': (dict(ip_address=dict(addr='val_bts.sgsn_ip_addr'))),
+ 'trx_list': (
+ dict(arfcn='val_trx_arfcn_trx0',
+ nominal_power='val_trx_nominal_power_trx0',
+ max_power_red='val_trx_max_power_red_trx0',
+ timeslot_list=mock_timeslot_list),
+ dict(arfcn='val_trx_arfcn_trx1',
+ nominal_power='val_trx_nominal_power_trx1',
+ max_power_red='val_trx_max_power_red_trx1',
+ timeslot_list=mock_timeslot_list),
+ )
+}
+
+mock_esme = {
+ 'system_id': 'val_system_id',
+ 'password': 'val_password'
+}
+
+def clone_mod(d, val_ext):
+ c = dict(d)
+ for name in c.keys():
+ if isinstance(c[name], str):
+ c[name] = c[name] + val_ext
+ elif isinstance(c[name], dict):
+ c[name] = clone_mod(c[name], val_ext)
+ return c
+
+mock_bts0 = clone_mod(mock_bts, '_bts0')
+mock_bts1 = clone_mod(mock_bts, '_bts1')
+
+mock_esme0 = clone_mod(mock_esme, '_esme0')
+mock_esme1 = clone_mod(mock_esme, '_esme1')
+mock_esme1['password'] = ''
+
+vals = dict(nitb=dict(
+ net=dict(
+ mcc='val_mcc',
+ mnc='val_mnc',
+ short_name='val_short_name',
+ long_name='val_long_name',
+ auth_policy='val_auth_policy',
+ encryption='val_encryption',
+ bts_list=(mock_bts0, mock_bts1)
+ ),
+ ip_address=dict(addr='val_ip_address'),
+ ),
+ smsc=dict(
+ policy='val_smsc_policy',
+ esme_list=(mock_esme0, mock_esme1)
+ ),
+ )
+
+print(template.render('osmo-nitb.cfg', vals))
+
+print('- Testing: expect to fail on invalid templates dir')
+try:
+ template.set_templates_dir('non-existing dir')
+ sys.stderr.write('Error: setting non-existing templates dir should raise RuntimeError\n')
+ assert(False)
+except RuntimeError:
+ # not logging exception to omit non-constant path name from expected output
+ print('sucess: setting non-existing templates dir raised RuntimeError\n')
+ pass
+
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/selftest/template_test/osmo-nitb.cfg.tmpl b/selftest/template_test/osmo-nitb.cfg.tmpl
new file mode 100644
index 0000000..c0a5c46
--- /dev/null
+++ b/selftest/template_test/osmo-nitb.cfg.tmpl
@@ -0,0 +1,82 @@
+!
+! OpenBSC configuration saved from vty
+!
+password foo
+!
+log stderr
+ logging filter all 1
+ logging color 0
+ logging print category 0
+ logging print extended-timestamp 1
+ logging level set-all debug
+!
+line vty
+ no login
+ bind ${vty_bind_ip}
+!
+e1_input
+ e1_line 0 driver ipa
+ ipa bind ${abis_bind_ip}
+network
+ network country code ${mcc}
+ mobile network code ${mnc}
+ short name ${net_name_short}
+ long name ${net_name_long}
+ auth policy ${net_auth_policy}
+ location updating reject cause 13
+ encryption ${encryption}
+ neci 1
+ rrlp mode none
+ mm info 1
+ handover 0
+ handover window rxlev averaging 10
+ handover window rxqual averaging 1
+ handover window rxlev neighbor averaging 10
+ handover power budget interval 6
+ handover power budget hysteresis 3
+ handover maximum distance 9999
+smpp
+ local-tcp-ip ${nitb.ip_address.addr} 2775
+ system-id test-nitb
+ policy ${smsc.policy}
+ %for esme in esme_list:
+ esme ${esme.system_id}
+ % if esme.password == '':
+ no password
+ % else:
+ password ${esme.password}
+ % endif
+ default-route
+ %endfor
+ctrl
+ bind ${ctrl_bind_ip}
+%for bts in bts_list:
+ bts ${loop.index}
+ type ${bts.type}
+ band ${bts.band}
+ cell_identity ${bts.cell_identity}
+ location_area_code ${bts.location_area_code}
+ training_sequence_code 7
+ base_station_id_code ${bts.base_station_id_code}
+ ms max power 15
+ cell reselection hysteresis 4
+ rxlev access min 0
+ channel allocator ascending
+ rach tx integer 9
+ rach max transmission 7
+ ip.access unit_id ${bts.unit_id} 0
+ oml ip.access stream_id ${bts.stream_id} line 0
+ gprs mode none
+% for trx in bts.trx_list:
+ trx ${loop.index}
+ rf_locked 0
+ arfcn ${trx.arfcn}
+ nominal power 23
+ max_power_red ${trx.max_power_red}
+ rsl e1 tei 0
+% for ts in trx.timeslot_list:
+ timeslot ${loop.index}
+ phys_chan_config ${ts.phys_chan_config}
+% endfor
+% endfor
+%endfor
diff --git a/selftest/trial_test.err b/selftest/trial_test.err
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/selftest/trial_test.err
diff --git a/selftest/trial_test.ok b/selftest/trial_test.ok
new file mode 100644
index 0000000..6ad39a9
--- /dev/null
+++ b/selftest/trial_test.ok
@@ -0,0 +1,16 @@
+- make a few trials dirs
+[TMP]/first
+[TMP]/second
+[TMP]/third
+- fetch trial dirs in order
+first
+['taken']
+second
+third
+- no more trial dirs left
+None
+- test checksum verification
+- detect wrong checksum
+ok, got RuntimeError("Checksum mismatch for '[PATH]/trial_test/invalid_checksum/file2' vs. '[PATH]/trial_test/invalid_checksum/checksums.md5' line 2",)
+- detect missing file
+ok, got RuntimeError("File listed in checksums file but missing in trials dir: '[PATH]/trial_test/missing_file/file2' vs. '[PATH]/trial_test/missing_file/checksums.md5' line 2",)
diff --git a/selftest/trial_test.ok.ign b/selftest/trial_test.ok.ign
new file mode 100644
index 0000000..1a969de
--- /dev/null
+++ b/selftest/trial_test.ok.ign
@@ -0,0 +1,3 @@
+/tmp/[^/]* [TMP]
+....-..-.._..-..-.. [TIMESTAMP]
+'[^']*/trial_test '[PATH]/trial_test
diff --git a/selftest/trial_test.py b/selftest/trial_test.py
new file mode 100755
index 0000000..ba3f01b
--- /dev/null
+++ b/selftest/trial_test.py
@@ -0,0 +1,49 @@
+#!/usr/bin/env python3
+
+import time
+import _prep
+import os
+from osmo_gsm_tester import util
+from osmo_gsm_tester.trial import Trial
+
+workdir = util.get_tempdir()
+
+trials_dir = util.Dir(workdir)
+
+print('- make a few trials dirs')
+print(trials_dir.mkdir('first'))
+time.sleep(1)
+print(trials_dir.mkdir('second'))
+time.sleep(1)
+print(trials_dir.mkdir('third'))
+
+print('- fetch trial dirs in order')
+t = Trial.next(trials_dir)
+print(t)
+print(repr(sorted(t.dir.children())))
+print(Trial.next(trials_dir))
+print(Trial.next(trials_dir))
+
+print('- no more trial dirs left')
+print(repr(Trial.next(trials_dir)))
+
+print('- test checksum verification')
+d = util.Dir('trial_test')
+t = Trial(d.child('valid_checksums'))
+t.verify()
+
+print('- detect wrong checksum')
+t = Trial(d.child('invalid_checksum'))
+try:
+ t.verify()
+except RuntimeError as e:
+ print('ok, got %r' % e)
+
+print('- detect missing file')
+t = Trial(d.child('missing_file'))
+try:
+ t.verify()
+except RuntimeError as e:
+ print('ok, got %r' % e)
+
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/selftest/trial_test/invalid_checksum/checksums.md5 b/selftest/trial_test/invalid_checksum/checksums.md5
new file mode 100644
index 0000000..90d3547
--- /dev/null
+++ b/selftest/trial_test/invalid_checksum/checksums.md5
@@ -0,0 +1,3 @@
+5149d403009a139c7e085405ef762e1a file1
+3d709e89c8ce201e3c928eb917989aef file2
+60b91f1875424d3b4322b0fdd0529d5d file3
diff --git a/selftest/trial_test/invalid_checksum/file1 b/selftest/trial_test/invalid_checksum/file1
new file mode 100644
index 0000000..e212970
--- /dev/null
+++ b/selftest/trial_test/invalid_checksum/file1
@@ -0,0 +1 @@
+file1
diff --git a/selftest/trial_test/invalid_checksum/file2 b/selftest/trial_test/invalid_checksum/file2
new file mode 100644
index 0000000..34ccdac
--- /dev/null
+++ b/selftest/trial_test/invalid_checksum/file2
@@ -0,0 +1 @@
+no no no
diff --git a/selftest/trial_test/invalid_checksum/file3 b/selftest/trial_test/invalid_checksum/file3
new file mode 100644
index 0000000..7c8ac2f
--- /dev/null
+++ b/selftest/trial_test/invalid_checksum/file3
@@ -0,0 +1 @@
+file3
diff --git a/selftest/trial_test/missing_file/checksums.md5 b/selftest/trial_test/missing_file/checksums.md5
new file mode 100644
index 0000000..90d3547
--- /dev/null
+++ b/selftest/trial_test/missing_file/checksums.md5
@@ -0,0 +1,3 @@
+5149d403009a139c7e085405ef762e1a file1
+3d709e89c8ce201e3c928eb917989aef file2
+60b91f1875424d3b4322b0fdd0529d5d file3
diff --git a/selftest/trial_test/missing_file/file1 b/selftest/trial_test/missing_file/file1
new file mode 100644
index 0000000..e212970
--- /dev/null
+++ b/selftest/trial_test/missing_file/file1
@@ -0,0 +1 @@
+file1
diff --git a/selftest/trial_test/missing_file/file3 b/selftest/trial_test/missing_file/file3
new file mode 100644
index 0000000..7c8ac2f
--- /dev/null
+++ b/selftest/trial_test/missing_file/file3
@@ -0,0 +1 @@
+file3
diff --git a/selftest/trial_test/valid_checksums/checksums.md5 b/selftest/trial_test/valid_checksums/checksums.md5
new file mode 100644
index 0000000..90d3547
--- /dev/null
+++ b/selftest/trial_test/valid_checksums/checksums.md5
@@ -0,0 +1,3 @@
+5149d403009a139c7e085405ef762e1a file1
+3d709e89c8ce201e3c928eb917989aef file2
+60b91f1875424d3b4322b0fdd0529d5d file3
diff --git a/selftest/trial_test/valid_checksums/file1 b/selftest/trial_test/valid_checksums/file1
new file mode 100644
index 0000000..e212970
--- /dev/null
+++ b/selftest/trial_test/valid_checksums/file1
@@ -0,0 +1 @@
+file1
diff --git a/selftest/trial_test/valid_checksums/file2 b/selftest/trial_test/valid_checksums/file2
new file mode 100644
index 0000000..6c493ff
--- /dev/null
+++ b/selftest/trial_test/valid_checksums/file2
@@ -0,0 +1 @@
+file2
diff --git a/selftest/trial_test/valid_checksums/file3 b/selftest/trial_test/valid_checksums/file3
new file mode 100644
index 0000000..7c8ac2f
--- /dev/null
+++ b/selftest/trial_test/valid_checksums/file3
@@ -0,0 +1 @@
+file3
diff --git a/selftest/util_test.err b/selftest/util_test.err
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/selftest/util_test.err
diff --git a/selftest/util_test.ok b/selftest/util_test.ok
new file mode 100644
index 0000000..c2c5f87
--- /dev/null
+++ b/selftest/util_test.ok
@@ -0,0 +1,5 @@
+- expect the same hashes on every test run
+a9993e364706816aba3e25717850c26c9cd0d89d
+356a192b7913b04c54574d18c28d46e6395428ab
+40bd001563085fc35165329ea1ff5c5ecbdbbeef
+c129b324aee662b04eccf68babba85851346dff9
diff --git a/selftest/util_test.py b/selftest/util_test.py
new file mode 100755
index 0000000..c517655
--- /dev/null
+++ b/selftest/util_test.py
@@ -0,0 +1,12 @@
+#!/usr/bin/env python3
+import _prep
+
+from osmo_gsm_tester.util import hash_obj
+
+print('- expect the same hashes on every test run')
+print(hash_obj('abc'))
+print(hash_obj(1))
+print(hash_obj([1, 2, 3]))
+print(hash_obj({ 'k': [ {'a': 1, 'b': 2}, {'a': 3, 'b': 4}, ],
+ 'i': [ {'c': 1, 'd': 2}, {'c': 3, 'd': 4}, ] }))
+
diff --git a/src/osmo-gsm-tester.py b/src/osmo-gsm-tester.py
new file mode 100755
index 0000000..18b852b
--- /dev/null
+++ b/src/osmo-gsm-tester.py
@@ -0,0 +1,212 @@
+#!/usr/bin/env python3
+
+# osmo_gsm_tester: invoke a single test run
+#
+# Copyright (C) 2016-2017 by sysmocom - s.f.m.c. GmbH
+#
+# Author: Neels Hofmeyr <neels@hofmeyr.de>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+'''osmo_gsm_tester: invoke a single test run.
+
+Examples:
+
+./run_once.py ~/my_trial_package/ -s osmo_trx
+./run_once.py ~/my_trial_package/ -c sms_tests:dyn_ts+eu_band+bts_sysmo
+./run_once.py ~/my_trial_package/ -c sms_tests/mo_mt_sms:bts_trx
+
+(The names for test suite, scenario and series names used in these examples
+must be defined by the osmo-gsm-tester configuration.)
+
+A trial package contains binaries (usually built by a jenkins job) of GSM
+software, including the core network programs as well as binaries for the
+various BTS models.
+
+A test suite defines specific actions to be taken and verifies their outcome.
+Such a test suite may leave certain aspects of a setup undefined, e.g. it may
+be BTS model agnostic or may not care which voice codecs are chosen.
+
+A test scenario completes the picture in that it defines which specific choices
+shall be made to run a test suite. Any one test suite may thus run on any
+number of different scenarios, e.g. to test various voice codecs.
+
+Test scenarios may be combined. For example, one scenario may define a timeslot
+configuration to use, while another scenario may define the voice codec
+configuration.
+
+There may still be aspects that are neither required by a test suite nor
+strictly defined by a scenario, which will be resolved automatically, e.g. by
+choosing the first available item that matches the other constraints.
+
+A test run thus needs to define: a trial package containing built binaries, a
+combination of scenarios to run a suite in, and a test suite to launch in the
+given scenario with the given binaries.
+
+The osmo-gsm-tester configuration may define one or more series as a number of
+suite:scenario combinations. So instead of a specific suite:scenario
+combination, the name of such a series can be passed.
+
+If neither a combination nor a series is specified, the default series will be run
+as defined in the osmo-gsm-tester configuration.
+
+The scenarios and suites run for a given trial will be recorded in a trial
+package's directory: Upon launch, a 'test_package/run.<date>' directory will be
+created, which will collect logs and reports.
+'''
+
+import sys
+import argparse
+from signal import *
+from osmo_gsm_tester import __version__
+from osmo_gsm_tester import trial, suite, log, config
+
+def sig_handler_cleanup(signum, frame):
+ print("killed by signal %d" % signum)
+ # This sys.exit() will raise a SystemExit base exception at the current
+ # point of execution. Code must be prepared to clean system-wide resources
+ # by using the "finally" section. This allows at the end 'atexit' hooks to
+ # be called before exiting.
+ sys.exit(1)
+
+def main():
+
+ for sig in (SIGINT, SIGTERM, SIGQUIT, SIGPIPE, SIGHUP):
+ signal(sig, sig_handler_cleanup)
+
+ parser = argparse.ArgumentParser(epilog=__doc__, formatter_class=argparse.RawTextHelpFormatter)
+ # Note: since we're using RawTextHelpFormatter to keep nicely separate
+ # paragraphs in the long help text, we unfortunately also need to take care
+ # of line wraps in the shorter cmdline options help.
+ # The line width here is what remains of screen width after the list of
+ # options placed by ArgumentParser. That's unfortunately subject to change
+ # and undefined, so when things change, just run a local
+ # ./osmo-gsm-tester.py --help and try to keep everything in 80 chars width.
+ # The help text is indented automatically, but line width is manual.
+ # Using multi-line strings here -- doesn't look nice in the python flow but
+ # is easiest to maintain.
+ parser.add_argument('-V', '--version', action='store_true',
+ help='Show version')
+ parser.add_argument('trial_package',
+ help='Directory containing binaries to test')
+ parser.add_argument('-s', '--suite-scenario', dest='suite_scenario', action='append',
+ help='''A suite-scenarios combination
+like suite:scenario+scenario''')
+ parser.add_argument('-S', '--series', dest='series', action='append',
+ help='''A series of suite-scenarios combinations
+as defined in the osmo-gsm-tester configuration''')
+ parser.add_argument('-t', '--test', dest='test', action='append',
+ help='''Run only tests matching this name.
+Any test name that contains the given string is run.
+To get an exact match, prepend a "=" like
+"-t =my_exact_name". The ".py" suffix is always
+optional.''')
+ parser.add_argument('-l', '--log-level', dest='log_level', choices=log.LEVEL_STRS.keys(),
+ default=None,
+ help='Set logging level for all categories (on stdout)')
+ parser.add_argument('-T', '--traceback', dest='trace', action='store_true',
+ help='Enable stdout logging of tracebacks')
+ parser.add_argument('-R', '--source', dest='source', action='store_true',
+ help='Enable stdout logging of source file')
+ parser.add_argument('-c', '--conf-dir', dest='conf_dir',
+ help='''Specify configuration dir (overrides
+OSMO_GSM_TESTER_CONF env and default locations)''')
+ args = parser.parse_args()
+
+ if args.version:
+ print(__version__)
+ exit(0)
+
+ print('combinations:', repr(args.suite_scenario))
+ print('series:', repr(args.series))
+ print('trial:', repr(args.trial_package))
+ print('tests:', repr(args.test))
+
+ # create a default log to stdout
+ log.LogTarget().style(all_origins_on_levels=(log.L_ERR, log.L_TRACEBACK), src=False)
+
+ if args.log_level:
+ log.set_all_levels(log.LEVEL_STRS.get(args.log_level))
+ if args.trace:
+ log.style_change(trace=True)
+ if args.source:
+ log.style_change(src=True)
+ if args.conf_dir:
+ config.override_conf = args.conf_dir
+
+ combination_strs = list(args.suite_scenario or [])
+ # for series in args.series:
+ # combination_strs.extend(config.get_series(series))
+
+ if not combination_strs:
+ combination_strs = config.read_config_file(config.DEFAULT_SUITES_CONF, if_missing_return=[])
+
+ if combination_strs:
+ print('Running default suites:\n ' + ('\n '.join(combination_strs)))
+ else:
+ print('No default suites configured (%r)' % config.DEFAULT_SUITES_CONF)
+
+
+ if not combination_strs:
+ raise RuntimeError('Need at least one suite:scenario or series to run')
+
+ # make sure all suite:scenarios exist
+ suite_scenarios = []
+ for combination_str in combination_strs:
+ suite_scenarios.append(suite.load_suite_scenario_str(combination_str))
+
+ # pick tests and make sure they exist
+ test_names = []
+ for test_name in (args.test or []):
+ found = False
+ if test_name.startswith('=') and not test_name.endswith('.py'):
+ test_name = test_name + '.py'
+ for suite_scenario_str, suite_def, scenarios in suite_scenarios:
+ for def_test_name in suite_def.test_basenames:
+ if test_name.startswith('='):
+ match = test_name[1:] == def_test_name
+ else:
+ match = test_name in def_test_name
+ if match:
+ found = True
+ test_names.append(def_test_name)
+ if not found:
+ raise RuntimeError('No test found for %r' % test_name)
+ if test_names:
+ test_names = sorted(set(test_names))
+ print(repr(test_names))
+
+ with trial.Trial(args.trial_package) as current_trial:
+ current_trial.verify()
+ for suite_scenario_str, suite_def, scenarios in suite_scenarios:
+ current_trial.add_suite_run(suite_scenario_str, suite_def, scenarios)
+ current_trial.run_suites(test_names)
+
+ if current_trial.status != trial.Trial.PASS:
+ return 1
+ return 0
+
+if __name__ == '__main__':
+ rc = 2
+ try:
+ rc = main()
+ except:
+ # Tell the log to show the exception, then terminate the program with the exception anyway.
+ # Since exceptions within test runs should be caught and evaluated, this is basically about
+ # exceptions during command line parsing and such, so it's appropriate to abort immediately.
+ log.log_exn()
+ raise
+ exit(rc)
+
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/src/osmo_gsm_tester/__init__.py b/src/osmo_gsm_tester/__init__.py
new file mode 100644
index 0000000..d3c1590
--- /dev/null
+++ b/src/osmo_gsm_tester/__init__.py
@@ -0,0 +1,29 @@
+# osmo_gsm_tester: automated cellular network hardware tests
+#
+# Copyright (C) 2016-2017 by sysmocom - s.f.m.c. GmbH
+#
+# Authors: D. Lazlo Sitzer <dlsitzer@sysmocom.de>
+# Neels Hofmeyr <neels@hofmeyr.de>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+__version__ = 'UNKNOWN'
+
+try:
+ from ._version import _version
+ __version__ = _version
+except:
+ pass
+
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/src/osmo_gsm_tester/bts.py b/src/osmo_gsm_tester/bts.py
new file mode 100644
index 0000000..6b0331e
--- /dev/null
+++ b/src/osmo_gsm_tester/bts.py
@@ -0,0 +1,196 @@
+# osmo_gsm_tester: base classes to share code among BTS subclasses.
+#
+# Copyright (C) 2018 by sysmocom - s.f.m.c. GmbH
+#
+# Author: Pau Espin Pedrol <pespin@sysmocom.de>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import copy
+from abc import ABCMeta, abstractmethod
+from . import log, config, schema
+
+class Bts(log.Origin, metaclass=ABCMeta):
+
+##############
+# PROTECTED
+##############
+ def __init__(self, suite_run, conf, name, defaults_cfg_name):
+ super().__init__(log.C_RUN, name)
+ self.bsc = None
+ self.sgsn = None
+ self.lac = None
+ self.rac = None
+ self.cellid = None
+ self.bvci = None
+ self._num_trx = 1
+ self._max_trx = None
+ self.overlay_trx_list = []
+ self.suite_run = suite_run
+ self.conf = conf
+ self.defaults_cfg_name = defaults_cfg_name
+ self._init_num_trx()
+
+ def _resolve_bts_cfg(self, cfg_name):
+ res = None
+ val = config.get_defaults('bsc_bts').get(cfg_name)
+ if val is not None:
+ res = val
+ val = config.get_defaults(self.defaults_cfg_name).get(cfg_name)
+ if val is not None:
+ res = val
+ val = self.conf.get(cfg_name)
+ if val is not None:
+ res = val
+ return res
+
+ def _init_num_trx(self):
+ self._num_trx = 1
+ self._max_trx = None
+ val = self._resolve_bts_cfg('num_trx')
+ if val is not None:
+ self._num_trx = int(val)
+ val = self._resolve_bts_cfg('max_trx')
+ if val is not None:
+ self._max_trx = int(val)
+ self._validate_new_num_trx(self._num_trx)
+ self.overlay_trx_list = [Bts._new_default_trx_cfg() for trx in range(self._num_trx)]
+
+ def _validate_new_num_trx(self, num_trx):
+ if self._max_trx is not None and num_trx > self._max_trx:
+ raise log.Error('Amount of TRX requested is too high for maximum allowed: %u > %u' %(num_trx, self._max_trx))
+
+ @staticmethod
+ def _new_default_trx_cfg():
+ return {'timeslot_list':[{} for ts in range(8)]}
+
+ @staticmethod
+ def _trx_list_recreate(trx_list, new_size):
+ curr_len = len(trx_list)
+ if new_size < curr_len:
+ trx_list = trx_list[0:new_size]
+ elif new_size > curr_len:
+ for i in range(new_size - curr_len):
+ trx_list.append(Bts._new_default_trx_cfg())
+ return trx_list
+
+ def conf_for_bsc_prepare(self):
+ values = config.get_defaults('bsc_bts')
+ # Make sure the trx_list is adapted to num of trx configured at runtime
+ # to avoid overlay issues.
+ trx_list = values.get('trx_list')
+ if trx_list and len(trx_list) != self.num_trx():
+ values['trx_list'] = Bts._trx_list_recreate(trx_list, self.num_trx())
+
+ bts_defaults = config.get_defaults(self.defaults_cfg_name)
+ trx_list = bts_defaults.get('trx_list')
+ if trx_list and len(trx_list) != self.num_trx():
+ bts_defaults['trx_list'] = Bts._trx_list_recreate(trx_list, self.num_trx())
+
+ config.overlay(values, bts_defaults)
+ if self.lac is not None:
+ config.overlay(values, { 'location_area_code': self.lac })
+ if self.rac is not None:
+ config.overlay(values, { 'routing_area_code': self.rac })
+ if self.cellid is not None:
+ config.overlay(values, { 'cell_identity': self.cellid })
+ if self.bvci is not None:
+ config.overlay(values, { 'bvci': self.bvci })
+
+ conf = copy.deepcopy(self.conf)
+ trx_list = conf.get('trx_list')
+ if trx_list and len(trx_list) != self.num_trx():
+ conf['trx_list'] = Bts._trx_list_recreate(trx_list, self.num_trx())
+ config.overlay(values, conf)
+
+ sgsn_conf = {} if self.sgsn is None else self.sgsn.conf_for_client()
+ config.overlay(values, sgsn_conf)
+
+ config.overlay(values, { 'trx_list': self.overlay_trx_list })
+ return values
+
+########################
+# PUBLIC - INTERNAL API
+########################
+ @abstractmethod
+ def conf_for_bsc(self):
+        'Used by bsc objects to retrieve this BTS configuration.'
+ pass
+
+ def remote_addr(self):
+ return self.conf.get('addr')
+
+ def cleanup(self):
+ 'Nothing to do by default. Subclass can override if required.'
+ pass
+
+###################
+# PUBLIC (test API included)
+###################
+ @abstractmethod
+ def start(self, keepalive=False):
+ '''Starts BTS. If keepalive is set, it will expect internal issues and
+ respawn related processes when detected'''
+ pass
+
+ @abstractmethod
+ def ready_for_pcu(self):
+ 'True if the BTS is prepared to have a PCU connected, false otherwise'
+ pass
+
+ @abstractmethod
+ def pcu(self):
+ 'Get the Pcu object associated with the BTS'
+ pass
+
+ def bts_type(self):
+ 'Get the type of BTS'
+ return self.conf.get('type')
+
+ def set_bsc(self, bsc):
+ self.bsc = bsc
+
+ def set_sgsn(self, sgsn):
+ self.sgsn = sgsn
+
+ def set_lac(self, lac):
+ self.lac = lac
+
+ def set_rac(self, rac):
+ self.rac = rac
+
+ def set_cellid(self, cellid):
+ self.cellid = cellid
+
+ def set_bvci(self, bvci):
+ self.bvci = bvci
+
+ def set_num_trx(self, num_trx):
+ assert num_trx > 0
+ self._validate_new_num_trx(num_trx)
+ if num_trx == self._num_trx:
+ return
+ self._num_trx = num_trx
+ self.overlay_trx_list = Bts._trx_list_recreate(self.overlay_trx_list, num_trx)
+
+ def num_trx(self):
+ return self._num_trx
+
+ def set_trx_phy_channel(self, trx_idx, ts_idx, config):
+ assert trx_idx < self._num_trx
+ assert ts_idx < 8
+ schema.phy_channel_config(config) # validation
+ self.overlay_trx_list[trx_idx]['timeslot_list'][ts_idx]['phys_chan_config'] = config
+
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/src/osmo_gsm_tester/bts_nanobts.py b/src/osmo_gsm_tester/bts_nanobts.py
new file mode 100644
index 0000000..ab75b16
--- /dev/null
+++ b/src/osmo_gsm_tester/bts_nanobts.py
@@ -0,0 +1,293 @@
+# osmo_gsm_tester: specifics for running an ip.access nanoBTS
+#
+# Copyright (C) 2018 by sysmocom - s.f.m.c. GmbH
+#
+# Author: Pau Espin Pedrol <pespin@sysmocom.de>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import re
+from . import log, config, util, process, pcap_recorder, bts, pcu
+from . import powersupply
+from .event_loop import MainLoop
+
+class NanoBts(bts.Bts):
+
+##############
+# PROTECTED
+##############
+ def __init__(self, suite_run, conf):
+ super().__init__(suite_run, conf, 'nanobts_%s' % conf.get('label', 'nolabel'), 'nanobts')
+ self.pwsup_list = []
+ self._pcu = None
+
+ def _configure(self):
+ if self.bsc is None:
+ raise log.Error('BTS needs to be added to a BSC or NITB before it can be configured')
+
+ for trx_i in range(self.num_trx()):
+ pwsup_opt = self.conf.get('trx_list')[trx_i].get('power_supply', {})
+ if not pwsup_opt:
+ raise log.Error('No power_supply attribute provided in conf for TRX %d!' % trx_i)
+ pwsup_type = pwsup_opt.get('type')
+ if not pwsup_type:
+ raise log.Error('No type attribute provided in power_supply conf for TRX %d!' % trx_i)
+ self.pwsup_list.append(powersupply.get_instance_by_type(pwsup_type, pwsup_opt))
+
+
+ def get_pcap_filter_all_trx_ip(self):
+ ret = "("
+ for trx_i in range(self.num_trx()):
+ if trx_i != 0:
+ ret = ret + " or "
+ bts_trx_ip = self.conf.get('trx_list')[trx_i].get('addr')
+ ret = ret + "host " + bts_trx_ip
+ ret = ret + ")"
+ return ret
+
+########################
+# PUBLIC - INTERNAL API
+########################
+
+ def conf_for_bsc(self):
+ values = self.conf_for_bsc_prepare()
+ # Hack until we have proper ARFCN resource allocation support (OS#2230)
+ band = values.get('band')
+ trx_list = values.get('trx_list')
+ if band == 'GSM-1900':
+ for trx_i in range(len(trx_list)):
+ config.overlay(trx_list[trx_i], { 'arfcn' : str(531 + trx_i * 2) })
+ elif band == 'GSM-900':
+ for trx_i in range(len(trx_list)):
+ config.overlay(trx_list[trx_i], { 'arfcn' : str(50 + trx_i * 2) })
+
+ config.overlay(values, { 'osmobsc_bts_type': 'nanobts' })
+
+ self.dbg(conf=values)
+ return values
+
+
+ def cleanup(self):
+ for pwsup in self.pwsup_list:
+ self.dbg('Powering off NanoBTS TRX')
+ pwsup.power_set(False)
+ self.pwsup_list = []
+
+###################
+# PUBLIC (test API included)
+###################
+
+ def start(self, keepalive=False):
+ if self.conf.get('ipa_unit_id') is None:
+ raise log.Error('No attribute %s provided in conf!' % attr)
+ self.run_dir = util.Dir(self.suite_run.get_test_run_dir().new_dir(self.name()))
+ self._configure()
+
+ unitid = int(self.conf.get('ipa_unit_id'))
+
+ # Make sure all nanoBTS TRX are powered and in a clean state:
+ for pwsup in self.pwsup_list:
+ self.dbg('Powering cycling NanoBTS TRX')
+ pwsup.power_cycle(1.0)
+
+ pcap_recorder.PcapRecorder(self.suite_run, self.run_dir.new_dir('pcap'), None,
+ '%s and port not 22' % self.get_pcap_filter_all_trx_ip())
+
+
+ # TODO: If setting N TRX, we should set up them in parallel instead of waiting for each one.
+ for trx_i in range(self.num_trx()):
+ bts_trx_ip = self.conf.get('trx_list')[trx_i].get('addr')
+            # This is fine for now, however concurrent tests using Nanobts may run into "address already in use" since dst is broadcast.
+ # Once concurrency is needed, a new config attr should be added to have an extra static IP assigned on the main-unit to each Nanobts resource.
+ local_bind_ip = util.dst_ip_get_local_bind(bts_trx_ip)
+
+ self.log('Finding nanobts %s, binding on %s...' % (bts_trx_ip, local_bind_ip))
+ ipfind = AbisIpFind(self.suite_run, self.run_dir, local_bind_ip, 'preconf')
+ ipfind.start()
+ ipfind.wait_bts_ready(bts_trx_ip)
+ running_unitid, running_trx = ipfind.get_unitid_by_ip(bts_trx_ip)
+ self.log('Found nanobts %s with unit_id %d trx %d' % (bts_trx_ip, running_unitid, running_trx))
+ ipfind.stop()
+
+ ipconfig = IpAccessConfig(self.suite_run, self.run_dir, bts_trx_ip)
+ if running_unitid != unitid or running_trx != trx_i:
+ if not ipconfig.set_unit_id(unitid, trx_i, False):
+ raise log.Error('Failed configuring unit id %d trx %d' % (unitid, trx_i))
+ # Apply OML IP and restart nanoBTS as it is required to apply the changes.
+ if not ipconfig.set_oml_ip(self.bsc.addr(), True):
+ raise log.Error('Failed configuring OML IP %s' % bts_trx_ip)
+
+                # Allow some time for the BTS to restart. It takes much more than 20 secs, and
+ # this way we make sure we don't catch responses in abisip-find prior to
+ # BTS restarting.
+ MainLoop.sleep(self, 20)
+
+ self.log('Starting to connect id %d trx %d to' % (unitid, trx_i), self.bsc)
+ ipfind = AbisIpFind(self.suite_run, self.run_dir, local_bind_ip, 'postconf')
+ ipfind.start()
+ ipfind.wait_bts_ready(bts_trx_ip)
+ self.log('nanoBTS id %d trx %d configured and running' % (unitid, trx_i))
+ ipfind.stop()
+
+ MainLoop.wait(self, self.bsc.bts_is_connected, self, timeout=600)
+ self.log('nanoBTS connected to BSC')
+
+ #According to roh, it can be configured to use a static IP in a permanent way:
+ # 1- use abisip-find to find the default address
+ # 2- use ./ipaccess-config --ip-address IP/MASK
+ # 3- use ./ipaccess-config --ip-gateway IP to set the IP of the main unit
+ # 4- use ./ipaccess-config --restart to restart and apply the changes
+
+ #Start must do the following:
+ # 1- use abisip-find to find the default address
+ # 2- use ./ipaccess-config --unit-id UNIT_ID
+ # 3- use ./ipaccess-config --oml-ip --restart to set the IP of the BSC and apply+restart.
+ # According to roh, using the 3 of them together was not reliable to work properly.
+
+ def ready_for_pcu(self):
+ """We don't really care as we use a Dummy PCU class."""
+ return True
+
+ def pcu(self):
+ if not self._pcu:
+ self._pcu = pcu.PcuDummy(self.suite_run, self, self.conf)
+ return self._pcu
+
+
+class AbisIpFind(log.Origin):
+ suite_run = None
+ parent_run_dir = None
+ run_dir = None
+ inst = None
+ env = None
+ bind_ip = None
+ proc = None
+
+ BIN_ABISIP_FIND = 'abisip-find'
+ BTS_UNIT_ID_RE = re.compile("Unit_ID='(?P<unit_id>\d+)/\d+/(?P<trx_id>\d+)'")
+
+ def __init__(self, suite_run, parent_run_dir, bind_ip, name_suffix):
+ super().__init__(log.C_RUN, AbisIpFind.BIN_ABISIP_FIND + '-' + name_suffix)
+ self.suite_run = suite_run
+ self.parent_run_dir = parent_run_dir
+ self.bind_ip = bind_ip
+ self.env = {}
+
+ def start(self):
+ self.run_dir = util.Dir(self.parent_run_dir.new_dir(self.name()))
+ self.inst = util.Dir(os.path.abspath(self.suite_run.trial.get_inst('osmo-bsc')))
+
+ lib = self.inst.child('lib')
+ if not os.path.isdir(lib):
+ raise log.Error('No lib/ in %r' % self.inst)
+ ipfind_path = self.inst.child('bin', AbisIpFind.BIN_ABISIP_FIND)
+ if not os.path.isfile(ipfind_path):
+ raise RuntimeError('Binary missing: %r' % ipfind_path)
+
+ env = { 'LD_LIBRARY_PATH': util.prepend_library_path(lib) }
+ self.proc = process.Process(self.name(), self.run_dir,
+ (ipfind_path, '-i', '1', '-b', self.bind_ip),
+ env=env)
+ self.suite_run.remember_to_stop(self.proc)
+ self.proc.launch()
+
+ def stop(self):
+ self.suite_run.stop_process(self.proc)
+
+ def get_line_by_ip(self, ipaddr):
+ """Get latest line (more up to date) from abisip-find based on ip address."""
+ token = "IP_Address='%s'" % ipaddr
+ myline = None
+ for line in (self.proc.get_stdout() or '').splitlines():
+ if token in line:
+ myline = line
+ return myline
+
+ def get_unitid_by_ip(self, ipaddr):
+ line = self.get_line_by_ip(ipaddr)
+ if line is None:
+ return None
+ res = AbisIpFind.BTS_UNIT_ID_RE.search(line)
+ if res:
+ unit_id = int(res.group('unit_id'))
+ trx_id = int(res.group('trx_id'))
+ return (unit_id, trx_id)
+ raise log.Error('abisip-find unit_id field for nanobts %s not found in %s' %(ipaddr, line))
+
+ def bts_ready(self, ipaddr):
+ return self.get_line_by_ip(ipaddr) is not None
+
+ def wait_bts_ready(self, ipaddr):
+ MainLoop.wait(self, self.bts_ready, ipaddr)
+ # There's a period of time after boot in which nanobts answers to
+        # abisip-find but tcp RSTs ipaccess-config conns. Let's wait in here a
+ # bit more time to avoid failing after stating the BTS is ready.
+ MainLoop.sleep(self, 2)
+
+class IpAccessConfig(log.Origin):
+ suite_run = None
+ parent_run_dir = None
+ run_dir = None
+ inst = None
+ env = None
+ bts_ip = None
+
+ BIN_IPACCESS_CONFIG = 'ipaccess-config'
+
+ def __init__(self, suite_run, parent_run_dir, bts_ip):
+ super().__init__(log.C_RUN, IpAccessConfig.BIN_IPACCESS_CONFIG)
+ self.suite_run = suite_run
+ self.parent_run_dir = parent_run_dir
+ self.bts_ip = bts_ip
+ self.env = {}
+
+ def create_process(self, binary_name, *args):
+ binary = os.path.abspath(self.inst.child('bin', binary_name))
+ run_dir = self.run_dir.new_dir(binary_name)
+ if not os.path.isfile(binary):
+ raise RuntimeError('Binary missing: %r' % binary)
+ proc = process.Process(binary_name, run_dir,
+ (binary,) + args,
+ env=self.env)
+ return proc
+
+ def run(self, name_suffix, *args):
+ self.run_dir = util.Dir(self.parent_run_dir.new_dir(self.name()+'-'+name_suffix))
+ self.inst = util.Dir(os.path.abspath(self.suite_run.trial.get_inst('osmo-bsc')))
+ lib = self.inst.child('lib')
+ self.env = { 'LD_LIBRARY_PATH': util.prepend_library_path(lib) }
+ self.proc = self.create_process(IpAccessConfig.BIN_IPACCESS_CONFIG, *args)
+ return self.proc.launch_sync(raise_nonsuccess=False)
+
+ def set_unit_id(self, unitid, trx_num, restart=False):
+ uid_str = '%d/0/%d' % (unitid, trx_num)
+ if restart:
+ retcode = self.run('unitid', '--restart', '--unit-id', '%s' % uid_str, self.bts_ip)
+ else:
+ retcode = self.run('unitid', '--unit-id', '%s' % uid_str, self.bts_ip)
+ if retcode != 0:
+ log.err('ipaccess-config --unit-id %s returned error code %d' % (uid_str, retcode))
+ return retcode == 0
+
+ def set_oml_ip(self, omlip, restart=False):
+ if restart:
+ retcode = self.run('oml', '--restart', '--oml-ip', omlip, self.bts_ip)
+ else:
+ retcode = self.run('oml', '--oml-ip', omlip, self.bts_ip)
+ if retcode != 0:
+ self.error('ipaccess-config --oml-ip %s returned error code %d' % (omlip, retcode))
+ return retcode == 0
+
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/src/osmo_gsm_tester/bts_octphy.py b/src/osmo_gsm_tester/bts_octphy.py
new file mode 100644
index 0000000..a1dd494
--- /dev/null
+++ b/src/osmo_gsm_tester/bts_octphy.py
@@ -0,0 +1,143 @@
+# osmo_gsm_tester: specifics for running an osmo-bts-octphy
+#
+# Copyright (C) 2016-2017 by sysmocom - s.f.m.c. GmbH
+#
+# Author: Pau Espin Pedrol <pespin@sysmocom.de>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import pprint
+from . import log, config, util, template, process, bts_osmo
+
+class OsmoBtsOctphy(bts_osmo.OsmoBtsMainUnit):
+
+##############
+# PROTECTED
+##############
+
+ BIN_BTS_OCTPHY = 'osmo-bts-octphy'
+ CONF_BTS_OCTPHY = 'osmo-bts-octphy.cfg'
+
+ def __init__(self, suite_run, conf):
+ super().__init__(suite_run, conf, OsmoBtsOctphy.BIN_BTS_OCTPHY, 'osmo_bts_octphy')
+ self.run_dir = None
+ self.inst = None
+ self.env = {}
+ self.values = {}
+
+ def launch_process(self, binary_name, *args):
+ binary = os.path.abspath(self.inst.child('bin', binary_name))
+ run_dir = self.run_dir.new_dir(binary_name)
+ if not os.path.isfile(binary):
+ raise RuntimeError('Binary missing: %r' % binary)
+ proc = process.Process(binary_name, run_dir,
+ (binary,) + args,
+ env=self.env)
+ self.suite_run.remember_to_stop(proc)
+ proc.launch()
+ return proc
+
+ def allocate_phy_instances(self, c):
+ '''
+ Generate match trx Z <-> phy X inst Y to use in vty config
+
+ We create a new phy for each trx found with a new hwaddr. If hwaddr is
+ already there, increase num_instances and give last instance index to
+ the current trx.
+ '''
+ phy_list = []
+ for trx in c.get('trx_list', []):
+ hwaddr = trx.get('hw_addr', None)
+ netdev = trx.get('net_device', None)
+ if hwaddr is None:
+ raise log.Error('Expected hw-addr value not found!')
+ found = False
+ phy_idx = 0
+ for phy in phy_list:
+ if phy['hw_addr'] == hwaddr:
+ phy['num_instances'] += 1
+ found = True
+ break
+ phy_idx += 1
+ if not found:
+ phy_list.append({'hw_addr': hwaddr, 'net_device': netdev, 'num_instances': 1})
+ trx['phy_idx'] = phy_idx
+ trx['instance_idx'] = phy_list[phy_idx]['num_instances'] - 1
+ c['phy_list'] = phy_list
+
+ def configure(self):
+ if self.bsc is None:
+ raise RuntimeError('BTS needs to be added to a BSC or NITB before it can be configured')
+ self.config_file = self.run_dir.new_file(OsmoBtsOctphy.CONF_BTS_OCTPHY)
+ self.dbg(config_file=self.config_file)
+
+ values = dict(osmo_bts_octphy=config.get_defaults('osmo_bts_octphy'))
+ config.overlay(values, self.suite_run.config())
+ config.overlay(values, {
+ 'osmo_bts_octphy': {
+ 'oml_remote_ip': self.bsc.addr(),
+ 'pcu_socket_path': self.pcu_socket_path(),
+ }
+ })
+ config.overlay(values, { 'osmo_bts_octphy': self.conf })
+
+ self.allocate_phy_instances(values['osmo_bts_octphy'])
+
+ self.dbg('OSMO-BTS-OCTPHY CONFIG:\n' + pprint.pformat(values))
+ self.values = values
+ with open(self.config_file, 'w') as f:
+ r = template.render(OsmoBtsOctphy.CONF_BTS_OCTPHY, values)
+ self.dbg(r)
+ f.write(r)
+
+########################
+# PUBLIC - INTERNAL API
+########################
+ def conf_for_bsc(self):
+ values = self.conf_for_bsc_prepare()
+ self.dbg(conf=values)
+ return values
+
+###################
+# PUBLIC (test API included)
+###################
+ def start(self):
+ if self.bsc is None:
+ raise RuntimeError('BTS needs to be added to a BSC or NITB before it can be started')
+ self.suite_run.poll()
+
+ self.log('Starting to connect to', self.bsc)
+ self.run_dir = util.Dir(self.suite_run.get_test_run_dir().new_dir(self.name()))
+ self.configure()
+
+ self.inst = util.Dir(os.path.abspath(self.suite_run.trial.get_inst('osmo-bts')))
+ btsoct_path = self.inst.child('bin', OsmoBtsOctphy.BIN_BTS_OCTPHY)
+ lib = self.inst.child('lib')
+ if not os.path.isdir(lib):
+ raise RuntimeError('No lib/ in %r' % self.inst)
+
+ # setting capabilities will later disable use of LD_LIBRARY_PATH from ELF loader -> modify RPATH instead.
+ self.log('Setting RPATH for', OsmoBtsOctphy.BIN_BTS_OCTPHY)
+ util.change_elf_rpath(btsoct_path, util.prepend_library_path(lib), self.run_dir.new_dir('patchelf'))
+ # osmo-bty-octphy requires CAP_NET_RAW to open AF_PACKET socket:
+ self.log('Applying CAP_NET_RAW capability to', OsmoBtsOctphy.BIN_BTS_OCTPHY)
+ util.setcap_net_raw(btsoct_path, self.run_dir.new_dir('setcap_net_raw'))
+
+ self.proc_bts = self.launch_process(OsmoBtsOctphy.BIN_BTS_OCTPHY, '-r', '1',
+ '-c', os.path.abspath(self.config_file),
+ '-i', self.bsc.addr(), '-t', str(self.num_trx()))
+ self.suite_run.poll()
+
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/src/osmo_gsm_tester/bts_osmo.py b/src/osmo_gsm_tester/bts_osmo.py
new file mode 100644
index 0000000..9105c28
--- /dev/null
+++ b/src/osmo_gsm_tester/bts_osmo.py
@@ -0,0 +1,117 @@
+# osmo_gsm_tester: base classes to share code among BTS subclasses.
+#
+# Copyright (C) 2016-2017 by sysmocom - s.f.m.c. GmbH
+#
+# Author: Pau Espin Pedrol <pespin@sysmocom.de>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import tempfile
+from abc import ABCMeta, abstractmethod
+from . import log, bts, pcu_osmo
+
+class OsmoBts(bts.Bts, metaclass=ABCMeta):
+
+##############
+# PROTECTED
+##############
+ def __init__(self, suite_run, conf, name, defaults_cfg_name):
+ super().__init__(suite_run, conf, name, defaults_cfg_name)
+ self._pcu = None
+ self.proc_bts = None
+ if len(self.pcu_socket_path().encode()) > 107:
+ raise log.Error('Path for pcu socket is longer than max allowed len for unix socket path (107):', self.pcu_socket_path())
+
+########################
+# PUBLIC - INTERNAL API
+########################
+ @abstractmethod
+ def conf_for_bsc(self):
+ # coming from bts.Bts, we forward the implementation to children.
+ pass
+
+ @abstractmethod
+ def pcu_socket_path(self):
+ 'Used by pcu objects to get path to socket.'
+ pass
+
+ @abstractmethod
+ def create_pcu(self):
+ 'Used by base class. Subclass can create different pcu implementations.'
+ pass
+
+###################
+# PUBLIC (test API included)
+###################
+ @abstractmethod
+ def start(self, keepalive=False):
+ # coming from bts.Bts, we forward the implementation to children.
+ pass
+
+ @abstractmethod
+ def ready_for_pcu(self):
+ 'Used by tests to know when BTS is prepared and PCU can be started.'
+ pass
+
+ def pcu(self):
+ if self._pcu is None:
+ self._pcu = self.create_pcu()
+ return self._pcu
+
+class OsmoBtsMainUnit(OsmoBts, metaclass=ABCMeta):
+##############
+# PROTECTED
+##############
+
+ def __init__(self, suite_run, conf, name, defaults_cfg_name):
+ self.pcu_sk_tmp_dir = None
+ super().__init__(suite_run, conf, name, defaults_cfg_name)
+
+########################
+# PUBLIC - INTERNAL API
+########################
+ @abstractmethod
+ def conf_for_bsc(self):
+ # coming from bts.Bts, we forward the implementation to children.
+ pass
+
+ def cleanup(self):
+ if self.pcu_sk_tmp_dir:
+ try:
+ os.remove(self.pcu_socket_path())
+ except OSError:
+ pass
+ os.rmdir(self.pcu_sk_tmp_dir)
+
+ def create_pcu(self):
+ return pcu_osmo.OsmoPcu(self.suite_run, self, self.conf)
+
+ def pcu_socket_path(self):
+ if self.pcu_sk_tmp_dir is None:
+ self.pcu_sk_tmp_dir = tempfile.mkdtemp('', 'ogtpcusk')
+ return os.path.join(self.pcu_sk_tmp_dir, 'pcu_bts')
+
+###################
+# PUBLIC (test API included)
+###################
+ def ready_for_pcu(self):
+ if not self.proc_bts or not self.proc_bts.is_running:
+ return False
+ return os.path.exists(self.pcu_socket_path())
+
+ @abstractmethod
+ def start(self, keepalive=False):
+ # coming from bts.Bts, we forward the implementation to children.
+ pass
diff --git a/src/osmo_gsm_tester/bts_osmotrx.py b/src/osmo_gsm_tester/bts_osmotrx.py
new file mode 100644
index 0000000..0c941e0
--- /dev/null
+++ b/src/osmo_gsm_tester/bts_osmotrx.py
@@ -0,0 +1,295 @@
+# osmo_gsm_tester: specifics for running an osmo-bts-trx
+#
+# Copyright (C) 2016-2017 by sysmocom - s.f.m.c. GmbH
+#
+# Author: Neels Hofmeyr <neels@hofmeyr.de>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import stat
+import pprint
+from abc import ABCMeta, abstractmethod
+from . import log, config, util, template, process, bts_osmo
+from .event_loop import MainLoop
+
+class OsmoBtsTrx(bts_osmo.OsmoBtsMainUnit):
+##############
+# PROTECTED
+##############
+
+ BIN_BTS_TRX = 'osmo-bts-trx'
+ BIN_PCU = 'osmo-pcu'
+
+ CONF_BTS_TRX = 'osmo-bts-trx.cfg'
+
+ def __init__(self, suite_run, conf):
+ super().__init__(suite_run, conf, OsmoBtsTrx.BIN_BTS_TRX, 'osmo_bts_trx')
+ self.run_dir = None
+ self.inst = None
+ self.trx = None
+ self.env = {}
+ self.gen_conf = {}
+
+ def trx_remote_ip(self):
+ conf_ip = self.conf.get('osmo_trx', {}).get('trx_ip', None)
+ if conf_ip is not None:
+ return conf_ip
+ # if 'trx_remote_ip' is not configured, use same IP as BTS
+ return self.remote_addr()
+
+ def launch_process(self, keepalive, binary_name, *args):
+ binary = os.path.abspath(self.inst.child('bin', binary_name))
+ run_dir = self.run_dir.new_dir(binary_name)
+ if not os.path.isfile(binary):
+ raise RuntimeError('Binary missing: %r' % binary)
+ proc = process.Process(binary_name, run_dir,
+ (binary,) + args,
+ env=self.env)
+ self.suite_run.remember_to_stop(proc, keepalive)
+ proc.launch()
+ return proc
+
+ def configure(self):
+ if self.bsc is None:
+ raise RuntimeError('BTS needs to be added to a BSC or NITB before it can be configured')
+ self.config_file = self.run_dir.new_file(OsmoBtsTrx.CONF_BTS_TRX)
+ self.dbg(config_file=self.config_file)
+
+ values = dict(osmo_bts_trx=config.get_defaults('osmo_bts_trx'))
+ config.overlay(values, dict(osmo_bts_trx=dict(osmo_trx=config.get_defaults('osmo_trx'))))
+ config.overlay(values, self.suite_run.config())
+ config.overlay(values, {
+ 'osmo_bts_trx': {
+ 'oml_remote_ip': self.bsc.addr(),
+ 'pcu_socket_path': self.pcu_socket_path(),
+ 'osmo_trx': {
+ 'bts_ip': self.remote_addr(),
+ 'trx_ip': self.trx_remote_ip(),
+ 'egprs': 'enable' if self.conf_for_bsc()['gprs_mode'] == 'egprs' else 'disable',
+ 'channels': [{} for trx_i in range(self.num_trx())]
+ }
+ }
+ })
+ config.overlay(values, { 'osmo_bts_trx': self.conf })
+
+ self.gen_conf = values
+ self.dbg('OSMO-BTS-TRX CONFIG:\n' + pprint.pformat(values))
+
+ with open(self.config_file, 'w') as f:
+ r = template.render(OsmoBtsTrx.CONF_BTS_TRX, values)
+ self.dbg(r)
+ f.write(r)
+
+ def launch_trx_enabled(self):
+ return util.str2bool(self.gen_conf['osmo_bts_trx'].get('osmo_trx', {}).get('launch_trx'))
+
+ def get_osmo_trx_type(self):
+ return self.gen_conf['osmo_bts_trx'].get('osmo_trx', {}).get('type')
+
+########################
+# PUBLIC - INTERNAL API
+########################
+ def conf_for_bsc(self):
+ values = self.conf_for_bsc_prepare()
+ self.dbg(conf=values)
+ return values
+
+ def conf_for_osmotrx(self):
+ return dict(osmo_trx=self.gen_conf['osmo_bts_trx'].get('osmo_trx', {}))
+
+###################
+# PUBLIC (test API included)
+###################
+ def start(self, keepalive=False):
+ if self.bsc is None:
+ raise RuntimeError('BTS needs to be added to a BSC or NITB before it can be started')
+ self.suite_run.poll()
+
+ self.log('Starting to connect to', self.bsc)
+ self.run_dir = util.Dir(self.suite_run.get_test_run_dir().new_dir(self.name()))
+ self.configure()
+
+ if self.launch_trx_enabled():
+ self.trx = OsmoTrx.get_instance_by_type(self.get_osmo_trx_type(), self.suite_run, self.conf_for_osmotrx())
+ self.trx.start(keepalive)
+ self.log('Waiting for %s to start up...' % self.trx.name())
+ MainLoop.wait(self, self.trx.trx_ready)
+
+ self.inst = util.Dir(os.path.abspath(self.suite_run.trial.get_inst('osmo-bts')))
+ lib = self.inst.child('lib')
+ if not os.path.isdir(lib):
+ raise RuntimeError('No lib/ in %r' % self.inst)
+ self.env = { 'LD_LIBRARY_PATH': util.prepend_library_path(lib) }
+
+ self.proc_bts = self.launch_process(keepalive, OsmoBtsTrx.BIN_BTS_TRX, '-r', '1',
+ '-c', os.path.abspath(self.config_file),
+ '-i', self.bsc.addr(),
+ '-t', str(self.num_trx()))
+ self.suite_run.poll()
+
+class OsmoTrx(log.Origin, metaclass=ABCMeta):
+
+ CONF_OSMO_TRX = 'osmo-trx.cfg'
+ REMOTE_DIR = '/osmo-gsm-tester-trx/last_run'
+ WRAPPER_SCRIPT = 'ssh_sigkiller.sh'
+
+##############
+# PROTECTED
+##############
+ def __init__(self, suite_run, conf):
+ super().__init__(log.C_RUN, self.binary_name())
+ self.suite_run = suite_run
+ self.conf = conf
+ self.env = {}
+ self.log("OSMOTRX CONF: %r" % conf)
+ self.listen_ip = conf.get('osmo_trx', {}).get('trx_ip')
+ self.bts_ip = conf.get('osmo_trx', {}).get('bts_ip')
+ self.remote_user = conf.get('osmo_trx', {}).get('remote_user', None)
+ self.run_dir = None
+ self.inst = None
+ self.proc_trx = None
+
+ @classmethod
+ def get_instance_by_type(cls, type, suite_run, conf):
+ KNOWN_OSMOTRX_TYPES = {
+ 'uhd': OsmoTrxUHD,
+ 'lms': OsmoTrxLMS,
+ }
+ osmo_trx_class = KNOWN_OSMOTRX_TYPES.get(type)
+ return osmo_trx_class(suite_run, conf)
+
+ @abstractmethod
+ def binary_name(self):
+ 'Used by base class. Subclass can create different OsmoTRX implementations.'
+ pass
+
+ def configure(self):
+ self.config_file = self.run_dir.new_file(OsmoTrx.CONF_OSMO_TRX)
+ self.dbg(config_file=self.config_file)
+
+ values = self.conf
+
+ # we don't need to enable multi-arfcn for single channel
+ if len(values.get('osmo_trx', {}).get('channels', [])) > 1:
+ multi_arfcn_bool = util.str2bool(values.get('osmo_trx', {}).get('multi_arfcn', False))
+ else:
+ multi_arfcn_bool = False
+ config.overlay(values, { 'osmo_trx': { 'multi_arfcn': multi_arfcn_bool } })
+
+ self.dbg('OSMO-TRX CONFIG:\n' + pprint.pformat(values))
+
+ with open(self.config_file, 'w') as f:
+ r = template.render(OsmoTrx.CONF_OSMO_TRX, values)
+ self.dbg(r)
+ f.write(r)
+
+ def launch_process_local(self, keepalive, binary_name, *args):
+ binary = os.path.abspath(self.inst.child('bin', binary_name))
+ run_dir = self.run_dir.new_dir(binary_name)
+ if not os.path.isfile(binary):
+ raise RuntimeError('Binary missing: %r' % binary)
+ proc = process.Process(binary_name, run_dir,
+ (binary,) + args,
+ env=self.env)
+ self.suite_run.remember_to_stop(proc, keepalive)
+ proc.launch()
+ return proc
+
+ def launch_process_remote(self, name, popen_args, remote_cwd=None, keepalive=False):
+ run_dir = self.run_dir.new_dir(name)
+ proc = process.RemoteProcess(name, run_dir, self.remote_user, self.listen_ip, remote_cwd,
+ popen_args)
+ self.suite_run.remember_to_stop(proc, keepalive)
+ proc.launch()
+ return proc
+
+ def generate_wrapper_script(self):
+ wrapper_script = self.run_dir.new_file(OsmoTrx.WRAPPER_SCRIPT)
+ with open(wrapper_script, 'w') as f:
+ r = """#!/bin/bash
+ mypid=0
+ sign_handler() {
+ sig=$1
+ echo "received signal handler $sig, killing $mypid"
+ kill $mypid
+ }
+ trap 'sign_handler SIGINT' SIGINT
+ trap 'sign_handler SIGHUP' SIGHUP
+ "$@" &
+ mypid=$!
+ echo "waiting for $mypid"
+ wait $mypid
+ """
+ f.write(r)
+ st = os.stat(wrapper_script)
+ os.chmod(wrapper_script, st.st_mode | stat.S_IEXEC)
+ return wrapper_script
+
+##############
+# PUBLIC (test API included)
+##############
+ def start(self, keepalive=False):
+ self.run_dir = util.Dir(self.suite_run.get_test_run_dir().new_dir(self.name()))
+ self.configure()
+ self.inst = util.Dir(os.path.abspath(self.suite_run.trial.get_inst('osmo-trx')))
+ if not self.remote_user:
+ # Run locally if ssh user is not set
+ lib = self.inst.child('lib')
+ self.env = { 'LD_LIBRARY_PATH': util.prepend_library_path(lib) }
+ self.proc_trx = self.launch_process_local(keepalive, self.binary_name(),
+ '-C', os.path.abspath(self.config_file))
+ else:
+ # Run remotely through ssh. We need to run osmo-trx under a wrapper
+ # script since osmo-trx ignores SIGHUP and will keep running after
+ # we close local ssh session. The wrapper script catches SIGHUP and
+ # sends SIGINT to it.
+ wrapper_script = self.generate_wrapper_script()
+ remote_run_dir = util.Dir(OsmoTrx.REMOTE_DIR)
+ self.remote_inst = process.copy_inst_ssh(self.run_dir, self.inst, remote_run_dir, self.remote_user,
+ self.listen_ip, self.binary_name(), self.config_file)
+ remote_wrapper_script = remote_run_dir.child(OsmoTrx.WRAPPER_SCRIPT)
+ remote_config_file = remote_run_dir.child(OsmoTrx.CONF_OSMO_TRX)
+ remote_lib = self.remote_inst.child('lib')
+ remote_binary = self.remote_inst.child('bin', self.binary_name())
+ process.scp(self.run_dir, self.remote_user, self.listen_ip, 'scp-wrapper-to-remote', wrapper_script, remote_wrapper_script)
+
+ args = ('LD_LIBRARY_PATH=%s' % remote_lib, remote_wrapper_script, remote_binary, '-C', remote_config_file)
+ self.proc_trx = self.launch_process_remote(self.binary_name(), args, remote_cwd=remote_run_dir, keepalive=keepalive)
+
+ def trx_ready(self):
+ if not self.proc_trx or not self.proc_trx.is_running:
+ return False
+ return '-- Transceiver active with' in (self.proc_trx.get_stdout() or '')
+
+class OsmoTrxUHD(OsmoTrx):
+ BIN_TRX = 'osmo-trx-uhd'
+
+ def __init__(self, suite_run, conf):
+ super().__init__(suite_run, conf)
+
+ def binary_name(self):
+ return OsmoTrxUHD.BIN_TRX
+
+class OsmoTrxLMS(OsmoTrx):
+ BIN_TRX = 'osmo-trx-lms'
+
+ def __init__(self, suite_run, conf):
+ super().__init__(suite_run, conf)
+ self.conf['osmo_trx']['channels'][0]['rx_path'] = 'LNAW'
+
+ def binary_name(self):
+ return OsmoTrxLMS.BIN_TRX
+
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/src/osmo_gsm_tester/bts_sysmo.py b/src/osmo_gsm_tester/bts_sysmo.py
new file mode 100644
index 0000000..66d305a
--- /dev/null
+++ b/src/osmo_gsm_tester/bts_sysmo.py
@@ -0,0 +1,133 @@
+# osmo_gsm_tester: specifics for running a sysmoBTS
+#
+# Copyright (C) 2016-2017 by sysmocom - s.f.m.c. GmbH
+#
+# Author: Neels Hofmeyr <neels@hofmeyr.de>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import pprint
+from . import log, config, util, template, process, pcu_sysmo, bts_osmo
+
+class SysmoBts(bts_osmo.OsmoBts):
+##############
+# PROTECTED
+##############
+
+ REMOTE_DIR = '/osmo-gsm-tester-bts'
+ BTS_SYSMO_BIN = 'osmo-bts-sysmo'
+ BTS_SYSMO_CFG = 'osmo-bts-sysmo.cfg'
+
+ def __init__(self, suite_run, conf):
+ super().__init__(suite_run, conf, SysmoBts.BTS_SYSMO_BIN, 'osmo_bts_sysmo')
+ self.run_dir = None
+ self.inst = None
+ self.remote_inst = None
+ self.remote_dir = None
+ self.remote_user = 'root'
+
+ def _direct_pcu_enabled(self):
+ return util.str2bool(self.conf.get('direct_pcu'))
+
+ def launch_remote(self, name, popen_args, remote_cwd=None, keepalive=False):
+ run_dir = self.run_dir.new_dir(name)
+ proc = process.RemoteProcess(name, run_dir, self.remote_user, self.remote_addr(), remote_cwd,
+ popen_args)
+ self.suite_run.remember_to_stop(proc, keepalive)
+ proc.launch()
+ return proc
+
+ def create_pcu(self):
+ return pcu_sysmo.OsmoPcuSysmo(self.suite_run, self, self.conf)
+
+ def configure(self):
+ if self.bsc is None:
+ raise RuntimeError('BTS needs to be added to a BSC or NITB before it can be configured')
+
+ self.config_file = self.run_dir.new_file(SysmoBts.BTS_SYSMO_CFG)
+ self.dbg(config_file=self.config_file)
+
+ values = { 'osmo_bts_sysmo': config.get_defaults('osmo_bts_sysmo') }
+ config.overlay(values, self.suite_run.config())
+ config.overlay(values, {
+ 'osmo_bts_sysmo': {
+ 'oml_remote_ip': self.bsc.addr(),
+ 'pcu_socket_path': self.pcu_socket_path(),
+ }
+ })
+ config.overlay(values, { 'osmo_bts_sysmo': self.conf })
+
+ self.dbg('SYSMOBTS CONFIG:\n' + pprint.pformat(values))
+
+ with open(self.config_file, 'w') as f:
+ r = template.render(SysmoBts.BTS_SYSMO_CFG, values)
+ self.dbg(r)
+ f.write(r)
+
+########################
+# PUBLIC - INTERNAL API
+########################
+ def pcu_socket_path(self):
+ return os.path.join(SysmoBts.REMOTE_DIR, 'pcu_bts')
+
+ def conf_for_bsc(self):
+ values = self.conf_for_bsc_prepare()
+ self.dbg(conf=values)
+ return values
+
+###################
+# PUBLIC (test API included)
+###################
+ # We get log from ssh stdout instead of usual stderr.
+ def ready_for_pcu(self):
+ if not self.proc_bts or not self.proc_bts.is_running:
+ return False
+ return 'BTS is up' in (self.proc_bts.get_stdout() or '')
+
+ def start(self, keepalive=False):
+ if self.bsc is None:
+ raise RuntimeError('BTS needs to be added to a BSC or NITB before it can be started')
+ log.log('Starting sysmoBTS to connect to', self.bsc)
+ self.run_dir = util.Dir(self.suite_run.get_test_run_dir().new_dir(self.name()))
+ self.configure()
+
+ self.inst = util.Dir(os.path.abspath(self.suite_run.trial.get_inst(SysmoBts.BTS_SYSMO_BIN)))
+ lib = self.inst.child('lib')
+ if not os.path.isdir(lib):
+ raise log.Error('No lib/ in', self.inst)
+ if not self.inst.isfile('bin', SysmoBts.BTS_SYSMO_BIN):
+ raise log.Error('No osmo-bts-sysmo binary in', self.inst)
+
+ remote_run_dir = util.Dir(SysmoBts.REMOTE_DIR)
+
+ self.remote_inst = process.copy_inst_ssh(self.run_dir, self.inst, remote_run_dir, self.remote_user,
+ self.remote_addr(), SysmoBts.BTS_SYSMO_BIN, self.config_file)
+ process.run_remote_sync(self.run_dir, self.remote_user, self.remote_addr(), 'reload-dsp-firmware',
+ ('/bin/sh', '-c', '"cat /lib/firmware/sysmobts-v?.bit > /dev/fpgadl_par0 ; cat /lib/firmware/sysmobts-v?.out > /dev/dspdl_dm644x_0"'))
+
+ remote_config_file = remote_run_dir.child(SysmoBts.BTS_SYSMO_CFG)
+ remote_lib = self.remote_inst.child('lib')
+ remote_binary = self.remote_inst.child('bin', 'osmo-bts-sysmo')
+
+ args = ('LD_LIBRARY_PATH=%s' % remote_lib,
+ remote_binary, '-c', remote_config_file, '-r', '1',
+ '-i', self.bsc.addr())
+
+ if self._direct_pcu_enabled():
+ args += ('-M',)
+
+ self.proc_bts = self.launch_remote('osmo-bts-sysmo', args, remote_cwd=remote_run_dir, keepalive=keepalive)
+
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/src/osmo_gsm_tester/config.py b/src/osmo_gsm_tester/config.py
new file mode 100644
index 0000000..7f1e52f
--- /dev/null
+++ b/src/osmo_gsm_tester/config.py
@@ -0,0 +1,312 @@
+# osmo_gsm_tester: read and manage config files and global config
+#
+# Copyright (C) 2016-2017 by sysmocom - s.f.m.c. GmbH
+#
+# Author: Neels Hofmeyr <neels@hofmeyr.de>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+# discussion for choice of config file format:
+#
+# Python syntax is insane, because it allows the config file to run arbitrary
+# python commands.
+#
+# INI file format is nice and simple, but it doesn't allow having the same
+# section numerous times (e.g. to define several modems or BTS models) and does
+# not support nesting.
+#
+# JSON has too much braces and quotes to be easy to type
+#
+# YAML formatting is lean, but:
+# - too powerful. The normal load() allows arbitrary code execution. There is
+# safe_load().
+# - allows several alternative ways of formatting, better to have just one
+# authoritative style.
+# - tries to detect types. It would be better to receive every setting as
+# simple string rather than e.g. an IMSI as an integer.
+# - e.g. an IMSI starting with a zero is interpreted as octal value, resulting
+# in super confusing error messages if the user merely forgets to quote it.
+# - does not tell me which line a config item came from, so no detailed error
+# message is possible.
+#
+# The Python ConfigParserShootout page has numerous contestants, but many of
+# those seem to be not widely used / standardized or even tested.
+# https://wiki.python.org/moin/ConfigParserShootout
+#
+# The optimum would be a stripped down YAML format.
+# In the lack of that, we shall go with yaml.load_safe() + a round trip
+# (feeding back to itself), converting keys to lowercase and values to string.
+# There is no solution for octal interpretations nor config file source lines
+# unless, apparently, we implement our own config parser.
+
+import yaml
+import os
+import copy
+
+from . import log, schema, util
+from .util import is_dict, is_list, Dir, get_tempdir
+
+ENV_PREFIX = 'OSMO_GSM_TESTER_'
+ENV_CONF = os.getenv(ENV_PREFIX + 'CONF')
+
+override_conf = None
+
+DEFAULT_CONFIG_LOCATIONS = [
+ '.',
+ os.path.join(os.getenv('HOME'), '.config', 'osmo-gsm-tester'),
+ '/usr/local/etc/osmo-gsm-tester',
+ '/etc/osmo-gsm-tester'
+ ]
+
+PATHS_CONF = 'paths.conf'
+DEFAULT_SUITES_CONF = 'default-suites.conf'
+PATH_STATE_DIR = 'state_dir'
+PATH_SUITES_DIR = 'suites_dir'
+PATH_SCENARIOS_DIR = 'scenarios_dir'
+PATHS_SCHEMA = {
+ PATH_STATE_DIR: schema.STR,
+ PATH_SUITES_DIR: schema.STR,
+ PATH_SCENARIOS_DIR: schema.STR,
+ }
+
+PATHS_TEMPDIR_STR = '$TEMPDIR'
+
+PATHS = None
+
+def _get_config_file(basename, fail_if_missing=True):
+ if override_conf:
+ locations = [ override_conf ]
+ elif ENV_CONF:
+ locations = [ ENV_CONF ]
+ else:
+ locations = DEFAULT_CONFIG_LOCATIONS
+
+ for l in locations:
+ real_l = os.path.realpath(l)
+ p = os.path.realpath(os.path.join(real_l, basename))
+ if os.path.isfile(p):
+ log.dbg('Found config file', basename, 'as', p, 'in', l, 'which is', real_l, _category=log.C_CNF)
+ return (p, real_l)
+ if not fail_if_missing:
+ return None, None
+ raise RuntimeError('configuration file not found: %r in %r' % (basename,
+ [os.path.abspath(p) for p in locations]))
+
+def get_config_file(basename, fail_if_missing=True):
+ path, found_in = _get_config_file(basename, fail_if_missing)
+ return path
+
+def read_config_file(basename, validation_schema=None, if_missing_return=False):
+ fail_if_missing = True
+ if if_missing_return is not False:
+ fail_if_missing = False
+ path = get_config_file(basename, fail_if_missing=fail_if_missing)
+ if path is None:
+ return if_missing_return
+ return read(path, validation_schema=validation_schema, if_missing_return=if_missing_return)
+
+def get_configured_path(label, allow_unset=False):
+ global PATHS
+
+ env_name = ENV_PREFIX + label.upper()
+ env_path = os.getenv(env_name)
+ if env_path:
+ real_env_path = os.path.realpath(env_path)
+ log.dbg('Found path', label, 'as', env_path, 'in', '$' + env_name, 'which is', real_env_path, _category=log.C_CNF)
+ return real_env_path
+
+ if PATHS is None:
+ paths_file, found_in = _get_config_file(PATHS_CONF)
+ PATHS = read(paths_file, PATHS_SCHEMA)
+ # sorted for deterministic regression test results
+ for key, path in sorted(PATHS.items()):
+ if not path.startswith(os.pathsep):
+ PATHS[key] = os.path.realpath(os.path.join(found_in, path))
+ log.dbg(paths_file + ': relative path', path, 'is', PATHS[key], _category=log.C_CNF)
+ p = PATHS.get(label)
+ if p is None and not allow_unset:
+ raise RuntimeError('missing configuration in %s: %r' % (PATHS_CONF, label))
+
+ log.dbg('Found path', label, 'as', p, _category=log.C_CNF)
+ if p.startswith(PATHS_TEMPDIR_STR):
+ p = os.path.join(get_tempdir(), p[len(PATHS_TEMPDIR_STR):])
+ log.dbg('Path', label, 'contained', PATHS_TEMPDIR_STR, 'and becomes', p, _category=log.C_CNF)
+ return p
+
+def get_state_dir():
+ return Dir(get_configured_path(PATH_STATE_DIR))
+
+def get_suites_dir():
+ return Dir(get_configured_path(PATH_SUITES_DIR))
+
+def get_scenarios_dir():
+ return Dir(get_configured_path(PATH_SCENARIOS_DIR))
+
+def read(path, validation_schema=None, if_missing_return=False):
+ log.ctx(path)
+ if not os.path.isfile(path) and if_missing_return is not False:
+ return if_missing_return
+ with open(path, 'r') as f:
+ config = yaml.safe_load(f)
+ config = _standardize(config)
+ if validation_schema:
+ schema.validate(config, validation_schema)
+ return config
+
+def write(path, config):
+ log.ctx(path)
+ with open(path, 'w') as f:
+ f.write(tostr(config))
+
+def tostr(config):
+ return _tostr(_standardize(config))
+
+def _tostr(config):
+ return yaml.dump(config, default_flow_style=False)
+
+def _standardize_item(item):
+ if isinstance(item, (tuple, list)):
+ return [_standardize_item(i) for i in item]
+ if isinstance(item, dict):
+ return dict([(key.lower(), _standardize_item(val)) for key,val in item.items()])
+ return str(item)
+
+def _standardize(config):
+ config = yaml.safe_load(_tostr(_standardize_item(config)))
+ return config
+
+def get_defaults(for_kind):
+ defaults = read_config_file('defaults.conf', if_missing_return={})
+ return defaults.get(for_kind, {})
+
+class Scenario(log.Origin, dict):
+ def __init__(self, name, path):
+ super().__init__(log.C_TST, name)
+ self.path = path
+
+def get_scenario(name, validation_schema=None):
+ scenarios_dir = get_scenarios_dir()
+ if not name.endswith('.conf'):
+ name = name + '.conf'
+ path = scenarios_dir.child(name)
+ if not os.path.isfile(path):
+ raise RuntimeError('No such scenario file: %r' % path)
+ sc = Scenario(name, path)
+ sc.update(read(path, validation_schema=validation_schema))
+ return sc
+
+def add(dest, src):
+ if is_dict(dest):
+ if not is_dict(src):
+ raise ValueError('cannot add to dict a value of type: %r' % type(src))
+
+ for key, val in src.items():
+ dest_val = dest.get(key)
+ if dest_val is None:
+ dest[key] = val
+ else:
+ log.ctx(key=key)
+ add(dest_val, val)
+ return
+ if is_list(dest):
+ if not is_list(src):
+ raise ValueError('cannot add to list a value of type: %r' % type(src))
+ dest.extend(src)
+ return
+ if dest == src:
+ return
+ raise ValueError('cannot add dicts, conflicting items (values %r and %r)'
+ % (dest, src))
+
+def combine(dest, src):
+ if is_dict(dest):
+ if not is_dict(src):
+ raise ValueError('cannot combine dict with a value of type: %r' % type(src))
+
+ for key, val in src.items():
+ log.ctx(key=key)
+ dest_val = dest.get(key)
+ if dest_val is None:
+ dest[key] = val
+ else:
+ combine(dest_val, val)
+ return
+ if is_list(dest):
+ if not is_list(src):
+ raise ValueError('cannot combine list with a value of type: %r' % type(src))
+ # Validate that all elements in both lists are of the same type:
+ t = util.list_validate_same_elem_type(src + dest)
+ if t is None:
+ return # both lists are empty, return
+ # For lists of complex objects, we expect them to be sorted lists:
+ if t in (dict, list, tuple):
+ for i in range(len(dest)):
+ log.ctx(idx=i)
+ src_it = src[i] if i < len(src) else util.empty_instance_type(t)
+ combine(dest[i], src_it)
+ for i in range(len(dest), len(src)):
+ log.ctx(idx=i)
+ dest.append(src[i])
+ else: # for lists of basic elements, we handle them as unsorted sets:
+ for elem in src:
+ if elem not in dest:
+ dest.append(elem)
+ return
+ if dest == src:
+ return
+ raise ValueError('cannot combine dicts, conflicting items (values %r and %r)'
+ % (dest, src))
+
+def overlay(dest, src):
+ if is_dict(dest):
+ if not is_dict(src):
+ raise ValueError('cannot combine dict with a value of type: %r' % type(src))
+
+ for key, val in src.items():
+ log.ctx(key=key)
+ dest_val = dest.get(key)
+ dest[key] = overlay(dest_val, val)
+ return dest
+ if is_list(dest):
+ if not is_list(src):
+ raise ValueError('cannot combine list with a value of type: %r' % type(src))
+ copy_len = min(len(src),len(dest))
+ for i in range(copy_len):
+ log.ctx(idx=i)
+ dest[i] = overlay(dest[i], src[i])
+ for i in range(copy_len, len(src)):
+ dest.append(src[i])
+ return dest
+ return src
+
+def replicate_times(d):
+ '''
+ replicate items that have a "times" > 1
+
+ 'd' is a dict matching WANT_SCHEMA, which is the same as
+ the RESOURCES_SCHEMA, except each entity that can be reserved has a 'times'
+ field added, to indicate how many of those should be reserved.
+ '''
+ d = copy.deepcopy(d)
+ for key, item_list in d.items():
+ idx = 0
+ while idx < len(item_list):
+ item = item_list[idx]
+ times = int(item.pop('times', 1))
+ for j in range(1, times):
+ item_list.insert(idx + j, copy.deepcopy(item))
+ idx += times
+ return d
+
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/src/osmo_gsm_tester/esme.py b/src/osmo_gsm_tester/esme.py
new file mode 100644
index 0000000..de3ac16
--- /dev/null
+++ b/src/osmo_gsm_tester/esme.py
@@ -0,0 +1,185 @@
+# osmo_gsm_tester: SMPP ESME to talk to SMSC
+#
+# Copyright (C) 2017 by sysmocom - s.f.m.c. GmbH
+#
+# Author: Pau Espin Pedrol <pespin@sysmocom.de>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import smpplib.gsm
+import smpplib.client
+import smpplib.command
+import smpplib.consts
+import smpplib.exceptions
+
+from . import log
+from .event_loop import MainLoop
+
+# if you want to know what's happening inside python-smpplib
+#import logging
+#logging.basicConfig(level='DEBUG')
+
+MAX_SYS_ID_LEN = 16
+MAX_PASSWD_LEN = 16
+
+class Esme(log.Origin):
+
+ MSGMODE_TRANSACTION = smpplib.consts.SMPP_MSGMODE_FORWARD
+ MSGMODE_STOREFORWARD = smpplib.consts.SMPP_MSGMODE_STOREFORWARD
+
+ def __init__(self, msisdn):
+ self.msisdn = msisdn
+ # Get last characters of msisdn to stay inside MAX_SYS_ID_LEN. Similar to modulus operator.
+ self.set_system_id('esme-' + self.msisdn[-11:])
+ super().__init__(log.C_TST, self.system_id)
+ self.client = None
+ self.smsc = None
+ self.set_password('esme-pwd')
+ self.connected = False
+ self.bound = False
+ self.listening = False
+ self.references_pending_receipt = []
+ self.next_user_message_reference = 1
+
+ def __del__(self):
+ self.cleanup()
+
+ def cleanup(self):
+ try:
+ self.disconnect()
+ except smpplib.exceptions.ConnectionError:
+ pass
+
+ def set_smsc(self, smsc):
+ self.smsc = smsc
+
+ def set_system_id(self, name):
+ if len(name) > MAX_SYS_ID_LEN:
+ raise log.Error('Esme system_id too long! %d vs %d', len(name), MAX_SYS_ID_LEN)
+ self.system_id = name
+
+ def set_password(self, password):
+ if len(password) > MAX_PASSWD_LEN:
+ raise log.Error('Esme password too long! %d vs %d', len(password), MAX_PASSWD_LEN)
+ self.password = password
+
+ def conf_for_smsc(self):
+ config = { 'system_id': self.system_id, 'password': self.password }
+ return config
+
+ def poll(self):
+ self.client.poll()
+
+ def start_listening(self):
+ self.listening = True
+ MainLoop.register_poll_func(self.poll)
+
+ def stop_listening(self):
+ if not self.listening:
+ return
+ self.listening = False
+ # Empty the queue before processing the unbind + disconnect PDUs
+ MainLoop.unregister_poll_func(self.poll)
+ self.poll()
+
+ def connect(self):
+ host, port = self.smsc.addr_port
+ if self.client:
+ self.disconnect()
+ self.client = smpplib.client.Client(host, port, timeout=None)
+ self.client.set_message_sent_handler(
+ lambda pdu: self.dbg('Unhandled submit_sm_resp message:', pdu.sequence) )
+ self.client.set_message_received_handler(self._message_received_handler)
+ self.client.connect()
+ self.connected = True
+ self.client.bind_transceiver(system_id=self.system_id, password=self.password)
+ self.bound = True
+ self.log('Connected and bound successfully to %s (%s:%d). Starting to listen.' % (self.system_id, host, port))
+ self.start_listening()
+
+ def disconnect(self):
+ self.stop_listening()
+ if self.bound:
+ self.client.unbind()
+ self.bound = False
+ if self.connected:
+ self.client.disconnect()
+ self.connected = False
+
+ def _message_received_handler(self, pdu, *args):
+ self.dbg('message received:', seq=pdu.sequence)
+ if isinstance(pdu, smpplib.command.AlertNotification):
+ self.dbg('message received: AlertNotification:', ms_availability_status=pdu.ms_availability_status)
+ elif isinstance(pdu, smpplib.command.DeliverSM):
+ umref = int(pdu.user_message_reference)
+ self.dbg('message received: DeliverSM', references_pending_receipt=self.references_pending_receipt, user_message_reference=umref)
+ self.references_pending_receipt.remove(umref)
+
+ def receipt_was_received(self, umref):
+ return umref not in self.references_pending_receipt
+
+ def run_method_expect_failure(self, errcode, method, *args):
+ try:
+ method(*args)
+ #it should not succeed, raise an exception:
+ raise log.Error('SMPP Failure: %s should have failed with SMPP error %d (%s) but succeeded.' % (method, errcode, smpplib.consts.DESCRIPTIONS[errcode]))
+ except smpplib.exceptions.PDUError as e:
+ if e.args[1] != errcode:
+ raise e
+ self.dbg('Expected failure triggered: %d' % errcode)
+
+ def sms_send(self, sms_obj, mode, receipt=False):
+ parts, encoding_flag, msg_type_flag = smpplib.gsm.make_parts(str(sms_obj))
+ seqs = []
+ self.log('Sending SMS "%s" to %s' % (str(sms_obj), sms_obj.dst_msisdn()))
+ umref = self.next_user_message_reference
+ self.next_user_message_reference = (self.next_user_message_reference + 1) % (1 << 8)
+ for part in parts:
+ pdu = self.client.send_message(
+ source_addr_ton=smpplib.consts.SMPP_TON_INTL,
+ source_addr_npi=smpplib.consts.SMPP_NPI_ISDN,
+ source_addr=sms_obj.src_msisdn(),
+ dest_addr_ton=smpplib.consts.SMPP_TON_INTL,
+ dest_addr_npi=smpplib.consts.SMPP_NPI_ISDN,
+ destination_addr=sms_obj.dst_msisdn(),
+ short_message=part,
+ data_coding=encoding_flag,
+ esm_class=mode,
+ registered_delivery=receipt,
+ user_message_reference=umref,
+ )
+
+ self.dbg('sent part with seq', pdu.sequence)
+ seqs.append(pdu.sequence)
+ if receipt:
+ self.references_pending_receipt.append(umref)
+ return umref, seqs
+
+ def _process_pdus_pending(self, pdu, **kwargs):
+ self.dbg('message sent resp with seq', pdu.sequence, ', pdus_pending:', self.pdus_pending)
+ if pdu.sequence in self.pdus_pending:
+ self.pdus_pending.remove(pdu.sequence)
+
+ def sms_send_wait_resp(self, sms_obj, mode, receipt=False):
+ old_func = self.client.message_sent_handler
+ try:
+ umref, self.pdus_pending = self.sms_send(sms_obj, mode, receipt)
+ self.dbg('pdus_pending:', self.pdus_pending)
+ self.client.set_message_sent_handler(self._process_pdus_pending)
+ MainLoop.wait(self, lambda: len(self.pdus_pending) == 0, timeout=10)
+ return umref
+ finally:
+ self.client.set_message_sent_handler(old_func)
+
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/src/osmo_gsm_tester/event_loop.py b/src/osmo_gsm_tester/event_loop.py
new file mode 100644
index 0000000..fe88ef4
--- /dev/null
+++ b/src/osmo_gsm_tester/event_loop.py
@@ -0,0 +1,121 @@
+# osmo_gsm_tester: Event loop
+#
+# Copyright (C) 2016-2017 by sysmocom - s.f.m.c. GmbH
+#
+# Author: Pau Espin Pedrol <pespin@sysmocom.de>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import time
+from gi.repository import GLib, GObject
+
+from . import log
+
+class DeferredHandling:
+
+ def __init__(self):
+ self.defer_queue = []
+
+ def handle_queue(self):
+ while self.defer_queue:
+ handler, args, kwargs = self.defer_queue.pop(0)
+ handler(*args, **kwargs)
+
+ def defer(self, handler, *args, **kwargs):
+ self.defer_queue.append((handler, args, kwargs))
+
+class WaitRequest:
+
+ def __init__(self, condition, condition_args, condition_kwargs, timeout, timestep):
+ self.timeout_ack = False
+ self.condition_ack = False
+ self.timeout_started = time.time()
+ self.timeout = timeout
+ self.condition = condition
+ self.condition_args = condition_args
+ self.condition_kwargs = condition_kwargs
+
+ def condition_check(self):
+ #print("_wait_condition_check")
+ waited = time.time() - self.timeout_started
+ if self.condition(*self.condition_args, **self.condition_kwargs):
+ self.condition_ack = True
+ elif waited > self.timeout:
+ self.timeout_ack = True
+
+class EventLoop:
+
+ def __init__(self):
+ self.poll_funcs = []
+ self.gloop = GLib.MainLoop()
+ self.gctx = self.gloop.get_context()
+ self.deferred_handling = DeferredHandling()
+
+ def _trigger_cb_func(self, user_data):
+ self.defer(user_data)
+ return True #to retrigger the timeout
+
+ def defer(self, handler, *args, **kwargs):
+ self.deferred_handling.defer(handler, *args, **kwargs)
+
+ def register_poll_func(self, func, timestep=1):
+ id = GObject.timeout_add(timestep*1000, self._trigger_cb_func, func) # in 1/1000th of a sec
+ self.poll_funcs.append((func, id))
+
+ def unregister_poll_func(self, func):
+ for pair in self.poll_funcs:
+ f, id = pair
+ if f == func:
+ GObject.source_remove(id)
+ self.poll_funcs.remove(pair)
+ return
+
+ def poll(self, may_block=False):
+ self.gctx.iteration(may_block)
+ self.deferred_handling.handle_queue()
+
+ def wait_no_raise(self, log_obj, condition, condition_args, condition_kwargs, timeout, timestep):
+ if not timeout or timeout < 0:
+ self = log_obj
+ raise log.Error('wait() *must* time out at some point.', timeout=timeout)
+ if timestep < 0.1:
+ timestep = 0.1
+
+ wait_req = WaitRequest(condition, condition_args, condition_kwargs, timeout, timestep)
+ wait_id = GObject.timeout_add(timestep*1000, self._trigger_cb_func, wait_req.condition_check)
+ while True:
+ try:
+ self.poll(may_block=True)
+ except Exception: # cleanup of temporary resources in the wait scope
+ GObject.source_remove(wait_id)
+ raise
+ if wait_req.condition_ack or wait_req.timeout_ack:
+ GObject.source_remove(wait_id)
+ success = wait_req.condition_ack
+ return success
+
+ def wait(self, log_obj, condition, *condition_args, timeout=300, timestep=1, **condition_kwargs):
+ if not self.wait_no_raise(log_obj, condition, condition_args, condition_kwargs, timeout, timestep):
+ log.ctx(log_obj)
+ raise log.Error('Wait timeout', condition=condition, timeout=timeout, timestep=timestep)
+
+ def sleep(self, log_obj, seconds):
+ assert seconds > 0.
+ self.wait_no_raise(log_obj, lambda: False, [], {}, timeout=seconds, timestep=seconds)
+
+
+MainLoop = EventLoop()
+
+
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/src/osmo_gsm_tester/iperf3.py b/src/osmo_gsm_tester/iperf3.py
new file mode 100644
index 0000000..55cb546
--- /dev/null
+++ b/src/osmo_gsm_tester/iperf3.py
@@ -0,0 +1,123 @@
+# osmo_gsm_tester: specifics for running an iperf3 client and server
+#
+# Copyright (C) 2018 by sysmocom - s.f.m.c. GmbH
+#
+# Author: Pau Espin Pedrol <pespin@sysmocom.de>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import json
+
+from . import log, util, process, pcap_recorder
+
+def iperf3_result_to_json(file):
+ with open(file) as f:
+ # Sometimes iperf3 provides 2 dictionaries, the 2nd one being an error about being interrupted (by us).
+ # The json parser raises an exception on several concatenated dictionaries, since that is not a valid json object.
+ # We are only interested in the first dictionary, the regular results one:
+ d = f.read().split("\n}\n")[0] + "\n}\n"
+ data = json.loads(d)
+ return data
+
+
+class IPerf3Server(log.Origin):
+
+ DEFAULT_SRV_PORT = 5003
+
+ def __init__(self, suite_run, ip_address):
+ super().__init__(log.C_RUN, 'iperf3-srv_%s' % ip_address.get('addr'))
+ self.run_dir = None
+ self.config_file = None
+ self.process = None
+ self.suite_run = suite_run
+ self.ip_address = ip_address
+ self._port = IPerf3Server.DEFAULT_SRV_PORT
+
+ def start(self):
+ self.log('Starting iperf3-srv')
+ self.run_dir = util.Dir(self.suite_run.get_test_run_dir().new_dir(self.name()))
+
+ pcap_recorder.PcapRecorder(self.suite_run, self.run_dir.new_dir('pcap'), None,
+ 'host %s and port not 22' % self.addr())
+
+ self.log_file = self.run_dir.new_file('iperf3_srv.json')
+ self.process = process.Process(self.name(), self.run_dir,
+ ('iperf3', '-s', '-B', self.addr(),
+ '-p', str(self._port), '-J',
+ '--logfile', os.path.abspath(self.log_file)),
+ env={})
+ self.suite_run.remember_to_stop(self.process)
+ self.process.launch()
+
+ def set_port(self, port):
+ self._port = port
+
+ def stop(self):
+ self.suite_run.stop_process(self.process)
+
+ def get_results(self):
+ return iperf3_result_to_json(self.log_file)
+
+ def addr(self):
+ return self.ip_address.get('addr')
+
+ def port(self):
+ return self._port
+
+ def __str__(self):
+ return "%s:%u" %(self.addr(), self.port())
+
+ def running(self):
+ return not self.process.terminated()
+
+ def create_client(self):
+ return IPerf3Client(self.suite_run, self)
+
+class IPerf3Client(log.Origin):
+
+ def __init__(self, suite_run, iperf3srv):
+ super().__init__(log.C_RUN, 'iperf3-cli_%s' % iperf3srv.addr())
+ self.run_dir = None
+ self.config_file = None
+ self.process = None
+ self.server = iperf3srv
+ self.suite_run = suite_run
+
+ def prepare_test_proc(self, netns=None):
+ self.log('Starting iperf3-client connecting to %s:%d' % (self.server.addr(), self.server.port()))
+ self.run_dir = util.Dir(self.suite_run.get_test_run_dir().new_dir(self.name()))
+
+ pcap_recorder.PcapRecorder(self.suite_run, self.run_dir.new_dir('pcap'), None,
+ 'host %s and port not 22' % self.server.addr(), netns)
+
+ self.log_file = self.run_dir.new_file('iperf3_cli.json')
+ popen_args = ('iperf3', '-c', self.server.addr(),
+ '-p', str(self.server.port()), '-J',
+ '--logfile', os.path.abspath(self.log_file))
+ if netns:
+ self.process = process.NetNSProcess(self.name(), self.run_dir, netns, popen_args, env={})
+ else:
+ self.process = process.Process(self.name(), self.run_dir, popen_args, env={})
+ return self.process
+
+ def run_test_sync(self, netns=None):
+ self.prepare_test_proc(netns)
+ self.process.launch_sync()
+ return self.get_results()
+
+ def get_results(self):
+ return iperf3_result_to_json(self.log_file)
+
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/src/osmo_gsm_tester/log.py b/src/osmo_gsm_tester/log.py
new file mode 100644
index 0000000..7c4ae44
--- /dev/null
+++ b/src/osmo_gsm_tester/log.py
@@ -0,0 +1,601 @@
+# osmo_gsm_tester: global logging
+#
+# Copyright (C) 2016-2017 by sysmocom - s.f.m.c. GmbH
+#
+# Author: Neels Hofmeyr <neels@hofmeyr.de>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import sys
+import time
+import traceback
+import contextlib
+import atexit
+from datetime import datetime # we need this for strftime as the one from time doesn't carry microsecond info
+from inspect import getframeinfo, stack
+
+from .util import is_dict
+
+L_ERR = 30
+L_LOG = 20
+L_DBG = 10
+L_TRACEBACK = 'TRACEBACK'
+
+LEVEL_STRS = {
+ 'err': L_ERR,
+ 'log': L_LOG,
+ 'dbg': L_DBG,
+ }
+
+C_NET = 'net'
+C_RUN = 'run'
+C_TST = 'tst'
+C_CNF = 'cnf'
+C_BUS = 'bus'
+C_DEFAULT = '---'
+
+LOG_CTX_VAR = '_log_ctx_'
+
+def dbg(*messages, _origin=None, _category=None, _src=None, **named_items):
+ '''Log on debug level. See also log()'''
+ _log(messages, named_items, origin=_origin, category=_category, level=L_DBG, src=_src)
+
+def log(*messages, _origin=None, _category=None, _level=L_LOG, _src=None, **named_items):
+ '''Log a message. The origin, an Origin class instance, is normally
+ determined by stack magic, only pass _origin to override. The category is
+ taken from the origin. _src is normally an integer indicating how many
+ levels up the stack sits the interesting source file to log about, can also
+ be a string. The log message is composed of all *messages and
+ **named_items, for example:
+ log('frobnicate:', thing, key=current_key, prop=erty)
+ '''
+ _log(messages, named_items, origin=_origin, category=_category, level=_level, src=_src)
+
+def err(*messages, _origin=None, _category=None, _src=None, **named_items):
+ '''Log on error level. See also log()'''
+ _log(messages, named_items, origin=_origin, category=_category, level=L_ERR, src=_src)
+
+def _log(messages=[], named_items={}, origin=None, category=None, level=L_LOG, src=None):
+ if origin is None:
+ origin = Origin.find_on_stack()
+ if category is None and isinstance(origin, Origin):
+ category = origin._log_category
+ if src is None:
+ # two levels up
+ src = 2
+ if isinstance(src, int):
+ src = get_src_from_caller(src + 1)
+ for target in LogTarget.all_targets:
+ target.log(origin, category, level, src, messages, named_items)
+
+
+LONG_DATEFMT = '%Y-%m-%d_%H:%M:%S.%f'
+DATEFMT = '%H:%M:%S.%f'
+
+# may be overridden by regression tests
+get_process_id = lambda: '%d-%d' % (os.getpid(), time.time())
+
+class Error(Exception):
+ def __init__(self, *messages, **named_items):
+ super().__init__(compose_message(messages, named_items))
+
+class LogTarget:
+ all_targets = []
+
+ do_log_time = None
+ do_log_category = None
+ do_log_level = None
+ do_log_origin = None
+ do_log_all_origins_on_levels = None
+ do_log_traceback = None
+ do_log_src = None
+ origin_width = None
+ origin_fmt = None
+ all_levels = None
+
+ # redirected by logging test
+ get_time_str = lambda self: datetime.now().strftime(self.log_time_fmt)
+
+ # sink that gets each complete logging line
+ log_write_func = None
+
+ category_levels = None
+
+ def __init__(self, log_write_func=None):
+ if log_write_func is None:
+ log_write_func = sys.__stdout__.write
+ self.log_write_func = log_write_func
+ self.category_levels = {}
+ self.style()
+ LogTarget.all_targets.append(self)
+
+ def remove(self):
+ LogTarget.all_targets.remove(self)
+
+ def style(self, time=True, time_fmt=DATEFMT, category=True, level=True, origin=True, origin_width=32, src=True, trace=False, all_origins_on_levels=(L_ERR, L_LOG, L_DBG, L_TRACEBACK)):
+ '''
+ set all logging format aspects, to defaults if not passed:
+ time: log timestamps;
+ time_fmt: format of timestamps;
+ category: print the logging category (three letters);
+ level: print the logging level, unless it is L_LOG;
+ origin: print which object(s) the message originated from;
+ origin_width: fill up the origin string with whitespace to this width;
+ src: log the source file and line number the log comes from;
+ trace: on exceptions, log the full stack trace;
+ all_origins_on_levels: pass a tuple of logging levels that should have a full trace of origins
+ '''
+ self.log_time_fmt = time_fmt
+ self.do_log_time = bool(time)
+ if not self.log_time_fmt:
+ self.do_log_time = False
+ self.do_log_category = bool(category)
+ self.do_log_level = bool(level)
+ self.do_log_origin = bool(origin)
+ self.origin_width = int(origin_width)
+ self.origin_fmt = '{:>%ds}' % self.origin_width
+ self.do_log_src = src
+ self.do_log_traceback = trace
+ self.do_log_all_origins_on_levels = tuple(all_origins_on_levels or [])
+ return self
+
+ def style_change(self, time=None, time_fmt=None, category=None, level=None, origin=None, origin_width=None, src=None, trace=None, all_origins_on_levels=None):
+ 'modify only the given aspects of the logging format'
+ self.style(
+ time=(time if time is not None else self.do_log_time),
+ time_fmt=(time_fmt if time_fmt is not None else self.log_time_fmt),
+ category=(category if category is not None else self.do_log_category),
+ level=(level if level is not None else self.do_log_level),
+ origin=(origin if origin is not None else self.do_log_origin),
+ origin_width=(origin_width if origin_width is not None else self.origin_width),
+ src=(src if src is not None else self.do_log_src),
+ trace=(trace if trace is not None else self.do_log_traceback),
+ all_origins_on_levels=(all_origins_on_levels if all_origins_on_levels is not None else self.do_log_all_origins_on_levels),
+ )
+ return self
+
+ def set_level(self, category, level):
+ 'set global logging log.L_* level for a given log.C_* category'
+ self.category_levels[category] = level
+ return self
+
+ def set_all_levels(self, level):
+ self.all_levels = level
+ return self
+
+ def is_enabled(self, category, level):
+ if level == L_TRACEBACK:
+ return self.do_log_traceback
+ if self.all_levels is not None:
+ is_level = self.all_levels
+ else:
+ is_level = self.category_levels.get(category)
+ if is_level is None:
+ is_level = L_LOG
+ if level < is_level:
+ return False
+ return True
+
+ def log(self, origin, category, level, src, messages, named_items):
+ if category and len(category) != 3:
+ self.log_write_func('WARNING: INVALID LOGGING CATEGORY %r\n' % category)
+ self.log_write_func('origin=%r category=%r level=%r\n' % (origin, category, level));
+
+ if not category:
+ category = C_DEFAULT
+ if not self.is_enabled(category, level):
+ return
+
+ log_pre = []
+ if self.do_log_time:
+ log_pre.append(self.get_time_str())
+
+ if self.do_log_category:
+ log_pre.append(category)
+
+ deeper_origins = ''
+ if self.do_log_origin:
+ if origin is None:
+ name = '-'
+ elif isinstance(origin, Origin):
+ name = origin.name()
+ # only log ancestry when there is more than one
+ if origin._parent is not None:
+ deeper_origins = origin.ancestry_str()
+ elif isinstance(origin, str):
+ name = origin or None
+ if not name:
+ name = str(origin.__class__.__name__)
+ log_pre.append(self.origin_fmt.format(name))
+
+ if self.do_log_level and level != L_LOG:
+ loglevel = '%s: ' % (level_str(level) or ('loglevel=' + str(level)))
+ else:
+ loglevel = ''
+
+ log_line = [compose_message(messages, named_items)]
+
+ if deeper_origins and (level in self.do_log_all_origins_on_levels):
+ log_line.append(' [%s]' % deeper_origins)
+
+ if self.do_log_src and src:
+ log_line.append(' [%s]' % str(src))
+
+ log_str = '%s%s%s%s' % (' '.join(log_pre),
+ ': ' if log_pre else '',
+ loglevel,
+ ' '.join(log_line))
+
+ if not log_str.endswith('\n'):
+ log_str = log_str + '\n'
+ self.log_write_func(log_str)
+
+ def large_separator(self, *msgs, sublevel=1, space_above=True):
+ sublevel = max(1, min(3, sublevel))
+ msg = ' '.join(msgs)
+ sep = '-' * int(23 * (5 - sublevel))
+ if not msg:
+ msg = sep
+ lines = [sep, msg, sep, '']
+ if space_above:
+ lines.insert(0, '')
+ self.log_write_func('\n'.join(lines))
+
+def level_str(level):
+ if level == L_TRACEBACK:
+ return L_TRACEBACK
+ if level <= L_DBG:
+ return 'DBG'
+ if level <= L_LOG:
+ return 'LOG'
+ return 'ERR'
+
+def _log_all_targets(origin, category, level, src, messages, named_items=None):
+ if origin is None:
+ origin = Origin.find_on_stack()
+ if isinstance(src, int):
+ src = get_src_from_caller(src + 1)
+ for target in LogTarget.all_targets:
+ target.log(origin, category, level, src, messages, named_items)
+
+def large_separator(*msgs, sublevel=1, space_above=True):
+ for target in LogTarget.all_targets:
+ target.large_separator(*msgs, sublevel=sublevel, space_above=space_above)
+
+def get_src_from_caller(levels_up=1):
+ # Poke into internal to avoid hitting the linecache which will make one or
+ # more calls to stat(2).
+ frame = sys._getframe(levels_up)
+ return '%s:%d' % (os.path.basename(frame.f_code.co_filename), frame.f_lineno)
+
+def get_src_from_exc_info(exc_info=None, levels_up=1):
+ if exc_info is None:
+ exc_info = sys.exc_info()
+ ftb = traceback.extract_tb(exc_info[2])
+ f,l,m,c = ftb[-levels_up]
+ f = os.path.basename(f)
+ return '%s:%s: %s' % (f, l, c)
+
+def get_line_for_src(src_path):
+ '''find a given source file on the stack and return the line number for
+ that file. (Used to indicate the position in a test script.)'''
+ etype, exception, tb = sys.exc_info()
+ if tb:
+ ftb = traceback.extract_tb(tb)
+ for f,l,m,c in ftb:
+ if f.endswith(src_path):
+ return l
+
+ for frame in stack():
+ caller = getframeinfo(frame[0])
+ if caller.filename.endswith(src_path):
+ return caller.lineno
+ return None
+
+def ctx(*name_items, **detail_items):
+ '''Store log context in the current frame. This string will appear as
+ origin information for exceptions thrown within the calling scope.'''
+ if not name_items and not detail_items:
+ ctx_obj(None)
+ if not detail_items and len(name_items) == 1 and isinstance(name_items[0], Origin):
+ ctx_obj(name_items[0])
+ else:
+ ctx_obj(compose_message(name_items, detail_items))
+
+def ctx_obj(origin_or_str):
+ f = sys._getframe(2)
+ if origin_or_str is None:
+ f.f_locals.pop(LOG_CTX_VAR, None)
+ else:
+ f.f_locals[LOG_CTX_VAR] = origin_or_str
+
+class OriginLoopError(Error):
+ pass
+
+class Origin:
+ '''
+ Base class for all classes that want to appear in the log.
+ It is a simple named marker to find in the stack frames.
+ This depends on the object instance named 'self' in each member class.
+
+ In addition, it provides a logging category and a globally unique ID for
+ each instance.
+
+ Each child class *must* call super().__init__(category, name), to allow
+ noting its parent origins.
+ '''
+
+ _global_id = None
+
+ _name = None
+ _origin_id = None
+ _log_category = None
+ _parent = None
+
+ @staticmethod
+ def find_on_stack(except_obj=None, f=None):
+ if f is None:
+ f = sys._getframe(2)
+ log_ctx_obj = None
+ origin = None
+ while f is not None:
+ l = f.f_locals
+
+ # if there is a log_ctx in the scope, add it, pointing to the next
+ # actual Origin class in the stack
+ log_ctx = l.get(LOG_CTX_VAR)
+ if log_ctx:
+ if isinstance(log_ctx, Origin):
+ new_log_ctx_obj = log_ctx
+ else:
+ new_log_ctx_obj = Origin(None, log_ctx, find_parent=False)
+ if log_ctx_obj is None:
+ log_ctx_obj = new_log_ctx_obj
+ else:
+ log_ctx_obj.highest_ancestor()._set_parent(new_log_ctx_obj)
+
+ obj = l.get('self')
+ if obj and isinstance(obj, Origin) and (except_obj is not obj):
+ origin = obj
+ break
+ f = f.f_back
+
+ if (origin is not None) and (log_ctx_obj is not None):
+ log_ctx_obj.highest_ancestor()._set_parent(origin)
+ p = log_ctx_obj
+ while p:
+ p._set_log_category(origin._log_category)
+ p = p._parent
+ if log_ctx_obj is not None:
+ return log_ctx_obj
+ # may return None
+ return origin
+
+ @staticmethod
+ def find_in_exc_info(exc_info):
+ tb = exc_info[2]
+ # get last tb ... I hope that's right
+ while tb.tb_next:
+ tb = tb.tb_next
+ return Origin.find_on_stack(f=tb.tb_frame)
+
+ def __init__(self, category, *name_items, find_parent=True, **detail_items):
+ self._set_log_category(category)
+ self.set_name(*name_items, **detail_items)
+ if find_parent:
+ self._set_parent(Origin.find_on_stack(except_obj=self))
+
+ def _set_parent(self, parent):
+ # make sure to avoid loops
+ p = parent
+ while p:
+ if p is self:
+ raise OriginLoopError('Origin parent loop')
+ p = p._parent
+ self._parent = parent
+
+ def set_name(self, *name_items, **detail_items):
+ '''Change the origin's name for log output; rather use the constructor.
+ This function can be used to change the name in case naming info
+ becomes available only after class creation (like a pid)'''
+ if name_items:
+ name = '-'.join([str(i) for i in name_items])
+ elif not detail_items:
+ name = self.__class__.__name__
+ else:
+ name = ''
+ if detail_items:
+ details = '(%s)' % (', '.join([("%s=%r" % (k,v))
+ for k,v in sorted(detail_items.items())]))
+ else:
+ details = ''
+ self._name = name + details
+
+ def name(self):
+ return self._name or self.__class__.__name__
+
+ __str__ = name
+ __repr__ = name
+
+ def origin_id(self):
+ if not self._origin_id:
+ if not Origin._global_id:
+ Origin._global_id = get_process_id()
+ self._origin_id = '%s-%s' % (self.name(), Origin._global_id)
+ return self._origin_id
+
+ def _set_log_category(self, category):
+ self._log_category = category
+
+ def redirect_stdout(self):
+ return contextlib.redirect_stdout(SafeRedirectStdout(self))
+
+ def ancestry(self):
+ origins = []
+ n = 10
+ origin = self
+ while origin:
+ origins.insert(0, origin)
+ origin = origin._parent
+ n -= 1
+ if n < 0:
+ break
+ return origins
+
+ def ancestry_str(self):
+ return '↪'.join([o.name() for o in self.ancestry()])
+
+ def highest_ancestor(self):
+ if self._parent:
+ return self._parent.highest_ancestor()
+ return self
+
+ def log(self, *messages, _src=3, **named_items):
+ '''same as log.log() but passes this object to skip looking up an origin'''
+ log(*messages, _origin=self, _src=_src, **named_items)
+
+ def dbg(self, *messages, _src=3, **named_items):
+ '''same as log.dbg() but passes this object to skip looking up an origin'''
+ dbg(*messages, _origin=self, _src=_src, **named_items)
+
+ def err(self, *messages, _src=3, **named_items):
+ '''same as log.err() but passes this object to skip looking up an origin'''
+ err(*messages, _origin=self, _src=_src, **named_items)
+
+class SafeRedirectStdout:
+ '''
+ To be able to use 'print' in test scripts, this is used to redirect stdout
+ to a test class' log() function. However, it turns out doing that breaks
+ python debugger sessions -- it uses extended features of stdout, and will
+ fail dismally if it finds this wrapper in sys.stdout. Luckily, overriding
+ __getattr__() to return the original sys.__stdout__ attributes for anything
+ else than write() makes the debugger session work nicely again!
+ '''
+ _log_line_buf = None
+
+ def __init__(self, origin):
+ self.origin = origin
+
+ def write(self, message):
+ lines = message.splitlines()
+ if not lines:
+ return
+ if self._log_line_buf:
+ lines[0] = self._log_line_buf + lines[0]
+ self._log_line_buf = None
+ if not message.endswith('\n'):
+ self._log_line_buf = lines[-1]
+ lines = lines[:-1]
+ for line in lines:
+ _log(messages=(line,),
+ origin=self.origin, level=L_LOG, src=2)
+
+ def __getattr__(self, name):
+ return sys.__stdout__.__getattribute__(name)
+
+def trace(exc_info=None, origin=None):
+ if exc_info is None:
+ exc_info = sys.exc_info()
+ if origin is None:
+ origin = Origin.find_in_exc_info(exc_info)
+ _log(messages=traceback.format_exception(*exc_info),
+ origin=origin, level=L_TRACEBACK)
+
+def log_exn():
+ exc_info = sys.exc_info()
+ origin = Origin.find_in_exc_info(exc_info)
+
+ etype, exception, tb = exc_info
+ if hasattr(exception, 'msg'):
+ msg = exception.msg
+ else:
+ msg = str(exception)
+
+ trace(exc_info, origin=origin)
+ _log(messages=('%s:' % str(etype.__name__), msg),
+ origin=origin, level=L_ERR, src=get_src_from_exc_info(exc_info))
+
+
+def set_all_levels(level):
+ for target in LogTarget.all_targets:
+ target.set_all_levels(level)
+
+def set_level(category, level):
+ for target in LogTarget.all_targets:
+ target.set_level(category, level)
+
+def style(**kwargs):
+ for target in LogTarget.all_targets:
+ target.style(**kwargs)
+
+def style_change(**kwargs):
+ for target in LogTarget.all_targets:
+ target.style_change(**kwargs)
+
+class TestsTarget(LogTarget):
+ 'LogTarget producing deterministic results for regression tests'
+ def __init__(self, log_write_func=None):
+ super().__init__(log_write_func)
+ self.style(time=False, src=False, origin_width=0)
+
+class FileLogTarget(LogTarget):
+ 'LogTarget to log to a file system path'
+ log_file = None
+
+ def __init__(self, log_path):
+ atexit.register(self.at_exit)
+ self.path = log_path
+ self.log_file = open(log_path, 'a')
+ super().__init__(self.write_to_log_and_flush)
+
+ def remove(self):
+ super().remove()
+ self.log_file.close()
+ self.log_file = None
+
+ def write_to_log_and_flush(self, msg):
+ self.log_file.write(msg)
+ self.log_file.flush()
+
+ def at_exit(self):
+ if self.log_file is not None:
+ self.log_file.flush()
+ self.log_file.close()
+
+def run_logging_exceptions(func, *func_args, return_on_failure=None, **func_kwargs):
+ try:
+ return func(*func_args, **func_kwargs)
+ except:
+ log_exn()
+ return return_on_failure
+
+def _compose_named_items(item):
+ 'make sure dicts are output sorted, for test expectations'
+ if is_dict(item):
+ return '{%s}' % (', '.join(
+ ['%s=%s' % (k, _compose_named_items(v))
+ for k,v in sorted(item.items())]))
+ return repr(item)
+
+def compose_message(messages, named_items):
+ msgs = [str(m) for m in messages]
+
+ if named_items:
+ # unfortunately needs to be sorted to get deterministic results
+ msgs.append(_compose_named_items(named_items))
+
+ return ' '.join(msgs)
+
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/src/osmo_gsm_tester/modem.py b/src/osmo_gsm_tester/modem.py
new file mode 100644
index 0000000..5106702
--- /dev/null
+++ b/src/osmo_gsm_tester/modem.py
@@ -0,0 +1,773 @@
+# osmo_gsm_tester: DBUS client to talk to ofono
+#
+# Copyright (C) 2016-2017 by sysmocom - s.f.m.c. GmbH
+#
+# Author: Neels Hofmeyr <neels@hofmeyr.de>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from . import log, util, sms, process
+from .event_loop import MainLoop
+
+from pydbus import SystemBus, Variant
+import os
+
+# Required for Gio.Cancellable.
+# See https://lazka.github.io/pgi-docs/Gio-2.0/classes/Cancellable.html#Gio.Cancellable
+from gi.module import get_introspection_module
+Gio = get_introspection_module('Gio')
+
+from gi.repository import GLib
+bus = SystemBus()
+
+I_MODEM = 'org.ofono.Modem'
+I_NETREG = 'org.ofono.NetworkRegistration'
+I_SMS = 'org.ofono.MessageManager'
+I_CONNMGR = 'org.ofono.ConnectionManager'
+I_CALLMGR = 'org.ofono.VoiceCallManager'
+I_CALL = 'org.ofono.VoiceCall'
+I_SS = 'org.ofono.SupplementaryServices'
+I_SIMMGR = 'org.ofono.SimManager'
+
+# See https://github.com/intgr/ofono/blob/master/doc/network-api.txt#L78
+NETREG_ST_REGISTERED = 'registered'
+NETREG_ST_ROAMING = 'roaming'
+
+NETREG_MAX_REGISTER_ATTEMPTS = 3
+
+class DeferredDBus:
+
+ def __init__(self, dbus_iface, handler):
+ self.handler = handler
+ self.subscription_id = dbus_iface.connect(self.receive_signal)
+
+ def receive_signal(self, *args, **kwargs):
+ MainLoop.defer(self.handler, *args, **kwargs)
+
+def dbus_connect(dbus_iface, handler):
+ '''This function shall be used instead of directly connecting DBus signals.
+ It ensures that we don't nest a glib main loop within another, and also
+ that we receive exceptions raised within the signal handlers. This makes it
+ so that a signal handler is invoked only after the DBus polling is through
+ by enlisting signals that should be handled in the
+ MainLoop defer queue (see DeferredDBus.receive_signal).'''
+ return DeferredDBus(dbus_iface, handler).subscription_id
+
+def systembus_get(path):
+ global bus
+ return bus.get('org.ofono', path)
+
+def list_modems():
+ root = systembus_get('/')
+ return sorted(root.GetModems())
+
+def get_dbuspath_from_syspath(syspath):
+ modems = list_modems()
+ for dbuspath, props in modems:
+ if props.get('SystemPath', '') == syspath:
+ return dbuspath
+ raise ValueError('could not find %s in modem list: %s' % (syspath, modems))
+
+
+def _async_result_handler(obj, result, user_data):
+ '''Generic callback dispatcher called from glib loop when an async method
+ call has returned. This callback is set up by method dbus_async_call.'''
+ (result_callback, error_callback, real_user_data) = user_data
+ try:
+ ret = obj.call_finish(result)
+ except Exception as e:
+ if isinstance(e, GLib.Error) and e.code == Gio.IOErrorEnum.CANCELLED:
+ log.dbg('DBus method cancelled')
+ return
+
+ if error_callback:
+ error_callback(obj, e, real_user_data)
+ else:
+ result_callback(obj, e, real_user_data)
+ return
+
+ ret = ret.unpack()
+ # to be compatible with standard Python behaviour, unbox
+ # single-element tuples and return None for empty result tuples
+ if len(ret) == 1:
+ ret = ret[0]
+ elif len(ret) == 0:
+ ret = None
+ result_callback(obj, ret, real_user_data)
+
+def dbus_async_call(instance, proxymethod, *proxymethod_args,
+ result_handler=None, error_handler=None,
+ user_data=None, timeout=30, cancellable=None,
+ **proxymethod_kwargs):
+ '''pydbus doesn't support asynchronous methods. This method adds support for
+ it until pydbus implements it'''
+
+ argdiff = len(proxymethod_args) - len(proxymethod._inargs)
+ if argdiff < 0:
+ raise TypeError(proxymethod.__qualname__ + " missing {} required positional argument(s)".format(-argdiff))
+ elif argdiff > 0:
+ raise TypeError(proxymethod.__qualname__ + " takes {} positional argument(s) but {} was/were given".format(len(proxymethod._inargs), len(proxymethod_args)))
+
+ timeout = timeout * 1000
+ user_data = (result_handler, error_handler, user_data)
+
+ # See https://lazka.github.io/pgi-docs/Gio-2.0/classes/DBusProxy.html#Gio.DBusProxy.call
+ ret = instance._bus.con.call(
+ instance._bus_name, instance._path,
+ proxymethod._iface_name, proxymethod.__name__,
+ GLib.Variant(proxymethod._sinargs, proxymethod_args),
+ GLib.VariantType.new(proxymethod._soutargs),
+ 0, timeout, cancellable,
+ _async_result_handler, user_data)
+
+def dbus_call_dismiss_error(log_obj, err_str, method):
+ try:
+ method()
+ except GLib.Error as e:
+ if Gio.DBusError.is_remote_error(e) and Gio.DBusError.get_remote_error(e) == err_str:
+ log_obj.log('Dismissed Dbus method error: %r' % e)
+ return
+ raise e
+
+class ModemDbusInteraction(log.Origin):
+ '''Work around inconveniences specific to pydbus and ofono.
+ ofono adds and removes DBus interfaces and notifies about them.
+ Upon changes we need a fresh pydbus object to benefit from that.
+ Watching the interfaces change is optional; be sure to call
+ watch_interfaces() if you'd like to have signals subscribed.
+ Related: https://github.com/LEW21/pydbus/issues/56
+ '''
+
+ modem_path = None
+ watch_props_subscription = None
+ _dbus_obj = None
+ interfaces = None
+
+ def __init__(self, modem_path):
+ self.modem_path = modem_path
+ super().__init__(log.C_BUS, self.modem_path)
+ self.interfaces = set()
+
+ # A dict listing signal handlers to connect, e.g.
+ # { I_SMS: ( ('IncomingMessage', self._on_incoming_message), ), }
+ self.required_signals = {}
+
+ # A dict collecting subscription tokens for connected signal handlers.
+ # { I_SMS: ( token1, token2, ... ), }
+ self.connected_signals = util.listdict()
+
+ def cleanup(self):
+ self.set_powered(False)
+ self.unwatch_interfaces()
+ for interface_name in list(self.connected_signals.keys()):
+ self.remove_signals(interface_name)
+
+ def __del__(self):
+ self.cleanup()
+
+ def get_new_dbus_obj(self):
+ return systembus_get(self.modem_path)
+
+ def dbus_obj(self):
+ if self._dbus_obj is None:
+ self._dbus_obj = self.get_new_dbus_obj()
+ return self._dbus_obj
+
+ def interface(self, interface_name):
+ try:
+ return self.dbus_obj()[interface_name]
+ except KeyError:
+ raise log.Error('Modem interface is not available:', interface_name)
+
+ def signal(self, interface_name, signal):
+ return getattr(self.interface(interface_name), signal)
+
+ def watch_interfaces(self):
+ self.unwatch_interfaces()
+ # Note: we are watching the properties on a get_new_dbus_obj() that is
+ # separate from the one used to interact with interfaces. We need to
+ # refresh the pydbus object to interact with Interfaces that have newly
+ # appeared, but exchanging the DBus object to watch Interfaces being
+ # enabled and disabled is racy: we may skip some removals and
+ # additions. Hence do not exchange this DBus object. We don't even
+ # need to store the dbus object used for this, we will not touch it
+ # again. We only store the signal subscription.
+ self.watch_props_subscription = dbus_connect(self.get_new_dbus_obj().PropertyChanged,
+ self.on_property_change)
+ self.on_interfaces_change(self.properties().get('Interfaces'))
+
+ def unwatch_interfaces(self):
+ if self.watch_props_subscription is None:
+ return
+ self.watch_props_subscription.disconnect()
+ self.watch_props_subscription = None
+
+ def on_property_change(self, name, value):
+ if name == 'Interfaces':
+ self.on_interfaces_change(value)
+ else:
+ self.dbg('%r.PropertyChanged() -> %s=%s' % (I_MODEM, name, value))
+
+ def on_interfaces_change(self, interfaces_now):
+ # First some logging.
+ now = set(interfaces_now)
+ additions = now - self.interfaces
+ removals = self.interfaces - now
+ self.interfaces = now
+ if not (additions or removals):
+ # nothing changed.
+ return
+
+ if additions:
+ self.dbg('interface enabled:', ', '.join(sorted(additions)))
+
+ if removals:
+ self.dbg('interface disabled:', ', '.join(sorted(removals)))
+
+ # The dbus object is now stale and needs refreshing before we
+ # access the next interface function.
+ self._dbus_obj = None
+
+ # If an interface disappeared, disconnect the signal handlers for it.
+ # Even though we're going to use a fresh dbus object for new
+ # subscriptions, we will still keep active subscriptions alive on the
+ # old dbus object which will linger, associated with the respective
+ # signal subscription.
+ for removed in removals:
+ self.remove_signals(removed)
+
+ # Connect signals for added interfaces.
+ for interface_name in additions:
+ self.connect_signals(interface_name)
+
+ def remove_signals(self, interface_name):
+ got = self.connected_signals.pop(interface_name, [])
+
+ if not got:
+ return
+
+ self.dbg('Disconnecting', len(got), 'signals for', interface_name)
+ for subscription in got:
+ subscription.disconnect()
+
+ def connect_signals(self, interface_name):
+ # If an interface was added, it must not have existed before. For
+ # paranoia, make sure we have no handlers for those.
+ self.remove_signals(interface_name)
+
+ want = self.required_signals.get(interface_name, [])
+ if not want:
+ return
+
+ self.dbg('Connecting', len(want), 'signals for', interface_name)
+ for signal, cb in self.required_signals.get(interface_name, []):
+ subscription = dbus_connect(self.signal(interface_name, signal), cb)
+ self.connected_signals.add(interface_name, subscription)
+
+ def has_interface(self, *interface_names):
+ try:
+ for interface_name in interface_names:
+ self.dbus_obj()[interface_name]
+ result = True
+ except KeyError:
+ result = False
+ self.dbg('has_interface(%s) ==' % (', '.join(interface_names)), result)
+ return result
+
+ def properties(self, iface=I_MODEM):
+ return self.dbus_obj()[iface].GetProperties()
+
+ def property_is(self, name, val, iface=I_MODEM):
+ is_val = self.properties(iface).get(name)
+ self.dbg(name, '==', is_val)
+ return is_val is not None and is_val == val
+
+ def set_bool(self, name, bool_val, iface=I_MODEM):
+ # to make sure any pending signals are received before we send out more DBus requests
+ MainLoop.poll()
+
+ val = bool(bool_val)
+ self.log('Setting', name, val)
+ self.interface(iface).SetProperty(name, Variant('b', val))
+
+ MainLoop.wait(self, self.property_is, name, bool_val)
+
+ def set_powered(self, powered=True):
+ self.set_bool('Powered', powered)
+
+ def set_online(self, online=True):
+ self.set_bool('Online', online)
+
+ def is_powered(self):
+ return self.property_is('Powered', True)
+
+ def is_online(self):
+ return self.property_is('Online', True)
+
+
+
+class Modem(log.Origin):
+ 'convenience for ofono Modem interaction'
+
+ CTX_PROT_IPv4 = 'ip'
+ CTX_PROT_IPv6 = 'ipv6'
+ CTX_PROT_IPv46 = 'dual'
+
+ def __init__(self, suite_run, conf):
+ self.suite_run = suite_run
+ self.conf = conf
+ self.syspath = conf.get('path')
+ self.dbuspath = get_dbuspath_from_syspath(self.syspath)
+ super().__init__(log.C_TST, self.dbuspath)
+ self.dbg('creating from syspath %s' % self.syspath)
+ self.msisdn = None
+ self._ki = None
+ self._imsi = None
+ self.run_dir = util.Dir(self.suite_run.get_test_run_dir().new_dir(self.name().strip('/')))
+ self.sms_received_list = []
+ self.dbus = ModemDbusInteraction(self.dbuspath)
+ self.register_attempts = 0
+ self.call_list = []
+ # one Cancellable can handle several concurrent methods.
+ self.cancellable = Gio.Cancellable.new()
+ self.dbus.required_signals = {
+ I_SMS: ( ('IncomingMessage', self._on_incoming_message), ),
+ I_NETREG: ( ('PropertyChanged', self._on_netreg_property_changed), ),
+ I_CONNMGR: ( ('PropertyChanged', self._on_connmgr_property_changed), ),
+ I_CALLMGR: ( ('PropertyChanged', self._on_callmgr_property_changed),
+ ('CallAdded', self._on_callmgr_call_added),
+ ('CallRemoved', self._on_callmgr_call_removed), ),
+ }
+ self.dbus.watch_interfaces()
+
+ def cleanup(self):
+ self.dbg('cleanup')
+ if self.cancellable:
+ self.cancel_pending_dbus_methods()
+ self.cancellable = None
+ if self.is_powered():
+ self.power_off()
+ self.dbus.cleanup()
+ self.dbus = None
+
+ def netns(self):
+ return os.path.basename(self.syspath.rstrip('/'))
+
+ def properties(self, *args, **kwargs):
+ '''Return a dict of properties on this modem. For the actual arguments,
+ see ModemDbusInteraction.properties(), which this function calls. The
+ returned dict is defined by ofono. An example is:
+ {'Lockdown': False,
+ 'Powered': True,
+ 'Model': 'MC7304',
+ 'Revision': 'SWI9X15C_05.05.66.00 r29972 CARMD-EV-FRMWR1 2015/10/08 08:36:28',
+ 'Manufacturer': 'Sierra Wireless, Incorporated',
+ 'Emergency': False,
+ 'Interfaces': ['org.ofono.SmartMessaging',
+ 'org.ofono.PushNotification',
+ 'org.ofono.MessageManager',
+ 'org.ofono.NetworkRegistration',
+ 'org.ofono.ConnectionManager',
+ 'org.ofono.SupplementaryServices',
+ 'org.ofono.RadioSettings',
+ 'org.ofono.AllowedAccessPoints',
+ 'org.ofono.SimManager',
+ 'org.ofono.LocationReporting',
+ 'org.ofono.VoiceCallManager'],
+ 'Serial': '356853054230919',
+ 'Features': ['sms', 'net', 'gprs', 'ussd', 'rat', 'sim', 'gps'],
+ 'Type': 'hardware',
+ 'Online': True}
+ '''
+ return self.dbus.properties(*args, **kwargs)
+
+ def set_powered(self, powered=True):
+ return self.dbus.set_powered(powered=powered)
+
+ def set_online(self, online=True):
+ return self.dbus.set_online(online=online)
+
+ def is_powered(self):
+ return self.dbus.is_powered()
+
+ def is_online(self):
+ return self.dbus.is_online()
+
+ def set_msisdn(self, msisdn):
+ self.msisdn = msisdn
+
+ def imsi(self):
+ if self._imsi is None:
+ if 'sim' in self.features():
+ if not self.is_powered():
+ self.set_powered()
+ # wait for SimManager iface to appear after we power on
+ MainLoop.wait(self, self.dbus.has_interface, I_SIMMGR, timeout=10)
+ simmgr = self.dbus.interface(I_SIMMGR)
+ # If properties are requested quickly, it may happen that Sim property is still not there.
+ MainLoop.wait(self, lambda: simmgr.GetProperties().get('SubscriberIdentity', None) is not None, timeout=10)
+ props = simmgr.GetProperties()
+ self.dbg('got SIM properties', props)
+ self._imsi = props.get('SubscriberIdentity', None)
+ else:
+ self._imsi = self.conf.get('imsi')
+ if self._imsi is None:
+ raise log.Error('No IMSI')
+ return self._imsi
+
+ def set_ki(self, ki):
+ self._ki = ki
+
+ def ki(self):
+ if self._ki is not None:
+ return self._ki
+ return self.conf.get('ki')
+
+ def auth_algo(self):
+ return self.conf.get('auth_algo', None)
+
+ def features(self):
+ return self.conf.get('features', [])
+
+ def _required_ifaces(self):
+ req_ifaces = (I_NETREG,)
+ req_ifaces += (I_SMS,) if 'sms' in self.features() else ()
+ req_ifaces += (I_SS,) if 'ussd' in self.features() else ()
+ req_ifaces += (I_CONNMGR,) if 'gprs' in self.features() else ()
+ req_ifaces += (I_SIMMGR,) if 'sim' in self.features() else ()
+ return req_ifaces
+
+ def _on_netreg_property_changed(self, name, value):
+ self.dbg('%r.PropertyChanged() -> %s=%s' % (I_NETREG, name, value))
+
+ def is_connected(self, mcc_mnc=None):
+ netreg = self.dbus.interface(I_NETREG)
+ prop = netreg.GetProperties()
+ status = prop.get('Status')
+ self.dbg('status:', status)
+ if not (status == NETREG_ST_REGISTERED or status == NETREG_ST_ROAMING):
+ return False
+ if mcc_mnc is None: # Any network is fine and we are registered.
+ return True
+ mcc = prop.get('MobileCountryCode')
+ mnc = prop.get('MobileNetworkCode')
+ if (mcc, mnc) == mcc_mnc:
+ return True
+ return False
+
+ def schedule_scan_register(self, mcc_mnc):
+ if self.register_attempts > NETREG_MAX_REGISTER_ATTEMPTS:
+ raise log.Error('Failed to find Network Operator', mcc_mnc=mcc_mnc, attempts=self.register_attempts)
+ self.register_attempts += 1
+ netreg = self.dbus.interface(I_NETREG)
+ self.dbg('Scanning for operators...')
+ # Scan method can take several seconds, and we don't want to block
+ # waiting for that. Make it async and try to register when the scan is
+ # finished.
+ register_func = self.scan_cb_register_automatic if mcc_mnc is None else self.scan_cb_register
+ result_handler = lambda obj, result, user_data: MainLoop.defer(register_func, result, user_data)
+ error_handler = lambda obj, e, user_data: MainLoop.defer(self.scan_cb_error_handler, e, mcc_mnc)
+ dbus_async_call(netreg, netreg.Scan, timeout=30, cancellable=self.cancellable,
+ result_handler=result_handler, error_handler=error_handler,
+ user_data=mcc_mnc)
+
+ def scan_cb_error_handler(self, e, mcc_mnc):
+ # It was detected that Scan() method can fail for some modems on some
+ # specific circumstances. For instance it fails with org.ofono.Error.Failed
+ # if the modem starts to register internally after we started Scan() and
+# the registering succeeds while we are still waiting for Scan() to finish.
+ # So far the easiest seems to check if we are now registered and
+ # otherwise schedule a scan again.
+ self.err('Scan() failed, retrying if needed:', e)
+ if not self.is_connected(mcc_mnc):
+ self.schedule_scan_register(mcc_mnc)
+ else:
+ self.log('Already registered with network', mcc_mnc)
+
+ def scan_cb_register_automatic(self, scanned_operators, mcc_mnc):
+ self.dbg('scanned operators: ', scanned_operators);
+ for op_path, op_prop in scanned_operators:
+ if op_prop.get('Status') == 'current':
+ mcc = op_prop.get('MobileCountryCode')
+ mnc = op_prop.get('MobileNetworkCode')
+ self.log('Already registered with network', (mcc, mnc))
+ return
+ self.log('Registering with the default network')
+ netreg = self.dbus.interface(I_NETREG)
+ dbus_call_dismiss_error(self, 'org.ofono.Error.InProgress', netreg.Register)
+
+
+ def scan_cb_register(self, scanned_operators, mcc_mnc):
+ self.dbg('scanned operators: ', scanned_operators);
+ matching_op_path = None
+ for op_path, op_prop in scanned_operators:
+ mcc = op_prop.get('MobileCountryCode')
+ mnc = op_prop.get('MobileNetworkCode')
+ if (mcc, mnc) == mcc_mnc:
+ if op_prop.get('Status') == 'current':
+ self.log('Already registered with network', mcc_mnc)
+ # We discovered the network and we are already registered
+ # with it. Avoid calling op.Register() in this case (it
+ # won't act as a NO-OP, it actually returns an error).
+ return
+ matching_op_path = op_path
+ break
+ if matching_op_path is None:
+ self.dbg('Failed to find Network Operator', mcc_mnc=mcc_mnc, attempts=self.register_attempts)
+ self.schedule_scan_register(mcc_mnc)
+ return
+ dbus_op = systembus_get(matching_op_path)
+ self.log('Registering with operator', matching_op_path, mcc_mnc)
+ try:
+ dbus_call_dismiss_error(self, 'org.ofono.Error.InProgress', dbus_op.Register)
+ except GLib.Error as e:
+ if Gio.DBusError.is_remote_error(e) and Gio.DBusError.get_remote_error(e) == 'org.ofono.Error.NotSupported':
+ self.log('modem does not support manual registering, attempting automatic registering')
+ self.scan_cb_register_automatic(scanned_operators, mcc_mnc)
+ return
+ raise e
+
+ def cancel_pending_dbus_methods(self):
+ self.cancellable.cancel()
+ # Cancel op is applied as a signal coming from glib mainloop, so we
+ # need to run it and wait for the callbacks to handle cancellations.
+ MainLoop.poll()
+ # once it has been triggered, create a new one for next operation:
+ self.cancellable = Gio.Cancellable.new()
+
+ def power_off(self):
+ if self.dbus.has_interface(I_CONNMGR) and self.is_attached():
+ self.detach()
+ self.set_online(False)
+ self.set_powered(False)
+ req_ifaces = self._required_ifaces()
+ for iface in req_ifaces:
+ MainLoop.wait(self, lambda: not self.dbus.has_interface(iface), timeout=10)
+
+ def power_cycle(self):
+ 'Power the modem and put it online, power cycle it if it was already on'
+ req_ifaces = self._required_ifaces()
+ if self.is_powered():
+ self.dbg('Power cycling')
+ MainLoop.sleep(self, 1.0) # workaround for ofono bug OS#3064
+ self.power_off()
+ else:
+ self.dbg('Powering on')
+ self.set_powered()
+ self.set_online()
+ MainLoop.wait(self, self.dbus.has_interface, *req_ifaces, timeout=10)
+
+ def connect(self, mcc_mnc=None):
+ 'Connect to MCC+MNC'
+ if (mcc_mnc is not None) and (len(mcc_mnc) != 2 or None in mcc_mnc):
+ raise log.Error('mcc_mnc value is invalid. It should be None or contain both valid mcc and mnc values:', mcc_mnc=mcc_mnc)
+ # if test called connect() before and async scanning has not finished, we need to get rid of it:
+ self.cancel_pending_dbus_methods()
+ self.power_cycle()
+ self.register_attempts = 0
+ if self.is_connected(mcc_mnc):
+ self.log('Already registered with', mcc_mnc if mcc_mnc else 'default network')
+ else:
+ self.log('Connect to', mcc_mnc if mcc_mnc else 'default network')
+ self.schedule_scan_register(mcc_mnc)
+
+ def is_attached(self):
+ connmgr = self.dbus.interface(I_CONNMGR)
+ prop = connmgr.GetProperties()
+ attached = prop.get('Attached')
+ self.dbg('attached:', attached)
+ return attached
+
+ def attach(self, allow_roaming=False):
+ self.dbg('attach')
+ if self.is_attached():
+ self.detach()
+ connmgr = self.dbus.interface(I_CONNMGR)
+ prop = connmgr.SetProperty('RoamingAllowed', Variant('b', allow_roaming))
+ prop = connmgr.SetProperty('Powered', Variant('b', True))
+
+ def detach(self):
+ self.dbg('detach')
+ connmgr = self.dbus.interface(I_CONNMGR)
+ prop = connmgr.SetProperty('RoamingAllowed', Variant('b', False))
+ prop = connmgr.SetProperty('Powered', Variant('b', False))
+ connmgr.DeactivateAll()
+ connmgr.ResetContexts() # Requires Powered=false
+
+ def activate_context(self, apn='internet', user='ogt', pwd='', protocol='ip'):
+ self.dbg('activate_context', apn=apn, user=user, protocol=protocol)
+
+ connmgr = self.dbus.interface(I_CONNMGR)
+ ctx_path = connmgr.AddContext('internet')
+
+ ctx = systembus_get(ctx_path)
+ ctx.SetProperty('AccessPointName', Variant('s', apn))
+ ctx.SetProperty('Username', Variant('s', user))
+ ctx.SetProperty('Password', Variant('s', pwd))
+ ctx.SetProperty('Protocol', Variant('s', protocol))
+
+ # Activate can only be called after we are attached
+ ctx.SetProperty('Active', Variant('b', True))
+ MainLoop.wait(self, lambda: ctx.GetProperties()['Active'] == True)
+ self.log('context activated', path=ctx_path, apn=apn, user=user, properties=ctx.GetProperties())
+ return ctx_path
+
+ def deactivate_context(self, ctx_id):
+ self.dbg('deactivate_context', path=ctx_id)
+ ctx = systembus_get(ctx_id)
+ ctx.SetProperty('Active', Variant('b', False))
+ MainLoop.wait(self, lambda: ctx.GetProperties()['Active'] == False)
+ self.dbg('deactivate_context active=false, removing', path=ctx_id)
+ connmgr = self.dbus.interface(I_CONNMGR)
+ connmgr.RemoveContext(ctx_id)
+ self.log('context deactivated', path=ctx_id)
+
+ def run_netns_wait(self, name, popen_args):
+ proc = process.NetNSProcess(name, self.run_dir.new_dir(name), self.netns(), popen_args,
+ env={})
+ proc.launch_sync()
+
+ def setup_context_data_plane(self, ctx_id):
+ self.dbg('setup_context_data', path=ctx_id)
+ ctx = systembus_get(ctx_id)
+ ctx_settings = ctx.GetProperties().get('Settings', None)
+ if not ctx_settings:
+ raise log.Error('%s no Settings found! No way to get iface!' % ctx_id)
+ iface = ctx_settings.get('Interface', None)
+ if not iface:
+ raise log.Error('%s Settings contains no iface! %r' % (ctx_id, repr(ctx_settings)))
+ self.run_netns_wait('ifup', ('ip', 'link', 'set', 'dev', iface, 'up'))
+ self.run_netns_wait('dhcp', ('udhcpc', '-q', '-i', iface))
+
+ def sms_send(self, to_msisdn_or_modem, *tokens):
+ if isinstance(to_msisdn_or_modem, Modem):
+ to_msisdn = to_msisdn_or_modem.msisdn
+ tokens = list(tokens)
+ tokens.append('to ' + to_msisdn_or_modem.name())
+ else:
+ to_msisdn = str(to_msisdn_or_modem)
+ msg = sms.Sms(self.msisdn, to_msisdn, 'from ' + self.name(), *tokens)
+ self.log('sending sms to MSISDN', to_msisdn, sms=msg)
+ mm = self.dbus.interface(I_SMS)
+ mm.SendMessage(to_msisdn, str(msg))
+ return msg
+
+ def _on_incoming_message(self, message, info):
+ self.log('Incoming SMS:', repr(message))
+ self.dbg(info=info)
+ self.sms_received_list.append((message, info))
+
+ def sms_was_received(self, sms_obj):
+ for msg, info in self.sms_received_list:
+ if sms_obj.matches(msg):
+ self.log('SMS received as expected:', repr(msg))
+ self.dbg(info=info)
+ return True
+ return False
+
+ def call_id_list(self):
+ self.dbg('call_id_list: %r' % self.call_list)
+ return self.call_list
+
+ def call_dial(self, to_msisdn_or_modem):
+ if isinstance(to_msisdn_or_modem, Modem):
+ to_msisdn = to_msisdn_or_modem.msisdn
+ else:
+ to_msisdn = str(to_msisdn_or_modem)
+ self.dbg('Dialing:', to_msisdn)
+ cmgr = self.dbus.interface(I_CALLMGR)
+ call_obj_path = cmgr.Dial(to_msisdn, 'default')
+ if call_obj_path not in self.call_list:
+ self.dbg('Adding %s to call list' % call_obj_path)
+ self.call_list.append(call_obj_path)
+ else:
+ self.dbg('Dial returned already existing call')
+ return call_obj_path
+
+ def _find_call_msisdn_state(self, msisdn, state):
+ cmgr = self.dbus.interface(I_CALLMGR)
+ ret = cmgr.GetCalls()
+ for obj_path, props in ret:
+ if props['LineIdentification'] == msisdn and props['State'] == state:
+ return obj_path
+ return None
+
+ def call_wait_incoming(self, caller_msisdn_or_modem, timeout=60):
+ if isinstance(caller_msisdn_or_modem, Modem):
+ caller_msisdn = caller_msisdn_or_modem.msisdn
+ else:
+ caller_msisdn = str(caller_msisdn_or_modem)
+ self.dbg('Waiting for incoming call from:', caller_msisdn)
+ MainLoop.wait(self, lambda: self._find_call_msisdn_state(caller_msisdn, 'incoming') is not None, timeout=timeout)
+ return self._find_call_msisdn_state(caller_msisdn, 'incoming')
+
+ def call_answer(self, call_id):
+ self.dbg('Answer call %s' % call_id)
+ assert self.call_state(call_id) == 'incoming'
+ call_dbus_obj = systembus_get(call_id)
+ call_dbus_obj.Answer()
+
+ def call_hangup(self, call_id):
+ self.dbg('Hang up call %s' % call_id)
+ call_dbus_obj = systembus_get(call_id)
+ call_dbus_obj.Hangup()
+
+ def call_is_active(self, call_id):
+ return self.call_state(call_id) == 'active'
+
+ def call_state(self, call_id):
+ try:
+ call_dbus_obj = systembus_get(call_id)
+ props = call_dbus_obj.GetProperties()
+ state = props.get('State')
+ except Exception as e:
+ self.log('asking call state for non existent call')
+ log.log_exn()
+ state = 'disconnected'
+ self.dbg('call state: %s' % state)
+ return state
+
+ def _on_callmgr_call_added(self, obj_path, properties):
+ self.dbg('%r.CallAdded() -> %s=%r' % (I_CALLMGR, obj_path, repr(properties)))
+ if obj_path not in self.call_list:
+ self.call_list.append(obj_path)
+ else:
+ self.dbg('Call already exists %r' % obj_path)
+
+ def _on_callmgr_call_removed(self, obj_path):
+ self.dbg('%r.CallRemoved() -> %s' % (I_CALLMGR, obj_path))
+ if obj_path in self.call_list:
+ self.call_list.remove(obj_path)
+ else:
+ self.dbg('Trying to remove non-existing call %r' % obj_path)
+
+ def _on_callmgr_property_changed(self, name, value):
+ self.dbg('%r.PropertyChanged() -> %s=%s' % (I_CALLMGR, name, value))
+
+ def _on_connmgr_property_changed(self, name, value):
+ self.dbg('%r.PropertyChanged() -> %s=%s' % (I_CONNMGR, name, value))
+
+ def info(self, keys=('Manufacturer', 'Model', 'Revision', 'Serial')):
+ props = self.properties()
+ return ', '.join(['%s: %r'%(k,props.get(k)) for k in keys])
+
+ def log_info(self, *args, **kwargs):
+ self.log(self.info(*args, **kwargs))
+
+ def ussd_send(self, command):
+ ss = self.dbus.interface(I_SS)
+ service_type, response = ss.Initiate(command)
+ return response
+
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/src/osmo_gsm_tester/ms_driver.py b/src/osmo_gsm_tester/ms_driver.py
new file mode 100644
index 0000000..634370f
--- /dev/null
+++ b/src/osmo_gsm_tester/ms_driver.py
@@ -0,0 +1,158 @@
+# ms_driver: Launch OsmocomBB mobile's virtually connected to a BTS
+#
+# Copyright (C) 2018 by Holger Hans Peter Freyther
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from datetime import timedelta
+from . import log, util
+from osmo_ms_driver.cdf import cdfs
+from osmo_ms_driver.event_server import EventServer
+from osmo_ms_driver.simple_loop import SimpleLoop
+from osmo_ms_driver.location_update_test import MassUpdateLocationTest
+from osmo_ms_driver.starter import BinaryOptions
+
+import os.path
+import shutil
+import tempfile
+
+class Subscriber(log.Origin):
+ def __init__(self, imsi, ki):
+ super().__init__(log.C_RUN, 'subscriber')
+ self._imsi = imsi
+ self._ki = ki
+ self._auth_algo = "comp128v1"
+ self._msisdn = None
+
+ def msisdn(self):
+ return self._msisdn
+
+ def set_msisdn(self, msisdn):
+ self._msisdn = msisdn
+
+ def imsi(self):
+ return self._imsi
+
+ def ki(self):
+ return self._ki
+
+ def auth_algo(self):
+ return self._auth_algo
+
+class MsDriver(log.Origin):
+
+ def __init__(self, suite_run):
+ super().__init__(log.C_RUN, 'ms-driver')
+ self._suite_run = suite_run
+
+ # TODO: take config out of the test scenario
+ self._num_ms = 10
+ self._time_start = timedelta(seconds=60)
+ self._time_step = timedelta(milliseconds=100)
+ self._test_duration = timedelta(seconds=120)
+ self._cdf = cdfs["ease_in_out"](self._time_start, self._time_step)
+ self._loop = SimpleLoop()
+ self._test_case = None
+ self.event_server_sk_tmp_dir = None
+
+ if len(self.event_server_path().encode()) > 107:
+ raise log.Error('Path for event_server socket is longer than max allowed len for unix socket path (107):', self.event_server_path())
+
+ def event_server_path(self):
+ if self.event_server_sk_tmp_dir is None:
+ self.event_server_sk_tmp_dir = tempfile.mkdtemp('', 'ogteventserversk')
+ return os.path.join(self.event_server_sk_tmp_dir, 'osmo_ms_driver.unix')
+
+ def build_binary_options(self):
+ """Builds an instance of BinaryOptions.
+
+ Populates the BinaryOptions by searching the virtphy and mobile
+ application within the trial directory.
+ """
+
+ # Get the base directory for the virtphy/mobile application
+ inst = util.Dir(os.path.abspath(self.suite_run.trial.get_inst('osmocom-bb')))
+
+ # Assume these are dynamically linked and verify there is a lib dir.
+ lib = inst.child('lib')
+ if not os.path.isdir(lib):
+ raise RuntimeError('No lib/ in %r' % inst)
+ env = { 'LD_LIBRARY_PATH': util.prepend_library_path(lib) }
+
+ def check_and_return_binary(name):
+ """Checks the binary exists and returns the path."""
+ binary = inst.child('bin', name)
+ if not os.path.isfile(name):
+ raise RuntimeError('Binary missing: %r' % binary)
+ return binary
+
+ virtphy = check_and_return_binary('virtphy')
+ mobile = check_and_return_binary('mobile')
+ return BinaryOptions(virtphy, mobile, env)
+
+ def configure(self):
+ """
+ Configures the subscribers, tests and registration server. Needs to be
+ called after the complete configuration of this driver.
+ """
+ event_server_path = self.event_server_path()
+
+ self._ev_server = EventServer("ev_server", event_server_path)
+ self._ev_server.listen(self._loop)
+ options = self.build_binary_options()
+ self._test_case = MassUpdateLocationTest("mass", options, self._num_ms, self._cdf,
+ self._ev_server,
+ util.Dir(self.suite_run.get_test_run_dir()),
+ suite_run=self._suite_run)
+
+ # TODO: We should pass subscribers down to the test and not get it from
+ # there.
+ self._subs = [Subscriber(imsi=mob.imsi(), ki=mob.ki()) for mob in self._test_case.mobiles()]
+
+
+ def ms_subscribers(self):
+ """
+ Returns a list of 'subscribers' that were configured in the
+ current scenario.
+ """
+ if not hasattr(self, '_subs'):
+ self.configure()
+ return self._subs
+
+ def run_test(self):
+ """
+ Runs the configured tests by starting the configured amount of mobile
+ devices according to their schedule. Returns once all tests succeeded
+ or the configured timeout has passed.
+ """
+ if not hasattr(self, '_subs'):
+ self.configure()
+ self._test_case.run_test(self._loop, self._test_duration)
+
+ def print_stats(self):
+ """
+ Prints statistics about the test run.
+ """
+ self._test_case.print_stats()
+
+ def cleanup(self):
+ """
+ Cleans up the driver (e.g. AF_UNIX files).
+ """
+
+ # Clean-up the temporary directory.
+ if self.event_server_sk_tmp_dir:
+ shutil.rmtree(path=self.event_server_sk_tmp_dir)
+
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/src/osmo_gsm_tester/osmo_bsc.py b/src/osmo_gsm_tester/osmo_bsc.py
new file mode 100644
index 0000000..ec9c3b8
--- /dev/null
+++ b/src/osmo_gsm_tester/osmo_bsc.py
@@ -0,0 +1,161 @@
+# osmo_gsm_tester: specifics for running an osmo-bsc
+#
+# Copyright (C) 2016-2017 by sysmocom - s.f.m.c. GmbH
+#
+# Author: Neels Hofmeyr <neels@hofmeyr.de>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import re
+import pprint
+
+from . import log, util, config, template, process, osmo_ctrl, pcap_recorder
+
+class OsmoBsc(log.Origin):
+
+ def __init__(self, suite_run, msc, mgw, stp, ip_address):
+ super().__init__(log.C_RUN, 'osmo-bsc_%s' % ip_address.get('addr'))
+ self.run_dir = None
+ self.config_file = None
+ self.process = None
+ self.encryption = None
+ self.rsl_ip = None
+ self.suite_run = suite_run
+ self.ip_address = ip_address
+ self.bts = []
+ self.msc = msc
+ self.mgw = mgw
+ self.stp = stp
+
+ def start(self):
+ self.log('Starting osmo-bsc')
+ self.run_dir = util.Dir(self.suite_run.get_test_run_dir().new_dir(self.name()))
+ self.configure()
+
+ inst = util.Dir(os.path.abspath(self.suite_run.trial.get_inst('osmo-bsc')))
+
+ binary = inst.child('bin', 'osmo-bsc')
+ if not os.path.isfile(binary):
+ raise RuntimeError('Binary missing: %r' % binary)
+ lib = inst.child('lib')
+ if not os.path.isdir(lib):
+ raise RuntimeError('No lib/ in %r' % inst)
+
+ if self.rsl_ip and self.addr() != self.rsl_ip:
+            filter = '(host %s or host %s) and port not 22' % (self.addr(), self.rsl_ip)
+ else:
+ filter = 'host %s and port not 22' % self.addr()
+ pcap_recorder.PcapRecorder(self.suite_run, self.run_dir.new_dir('pcap'), None, filter)
+
+ env = { 'LD_LIBRARY_PATH': util.prepend_library_path(lib) }
+
+ self.dbg(run_dir=self.run_dir, binary=binary, env=env)
+ self.process = process.Process(self.name(), self.run_dir,
+ (binary, '-c',
+ os.path.abspath(self.config_file)),
+ env=env)
+ self.suite_run.remember_to_stop(self.process)
+ self.process.launch()
+
+ def configure(self):
+ self.config_file = self.run_dir.new_file('osmo-bsc.cfg')
+ self.dbg(config_file=self.config_file)
+
+ values = dict(bsc=config.get_defaults('bsc'))
+ config.overlay(values, self.suite_run.config())
+ config.overlay(values, dict(bsc=dict(ip_address=self.ip_address)))
+ config.overlay(values, self.mgw.conf_for_client())
+ config.overlay(values, self.stp.conf_for_client())
+
+ bts_list = []
+ for bts in self.bts:
+ bts_list.append(bts.conf_for_bsc())
+ config.overlay(values, dict(bsc=dict(net=dict(bts_list=bts_list))))
+
+ # runtime parameters:
+ if self.encryption is not None:
+ encryption_vty = util.encryption2osmovty(self.encryption)
+ else:
+ encryption_vty = util.encryption2osmovty(values['bsc']['net']['encryption'])
+ config.overlay(values, dict(bsc=dict(net=dict(encryption=encryption_vty))))
+
+ if self.rsl_ip is not None:
+ config.overlay(values, dict(bsc=dict(net=dict(rsl_ip=self.rsl_ip))))
+
+ self.dbg('BSC CONFIG:\n' + pprint.pformat(values))
+
+ with open(self.config_file, 'w') as f:
+ r = template.render('osmo-bsc.cfg', values)
+ self.dbg(r)
+ f.write(r)
+
+ def addr(self):
+ return self.ip_address.get('addr')
+
+ def set_encryption(self, val):
+ self.encryption = val
+
+ def set_rsl_ip(self, ip_addr):
+ '''Overwrite RSL IPaddr option sent to all BTS during OML config. Useful
+ for tests only willing to use osmo-bsc to do the OML setup but using
+ other external entities to test the RSL path, such as TTCN3 tests.'''
+ self.rsl_ip = ip_addr
+
+ def bts_add(self, bts):
+ self.bts.append(bts)
+ bts.set_bsc(self)
+
+ def bts_num(self, bts):
+ 'Provide number id used by OsmoNITB to identify configured BTS'
+ # We take advantage from the fact that VTY code assigns VTY in ascending
+ # order through the bts nodes found. As we populate the config iterating
+ # over this list, we have a 1:1 match in indexes.
+ return self.bts.index(bts)
+
+ def bts_is_connected(self, bts):
+ return OsmoBscCtrl(self).bts_is_connected(self.bts_num(bts))
+
+ def running(self):
+ return not self.process.terminated()
+
+
+class OsmoBscCtrl(log.Origin):
+ PORT = 4249
+ BTS_OML_STATE_VAR = "bts.%d.oml-connection-state"
+    BTS_OML_STATE_RE = re.compile(r"GET_REPLY (\d+) bts\.\d+\.oml-connection-state (?P<oml_state>\w+)")
+
+ def __init__(self, bsc):
+ self.bsc = bsc
+ super().__init__(log.C_BUS, 'CTRL(%s:%d)' % (self.bsc.addr(), OsmoBscCtrl.PORT))
+
+ def ctrl(self):
+ return osmo_ctrl.OsmoCtrl(self.bsc.addr(), OsmoBscCtrl.PORT)
+
+ def bts_is_connected(self, bts_num):
+ with self.ctrl() as ctrl:
+ ctrl.do_get(OsmoBscCtrl.BTS_OML_STATE_VAR % bts_num)
+ data = ctrl.receive()
+ while (len(data) > 0):
+ (answer, data) = ctrl.remove_ipa_ctrl_header(data)
+ answer_str = answer.decode('utf-8')
+ answer_str = answer_str.replace('\n', ' ')
+ res = OsmoBscCtrl.BTS_OML_STATE_RE.match(answer_str)
+ if res:
+ oml_state = str(res.group('oml_state'))
+ if oml_state == 'connected':
+ return True
+ return False
+
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/src/osmo_gsm_tester/osmo_ctrl.py b/src/osmo_gsm_tester/osmo_ctrl.py
new file mode 100644
index 0000000..be27b75
--- /dev/null
+++ b/src/osmo_gsm_tester/osmo_ctrl.py
@@ -0,0 +1,89 @@
+
+# osmo_gsm_tester: specifics for running a sysmoBTS
+#
+# Copyright (C) 2016-2017 by sysmocom - s.f.m.c. GmbH
+#
+# Author: Neels Hofmeyr <neels@hofmeyr.de>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import socket
+import struct
+
+from . import log
+
+class CtrlInterfaceExn(Exception):
+ pass
+
+class OsmoCtrl(log.Origin):
+
+ def __init__(self, host, port):
+ super().__init__(log.C_BUS, 'Ctrl', host=host, port=port)
+ self.host = host
+ self.port = port
+ self.sck = None
+
+ def prefix_ipa_ctrl_header(self, data):
+ if isinstance(data, str):
+ data = data.encode('utf-8')
+ s = struct.pack(">HBB", len(data)+1, 0xee, 0)
+ return s + data
+
+ def remove_ipa_ctrl_header(self, data):
+ if (len(data) < 4):
+ raise CtrlInterfaceExn("Answer too short!")
+ (plen, ipa_proto, osmo_proto) = struct.unpack(">HBB", data[:4])
+ if (plen + 3 > len(data)):
+ self.err('Warning: Wrong payload length', expected=plen, got=len(data)-3)
+ if (ipa_proto != 0xee or osmo_proto != 0):
+ raise CtrlInterfaceExn("Wrong protocol in answer!")
+ return data[4:plen+3], data[plen+3:]
+
+ def connect(self):
+ self.dbg('Connecting')
+ self.sck = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ self.sck.connect((self.host, self.port))
+ self.sck.setblocking(1)
+
+ def disconnect(self):
+ self.dbg('Disconnecting')
+ if self.sck is not None:
+ self.sck.close()
+
+ def _send(self, data):
+ self.dbg('Sending', data=data)
+ data = self.prefix_ipa_ctrl_header(data)
+ self.sck.send(data)
+
+ def receive(self, length = 1024):
+ data = self.sck.recv(length)
+ self.dbg('Receiving', data=data)
+ return data
+
+ def do_set(self, var, value, id=0):
+ setmsg = "SET %s %s %s" %(id, var, value)
+ self._send(setmsg)
+
+ def do_get(self, var, id=0):
+ getmsg = "GET %s %s" %(id, var)
+ self._send(getmsg)
+
+ def __enter__(self):
+ self.connect()
+ return self
+
+ def __exit__(self, *exc_info):
+ self.disconnect()
+
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/src/osmo_gsm_tester/osmo_ggsn.py b/src/osmo_gsm_tester/osmo_ggsn.py
new file mode 100644
index 0000000..2fdc792
--- /dev/null
+++ b/src/osmo_gsm_tester/osmo_ggsn.py
@@ -0,0 +1,94 @@
+# osmo_gsm_tester: specifics for running an openggsn
+#
+# Copyright (C) 2016-2017 by sysmocom - s.f.m.c. GmbH
+#
+# Author: Pau Espin Pedrol <pespin@sysmocom.de>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import pprint
+
+from . import log, util, config, template, process, osmo_ctrl, pcap_recorder
+
+class OsmoGgsn(log.Origin):
+
+ def __init__(self, suite_run, ip_address):
+ super().__init__(log.C_RUN, 'osmo-ggsn_%s' % ip_address.get('addr'))
+ self.run_dir = None
+ self.config_file = None
+ self.process = None
+ self.suite_run = suite_run
+ self.ip_address = ip_address
+
+ def start(self):
+ self.log('Starting osmo-ggsn')
+ self.run_dir = util.Dir(self.suite_run.get_test_run_dir().new_dir(self.name()))
+ self.configure()
+
+ inst = util.Dir(os.path.abspath(self.suite_run.trial.get_inst('osmo-ggsn')))
+
+ binary = inst.child('bin', 'osmo-ggsn')
+ if not os.path.isfile(binary):
+ raise log.Error('Binary missing:', binary)
+ lib = inst.child('lib')
+ if not os.path.isdir(lib):
+ raise log.Error('No lib/ in', inst)
+
+ pcap_recorder.PcapRecorder(self.suite_run, self.run_dir.new_dir('pcap'), None,
+ 'host %s' % self.addr())
+
+ env = {}
+
+ # setting capabilities will later disable use of LD_LIBRARY_PATH from ELF loader -> modify RPATH instead.
+ self.log('Setting RPATH for osmo-ggsn')
+ util.change_elf_rpath(binary, util.prepend_library_path(lib), self.run_dir.new_dir('patchelf'))
+ # osmo-ggsn requires CAP_NET_ADMIN to create tunnel devices: ioctl(TUNSETIFF):
+ self.log('Applying CAP_NET_ADMIN capability to osmo-ggsn')
+ util.setcap_net_admin(binary, self.run_dir.new_dir('setcap_net_admin'))
+
+ self.dbg(run_dir=self.run_dir, binary=binary, env=env)
+ self.process = process.Process(self.name(), self.run_dir,
+ (binary,
+ '-c', os.path.abspath(self.config_file)),
+ env=env)
+ self.suite_run.remember_to_stop(self.process)
+ self.process.launch()
+
+ def configure(self):
+ self.config_file = self.run_dir.new_file('osmo-ggsn.cfg')
+ self.dbg(config_file=self.config_file)
+
+ values = dict(ggsn=config.get_defaults('ggsn'))
+ config.overlay(values, self.suite_run.config())
+ config.overlay(values, dict(ggsn=dict(ip_address=self.ip_address)))
+ config.overlay(values, dict(ggsn=dict(statedir=self.run_dir.new_dir('statedir'))))
+
+ self.dbg('GGSN CONFIG:\n' + pprint.pformat(values))
+
+ with open(self.config_file, 'w') as f:
+ r = template.render('osmo-ggsn.cfg', values)
+ self.dbg(r)
+ f.write(r)
+
+ def conf_for_client(self):
+ return dict(ggsn=dict(ip_address=self.ip_address))
+
+ def addr(self):
+ return self.ip_address.get('addr')
+
+ def running(self):
+ return not self.process.terminated()
+
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/src/osmo_gsm_tester/osmo_hlr.py b/src/osmo_gsm_tester/osmo_hlr.py
new file mode 100644
index 0000000..a4633e3
--- /dev/null
+++ b/src/osmo_gsm_tester/osmo_hlr.py
@@ -0,0 +1,152 @@
+# osmo_gsm_tester: specifics for running an osmo-hlr
+#
+# Copyright (C) 2016-2017 by sysmocom - s.f.m.c. GmbH
+#
+# Author: Neels Hofmeyr <neels@hofmeyr.de>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import re
+import pprint
+import sqlite3
+
+from . import log, util, config, template, process, osmo_ctrl, pcap_recorder
+
+class OsmoHlr(log.Origin):
+ run_dir = None
+ config_file = None
+ process = None
+ next_subscriber_id = 1
+
+ def __init__(self, suite_run, ip_address):
+ super().__init__(log.C_RUN, 'osmo-hlr_%s' % ip_address.get('addr'))
+ self.run_dir = None
+ self.config_file = None
+ self.process = None
+ self.next_subscriber_id = 1
+ self.suite_run = suite_run
+ self.ip_address = ip_address
+
+ def start(self):
+ self.log('Starting osmo-hlr')
+ self.run_dir = util.Dir(self.suite_run.get_test_run_dir().new_dir(self.name()))
+ self.configure()
+
+ inst = util.Dir(os.path.abspath(self.suite_run.trial.get_inst('osmo-hlr')))
+
+ binary = inst.child('bin', 'osmo-hlr')
+ if not os.path.isfile(binary):
+ raise log.Error('Binary missing:', binary)
+ lib = inst.child('lib')
+ if not os.path.isdir(lib):
+ raise log.Error('No lib/ in', inst)
+
+ # bootstrap an empty hlr.db
+ self.db_file = self.run_dir.new_file('hlr.db')
+ sql_input = inst.child('share/doc/osmo-hlr/sql/hlr.sql')
+ if not os.path.isfile(sql_input):
+ raise log.Error('hlr.sql missing:', sql_input)
+ self.run_local('create_hlr_db', ('/bin/sh', '-c', 'sqlite3 %r < %r' % (self.db_file, sql_input)))
+
+ pcap_recorder.PcapRecorder(self.suite_run, self.run_dir.new_dir('pcap'), None,
+ 'host %s' % self.addr())
+
+ env = { 'LD_LIBRARY_PATH': util.prepend_library_path(lib) }
+
+ self.dbg(run_dir=self.run_dir, binary=binary, env=env)
+ self.process = process.Process(self.name(), self.run_dir,
+ (binary,
+ '-c', os.path.abspath(self.config_file),
+ '--database', self.db_file),
+ env=env)
+ self.suite_run.remember_to_stop(self.process)
+ self.process.launch()
+
+ def configure(self):
+ self.config_file = self.run_dir.new_file('osmo-hlr.cfg')
+ self.dbg(config_file=self.config_file)
+
+ values = dict(hlr=config.get_defaults('hlr'))
+ config.overlay(values, self.suite_run.config())
+ config.overlay(values, dict(hlr=dict(ip_address=self.ip_address)))
+
+ self.dbg('HLR CONFIG:\n' + pprint.pformat(values))
+
+ with open(self.config_file, 'w') as f:
+ r = template.render('osmo-hlr.cfg', values)
+ self.dbg(r)
+ f.write(r)
+
+ def addr(self):
+ return self.ip_address.get('addr')
+
+ def running(self):
+ return not self.process.terminated()
+
+ def run_local(self, name, popen_args):
+ run_dir = self.run_dir.new_dir(name)
+ proc = process.Process(name, run_dir, popen_args)
+ proc.launch()
+ proc.wait()
+ if proc.result != 0:
+ log.ctx(proc)
+ raise log.Error('Exited in error')
+
+ def subscriber_add(self, modem, msisdn=None, algo_str=None):
+ if msisdn is None:
+ msisdn = self.suite_run.resources_pool.next_msisdn(modem)
+ modem.set_msisdn(msisdn)
+ subscriber_id = self.next_subscriber_id
+ self.next_subscriber_id += 1
+
+ if algo_str is None:
+ algo_str = modem.auth_algo() or util.OSMO_AUTH_ALGO_NONE
+
+ if algo_str != util.OSMO_AUTH_ALGO_NONE and not modem.ki():
+ raise log.Error("Auth algo %r selected but no KI specified" % algo_str)
+
+ algo = util.osmo_auth_algo_by_name(algo_str)
+
+ self.log('Add subscriber', msisdn=msisdn, imsi=modem.imsi(), subscriber_id=subscriber_id,
+ algo_str=algo_str, algo=algo)
+ conn = sqlite3.connect(self.db_file)
+ try:
+ c = conn.cursor()
+ c.execute('insert into subscriber (id, imsi, msisdn) values (?, ?, ?)',
+                    (subscriber_id, modem.imsi(), msisdn,))
+ c.execute('insert into auc_2g (subscriber_id, algo_id_2g, ki) values (?, ?, ?)',
+ (subscriber_id, algo, modem.ki(),))
+ conn.commit()
+ finally:
+ conn.close()
+ return subscriber_id
+
+ def subscriber_delete(self, modem):
+        self.log('Delete subscriber', imsi=modem.imsi())
+ conn = sqlite3.connect(self.db_file)
+ try:
+ c = conn.cursor()
+ c.execute('select id from subscriber where imsi = ?', (modem.imsi(),))
+ subscriber_id = c.fetchone()[0]
+ c.execute('delete from subscriber where id = ?', (subscriber_id,))
+ c.execute('delete from auc_2g where subscriber_id = ?', (subscriber_id,))
+ conn.commit()
+ finally:
+ conn.close()
+
+ def conf_for_client(self):
+ return dict(hlr=dict(ip_address=self.ip_address))
+
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/src/osmo_gsm_tester/osmo_mgcpgw.py b/src/osmo_gsm_tester/osmo_mgcpgw.py
new file mode 100644
index 0000000..668e4ce
--- /dev/null
+++ b/src/osmo_gsm_tester/osmo_mgcpgw.py
@@ -0,0 +1,86 @@
+# osmo_gsm_tester: specifics for running an osmo-mgcp-gw (osmo-bsc_mgcp)
+#
+# Copyright (C) 2017 by sysmocom - s.f.m.c. GmbH
+#
+# Author: Neels Hofmeyr <neels@hofmeyr.de>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import pprint
+
+from . import log, util, config, template, process, osmo_ctrl, pcap_recorder
+
+class OsmoMgcpgw(log.Origin):
+
+ def __init__(self, suite_run, ip_address, bts_ip):
+ super().__init__(log.C_RUN, 'osmo-mgcpgw_%s' % ip_address.get('addr'))
+ self.run_dir = None
+ self.config_file = None
+ self.process = None
+ self.suite_run = suite_run
+ self.ip_address = ip_address
+ # hack: so far mgcpgw needs one specific BTS IP.
+ self.bts_ip = bts_ip
+
+ def start(self):
+ self.log('Starting osmo-mgcpgw')
+ self.run_dir = util.Dir(self.suite_run.get_test_run_dir().new_dir(self.name()))
+ self.configure()
+ inst = util.Dir(os.path.abspath(self.suite_run.trial.get_inst('osmo-nitb')))
+ binary = inst.child('bin', 'osmo-bsc_mgcp')
+ if not os.path.isfile(binary):
+ raise RuntimeError('Binary missing: %r' % binary)
+ lib = inst.child('lib')
+ if not os.path.isdir(lib):
+ raise RuntimeError('No lib/ in %r' % inst)
+
+ pcap_recorder.PcapRecorder(self.suite_run, self.run_dir.new_dir('pcap'), None,
+ 'host %s and port not 22' % self.addr())
+
+ env = { 'LD_LIBRARY_PATH': util.prepend_library_path(lib) }
+
+ self.dbg(run_dir=self.run_dir, binary=binary, env=env)
+ self.process = process.Process(self.name(), self.run_dir,
+ (binary, '-c',
+ os.path.abspath(self.config_file)),
+ env=env)
+ self.suite_run.remember_to_stop(self.process)
+ self.process.launch()
+
+ def configure(self):
+ self.config_file = self.run_dir.new_file('osmo-mgcpgw.cfg')
+ self.dbg(config_file=self.config_file)
+
+ values = dict(mgcpgw=config.get_defaults('mgcpgw'))
+ config.overlay(values, self.suite_run.config())
+ config.overlay(values, dict(mgcpgw=dict(ip_address=self.ip_address, bts_ip=self.bts_ip)))
+
+ self.dbg('MGCPGW CONFIG:\n' + pprint.pformat(values))
+
+ with open(self.config_file, 'w') as f:
+ r = template.render('osmo-mgcpgw.cfg', values)
+ self.dbg(r)
+ f.write(r)
+
+ def addr(self):
+ return self.ip_address.get('addr')
+
+ def conf_for_msc(self):
+ return dict(mgw=dict(ip_address=self.ip_address))
+
+ def running(self):
+ return not self.process.terminated()
+
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/src/osmo_gsm_tester/osmo_mgw.py b/src/osmo_gsm_tester/osmo_mgw.py
new file mode 100644
index 0000000..1d5596e
--- /dev/null
+++ b/src/osmo_gsm_tester/osmo_mgw.py
@@ -0,0 +1,84 @@
+# osmo_gsm_tester: specifics for running an osmo-mgw
+#
+# Copyright (C) 2017 by sysmocom - s.f.m.c. GmbH
+#
+# Author: Pau Espin Pedrol <pespin@sysmocom.de>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import pprint
+
+from . import log, util, config, template, process, osmo_ctrl, pcap_recorder
+
+class OsmoMgw(log.Origin):
+
+ def __init__(self, suite_run, ip_address):
+ super().__init__(log.C_RUN, 'osmo-mgw_%s' % ip_address.get('addr'))
+ self.run_dir = None
+ self.config_file = None
+ self.process = None
+ self.suite_run = suite_run
+ self.ip_address = ip_address
+
+ def start(self):
+ self.log('Starting osmo-mgw')
+ self.run_dir = util.Dir(self.suite_run.get_test_run_dir().new_dir(self.name()))
+ self.configure()
+ inst = util.Dir(os.path.abspath(self.suite_run.trial.get_inst('osmo-mgw')))
+ binary = inst.child('bin', 'osmo-mgw')
+ if not os.path.isfile(binary):
+ raise RuntimeError('Binary missing: %r' % binary)
+ lib = inst.child('lib')
+ if not os.path.isdir(lib):
+ raise RuntimeError('No lib/ in %r' % inst)
+
+ pcap_recorder.PcapRecorder(self.suite_run, self.run_dir.new_dir('pcap'), None,
+ 'host %s and port not 22' % self.addr())
+
+ env = { 'LD_LIBRARY_PATH': util.prepend_library_path(lib) }
+
+ self.dbg(run_dir=self.run_dir, binary=binary, env=env)
+ self.process = process.Process(self.name(), self.run_dir,
+ (binary, '-c',
+ os.path.abspath(self.config_file)),
+ env=env)
+ self.suite_run.remember_to_stop(self.process)
+ self.process.launch()
+
+ def configure(self):
+ self.config_file = self.run_dir.new_file('osmo-mgw.cfg')
+ self.dbg(config_file=self.config_file)
+
+ values = dict(mgw=config.get_defaults('mgw'))
+ config.overlay(values, self.suite_run.config())
+ config.overlay(values, dict(mgw=dict(ip_address=self.ip_address)))
+
+ self.dbg('MGW CONFIG:\n' + pprint.pformat(values))
+
+ with open(self.config_file, 'w') as f:
+ r = template.render('osmo-mgw.cfg', values)
+ self.dbg(r)
+ f.write(r)
+
+ def addr(self):
+ return self.ip_address.get('addr')
+
+ def conf_for_client(self):
+ return dict(mgw=dict(ip_address=self.ip_address))
+
+ def running(self):
+ return not self.process.terminated()
+
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/src/osmo_gsm_tester/osmo_msc.py b/src/osmo_gsm_tester/osmo_msc.py
new file mode 100644
index 0000000..192926c
--- /dev/null
+++ b/src/osmo_gsm_tester/osmo_msc.py
@@ -0,0 +1,159 @@
+# osmo_gsm_tester: specifics for running an osmo-msc
+#
+# Copyright (C) 2016-2017 by sysmocom - s.f.m.c. GmbH
+#
+# Author: Neels Hofmeyr <neels@hofmeyr.de>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import pprint
+
+from . import log, util, config, template, process, osmo_ctrl, pcap_recorder, smsc
+
+class OsmoMsc(log.Origin):
+
+ def __init__(self, suite_run, hlr, mgw, stp, ip_address):
+ super().__init__(log.C_RUN, 'osmo-msc_%s' % ip_address.get('addr'))
+ self.run_dir = None
+ self.config_file = None
+ self.process = None
+ self.config = None
+ self.encryption = None
+ self.authentication = None
+ self.suite_run = suite_run
+ self.ip_address = ip_address
+ self.hlr = hlr
+ self.mgw = mgw
+ self.stp = stp
+ self.smsc = smsc.Smsc((ip_address.get('addr'), 2775))
+
+ def start(self):
+ self.log('Starting osmo-msc')
+ self.run_dir = util.Dir(self.suite_run.get_test_run_dir().new_dir(self.name()))
+ self.configure()
+ inst = util.Dir(os.path.abspath(self.suite_run.trial.get_inst('osmo-msc')))
+ binary = inst.child('bin', 'osmo-msc')
+ if not os.path.isfile(binary):
+ raise RuntimeError('Binary missing: %r' % binary)
+ lib = inst.child('lib')
+ if not os.path.isdir(lib):
+ raise RuntimeError('No lib/ in %r' % inst)
+
+ pcap_recorder.PcapRecorder(self.suite_run, self.run_dir.new_dir('pcap'), None,
+ 'host %s and port not 22' % self.addr())
+
+ env = { 'LD_LIBRARY_PATH': util.prepend_library_path(lib) }
+
+ self.dbg(run_dir=self.run_dir, binary=binary, env=env)
+ self.process = process.Process(self.name(), self.run_dir,
+ (binary, '-c',
+ os.path.abspath(self.config_file)),
+ env=env)
+ self.suite_run.remember_to_stop(self.process)
+ self.process.launch()
+
+ def configure(self):
+ self.config_file = self.run_dir.new_file('osmo-msc.cfg')
+ self.dbg(config_file=self.config_file)
+
+ values = dict(msc=config.get_defaults('msc'))
+ config.overlay(values, self.suite_run.config())
+ config.overlay(values, dict(msc=dict(ip_address=self.ip_address)))
+ config.overlay(values, self.mgw.conf_for_client())
+ config.overlay(values, self.hlr.conf_for_client())
+ config.overlay(values, self.stp.conf_for_client())
+ config.overlay(values, self.smsc.get_config())
+
+ # runtime parameters:
+ if self.encryption is not None:
+ encryption_vty = util.encryption2osmovty(self.encryption)
+ else:
+ encryption_vty = util.encryption2osmovty(values['msc']['net']['encryption'])
+ config.overlay(values, dict(msc=dict(net=dict(encryption=encryption_vty))))
+ if self.authentication is not None:
+ config.overlay(values, dict(msc=dict(net=dict(authentication=self.authentication))))
+
+
+ self.config = values
+
+ self.dbg('MSC CONFIG:\n' + pprint.pformat(values))
+
+ with open(self.config_file, 'w') as f:
+ r = template.render('osmo-msc.cfg', values)
+ self.dbg(r)
+ f.write(r)
+
+ def addr(self):
+ return self.ip_address.get('addr')
+
+ def set_encryption(self, val):
+ self.encryption = val
+
+ def set_authentication(self, val):
+ if val is None:
+            self.authentication = None
+ return
+ self.authentication = "required" if val else "optional"
+
+ def mcc(self):
+ return self.config['msc']['net']['mcc']
+
+ def mnc(self):
+ return self.config['msc']['net']['mnc']
+
+ def mcc_mnc(self):
+ return (self.mcc(), self.mnc())
+
+ def subscriber_attached(self, *modems):
+ return self.imsi_attached(*[m.imsi() for m in modems])
+
+ def imsi_attached(self, *imsis):
+ attached = self.imsi_list_attached()
+ log.dbg('attached:', attached)
+ return all([(imsi in attached) for imsi in imsis])
+
+ def imsi_list_attached(self):
+ return OsmoMscCtrl(self).subscriber_list_active()
+
+ def running(self):
+ return not self.process.terminated()
+
+
+class OsmoMscCtrl(log.Origin):
+ PORT = 4255
+ SUBSCR_LIST_ACTIVE_VAR = 'subscriber-list-active-v1'
+
+ def __init__(self, msc):
+ self.msc = msc
+ super().__init__(log.C_BUS, 'CTRL(%s:%d)' % (self.msc.addr(), self.PORT))
+
+ def ctrl(self):
+ return osmo_ctrl.OsmoCtrl(self.msc.addr(), self.PORT)
+
+ def subscriber_list_active(self):
+ aslist_str = ""
+ with self.ctrl() as ctrl:
+ ctrl.do_get(self.SUBSCR_LIST_ACTIVE_VAR)
+ # This is legacy code from the old osmo-gsm-tester.
+ # looks like this doesn't work for long data.
+ data = ctrl.receive()
+ while (len(data) > 0):
+ (answer, data) = ctrl.remove_ipa_ctrl_header(data)
+ answer_str = answer.decode('utf-8')
+ answer_str = answer_str.replace('\n', ' ')
+ aslist_str = answer_str
+ return aslist_str
+
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/src/osmo_gsm_tester/osmo_nitb.py b/src/osmo_gsm_tester/osmo_nitb.py
new file mode 100644
index 0000000..66ab2a6
--- /dev/null
+++ b/src/osmo_gsm_tester/osmo_nitb.py
@@ -0,0 +1,236 @@
+# osmo_gsm_tester: specifics for running an osmo-nitb
+#
+# Copyright (C) 2016-2017 by sysmocom - s.f.m.c. GmbH
+#
+# Author: Neels Hofmeyr <neels@hofmeyr.de>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import re
+import pprint
+
+from . import log, util, config, template, process, osmo_ctrl, pcap_recorder, smsc
+
+class OsmoNitb(log.Origin):
+    """Manage one local osmo-nitb process.
+
+    Renders the VTY config from layered defaults + suite config, launches the
+    binary from the trial's 'osmo-nitb' inst dir, and provisions/queries
+    subscribers through the CTRL interface (OsmoNitbCtrl).
+    """
+
+    def __init__(self, suite_run, ip_address):
+        super().__init__(log.C_RUN, 'osmo-nitb_%s' % ip_address.get('addr'))
+        # Populated later by start()/configure():
+        self.run_dir = None
+        self.config_file = None
+        self.process = None
+        self.encryption = None  # runtime override, see set_encryption()
+        self.suite_run = suite_run
+        self.ip_address = ip_address
+        self.bts = []  # BTS objects registered via bts_add(), in config order
+        self.smsc = smsc.Smsc((ip_address.get('addr'), 2775))
+
+    def start(self):
+        """Render the config, start pcap recording and launch osmo-nitb."""
+        self.log('Starting osmo-nitb')
+        self.run_dir = util.Dir(self.suite_run.get_test_run_dir().new_dir(self.name()))
+        self.configure()
+        inst = util.Dir(os.path.abspath(self.suite_run.trial.get_inst('osmo-nitb')))
+        binary = inst.child('bin', 'osmo-nitb')
+        if not os.path.isfile(binary):
+            raise RuntimeError('Binary missing: %r' % binary)
+        lib = inst.child('lib')
+        if not os.path.isdir(lib):
+            raise RuntimeError('No lib/ in %r' % inst)
+
+        # Record all traffic to/from our address (except ssh) for the test report.
+        pcap_recorder.PcapRecorder(self.suite_run, self.run_dir.new_dir('pcap'), None,
+                                   'host %s and port not 22' % self.addr())
+
+        env = { 'LD_LIBRARY_PATH': util.prepend_library_path(lib) }
+
+        self.dbg(run_dir=self.run_dir, binary=binary, env=env)
+        self.process = process.Process(self.name(), self.run_dir,
+                                       (binary, '-c',
+                                        os.path.abspath(self.config_file)),
+                                       env=env)
+        # Register for teardown before launch, so cleanup happens even on failure.
+        self.suite_run.remember_to_stop(self.process)
+        self.process.launch()
+
+    def configure(self):
+        """Compose the config dict (defaults < suite < runtime) and render it."""
+        self.config_file = self.run_dir.new_file('osmo-nitb.cfg')
+        self.dbg(config_file=self.config_file)
+
+        values = dict(nitb=config.get_defaults('nitb'))
+        config.overlay(values, self.suite_run.config())
+        config.overlay(values, dict(nitb=dict(ip_address=self.ip_address)))
+
+        # One config entry per registered BTS, in registration order (see bts_num()).
+        bts_list = []
+        for bts in self.bts:
+            bts_list.append(bts.conf_for_bsc())
+        config.overlay(values, dict(nitb=dict(net=dict(bts_list=bts_list))))
+        config.overlay(values, self.smsc.get_config())
+
+        # runtime parameters:
+        if self.encryption is not None:
+            encryption_vty = util.encryption2osmovty(self.encryption)
+        else:
+            encryption_vty = util.encryption2osmovty(values['nitb']['net']['encryption'])
+        config.overlay(values, dict(nitb=dict(net=dict(encryption=encryption_vty))))
+
+        # Keep the final merged dict for later queries (mcc(), mnc(), ...).
+        self.config = values
+
+        self.dbg('NITB CONFIG:\n' + pprint.pformat(values))
+
+        with open(self.config_file, 'w') as f:
+            r = template.render('osmo-nitb.cfg', values)
+            self.dbg(r)
+            f.write(r)
+
+    def addr(self):
+        # IP address (str) this NITB uses, from the ip_address resource dict.
+        return self.ip_address.get('addr')
+
+    def bts_add(self, bts):
+        # Register a BTS; it will be included in the config on next configure().
+        self.bts.append(bts)
+        bts.set_bsc(self)
+
+    def set_encryption(self, val):
+        # Runtime override for the net encryption, applied on next configure().
+        self.encryption = val
+
+    def mcc(self):
+        # Mobile Country Code from the merged config (available after configure()).
+        return self.config['nitb']['net']['mcc']
+
+    def mnc(self):
+        # Mobile Network Code from the merged config (available after configure()).
+        return self.config['nitb']['net']['mnc']
+
+    def mcc_mnc(self):
+        # Convenience (mcc, mnc) tuple.
+        return (self.mcc(), self.mnc())
+
+    def bts_num(self, bts):
+        'Provide number id used by OsmoNITB to identify configured BTS'
+        # We take advantage from the fact that VTY code assigns VTY in ascending
+        # order through the bts nodes found. As we populate the config iterating
+        # over this list, we have a 1:1 match in indexes.
+        return self.bts.index(bts)
+
+    def subscriber_add(self, modem, msisdn=None, algo=None):
+        """Provision the modem's IMSI via CTRL; allocate an MSISDN if not given."""
+        if msisdn is None:
+            msisdn = self.suite_run.resources_pool.next_msisdn(modem)
+        modem.set_msisdn(msisdn)
+
+        # Derive the auth algorithm from the modem unless explicitly given.
+        if not algo:
+            alg_str = modem.auth_algo()
+            if not alg_str or alg_str == 'none':
+                algo = None
+            elif alg_str == 'comp128v1':
+                algo = 'comp128v1'
+            elif alg_str == 'xor':
+                algo = 'xor'
+        # An auth algorithm without a KI cannot work; fail early.
+        if algo is not None and not modem.ki():
+            raise log.Error("Auth algo %r selected and no KI specified" % algo)
+
+        self.log('Add subscriber', msisdn=msisdn, imsi=modem.imsi())
+        OsmoNitbCtrl(self).subscriber_add(modem.imsi(), msisdn, modem.ki(), algo)
+
+    def subscriber_delete(self, modem):
+        # Remove the modem's IMSI from the NITB's subscriber DB via CTRL.
+        self.log('Delete subscriber', imsi=modem.imsi())
+        OsmoNitbCtrl(self).subscriber_delete(modem.imsi())
+
+    def subscriber_attached(self, *modems):
+        # True when every given modem's IMSI is currently attached.
+        return self.imsi_attached(*[m.imsi() for m in modems])
+
+    def imsi_attached(self, *imsis):
+        # True when all given IMSIs appear in the active-subscriber list.
+        attached = self.imsi_list_attached()
+        self.dbg('attached:', attached)
+        return all([(imsi in attached) for imsi in imsis])
+
+    def imsi_list_attached(self):
+        # Query the CTRL interface for currently attached subscribers.
+        return OsmoNitbCtrl(self).subscriber_list_active()
+
+    def bts_is_connected(self, bts):
+        # Ask via CTRL whether the given BTS's OML link is up.
+        return OsmoNitbCtrl(self).bts_is_connected(self.bts_num(bts))
+
+    def running(self):
+        # True while the launched osmo-nitb process has not terminated.
+        return not self.process.terminated()
+
+
+class OsmoNitbCtrl(log.Origin):
+ PORT = 4249
+ SUBSCR_MODIFY_VAR = 'subscriber-modify-v1'
+ SUBSCR_MODIFY_REPLY_RE = re.compile("SET_REPLY (\d+) %s OK" % SUBSCR_MODIFY_VAR)
+ SUBSCR_DELETE_VAR = 'subscriber-delete-v1'
+ SUBSCR_DELETE_REPLY_RE = re.compile("SET_REPLY (\d+) %s Removed" % SUBSCR_DELETE_VAR)
+ SUBSCR_LIST_ACTIVE_VAR = 'subscriber-list-active-v1'
+ BTS_OML_STATE_VAR = "bts.%d.oml-connection-state"
+ BTS_OML_STATE_RE = re.compile("GET_REPLY (\d+) bts.\d+.oml-connection-state (?P<oml_state>\w+)")
+
+ def __init__(self, nitb):
+ self.nitb = nitb
+ super().__init__(log.C_BUS, 'CTRL(%s:%d)' % (self.nitb.addr(), OsmoNitbCtrl.PORT))
+
+ def ctrl(self):
+ return osmo_ctrl.OsmoCtrl(self.nitb.addr(), OsmoNitbCtrl.PORT)
+
+ def subscriber_add(self, imsi, msisdn, ki=None, algo=None):
+ created = False
+
+ if algo:
+ value = '%s,%s,%s,%s' % (imsi,msisdn,algo,ki)
+ else:
+ value = '%s,%s' % (imsi, msisdn)
+
+ with self.ctrl() as ctrl:
+ ctrl.do_set(OsmoNitbCtrl.SUBSCR_MODIFY_VAR, value)
+ data = ctrl.receive()
+ (answer, data) = ctrl.remove_ipa_ctrl_header(data)
+ answer_str = answer.decode('utf-8')
+ res = OsmoNitbCtrl.SUBSCR_MODIFY_REPLY_RE.match(answer_str)
+ if not res:
+ raise RuntimeError('Cannot create subscriber %r (answer=%r)' % (imsi, answer_str))
+ self.dbg('Created subscriber', imsi=imsi, msisdn=msisdn)
+
+ def subscriber_delete(self, imsi):
+ with self.ctrl() as ctrl:
+ ctrl.do_set(OsmoNitbCtrl.SUBSCR_DELETE_VAR, imsi)
+ data = ctrl.receive()
+ (answer, data) = ctrl.remove_ipa_ctrl_header(data)
+ answer_str = answer.decode('utf-8')
+ res = OsmoNitbCtrl.SUBSCR_DELETE_REPLY_RE.match(answer_str)
+ if not res:
+ raise RuntimeError('Cannot delete subscriber %r (answer=%r)' % (imsi, answer_str))
+ self.dbg('Deleted subscriber', imsi=imsi)
+
+ def subscriber_list_active(self):
+ aslist_str = ""
+ with self.ctrl() as ctrl:
+ ctrl.do_get(OsmoNitbCtrl.SUBSCR_LIST_ACTIVE_VAR)
+ # This is legacy code from the old osmo-gsm-tester.
+ # looks like this doesn't work for long data.
+ data = ctrl.receive()
+ while (len(data) > 0):
+ (answer, data) = ctrl.remove_ipa_ctrl_header(data)
+ answer_str = answer.decode('utf-8')
+ answer_str = answer_str.replace('\n', ' ')
+ aslist_str = answer_str
+ return aslist_str
+
+ def bts_is_connected(self, bts_num):
+ with self.ctrl() as ctrl:
+ ctrl.do_get(OsmoNitbCtrl.BTS_OML_STATE_VAR % bts_num)
+ data = ctrl.receive()
+ while (len(data) > 0):
+ (answer, data) = ctrl.remove_ipa_ctrl_header(data)
+ answer_str = answer.decode('utf-8')
+ answer_str = answer_str.replace('\n', ' ')
+ res = OsmoNitbCtrl.BTS_OML_STATE_RE.match(answer_str)
+ if res:
+ oml_state = str(res.group('oml_state'))
+ if oml_state == 'connected':
+ return True
+ return False
+
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/src/osmo_gsm_tester/osmo_sgsn.py b/src/osmo_gsm_tester/osmo_sgsn.py
new file mode 100644
index 0000000..07cf059
--- /dev/null
+++ b/src/osmo_gsm_tester/osmo_sgsn.py
@@ -0,0 +1,93 @@
+# osmo_gsm_tester: specifics for running an osmo-sgsn
+#
+# Copyright (C) 2016-2017 by sysmocom - s.f.m.c. GmbH
+#
+# Author: Pau Espin Pedrol <pespin@sysmocom.de>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import pprint
+
+from . import log, util, config, template, process, osmo_ctrl, pcap_recorder
+
+class OsmoSgsn(log.Origin):
+    """Manage one local osmo-sgsn process.
+
+    Renders the config from layered defaults + suite config + the HLR's and
+    GGSN's client configs, then launches the binary from the trial's
+    'osmo-sgsn' inst dir.
+    """
+
+    def __init__(self, suite_run, hlr, ggsn, ip_address):
+        super().__init__(log.C_RUN, 'osmo-sgsn_%s' % ip_address.get('addr'))
+        # Populated later by start()/configure():
+        self.run_dir = None
+        self.config_file = None
+        self.process = None
+        self.suite_run = suite_run
+        self.hlr = hlr
+        self.ggsn = ggsn
+        self.ip_address = ip_address
+
+    def start(self):
+        """Render the config, start pcap recording and launch osmo-sgsn."""
+        self.log('Starting osmo-sgsn')
+        self.run_dir = util.Dir(self.suite_run.get_test_run_dir().new_dir(self.name()))
+        self.configure()
+
+        inst = util.Dir(os.path.abspath(self.suite_run.trial.get_inst('osmo-sgsn')))
+
+        binary = inst.child('bin', 'osmo-sgsn')
+        if not os.path.isfile(binary):
+            raise log.Error('Binary missing:', binary)
+        lib = inst.child('lib')
+        if not os.path.isdir(lib):
+            raise log.Error('No lib/ in', inst)
+
+        # Record all traffic to/from our address for the test report.
+        pcap_recorder.PcapRecorder(self.suite_run, self.run_dir.new_dir('pcap'), None,
+                                   'host %s' % self.addr())
+
+        env = { 'LD_LIBRARY_PATH': util.prepend_library_path(lib) }
+
+        self.dbg(run_dir=self.run_dir, binary=binary, env=env)
+        self.process = process.Process(self.name(), self.run_dir,
+                                       (binary,
+                                        '-c', os.path.abspath(self.config_file)),
+                                       env=env)
+        # Register for teardown before launch, so cleanup happens even on failure.
+        self.suite_run.remember_to_stop(self.process)
+        self.process.launch()
+
+    def configure(self):
+        """Compose the config dict (defaults < suite < peers) and render it."""
+        self.config_file = self.run_dir.new_file('osmo-sgsn.cfg')
+        self.dbg(config_file=self.config_file)
+
+        values = dict(sgsn=config.get_defaults('sgsn'))
+        config.overlay(values, self.suite_run.config())
+        config.overlay(values, dict(sgsn=dict(ip_address=self.ip_address)))
+        # Pull in the connection parameters of our HLR and GGSN peers.
+        config.overlay(values, self.hlr.conf_for_client())
+        config.overlay(values, self.ggsn.conf_for_client())
+
+        self.dbg('SGSN CONFIG:\n' + pprint.pformat(values))
+
+        with open(self.config_file, 'w') as f:
+            r = template.render('osmo-sgsn.cfg', values)
+            self.dbg(r)
+            f.write(r)
+
+    def conf_for_client(self):
+        # Config snippet a client needs to reach this SGSN.
+        return dict(sgsn=dict(ip_address=self.ip_address))
+
+    def addr(self):
+        # IP address (str) this SGSN uses, from the ip_address resource dict.
+        return self.ip_address.get('addr')
+
+    def running(self):
+        # True while the launched osmo-sgsn process has not terminated.
+        return not self.process.terminated()
+
+    def bts_add(self, bts):
+        # Attach a BTS to this SGSN (the BTS keeps the reference; no local state).
+        bts.set_sgsn(self)
+
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/src/osmo_gsm_tester/osmo_stp.py b/src/osmo_gsm_tester/osmo_stp.py
new file mode 100644
index 0000000..5a7f0d3
--- /dev/null
+++ b/src/osmo_gsm_tester/osmo_stp.py
@@ -0,0 +1,86 @@
+# osmo_gsm_tester: specifics for running an osmo-stp
+#
+# Copyright (C) 2017 by sysmocom - s.f.m.c. GmbH
+#
+# Author: Neels Hofmeyr <neels@hofmeyr.de>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import pprint
+
+from . import log, util, config, template, process, pcap_recorder
+
+class OsmoStp(log.Origin):
+    """Manage one local osmo-stp (SS7 signal transfer point) process."""
+
+    def __init__(self, suite_run, ip_address):
+        super().__init__(log.C_RUN, 'osmo-stp_%s' % ip_address.get('addr'))
+        # Populated later by start()/configure():
+        self.run_dir = None
+        self.config_file = None
+        self.process = None
+        self.suite_run = suite_run
+        self.ip_address = ip_address
+
+    def start(self):
+        """Render the config, start pcap recording and launch osmo-stp."""
+        self.log('Starting osmo-stp')
+        self.run_dir = util.Dir(self.suite_run.get_test_run_dir().new_dir(self.name()))
+        self.configure()
+
+        inst = util.Dir(os.path.abspath(self.suite_run.trial.get_inst('osmo-stp')))
+
+        binary = inst.child('bin', 'osmo-stp')
+        if not os.path.isfile(binary):
+            raise RuntimeError('Binary missing: %r' % binary)
+        lib = inst.child('lib')
+        if not os.path.isdir(lib):
+            raise RuntimeError('No lib/ in %r' % inst)
+
+        # Record all traffic to/from our address (except ssh) for the test report.
+        pcap_recorder.PcapRecorder(self.suite_run, self.run_dir.new_dir('pcap'), None,
+                                   'host %s and port not 22' % self.addr())
+
+        env = { 'LD_LIBRARY_PATH': util.prepend_library_path(lib) }
+
+        self.dbg(run_dir=self.run_dir, binary=binary, env=env)
+        self.process = process.Process(self.name(), self.run_dir,
+                                       (binary, '-c',
+                                        os.path.abspath(self.config_file)),
+                                       env=env)
+        # Register for teardown before launch, so cleanup happens even on failure.
+        self.suite_run.remember_to_stop(self.process)
+        self.process.launch()
+
+    def configure(self):
+        """Compose the config dict (defaults < suite < address) and render it."""
+        self.config_file = self.run_dir.new_file('osmo-stp.cfg')
+        self.dbg(config_file=self.config_file)
+
+        values = dict(stp=config.get_defaults('stp'))
+        config.overlay(values, self.suite_run.config())
+        config.overlay(values, dict(stp=dict(ip_address=self.ip_address)))
+
+        self.dbg('STP CONFIG:\n' + pprint.pformat(values))
+
+        with open(self.config_file, 'w') as f:
+            r = template.render('osmo-stp.cfg', values)
+            self.dbg(r)
+            f.write(r)
+
+    def conf_for_client(self):
+        # Config snippet a client needs to reach this STP.
+        return dict(stp=dict(ip_address=self.ip_address))
+
+    def addr(self):
+        # IP address (str) this STP uses, from the ip_address resource dict.
+        return self.ip_address.get('addr')
+
+    def running(self):
+        # True while the launched osmo-stp process has not terminated.
+        return not self.process.terminated()
+
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/src/osmo_gsm_tester/osmocon.py b/src/osmo_gsm_tester/osmocon.py
new file mode 100644
index 0000000..8b6040f
--- /dev/null
+++ b/src/osmo_gsm_tester/osmocon.py
@@ -0,0 +1,101 @@
+# osmo_gsm_tester: specifics for running an osmocon
+#
+# Copyright (C) 2018 by sysmocom - s.f.m.c. GmbH
+#
+# Author: Pau Espin Pedrol <pespin@sysmocom.de>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import tempfile
+
+from . import log, util, process
+from .event_loop import MainLoop
+
+class Osmocon(log.Origin):
+    """Manage an osmocon process driving an OsmocomBB phone over serial.
+
+    Loads the compal_e88 layer1 firmware onto the phone and exposes the l2
+    and loader unix sockets in a private temp dir.
+    """
+
+    FIRMWARE_FILE="opt/osmocom-bb/target/firmware/board/compal_e88/layer1.compalram.bin"
+
+    def __init__(self, suite_run, conf):
+        serial_device = conf.get('serial_device')
+        if serial_device is None:
+            raise log.Error('osmocon_phone contains no attr "serial_device"')
+        self.serial_device = os.path.realpath(serial_device)
+        super().__init__(log.C_RUN, 'osmocon_%s' % os.path.basename(self.serial_device))
+        # Populated later by start():
+        self.run_dir = None
+        self.process = None
+        self.suite_run = suite_run
+        self.conf = conf
+        # Private temp dir holding the unix sockets; removed in cleanup().
+        self.sk_tmp_dir = tempfile.mkdtemp('', 'ogtosmoconsk')
+        # sun_path in sockaddr_un is limited; fail early with a clear message.
+        if len(self.l2_socket_path().encode()) > 107:
+            raise log.Error('Path for l2 socket is longer than max allowed len for unix socket path (107):', self.l2_socket_path())
+        if len(self.loader_socket_path().encode()) > 107:
+            raise log.Error('Path for loader socket is longer than max allowed len for unix socket path (107):', self.loader_socket_path())
+
+    def l2_socket_path(self):
+        # Unix socket where layer2 clients (e.g. mobile) connect.
+        return os.path.join(self.sk_tmp_dir, 'osmocom_l2')
+
+    def loader_socket_path(self):
+        # Unix socket of the osmocon firmware loader.
+        return os.path.join(self.sk_tmp_dir, 'osmocom_loader')
+
+    def start(self):
+        """Launch osmocon and block until the l2 socket appears."""
+        self.log('Resetting the phone')
+        # TODO: make sure the pone is powered off before starting osmocon
+
+        self.log('Starting osmocon')
+        self.run_dir = util.Dir(self.suite_run.get_test_run_dir().new_dir(self.name()))
+
+        inst = util.Dir(os.path.abspath(self.suite_run.trial.get_inst('osmocom-bb')))
+
+        binary = inst.child('sbin', 'osmocon')
+        if not os.path.isfile(binary):
+            raise RuntimeError('Binary missing: %r' % binary)
+        lib = inst.child('lib')
+        if not os.path.isdir(lib):
+            raise RuntimeError('No lib/ in %r' % inst)
+
+        env = { 'LD_LIBRARY_PATH': util.prepend_library_path(lib) }
+
+        firmware_path = os.path.join(str(inst), Osmocon.FIRMWARE_FILE)
+        if not os.path.isfile(firmware_path):
+            raise RuntimeError('Binary missing: %r' % firmware_path)
+        self.dbg(run_dir=self.run_dir, binary=binary, env=env)
+        # '-m c123xor': download mode for the C123 phone family.
+        self.process = process.Process(self.name(), self.run_dir,
+                                       (binary, '-p', self.serial_device,
+                                        '-m', 'c123xor',
+                                        '-s', self.l2_socket_path(),
+                                        '-l', self.loader_socket_path(),
+                                        firmware_path),
+                                       env=env)
+        self.suite_run.remember_to_stop(self.process)
+        self.process.launch()
+        self.log('Waiting for osmocon to be up and running')
+        # The l2 socket appearing signals osmocon finished initializing.
+        MainLoop.wait(self, os.path.exists, self.l2_socket_path())
+
+    def running(self):
+        # True while the launched osmocon process has not terminated.
+        return not self.process.terminated()
+
+    def cleanup(self):
+        """Remove the sockets and the private temp dir (best effort)."""
+        if self.sk_tmp_dir:
+            try:
+                os.remove(self.l2_socket_path())
+            except OSError:
+                pass  # socket may never have been created
+            try:
+                os.remove(self.loader_socket_path())
+            except OSError:
+                pass  # socket may never have been created
+            os.rmdir(self.sk_tmp_dir)
+
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/src/osmo_gsm_tester/pcap_recorder.py b/src/osmo_gsm_tester/pcap_recorder.py
new file mode 100644
index 0000000..70833d0
--- /dev/null
+++ b/src/osmo_gsm_tester/pcap_recorder.py
@@ -0,0 +1,57 @@
+# osmo_gsm_tester: specifics for running an osmo-nitb
+#
+# Copyright (C) 2016-2017 by sysmocom - s.f.m.c. GmbH
+#
+# Author: Pau Espin Pedrol <pespin@sysmocom.de>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import random
+import re
+import socket
+
+from . import log, util, config, template, process, osmo_ctrl
+
+class PcapRecorder(log.Origin):
+    """Record network traffic with tcpdump into the given run dir.
+
+    NOTE: the constructor starts the capture immediately (calls start()).
+    """
+
+    def __init__(self, suite_run, run_dir, iface=None, filters='', netns=None):
+        # iface=None captures on all interfaces ('any').
+        self.iface = iface
+        if not self.iface:
+            self.iface = "any"
+        # filters: a tcpdump capture filter expression (may be empty).
+        self.filters = filters
+        super().__init__(log.C_RUN, 'pcap-recorder_%s' % self.iface, filters=self.filters)
+        self.suite_run = suite_run
+        self.run_dir = run_dir
+        # netns: optional network namespace name to run tcpdump inside.
+        self.netns = netns
+        self.start()
+
+    def start(self):
+        """Launch tcpdump writing to <run_dir>/<name>.pcap."""
+        self.dbg('Recording pcap', self.run_dir, self.filters)
+        dumpfile = os.path.join(os.path.abspath(self.run_dir), self.name() + ".pcap")
+        popen_args = ('tcpdump', '-n',
+                      '-i', self.iface,
+                      '-w', dumpfile,
+                      self.filters)
+        if self.netns:
+            self.process = process.NetNSProcess(self.name(), self.run_dir, self.netns, popen_args)
+        else:
+            self.process = process.Process(self.name(), self.run_dir, popen_args)
+        # Register for teardown before launch, so the capture is always stopped.
+        self.suite_run.remember_to_stop(self.process)
+        self.process.launch()
+
+    def running(self):
+        # True while the tcpdump process has not terminated.
+        return not self.process.terminated()
+
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/src/osmo_gsm_tester/pcu.py b/src/osmo_gsm_tester/pcu.py
new file mode 100644
index 0000000..9ec8f35
--- /dev/null
+++ b/src/osmo_gsm_tester/pcu.py
@@ -0,0 +1,57 @@
+# osmo_gsm_tester: specifics pcu base abstract class
+#
+# Copyright (C) 2018 by sysmocom - s.f.m.c. GmbH
+#
+# Author: Pau Espin Pedrol <pespin@sysmocom.de>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from abc import ABCMeta, abstractmethod
+from . import log
+
+class Pcu(log.Origin, metaclass=ABCMeta):
+    """PCU Abstract Base Class."""
+
+##############
+# PROTECTED
+##############
+
+    def __init__(self, suite_run, bts, conf, name):
+        """Base constructor. Must be called by subclass."""
+        super().__init__(log.C_RUN, name)
+        self.suite_run = suite_run
+        # bts: the BTS object this PCU serves.
+        self.bts = bts
+        # conf: PCU-specific configuration dict.
+        self.conf = conf
+
+###################
+# PUBLIC (test API included)
+###################
+
+    @abstractmethod
+    def start(self, keepalive=False):
+        """Start the PCU. Must be implemented by subclass.
+
+        keepalive: when True, the process is kept running across test cleanup.
+        """
+        pass
+
+#------------------------------------------------------------------------------
+
+class PcuDummy(Pcu):
+    """PCU for BTS without proper PCU control"""
+
+    def __init__(self, suite_run, bts, conf):
+        super().__init__(suite_run, bts, conf, 'PcuDummy')
+
+    def start(self, keepalive=False):
+        # Nothing to start: the BTS manages its own PCU.
+        pass
+
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/src/osmo_gsm_tester/pcu_osmo.py b/src/osmo_gsm_tester/pcu_osmo.py
new file mode 100644
index 0000000..767264c
--- /dev/null
+++ b/src/osmo_gsm_tester/pcu_osmo.py
@@ -0,0 +1,83 @@
+# osmo_gsm_tester: specifics for running an osmo-pcu
+#
+# Copyright (C) 2016-2017 by sysmocom - s.f.m.c. GmbH
+#
+# Author: Pau Espin Pedrol <pespin@sysmocom.de>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import pprint
+from . import config, util, template, process, pcu
+
+class OsmoPcu(pcu.Pcu):
+    """Run a local osmo-pcu attached to a BTS's PCU socket."""
+
+    BIN_PCU = 'osmo-pcu'
+    PCU_OSMO_CFG = 'osmo-pcu.cfg'
+
+    def __init__(self, suite_run, bts, conf):
+        super().__init__(suite_run, bts, conf, OsmoPcu.BIN_PCU)
+        # Populated later by start():
+        self.run_dir = None
+        self.inst = None
+        self.conf = conf
+        self.env = {}
+
+    def start(self, keepalive=False):
+        """Render the config and launch osmo-pcu from the trial's inst dir."""
+        self.run_dir = util.Dir(self.suite_run.get_test_run_dir().new_dir(self.name()))
+        self.configure()
+
+        self.inst = util.Dir(os.path.abspath(self.suite_run.trial.get_inst('osmo-pcu')))
+        lib = self.inst.child('lib')
+        if not os.path.isdir(lib):
+            raise RuntimeError('No lib/ in %r' % self.inst)
+        self.env = { 'LD_LIBRARY_PATH': util.prepend_library_path(lib) }
+
+        # '-r 1': realtime priority; '-i': the BSC/BTS address to connect to.
+        self.launch_process(keepalive, OsmoPcu.BIN_PCU, '-r', '1',
+                            '-c', os.path.abspath(self.config_file),
+                            '-i', self.bts.bsc.addr())
+        self.suite_run.poll()
+
+    def launch_process(self, keepalive, binary_name, *args):
+        """Launch one binary from the inst dir in its own run sub-dir."""
+        binary = os.path.abspath(self.inst.child('bin', binary_name))
+        run_dir = self.run_dir.new_dir(binary_name)
+        if not os.path.isfile(binary):
+            raise RuntimeError('Binary missing: %r' % binary)
+        proc = process.Process(binary_name, run_dir,
+                               (binary,) + args,
+                               env=self.env)
+        self.suite_run.remember_to_stop(proc, keepalive)
+        proc.launch()
+        return proc
+
+    def configure(self):
+        """Compose the config dict (defaults < suite < socket < conf) and render it."""
+        self.config_file = self.run_dir.new_file(OsmoPcu.PCU_OSMO_CFG)
+        self.dbg(config_file=self.config_file)
+
+        values = dict(osmo_pcu=config.get_defaults('osmo_pcu'))
+        config.overlay(values, self.suite_run.config())
+        # Connect to the PCU socket exposed by our BTS.
+        config.overlay(values, {
+            'osmo_pcu': {
+                'pcu_socket_path': self.bts.pcu_socket_path(),
+            }
+        })
+        config.overlay(values, { 'osmo_pcu': self.conf })
+
+        self.dbg('OSMO-PCU CONFIG:\n' + pprint.pformat(values))
+
+        with open(self.config_file, 'w') as f:
+            r = template.render(OsmoPcu.PCU_OSMO_CFG, values)
+            self.dbg(r)
+            f.write(r)
+
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/src/osmo_gsm_tester/pcu_sysmo.py b/src/osmo_gsm_tester/pcu_sysmo.py
new file mode 100644
index 0000000..cd3b75e
--- /dev/null
+++ b/src/osmo_gsm_tester/pcu_sysmo.py
@@ -0,0 +1,125 @@
+# osmo_gsm_tester: specifics for running a osmo-pcu for sysmoBTS
+#
+# Copyright (C) 2016-2017 by sysmocom - s.f.m.c. GmbH
+#
+# Author: Pau Espin Pedrol <pespin@sysmocom.de>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import pprint
+from . import log, config, util, template, process
+
+class OsmoPcuSysmo(log.Origin):
+
+ REMOTE_DIR = '/osmo-gsm-tester-pcu'
+ PCU_SYSMO_BIN = 'osmo-pcu'
+ PCU_SYSMO_CFG = 'osmo-pcu-sysmo.cfg'
+
+ def __init__(self, suite_run, sysmobts, conf):
+ super().__init__(log.C_RUN, self.PCU_SYSMO_BIN)
+ self.run_dir = None
+ self.bsc = None
+ self.inst = None
+ self.remote_inst = None
+ self.remote_dir = None
+ self.sysmobts = None
+ self.suite_run = suite_run
+ self.sysmobts = sysmobts
+ self.conf = conf
+ self.remote_env = {}
+ self.remote_user = 'root'
+
+ def start(self, keepalive=False):
+ self.run_dir = util.Dir(self.suite_run.get_test_run_dir().new_dir(self.name()))
+ self.configure()
+
+ self.inst = util.Dir(os.path.abspath(self.suite_run.trial.get_inst('osmo-pcu-sysmo')))
+ lib = self.inst.child('lib')
+ if not os.path.isdir(lib):
+ raise log.Error('No lib/ in', self.inst)
+ if not self.inst.isfile('bin', OsmoPcuSysmo.PCU_SYSMO_BIN):
+ raise log.Error('No osmo-pcu-sysmo binary in', self.inst)
+
+ self.remote_dir = util.Dir(OsmoPcuSysmo.REMOTE_DIR)
+ self.remote_inst = util.Dir(self.remote_dir.child(os.path.basename(str(self.inst))))
+
+ self.run_remote('rm-remote-dir', ('test', '!', '-d', OsmoPcuSysmo.REMOTE_DIR, '||', 'rm', '-rf', OsmoPcuSysmo.REMOTE_DIR))
+ self.run_remote('mk-remote-dir', ('mkdir', '-p', OsmoPcuSysmo.REMOTE_DIR))
+ self.run_local('scp-inst-to-sysmobts',
+ ('scp', '-r', str(self.inst), '%s@%s:%s' % (self.remote_user, self.sysmobts.remote_addr(), str(self.remote_inst))))
+
+ remote_run_dir = self.remote_dir.child(OsmoPcuSysmo.PCU_SYSMO_BIN)
+ self.run_remote('mk-remote-run-dir', ('mkdir', '-p', remote_run_dir))
+
+ remote_config_file = self.remote_dir.child(OsmoPcuSysmo.PCU_SYSMO_CFG)
+ self.run_local('scp-cfg-to-sysmobts',
+ ('scp', '-r', self.config_file, '%s@%s:%s' % (self.remote_user, self.sysmobts.remote_addr(), remote_config_file)))
+
+ remote_lib = self.remote_inst.child('lib')
+ remote_binary = self.remote_inst.child('bin', OsmoPcuSysmo.PCU_SYSMO_BIN)
+ self.launch_remote(OsmoPcuSysmo.PCU_SYSMO_BIN,
+ ('LD_LIBRARY_PATH=%s' % remote_lib,
+ remote_binary, '-c', remote_config_file, '-r', '1',
+ '-i', self.sysmobts.bsc.addr()),
+ remote_cwd=remote_run_dir, keepalive=keepalive)
+
+ def _process_remote(self, name, popen_args, remote_cwd=None):
+ run_dir = self.run_dir.new_dir(name)
+ return process.RemoteProcess(name, run_dir, self.remote_user, self.sysmobts.remote_addr(), remote_cwd,
+ popen_args)
+
+ def run_remote(self, name, popen_args, remote_cwd=None):
+ proc = self._process_remote(name, popen_args, remote_cwd)
+ proc.launch()
+ proc.wait()
+ if proc.result != 0:
+ log.ctx(proc)
+ raise log.Error('Exited in error')
+
+ def launch_remote(self, name, popen_args, remote_cwd=None, keepalive=False):
+ proc = self._process_remote(name, popen_args, remote_cwd)
+ self.suite_run.remember_to_stop(proc, keepalive)
+ proc.launch()
+
+ def run_local(self, name, popen_args):
+ run_dir = self.run_dir.new_dir(name)
+ proc = process.Process(name, run_dir, popen_args)
+ proc.launch()
+ proc.wait()
+ if proc.result != 0:
+ log.ctx(proc)
+ raise log.Error('Exited in error')
+
+ def configure(self):
+ self.config_file = self.run_dir.new_file(OsmoPcuSysmo.PCU_SYSMO_CFG)
+ self.dbg(config_file=self.config_file)
+
+ values = { 'osmo_pcu_sysmo': config.get_defaults('osmo_pcu_sysmo') }
+ config.overlay(values, self.suite_run.config())
+ config.overlay(values, {
+ 'osmo_pcu_sysmo': {
+ 'pcu_socket_path': self.sysmobts.pcu_socket_path()
+ }
+ })
+ config.overlay(values, { 'osmo_pcu_sysmo': self.conf })
+
+ self.dbg('OSMO-PCU-SYSMO CONFIG:\n' + pprint.pformat(values))
+
+ with open(self.config_file, 'w') as f:
+ r = template.render(OsmoPcuSysmo.PCU_SYSMO_CFG, values)
+ self.dbg(r)
+ f.write(r)
+
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/src/osmo_gsm_tester/powersupply.py b/src/osmo_gsm_tester/powersupply.py
new file mode 100644
index 0000000..86fc010
--- /dev/null
+++ b/src/osmo_gsm_tester/powersupply.py
@@ -0,0 +1,73 @@
+# osmo_gsm_tester: class defining a Power Supply object
+#
+# Copyright (C) 2018 by sysmocom - s.f.m.c. GmbH
+#
+# Author: Pau Espin Pedrol <pespin@sysmocom.de>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from abc import ABCMeta, abstractmethod
+from . import log
+from .event_loop import MainLoop
+
+class PowerSupply(log.Origin, metaclass=ABCMeta):
+    """Abstract base class for controllable power supplies."""
+
+##############
+# PROTECTED
+##############
+    def __init__(self, conf, name):
+        """Base constructor. Must be called by subclass."""
+        super().__init__(log.C_RUN, name)
+        # conf: implementation-specific configuration dict.
+        self.conf = conf
+
+########################
+# PUBLIC - INTERNAL API
+########################
+    @abstractmethod
+    def is_powered(self):
+        """Get whether the device is powered on or off. Must be implemented by subclass."""
+        pass
+
+    @abstractmethod
+    def power_set(self, onoff):
+        """Turn on (onoff=True) or off (onoff=False) the device. Must be implemented by subclass."""
+        pass
+
+    def power_cycle(self, sleep=0):
+        """Turn off the device, wait `sleep` seconds, then turn it back on."""
+        self.power_set(False)
+        MainLoop.sleep(self, sleep)
+        self.power_set(True)
+
+
+# Imported at the bottom on purpose: powersupply_sispm imports PowerSupply
+# from this module, so a top-of-file import would be circular.
+from . import powersupply_sispm
+
+# Registry mapping type name -> PowerSupply subclass; extended via register_type().
+KNOWN_PWSUPPLY_TYPES = {
+        'sispm' : powersupply_sispm.PowerSupplySispm,
+}
+
+def register_type(name, clazz):
+    """Register a new PowerSupply child class at runtime."""
+    KNOWN_PWSUPPLY_TYPES[name] = clazz
+
+def get_instance_by_type(pwsupply_type, pwsupply_opt):
+ """Allocate a PowerSupply child class based on type. Opts are passed to the newly created object."""
+ obj = KNOWN_PWSUPPLY_TYPES.get(pwsupply_type, None)
+ if not obj:
+ raise log.Error('PowerSupply type not supported:', pwsupply_type)
+ return obj(pwsupply_opt)
+
+
+
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/src/osmo_gsm_tester/powersupply_sispm.py b/src/osmo_gsm_tester/powersupply_sispm.py
new file mode 100644
index 0000000..4505b17
--- /dev/null
+++ b/src/osmo_gsm_tester/powersupply_sispm.py
@@ -0,0 +1,113 @@
+# osmo_gsm_tester: class defining a Power Supply object
+#
+# Copyright (C) 2018 by sysmocom - s.f.m.c. GmbH
+#
+# Author: Pau Espin Pedrol <pespin@sysmocom.de>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import sispm
+from usb.core import USBError
+
+from . import log
+from .event_loop import MainLoop
+from .powersupply import PowerSupply
+
+class PowerSupplySispm(PowerSupply):
+ """PowerSupply implementation using pysispm.
+
+    The device object from sispm is not cached into an attribute of the class
+ instance because it is actually a libusb object keeping the device assigned
+    to it until it is destroyed, meaning it will block other users of the whole
+ device until the object is released. Instead, we pick the object in the
+ smallest scope possible, and we re-try if we receive a "Resource Busy" error
+ because we know it will be available in short time.
+ """
+
+ def _retry_usberr(self, func, *args):
+ """Run function until it runs successfully, retry on spurious errors.
+
+ Sometimes when operating the usb device, libusb reports the following spurious exception:
+ [Errno 16] Resource busy -> This can appear if another instance is using the device.
+ [Errno 110] Operation timed out
+
+ Retrying after that it's usually enough.
+ """
+ while True:
+ try:
+ ret = func(*args)
+ return ret
+ except USBError as e:
+ if e.errno == 16 or e.errno==110:
+ self.log('skip usb error, retry', repr(e))
+ MainLoop.sleep(self, 0.1)
+ continue
+ raise e
+
+ def _get_device(self):
+ """Get the sispm device object.
+
+ It should be kept alive as short as possible as it blocks other users
+ from using the device until the object is released.
+ """
+ mydevid = self.conf.get('device')
+ devices = self._retry_usberr(sispm.connect)
+ for d in devices:
+ did = self._retry_usberr(sispm.getid, d)
+ self.dbg('detected device:', did)
+ if did == mydevid:
+ self.dbg('found matching device: %s' % did)
+ return d
+ return None
+
+
+########################
+# PUBLIC - INTERNAL API
+########################
+ def __init__(self, conf):
+ super().__init__(conf, 'sispm')
+ mydevid = conf.get('device')
+ if mydevid is None:
+ raise log.Error('No "device" attribute provided in supply conf!')
+ self.set_name('sispm-'+mydevid)
+ myport = conf.get('port')
+ if myport is None:
+ raise log.Error('No "port" attribute provided in power_supply conf!')
+ if not int(myport):
+ raise log.Error('Wrong non numeric "port" attribute provided in power_supply conf!')
+ self.set_name('sispm-'+mydevid+'-'+myport)
+ self.port = int(myport)
+ device = self._get_device()
+ if device is None:
+            raise log.Error('device with id %s not found!' % mydevid)
+ dmin = self._retry_usberr(sispm.getminport, device)
+ dmax = self._retry_usberr(sispm.getmaxport, device)
+ if dmin > self.port or dmax < self.port:
+ raise log.Error('Out of range "port" attribute provided in power_supply conf!')
+
+ def is_powered(self):
+ """Get whether the device is powered on or off."""
+ return self._retry_usberr(sispm.getstatus, self._get_device(), self.port)
+
+ def power_set(self, onoff):
+ """Turn on (onoff=True) or off (onoff=False) the device."""
+ if onoff:
+ self.dbg('switchon')
+ self._retry_usberr(sispm.switchon, self._get_device(), self.port)
+ else:
+ self.dbg('switchoff')
+ self._retry_usberr(sispm.switchoff, self._get_device(), self.port)
+
+
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/src/osmo_gsm_tester/process.py b/src/osmo_gsm_tester/process.py
new file mode 100644
index 0000000..b1769f8
--- /dev/null
+++ b/src/osmo_gsm_tester/process.py
@@ -0,0 +1,306 @@
+# osmo_gsm_tester: process management
+#
+# Copyright (C) 2016-2017 by sysmocom - s.f.m.c. GmbH
+#
+# Author: Neels Hofmeyr <neels@hofmeyr.de>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import time
+import subprocess
+import signal
+from datetime import datetime
+
+from . import log
+from .event_loop import MainLoop
+from .util import Dir
+
+class Process(log.Origin):
+
+ def __init__(self, name, run_dir, popen_args, **popen_kwargs):
+ super().__init__(log.C_RUN, name)
+ self.process_obj = None
+ self.result = None
+ self.killed = None
+ self.name_str = name
+ self.run_dir = run_dir
+ self.popen_args = popen_args
+ self.popen_kwargs = popen_kwargs
+ self.outputs = {}
+ if not isinstance(self.run_dir, Dir):
+ self.run_dir = Dir(os.path.abspath(str(self.run_dir)))
+
+ def set_env(self, key, value):
+ env = self.popen_kwargs.get('env') or {}
+ env[key] = value
+ self.popen_kwargs['env'] = env
+
+ def make_output_log(self, name):
+ '''
+ create a non-existing log output file in run_dir to pipe stdout and
+ stderr from this process to.
+ '''
+ path = self.run_dir.new_child(name)
+ f = open(path, 'w')
+ self.dbg(path)
+ f.write('(launched: %s)\n' % datetime.now().strftime(log.LONG_DATEFMT))
+ f.flush()
+ self.outputs[name] = (path, f)
+ return f
+
+ def launch(self):
+ log.dbg('cd %r; %s %s' % (
+ os.path.abspath(str(self.run_dir)),
+ ' '.join(['%s=%r'%(k,v) for k,v in self.popen_kwargs.get('env', {}).items()]),
+ ' '.join(self.popen_args)))
+
+ self.process_obj = subprocess.Popen(
+ self.popen_args,
+ stdout=self.make_output_log('stdout'),
+ stderr=self.make_output_log('stderr'),
+ stdin=subprocess.PIPE,
+ shell=False,
+ cwd=self.run_dir.path,
+ **self.popen_kwargs)
+ self.set_name(self.name_str, pid=self.process_obj.pid)
+ self.log('Launched')
+
+ def launch_sync(self, raise_nonsuccess=True):
+ '''
+ calls launch() method and block waiting for it to finish, serving the
+ mainloop meanwhile.
+ '''
+ try:
+ self.launch()
+ self.wait()
+ except Exception as e:
+ self.terminate()
+ raise e
+ if raise_nonsuccess and self.result != 0:
+ log.ctx(self)
+ raise log.Error('Exited in error %d' % self.result)
+ return self.result
+
+ def respawn(self):
+ self.dbg('respawn')
+ assert not self.is_running()
+ self.result = None
+ self.killed = None
+ self.launch()
+
+ def _poll_termination(self, time_to_wait_for_term=5):
+ wait_step = 0.001
+ waited_time = 0
+ while True:
+ # poll returns None if proc is still running
+ self.result = self.process_obj.poll()
+ if self.result is not None:
+ return True
+ waited_time += wait_step
+ # make wait_step approach 1.0
+ wait_step = (1. + 5. * wait_step) / 6.
+ if waited_time >= time_to_wait_for_term:
+ break
+ time.sleep(wait_step)
+ return False
+
+ def send_signal(self, sig):
+ os.kill(self.process_obj.pid, sig)
+
+ def terminate(self):
+ if self.process_obj is None:
+ return
+ if self.result is not None:
+ return
+
+ while True:
+ # first try SIGINT to allow stdout+stderr flushing
+ self.log('Terminating (SIGINT)')
+ self.send_signal(signal.SIGINT)
+ self.killed = signal.SIGINT
+ if self._poll_termination():
+ break
+
+ # SIGTERM maybe?
+ self.log('Terminating (SIGTERM)')
+ self.send_signal(signal.SIGTERM)
+ self.killed = signal.SIGTERM
+ if self._poll_termination():
+ break
+
+ # out of patience
+ self.log('Terminating (SIGKILL)')
+ self.send_signal(signal.SIGKILL)
+ self.killed = signal.SIGKILL
+ break;
+
+ self.process_obj.wait()
+ self.cleanup()
+
+ def cleanup(self):
+ self.dbg('Cleanup')
+ self.close_output_logs()
+ if self.result == 0:
+ self.log('Terminated: ok', rc=self.result)
+ elif self.killed:
+ self.log('Terminated', rc=self.result)
+ else:
+ self.err('Terminated: ERROR', rc=self.result)
+ #self.log_stdout_tail()
+ self.log_stderr_tail()
+
+ def log_stdout_tail(self):
+ m = self.get_stdout_tail(prefix='| ')
+ if not m:
+ return
+ self.log('stdout:\n', m, '\n')
+
+ def log_stderr_tail(self):
+ m = self.get_stderr_tail(prefix='| ')
+ if not m:
+ return
+ self.log('stderr:\n', m, '\n')
+
+ def close_output_logs(self):
+ for k, v in self.outputs.items():
+ path, f = v
+ if f:
+ f.flush()
+ f.close()
+ self.outputs[k] = (path, None)
+
+ def poll(self):
+ if self.process_obj is None:
+ return
+ if self.result is not None:
+ return
+ self.result = self.process_obj.poll()
+ if self.result is not None:
+ self.cleanup()
+
+ def is_running(self, poll_first=True):
+ if poll_first:
+ self.poll()
+ return self.process_obj is not None and self.result is None
+
+ def get_output(self, which):
+ v = self.outputs.get(which)
+ if not v:
+ return None
+ path, f = v
+ with open(path, 'r') as f2:
+ return f2.read()
+
+ def get_output_tail(self, which, tail=10, prefix=''):
+ out = self.get_output(which)
+ if not out:
+ return None
+ out = out.splitlines()
+ tail = min(len(out), tail)
+ return prefix + ('\n' + prefix).join(out[-tail:])
+
+ def get_stdout(self):
+ return self.get_output('stdout')
+
+ def get_stderr(self):
+ return self.get_output('stderr')
+
+ def get_stdout_tail(self, tail=10, prefix=''):
+ return self.get_output_tail('stdout', tail, prefix)
+
+ def get_stderr_tail(self, tail=10, prefix=''):
+ return self.get_output_tail('stderr', tail, prefix)
+
+ def terminated(self, poll_first=True):
+ if poll_first:
+ self.poll()
+ return self.result is not None
+
+ def wait(self, timeout=300):
+ MainLoop.wait(self, self.terminated, timeout=timeout)
+
+
+class RemoteProcess(Process):
+
+ def __init__(self, name, run_dir, remote_user, remote_host, remote_cwd, popen_args, **popen_kwargs):
+ super().__init__(name, run_dir, popen_args, **popen_kwargs)
+ self.remote_user = remote_user
+ self.remote_host = remote_host
+ self.remote_cwd = remote_cwd
+
+ # hacky: instead of just prepending ssh, i.e. piping stdout and stderr
+ # over the ssh link, we should probably run on the remote side,
+ # monitoring the process remotely.
+ if self.remote_cwd:
+ cd = 'cd "%s"; ' % self.remote_cwd
+ else:
+ cd = ''
+ # We need double -t to force tty and be able to forward signals to
+ # processes (SIGHUP) when we close ssh on the local side. As a result,
+ # stderr seems to be merged into stdout in ssh client.
+ self.popen_args = ['ssh', '-t', '-t', self.remote_user+'@'+self.remote_host,
+ '%s%s' % (cd,
+ ' '.join(self.popen_args))]
+ self.dbg(self.popen_args, dir=self.run_dir, conf=self.popen_kwargs)
+
+class NetNSProcess(Process):
+ NETNS_EXEC_BIN = 'osmo-gsm-tester_netns_exec.sh'
+ def __init__(self, name, run_dir, netns, popen_args, **popen_kwargs):
+ super().__init__(name, run_dir, popen_args, **popen_kwargs)
+ self.netns = netns
+
+ self.popen_args = ['sudo', self.NETNS_EXEC_BIN, self.netns] + list(popen_args)
+ self.dbg(self.popen_args, dir=self.run_dir, conf=self.popen_kwargs)
+
+ # HACK: Since we run under sudo, only way to kill root-owned process is to kill as root...
+ # This function is overwritten from Process.
+ def send_signal(self, sig):
+ kill_cmd = ('kill', '-%d' % int(sig), str(self.process_obj.pid))
+ run_local_netns_sync(self.run_dir, self.name()+"-kill", self.netns, kill_cmd)
+
+
+def run_local_sync(run_dir, name, popen_args):
+ run_dir =run_dir.new_dir(name)
+ proc = Process(name, run_dir, popen_args)
+ proc.launch_sync()
+
+def run_local_netns_sync(run_dir, name, netns, popen_args):
+ run_dir =run_dir.new_dir(name)
+ proc = NetNSProcess(name, run_dir, netns, popen_args)
+ proc.launch_sync()
+
+def run_remote_sync(run_dir, remote_user, remote_addr, name, popen_args, remote_cwd=None):
+ run_dir = run_dir.new_dir(name)
+ proc = RemoteProcess(name, run_dir, remote_user, remote_addr, remote_cwd, popen_args)
+ proc.launch_sync()
+
+def scp(run_dir, remote_user, remote_addr, name, local_path, remote_path):
+ run_local_sync(run_dir, name, ('scp', '-r', local_path, '%s@%s:%s' % (remote_user, remote_addr, remote_path)))
+
+def copy_inst_ssh(run_dir, inst, remote_dir, remote_user, remote_addr, remote_rundir_append, cfg_file_name):
+ remote_inst = Dir(remote_dir.child(os.path.basename(str(inst))))
+ remote_dir_str = str(remote_dir)
+ run_remote_sync(run_dir, remote_user, remote_addr, 'rm-remote-dir', ('test', '!', '-d', remote_dir_str, '||', 'rm', '-rf', remote_dir_str))
+ run_remote_sync(run_dir, remote_user, remote_addr, 'mk-remote-dir', ('mkdir', '-p', remote_dir_str))
+ scp(run_dir, remote_user, remote_addr, 'scp-inst-to-remote', str(inst), remote_dir_str)
+
+ remote_run_dir = remote_dir.child(remote_rundir_append)
+ run_remote_sync(run_dir, remote_user, remote_addr, 'mk-remote-run-dir', ('mkdir', '-p', remote_run_dir))
+
+ remote_config_file = remote_dir.child(os.path.basename(cfg_file_name))
+ scp(run_dir, remote_user, remote_addr, 'scp-cfg-to-remote', cfg_file_name, remote_config_file)
+ return remote_inst
+
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/src/osmo_gsm_tester/report.py b/src/osmo_gsm_tester/report.py
new file mode 100644
index 0000000..224cc46
--- /dev/null
+++ b/src/osmo_gsm_tester/report.py
@@ -0,0 +1,112 @@
+# osmo_gsm_tester: report: generate test result reports (JUnit XML, plain text)
+#
+# Copyright (C) 2016-2017 by sysmocom - s.f.m.c. GmbH
+#
+# Author: Pau Espin Pedrol <pespin@sysmocom.de>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import math
+from datetime import datetime
+import xml.etree.ElementTree as et
+from . import test
+
+def trial_to_junit_write(trial, junit_path):
+ elements = et.ElementTree(element=trial_to_junit(trial))
+ elements.write(junit_path)
+
+def trial_to_junit(trial):
+ testsuites = et.Element('testsuites')
+ for suite in trial.suites:
+ testsuite = suite_to_junit(suite)
+ testsuites.append(testsuite)
+ return testsuites
+
+def suite_to_junit(suite):
+ testsuite = et.Element('testsuite')
+ testsuite.set('name', suite.name())
+ testsuite.set('hostname', 'localhost')
+ if suite.start_timestamp:
+ testsuite.set('timestamp', datetime.fromtimestamp(round(suite.start_timestamp)).isoformat())
+ testsuite.set('time', str(math.ceil(suite.duration)))
+ testsuite.set('tests', str(len(suite.tests)))
+ testsuite.set('failures', str(suite.count_test_results()[2]))
+ for test in suite.tests:
+ testcase = test_to_junit(test)
+ testsuite.append(testcase)
+ return testsuite
+
+def test_to_junit(t):
+ testcase = et.Element('testcase')
+ testcase.set('name', t.name())
+ testcase.set('time', str(math.ceil(t.duration)))
+ if t.status == test.Test.SKIP:
+ et.SubElement(testcase, 'skipped')
+ elif t.status == test.Test.FAIL:
+ failure = et.SubElement(testcase, 'failure')
+ failure.set('type', t.fail_type or 'failure')
+ failure.text = t.fail_message
+ if t.fail_tb:
+ system_err = et.SubElement(testcase, 'system-err')
+ system_err.text = t.fail_tb
+ elif t.status != test.Test.PASS:
+ error = et.SubElement(testcase, 'error')
+ error.text = 'could not run'
+ return testcase
+
+def trial_to_text(trial):
+ suite_failures = []
+ count_fail = 0
+ count_pass = 0
+ for suite in trial.suites:
+ if suite.passed():
+ count_pass += 1
+ else:
+ count_fail += 1
+ suite_failures.append(suite_to_text(suite))
+
+ summary = ['%s: %s' % (trial.name(), trial.status)]
+ if count_fail:
+ summary.append('%d suites failed' % count_fail)
+ if count_pass:
+ summary.append('%d suites passed' % count_pass)
+ msg = [', '.join(summary)]
+ msg.extend(suite_failures)
+ return '\n'.join(msg)
+
+def suite_to_text(suite):
+ if not suite.tests:
+ return 'no tests were run.'
+
+ passed, skipped, failed = suite.count_test_results()
+ details = []
+ if failed:
+ details.append('fail: %d' % failed)
+ if passed:
+ details.append('pass: %d' % passed)
+ if skipped:
+ details.append('skip: %d' % skipped)
+ msgs = ['%s: %s (%s)' % (suite.status, suite.name(), ', '.join(details))]
+ msgs.extend([test_to_text(t) for t in suite.tests])
+ return '\n '.join(msgs)
+
+def test_to_text(t):
+ msgs = ['%s: %s' % (t.status, t.name())]
+ if t.start_timestamp:
+ msgs.append('(%.1f sec)' % t.duration)
+ if t.status == test.Test.FAIL:
+ msgs.append('%s: %s' % (t.fail_type, t.fail_message))
+ return ' '.join(msgs)
+
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/src/osmo_gsm_tester/resource.py b/src/osmo_gsm_tester/resource.py
new file mode 100644
index 0000000..e71f4cd
--- /dev/null
+++ b/src/osmo_gsm_tester/resource.py
@@ -0,0 +1,583 @@
+# osmo_gsm_tester: manage resources
+#
+# Copyright (C) 2016-2017 by sysmocom - s.f.m.c. GmbH
+#
+# Author: Neels Hofmeyr <neels@hofmeyr.de>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import time
+import copy
+import atexit
+import pprint
+
+from . import log
+from . import config
+from . import util
+from . import schema
+from . import modem
+from . import osmo_nitb
+from . import bts_sysmo, bts_osmotrx, bts_octphy, bts_nanobts
+
+from .util import is_dict, is_list
+
+HASH_KEY = '_hash'
+RESERVED_KEY = '_reserved_by'
+USED_KEY = '_used'
+
+RESOURCES_CONF = 'resources.conf'
+RESERVED_RESOURCES_FILE = 'reserved_resources.state'
+
+R_IP_ADDRESS = 'ip_address'
+R_BTS = 'bts'
+R_ARFCN = 'arfcn'
+R_MODEM = 'modem'
+R_OSMOCON = 'osmocon_phone'
+R_ALL = (R_IP_ADDRESS, R_BTS, R_ARFCN, R_MODEM, R_OSMOCON)
+
+RESOURCES_SCHEMA = {
+ 'ip_address[].addr': schema.IPV4,
+ 'bts[].label': schema.STR,
+ 'bts[].type': schema.STR,
+ 'bts[].ipa_unit_id': schema.UINT,
+ 'bts[].addr': schema.IPV4,
+ 'bts[].band': schema.BAND,
+ 'bts[].direct_pcu': schema.BOOL_STR,
+ 'bts[].ciphers[]': schema.CIPHER,
+ 'bts[].channel_allocator': schema.CHAN_ALLOCATOR,
+ 'bts[].gprs_mode': schema.GPRS_MODE,
+ 'bts[].num_trx': schema.UINT,
+ 'bts[].max_trx': schema.UINT,
+ 'bts[].trx_list[].addr': schema.IPV4,
+ 'bts[].trx_list[].hw_addr': schema.HWADDR,
+ 'bts[].trx_list[].net_device': schema.STR,
+ 'bts[].trx_list[].nominal_power': schema.UINT,
+ 'bts[].trx_list[].max_power_red': schema.UINT,
+ 'bts[].trx_list[].timeslot_list[].phys_chan_config': schema.PHY_CHAN,
+ 'bts[].trx_list[].power_supply.type': schema.STR,
+ 'bts[].trx_list[].power_supply.device': schema.STR,
+ 'bts[].trx_list[].power_supply.port': schema.STR,
+ 'bts[].osmo_trx.launch_trx': schema.BOOL_STR,
+ 'bts[].osmo_trx.type': schema.STR,
+ 'bts[].osmo_trx.clock_reference': schema.OSMO_TRX_CLOCK_REF,
+ 'bts[].osmo_trx.trx_ip': schema.IPV4,
+ 'bts[].osmo_trx.remote_user': schema.STR,
+ 'bts[].osmo_trx.dev_args': schema.STR,
+ 'bts[].osmo_trx.multi_arfcn': schema.BOOL_STR,
+ 'arfcn[].arfcn': schema.INT,
+ 'arfcn[].band': schema.BAND,
+ 'modem[].label': schema.STR,
+ 'modem[].path': schema.STR,
+ 'modem[].imsi': schema.IMSI,
+ 'modem[].ki': schema.KI,
+ 'modem[].auth_algo': schema.AUTH_ALGO,
+ 'modem[].ciphers[]': schema.CIPHER,
+ 'modem[].features[]': schema.MODEM_FEATURE,
+ 'osmocon_phone[].serial_device': schema.STR,
+ }
+
+WANT_SCHEMA = util.dict_add(
+ dict([('%s[].times' % r, schema.TIMES) for r in R_ALL]),
+ RESOURCES_SCHEMA)
+
+CONF_SCHEMA = util.dict_add(
+ { 'defaults.timeout': schema.STR,
+ 'config.bsc.net.codec_list[]': schema.CODEC },
+ dict([('resources.%s' % key, val) for key, val in WANT_SCHEMA.items()]),
+ dict([('modifiers.%s' % key, val) for key, val in WANT_SCHEMA.items()]))
+
+KNOWN_BTS_TYPES = {
+ 'osmo-bts-sysmo': bts_sysmo.SysmoBts,
+ 'osmo-bts-trx': bts_osmotrx.OsmoBtsTrx,
+ 'osmo-bts-octphy': bts_octphy.OsmoBtsOctphy,
+ 'nanobts': bts_nanobts.NanoBts,
+ }
+
+def register_bts_type(name, clazz):
+ KNOWN_BTS_TYPES[name] = clazz
+
+class ResourcesPool(log.Origin):
+ _remember_to_free = None
+ _registered_exit_handler = False
+
+ def __init__(self):
+ self.config_path = config.get_config_file(RESOURCES_CONF)
+ self.state_dir = config.get_state_dir()
+ super().__init__(log.C_CNF, conf=self.config_path, state=self.state_dir.path)
+ self.read_conf()
+
+ def read_conf(self):
+ self.all_resources = Resources(config.read(self.config_path, RESOURCES_SCHEMA))
+ self.all_resources.set_hashes()
+
+ def reserve(self, origin, want, modifiers):
+ '''
+ attempt to reserve the resources specified in the dict 'want' for
+ 'origin'. Obtain a lock on the resources lock dir, verify that all
+ wanted resources are available, and if yes mark them as reserved.
+
+ On success, return a reservation object which can be used to release
+ the reservation. The reservation will be freed automatically on program
+ exit, if not yet done manually.
+
+ 'origin' should be an Origin() instance.
+
+ 'want' is a dict matching RESOURCES_SCHEMA, used to specify what to
+ reserve.
+
+ 'modifiers' is a dict matching RESOURCES_SCHEMA, it is overlaid on top
+ of 'want'.
+
+ If an entry has no attribute set, any of the resources may be
+ reserved without further limitations.
+
+ ResourcesPool may also be selected with narrowed down constraints.
+ This would reserve one IP address, two modems, one BTS of type
+ sysmo and one of type trx, plus 2 ARFCNs in the 1800 band:
+
+ {
+ 'ip_address': [ {} ],
+ 'bts': [ { 'type': 'sysmo' }, { 'type': 'trx' } ],
+ 'arfcn': [ { 'band': 'GSM-1800' }, { 'band': 'GSM-1800' } ],
+ 'modem': [ {}, {} ],
+ }
+ '''
+ schema.validate(want, RESOURCES_SCHEMA)
+ schema.validate(modifiers, RESOURCES_SCHEMA)
+
+ origin_id = origin.origin_id()
+
+ with self.state_dir.lock(origin_id):
+ rrfile_path = self.state_dir.mk_parentdir(RESERVED_RESOURCES_FILE)
+ reserved = Resources(config.read(rrfile_path, if_missing_return={}))
+ to_be_reserved = self.all_resources.without(reserved).find(origin, want)
+
+ to_be_reserved.mark_reserved_by(origin_id)
+
+ reserved.add(to_be_reserved)
+ config.write(rrfile_path, reserved)
+
+ self.remember_to_free(to_be_reserved)
+ return ReservedResources(self, origin, to_be_reserved, modifiers)
+
+ def free(self, origin, to_be_freed):
+ log.ctx(origin)
+ with self.state_dir.lock(origin.origin_id()):
+ rrfile_path = self.state_dir.mk_parentdir(RESERVED_RESOURCES_FILE)
+ reserved = Resources(config.read(rrfile_path, if_missing_return={}))
+ reserved.drop(to_be_freed)
+ config.write(rrfile_path, reserved)
+ self.forget_freed(to_be_freed)
+
+ def register_exit_handler(self):
+ if self._registered_exit_handler:
+ return
+ atexit.register(self.clean_up_registered_resources)
+ self._registered_exit_handler = True
+
+ def unregister_exit_handler(self):
+ if not self._registered_exit_handler:
+ return
+ atexit.unregister(self.clean_up_registered_resources)
+ self._registered_exit_handler = False
+
+ def clean_up_registered_resources(self):
+ if not self._remember_to_free:
+ return
+ self.free(log.Origin('atexit.clean_up_registered_resources()'),
+ self._remember_to_free)
+
+ def remember_to_free(self, to_be_reserved):
+ self.register_exit_handler()
+ if not self._remember_to_free:
+ self._remember_to_free = Resources()
+ self._remember_to_free.add(to_be_reserved)
+
+ def forget_freed(self, freed):
+ if freed is self._remember_to_free:
+ self._remember_to_free.clear()
+ else:
+ self._remember_to_free.drop(freed)
+ if not self._remember_to_free:
+ self.unregister_exit_handler()
+
+ def next_persistent_value(self, token, first_val, validate_func, inc_func, origin):
+ origin_id = origin.origin_id()
+
+ with self.state_dir.lock(origin_id):
+ token_path = self.state_dir.child('last_used_%s.state' % token)
+ log.ctx(token_path)
+ last_value = first_val
+ if os.path.exists(token_path):
+ if not os.path.isfile(token_path):
+ raise RuntimeError('path should be a file but is not: %r' % token_path)
+ with open(token_path, 'r') as f:
+ last_value = f.read().strip()
+ validate_func(last_value)
+
+ next_value = inc_func(last_value)
+ with open(token_path, 'w') as f:
+ f.write(next_value)
+ return next_value
+
+ def next_msisdn(self, origin):
+ return self.next_persistent_value('msisdn', '1000', schema.msisdn, util.msisdn_inc, origin)
+
+ def next_lac(self, origin):
+ # LAC=0 has special meaning (MS detached), avoid it
+ return self.next_persistent_value('lac', '1', schema.uint16, lambda x: str(((int(x)+1) % pow(2,16)) or 1), origin)
+
+ def next_rac(self, origin):
+ return self.next_persistent_value('rac', '1', schema.uint8, lambda x: str((int(x)+1) % pow(2,8) or 1), origin)
+
+ def next_cellid(self, origin):
+ return self.next_persistent_value('cellid', '1', schema.uint16, lambda x: str((int(x)+1) % pow(2,16)), origin)
+
+ def next_bvci(self, origin):
+ # BVCI=0 and =1 are reserved, avoid them.
+ return self.next_persistent_value('bvci', '2', schema.uint16, lambda x: str(int(x)+1) if int(x) < pow(2,16) - 1 else '2', origin)
+
+class NoResourceExn(log.Error):
+ pass
+
+class Resources(dict):
+
+ def __init__(self, all_resources={}, do_copy=True):
+ if do_copy:
+ all_resources = copy.deepcopy(all_resources)
+ self.update(all_resources)
+
+ def drop(self, reserved, fail_if_not_found=True):
+ # protect from modifying reserved because we're the same object
+ if reserved is self:
+ raise RuntimeError('Refusing to drop a list of resources from itself.'
+ ' This is probably a bug where a list of Resources()'
+ ' should have been copied but is passed as-is.'
+ ' use Resources.clear() instead.')
+
+ for key, reserved_list in reserved.items():
+ my_list = self.get(key) or []
+
+ if my_list is reserved_list:
+ self.pop(key)
+ continue
+
+ for reserved_item in reserved_list:
+ found = False
+ reserved_hash = reserved_item.get(HASH_KEY)
+ if not reserved_hash:
+ raise RuntimeError('Resources.drop() only works with hashed items')
+
+ for i in range(len(my_list)):
+ my_item = my_list[i]
+ my_hash = my_item.get(HASH_KEY)
+ if not my_hash:
+ raise RuntimeError('Resources.drop() only works with hashed items')
+ if my_hash == reserved_hash:
+ found = True
+ my_list.pop(i)
+ break
+
+ if fail_if_not_found and not found:
+ raise RuntimeError('Asked to drop resource from a pool, but the'
+ ' resource was not found: %s = %r' % (key, reserved_item))
+
+ if not my_list:
+ self.pop(key)
+ return self
+
+ def without(self, reserved):
+ return Resources(self).drop(reserved)
+
+ def find(self, for_origin, want, skip_if_marked=None, do_copy=True, raise_if_missing=True, log_label='Reserving'):
+ '''
+ Pass a dict of resource requirements, e.g.:
+ want = {
+ 'bts': [ {'type': 'osmo-bts-sysmo',}, {} ],
+ 'modem': [ {}, {}, {} ]
+ }
+ This function tries to find a combination from the available resources that
+ matches these requirements. The return value is a dict (wrapped in a Resources class)
+ that contains the matching resources in the order of 'want' dict: in above
+ example, the returned dict would have a 'bts' list with the first item being
+ a sysmoBTS, the second item being any other available BTS.
+
+ If skip_if_marked is passed, any resource that contains this key is skipped.
+ E.g. if a BTS has the USED_KEY set like
+ reserved_resources = { 'bts' : {..., '_used': True} }
+ then this may be skipped by passing skip_if_marked='_used'
+ (or rather skip_if_marked=USED_KEY).
+
+ If do_copy is True, the returned dict is a deep copy and does not share
+ lists with any other Resources dict.
+
+ If raise_if_missing is False, this will return an empty item for any
+ resource that had no match, instead of immediately raising an exception.
+
+ This function expects input dictionaries whose contents have already
+ been replicated based on its the 'times' attributes. See
+ config.replicate_times() for more details.
+ '''
+ matches = {}
+ for key, want_list in sorted(want.items()): # sorted for deterministic test results
+ # here we have a resource of a given type, e.g. 'bts', with a list
+ # containing as many BTSes as the caller wants to reserve/use. Each
+ # list item contains specifics for the particular BTS.
+ my_list = self.get(key, [])
+
+ if log_label:
+ for_origin.log(log_label, len(want_list), 'x', key, '(candidates: %d)'%len(my_list))
+
+ # Try to avoid a less constrained item snatching away a resource
+ # from a more detailed constrained requirement.
+
+ # first record all matches, so that each requested item has a list
+ # of all available resources that match it. Some resources may
+ # appear for multiple requested items. Store matching indexes.
+ all_matches = []
+ for want_item in want_list:
+ item_match_list = []
+ for i in range(len(my_list)):
+ my_item = my_list[i]
+ if skip_if_marked and my_item.get(skip_if_marked):
+ continue
+ if item_matches(my_item, want_item):
+ item_match_list.append(i)
+ if not item_match_list:
+ if raise_if_missing:
+ raise NoResourceExn('No matching resource available for %s = %r'
+ % (key, want_item))
+ else:
+ # this one failed... see below
+ all_matches = []
+ break
+
+ all_matches.append( item_match_list )
+
+ if not all_matches:
+ # ...this one failed. Makes no sense to solve resource
+ # allocations, return an empty list for this key to mark
+ # failure.
+ matches[key] = []
+ continue
+
+ # figure out who gets what
+ try:
+ solution = solve(all_matches)
+ except NotSolvable:
+ # instead of a cryptic error message, raise an exception that
+ # conveys meaning to the user.
+ raise NoResourceExn('Could not resolve request to reserve resources: '
+ '%d x %s with requirements: %r' % (len(want_list), key, want_list))
+ picked = [ my_list[i] for i in solution if i is not None ]
+ for_origin.dbg('Picked', config.tostr(picked))
+ matches[key] = picked
+
+ return Resources(matches, do_copy=do_copy)
+
+ def set_hashes(self):
+ for key, item_list in self.items():
+ for item in item_list:
+ item[HASH_KEY] = util.hash_obj(item, HASH_KEY, RESERVED_KEY, USED_KEY)
+
+ def add(self, more):
+ if more is self:
+ raise RuntimeError('adding a list of resources to itself?')
+ config.add(self, copy.deepcopy(more))
+
+    def combine(self, more_rules):
+        if more_rules is self:
+            raise RuntimeError('combining a list of resource rules with itself?')
+        config.combine(self, copy.deepcopy(more_rules))
+
+ def mark_reserved_by(self, origin_id):
+ for key, item_list in self.items():
+ for item in item_list:
+ item[RESERVED_KEY] = origin_id
+
+
+class NotSolvable(Exception):
+ pass
+
+def solve(all_matches):
+ '''
+ all_matches shall be a list of index-lists.
+ all_matches[i] is the list of indexes that item i can use.
+ Return a solution so that each i gets a different index.
+ solve([ [0, 1, 2],
+ [0],
+ [0, 2] ]) == [1, 0, 2]
+ '''
+
+ def all_differ(l):
+ return len(set(l)) == len(l)
+
+ def search_in_permutations(fixed=[]):
+ idx = len(fixed)
+ for i in range(len(all_matches[idx])):
+ val = all_matches[idx][i]
+ # don't add a val that's already in the list
+ if val in fixed:
+ continue
+ l = list(fixed)
+ l.append(val)
+ if len(l) == len(all_matches):
+ # found a solution
+ return l
+ # not at the end yet, add next digit
+ r = search_in_permutations(l)
+ if r:
+ # nested search_in_permutations() call found a solution
+ return r
+ # this entire branch yielded no solution
+ return None
+
+ if not all_matches:
+ raise RuntimeError('Cannot solve: no candidates')
+
+ solution = search_in_permutations()
+ if not solution:
+ raise NotSolvable('The requested resource requirements are not solvable %r'
+ % all_matches)
+ return solution
+
+
+def contains_hash(list_of_dicts, a_hash):
+ for d in list_of_dicts:
+ if d.get(HASH_KEY) == a_hash:
+ return True
+ return False
+
+def item_matches(item, wanted_item, ignore_keys=None):
+ if is_dict(wanted_item):
+ # match up two dicts
+ if not isinstance(item, dict):
+ return False
+ for key, wanted_val in wanted_item.items():
+ if ignore_keys and key in ignore_keys:
+ continue
+ if not item_matches(item.get(key), wanted_val, ignore_keys=ignore_keys):
+ return False
+ return True
+
+ if is_list(wanted_item):
+ if not is_list(item):
+ return False
+ # Validate that all elements in both lists are of the same type:
+ t = util.list_validate_same_elem_type(wanted_item + item)
+ if t is None:
+ return True # both lists are empty, return
+ # For lists of complex objects, we expect them to be sorted lists:
+ if t in (dict, list, tuple):
+ for i in range(max(len(wanted_item), len(item))):
+ log.ctx(idx=i)
+ subitem = item[i] if i < len(item) else util.empty_instance_type(t)
+ wanted_subitem = wanted_item[i] if i < len(wanted_item) else util.empty_instance_type(t)
+ if not item_matches(subitem, wanted_subitem, ignore_keys=ignore_keys):
+ return False
+ else: # for lists of basic elements, we handle them as unsorted sets:
+ for val in wanted_item:
+ if val not in item:
+ return False
+ return True
+
+ return item == wanted_item
+
+
+class ReservedResources(log.Origin):
+ '''
+ After all resources have been figured out, this is the API that a test case
+ gets to interact with resources. From those resources that have been
+ reserved for it, it can pick some to mark them as currently in use.
+ Functions like nitb() provide a resource by automatically picking its
+ dependencies from so far unused (but reserved) resource.
+ '''
+
+ def __init__(self, resources_pool, origin, reserved, modifiers):
+ self.resources_pool = resources_pool
+ self.origin = origin
+ self.reserved_original = reserved
+ self.reserved = copy.deepcopy(self.reserved_original)
+ config.overlay(self.reserved, modifiers)
+
+ def __repr__(self):
+ return 'resources(%s)=%s' % (self.origin.name(), pprint.pformat(self.reserved))
+
+ def get(self, kind, specifics=None):
+ if specifics is None:
+ specifics = {}
+ self.dbg('requesting use of', kind, specifics=specifics)
+ want = { kind: [specifics] }
+ available_dict = self.reserved.find(self.origin, want, skip_if_marked=USED_KEY,
+ do_copy=False, raise_if_missing=False,
+ log_label='Using')
+ available = available_dict.get(kind)
+ self.dbg(available=len(available))
+ if not available:
+ # cook up a detailed error message for the current situation
+ kind_reserved = self.reserved.get(kind, [])
+ used_count = len([r for r in kind_reserved if USED_KEY in r])
+ matching = self.reserved.find(self.origin, want, raise_if_missing=False, log_label=None).get(kind, [])
+ if not matching:
+ msg = 'none of the reserved resources matches requirements %r' % specifics
+ elif not (used_count < len(kind_reserved)):
+ msg = 'suite.conf reserved only %d x %r.' % (len(kind_reserved), kind)
+ else:
+ msg = ('No unused resource left that matches the requirements;'
+ ' Of reserved %d x %r, %d match the requirements, but all are already in use;'
+ ' Requirements: %r'
+ % (len(kind_reserved), kind, len(matching), specifics))
+ raise NoResourceExn('When trying to use instance nr %d of %r: %s' % (used_count + 1, kind, msg))
+
+ pick = available[0]
+ self.dbg(using=pick)
+ assert not pick.get(USED_KEY)
+ pick[USED_KEY] = True
+ return copy.deepcopy(pick)
+
+    def put(self, item):
+        if not item.get(USED_KEY):
+            raise RuntimeError('Can only put() a resource that is used: %r' % item)
+        hash_to_put = item.get(HASH_KEY)
+        if not hash_to_put:
+            raise RuntimeError('Can only put() a resource that has a hash marker: %r' % item)
+        for key, item_list in self.reserved.items():
+            # fix: was 'my_list = self.get(key)', which invokes the resource-using
+            # get() above and would wrongly mark yet another resource as used.
+            for my_item in item_list:
+                if hash_to_put == my_item.get(HASH_KEY):
+                    my_item.pop(USED_KEY)
+
+ def put_all(self):
+ if not self.reserved:
+ return
+ for key, item_list in self.reserved.items():
+ for item in item_list:
+ item.pop(USED_KEY, None)
+
+ def free(self):
+ if self.reserved_original:
+ self.resources_pool.free(self.origin, self.reserved_original)
+ self.reserved_original = None
+
+ def counts(self):
+ counts = {}
+ for key in self.reserved.keys():
+ counts[key] = self.count(key)
+ return counts
+
+ def count(self, key):
+ return len(self.reserved.get(key) or [])
+
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/src/osmo_gsm_tester/schema.py b/src/osmo_gsm_tester/schema.py
new file mode 100644
index 0000000..14fe640
--- /dev/null
+++ b/src/osmo_gsm_tester/schema.py
@@ -0,0 +1,231 @@
+# osmo_gsm_tester: validate dict structures
+#
+# Copyright (C) 2016-2017 by sysmocom - s.f.m.c. GmbH
+#
+# Author: Neels Hofmeyr <neels@hofmeyr.de>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import re
+
+from . import log
+from .util import is_dict, is_list, str2bool, ENUM_OSMO_AUTH_ALGO
+
+KEY_RE = re.compile('[a-zA-Z][a-zA-Z0-9_]*')
+IPV4_RE = re.compile(r'([0-9]{1,3}\.){3}[0-9]{1,3}')  # fix: escape the dot; '.' matched any char
+HWADDR_RE = re.compile('([0-9a-fA-F]{2}:){5}[0-9a-fA-F]{2}')
+IMSI_RE = re.compile('[0-9]{6,15}')
+KI_RE = re.compile('[0-9a-fA-F]{32}')
+MSISDN_RE = re.compile('[0-9]{1,15}')
+
+def match_re(name, regex, val):
+    '''Raise ValueError unless val is a str that fully matches regex.'''
+    # Accept only str values, and require the regex to match the entire
+    # string (fullmatch), not just a prefix.
+    if isinstance(val, str) and regex.fullmatch(val):
+        return
+    # wrong type or no full match
+    raise ValueError('Invalid %s: %r' % (name, val))
+
+def band(val):
+ if val in ('GSM-900', 'GSM-1800', 'GSM-1900'):
+ return
+ raise ValueError('Unknown GSM band: %r' % val)
+
+def ipv4(val):
+ match_re('IPv4 address', IPV4_RE, val)
+ els = [int(el) for el in val.split('.')]
+ if not all([el >= 0 and el <= 255 for el in els]):
+ raise ValueError('Invalid IPv4 address: %r' % val)
+
+def hwaddr(val):
+ match_re('hardware address', HWADDR_RE, val)
+
+def imsi(val):
+ match_re('IMSI', IMSI_RE, val)
+
+def ki(val):
+ match_re('KI', KI_RE, val)
+
+def msisdn(val):
+ match_re('MSISDN', MSISDN_RE, val)
+
+def auth_algo(val):
+ if val not in ENUM_OSMO_AUTH_ALGO:
+ raise ValueError('Unknown Authentication Algorithm: %r' % val)
+
+def uint(val):
+ n = int(val)
+ if n < 0:
+ raise ValueError('Positive value expected instead of %d' % n)
+
+def uint8(val):
+ n = int(val)
+ if n < 0:
+ raise ValueError('Positive value expected instead of %d' % n)
+ if n > 255: # 2^8 - 1
+ raise ValueError('Value %d too big, max value is 255' % n)
+
+def uint16(val):
+ n = int(val)
+ if n < 0:
+ raise ValueError('Positive value expected instead of %d' % n)
+ if n > 65535: # 2^16 - 1
+ raise ValueError('Value %d too big, max value is 65535' % n)
+
+def times(val):
+ n = int(val)
+ if n < 1:
+ raise ValueError('Positive value >0 expected instead of %d' % n)
+
+def cipher(val):
+ if val in ('a5_0', 'a5_1', 'a5_2', 'a5_3', 'a5_4', 'a5_5', 'a5_6', 'a5_7'):
+ return
+ raise ValueError('Unknown Cipher value: %r' % val)
+
+def modem_feature(val):
+ if val in ('sms', 'gprs', 'voice', 'ussd', 'sim'):
+ return
+ raise ValueError('Unknown Modem Feature: %r' % val)
+
+def phy_channel_config(val):
+ if val in ('CCCH', 'CCCH+SDCCH4', 'TCH/F', 'TCH/H', 'SDCCH8', 'PDCH',
+ 'TCH/F_PDCH', 'CCCH+SDCCH4+CBCH', 'SDCCH8+CBCH','TCH/F_TCH/H_PDCH'):
+ return
+ raise ValueError('Unknown Physical channel config: %r' % val)
+
+def channel_allocator(val):
+ if val in ('ascending', 'descending'):
+ return
+ raise ValueError('Unknown Channel Allocator Policy %r' % val)
+
+def gprs_mode(val):
+ if val in ('none', 'gprs', 'egprs'):
+ return
+ raise ValueError('Unknown GPRS mode %r' % val)
+
+def codec(val):
+ if val in ('hr1', 'hr2', 'hr3', 'fr1', 'fr2', 'fr3'):
+ return
+ raise ValueError('Unknown Codec value: %r' % val)
+
+def osmo_trx_clock_ref(val):
+ if val in ('internal', 'external', 'gspdo'):
+ return
+ raise ValueError('Unknown OsmoTRX clock reference value: %r' % val)
+
+INT = 'int'
+STR = 'str'
+UINT = 'uint'
+BOOL_STR = 'bool_str'
+BAND = 'band'
+IPV4 = 'ipv4'
+HWADDR = 'hwaddr'
+IMSI = 'imsi'
+KI = 'ki'
+MSISDN = 'msisdn'
+AUTH_ALGO = 'auth_algo'
+TIMES='times'
+CIPHER = 'cipher'
+MODEM_FEATURE = 'modem_feature'
+PHY_CHAN = 'chan'
+CHAN_ALLOCATOR = 'chan_allocator'
+GPRS_MODE = 'gprs_mode'
+CODEC = 'codec'
+OSMO_TRX_CLOCK_REF = 'osmo_trx_clock_ref'
+
+SCHEMA_TYPES = {
+ INT: int,
+ STR: str,
+ UINT: uint,
+ BOOL_STR: str2bool,
+ BAND: band,
+ IPV4: ipv4,
+ HWADDR: hwaddr,
+ IMSI: imsi,
+ KI: ki,
+ MSISDN: msisdn,
+ AUTH_ALGO: auth_algo,
+ TIMES: times,
+ CIPHER: cipher,
+ MODEM_FEATURE: modem_feature,
+ PHY_CHAN: phy_channel_config,
+ CHAN_ALLOCATOR: channel_allocator,
+ GPRS_MODE: gprs_mode,
+ CODEC: codec,
+ OSMO_TRX_CLOCK_REF: osmo_trx_clock_ref,
+ }
+
+def validate(config, schema):
+ '''Make sure the given config dict adheres to the schema.
+ The schema is a dict of 'dict paths' in dot-notation with permitted
+ value type. All leaf nodes are validated, nesting dicts are implicit.
+
+ validate( { 'a': 123, 'b': { 'b1': 'foo', 'b2': [ 1, 2, 3 ] } },
+ { 'a': int,
+ 'b.b1': str,
+ 'b.b2[]': int } )
+
+ Raise a ValueError in case the schema is violated.
+ '''
+
+ def validate_item(path, value, schema):
+ want_type = schema.get(path)
+
+ if is_list(value):
+ if want_type:
+ raise ValueError('config item is a list, should be %r: %r' % (want_type, path))
+ path = path + '[]'
+ want_type = schema.get(path)
+
+ if not want_type:
+ if is_dict(value):
+ nest(path, value, schema)
+ return
+ if is_list(value) and value:
+ for list_v in value:
+ validate_item(path, list_v, schema)
+ return
+ raise ValueError('config item not known: %r' % path)
+
+ if want_type not in SCHEMA_TYPES:
+ raise ValueError('unknown type %r at %r' % (want_type, path))
+
+ if is_dict(value):
+ raise ValueError('config item is dict but should be a leaf node of type %r: %r'
+ % (want_type, path))
+
+ if is_list(value):
+ for list_v in value:
+ validate_item(path, list_v, schema)
+ return
+
+ log.ctx(path)
+ type_validator = SCHEMA_TYPES.get(want_type)
+ type_validator(value)
+
+ def nest(parent_path, config, schema):
+ if parent_path:
+ parent_path = parent_path + '.'
+ else:
+ parent_path = ''
+ for k,v in config.items():
+ if not KEY_RE.fullmatch(k):
+ raise ValueError('invalid config key: %r' % k)
+ path = parent_path + k
+ validate_item(path, v, schema)
+
+ nest(None, config, schema)
+
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/src/osmo_gsm_tester/sms.py b/src/osmo_gsm_tester/sms.py
new file mode 100644
index 0000000..0118517
--- /dev/null
+++ b/src/osmo_gsm_tester/sms.py
@@ -0,0 +1,55 @@
+# osmo_gsm_tester: DBUS client to talk to ofono
+#
+# Copyright (C) 2016-2017 by sysmocom - s.f.m.c. GmbH
+#
+# Author: Neels Hofmeyr <neels@hofmeyr.de>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+class Sms:
+ _last_sms_idx = 0
+
+ def __init__(self, src_msisdn=None, dst_msisdn=None, *tokens):
+ Sms._last_sms_idx += 1
+ self._src_msisdn = src_msisdn
+ self._dst_msisdn = dst_msisdn
+ msgs = ['message nr. %d' % Sms._last_sms_idx]
+ msgs.extend(tokens)
+ if src_msisdn:
+ msgs.append('from %s' % src_msisdn)
+ if dst_msisdn:
+ msgs.append('to %s' % dst_msisdn)
+ self.msg = ', '.join(msgs)
+
+ def __str__(self):
+ return self.msg
+
+ def __repr__(self):
+ return repr(self.msg)
+
+ def __eq__(self, other):
+ if isinstance(other, Sms):
+ return self.msg == other.msg
+ return self.msg == other
+
+ def src_msisdn(self):
+ return self._src_msisdn
+
+ def dst_msisdn(self):
+ return self._dst_msisdn
+
+ def matches(self, msg):
+ return self.msg == msg
+
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/src/osmo_gsm_tester/smsc.py b/src/osmo_gsm_tester/smsc.py
new file mode 100644
index 0000000..d154801
--- /dev/null
+++ b/src/osmo_gsm_tester/smsc.py
@@ -0,0 +1,49 @@
+# osmo_gsm_tester: smsc interface
+#
+# Copyright (C) 2016-2017 by sysmocom - s.f.m.c. GmbH
+#
+# Author: Pau Espin Pedrol <pespin@sysmocom.de>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from . import log, config
+
+class Smsc:
+
+ SMSC_POLICY_CLOSED = 'closed'
+ SMSC_POLICY_ACCEPT_ALL = 'accept-all'
+
+ def __init__(self, smpp_addr_port):
+ self.addr_port = smpp_addr_port
+ self.policy = self.SMSC_POLICY_CLOSED
+ self.esmes = []
+
+ def get_config(self):
+ values = { 'smsc': { 'policy': self.policy } }
+ esme_list = []
+ for esme in self.esmes:
+ esme_list.append(esme.conf_for_smsc())
+ config.overlay(values, dict(smsc=dict(esme_list=esme_list)))
+ return values
+
+ def esme_add(self, esme):
+ if esme.system_id == '':
+ raise log.Error('esme system_id cannot be empty')
+ self.esmes.append(esme)
+ esme.set_smsc(self)
+
+ def set_smsc_policy(self, smsc_policy):
+ self.policy = smsc_policy
+
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/src/osmo_gsm_tester/suite.py b/src/osmo_gsm_tester/suite.py
new file mode 100644
index 0000000..e5ac9a8
--- /dev/null
+++ b/src/osmo_gsm_tester/suite.py
@@ -0,0 +1,466 @@
+# osmo_gsm_tester: test suite
+#
+# Copyright (C) 2016-2017 by sysmocom - s.f.m.c. GmbH
+#
+# Author: Neels Hofmeyr <neels@hofmeyr.de>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import sys
+import time
+import pprint
+from . import config, log, util, resource, test
+from .event_loop import MainLoop
+from . import osmo_nitb, osmo_hlr, osmo_mgcpgw, osmo_mgw, osmo_msc, osmo_bsc, osmo_stp, osmo_ggsn, osmo_sgsn, modem, esme, osmocon, ms_driver, iperf3
+
+class Timeout(Exception):
+ pass
+
+class SuiteDefinition(log.Origin):
+ '''A test suite reserves resources for a number of tests.
+ Each test requires a specific number of modems, BTSs etc., which are
+ reserved beforehand by a test suite. This way several test suites can be
+ scheduled dynamically without resource conflicts arising halfway through
+ the tests.'''
+
+ CONF_FILENAME = 'suite.conf'
+
+ def __init__(self, suite_dir):
+ self.suite_dir = suite_dir
+ super().__init__(log.C_CNF, os.path.basename(self.suite_dir))
+ self.read_conf()
+
+ def read_conf(self):
+ self.dbg('reading %s' % SuiteDefinition.CONF_FILENAME)
+ if not os.path.isdir(self.suite_dir):
+ raise RuntimeError('No such directory: %r' % self.suite_dir)
+ self.conf = config.read(os.path.join(self.suite_dir,
+ SuiteDefinition.CONF_FILENAME),
+ resource.CONF_SCHEMA)
+ self.load_test_basenames()
+
+ def load_test_basenames(self):
+ self.test_basenames = []
+ for basename in sorted(os.listdir(self.suite_dir)):
+ if not basename.endswith('.py'):
+ continue
+ self.test_basenames.append(basename)
+
+class SuiteRun(log.Origin):
+ UNKNOWN = 'UNKNOWN'
+ PASS = 'PASS'
+ FAIL = 'FAIL'
+
+ def __init__(self, trial, suite_scenario_str, suite_definition, scenarios=[]):
+ super().__init__(log.C_TST, suite_scenario_str)
+ self.start_timestamp = None
+ self.duration = None
+ self.reserved_resources = None
+ self.objects_to_clean_up = None
+ self.test_import_modules_to_clean_up = []
+ self._resource_requirements = None
+ self._resource_modifiers = None
+ self._config = None
+ self._processes = None
+ self._run_dir = None
+ self.trial = trial
+ self.definition = suite_definition
+ self.scenarios = scenarios
+ self.resources_pool = resource.ResourcesPool()
+ self.status = SuiteRun.UNKNOWN
+ self.load_tests()
+
+ def load_tests(self):
+ self.tests = []
+ for test_basename in self.definition.test_basenames:
+ self.tests.append(test.Test(self, test_basename))
+
+ def register_for_cleanup(self, *obj):
+ assert all([hasattr(o, 'cleanup') for o in obj])
+ self.objects_to_clean_up = self.objects_to_clean_up or []
+ self.objects_to_clean_up.extend(obj)
+
+ def objects_cleanup(self):
+ while self.objects_to_clean_up:
+ obj = self.objects_to_clean_up.pop()
+ try:
+ obj.cleanup()
+ except Exception:
+ log.log_exn()
+
+ def test_import_modules_register_for_cleanup(self, mod):
+ '''
+ Tests are required to call this API for any module loaded from its own
+ lib subdir, because they are loaded in the global namespace. Otherwise
+ later tests importing modules with the same name will re-use an already
+ loaded module.
+ '''
+ if mod not in self.test_import_modules_to_clean_up:
+ self.dbg('registering module %r for cleanup' % mod)
+ self.test_import_modules_to_clean_up.append(mod)
+
+ def test_import_modules_cleanup(self):
+ while self.test_import_modules_to_clean_up:
+ mod = self.test_import_modules_to_clean_up.pop()
+ try:
+ self.dbg('Cleaning up module %r' % mod)
+ del sys.modules[mod.__name__]
+ del mod
+ except Exception:
+ log.log_exn()
+
+ def mark_start(self):
+ self.start_timestamp = time.time()
+ self.duration = 0
+ self.status = SuiteRun.UNKNOWN
+
+ def combined(self, conf_name, replicate_times=True):
+ log.dbg(combining=conf_name)
+ log.ctx(combining_scenarios=conf_name)
+ combination = self.definition.conf.get(conf_name, {})
+ if replicate_times:
+ combination = config.replicate_times(combination)
+ log.dbg(definition_conf=combination)
+ for scenario in self.scenarios:
+ log.ctx(combining_scenarios=conf_name, scenario=scenario.name())
+ c = scenario.get(conf_name, {})
+ if replicate_times:
+ c = config.replicate_times(c)
+ log.dbg(scenario=scenario.name(), conf=c)
+ if c is None:
+ continue
+ config.combine(combination, c)
+ return combination
+
+ def get_run_dir(self):
+ if self._run_dir is None:
+ self._run_dir = util.Dir(self.trial.get_run_dir().new_dir(self.name()))
+ return self._run_dir
+
+ def get_test_run_dir(self):
+ if self.current_test:
+ return self.current_test.get_run_dir()
+ return self.get_run_dir()
+
+ def resource_requirements(self):
+ if self._resource_requirements is None:
+ self._resource_requirements = self.combined('resources')
+ return self._resource_requirements
+
+ def resource_modifiers(self):
+ if self._resource_modifiers is None:
+ self._resource_modifiers = self.combined('modifiers')
+ return self._resource_modifiers
+
+ def config(self):
+ if self._config is None:
+ self._config = self.combined('config', False)
+ return self._config
+
+ def reserve_resources(self):
+ if self.reserved_resources:
+ raise RuntimeError('Attempt to reserve resources twice for a SuiteRun')
+ self.log('reserving resources in', self.resources_pool.state_dir, '...')
+ self.reserved_resources = self.resources_pool.reserve(self, self.resource_requirements(), self.resource_modifiers())
+
+ def run_tests(self, names=None):
+ suite_libdir = os.path.join(self.definition.suite_dir, 'lib')
+ try:
+ log.large_separator(self.trial.name(), self.name(), sublevel=2)
+ self.mark_start()
+ util.import_path_prepend(suite_libdir)
+ MainLoop.register_poll_func(self.poll)
+ if not self.reserved_resources:
+ self.reserve_resources()
+ for t in self.tests:
+ if names and not t.name() in names:
+ t.set_skip()
+ continue
+ self.current_test = t
+ t.run()
+ self.stop_processes()
+ self.objects_cleanup()
+ self.reserved_resources.put_all()
+ except Exception:
+ log.log_exn()
+ except BaseException as e:
+ # when the program is aborted by a signal (like Ctrl-C), escalate to abort all.
+ self.err('SUITE RUN ABORTED: %s' % type(e).__name__)
+ raise
+ finally:
+ # if sys.exit() called from signal handler (e.g. SIGINT), SystemExit
+ # base exception is raised. Make sure to stop processes in this
+ # finally section. Resources are automatically freed with 'atexit'.
+ self.stop_processes()
+ self.objects_cleanup()
+ self.free_resources()
+ MainLoop.unregister_poll_func(self.poll)
+ self.test_import_modules_cleanup()
+ util.import_path_remove(suite_libdir)
+ self.duration = time.time() - self.start_timestamp
+
+ passed, skipped, failed = self.count_test_results()
+ # if no tests ran, count it as failure
+ if passed and not failed:
+ self.status = SuiteRun.PASS
+ else:
+ self.status = SuiteRun.FAIL
+
+ log.large_separator(self.trial.name(), self.name(), self.status, sublevel=2, space_above=False)
+
+ def passed(self):
+ return self.status == SuiteRun.PASS
+
+ def count_test_results(self):
+ passed = 0
+ skipped = 0
+ failed = 0
+ for t in self.tests:
+ if t.status == test.Test.PASS:
+ passed += 1
+ elif t.status == test.Test.FAIL:
+ failed += 1
+ else:
+ skipped += 1
+ return (passed, skipped, failed)
+
+ def remember_to_stop(self, process, respawn=False):
+ '''Ask suite to monitor and manage lifecycle of the Process object. If a
+ process managed by suite finishes before cleanup time, the current test
+ will be marked as FAIL and end immediatelly. If respwan=True, then suite
+ will respawn() the process instead.'''
+ if self._processes is None:
+ self._processes = []
+ self._processes.insert(0, (process, respawn))
+
+ def stop_processes(self):
+ while self._processes:
+ process, respawn = self._processes.pop()
+ process.terminate()
+
+ def stop_process(self, process):
+ 'Remove process from monitored list and stop it'
+ for proc_respawn in self._processes:
+ proc, respawn = proc_respawn
+ if proc == process:
+ self._processes.remove(proc_respawn)
+ proc.terminate()
+
+ def free_resources(self):
+ if self.reserved_resources is None:
+ return
+ self.reserved_resources.free()
+
+ def ip_address(self, specifics=None):
+ return self.reserved_resources.get(resource.R_IP_ADDRESS, specifics=specifics)
+
+ def nitb(self, ip_address=None):
+ if ip_address is None:
+ ip_address = self.ip_address()
+ return osmo_nitb.OsmoNitb(self, ip_address)
+
+ def hlr(self, ip_address=None):
+ if ip_address is None:
+ ip_address = self.ip_address()
+ return osmo_hlr.OsmoHlr(self, ip_address)
+
+ def ggsn(self, ip_address=None):
+ if ip_address is None:
+ ip_address = self.ip_address()
+ return osmo_ggsn.OsmoGgsn(self, ip_address)
+
+ def sgsn(self, hlr, ggsn, ip_address=None):
+ if ip_address is None:
+ ip_address = self.ip_address()
+ return osmo_sgsn.OsmoSgsn(self, hlr, ggsn, ip_address)
+
+ def mgcpgw(self, ip_address=None, bts_ip=None):
+ if ip_address is None:
+ ip_address = self.ip_address()
+ return osmo_mgcpgw.OsmoMgcpgw(self, ip_address, bts_ip)
+
+ def mgw(self, ip_address=None):
+ if ip_address is None:
+ ip_address = self.ip_address()
+ return osmo_mgw.OsmoMgw(self, ip_address)
+
+ def msc(self, hlr, mgcpgw, stp, ip_address=None):
+ if ip_address is None:
+ ip_address = self.ip_address()
+ return osmo_msc.OsmoMsc(self, hlr, mgcpgw, stp, ip_address)
+
+ def bsc(self, msc, mgw, stp, ip_address=None):
+ if ip_address is None:
+ ip_address = self.ip_address()
+ return osmo_bsc.OsmoBsc(self, msc, mgw, stp, ip_address)
+
+ def stp(self, ip_address=None):
+ if ip_address is None:
+ ip_address = self.ip_address()
+ return osmo_stp.OsmoStp(self, ip_address)
+
+ def ms_driver(self):
+ ms = ms_driver.MsDriver(self)
+ self.register_for_cleanup(ms)
+ return ms
+
+ def bts(self, specifics=None):
+ bts = bts_obj(self, self.reserved_resources.get(resource.R_BTS, specifics=specifics))
+ bts.set_lac(self.lac())
+ bts.set_rac(self.rac())
+ bts.set_cellid(self.cellid())
+ bts.set_bvci(self.bvci())
+ self.register_for_cleanup(bts)
+ return bts
+
+ def modem(self, specifics=None):
+ conf = self.reserved_resources.get(resource.R_MODEM, specifics=specifics)
+ self.dbg('create Modem object', conf=conf)
+ ms = modem.Modem(self, conf)
+ self.register_for_cleanup(ms)
+ return ms
+
+ def modems(self, count):
+ l = []
+ for i in range(count):
+ l.append(self.modem())
+ return l
+
+ def esme(self):
+ esme_obj = esme.Esme(self.msisdn())
+ self.register_for_cleanup(esme_obj)
+ return esme_obj
+
+ def osmocon(self, specifics=None):
+ conf = self.reserved_resources.get(resource.R_OSMOCON, specifics=specifics)
+ osmocon_obj = osmocon.Osmocon(self, conf=conf)
+ self.register_for_cleanup(osmocon_obj)
+ return osmocon_obj
+
+ def iperf3srv(self, ip_address=None):
+ if ip_address is None:
+ ip_address = self.ip_address()
+ iperf3srv_obj = iperf3.IPerf3Server(self, ip_address)
+ return iperf3srv_obj
+
+ def msisdn(self):
+ msisdn = self.resources_pool.next_msisdn(self)
+ self.log('using MSISDN', msisdn)
+ return msisdn
+
+ def lac(self):
+ lac = self.resources_pool.next_lac(self)
+ self.log('using LAC', lac)
+ return lac
+
+ def rac(self):
+ rac = self.resources_pool.next_rac(self)
+ self.log('using RAC', rac)
+ return rac
+
+ def cellid(self):
+ cellid = self.resources_pool.next_cellid(self)
+ self.log('using CellId', cellid)
+ return cellid
+
+ def bvci(self):
+ bvci = self.resources_pool.next_bvci(self)
+ self.log('using BVCI', bvci)
+ return bvci
+
+ def poll(self):
+ if self._processes:
+ for process, respawn in self._processes:
+ if process.terminated():
+ if respawn == True:
+ process.respawn()
+ else:
+ process.log_stdout_tail()
+ process.log_stderr_tail()
+ log.ctx(process)
+ raise log.Error('Process ended prematurely: %s' % process.name())
+
+ def prompt(self, *msgs, **msg_details):
+ 'ask for user interaction. Do not use in tests that should run automatically!'
+ if msg_details:
+ msgs = list(msgs)
+ msgs.append('{%s}' %
+ (', '.join(['%s=%r' % (k,v)
+ for k,v in sorted(msg_details.items())])))
+ msg = ' '.join(msgs) or 'Hit Enter to continue'
+ self.log('prompt:', msg)
+ sys.__stdout__.write('\n\n--- PROMPT ---\n')
+ sys.__stdout__.write(msg)
+ sys.__stdout__.write('\n')
+ sys.__stdout__.flush()
+ entered = util.input_polling('> ', MainLoop.poll)
+ self.log('prompt entered:', repr(entered))
+ return entered
+
+ def resource_status_str(self):
+ return '\n'.join(('',
+ 'SUITE RUN: %s' % self.origin_id(),
+ 'ASKED FOR:', pprint.pformat(self._resource_requirements),
+ 'RESERVED COUNT:', pprint.pformat(self.reserved_resources.counts()),
+ 'RESOURCES STATE:', repr(self.reserved_resources)))
+
+loaded_suite_definitions = {}
+
+def load(suite_name):
+ global loaded_suite_definitions
+
+ suite = loaded_suite_definitions.get(suite_name)
+ if suite is not None:
+ return suite
+
+ suites_dir = config.get_suites_dir()
+ suite_dir = suites_dir.child(suite_name)
+ if not suites_dir.exists(suite_name):
+ raise RuntimeError('Suite not found: %r in %r' % (suite_name, suites_dir))
+ if not suites_dir.isdir(suite_name):
+ raise RuntimeError('Suite name found, but not a directory: %r' % (suite_dir))
+
+ suite_def = SuiteDefinition(suite_dir)
+ loaded_suite_definitions[suite_name] = suite_def
+ return suite_def
+
+def parse_suite_scenario_str(suite_scenario_str):
+ tokens = suite_scenario_str.split(':')
+ if len(tokens) > 2:
+ raise RuntimeError('invalid combination string: %r' % suite_scenario_str)
+
+ suite_name = tokens[0]
+ if len(tokens) <= 1:
+ scenario_names = []
+ else:
+ scenario_names = tokens[1].split('+')
+
+ return suite_name, scenario_names
+
+def load_suite_scenario_str(suite_scenario_str):
+ suite_name, scenario_names = parse_suite_scenario_str(suite_scenario_str)
+ suite = load(suite_name)
+ scenarios = [config.get_scenario(scenario_name, resource.CONF_SCHEMA) for scenario_name in scenario_names]
+ return (suite_scenario_str, suite, scenarios)
+
+def bts_obj(suite_run, conf):
+ bts_type = conf.get('type')
+ log.dbg('create BTS object', type=bts_type)
+ bts_class = resource.KNOWN_BTS_TYPES.get(bts_type)
+ if bts_class is None:
+ raise RuntimeError('No such BTS type is defined: %r' % bts_type)
+ return bts_class(suite_run, conf)
+
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/src/osmo_gsm_tester/template.py b/src/osmo_gsm_tester/template.py
new file mode 100644
index 0000000..0ddfc84
--- /dev/null
+++ b/src/osmo_gsm_tester/template.py
@@ -0,0 +1,58 @@
+# osmo_gsm_tester: automated cellular network hardware tests
+# Proxy to templating engine to handle files
+#
+# Copyright (C) 2016-2017 by sysmocom - s.f.m.c. GmbH
+#
+# Author: Neels Hofmeyr <neels@hofmeyr.de>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os, sys
+from mako.template import Template
+from mako.lookup import TemplateLookup
+
+from . import log
+from .util import dict2obj
+
+_lookup = None
+_logger = log.Origin(log.C_CNF, 'no templates dir set')
+
+def set_templates_dir(*templates_dirs):
+ global _lookup
+ global _logger
+ if not templates_dirs:
+ # default templates dir is relative to this source file
+ templates_dirs = [os.path.join(os.path.dirname(__file__), 'templates')]
+ for d in templates_dirs:
+ if not os.path.isdir(d):
+ raise RuntimeError('templates dir is not a dir: %r'
+ % os.path.abspath(d))
+ _lookup = TemplateLookup(directories=templates_dirs)
+ _logger = log.Origin(log.C_CNF, 'Templates')
+
+def render(name, values):
+    '''feed values dict into template and return rendered result.
+    ".tmpl" is added to the name to look it up in the templates dir.'''
+    global _lookup
+    if _lookup is None:
+        set_templates_dir()
+    tmpl_name = name + '.tmpl'
+    log.ctx(tmpl_name)
+    template = _lookup.get_template(tmpl_name)
+    _logger.dbg('rendering', tmpl_name)
+
+    # (removed dead local 'line_info_name': computed but never used)
+    return template.render(**dict2obj(values))
+
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/src/osmo_gsm_tester/templates/osmo-bsc.cfg.tmpl b/src/osmo_gsm_tester/templates/osmo-bsc.cfg.tmpl
new file mode 100644
index 0000000..53e683b
--- /dev/null
+++ b/src/osmo_gsm_tester/templates/osmo-bsc.cfg.tmpl
@@ -0,0 +1,122 @@
+! Configuration rendered by osmo-gsm-tester
+log stderr
+ logging filter all 1
+ logging color 1
+ logging print category 1
+ logging print extended-timestamp 1
+ logging level set-all debug
+line vty
+ no login
+ bind ${bsc.ip_address.addr}
+ctrl
+ bind ${bsc.ip_address.addr}
+e1_input
+ e1_line 0 driver ipa
+ ipa bind ${bsc.ip_address.addr}
+cs7 instance 1
+ point-code 0.0.2
+ asp asp0 2905 0 m3ua
+ remote-ip ${stp.ip_address.addr}
+ as as0 m3ua
+ asp asp0
+ routing-key 2 0.0.2
+ sccp-address bsc_local
+ point-code 0.0.2
+ routing-indicator PC
+ sccp-address msc_remote
+ point-code 0.0.1
+ routing-indicator PC
+network
+ network country code ${bsc.net.mcc}
+ mobile network code ${bsc.net.mnc}
+ encryption ${bsc.net.encryption}
+ neci 1
+ handover 0
+ handover window rxlev averaging 10
+ handover window rxqual averaging 1
+ handover window rxlev neighbor averaging 10
+ handover power budget interval 6
+ handover power budget hysteresis 3
+ handover maximum distance 9999
+%for bts in bsc.net.bts_list:
+ bts ${loop.index}
+ type ${bts.osmobsc_bts_type}
+ band ${bts.band}
+ cell_identity ${bts.cell_identity}
+ location_area_code ${bts.location_area_code}
+ training_sequence_code 7
+ base_station_id_code ${bts.base_station_id_code}
+ ms max power 33
+ cell reselection hysteresis 4
+ rxlev access min 0
+ channel allocator ${bts.channel_allocator}
+ rach tx integer 9
+ rach max transmission 7
+% if bsc.net.get('rsl_ip', False):
+ ip.access rsl-ip ${bsc.net.rsl_ip}
+% endif
+ ip.access unit_id ${bts.ipa_unit_id} 0
+ oml ip.access stream_id ${bts.stream_id} line 0
+% if bts.get('sgsn', False) and bts['gprs_mode'] != 'none':
+ gprs mode ${bts.gprs_mode}
+ gprs routing area ${bts.routing_area_code}
+ gprs network-control-order nc1
+ gprs cell bvci ${bts.bvci}
+ gprs cell timer blocking-timer 3
+ gprs cell timer blocking-retries 3
+ gprs cell timer unblocking-retries 3
+ gprs cell timer reset-timer 3
+ gprs cell timer reset-retries 3
+ gprs cell timer suspend-timer 10
+ gprs cell timer suspend-retries 3
+ gprs cell timer resume-timer 10
+ gprs cell timer resume-retries 3
+ gprs cell timer capability-update-timer 10
+ gprs cell timer capability-update-retries 3
+ gprs nsei ${bts.bvci}
+ gprs ns timer tns-block 3
+ gprs ns timer tns-block-retries 3
+ gprs ns timer tns-reset 3
+ gprs ns timer tns-reset-retries 3
+ gprs ns timer tns-test 30
+ gprs ns timer tns-alive 3
+ gprs ns timer tns-alive-retries 10
+ gprs nsvc 0 nsvci ${bts.bvci}
+ gprs nsvc 0 local udp port 23020
+ gprs nsvc 0 remote udp port 23000
+ gprs nsvc 0 remote ip ${bts.sgsn.ip_address.addr}
+% else:
+ gprs mode none
+% endif
+% for trx in bts.trx_list:
+ trx ${loop.index}
+ rf_locked 0
+ arfcn ${trx.arfcn}
+ nominal power ${trx.nominal_power}
+ max_power_red ${trx.max_power_red}
+ rsl e1 tei 0
+% for ts in trx.timeslot_list:
+ timeslot ${loop.index}
+ phys_chan_config ${ts.phys_chan_config}
+% endfor
+% endfor
+%endfor
+msc
+ core-mobile-country-code ${bsc.net.mcc}
+ core-mobile-network-code ${bsc.net.mnc}
+ ip.access rtp-base 25000
+ mgw remote-ip ${mgw.ip_address.addr}
+ mgw remote-port 2427
+ mgw local-ip ${bsc.ip_address.addr}
+ mgw endpoint-range 1 31
+ codec-list ${' '.join(bsc.net.codec_list)}
+ amr-config 12_2k forbidden
+ amr-config 10_2k forbidden
+ amr-config 7_95k forbidden
+ amr-config 7_40k forbidden
+ amr-config 6_70k forbidden
+ amr-config 5_90k allowed
+ amr-config 5_15k forbidden
+ amr-config 4_75k forbidden
+ msc-addr msc_remote
+ bsc-addr bsc_local
diff --git a/src/osmo_gsm_tester/templates/osmo-bts-octphy.cfg.tmpl b/src/osmo_gsm_tester/templates/osmo-bts-octphy.cfg.tmpl
new file mode 100644
index 0000000..2a1e2d7
--- /dev/null
+++ b/src/osmo_gsm_tester/templates/osmo-bts-octphy.cfg.tmpl
@@ -0,0 +1,54 @@
+! Configuration rendered by osmo-gsm-tester
+log stderr
+ logging color 1
+ logging print extended-timestamp 1
+ logging print category 1
+ logging level abis debug
+ logging level oml debug
+ logging level pag debug
+ logging level rll debug
+ logging level rr debug
+ logging level rsl debug
+ ! Level required by ready_for_pcu(): pcu info
+ logging level pcu info
+!
+line vty
+ bind ${osmo_bts_octphy.addr}
+ctrl
+ bind ${osmo_bts_octphy.addr}
+!
+%for phy in osmo_bts_octphy.phy_list:
+phy ${loop.index}
+ octphy hw-addr ${phy.hw_addr}
+ octphy net-device ${phy.net_device}
+ octphy rx-gain 70
+ %for inst in range(phy.num_instances):
+ instance ${loop.index}
+ %endfor
+%endfor
+bts 0
+ band ${osmo_bts_octphy.band}
+ ipa unit-id ${osmo_bts_octphy.ipa_unit_id} 0
+ oml remote-ip ${osmo_bts_octphy.oml_remote_ip}
+ pcu-socket ${osmo_bts_octphy.pcu_socket_path}
+ gsmtap-sapi bcch
+ gsmtap-sapi ccch
+ gsmtap-sapi rach
+ gsmtap-sapi agch
+ gsmtap-sapi pch
+ gsmtap-sapi sdcch
+ gsmtap-sapi tch/f
+ gsmtap-sapi tch/h
+ gsmtap-sapi pacch
+ gsmtap-sapi pdtch
+ gsmtap-sapi ptcch
+ gsmtap-sapi cbch
+ gsmtap-sapi sacch
+%for trx in osmo_bts_octphy.trx_list:
+ trx ${loop.index}
+ power-ramp max-initial 23000 mdBm
+ power-ramp step-size 2000 mdB
+ power-ramp step-interval 1
+ ms-power-control dsp
+ phy ${trx.phy_idx} instance ${trx.instance_idx}
+%endfor
diff --git a/src/osmo_gsm_tester/templates/osmo-bts-sysmo.cfg.tmpl b/src/osmo_gsm_tester/templates/osmo-bts-sysmo.cfg.tmpl
new file mode 100644
index 0000000..536e5cb
--- /dev/null
+++ b/src/osmo_gsm_tester/templates/osmo-bts-sysmo.cfg.tmpl
@@ -0,0 +1,41 @@
+! Configuration rendered by osmo-gsm-tester
+log stderr
+ logging color 1
+ logging print extended-timestamp 1
+ logging print category 1
+ logging level abis debug
+ logging level oml debug
+ logging level pag debug
+ logging level rll debug
+ logging level rr debug
+ logging level rsl debug
+ ! Level required by ready_for_pcu(): pcu info
+ logging level pcu info
+!
+line vty
+ bind ${osmo_bts_sysmo.addr}
+ctrl
+ bind ${osmo_bts_sysmo.addr}
+!
+phy 0
+ instance 0
+bts 0
+ band ${osmo_bts_sysmo.band}
+ ipa unit-id ${osmo_bts_sysmo.ipa_unit_id} 0
+ oml remote-ip ${osmo_bts_sysmo.oml_remote_ip}
+ pcu-socket ${osmo_bts_sysmo.pcu_socket_path}
+ gsmtap-sapi bcch
+ gsmtap-sapi ccch
+ gsmtap-sapi rach
+ gsmtap-sapi agch
+ gsmtap-sapi pch
+ gsmtap-sapi sdcch
+ gsmtap-sapi tch/f
+ gsmtap-sapi tch/h
+ gsmtap-sapi pacch
+ gsmtap-sapi pdtch
+ gsmtap-sapi ptcch
+ gsmtap-sapi cbch
+ gsmtap-sapi sacch
+ trx 0
+ phy 0 instance 0
diff --git a/src/osmo_gsm_tester/templates/osmo-bts-trx.cfg.tmpl b/src/osmo_gsm_tester/templates/osmo-bts-trx.cfg.tmpl
new file mode 100644
index 0000000..677b047
--- /dev/null
+++ b/src/osmo_gsm_tester/templates/osmo-bts-trx.cfg.tmpl
@@ -0,0 +1,52 @@
+! Configuration rendered by osmo-gsm-tester
+log stderr
+ logging color 1
+ logging print extended-timestamp 1
+ logging print category 1
+ logging level abis debug
+ logging level oml debug
+ logging level pag debug
+ logging level rll debug
+ logging level rr debug
+ logging level rsl debug
+ logging level l1c info
+ logging level l1p error
+ logging level trx info
+ ! Level required by ready_for_pcu(): pcu info
+ logging level pcu info
+!
+line vty
+ bind ${osmo_bts_trx.addr}
+ctrl
+ bind ${osmo_bts_trx.addr}
+!
+phy 0
+ osmotrx ip local ${osmo_bts_trx.osmo_trx.bts_ip}
+ osmotrx ip remote ${osmo_bts_trx.osmo_trx.trx_ip}
+%for chan in osmo_bts_trx.osmo_trx.channels:
+ instance ${loop.index}
+ osmotrx rx-gain 25
+ osmotrx tx-attenuation oml
+%endfor
+bts 0
+ band ${osmo_bts_trx.band}
+ ipa unit-id ${osmo_bts_trx.ipa_unit_id} 0
+ oml remote-ip ${osmo_bts_trx.oml_remote_ip}
+ pcu-socket ${osmo_bts_trx.pcu_socket_path}
+ gsmtap-sapi bcch
+ gsmtap-sapi ccch
+ gsmtap-sapi rach
+ gsmtap-sapi agch
+ gsmtap-sapi pch
+ gsmtap-sapi sdcch
+ gsmtap-sapi tch/f
+ gsmtap-sapi tch/h
+ gsmtap-sapi pacch
+ gsmtap-sapi pdtch
+ gsmtap-sapi ptcch
+ gsmtap-sapi cbch
+ gsmtap-sapi sacch
+%for chan in osmo_bts_trx.osmo_trx.channels:
+ trx ${loop.index}
+ phy 0 instance ${loop.index}
+%endfor
diff --git a/src/osmo_gsm_tester/templates/osmo-ggsn.cfg.tmpl b/src/osmo_gsm_tester/templates/osmo-ggsn.cfg.tmpl
new file mode 100644
index 0000000..782f1e8
--- /dev/null
+++ b/src/osmo_gsm_tester/templates/osmo-ggsn.cfg.tmpl
@@ -0,0 +1,78 @@
+!
+! OpenGGSN (0.94.1-adac) configuration saved from vty
+!!
+!
+log stderr
+ logging filter all 1
+ logging color 1
+ logging print category 1
+ logging print extended-timestamp 1
+ logging level ip info
+ logging level tun info
+ logging level ggsn info
+ logging level sgsn notice
+ logging level icmp6 notice
+ logging level lglobal notice
+ logging level llapd notice
+ logging level linp notice
+ logging level lmux notice
+ logging level lmi notice
+ logging level lmib notice
+ logging level lsms notice
+ logging level lctrl notice
+ logging level lgtp info
+ logging level lstats notice
+ logging level lgsup notice
+ logging level loap notice
+ logging level lss7 notice
+ logging level lsccp notice
+ logging level lsua notice
+ logging level lm3ua notice
+ logging level lmgcp notice
+ logging level set-all debug
+!
+stats interval 5
+!
+line vty
+ bind ${ggsn.ip_address.addr}
+ctrl
+ bind ${ggsn.ip_address.addr}
+!
+ggsn ggsn0
+ gtp state-dir ${ggsn.statedir}
+ gtp bind-ip ${ggsn.ip_address.addr}
+ apn internet
+ gtpu-mode tun
+ tun-device tun4
+ type-support v4
+ ip prefix dynamic 176.16.222.0/24
+ ip dns 0 8.8.8.8
+ ip dns 1 8.8.8.4
+ ip ifconfig 176.16.222.1/24
+ no shutdown
+ apn inet6
+ gtpu-mode tun
+ tun-device tun6
+ type-support v6
+ ipv6 prefix dynamic fde4:8dba:82e1:2000:0:0:0:0/56
+ ipv6 dns 0 2001:4860:4860::8888
+ ipv6 dns 1 2001:4860:4860::8844
+ ipv6 ifconfig fde4:8dba:82e1:2000:0:0:0:0/56
+ ipv6 link-local fe80::1111:1111:1111:1111/64
+ no shutdown
+ apn inet46
+ gtpu-mode tun
+ tun-device tun46
+ type-support v4v6
+ ip prefix dynamic 176.16.46.0/24
+ ip dns 0 192.168.100.1
+ ip dns 1 8.8.8.8
+ ip ifconfig 176.16.46.0/24
+ ipv6 prefix dynamic fde4:8dba:82e1:2000:0:0:0:0/56
+ ipv6 dns 0 2001:4860:4860::8888
+ ipv6 dns 1 2001:4860:4860::8844
+ ipv6 ifconfig fde4:8dba:82e1:2000:0:0:0:0/56
+ ipv6 link-local fe80::1111:1111:1111:1112/64
+ no shutdown
+ default-apn internet
+ no shutdown ggsn
diff --git a/src/osmo_gsm_tester/templates/osmo-hlr.cfg.tmpl b/src/osmo_gsm_tester/templates/osmo-hlr.cfg.tmpl
new file mode 100644
index 0000000..b573620
--- /dev/null
+++ b/src/osmo_gsm_tester/templates/osmo-hlr.cfg.tmpl
@@ -0,0 +1,16 @@
+! Configuration rendered by osmo-gsm-tester
+log stderr
+ logging filter all 1
+ logging color 1
+ logging print category 1
+ logging print extended-timestamp 1
+ logging level set-all debug
+line vty
+ no login
+ bind ${hlr.ip_address.addr}
+ctrl
+ bind ${hlr.ip_address.addr}
+hlr
+ gsup
+ bind ip ${hlr.ip_address.addr}
+ ussd route prefix *#100# internal own-msisdn
diff --git a/src/osmo_gsm_tester/templates/osmo-mgcpgw.cfg.tmpl b/src/osmo_gsm_tester/templates/osmo-mgcpgw.cfg.tmpl
new file mode 100644
index 0000000..970d53c
--- /dev/null
+++ b/src/osmo_gsm_tester/templates/osmo-mgcpgw.cfg.tmpl
@@ -0,0 +1,21 @@
+! Configuration rendered by osmo-gsm-tester
+log stderr
+ logging filter all 1
+ logging color 1
+ logging print category 1
+ logging print extended-timestamp 1
+ logging level set-all debug
+line vty
+ no login
+ bind ${mgcpgw.ip_address.addr}
+mgcp
+ local ip ${mgcpgw.ip_address.addr}
+ bts ip ${mgcpgw.bts_ip}
+ bind ip ${mgcpgw.ip_address.addr}
+ bind port 2427
+ rtp base 4000
+ rtp force-ptime 20
+ sdp audio payload number 98
+ sdp audio payload name AMR/8000
+ number endpoints 31
+ no rtcp-omit
diff --git a/src/osmo_gsm_tester/templates/osmo-mgw.cfg.tmpl b/src/osmo_gsm_tester/templates/osmo-mgw.cfg.tmpl
new file mode 100644
index 0000000..9e338e1
--- /dev/null
+++ b/src/osmo_gsm_tester/templates/osmo-mgw.cfg.tmpl
@@ -0,0 +1,16 @@
+!
+! MGCP configuration example
+!
+line vty
+ no login
+ bind ${mgw.ip_address.addr}
+mgcp
+ bind ip ${mgw.ip_address.addr}
+ bind port 2427
+ rtp net-range 4002 16000
+ rtp force-ptime 20
+ sdp audio payload number 98
+ sdp audio payload name AMR/8000
+ number endpoints 31
+ rtcp-omit
+ rtp-accept-all 1
diff --git a/src/osmo_gsm_tester/templates/osmo-mobile-lu.lua.tmpl b/src/osmo_gsm_tester/templates/osmo-mobile-lu.lua.tmpl
new file mode 100644
index 0000000..c25d799
--- /dev/null
+++ b/src/osmo_gsm_tester/templates/osmo-mobile-lu.lua.tmpl
@@ -0,0 +1,18 @@
+package.path = '${test.lua_support}/?.lua;' .. package.path
+event = require('ms_support')
+send = 1
+
+function mm_cb(new_state, new_substate, old_substate)
+ if new_state == 19 and new_substate == 1 and send == 1 then
+ send = 0
+ event.send({lu_done=1})
+ end
+end
+
+local cbs = {
+ Mm=mm_cb
+}
+osmo.ms():register(cbs)
+osmo.ms().start()
+
+event.register(osmo.ms():number(), "${test.event_path}")
diff --git a/src/osmo_gsm_tester/templates/osmo-mobile.cfg.tmpl b/src/osmo_gsm_tester/templates/osmo-mobile.cfg.tmpl
new file mode 100644
index 0000000..395d6f4
--- /dev/null
+++ b/src/osmo_gsm_tester/templates/osmo-mobile.cfg.tmpl
@@ -0,0 +1,51 @@
+no gps enable
+no hide-default
+ms ${test.ms_number}
+ layer2-socket ${test.virt_phy}
+ sim test
+ network-selection-mode auto
+ imei 000000000000000 0
+ imei-fixed
+ no emergency-imsi
+ no sms-service-center
+ no call-waiting
+ no auto-answer
+ no force-rekey
+ no clip
+ no clir
+ tx-power auto
+ no simulated-delay
+ no stick
+ location-updating
+ neighbour-measurement
+ codec full-speed prefer
+ codec half-speed
+ no abbrev
+ c7-any-timeout 0
+ no sms-store
+ support
+ sms
+ a5/1
+ a5/2
+ p-gsm
+ e-gsm
+ r-gsm
+ no gsm-850
+ dcs
+ no pcs
+ class-900 4
+ class-850 4
+ class-dcs 1
+ class-pcs 1
+ channel-capability sdcch+tchf+tchh
+ full-speech-v1
+ full-speech-v2
+ half-speech-v1
+ min-rxlev -106
+ dsc-max 90
+ no skip-max-per-band
+ test-sim
+ imsi ${test.imsi}
+ ki comp128 ${test.ki_comp128}
+ no barred-access
+ lua-script ${test.script}
diff --git a/src/osmo_gsm_tester/templates/osmo-msc.cfg.tmpl b/src/osmo_gsm_tester/templates/osmo-msc.cfg.tmpl
new file mode 100644
index 0000000..b370d5e
--- /dev/null
+++ b/src/osmo_gsm_tester/templates/osmo-msc.cfg.tmpl
@@ -0,0 +1,50 @@
+! Configuration rendered by osmo-gsm-tester
+log stderr
+ logging filter all 1
+ logging color 1
+ logging print category 1
+ logging print extended-timestamp 1
+ logging level set-all debug
+line vty
+ no login
+ bind ${msc.ip_address.addr}
+network
+ network country code ${msc.net.mcc}
+ mobile network code ${msc.net.mnc}
+ short name ${msc.net.short_name}
+ long name ${msc.net.long_name}
+ encryption ${msc.net.encryption}
+ authentication ${msc.net.authentication}
+cs7 instance 0
+ point-code 0.0.1
+ asp asp0 2905 0 m3ua
+ remote-ip ${stp.ip_address.addr}
+ as as0 m3ua
+ asp asp0
+ routing-key 1 0.0.1
+msc
+ mgw remote-ip ${mgw.ip_address.addr}
+ mgw remote-port 2427
+ mgw local-ip ${msc.ip_address.addr}
+ mgw endpoint-range 1 31
+ mgw bts-base 8000
+ assign-tmsi
+ cs7-instance-iu 0
+ cs7-instance-a 0
+ctrl
+ bind ${msc.ip_address.addr}
+smpp
+ local-tcp-ip ${msc.ip_address.addr} 2775
+ system-id test-msc
+ policy ${smsc.policy}
+%for esme in smsc.esme_list:
+ esme ${esme.system_id}
+% if esme.password == '':
+ no password
+% else:
+ password ${esme.password}
+% endif
+ default-route
+%endfor
+hlr
+ remote-ip ${hlr.ip_address.addr}
diff --git a/src/osmo_gsm_tester/templates/osmo-nitb.cfg.tmpl b/src/osmo_gsm_tester/templates/osmo-nitb.cfg.tmpl
new file mode 100644
index 0000000..ee7381f
--- /dev/null
+++ b/src/osmo_gsm_tester/templates/osmo-nitb.cfg.tmpl
@@ -0,0 +1,110 @@
+! Configuration rendered by osmo-gsm-tester
+password foo
+!
+log stderr
+ logging filter all 1
+ logging color 1
+ logging print category 1
+ logging print extended-timestamp 1
+ logging level set-all debug
+!
+line vty
+ no login
+ bind ${nitb.ip_address.addr}
+!
+e1_input
+ e1_line 0 driver ipa
+ ipa bind ${nitb.ip_address.addr}
+network
+ network country code ${nitb.net.mcc}
+ mobile network code ${nitb.net.mnc}
+ short name ${nitb.net.short_name}
+ long name ${nitb.net.long_name}
+ auth policy ${nitb.net.auth_policy}
+ location updating reject cause 13
+ encryption ${nitb.net.encryption}
+ neci 1
+ rrlp mode none
+ mm info 1
+ handover 0
+ handover window rxlev averaging 10
+ handover window rxqual averaging 1
+ handover window rxlev neighbor averaging 10
+ handover power budget interval 6
+ handover power budget hysteresis 3
+ handover maximum distance 9999
+%for bts in nitb.net.bts_list:
+ bts ${loop.index}
+ type ${bts.osmobsc_bts_type}
+ band ${bts.band}
+ cell_identity ${bts.cell_identity}
+ location_area_code ${bts.location_area_code}
+ training_sequence_code 7
+ base_station_id_code ${bts.base_station_id_code}
+ ms max power 33
+ cell reselection hysteresis 4
+ rxlev access min 0
+ channel allocator ascending
+ rach tx integer 9
+ rach max transmission 7
+ ip.access unit_id ${bts.ipa_unit_id} 0
+ oml ip.access stream_id ${bts.stream_id} line 0
+% if bts.get('sgsn', False):
+ gprs mode gprs
+ gprs routing area ${bts.routing_area_code}
+ gprs network-control-order nc1
+ gprs cell bvci ${bts.bvci}
+ gprs cell timer blocking-timer 3
+ gprs cell timer blocking-retries 3
+ gprs cell timer unblocking-retries 3
+ gprs cell timer reset-timer 3
+ gprs cell timer reset-retries 3
+ gprs cell timer suspend-timer 10
+ gprs cell timer suspend-retries 3
+ gprs cell timer resume-timer 10
+ gprs cell timer resume-retries 3
+ gprs cell timer capability-update-timer 10
+ gprs cell timer capability-update-retries 3
+ gprs nsei ${bts.bvci}
+ gprs ns timer tns-block 3
+ gprs ns timer tns-block-retries 3
+ gprs ns timer tns-reset 3
+ gprs ns timer tns-reset-retries 3
+ gprs ns timer tns-test 30
+ gprs ns timer tns-alive 3
+ gprs ns timer tns-alive-retries 10
+ gprs nsvc 0 nsvci ${bts.bvci}
+ gprs nsvc 0 local udp port 23020
+ gprs nsvc 0 remote udp port 23000
+ gprs nsvc 0 remote ip ${bts.sgsn.ip_address.addr}
+% else:
+ gprs mode none
+% endif
+% for trx in bts.trx_list:
+ trx ${loop.index}
+ rf_locked 0
+ arfcn ${trx.arfcn}
+ nominal power ${trx.nominal_power}
+ max_power_red ${trx.max_power_red}
+ rsl e1 tei 0
+% for ts in trx.timeslot_list:
+ timeslot ${loop.index}
+ phys_chan_config ${ts.phys_chan_config}
+% endfor
+% endfor
+%endfor
+smpp
+ local-tcp-ip ${nitb.ip_address.addr} 2775
+ system-id test-nitb
+ policy ${smsc.policy}
+%for esme in smsc.esme_list:
+ esme ${esme.system_id}
+% if esme.password == '':
+ no password
+% else:
+ password ${esme.password}
+% endif
+ default-route
+%endfor
+ctrl
+ bind ${nitb.ip_address.addr}
diff --git a/src/osmo_gsm_tester/templates/osmo-pcu-sysmo.cfg.tmpl b/src/osmo_gsm_tester/templates/osmo-pcu-sysmo.cfg.tmpl
new file mode 100644
index 0000000..bcb3d7c
--- /dev/null
+++ b/src/osmo_gsm_tester/templates/osmo-pcu-sysmo.cfg.tmpl
@@ -0,0 +1,12 @@
+log stderr
+ logging color 1
+ logging print extended-timestamp 1
+ logging print category 1
+ logging level set-all info
+pcu
+ pcu-socket ${osmo_pcu_sysmo.pcu_socket_path}
+ flow-control-interval 10
+ cs 2
+ alloc-algorithm dynamic
+ alpha 0
+ gamma 0
diff --git a/src/osmo_gsm_tester/templates/osmo-pcu.cfg.tmpl b/src/osmo_gsm_tester/templates/osmo-pcu.cfg.tmpl
new file mode 100644
index 0000000..da7425c
--- /dev/null
+++ b/src/osmo_gsm_tester/templates/osmo-pcu.cfg.tmpl
@@ -0,0 +1,12 @@
+log stderr
+ logging color 1
+ logging print extended-timestamp 1
+ logging print category 1
+ logging level set-all info
+pcu
+ pcu-socket ${osmo_pcu.pcu_socket_path}
+ flow-control-interval 10
+ cs 2
+ alloc-algorithm dynamic
+ alpha 0
+ gamma 0
diff --git a/src/osmo_gsm_tester/templates/osmo-sgsn.cfg.tmpl b/src/osmo_gsm_tester/templates/osmo-sgsn.cfg.tmpl
new file mode 100644
index 0000000..a59b7cd
--- /dev/null
+++ b/src/osmo_gsm_tester/templates/osmo-sgsn.cfg.tmpl
@@ -0,0 +1,34 @@
+!
+! Osmocom SGSN configuration
+!
+!
+log stderr
+ logging filter all 1
+ logging color 1
+ logging print category 1
+ logging print extended-timestamp 1
+ logging level set-all debug
+line vty
+ no login
+sgsn
+ gtp local-ip ${sgsn.ip_address.addr}
+ ggsn 0 remote-ip ${ggsn.ip_address.addr}
+ ggsn 0 gtp-version 1
+ auth-policy remote
+ gsup remote-ip ${hlr.ip_address.addr}
+ gsup remote-port 4222
+!
+ns
+ timer tns-block 3
+ timer tns-block-retries 3
+ timer tns-reset 3
+ timer tns-reset-retries 3
+ timer tns-test 30
+ timer tns-alive 3
+ timer tns-alive-retries 10
+ encapsulation udp local-ip ${sgsn.ip_address.addr}
+ encapsulation udp local-port 23000
+ encapsulation framerelay-gre enabled 0
+!
+bssgp
+!
diff --git a/src/osmo_gsm_tester/templates/osmo-stp.cfg.tmpl b/src/osmo_gsm_tester/templates/osmo-stp.cfg.tmpl
new file mode 100644
index 0000000..c180e82
--- /dev/null
+++ b/src/osmo_gsm_tester/templates/osmo-stp.cfg.tmpl
@@ -0,0 +1,17 @@
+! Configuration rendered by osmo-gsm-tester
+log stderr
+ logging filter all 1
+ logging color 1
+ logging print category 1
+ logging print extended-timestamp 1
+ logging level set-all debug
+line vty
+ no login
+ bind ${stp.ip_address.addr}
+!ctrl
+! bind ${stp.ip_address.addr}
+cs7 instance 0
+ xua rkm routing-key-allocation dynamic-permitted
+ listen m3ua 2905
+ accept-asp-connections dynamic-permitted
+ local-ip ${stp.ip_address.addr}
diff --git a/src/osmo_gsm_tester/templates/osmo-trx.cfg.tmpl b/src/osmo_gsm_tester/templates/osmo-trx.cfg.tmpl
new file mode 100644
index 0000000..edfc0ea
--- /dev/null
+++ b/src/osmo_gsm_tester/templates/osmo-trx.cfg.tmpl
@@ -0,0 +1,42 @@
+!
+! OsmoTRX example configuration
+!
+log stderr
+ logging filter all 1
+ logging color 1
+ logging print category 1
+ logging timestamp 1
+ logging print file basename
+ logging print extended-timestamp 1
+ logging level set-all info
+!
+line vty
+ bind ${osmo_trx.trx_ip}
+ctrl
+ bind ${osmo_trx.trx_ip}
+trx
+ bind-ip ${osmo_trx.trx_ip}
+ remote-ip ${osmo_trx.bts_ip}
+ base-port 5700
+ egprs ${osmo_trx.egprs}
+%if osmo_trx.get('multi_arfcn', False):
+ multi-arfcn enable
+%else:
+ multi-arfcn disable
+%endif
+%if osmo_trx.get('dev_args', False):
+ dev-args ${osmo_trx.dev_args}
+%endif
+ tx-sps 4
+ rx-sps 4
+ clock-ref ${osmo_trx.clock_reference}
+ rt-prio 18
+%for chan in osmo_trx.channels:
+ chan ${loop.index}
+% if chan.get('tx_path', False):
+ tx-path ${chan.tx_path}
+% endif
+% if chan.get('rx_path', False):
+ rx-path ${chan.rx_path}
+ %endif
+%endfor
diff --git a/src/osmo_gsm_tester/test.py b/src/osmo_gsm_tester/test.py
new file mode 100644
index 0000000..6f141f1
--- /dev/null
+++ b/src/osmo_gsm_tester/test.py
@@ -0,0 +1,116 @@
+# osmo_gsm_tester: test class
+#
+# Copyright (C) 2017 by sysmocom - s.f.m.c. GmbH
+#
+# Author: Pau Espin Pedrol <pespin@sysmocom.de>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import sys
+import time
+import traceback
+from . import testenv
+
+from . import log, util, resource
+
+class Test(log.Origin):
+ '''A single test script within a suite run: executes the script file,
+ tracks status (UNKNOWN/skip/pass/FAIL), duration and failure details.'''
+ # status values; note PASS/SKIP are lowercase while FAIL is uppercase,
+ # presumably so failures stand out in report output — confirm.
+ UNKNOWN = 'UNKNOWN'
+ SKIP = 'skip'
+ PASS = 'pass'
+ FAIL = 'FAIL'
+
+ def __init__(self, suite_run, test_basename):
+ '''Bind this test to its suite run; the script path is resolved as
+ <suite_dir>/<test_basename>.'''
+ self.basename = test_basename
+ super().__init__(log.C_TST, self.basename)
+ self._run_dir = None
+ self.suite_run = suite_run
+ self.path = os.path.join(self.suite_run.definition.suite_dir, self.basename)
+ self.status = Test.UNKNOWN
+ self.start_timestamp = 0
+ self.duration = 0
+ self.fail_type = None
+ self.fail_message = None
+
+ def get_run_dir(self):
+ '''Return (lazily creating) this test's own dir below the suite run dir.'''
+ if self._run_dir is None:
+ self._run_dir = util.Dir(self.suite_run.get_run_dir().new_dir(self._name))
+ return self._run_dir
+
+ def run(self):
+ '''Execute the test script and record pass/fail.
+ Any Exception marks the test FAILed with message and traceback; a
+ BaseException (e.g. KeyboardInterrupt) is re-raised to abort the
+ whole run.'''
+ try:
+ log.large_separator(self.suite_run.trial.name(), self.suite_run.name(), self.name(), sublevel=3)
+ self.status = Test.UNKNOWN
+ self.start_timestamp = time.time()
+ # imported here (not at module level), presumably to avoid import
+ # cycles between test/suite/process modules — confirm.
+ from . import suite, sms, process
+ from .event_loop import MainLoop
+ # populate the testenv module globals that the script will use via
+ # 'from osmo_gsm_tester.testenv import *'
+ testenv.setup(self.suite_run, self, suite, MainLoop, sms, process)
+ with self.redirect_stdout():
+ util.run_python_file('%s.%s' % (self.suite_run.definition.name(), self.basename),
+ self.path)
+ # a script may have set FAIL/SKIP itself; only default to pass
+ if self.status == Test.UNKNOWN:
+ self.set_pass()
+ except Exception as e:
+ # prefer a project-style .msg attribute over str(e) when present
+ if hasattr(e, 'msg'):
+ msg = e.msg
+ else:
+ msg = str(e)
+ if isinstance(e, AssertionError):
+ # AssertionError lacks further information on what was
+ # asserted. Find the line where the code asserted:
+ msg += log.get_src_from_exc_info(sys.exc_info())
+ # add source file information to failure report
+ if hasattr(e, 'origins'):
+ msg += ' [%s]' % e.origins
+ tb_str = traceback.format_exc()
+ if isinstance(e, resource.NoResourceExn):
+ # append which resources were in use, to aid debugging
+ tb_str += self.suite_run.resource_status_str()
+ self.set_fail(type(e).__name__, msg, tb_str, log.get_src_from_exc_info())
+ except BaseException as e:
+ # when the program is aborted by a signal (like Ctrl-C), escalate to abort all.
+ self.err('TEST RUN ABORTED: %s' % type(e).__name__)
+ raise
+
+ def name(self):
+ '''Return the test name, suffixed with the currently executing source
+ line of the script when one is known (e.g. "mo_mt_sms.py:23").'''
+ l = log.get_line_for_src(self.path)
+ if l is not None:
+ return '%s:%s' % (self._name, l)
+ return super().name()
+
+ def set_fail(self, fail_type, fail_message, tb_str=None, src=4):
+ '''Mark this test FAILed, recording duration, failure type/message and
+ a traceback (synthesized from the current stack if none is given).
+ src is the stack depth passed to the logger for source attribution.'''
+ self.status = Test.FAIL
+ self.duration = time.time() - self.start_timestamp
+ self.fail_type = fail_type
+ self.fail_message = fail_message
+
+ if tb_str is None:
+ # populate an exception-less call to set_fail() with traceback info
+ tb_str = ''.join(traceback.format_stack()[:-1])
+
+ self.fail_tb = tb_str
+ self.err('%s: %s' % (self.fail_type, self.fail_message), _src=src)
+ if self.fail_tb:
+ self.log(self.fail_tb, _level=log.L_TRACEBACK)
+ self.log('Test FAILED (%.1f sec)' % self.duration)
+
+ def set_pass(self):
+ '''Mark this test passed and record its duration.'''
+ self.status = Test.PASS
+ self.duration = time.time() - self.start_timestamp
+ self.log('Test passed (%.1f sec)' % self.duration)
+
+ def set_skip(self):
+ '''Mark this test as skipped; a skipped test has no duration.'''
+ self.status = Test.SKIP
+ self.duration = 0
+
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/src/osmo_gsm_tester/testenv.py b/src/osmo_gsm_tester/testenv.py
new file mode 100644
index 0000000..ceea028
--- /dev/null
+++ b/src/osmo_gsm_tester/testenv.py
@@ -0,0 +1,57 @@
+# osmo_gsm_tester: context for individual test runs
+#
+# Copyright (C) 2016-2017 by sysmocom - s.f.m.c. GmbH
+#
+# Author: Neels Hofmeyr <neels@hofmeyr.de>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+# These will be initialized before each test run.
+# A test script can thus establish its context by doing:
+# from osmo_gsm_tester.testenv import *
+# Placeholders for the per-test-run context; all are (re)assigned by setup()
+# before each test script executes.
+trial = None
+suite = None
+test = None
+resources = None
+log = None
+dbg = None
+err = None
+wait = None
+wait_no_raise = None
+sleep = None
+poll = None
+prompt = None
+Timeout = None
+Sms = None
+process = None
+
+def setup(suite_run, _test, suite_module, event_module, sms_module, process_module):
+ '''Populate this module's globals with the context of one test run, so a
+ test script can use them via "from osmo_gsm_tester.testenv import *".
+ The suite/event/sms/process modules are passed in as arguments,
+ presumably to avoid import cycles — confirm.'''
+ global trial, suite, test, resources, log, dbg, err, wait, wait_no_raise, sleep, poll, prompt, Timeout, Sms, process
+ trial = suite_run.trial
+ suite = suite_run
+ test = _test
+ resources = suite_run.reserved_resources
+ # logging shortcuts bound to the current test's origin
+ log = test.log
+ dbg = test.dbg
+ err = test.err
+ # event-loop helpers pre-bound to this suite run
+ wait = lambda *args, **kwargs: event_module.wait(suite_run, *args, **kwargs)
+ wait_no_raise = lambda *args, **kwargs: event_module.wait_no_raise(suite_run, *args, **kwargs)
+ sleep = lambda *args, **kwargs: event_module.sleep(suite_run, *args, **kwargs)
+ poll = event_module.poll
+ prompt = suite_run.prompt
+ Timeout = suite_module.Timeout
+ Sms = sms_module.Sms
+ process = process_module
+
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/src/osmo_gsm_tester/trial.py b/src/osmo_gsm_tester/trial.py
new file mode 100644
index 0000000..149d34c
--- /dev/null
+++ b/src/osmo_gsm_tester/trial.py
@@ -0,0 +1,210 @@
+# osmo_gsm_tester: trial: directory of binaries to be tested
+#
+# Copyright (C) 2016-2017 by sysmocom - s.f.m.c. GmbH
+#
+# Author: Neels Hofmeyr <neels@hofmeyr.de>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import time
+import shutil
+import tarfile
+
+from . import log, util, suite, report
+
+FILE_MARK_TAKEN = 'taken'
+FILE_CHECKSUMS = 'checksums.md5'
+TIMESTAMP_FMT = '%Y-%m-%d_%H-%M-%S'
+FILE_LAST_RUN = 'last_run'
+FILE_LOG = 'log'
+FILE_LOG_BRIEF = 'log_brief'
+
+class Trial(log.Origin):
+ '''A trial: a directory of binary tarballs to be tested. Verifies the
+ directory's md5 checksums, unpacks binaries on demand, runs suites and
+ aggregates their status into PASS/FAIL. Usable as a context manager to
+ set up/tear down per-trial log file targets.'''
+ UNKNOWN = 'UNKNOWN'
+ PASS = 'PASS'
+ FAIL = 'FAIL'
+
+ @staticmethod
+ def next(trials_dir):
+ '''Return the oldest not-yet-taken trial below trials_dir, marked as
+ taken, or None if all have been taken. Serialized via the dir lock.'''
+
+ with trials_dir.lock('Trial.next'):
+ trials = [e for e in trials_dir.children()
+ if trials_dir.isdir(e) and not trials_dir.exists(e, FILE_MARK_TAKEN)]
+ if not trials:
+ return None
+ # sort by time to get the one that waited longest
+ trials.sort(key=lambda e: os.path.getmtime(trials_dir.child(e)))
+ next_trial = trials[0]
+ return Trial(trials_dir.child(next_trial)).take()
+
+ def __init__(self, trial_dir):
+ '''Wrap the given trial directory; does not verify or take it yet.'''
+ self.path = os.path.abspath(trial_dir)
+ super().__init__(log.C_TST, os.path.basename(self.path))
+ self.dir = util.Dir(self.path)
+ # binaries get unpacked below <trial>/inst/<bin_name>
+ self.inst_dir = util.Dir(self.dir.child('inst'))
+ self.bin_tars = []
+ self.suites = []
+ self.status = Trial.UNKNOWN
+ self._run_dir = None
+ self.log_targets = None
+
+ def __repr__(self):
+ return self.name()
+
+ def __enter__(self):
+ '''add a log target to log to the run dir, write taken marker, log a
+ starting separator.'''
+ run_dir = self.get_run_dir()
+ detailed_log = run_dir.new_child(FILE_LOG)
+ # two targets: a full debug log, and a brief log without source info
+ self.log_targets = [
+ log.FileLogTarget(detailed_log)
+ .set_all_levels(log.L_DBG)
+ .style_change(trace=True),
+ log.FileLogTarget(run_dir.new_child(FILE_LOG_BRIEF))
+ .style_change(src=False, all_origins_on_levels=(log.L_ERR, log.L_TRACEBACK))
+ ]
+ log.large_separator(self.name(), sublevel=1)
+ self.log('Detailed log at', detailed_log)
+ self.take()
+ return self
+
+ def __exit__(self, *exc_info):
+ '''log a report, then remove log file targets for this trial'''
+ self.log_report()
+ for lt in self.log_targets:
+ lt.remove()
+ self.log_targets = None
+
+ def take(self):
+ '''Mark this trial dir as taken; returns self for chaining.'''
+ self.dir.touch(FILE_MARK_TAKEN)
+ return self
+
+ def get_run_dir(self):
+ '''Return (lazily creating) a timestamped run dir below the trial dir,
+ maintaining a "last_run" symlink pointing at the newest one.'''
+ if self._run_dir is not None:
+ return self._run_dir
+ self._run_dir = util.Dir(self.dir.new_child('run.%s' % time.strftime(TIMESTAMP_FMT)))
+ self._run_dir.mkdir()
+
+ last_run = self.dir.child(FILE_LAST_RUN)
+ if os.path.islink(last_run):
+ os.remove(last_run)
+ # only symlink if nothing else occupies the "last_run" name
+ if not os.path.exists(last_run):
+ os.symlink(self.dir.rel_path(self._run_dir.path), last_run)
+ return self._run_dir
+
+ def verify(self):
+ "verify checksums"
+
+ if not self.dir.exists():
+ raise RuntimeError('Trial dir does not exist: %r' % self.dir)
+ if not self.dir.isdir():
+ raise RuntimeError('Trial dir is not a dir: %r' % self.dir)
+
+ checksums = self.dir.child(FILE_CHECKSUMS)
+ if not self.dir.isfile(FILE_CHECKSUMS):
+ # NOTE(review): ','-passing checksums makes it a second exception
+ # arg; '%r' is never substituted — should be '%' formatting.
+ raise RuntimeError('No checksums file in trial dir: %r', checksums)
+
+ with open(checksums, 'r') as f:
+ line_nr = 0
+ for line in [l.strip() for l in f.readlines()]:
+ line_nr += 1
+ if not line:
+ continue
+ # md5sum output format: "<md5> <filename>"
+ md5, filename = line.split(' ')
+ file_path = self.dir.child(filename)
+
+ if not self.dir.isfile(filename):
+ raise RuntimeError('File listed in checksums file but missing in trials dir:'
+ ' %r vs. %r line %d' % (file_path, checksums, line_nr))
+
+ if md5 != util.md5_of_file(file_path):
+ raise RuntimeError('Checksum mismatch for %r vs. %r line %d'
+ % (file_path, checksums, line_nr))
+
+ # remember binary tarballs for later lookup by has_bin_tar()
+ if filename.endswith('.tgz'):
+ self.bin_tars.append(filename)
+
+ def has_bin_tar(self, bin_name):
+ '''Return the path of the single tarball named "<bin_name>.*", None if
+ absent; raise RuntimeError on ambiguity or if it is not a file.'''
+ bin_tar_start = '%s.' % bin_name
+ matches = [t for t in self.bin_tars if t.startswith(bin_tar_start)]
+ self.dbg(bin_name=bin_name, matches=matches)
+ if not matches:
+ return None
+ if len(matches) > 1:
+ raise RuntimeError('More than one match for bin name %r: %r' % (bin_name, matches))
+ bin_tar = matches[0]
+ bin_tar_path = self.dir.child(bin_tar)
+ if not os.path.isfile(bin_tar_path):
+ raise RuntimeError('Not a file or missing: %r' % bin_tar_path)
+ return bin_tar_path
+
+ def get_inst(self, bin_name):
+ '''Return the unpacked install dir for bin_name, extracting its
+ tarball on first use; raise RuntimeError if no such tarball exists.'''
+ bin_tar = self.has_bin_tar(bin_name)
+ if not bin_tar:
+ raise RuntimeError('No such binary available: %r' % bin_name)
+ inst_dir = self.inst_dir.child(bin_name)
+
+ if os.path.isdir(inst_dir):
+ # already unpacked
+ return inst_dir
+
+ t = None
+ try:
+ os.makedirs(inst_dir)
+ # NOTE(review): extractall() without member sanitization trusts
+ # the tarball contents; presumably acceptable since checksums
+ # were verified and trials come from a trusted builder — confirm.
+ t = tarfile.open(bin_tar)
+ t.extractall(inst_dir)
+ return inst_dir
+
+ except:
+ # remove the partial extraction so a retry starts clean
+ shutil.rmtree(inst_dir)
+ raise
+ finally:
+ # NOTE(review): bare 'except' clauses here also swallow
+ # KeyboardInterrupt/SystemExit — consider 'except Exception'.
+ if t:
+ try:
+ t.close()
+ except:
+ pass
+
+ def add_suite_run(self, suite_scenario_str, suite_def, scenarios):
+ '''Queue a SuiteRun built from the given definition and scenarios.'''
+ suite_run = suite.SuiteRun(self, suite_scenario_str, suite_def, scenarios)
+ self.suites.append(suite_run)
+
+ def run_suites(self, names=None):
+ '''Run all queued suites (optionally only tests matching names); any
+ non-PASS suite makes the trial FAIL. A JUnit XML report is always
+ written to the run dir, even on abort.'''
+ self.status = Trial.UNKNOWN
+ try:
+ for suite_run in self.suites:
+ try:
+ suite_run.run_tests(names)
+ except BaseException as e:
+ # when the program is aborted by a signal (like Ctrl-C), escalate to abort all.
+ self.err('TRIAL RUN ABORTED: %s' % type(e).__name__)
+ # log the traceback before the trial's logging is ended
+ log.log_exn()
+ raise
+ finally:
+ if suite_run.status != suite.SuiteRun.PASS:
+ self.status = Trial.FAIL
+ if self.status == Trial.UNKNOWN:
+ self.status = Trial.PASS
+ finally:
+ junit_path = self.get_run_dir().new_file(self.name()+'.xml')
+ self.log('Storing JUnit report in', junit_path)
+ report.trial_to_junit_write(self, junit_path)
+
+ def log_report(self):
+ '''Log a final separator with the trial status and the text report.'''
+ log.large_separator(self.name(), self.status)
+ self.log(report.trial_to_text(self))
+
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/src/osmo_gsm_tester/util.py b/src/osmo_gsm_tester/util.py
new file mode 100644
index 0000000..27c71cf
--- /dev/null
+++ b/src/osmo_gsm_tester/util.py
@@ -0,0 +1,417 @@
+# osmo_gsm_tester: language snippets
+#
+# Copyright (C) 2016-2017 by sysmocom - s.f.m.c. GmbH
+#
+# Author: Neels Hofmeyr <neels@hofmeyr.de>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import sys
+import time
+import fcntl
+import hashlib
+import tempfile
+import shutil
+import atexit
+import threading
+import importlib.util
+import fcntl
+import tty
+import readline
+import subprocess
+
# This mirrors enum osmo_auth_algo in libosmocore/include/osmocom/crypt/auth.h
# so that the index within the tuple matches the enum value.
OSMO_AUTH_ALGO_NONE = 'none'
ENUM_OSMO_AUTH_ALGO = (OSMO_AUTH_ALGO_NONE, 'comp128v1', 'comp128v2', 'comp128v3', 'xor', 'milenage')

def osmo_auth_algo_by_name(algo_str):
    'Return enum osmo_auth_algo numeric value as from libosmocore, raise ValueError if not defined.'
    # tuple.index() raises ValueError for unknown names; lookup is case-insensitive.
    return ENUM_OSMO_AUTH_ALGO.index(algo_str.lower())
+
def prepend_library_path(path):
    '''Return the value of LD_LIBRARY_PATH with path prepended.
    Note: only computes the value, the environment is not modified.'''
    lp = os.getenv('LD_LIBRARY_PATH')
    if not lp:
        return path
    return path + ':' + lp
+
def change_elf_rpath(binary, paths, run_dir):
    '''
    Change RPATH field in ELF executable binary.
    This feature can be used to tell the loader to load the trial libraries, as
    LD_LIBRARY_PATH is disabled for paths with modified capabilities.
    Requires the patchelf tool to be installed on the host.
    '''
    from .process import Process
    proc = Process('patchelf', run_dir, ['patchelf', '--set-rpath', paths, binary])
    proc.launch()
    proc.wait()
    if proc.result != 0:
        raise RuntimeError('patchelf finished with err code %d' % proc.result)
+
def ip_to_iface(ip):
    '''Return the name of the local network interface carrying the given IP
    address, or None if no interface matches (or running `ip` fails).'''
    try:
        for iface in os.listdir('/sys/class/net'):
            proc = subprocess.Popen(['ip', 'addr', 'show', 'dev', iface], stdout=subprocess.PIPE, universal_newlines=True)
            for line in proc.stdout.readlines():
                # matching 'inet <ip>/<prefix> ...' line: the interface name
                # is the last token of that line
                if 'inet' in line and ' ' + ip + '/' in line:
                    return line.split()[-1]
    except Exception as e:
        # deliberate best-effort lookup: any failure means "not found"
        pass
    return None
+
def dst_ip_get_local_bind(ip):
    '''Retrieve default IP addr to bind to in order to route traffic to dst addr.
    Parses the 'src <addr>' field of `ip route get <ip>`; returns None on any
    failure (best effort).'''
    try:
        proc = subprocess.Popen(['ip', 'route', 'get', ip], stdout=subprocess.PIPE, universal_newlines=True)
        output = proc.stdout.readlines()
        words = output[0].split()
        i = 0
        while i < len(words):
            if words[i] == 'src':
                return words[i+1]
            i += 1
    except Exception as e:
        # deliberate best-effort: swallow and report "unknown"
        pass
    return None
+
def setcap_net_raw(binary, run_dir):
    '''
    Grant CAP_NET_RAW to the binary via a sudo wrapper script.
    This functionality requires specific setup on the host running
    osmo-gsm-tester. See osmo-gsm-tester manual for more information.
    '''
    from .process import Process
    SETCAP_NET_RAW_BIN = 'osmo-gsm-tester_setcap_net_raw.sh'
    proc = Process(SETCAP_NET_RAW_BIN, run_dir, ['sudo', SETCAP_NET_RAW_BIN, binary])
    proc.launch()
    proc.wait()
    if proc.result != 0:
        raise RuntimeError('%s finished with err code %d' % (SETCAP_NET_RAW_BIN, proc.result))
+
def setcap_net_admin(binary, run_dir):
    '''
    Grant CAP_NET_ADMIN to the binary via a sudo wrapper script.
    This functionality requires specific setup on the host running
    osmo-gsm-tester. See osmo-gsm-tester manual for more information.
    '''
    from .process import Process
    SETCAP_NET_ADMIN_BIN = 'osmo-gsm-tester_setcap_net_admin.sh'
    proc = Process(SETCAP_NET_ADMIN_BIN, run_dir, ['sudo', SETCAP_NET_ADMIN_BIN, binary])
    proc.launch()
    proc.wait()
    if proc.result != 0:
        raise RuntimeError('%s finished with err code %d' % (SETCAP_NET_ADMIN_BIN, proc.result))
+
def import_path_prepend(pathname):
    'Put the resolved directory at the front of sys.path, unless present.'
    real_dir = os.path.realpath(pathname)
    if real_dir not in sys.path:
        sys.path.insert(0, real_dir)
+
def import_path_remove(pathname):
    'Drop the resolved directory from sys.path; no-op when absent.'
    real_dir = os.path.realpath(pathname)
    if real_dir in sys.path:
        sys.path.remove(real_dir)
+
class listdict(dict):
    'a dict of lists { "a": [1, 2, 3], "b": [1, 2] }'

    def add(self, name, item):
        '''Append item to the list stored under name, creating the list on
        first use. Returns that list.'''
        bucket = self.get(name)
        if not bucket:
            bucket = []
            self[name] = bucket
        bucket.append(item)
        return bucket

    def add_dict(self, d):
        'Add every (key, value) pair of d via add().'
        for key, value in d.items():
            self.add(key, value)
+
class DictProxy:
    '''
    allow accessing dict entries like object members
    syntactical sugar, adapted from http://stackoverflow.com/a/31569634
    so that e.g. templates can do ${bts.member} instead of ${bts['member']}
    '''
    def __init__(self, obj):
        self.obj = obj

    def __getitem__(self, key):
        # nested dicts/lists are wrapped again, so chains like a.b.c work
        return dict2obj(self.obj[key])

    def __getattr__(self, key):
        'provide error information to know which template item was missing'
        try:
            # first try a real attribute of the wrapped object ...
            return dict2obj(getattr(self.obj, key))
        except AttributeError:
            try:
                # ... then fall back to item lookup (dict key / list index)
                return self[key]
            except KeyError:
                # report the missing key as AttributeError so templates see
                # which member name failed
                raise AttributeError(key)
+
def dict2obj(value):
    'Wrap dicts and lists/tuples in a DictProxy; return other values as-is.'
    if is_list(value) or is_dict(value):
        return DictProxy(value)
    return value
+
+
class FileLock:
    '''flock(2) based file lock, usable as a context manager.

    While the lock is held, the lock file contains str(owner) so a human
    can see who holds it; it is truncated again on release.'''

    def __init__(self, path, owner):
        self.path = path
        self.owner = owner
        self.f = None
        # fd of the open and locked lock file, or None while unlocked.
        # BUG FIX: the original re-entry guard tested self.f, which is never
        # assigned after __init__ and hence always None. Every __enter__ thus
        # opened a fresh fd, leaking the previous one -- and since flock()
        # conflicts between two open file descriptions even within one
        # process, re-locking would deadlock. Track the fd itself instead.
        self.fd = None

    def __enter__(self):
        if self.fd is not None:
            # already locked by us: do not open/lock a second fd
            return
        self.fd = os.open(self.path, os.O_CREAT | os.O_WRONLY)
        fcntl.flock(self.fd, fcntl.LOCK_EX)
        os.truncate(self.fd, 0)
        os.write(self.fd, str(self.owner).encode('utf-8'))
        os.fsync(self.fd)

    def __exit__(self, *exc_info):
        # closing the fd implicitly releases the flock
        os.truncate(self.fd, 0)
        os.fsync(self.fd)
        os.close(self.fd)
        self.fd = None

    def lock(self):
        self.__enter__()

    def unlock(self):
        self.__exit__()
+
+
class Dir():
    '''Convenience wrapper around a filesystem directory path: create,
    query, derive child paths, and lock it against other processes.'''

    # name of the per-directory lock file used by lock()
    LOCK_FILE = 'lock'

    def __init__(self, path):
        self.path = path
        self.lock_path = os.path.join(self.path, Dir.LOCK_FILE)

    def lock(self, origin_id):
        '''
        return lock context, usage:

          with my_dir.lock(origin):
              read_from(my_dir.child('foo.txt'))
              write_to(my_dir.child('bar.txt'))
        '''
        self.mkdir()
        return FileLock(self.lock_path, origin_id)

    @staticmethod
    def ensure_abs_dir_exists(*path_elements):
        'Create the joined absolute directory (and parents) if missing.'
        l = len(path_elements)
        if l < 1:
            raise RuntimeError('Cannot create empty path')
        if l == 1:
            path = path_elements[0]
        else:
            path = os.path.join(*path_elements)
        if not os.path.isdir(path):
            os.makedirs(path)

    def child(self, *rel_path):
        'Return the path of rel_path below this dir (no filesystem access).'
        if not rel_path:
            return self.path
        return os.path.join(self.path, *rel_path)

    def mk_parentdir(self, *rel_path):
        'Ensure the parent dir of rel_path exists; return the child path.'
        child = self.child(*rel_path)
        child_parent = os.path.dirname(child)
        Dir.ensure_abs_dir_exists(child_parent)
        return child

    def mkdir(self, *rel_path):
        'Ensure the dir at rel_path exists; return its path.'
        child = self.child(*rel_path)
        Dir.ensure_abs_dir_exists(child)
        return child

    def children(self):
        return os.listdir(self.path)

    def exists(self, *rel_path):
        return os.path.exists(self.child(*rel_path))

    def isdir(self, *rel_path):
        return os.path.isdir(self.child(*rel_path))

    def isfile(self, *rel_path):
        return os.path.isfile(self.child(*rel_path))

    def new_child(self, *rel_path):
        '''Return a not-yet-existing path below this dir: rel_path itself,
        or with _2, _3, ... inserted before the suffix. Creates the parent
        dir, but not the child itself.'''
        attempt = 1
        prefix, suffix = os.path.splitext(self.child(*rel_path))
        rel_path_fmt = '%s%%s%s' % (prefix, suffix)
        while True:
            path = rel_path_fmt % (('_%d'%attempt) if attempt > 1 else '')
            if not os.path.exists(path):
                break
            attempt += 1
            continue
        Dir.ensure_abs_dir_exists(os.path.dirname(path))
        return path

    def rel_path(self, path):
        'Return path expressed relative to this dir.'
        return os.path.relpath(path, self.path)

    def touch(self, *rel_path):
        touch_file(self.child(*rel_path))

    def new_file(self, *rel_path):
        'Create and return the path of a new empty file, see new_child().'
        path = self.new_child(*rel_path)
        touch_file(path)
        return path

    def new_dir(self, *rel_path):
        'Create and return the path of a new directory, see new_child().'
        path = self.new_child(*rel_path)
        Dir.ensure_abs_dir_exists(path)
        return path

    def __str__(self):
        return self.path
    def __repr__(self):
        return self.path
+
def touch_file(path):
    'Create the file if missing, like touch(1); existing content is kept.'
    # opening in append mode creates the file without altering its content
    with open(path, 'a'):
        pass
+
def is_dict(l):
    'True if l is a dict.'
    return isinstance(l, dict)
+
def is_list(l):
    'True if l is a list or a tuple.'
    return isinstance(l, (list, tuple))
+
+
def dict_add(a, *b, **c):
    'Update dict a with all dicts in b and with the kwargs c; return a.'
    for other in b:
        a.update(other)
    a.update(c)
    return a
+
def _hash_recurse(acc, obj, ignore_keys):
    '''Feed obj into hash accumulator acc: dicts and lists recursively,
    leaves as their str() representation. Dict keys in ignore_keys are
    skipped entirely.'''
    if is_dict(obj):
        # sort items so the digest is independent of dict insertion order
        for key, val in sorted(obj.items()):
            if key in ignore_keys:
                continue
            _hash_recurse(acc, val, ignore_keys)
        return

    if is_list(obj):
        for item in obj:
            _hash_recurse(acc, item, ignore_keys)
        return

    acc.update(str(obj).encode('utf-8'))
+
def hash_obj(obj, *ignore_keys):
    '''Return a stable sha1 hexdigest of a nested dict/list structure,
    ignoring the given dict keys at any nesting level.'''
    acc = hashlib.sha1()
    _hash_recurse(acc, obj, ignore_keys)
    return acc.hexdigest()
+
+
def md5(of_content):
    'Return the hex MD5 digest of a str (taken as UTF-8) or of bytes.'
    data = of_content.encode('utf-8') if isinstance(of_content, str) else of_content
    return hashlib.md5(data).hexdigest()
+
def md5_of_file(path):
    'Return the hex MD5 digest of the file contents at path.'
    with open(path, 'rb') as f:
        return md5(f.read())
+
# process-wide cached temp dir, created lazily by get_tempdir()
_tempdir = None

def get_tempdir(remove_on_exit=True):
    '''Return the process-wide temporary directory, creating it on first use.
    NOTE: remove_on_exit only takes effect on the very first call; later
    calls return the cached dir without changing the cleanup behavior.'''
    global _tempdir
    if _tempdir is not None:
        return _tempdir
    _tempdir = tempfile.mkdtemp()
    if remove_on_exit:
        atexit.register(lambda: shutil.rmtree(_tempdir))
    return _tempdir
+
+
# Compatibility shim: importlib.util.module_from_spec exists from Python 3.5;
# on older interpreters fall back to the deprecated SourceFileLoader API.
if hasattr(importlib.util, 'module_from_spec'):
    def run_python_file(module_name, path):
        'Execute the python file at path as a module named module_name.'
        spec = importlib.util.spec_from_file_location(module_name, path)
        spec.loader.exec_module( importlib.util.module_from_spec(spec) )
else:
    from importlib.machinery import SourceFileLoader
    def run_python_file(module_name, path):
        'Execute the python file at path as a module named module_name.'
        SourceFileLoader(module_name, path).load_module()
+
def msisdn_inc(msisdn_str):
    'add 1 and preserve leading zeros'
    # zero-pad back to the original width; a carry beyond that width simply
    # yields one more digit, same as the original %0Nd formatting
    return str(int(msisdn_str) + 1).zfill(len(msisdn_str))
+
class InputThread(threading.Thread):
    'Thread that blocks on input() so the caller can keep polling meanwhile.'
    def __init__(self, prompt):
        super().__init__()
        self.prompt = prompt
        self.result = None  # set to the entered line once input() returns

    def run(self):
        self.result = input(self.prompt)
+
def input_polling(prompt, poll_func):
    '''Show prompt and wait for a line of user input, calling poll_func()
    about once per second while waiting. Returns the entered line.'''
    input_thread = InputThread(prompt)
    input_thread.start()

    while input_thread.is_alive():
        poll_func()
        time.sleep(1)

    input_thread.join()
    return input_thread.result
+
def str2bool(val):
    '''Map "true"/"yes"/"on" (any case) to True, "false"/"no"/"off" as well
    as None/empty to False; raise ValueError for anything else.'''
    if not val:
        # covers None and the empty string
        return False
    normalized = val.upper()
    if normalized in ('FALSE', 'NO', 'OFF'):
        return False
    if normalized in ('TRUE', 'YES', 'ON'):
        return True
    raise ValueError('Invalid BOOL field: %r' % val)
+
def list_validate_same_elem_type(li):
    '''
    Checks that all elements in the list are of the same type and returns that type.
    If the list is empty, returns None
    If one of the elements is not of the same type, it throws a ValueError exception.
    '''
    if not li:
        return None
    first_type = type(li[0])
    for elem in li[1:]:
        if type(elem) is not first_type:
            raise ValueError('List contains elements of different types: %r vs %r' % (first_type, type(elem)))
    return first_type
+
def empty_instance_type(t):
    'Return a fresh empty instance for t in (dict, list, tuple).'
    if t in (dict, list, tuple):
        # calling the type yields a new empty container
        return t()
    raise ValueError('type %r not supported!' % t)
+
def encryption2osmovty(val):
    "Translate an 'a5_N' encryption identifier to the 'a5 N' VTY syntax."
    # NOTE(review): assert is stripped under 'python -O', silently dropping
    # this input validation -- consider raising ValueError instead.
    assert val[:3] == 'a5_'
    return 'a5 ' + val[3:]
+
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/src/osmo_ms_driver/__init__.py b/src/osmo_ms_driver/__init__.py
new file mode 100644
index 0000000..0c7b4b9
--- /dev/null
+++ b/src/osmo_ms_driver/__init__.py
@@ -0,0 +1,22 @@
+# osmo_ms_driver: automated cellular network tests
+#
+# Copyright (C) 2018 by sysmocom - s.f.m.c. GmbH
+#
+# Authors: Holger Hans Peter Freyther
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from osmo_gsm_tester import __version__
+
+# vim: expandtab tabstop=4 shiftwidth=4
diff --git a/src/osmo_ms_driver/__main__.py b/src/osmo_ms_driver/__main__.py
new file mode 100644
index 0000000..7e1afa8
--- /dev/null
+++ b/src/osmo_ms_driver/__main__.py
@@ -0,0 +1,97 @@
+# osmo_ms_driver: Main test runner
+#
+# Copyright (C) 2018 by Holger Hans Peter Freyther
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+# Local modules
+from .event_server import EventServer
+from .simple_loop import SimpleLoop
+from .location_update_test import MassUpdateLocationTest
+from .cdf import cdfs
+from .starter import BinaryOptions
+from osmo_gsm_tester import log, util
+
+# System modules
+from datetime import timedelta
+
+import argparse
+import atexit
+import datetime
+import signal
+import tempfile
+import os.path
+import os
+
def parser():
    'Build the command line argument parser for the MS driver.'
    parser = argparse.ArgumentParser(epilog=__doc__, formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('-s', '--launch-duration', dest='launch_duration',
                        default=60, type=int,
                        help="Time launching applications should take in seconds")
    parser.add_argument('-i', '--launch-interval', dest='launch_interval',
                        default=100, type=int,
                        help="Time between launching in milliseconds")
    parser.add_argument('-t', '--test-duration', dest="test_duration",
                        default=120, type=int,
                        help="Time of the test duration in seconds")
    parser.add_argument('-d', '--distribution', dest="cdf_name",
                        choices=cdfs.keys(), default="ease_in_out",
                        help="Curve to use for starting within launch duration")
    parser.add_argument('-m', '--number-ms', dest="num_ms",
                        default=10, type=int,
                        help="Number of MobileStations to simulate")
    return parser
+
def main():
    'Entry point: parse args, set up the event loop and run the LU test.'
    # Create a default log to stdout
    log.LogTarget().style(src=False)

    args = parser().parse_args()

    # We don't care what is happening to child processes we spawn!
    signal.signal(signal.SIGCHLD, signal.SIG_IGN)

    loop = SimpleLoop()

    tmp_dir = util.Dir(tempfile.mkdtemp(suffix="osmo-ms-driver"))
    log.log("Going to store files in ", tmp_dir=tmp_dir)

    # How long should starting all apps take
    time_start=datetime.timedelta(seconds=args.launch_duration)
    # In which steps to start processes
    time_step=datetime.timedelta(milliseconds=args.launch_interval)

    # Event server path
    event_server_path = os.path.join(str(tmp_dir), "osmo_ms_driver.unix")

    # The function that decides when to start something
    cdf = cdfs[args.cdf_name](time_start, time_step)

    # Event server to handle MS->test events
    ev_server = EventServer("ev_server", event_server_path)
    ev_server.listen(loop)

    # Just a single test for now.
    options = BinaryOptions("virtphy", "mobile", os.environ)
    test = MassUpdateLocationTest("lu_test", options, args.num_ms, cdf, ev_server, tmp_dir)
    # make sure all spawned processes are terminated on exit
    atexit.register(test.stop_all)

    # Run until everything has been launched
    test.run_test(loop, timedelta(seconds=args.test_duration))

    # Print stats
    test.print_stats()

if __name__ == '__main__':
    main()
diff --git a/src/osmo_ms_driver/cdf.py b/src/osmo_ms_driver/cdf.py
new file mode 100644
index 0000000..e163cc3
--- /dev/null
+++ b/src/osmo_ms_driver/cdf.py
@@ -0,0 +1,112 @@
+# osmo_ms_driver: A cumululative distribution function class.
+# Help to start processes over time.
+#
+# Copyright (C) 2018 by Holger Hans Peter Freyther
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+from datetime import timedelta
+
class DistributionFunctionHandler(object):
    """
    The goal is to start n "mobile" processes. We like to see some
    conflicts (RACH bursts being ignored) but starting n processes
    at the same time is not a realistic model.
    We use the concept of cumulative distribution function here. On
    the x-axis we have time (maybe in steps of 10ms) and on the
    y-axis we have the percentage (from 0.0 to 1.0) of how many
    processes should run at the given time.
    """

    def __init__(self, step, duration, fun):
        self._step = step
        self._fun = fun
        self._x = 0.0
        self._y = self._fun(self._x)
        self._target = 1.0
        self._duration = duration

    def step_size(self):
        'Time advanced by each step_once() call (a timedelta).'
        return self._step

    def set_target(self, scale):
        """
        Scale the percentage to the target value.
        """
        self._target = scale

    def is_done(self):
        'True once the function has reached 100%.'
        return self._y >= 1.0

    def current_value(self):
        'Current fraction, in [0.0, 1.0].'
        return self._y

    def current_scaled_value(self):
        'Current fraction scaled by the target set via set_target().'
        return self._y * self._target

    def step_once(self):
        'Advance x by one step and re-evaluate the function.'
        self._x = self._x + self._step.total_seconds()
        self._y = self._fun(self._x)

    def duration(self):
        return self._duration
+
+
def immediate(step_size=timedelta(milliseconds=20)):
    """
    Reaches 100% at the first step.
    """
    duration = timedelta(seconds=0)
    return DistributionFunctionHandler(step_size, duration, lambda x: 1)
+
def linear_with_slope(slope, duration, step_size=timedelta(milliseconds=20)):
    """
    Use the slope and step size you want
    """
    return DistributionFunctionHandler(step_size, duration, lambda x: slope*x)
+
def linear_with_duration(duration, step_size=timedelta(milliseconds=20)):
    """
    Linear progression that reaches 100% after duration.total_seconds()
    """
    slope = 1.0/duration.total_seconds()
    return linear_with_slope(slope, duration, step_size)
+
+def _in_out(x):
+ """
+ Internal in/out function inspired by Qt
+ """
+ assert x <= 1.0
+ # Needs to be between 0..1 and increase first
+ if x < 0.5:
+ return (x*x) * 2
+ # deaccelerate now. in_out(0.5) == 0.5, in_out(1.0) == 1.0
+ x = x * 2 - 1
+ return -0.5 * (x*(x-2)- 1)
+
def ease_in_out_duration(duration, step_size=timedelta(milliseconds=20)):
    """
    Ease-in/ease-out progression that reaches 100% after
    duration.total_seconds()
    """
    scale = 1.0/duration.total_seconds()
    return DistributionFunctionHandler(step_size, duration,
                                       lambda x: _in_out(x*scale))
+
+
# Selectable distribution functions; each entry is invoked as
# f(duration, step_size). 'immediate' ignores the duration argument.
cdfs = {
    'immediate': lambda x,y: immediate(y),
    'linear': linear_with_duration,
    'ease_in_out': ease_in_out_duration,
}
diff --git a/src/osmo_ms_driver/event_server.py b/src/osmo_ms_driver/event_server.py
new file mode 100644
index 0000000..ce9d5c1
--- /dev/null
+++ b/src/osmo_ms_driver/event_server.py
@@ -0,0 +1,32 @@
+
+from osmo_gsm_tester import log
+
+import time
+
+
class EventServer(log.Origin):
    """
    Listen for AF_UNIX/SOCK_DGRAM messages from test apps and
    forward them.
    """
    def __init__(self, name, path):
        super().__init__(log.C_RUN, name)
        self._path = path
        self._handlers = []

    def register(self, cb):
        'Register cb(data, addr, monotonic_time) to be called per datagram.'
        self._handlers.append(cb)

    def server_path(self):
        return self._path

    def listen(self, loop):
        self._server = loop.create_unix_server(self.read_cb, self._path)

    def read_cb(self, obj, mask):
        # address doesn't give us the remote but currently we don't
        # need it.
        data, ancdata, flags, addr = self._server.recvmsg(4096, 4096)
        # timestamp the event with the monotonic clock, consistent with the
        # timing measurements done by the tests
        now = time.clock_gettime(time.CLOCK_MONOTONIC)
        for handler in self._handlers:
            handler(data, addr, now)
diff --git a/src/osmo_ms_driver/location_update_test.py b/src/osmo_ms_driver/location_update_test.py
new file mode 100644
index 0000000..90e405e
--- /dev/null
+++ b/src/osmo_ms_driver/location_update_test.py
@@ -0,0 +1,215 @@
+# osmo_ms_driver: Location Update Test
+# Create MS's and wait for the Location Update to succeed.
+#
+# Copyright (C) 2018 by Holger Hans Peter Freyther
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from copy import copy
+from osmo_gsm_tester import log
+from .starter import OsmoVirtPhy, OsmoMobile
+from .test_support import imsi_ki_gen, Results
+
+from datetime import timedelta
+
+import time
+
class LUResult(Results):
    'Per-MS result record: remembers when the Location Update succeeded.'

    def __init__(self, name):
        super().__init__(name)
        self._time_of_lu = None

    def set_lu_time(self, time):
        # a successful LU may only be recorded once
        assert self._time_of_lu is None
        self._time_of_lu = time

    def has_lu_time(self):
        return self._time_of_lu is not None

    def lu_time(self):
        # 0 when no LU happened (yet)
        return self._time_of_lu or 0

    def lu_delay(self):
        'Seconds between the MS start (see Results) and the completed LU.'
        return self.lu_time() - self.start_time()
+
class MassUpdateLocationTest(log.Origin):
    """
    A test to launch a configurable amount of MS and make them
    execute a Location Updating Procedure.

    Configure the number of MS to be tested and a function that
    decides how quickly to start them and a timeout.
    """

    TEMPLATE_LUA = "osmo-mobile-lu.lua"
    TEMPLATE_CFG = "osmo-mobile.cfg"

    def __init__(self, name, options, number_of_ms, cdf_function,
                 event_server, tmp_dir, suite_run=None):
        super().__init__(log.C_RUN, name)
        self._binary_options = options
        self._number_of_ms = number_of_ms
        self._cdf = cdf_function
        # make the cdf count processes instead of a 0..1 fraction
        self._cdf.set_target(number_of_ms)
        self._suite_run = suite_run
        self._unstarted = []
        self._mobiles = []
        self._phys = []
        self._results = {}
        imsi_gen = imsi_ki_gen()

        # number of MS that have not yet completed their LU
        self._outstanding = number_of_ms
        for i in range(0, number_of_ms):
            ms_name = "%.5d" % i

            phy = OsmoVirtPhy(options.virtphy, options.env,
                              ms_name, tmp_dir)
            self._phys.append(phy)

            launcher = OsmoMobile(options.mobile, options.env,
                                  ms_name, tmp_dir, self.TEMPLATE_LUA,
                                  self.TEMPLATE_CFG, imsi_gen,
                                  phy.phy_filename(),
                                  event_server.server_path())
            self._results[ms_name] = LUResult(ms_name)
            self._mobiles.append(launcher)
        self._event_server = event_server
        self._event_server.register(self.handle_msg)
        self._unstarted = copy(self._mobiles)
        self._started = []

    def mobiles(self):
        return self._mobiles

    def pre_launch(self, loop):
        """
        We need the virtphy's be ready when the lua script in the
        mobile comes and kicks-off the test. In lua we don't seem to
        be able to just stat/check if a file/socket exists so we need
        to do this from here.
        """
        self.log("Pre-launching all virtphy's")
        for phy in self._phys:
            phy.start(loop, self._suite_run)

        self.log("Checking if sockets are in the filesystem")
        for phy in self._phys:
            phy.verify_ready()

    def prepare(self, loop):
        'Launch the virtphys and initialize the timing bookkeeping.'
        self.log("Starting testcase")

        self.pre_launch(loop)

        self._start_time = time.clock_gettime(time.CLOCK_MONOTONIC)
        # deadline: cdf ramp-up plus a fixed two minute grace period
        self._end_time = self._start_time + \
                         self._cdf.duration().total_seconds() + \
                         timedelta(seconds=120).total_seconds()

        self._started = []
        self._too_slow = 0

    def step_once(self, loop, current_time):
        '''Advance the cdf by one step and start all MS that are due.
        Returns (next_tick_time, sleep_time); sleep_time is None once
        everything has been started.'''
        if len(self._unstarted) <= 0:
            return current_time, None

        step_size = self._cdf.step_size().total_seconds()

        # Start
        self._cdf.step_once()

        # Check for timeout
        # start pending MS
        while len(self._started) < self._cdf.current_scaled_value() and len(self._unstarted) > 0:
            ms = self._unstarted.pop(0)
            ms.start(loop, self._suite_run)
            launch_time = time.clock_gettime(time.CLOCK_MONOTONIC)
            self._results[ms.name_number()].set_launch_time(launch_time)
            self._started.append(ms)

        now_time = time.clock_gettime(time.CLOCK_MONOTONIC)
        sleep_time = (current_time + step_size) - now_time
        if sleep_time <= 0:
            # launching took longer than one cdf step; note it and catch up
            self.log("Starting too slowly. Moving on",
                     target=(current_time + step_size), now=now_time, sleep=sleep_time)
            self._too_slow += 1
            sleep_time = 0

        if len(self._unstarted) == 0:
            end_time = time.clock_gettime(time.CLOCK_MONOTONIC)
            self.log("All started...", too_slow=self._too_slow, duration=end_time - self._start_time)
            return current_time, None

        return current_time + step_size, sleep_time

    def run_test(self, loop, test_duration):
        'Drive the event loop until all LUs completed or test_duration passed.'
        self.prepare(loop)

        to_complete_time = self._start_time + test_duration.total_seconds()
        tick_time = self._start_time

        while not self.all_completed():
            tick_time, sleep_time = self.step_once(loop, tick_time)
            now_time = time.clock_gettime(time.CLOCK_MONOTONIC)
            if sleep_time is None:
                # everything launched: wait for events until the deadline
                sleep_time = to_complete_time - now_time
                if sleep_time < 0:
                    break
            loop.schedule_timeout(sleep_time)
            loop.select()

    def stop_all(self):
        'Terminate all launched mobile processes (registered via atexit).'
        for launcher in self._started:
            launcher.terminate()

    def handle_msg(self, _data, addr, time):
        'Event server callback: dispatch register/LU events from the MS.'
        import json
        data = json.loads(_data.decode())

        if data['type'] == 'register':
            ms = self._results[data['ms']]
            ms.set_start_time(time)
            launch_delay = ms.start_time() - ms.launch_time()
            self.log("MS start registered ", ms=ms, at=time, delay=launch_delay)
        elif data['type'] == 'event':
            # NOTE(review): only 'lu_done' events are expected here; an
            # 'event' without that key would raise KeyError -- confirm
            # against the lua template's mod.send() callers.
            if data['data']['lu_done'] == 1:
                ms = self._results[data['ms']]
                if not ms.has_lu_time():
                    # count each MS's first successful LU exactly once
                    self._outstanding = self._outstanding - 1
                    ms.set_lu_time(time)
                    self.log("MS performed LU ", ms=ms, at=time, lu_delay=ms.lu_delay())
        else:
            print(time, data)
            raise Exception("Unknown event type..:" + _data.decode())


    def all_completed(self):
        return self._outstanding == 0

    def results_min_max(self, results):
        'Return (min, max) LU delay over results; (None, None) when empty.'
        min_value = max_value = None
        for result in results:
            if min_value is None or result.lu_delay() < min_value:
                min_value = result.lu_delay()
            if max_value is None or result.lu_delay() > max_value:
                max_value = result.lu_delay()
        return min_value, max_value

    def print_stats(self):
        'Log completion state and the min/max LU delay of successful MS.'
        all_completed = self.all_completed()
        min_value, max_value = self.results_min_max(filter(lambda x: x.has_lu_time(), self._results.values()))

        self.log("Tests done", all_completed=all_completed,
                 min=min_value, max=max_value)
diff --git a/src/osmo_ms_driver/lua/json.lua b/src/osmo_ms_driver/lua/json.lua
new file mode 100644
index 0000000..dda6193
--- /dev/null
+++ b/src/osmo_ms_driver/lua/json.lua
@@ -0,0 +1,380 @@
+--
+-- json.lua
+--
+-- Copyright (c) 2015 rxi
+--
+-- This library is free software; you can redistribute it and/or modify it
+-- under the terms of the MIT license. See LICENSE for details.
+--
+
+local json = { _version = "0.1.0" }
+
+-------------------------------------------------------------------------------
+-- Encode
+-------------------------------------------------------------------------------
+
+local encode
+
+local escape_char_map = {
+ [ "\\" ] = "\\\\",
+ [ "\"" ] = "\\\"",
+ [ "\b" ] = "\\b",
+ [ "\f" ] = "\\f",
+ [ "\n" ] = "\\n",
+ [ "\r" ] = "\\r",
+ [ "\t" ] = "\\t",
+}
+
+local escape_char_map_inv = { [ "\\/" ] = "/" }
+for k, v in pairs(escape_char_map) do
+ escape_char_map_inv[v] = k
+end
+
+
+local function escape_char(c)
+ return escape_char_map[c] or string.format("\\u%04x", c:byte())
+end
+
+
+local function encode_nil(val)
+ return "null"
+end
+
+
+local function encode_table(val, stack)
+ local res = {}
+ stack = stack or {}
+
+ -- Circular reference?
+ if stack[val] then error("circular reference") end
+
+ stack[val] = true
+
+ if val[1] ~= nil or next(val) == nil then
+ -- Treat as array -- check keys are valid and it is not sparse
+ local n = 0
+ for k in pairs(val) do
+ if type(k) ~= "number" then
+ error("invalid table: mixed or invalid key types")
+ end
+ n = n + 1
+ end
+ if n ~= #val then
+ error("invalid table: sparse array")
+ end
+ -- Encode
+ for i, v in ipairs(val) do
+ table.insert(res, encode(v, stack))
+ end
+ stack[val] = nil
+ return "[" .. table.concat(res, ",") .. "]"
+
+ else
+ -- Treat as an object
+ for k, v in pairs(val) do
+ if type(k) ~= "string" then
+ error("invalid table: mixed or invalid key types")
+ end
+ table.insert(res, encode(k, stack) .. ":" .. encode(v, stack))
+ end
+ stack[val] = nil
+ return "{" .. table.concat(res, ",") .. "}"
+ end
+end
+
+
+local function encode_string(val)
+ return '"' .. val:gsub('[%z\1-\31\\"]', escape_char) .. '"'
+end
+
+
+local function encode_number(val)
+ -- Check for NaN, -inf and inf
+ if val ~= val or val <= -math.huge or val >= math.huge then
+ error("unexpected number value '" .. tostring(val) .. "'")
+ end
+ return string.format("%.14g", val)
+end
+
+
+local type_func_map = {
+ [ "nil" ] = encode_nil,
+ [ "table" ] = encode_table,
+ [ "string" ] = encode_string,
+ [ "number" ] = encode_number,
+ [ "boolean" ] = tostring,
+}
+
+
+encode = function(val, stack)
+ local t = type(val)
+ local f = type_func_map[t]
+ if f then
+ return f(val, stack)
+ end
+ error("unexpected type '" .. t .. "'")
+end
+
+
+function json.encode(val)
+ return ( encode(val) )
+end
+
+
+-------------------------------------------------------------------------------
+-- Decode
+-------------------------------------------------------------------------------
+
+local parse
+
+local function create_set(...)
+ local res = {}
+ for i = 1, select("#", ...) do
+ res[ select(i, ...) ] = true
+ end
+ return res
+end
+
+local space_chars = create_set(" ", "\t", "\r", "\n")
+local delim_chars = create_set(" ", "\t", "\r", "\n", "]", "}", ",")
+local escape_chars = create_set("\\", "/", '"', "b", "f", "n", "r", "t", "u")
+local literals = create_set("true", "false", "null")
+
+local literal_map = {
+ [ "true" ] = true,
+ [ "false" ] = false,
+ [ "null" ] = nil,
+}
+
+
+local function next_char(str, idx, set, negate)
+ for i = idx, #str do
+ if set[str:sub(i, i)] ~= negate then
+ return i
+ end
+ end
+ return #str + 1
+end
+
+
+local function decode_error(str, idx, msg)
+ local line_count = 1
+ local col_count = 1
+ for i = 1, idx - 1 do
+ col_count = col_count + 1
+ if str:sub(i, i) == "\n" then
+ line_count = line_count + 1
+ col_count = 1
+ end
+ end
+ error( string.format("%s at line %d col %d", msg, line_count, col_count) )
+end
+
+
+local function codepoint_to_utf8(n)
+ -- http://scripts.sil.org/cms/scripts/page.php?site_id=nrsi&id=iws-appendixa
+ local f = math.floor
+ if n <= 0x7f then
+ return string.char(n)
+ elseif n <= 0x7ff then
+ return string.char(f(n / 64) + 192, n % 64 + 128)
+ elseif n <= 0xffff then
+ return string.char(f(n / 4096) + 224, f(n % 4096 / 64) + 128, n % 64 + 128)
+ elseif n <= 0x10ffff then
+ return string.char(f(n / 262144) + 240, f(n % 262144 / 4096) + 128,
+ f(n % 4096 / 64) + 128, n % 64 + 128)
+ end
+ error( string.format("invalid unicode codepoint '%x'", n) )
+end
+
+
+local function parse_unicode_escape(s)
+ local n1 = tonumber( s:sub(3, 6), 16 )
+ local n2 = tonumber( s:sub(9, 12), 16 )
+ -- Surrogate pair?
+ if n2 then
+ return codepoint_to_utf8((n1 - 0xd800) * 0x400 + (n2 - 0xdc00) + 0x10000)
+ else
+ return codepoint_to_utf8(n1)
+ end
+end
+
+
+local function parse_string(str, i)
+ local has_unicode_escape = false
+ local has_surrogate_escape = false
+ local has_escape = false
+ local last
+ for j = i + 1, #str do
+ local x = str:byte(j)
+
+ if x < 32 then
+ decode_error(str, j, "control character in string")
+ end
+
+ if last == 92 then -- "\\" (escape char)
+ if x == 117 then -- "u" (unicode escape sequence)
+ local hex = str:sub(j + 1, j + 5)
+ if not hex:find("%x%x%x%x") then
+ decode_error(str, j, "invalid unicode escape in string")
+ end
+ if hex:find("^[dD][89aAbB]") then
+ has_surrogate_escape = true
+ else
+ has_unicode_escape = true
+ end
+ else
+ local c = string.char(x)
+ if not escape_chars[c] then
+ decode_error(str, j, "invalid escape char '" .. c .. "' in string")
+ end
+ has_escape = true
+ end
+ last = nil
+
+ elseif x == 34 then -- '"' (end of string)
+ local s = str:sub(i + 1, j - 1)
+ if has_surrogate_escape then
+ s = s:gsub("\\u[dD][89aAbB]..\\u....", parse_unicode_escape)
+ end
+ if has_unicode_escape then
+ s = s:gsub("\\u....", parse_unicode_escape)
+ end
+ if has_escape then
+ s = s:gsub("\\.", escape_char_map_inv)
+ end
+ return s, j + 1
+
+ else
+ last = x
+ end
+ end
+ decode_error(str, i, "expected closing quote for string")
+end
+
+
+local function parse_number(str, i)
+ local x = next_char(str, i, delim_chars)
+ local s = str:sub(i, x - 1)
+ local n = tonumber(s)
+ if not n then
+ decode_error(str, i, "invalid number '" .. s .. "'")
+ end
+ return n, x
+end
+
+
+local function parse_literal(str, i)
+ local x = next_char(str, i, delim_chars)
+ local word = str:sub(i, x - 1)
+ if not literals[word] then
+ decode_error(str, i, "invalid literal '" .. word .. "'")
+ end
+ return literal_map[word], x
+end
+
+
+local function parse_array(str, i)
+ local res = {}
+ local n = 1
+ i = i + 1
+ while 1 do
+ local x
+ i = next_char(str, i, space_chars, true)
+ -- Empty / end of array?
+ if str:sub(i, i) == "]" then
+ i = i + 1
+ break
+ end
+ -- Read token
+ x, i = parse(str, i)
+ res[n] = x
+ n = n + 1
+ -- Next token
+ i = next_char(str, i, space_chars, true)
+ local chr = str:sub(i, i)
+ i = i + 1
+ if chr == "]" then break end
+ if chr ~= "," then decode_error(str, i, "expected ']' or ','") end
+ end
+ return res, i
+end
+
+
+local function parse_object(str, i)
+ local res = {}
+ i = i + 1
+ while 1 do
+ local key, val
+ i = next_char(str, i, space_chars, true)
+ -- Empty / end of object?
+ if str:sub(i, i) == "}" then
+ i = i + 1
+ break
+ end
+ -- Read key
+ if str:sub(i, i) ~= '"' then
+ decode_error(str, i, "expected string for key")
+ end
+ key, i = parse(str, i)
+ -- Read ':' delimiter
+ i = next_char(str, i, space_chars, true)
+ if str:sub(i, i) ~= ":" then
+ decode_error(str, i, "expected ':' after key")
+ end
+ i = next_char(str, i + 1, space_chars, true)
+ -- Read value
+ val, i = parse(str, i)
+ -- Set
+ res[key] = val
+ -- Next token
+ i = next_char(str, i, space_chars, true)
+ local chr = str:sub(i, i)
+ i = i + 1
+ if chr == "}" then break end
+ if chr ~= "," then decode_error(str, i, "expected '}' or ','") end
+ end
+ return res, i
+end
+
+
+local char_func_map = {
+ [ '"' ] = parse_string,
+ [ "0" ] = parse_number,
+ [ "1" ] = parse_number,
+ [ "2" ] = parse_number,
+ [ "3" ] = parse_number,
+ [ "4" ] = parse_number,
+ [ "5" ] = parse_number,
+ [ "6" ] = parse_number,
+ [ "7" ] = parse_number,
+ [ "8" ] = parse_number,
+ [ "9" ] = parse_number,
+ [ "-" ] = parse_number,
+ [ "t" ] = parse_literal,
+ [ "f" ] = parse_literal,
+ [ "n" ] = parse_literal,
+ [ "[" ] = parse_array,
+ [ "{" ] = parse_object,
+}
+
+
+parse = function(str, idx)
+ local chr = str:sub(idx, idx)
+ local f = char_func_map[chr]
+ if f then
+ return f(str, idx)
+ end
+ decode_error(str, idx, "unexpected character '" .. chr .. "'")
+end
+
+
+function json.decode(str)
+ if type(str) ~= "string" then
+ error("expected argument of type string, got " .. type(str))
+ end
+ return ( parse(str, next_char(str, 1, space_chars, true)) )
+end
+
+
+return json
diff --git a/src/osmo_ms_driver/lua/ms_support.lua b/src/osmo_ms_driver/lua/ms_support.lua
new file mode 100644
index 0000000..6490bec
--- /dev/null
+++ b/src/osmo_ms_driver/lua/ms_support.lua
@@ -0,0 +1,32 @@
+json = require("json")
+socket = require("socket")
+socket.unix = require("socket.unix")
+
+local g_c = socket.unix.dgram()
+local g_ms = nil
+
+local mod = {}
+
+-- Register the MS instance with the system
+function mod.register(ms, path)
+ g_ms = ms
+
+ osmo.unix_passcred(g_c:getfd())
+ g_c:connect(path)
+
+ local event = {}
+ event['ms'] = g_ms
+ event['type'] = 'register'
+ g_c:send(json.encode(event))
+end
+
+-- Send an event
+function mod.send(data)
+ local event = {}
+ event['ms'] = g_ms
+ event['type'] = 'event'
+ event['data'] = data
+ g_c:send(json.encode(event))
+end
+
+return mod
diff --git a/src/osmo_ms_driver/simple_loop.py b/src/osmo_ms_driver/simple_loop.py
new file mode 100644
index 0000000..29a4b5b
--- /dev/null
+++ b/src/osmo_ms_driver/simple_loop.py
@@ -0,0 +1,62 @@
+# osmo_ms_driver: Event loop because asyncio is not up to the job
+#
+# Copyright (C) 2018 by Holger Hans Peter Freyther
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from osmo_gsm_tester import log
+
+import os
+import selectors
+import socket
+
+
+class SimpleLoop(log.Origin):
+ def __init__(self):
+ super().__init__(log.C_RUN, "SimpleLoop")
+ self._loop = selectors.DefaultSelector()
+ self._timeout = None
+
+ def register_fd(self, fd, event, callback):
+ self._loop.register(fd, event, callback)
+
+ def schedule_timeout(self, timeout):
+ assert self._timeout is None
+ self._timeout = timeout
+
+ def create_unix_server(self, cb, path):
+ sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
+
+ if len(path.encode()) > 107:
+ raise log.Error('Path for unix socket is longer than max allowed len for unix socket path (107):', path)
+
+ # If not a special Linux namespace...
+ if path[0] != '\0':
+ try:
+ os.unlink(path)
+ except FileNotFoundError:
+ pass
+
+ # Now bind+listen+NONBLOCK
+ sock.bind(path)
+ sock.setblocking(False)
+
+ self.register_fd(sock.fileno(), selectors.EVENT_READ, cb)
+ return sock
+
+ def select(self):
+ events = self._loop.select(timeout=self._timeout)
+ self._timeout = None
+ for key, mask in events:
+ key.data(key.fileobj, mask)
diff --git a/src/osmo_ms_driver/starter.py b/src/osmo_ms_driver/starter.py
new file mode 100644
index 0000000..bf7727f
--- /dev/null
+++ b/src/osmo_ms_driver/starter.py
@@ -0,0 +1,134 @@
+# osmo_ms_driver: Starter for processes
+# Help to start processes over time.
+#
+# Copyright (C) 2018 by Holger Hans Peter Freyther
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from osmo_gsm_tester import log, process, template
+
+import collections
+import os
+import os.path
+import time
+
+BinaryOptions = collections.namedtuple("BinaryOptions", ["virtphy", "mobile", "env"])
+
+class Launcher(log.Origin):
+ def __init__(self, binary, env, base_name, name_number, tmp_dir):
+ super().__init__(log.C_RUN, "{}/{}".format(base_name, name_number))
+ self._binary = binary
+ self._env = env
+ self._name_number = name_number
+ self._tmp_dir = tmp_dir.new_dir(self.name())
+
+ def name_number(self):
+ return self._name_number
+
+class OsmoVirtPhy(Launcher):
+ def __init__(self, binary, env, name_number, tmp_dir):
+ super().__init__(binary, env, "osmo-ms-virt-phy", name_number, tmp_dir)
+ self._phy_filename = os.path.join(self._tmp_dir, "osmocom_l2_" + self._name_number)
+ self._vphy_proc = None
+
+ def phy_filename(self):
+ return self._phy_filename
+
+ def start(self, loop, suite_run=None):
+ if len(self._phy_filename.encode()) > 107:
+ raise log.Error('Path for unix socket is longer than max allowed len for unix socket path (107):', self._phy_filename)
+
+ self.log("Starting virtphy")
+ args = [self._binary, "--l1ctl-sock=" + self._phy_filename]
+ self._vphy_proc = process.Process(self.name(), self._tmp_dir,
+ args, env=self._env)
+ if suite_run:
+ suite_run.remember_to_stop(self._vphy_proc)
+ self._vphy_proc.launch()
+
+ def verify_ready(self):
+ while True:
+ if os.path.exists(self._phy_filename):
+ return
+ time.sleep(0.2)
+
+ def terminate(self):
+ """Clean up things."""
+ if self._vphy_proc:
+ self._vphy_proc.terminate()
+
+class OsmoMobile(Launcher):
+ def __init__(self, binary, env, name_number, tmp_dir, lua_tmpl, cfg_tmpl, imsi_ki_generator, phy_filename, ev_server_path):
+ super().__init__(binary, env, "osmo-ms-mob", name_number, tmp_dir)
+ self._lua_template = lua_tmpl
+ self._cfg_template = cfg_tmpl
+ self._imsi_ki_generator = imsi_ki_generator
+ self._phy_filename = phy_filename
+ self._ev_server_path = ev_server_path
+ self._imsi, self._ki = next(self._imsi_ki_generator)
+ self._omob_proc = None
+
+ def imsi(self):
+ return self._imsi
+
+ def ki(self):
+ return self._ki
+
+ def write_lua_cfg(self):
+ lua_support = os.path.join(os.path.dirname(__file__), 'lua')
+ cfg = {
+ 'test': {
+ 'event_path': self._ev_server_path,
+ 'lua_support': lua_support,
+ }
+ }
+ lua_cfg_file = os.path.join(self._tmp_dir, "lua_" + self._name_number + ".lua")
+ lua_script = template.render(self._lua_template, cfg)
+ with open(lua_cfg_file, 'w') as w:
+ w.write(lua_script)
+ return lua_cfg_file
+
+ def write_mob_cfg(self, lua_filename, phy_filename):
+ cfg = {
+ 'test': {
+ 'script': lua_filename,
+ 'virt_phy': phy_filename,
+ 'imsi': self._imsi,
+ 'ki_comp128': self._ki,
+ 'ms_number': self._name_number,
+ }
+ }
+ mob_cfg_file = os.path.join(self._tmp_dir, "mob_" + self._name_number + ".cfg")
+ mob_vty = template.render(self._cfg_template, cfg)
+ with open(mob_cfg_file, 'w') as w:
+ w.write(mob_vty)
+ return mob_cfg_file
+
+ def start(self, loop, suite_run=None):
+ lua_filename = self.write_lua_cfg()
+ mob_filename = self.write_mob_cfg(lua_filename, self._phy_filename)
+
+ self.log("Starting mobile")
+ # Let the kernel pick an unused port for the VTY.
+ args = [self._binary, "-c", mob_filename, "--vty-port=0"]
+ self._omob_proc = process.Process(self.name(), self._tmp_dir,
+ args, env=self._env)
+ if suite_run:
+ suite_run.remember_to_stop(self._omob_proc)
+ self._omob_proc.launch()
+
+ def terminate(self):
+ """Clean up things."""
+ if self._omob_proc:
+ self._omob_proc.terminate()
diff --git a/src/osmo_ms_driver/test_support.py b/src/osmo_ms_driver/test_support.py
new file mode 100644
index 0000000..f1c34fb
--- /dev/null
+++ b/src/osmo_ms_driver/test_support.py
@@ -0,0 +1,51 @@
+# osmo_ms_driver: Test helpers and base classes
+#
+# Copyright (C) 2018 by Holger Hans Peter Freyther
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from osmo_gsm_tester import log
+
+def imsi_ki_gen():
+ """
+ Generate IMSIs and KIs to be used by test.
+ """
+ n = 1010000000000
+ while True:
+ yield ("%.15d" % n, "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00")
+ n += 1
+
+class Results(log.Origin):
+ """
+ A base class to collect results from tests.
+ """
+
+ def __init__(self, name):
+ super().__init__(log.C_RUN, name)
+ self._time_of_registration = None
+ self._time_of_launch = None
+
+ def set_start_time(self, time):
+ assert self._time_of_registration is None
+ self._time_of_registration = time
+
+ def set_launch_time(self, time):
+ assert self._time_of_launch is None
+ self._time_of_launch = time
+
+ def start_time(self):
+ return self._time_of_registration or 0
+
+ def launch_time(self):
+ return self._time_of_launch or 0
diff --git a/suites/debug/interactive.py b/suites/debug/interactive.py
new file mode 100755
index 0000000..98bf174
--- /dev/null
+++ b/suites/debug/interactive.py
@@ -0,0 +1,160 @@
+#!/usr/bin/env python3
+from osmo_gsm_tester.testenv import *
+hlr = suite.hlr()
+bts = suite.bts()
+pcu = bts.pcu()
+mgw_msc = suite.mgw()
+mgw_bsc = suite.mgw()
+stp = suite.stp()
+ggsn = suite.ggsn()
+sgsn = suite.sgsn(hlr, ggsn)
+msc = suite.msc(hlr, mgw_msc, stp)
+bsc = suite.bsc(msc, mgw_bsc, stp)
+
+modems = suite.modems(int(prompt('How many modems?')))
+
+bsc.bts_add(bts)
+sgsn.bts_add(bts)
+
+hlr.start()
+stp.start()
+ggsn.start()
+sgsn.start()
+msc.start()
+mgw_msc.start()
+mgw_bsc.start()
+bsc.start()
+
+bts.start()
+print('Waiting for bts to connect to bsc...')
+wait(bsc.bts_is_connected, bts)
+print('Waiting for bts to be ready...')
+wait(bts.ready_for_pcu)
+pcu.start()
+
+for m in modems:
+ hlr.subscriber_add(m)
+ m.connect(msc.mcc_mnc())
+
+while True:
+ cmd = prompt('Enter command: (q)uit (s)ms (g)et-registered (w)ait-registered, call-list [<ms_msisdn>], call-dial <src_msisdn> <dst_msisdn>, call-wait-incoming <src_msisdn> <dst_msisdn>, call-answer <mt_msisdn> <call_id>, call-hangup <ms_msisdn> <call_id>, ussd <command>, data-attach, data-wait, data-detach, data-activate')
+ cmd = cmd.strip().lower()
+
+ if not cmd:
+ continue
+
+ params = cmd.split()
+
+ if 'quit'.startswith(cmd):
+ break
+
+ elif 'wait-registered'.startswith(cmd):
+ try:
+ for m in modems:
+ wait(m.is_connected, msc.mcc_mnc())
+ wait(msc.subscriber_attached, *modems)
+ except Timeout:
+ print('Timeout while waiting for registration.')
+
+ elif 'get-registered'.startswith(cmd):
+ print(msc.imsi_list_attached())
+ print('RESULT: %s' %
+ ('All modems are registered.' if msc.subscriber_attached(*modems)
+ else 'Some modem(s) not registered yet.'))
+
+ elif 'sms'.startswith(cmd):
+ for mo in modems:
+ for mt in modems:
+ mo.sms_send(mt.msisdn, 'to ' + mt.name())
+
+ elif cmd.startswith('call-list'):
+ if len(params) != 1 and len(params) != 2:
+ print('wrong format')
+ continue
+ for ms in modems:
+ if len(params) == 1 or str(ms.msisdn) == params[1]:
+ print('call-list: %r %r' % (ms.name(), ms.call_id_list()))
+
+ elif cmd.startswith('call-dial'):
+ if len(params) != 3:
+ print('wrong format')
+ continue
+ src_msisdn, dst_msisdn = params[1:]
+ for mo in modems:
+ if str(mo.msisdn) == src_msisdn:
+ print('dialing %s->%s' % (src_msisdn, dst_msisdn))
+ call_id = mo.call_dial(dst_msisdn)
+ print('dial success: call_id=%r' % call_id)
+
+ elif cmd.startswith('call-wait-incoming'):
+ if len(params) != 3:
+ print('wrong format')
+ continue
+ src_msisdn, dst_msisdn = params[1:]
+ for mt in modems:
+ if str(mt.msisdn) == dst_msisdn:
+ print('waiting for incoming %s->%s' % (src_msisdn, dst_msisdn))
+ call_id = mt.call_wait_incoming(src_msisdn)
+ print('incoming call success: call_id=%r' % call_id)
+
+ elif cmd.startswith('call-answer'):
+ if len(params) != 3:
+ print('wrong format')
+ continue
+ mt_msisdn, call_id = params[1:]
+ for mt in modems:
+ if str(mt.msisdn) == mt_msisdn:
+ print('answering %s %r' % (mt.name(), call_id))
+ mt.call_answer(call_id)
+
+ elif cmd.startswith('call-hangup'):
+ if len(params) != 3:
+ print('wrong format')
+ continue
+ ms_msisdn, call_id = params[1:]
+ for ms in modems:
+ if str(ms.msisdn) == ms_msisdn:
+ print('hanging up %s %r' % (ms.name(), call_id))
+ ms.call_hangup(call_id)
+
+ elif cmd.startswith('ussd'):
+ if len(params) != 2:
+ print('wrong format')
+ continue
+ ussd_cmd = params[1]
+ for ms in modems:
+ print('modem %s: ussd %s' % (ms.name(), ussd_cmd))
+ response = ms.ussd_send(ussd_cmd)
+ print('modem %s: response=%r' % (ms.name(), response))
+
+ elif cmd.startswith('data-attach'):
+ if len(params) != 1:
+ print('wrong format')
+ continue
+ for ms in modems:
+ print('modem %s: attach' % ms.name())
+ ms.attach()
+ wait(ms.is_attached)
+ print('modem %s: attached' % ms.name())
+
+ elif cmd.startswith('data-detach'):
+ if len(params) != 1:
+ print('wrong format')
+ continue
+ for ms in modems:
+ print('modem %s: detach' % ms.name())
+ ms.detach()
+ wait(lambda: not ms.is_attached())
+ print('modem %s: detached' % ms.name())
+
+ elif cmd.startswith('data-activate'):
+ if len(params) != 1:
+ print('wrong format')
+ continue
+ for ms in modems:
+ print('modem %s: activate' % ms.name())
+ response = ms.activate_context()
+ print('modem %s: response=%r' % (ms.name(), response))
+
+ else:
+ print('Unknown command: %s' % cmd)
diff --git a/suites/debug/suite.conf b/suites/debug/suite.conf
new file mode 100644
index 0000000..2f36e1d
--- /dev/null
+++ b/suites/debug/suite.conf
@@ -0,0 +1,7 @@
+resources:
+ ip_address:
+ - times: 8
+ bts:
+ - times: 1
+ modem:
+ - times: 4
diff --git a/suites/dynts/suite.conf b/suites/dynts/suite.conf
new file mode 100644
index 0000000..3b32480
--- /dev/null
+++ b/suites/dynts/suite.conf
@@ -0,0 +1,10 @@
+resources:
+ ip_address:
+ - times: 8 # msc, bsc, hlr, stp, mgw*2, sgsn, ggsn
+ bts:
+ - times: 1
+ modem:
+ - times: 2
+ features:
+ - gprs
+ - voice
diff --git a/suites/dynts/switch_tch_pdch.py b/suites/dynts/switch_tch_pdch.py
new file mode 100755
index 0000000..f0bbd38
--- /dev/null
+++ b/suites/dynts/switch_tch_pdch.py
@@ -0,0 +1,98 @@
+#!/usr/bin/env python3
+from osmo_gsm_tester.testenv import *
+
+
+def activate_pdp(ms_mo, ms_mt):
+ # We need to use inet46 since ofono qmi only uses ipv4v6 eua (OS#2713)
+ ctx_id_v4_mo = ms_mo.activate_context(apn='inet46', protocol=ms_mo.CTX_PROT_IPv4)
+ print('ms_mo pdp ctx %r activated' % ctx_id_v4_mo)
+ ctx_id_v4_mt = ms_mt.activate_context(apn='inet46', protocol=ms_mt.CTX_PROT_IPv4)
+ print('ms_mt pdp ctx %r activated' % ctx_id_v4_mt)
+ sleep(5)
+ ms_mo.deactivate_context(ctx_id_v4_mo)
+ ms_mt.deactivate_context(ctx_id_v4_mt)
+
+def make_call(ms_mo, ms_mt):
+ assert len(ms_mo.call_id_list()) == 0 and len(ms_mt.call_id_list()) == 0
+ mo_cid = ms_mo.call_dial(ms_mt)
+ mt_cid = ms_mt.call_wait_incoming(ms_mo)
+ print('dial success')
+
+ assert not ms_mo.call_is_active(mo_cid) and not ms_mt.call_is_active(mt_cid)
+ ms_mt.call_answer(mt_cid)
+ wait(ms_mo.call_is_active, mo_cid)
+ wait(ms_mt.call_is_active, mt_cid)
+ print('answer success, call established and ongoing')
+
+ sleep(5) # maintain the call active for 5 seconds
+
+ assert ms_mo.call_is_active(mo_cid) and ms_mt.call_is_active(mt_cid)
+ ms_mo.call_hangup(mo_cid)
+ ms_mt.call_hangup(mt_cid)
+ wait(lambda: len(ms_mo.call_id_list()) == 0 and len(ms_mt.call_id_list()) == 0)
+ print('hangup success')
+
+hlr = suite.hlr()
+bts = suite.bts()
+pcu = bts.pcu()
+mgw_msc = suite.mgw()
+mgw_bsc = suite.mgw()
+stp = suite.stp()
+ggsn = suite.ggsn()
+sgsn = suite.sgsn(hlr, ggsn)
+msc = suite.msc(hlr, mgw_msc, stp)
+bsc = suite.bsc(msc, mgw_bsc, stp)
+ms_mo = suite.modem()
+ms_mt = suite.modem()
+
+bsc.bts_add(bts)
+sgsn.bts_add(bts)
+
+print('start network...')
+hlr.start()
+stp.start()
+ggsn.start()
+sgsn.start()
+msc.start()
+mgw_msc.start()
+mgw_bsc.start()
+bsc.start()
+
+bts.start()
+wait(bsc.bts_is_connected, bts)
+print('Waiting for bts to be ready...')
+wait(bts.ready_for_pcu)
+pcu.start()
+
+hlr.subscriber_add(ms_mo)
+hlr.subscriber_add(ms_mt)
+
+ms_mo.connect(msc.mcc_mnc())
+ms_mt.connect(msc.mcc_mnc())
+ms_mo.attach()
+ms_mt.attach()
+
+ms_mo.log_info()
+ms_mt.log_info()
+
+print('waiting for modems to attach...')
+wait(ms_mo.is_connected, msc.mcc_mnc())
+wait(ms_mt.is_connected, msc.mcc_mnc())
+wait(msc.subscriber_attached, ms_mo)
+wait(msc.subscriber_attached, ms_mt)
+
+print('waiting for modems to attach to data services...')
+wait(ms_mo.is_attached)
+wait(ms_mt.is_attached)
+
+print('1: activate_pdp')
+activate_pdp(ms_mo, ms_mt)
+print('2: make_call')
+make_call(ms_mo, ms_mt)
+print('3: Wait 30 seconds to let PCU handle the PDCH channels again')
+sleep(30)
+print('3: activate_pdp')
+activate_pdp(ms_mo, ms_mt)
+print('4: make_call')
+make_call(ms_mo, ms_mt)
+print('Done!')
diff --git a/suites/encryption/lib/testlib.py b/suites/encryption/lib/testlib.py
new file mode 100644
index 0000000..3948941
--- /dev/null
+++ b/suites/encryption/lib/testlib.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python3
+from osmo_gsm_tester.testenv import *
+
+def encryption_test_setup_run(enable_auth, algo):
+ hlr = suite.hlr()
+ bts = suite.bts()
+ mgw_msc = suite.mgw()
+ mgw_bsc = suite.mgw()
+ stp = suite.stp()
+ msc = suite.msc(hlr, mgw_msc, stp)
+ bsc = suite.bsc(msc, mgw_bsc, stp)
+ ms = suite.modem()
+
+ print('start network...')
+ msc.set_authentication(enable_auth)
+ msc.set_encryption(algo)
+ bsc.set_encryption(algo)
+ hlr.start()
+ stp.start()
+ msc.start()
+ mgw_msc.start()
+ mgw_bsc.start()
+ bsc.bts_add(bts)
+ bsc.start()
+ bts.start()
+ wait(bsc.bts_is_connected, bts)
+
+ ms.log_info()
+ good_ki = ms.ki()
+ bad_ki = ("%1X" % (int(good_ki[0], 16) ^ 0x01)) + good_ki[1:]
+
+ print('KI changed: ' + good_ki + " => " + bad_ki)
+ ms.set_ki(bad_ki)
+ hlr.subscriber_add(ms)
+ if enable_auth:
+ print('Attempt connection with wrong KI...')
+ ms.connect(msc.mcc_mnc())
+
+ sleep(40) # TODO: read pcap or CTRL interface and look for Rejected? (gsm_a.dtap.msg_mm_type == 0x04)
+ print('Asserting modem did not register')
+ # FIXME: this can fail because ofono qmi signals registered before being accepted by network. See OS#2458
+ # assert not ms.is_connected(msc.mcc_mnc())
+ assert not msc.subscriber_attached(ms)
+
+ hlr.subscriber_delete(ms)
+ print('KI changed: ' + bad_ki + " => " + good_ki)
+ ms.set_ki(good_ki)
+ hlr.subscriber_add(ms, ms.msisdn)
+ print('Attempt connection with correct KI...')
+ else:
+ print('Attempt connection with wrong KI, should work as it is not used...')
+ ms.connect(msc.mcc_mnc())
+ wait(ms.is_connected, msc.mcc_mnc())
+ wait(msc.subscriber_attached, ms)
diff --git a/suites/encryption/register_a5_0_authopt.py b/suites/encryption/register_a5_0_authopt.py
new file mode 100755
index 0000000..1b7f471
--- /dev/null
+++ b/suites/encryption/register_a5_0_authopt.py
@@ -0,0 +1,8 @@
+#!/usr/bin/env python3
+from osmo_gsm_tester.testenv import *
+
+import testlib
+suite.test_import_modules_register_for_cleanup(testlib)
+from testlib import encryption_test_setup_run
+
+encryption_test_setup_run(False, 'a5_0')
diff --git a/suites/encryption/register_a5_0_authreq.py b/suites/encryption/register_a5_0_authreq.py
new file mode 100755
index 0000000..feca525
--- /dev/null
+++ b/suites/encryption/register_a5_0_authreq.py
@@ -0,0 +1,8 @@
+#!/usr/bin/env python3
+from osmo_gsm_tester.testenv import *
+
+import testlib
+suite.test_import_modules_register_for_cleanup(testlib)
+from testlib import encryption_test_setup_run
+
+encryption_test_setup_run(True, 'a5_0')
diff --git a/suites/encryption/register_a5_1_authreq.py b/suites/encryption/register_a5_1_authreq.py
new file mode 100755
index 0000000..077819b
--- /dev/null
+++ b/suites/encryption/register_a5_1_authreq.py
@@ -0,0 +1,8 @@
+#!/usr/bin/env python3
+from osmo_gsm_tester.testenv import *
+
+import testlib
+suite.test_import_modules_register_for_cleanup(testlib)
+from testlib import encryption_test_setup_run
+
+encryption_test_setup_run(True, 'a5_1')
diff --git a/suites/encryption/register_a5_3_authreq.py b/suites/encryption/register_a5_3_authreq.py
new file mode 100755
index 0000000..219c109
--- /dev/null
+++ b/suites/encryption/register_a5_3_authreq.py
@@ -0,0 +1,8 @@
+#!/usr/bin/env python3
+from osmo_gsm_tester.testenv import *
+
+import testlib
+suite.test_import_modules_register_for_cleanup(testlib)
+from testlib import encryption_test_setup_run
+
+encryption_test_setup_run(True, 'a5_3')
diff --git a/suites/encryption/suite.conf b/suites/encryption/suite.conf
new file mode 100644
index 0000000..18e94a3
--- /dev/null
+++ b/suites/encryption/suite.conf
@@ -0,0 +1,16 @@
+resources:
+ ip_address:
+ - times: 6 # msc, bsc, hlr, stp, mgw*2
+ bts:
+ - times: 1
+ ciphers:
+ - a5_0
+ - a5_1
+ modem:
+ - times: 1
+ ciphers:
+ - a5_0
+ - a5_1
+
+defaults:
+ timeout: 120s
diff --git a/suites/gprs/cs_paging_gprs_active.py b/suites/gprs/cs_paging_gprs_active.py
new file mode 100755
index 0000000..fc0e160
--- /dev/null
+++ b/suites/gprs/cs_paging_gprs_active.py
@@ -0,0 +1,83 @@
+#!/usr/bin/env python3
+
+# Following test verifies CS paging works when MS is GPRS attached.
+# See OS#2204 for more information.
+
+from osmo_gsm_tester.testenv import *
+
+hlr = suite.hlr()
+bts = suite.bts()
+pcu = bts.pcu()
+mgw_msc = suite.mgw()
+mgw_bsc = suite.mgw()
+stp = suite.stp()
+ggsn = suite.ggsn()
+sgsn = suite.sgsn(hlr, ggsn)
+msc = suite.msc(hlr, mgw_msc, stp)
+bsc = suite.bsc(msc, mgw_bsc, stp)
+ms_mo = suite.modem()
+ms_mt = suite.modem()
+
+bsc.bts_add(bts)
+sgsn.bts_add(bts)
+
+print('start network...')
+hlr.start()
+stp.start()
+ggsn.start()
+sgsn.start()
+msc.start()
+mgw_msc.start()
+mgw_bsc.start()
+bsc.start()
+
+bts.start()
+wait(bsc.bts_is_connected, bts)
+print('Waiting for bts to be ready...')
+wait(bts.ready_for_pcu)
+pcu.start()
+
+hlr.subscriber_add(ms_mo)
+hlr.subscriber_add(ms_mt)
+
+ms_mo.connect(msc.mcc_mnc())
+ms_mt.connect(msc.mcc_mnc())
+ms_mo.attach()
+ms_mt.attach()
+
+ms_mo.log_info()
+ms_mt.log_info()
+
+print('waiting for modems to attach...')
+wait(ms_mo.is_connected, msc.mcc_mnc())
+wait(ms_mt.is_connected, msc.mcc_mnc())
+wait(msc.subscriber_attached, ms_mo, ms_mt)
+
+print('waiting for modems to attach to data services...')
+wait(ms_mo.is_attached)
+wait(ms_mt.is_attached)
+
+# We need to use inet46 since ofono qmi only uses ipv4v6 eua (OS#2713)
+ctx_id_v4_mo = ms_mo.activate_context(apn='inet46', protocol=ms_mo.CTX_PROT_IPv4)
+ctx_id_v4_mt = ms_mt.activate_context(apn='inet46', protocol=ms_mt.CTX_PROT_IPv4)
+
+assert len(ms_mo.call_id_list()) == 0 and len(ms_mt.call_id_list()) == 0
+mo_cid = ms_mo.call_dial(ms_mt)
+mt_cid = ms_mt.call_wait_incoming(ms_mo)
+print('dial success')
+
+assert not ms_mo.call_is_active(mo_cid) and not ms_mt.call_is_active(mt_cid)
+ms_mt.call_answer(mt_cid)
+wait(ms_mo.call_is_active, mo_cid)
+wait(ms_mt.call_is_active, mt_cid)
+print('answer success, call established and ongoing')
+
+sleep(5) # maintain the call active for 5 seconds
+
+assert ms_mo.call_is_active(mo_cid) and ms_mt.call_is_active(mt_cid)
+ms_mt.call_hangup(mt_cid)
+wait(lambda: len(ms_mo.call_id_list()) == 0 and len(ms_mt.call_id_list()) == 0)
+print('hangup success')
+
+ms_mo.deactivate_context(ctx_id_v4_mo)
+ms_mt.deactivate_context(ctx_id_v4_mt)
diff --git a/suites/gprs/iperf3.py b/suites/gprs/iperf3.py
new file mode 100755
index 0000000..e25519a
--- /dev/null
+++ b/suites/gprs/iperf3.py
@@ -0,0 +1,8 @@
+#!/usr/bin/env python3
+from osmo_gsm_tester.testenv import *
+
+import testlib
+suite.test_import_modules_register_for_cleanup(testlib)
+from testlib import setup_run_iperf3_test_parallel
+
+setup_run_iperf3_test_parallel(1)
diff --git a/suites/gprs/iperf3m4.py b/suites/gprs/iperf3m4.py
new file mode 100755
index 0000000..1cc27ed
--- /dev/null
+++ b/suites/gprs/iperf3m4.py
@@ -0,0 +1,8 @@
+#!/usr/bin/env python3
+from osmo_gsm_tester.testenv import *
+
+import testlib
+suite.test_import_modules_register_for_cleanup(testlib)
+from testlib import setup_run_iperf3_test_parallel
+
+setup_run_iperf3_test_parallel(4)
diff --git a/suites/gprs/lib/testlib.py b/suites/gprs/lib/testlib.py
new file mode 100644
index 0000000..13a6671
--- /dev/null
+++ b/suites/gprs/lib/testlib.py
@@ -0,0 +1,110 @@
+#!/usr/bin/env python3
+from osmo_gsm_tester.testenv import *
+
+def print_results(cli_res, srv_res):
+ cli_sent = cli_res['end']['sum_sent']
+ cli_recv = cli_res['end']['sum_received']
+ print("RESULT client:")
+ print("\tSEND: %d KB, %d kbps, %d seconds (%d retrans)" % (cli_sent['bytes']/1000, cli_sent['bits_per_second']/1000, cli_sent['seconds'], cli_sent['retransmits']))
+ print("\tRECV: %d KB, %d kbps, %d seconds" % (cli_recv['bytes']/1000, cli_recv['bits_per_second']/1000, cli_recv['seconds']))
+ print("RESULT server:")
+ print("\tSEND: %d KB, %d kbps, %d seconds" % (cli_sent['bytes']/1000, cli_sent['bits_per_second']/1000, cli_sent['seconds']))
+ print("\tRECV: %d KB, %d kbps, %d seconds" % (cli_recv['bytes']/1000, cli_recv['bits_per_second']/1000, cli_recv['seconds']))
+
+def run_iperf3_cli_parallel(iperf3clients, ms_li):
+ assert len(iperf3clients) == len(ms_li)
+ procs = []
+ for i in range(len(iperf3clients)):
+ print("Running iperf3 client to %s through %r" % (str(iperf3clients[i]), ms_li[i].tmp_ctx_id))
+ procs.append(iperf3clients[i].prepare_test_proc(ms_li[i].netns()))
+ try:
+ for proc in procs:
+ proc.launch()
+ for proc in procs:
+ proc.wait()
+ except Exception as e:
+ for proc in procs:
+ proc.terminate()
+ raise e
+
+
+def setup_run_iperf3_test_parallel(num_ms):
+ hlr = suite.hlr()
+ bts = suite.bts()
+ pcu = bts.pcu()
+ mgw_msc = suite.mgw()
+ mgw_bsc = suite.mgw()
+ stp = suite.stp()
+ ggsn = suite.ggsn()
+ sgsn = suite.sgsn(hlr, ggsn)
+ msc = suite.msc(hlr, mgw_msc, stp)
+ bsc = suite.bsc(msc, mgw_bsc, stp)
+
+ iperf3srv_addr = suite.ip_address()
+ servers = []
+ clients = []
+ ms_li = []
+ for i in range(num_ms):
+ iperf3srv = suite.iperf3srv(iperf3srv_addr)
+ iperf3srv.set_port(iperf3srv.DEFAULT_SRV_PORT + i)
+ servers.append(iperf3srv)
+
+ iperf3cli = iperf3srv.create_client()
+ clients.append(iperf3cli)
+
+ ms = suite.modem()
+ ms_li.append(ms)
+
+ bsc.bts_add(bts)
+ sgsn.bts_add(bts)
+
+ for iperf3srv in servers:
+ print('start iperf3 server %s...' % str(iperf3srv) )
+ iperf3srv.start()
+
+ print('start network...')
+ hlr.start()
+ stp.start()
+ ggsn.start()
+ sgsn.start()
+ msc.start()
+ mgw_msc.start()
+ mgw_bsc.start()
+ bsc.start()
+
+ bts.start()
+ wait(bsc.bts_is_connected, bts)
+ print('Waiting for bts to be ready...')
+ wait(bts.ready_for_pcu)
+ pcu.start()
+
+ for ms in ms_li:
+ hlr.subscriber_add(ms)
+ ms.connect(msc.mcc_mnc())
+ ms.attach()
+ ms.log_info()
+
+ print('waiting for modems to attach...')
+ for ms in ms_li:
+ wait(ms.is_connected, msc.mcc_mnc())
+ wait(msc.subscriber_attached, *ms_li)
+
+ print('waiting for modems to attach to data services...')
+ for ms in ms_li:
+ wait(ms.is_attached)
+ # We need to use inet46 since ofono qmi only uses ipv4v6 eua (OS#2713)
+ ctx_id_v4 = ms.activate_context(apn='inet46', protocol=ms.CTX_PROT_IPv4)
+ print("Setting up data plane for %r" % ctx_id_v4)
+ ms.setup_context_data_plane(ctx_id_v4)
+ setattr(ms, 'tmp_ctx_id', ctx_id_v4)
+
+ run_iperf3_cli_parallel(clients, ms_li)
+
+ for i in range(num_ms):
+ servers[i].stop()
+ print("Results for %s through %r" % (str(servers[i]), ms_li[i].tmp_ctx_id))
+ print_results(clients[i].get_results(), servers[i].get_results())
+
+ for ms in ms_li:
+ ms.deactivate_context(ms.tmp_ctx_id)
+ delattr(ms, 'tmp_ctx_id')
diff --git a/suites/gprs/ping.py b/suites/gprs/ping.py
new file mode 100755
index 0000000..9186fe6
--- /dev/null
+++ b/suites/gprs/ping.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python3
+from osmo_gsm_tester.testenv import *
+
+hlr = suite.hlr()
+bts = suite.bts()
+pcu = bts.pcu()
+mgw_msc = suite.mgw()
+mgw_bsc = suite.mgw()
+stp = suite.stp()
+ggsn = suite.ggsn()
+sgsn = suite.sgsn(hlr, ggsn)
+msc = suite.msc(hlr, mgw_msc, stp)
+bsc = suite.bsc(msc, mgw_bsc, stp)
+ms = suite.modem()
+
+bsc.bts_add(bts)
+sgsn.bts_add(bts)
+
+print('start network...')
+hlr.start()
+stp.start()
+ggsn.start()
+sgsn.start()
+msc.start()
+mgw_msc.start()
+mgw_bsc.start()
+bsc.start()
+
+bts.start()
+wait(bsc.bts_is_connected, bts)
+print('Waiting for bts to be ready...')
+wait(bts.ready_for_pcu)
+pcu.start()
+
+hlr.subscriber_add(ms)
+
+ms.connect(msc.mcc_mnc())
+ms.attach()
+
+ms.log_info()
+
+print('waiting for modems to attach...')
+wait(ms.is_connected, msc.mcc_mnc())
+wait(msc.subscriber_attached, ms)
+
+print('waiting for modems to attach to data services...')
+wait(ms.is_attached)
+
+# We need to use inet46 since ofono qmi only uses ipv4v6 eua (OS#2713)
+ctx_id_v4 = ms.activate_context(apn='inet46', protocol=ms.CTX_PROT_IPv4)
+ print("Setting up data plane for %r" % ctx_id_v4)
+ ms.setup_context_data_plane(ctx_id_v4)
+ print("Running 10 ping requests for %r" % ctx_id_v4)
+ms.run_netns_wait('ping', ('ping', '-c', '10', ggsn.addr()))
+ms.deactivate_context(ctx_id_v4)
+
+# We need to use inet46 since ofono qmi only uses ipv4v6 eua (OS#2713)
+ctx_id_v6 = ms.activate_context(apn='inet46', protocol=ms.CTX_PROT_IPv6)
+sleep(5)
+# TODO: send ping to server or open TCP conn with a socket in python
+ms.deactivate_context(ctx_id_v6)
+
+# IPv46 (dual) not supported in ofono qmi: org.ofono.Error.Failed: Operation failed (36)
+# ctx_id_v46 = ms.activate_context(apn='inet46', protocol=ms.CTX_PROT_IPv46)
+# sleep(5)
+# TODO: send ping to server or open TCP conn with a socket in python
+# ms.deactivate_context(ctx_id_v46)
diff --git a/suites/gprs/ping_idle_ping.py b/suites/gprs/ping_idle_ping.py
new file mode 100755
index 0000000..e0968ac
--- /dev/null
+++ b/suites/gprs/ping_idle_ping.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python3
+
+# Following test verifies GPRS works fine after MS stays idle (no data
+# sent/received) for a long while.
+# See OS#3678 and OS#2455 for more information.
+
+from osmo_gsm_tester.testenv import *
+
+hlr = suite.hlr()
+bts = suite.bts()
+pcu = bts.pcu()
+mgw_msc = suite.mgw()
+mgw_bsc = suite.mgw()
+stp = suite.stp()
+ggsn = suite.ggsn()
+sgsn = suite.sgsn(hlr, ggsn)
+msc = suite.msc(hlr, mgw_msc, stp)
+bsc = suite.bsc(msc, mgw_bsc, stp)
+ms = suite.modem()
+
+bsc.bts_add(bts)
+sgsn.bts_add(bts)
+
+print('start network...')
+hlr.start()
+stp.start()
+ggsn.start()
+sgsn.start()
+msc.start()
+mgw_msc.start()
+mgw_bsc.start()
+bsc.start()
+
+bts.start()
+wait(bsc.bts_is_connected, bts)
+print('Waiting for bts to be ready...')
+wait(bts.ready_for_pcu)
+pcu.start()
+
+hlr.subscriber_add(ms)
+
+ms.connect(msc.mcc_mnc())
+ms.attach()
+
+ms.log_info()
+
+print('waiting for modems to attach...')
+wait(ms.is_connected, msc.mcc_mnc())
+wait(msc.subscriber_attached, ms)
+
+print('waiting for modems to attach to data services...')
+wait(ms.is_attached)
+
+# We need to use inet46 since ofono qmi only uses ipv4v6 eua (OS#2713)
+ctx_id_v4 = ms.activate_context(apn='inet46', protocol=ms.CTX_PROT_IPv4)
+ print("Setting up data plane for %r" % ctx_id_v4)
+ ms.setup_context_data_plane(ctx_id_v4)
+ print("[1] Running 10 ping requests for %r" % ctx_id_v4)
+ms.run_netns_wait('ping1', ('ping', '-c', '10', ggsn.addr()))
+
+print("Sleeping for 60 seconds")
+sleep(60)
+
+ print("[2] Running 10 ping requests for %r" % ctx_id_v4)
+ms.run_netns_wait('ping2', ('ping', '-c', '10', ggsn.addr()))
+ms.deactivate_context(ctx_id_v4)
diff --git a/suites/gprs/suite.conf b/suites/gprs/suite.conf
new file mode 100644
index 0000000..d40c1e5
--- /dev/null
+++ b/suites/gprs/suite.conf
@@ -0,0 +1,13 @@
+resources:
+ ip_address:
+ - times: 9 # msc, bsc, hlr, stp, mgw*2, sgsn, ggsn, iperf3srv
+ bts:
+ - times: 1
+ modem:
+ - times: 2
+ features:
+ - gprs
+ - voice
+ - times: 2
+ features:
+ - gprs
diff --git a/suites/nitb_debug/error.py b/suites/nitb_debug/error.py
new file mode 100644
index 0000000..032e26c
--- /dev/null
+++ b/suites/nitb_debug/error.py
@@ -0,0 +1,5 @@
+#!/usr/bin/env python3
+from osmo_gsm_tester.testenv import *
+
+# This can be used to verify that a test error is reported properly.
+assert False
diff --git a/suites/nitb_debug/fail.py b/suites/nitb_debug/fail.py
new file mode 100644
index 0000000..a2d9e3a
--- /dev/null
+++ b/suites/nitb_debug/fail.py
@@ -0,0 +1,5 @@
+#!/usr/bin/env python3
+from osmo_gsm_tester.testenv import *
+
+# This can be used to verify that a test failure is reported properly.
+test.set_fail('ExpectedFail', 'This failure is expected')
diff --git a/suites/nitb_debug/fail_raise.py b/suites/nitb_debug/fail_raise.py
new file mode 100644
index 0000000..c30a4f5
--- /dev/null
+++ b/suites/nitb_debug/fail_raise.py
@@ -0,0 +1,8 @@
+#!/usr/bin/env python3
+from osmo_gsm_tester.testenv import *
+
+class ExpectedExn(Exception):
+ pass
+
+# This can be used to verify that a test failure is reported properly.
+raise ExpectedExn('This failure is expected')
diff --git a/suites/nitb_debug/interactive.py b/suites/nitb_debug/interactive.py
new file mode 100755
index 0000000..595cfd9
--- /dev/null
+++ b/suites/nitb_debug/interactive.py
@@ -0,0 +1,111 @@
+#!/usr/bin/env python3
+from osmo_gsm_tester.testenv import *
+
+print('use resources...')
+nitb = suite.nitb()
+bts = suite.bts()
+modems = suite.modems(int(prompt('How many modems?')))
+
+print('start nitb and bts...')
+nitb.bts_add(bts)
+nitb.start()
+bts.start()
+wait(nitb.bts_is_connected, bts)
+
+for m in modems:
+ nitb.subscriber_add(m)
+ m.connect(nitb.mcc_mnc())
+
+while True:
+ cmd = prompt('Enter command: (q)uit (s)ms (g)et-registered (w)ait-registered, call-list [<ms_msisdn>], call-dial <src_msisdn> <dst_msisdn>, call-wait-incoming <src_msisdn> <dst_msisdn>, call-answer <mt_msisdn> <call_id>, call-hangup <ms_msisdn> <call_id>, ussd <command>')
+ cmd = cmd.strip().lower()
+
+ if not cmd:
+ continue
+
+ params = cmd.split()
+
+ if 'quit'.startswith(cmd):
+ break
+
+ elif 'wait-registered'.startswith(cmd):
+ try:
+ for m in modems:
+ wait(m.is_connected, nitb.mcc_mnc())
+ wait(nitb.subscriber_attached, *modems)
+ except Timeout:
+ print('Timeout while waiting for registration.')
+
+ elif 'get-registered'.startswith(cmd):
+ print(nitb.imsi_list_attached())
+ print('RESULT: %s' %
+ ('All modems are registered.' if nitb.subscriber_attached(*modems)
+ else 'Some modem(s) not registered yet.'))
+
+ elif 'sms'.startswith(cmd):
+ for mo in modems:
+ for mt in modems:
+ mo.sms_send(mt.msisdn, 'to ' + mt.name())
+
+ elif cmd.startswith('call-list'):
+ if len(params) != 1 and len(params) != 2:
+ print('wrong format')
+ continue
+ for ms in modems:
+ if len(params) == 1 or str(ms.msisdn) == params[1]:
+ print('call-list: %r %r' % (ms.name(), ms.call_id_list()))
+
+ elif cmd.startswith('call-dial'):
+ if len(params) != 3:
+ print('wrong format')
+ continue
+ src_msisdn, dst_msisdn = params[1:]
+ for mo in modems:
+ if str(mo.msisdn) == src_msisdn:
+ print('dialing %s->%s' % (src_msisdn, dst_msisdn))
+ call_id = mo.call_dial(dst_msisdn)
+ print('dial success: call_id=%r' % call_id)
+
+ elif cmd.startswith('call-wait-incoming'):
+ if len(params) != 3:
+ print('wrong format')
+ continue
+ src_msisdn, dst_msisdn = params[1:]
+ for mt in modems:
+ if str(mt.msisdn) == dst_msisdn:
+ print('waiting for incoming %s->%s' % (src_msisdn, dst_msisdn))
+ call_id = mt.call_wait_incoming(src_msisdn)
+ print('incoming call success: call_id=%r' % call_id)
+
+ elif cmd.startswith('call-answer'):
+ if len(params) != 3:
+ print('wrong format')
+ continue
+ mt_msisdn, call_id = params[1:]
+ for mt in modems:
+ if str(mt.msisdn) == mt_msisdn:
+ print('answering %s %r' % (mt.name(), call_id))
+ mt.call_answer(call_id)
+
+ elif cmd.startswith('call-hangup'):
+ if len(params) != 3:
+ print('wrong format')
+ continue
+ ms_msisdn, call_id = params[1:]
+ for ms in modems:
+ if str(ms.msisdn) == ms_msisdn:
+ print('hanging up %s %r' % (ms.name(), call_id))
+ ms.call_hangup(call_id)
+
+ elif cmd.startswith('ussd'):
+ if len(params) != 2:
+ print('wrong format')
+ continue
+ ussd_cmd = params[1]
+ for ms in modems:
+ print('modem %s: ussd %s' % (ms.name(), ussd_cmd))
+ response = ms.ussd_send(ussd_cmd)
+ print('modem %s: response=%r' % (ms.name(), response))
+
+ else:
+ print('Unknown command: %s' % cmd)
diff --git a/suites/nitb_debug/pass.py b/suites/nitb_debug/pass.py
new file mode 100644
index 0000000..c07f079
--- /dev/null
+++ b/suites/nitb_debug/pass.py
@@ -0,0 +1,5 @@
+#!/usr/bin/env python3
+from osmo_gsm_tester.testenv import *
+
+# This can be used to verify that a test passes correctly.
+pass
diff --git a/suites/nitb_debug/suite.conf b/suites/nitb_debug/suite.conf
new file mode 100644
index 0000000..adfc161
--- /dev/null
+++ b/suites/nitb_debug/suite.conf
@@ -0,0 +1,10 @@
+resources:
+ ip_address:
+ - times: 1
+ bts:
+ - times: 1
+ modem:
+ - times: 4
+
+defaults:
+ timeout: 60s
diff --git a/suites/nitb_netreg/register.py b/suites/nitb_netreg/register.py
new file mode 100755
index 0000000..d5fbeb7
--- /dev/null
+++ b/suites/nitb_netreg/register.py
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+from osmo_gsm_tester.testenv import *
+
+print('use resources...')
+nitb = suite.nitb()
+bts = suite.bts()
+ms = suite.modem()
+
+print('start nitb and bts...')
+nitb.bts_add(bts)
+nitb.start()
+bts.start()
+wait(nitb.bts_is_connected, bts)
+
+nitb.subscriber_add(ms)
+
+ms.connect(nitb.mcc_mnc())
+
+print(ms.info())
+
+wait(ms.is_connected, nitb.mcc_mnc())
+wait(nitb.subscriber_attached, ms)
diff --git a/suites/nitb_netreg/register_default.py b/suites/nitb_netreg/register_default.py
new file mode 100755
index 0000000..545525d
--- /dev/null
+++ b/suites/nitb_netreg/register_default.py
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+from osmo_gsm_tester.testenv import *
+
+print('use resources...')
+nitb = suite.nitb()
+bts = suite.bts()
+ms = suite.modem()
+
+print('start nitb and bts...')
+nitb.bts_add(bts)
+nitb.start()
+bts.start()
+wait(nitb.bts_is_connected, bts)
+
+nitb.subscriber_add(ms)
+
+ms.connect()
+
+print(ms.info())
+
+wait(ms.is_connected)
+wait(nitb.subscriber_attached, ms)
diff --git a/suites/nitb_netreg/suite.conf b/suites/nitb_netreg/suite.conf
new file mode 100644
index 0000000..1bb1dbb
--- /dev/null
+++ b/suites/nitb_netreg/suite.conf
@@ -0,0 +1,10 @@
+resources:
+ ip_address:
+ - times: 1
+ bts:
+ - times: 1
+ modem:
+ - times: 1
+
+defaults:
+ timeout: 40s
diff --git a/suites/nitb_netreg_mass/register_default_mass.py b/suites/nitb_netreg_mass/register_default_mass.py
new file mode 100644
index 0000000..ede2b74
--- /dev/null
+++ b/suites/nitb_netreg_mass/register_default_mass.py
@@ -0,0 +1,27 @@
+#!/usr/bin/env python3
+"""
+Run a network registration with a 'massive' amount of MS
+using the ms_driver infrastructure.
+"""
+from osmo_gsm_tester.testenv import *
+
+print('use resources...')
+nitb = suite.nitb()
+bts = suite.bts()
+ms_driver = suite.ms_driver()
+
+print('start nitb and bts...')
+nitb.bts_add(bts)
+nitb.start()
+bts.start()
+wait(nitb.bts_is_connected, bts)
+
+# Configure all MS that the MS driver knows about.
+for ms in ms_driver.ms_subscribers():
+ nitb.subscriber_add(ms)
+
+# Run the base test.
+ms_driver.run_test()
+
+# Print stats
+ms_driver.print_stats()
diff --git a/suites/nitb_netreg_mass/suite.conf b/suites/nitb_netreg_mass/suite.conf
new file mode 100644
index 0000000..bfb23cd
--- /dev/null
+++ b/suites/nitb_netreg_mass/suite.conf
@@ -0,0 +1,8 @@
+resources:
+ ip_address:
+ - times: 1
+ bts:
+ - times: 1
+
+defaults:
+ timeout: 40s
diff --git a/suites/nitb_smpp/esme_connect_policy_acceptall.py b/suites/nitb_smpp/esme_connect_policy_acceptall.py
new file mode 100755
index 0000000..904226b
--- /dev/null
+++ b/suites/nitb_smpp/esme_connect_policy_acceptall.py
@@ -0,0 +1,32 @@
+#!/usr/bin/env python3
+
+# This test checks following use-cases while in 'accept-all' policy:
+# * SMPP interface of SMSC accepts SMPP clients (ESMEs) which do not appear on
+# the config file
+
+from osmo_gsm_tester.testenv import *
+
+nitb = suite.nitb()
+smsc = nitb.smsc
+esme = suite.esme()
+
+# Here we deliberately omit calling smsc.esme_add() to avoid having it included
+# in the smsc config.
+smsc.set_smsc_policy(smsc.SMSC_POLICY_ACCEPT_ALL)
+esme.set_smsc(smsc)
+
+nitb.start()
+
+# Due to accept-all policy, connect() should work even if we didn't previously
+# configure the esme in the smsc, no matter the system_id / password we use.
+log('Test connect with non-empty values in system_id and password')
+esme.set_system_id('foo')
+esme.set_password('bar')
+esme.connect()
+esme.disconnect()
+
+log('Test connect with empty values in system_id and password')
+esme.set_system_id('')
+esme.set_password('')
+esme.connect()
+esme.disconnect()
diff --git a/suites/nitb_smpp/esme_connect_policy_closed.py b/suites/nitb_smpp/esme_connect_policy_closed.py
new file mode 100755
index 0000000..eaabb3d
--- /dev/null
+++ b/suites/nitb_smpp/esme_connect_policy_closed.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python3
+
+# This test checks following use-cases while in 'closed' policy:
+# * SMPP interface of SMSC accepts SMPP clients (ESMEs) with password previously
+# defined in its configuration file.
+# * SMPP interface of SMSC rejects ESMEs with known system id but wrong password.
+# * SMPP interface of SMSC rejects ESMEs with unknown system id
+
+from osmo_gsm_tester.testenv import *
+
+SMPP_ESME_RINVPASWD = 0x0000000E
+SMPP_ESME_RINVSYSID = 0x0000000F
+
+nitb = suite.nitb()
+smsc = nitb.smsc
+esme = suite.esme()
+esme_no_pwd = suite.esme()
+esme_no_pwd.set_password('')
+
+smsc.set_smsc_policy(smsc.SMSC_POLICY_CLOSED)
+smsc.esme_add(esme)
+smsc.esme_add(esme_no_pwd)
+
+nitb.start()
+
+log('Test with correct credentials (no password)')
+esme_no_pwd.connect()
+esme_no_pwd.disconnect()
+
+log('Test with correct credentials (no password, non empty)')
+esme_no_pwd.set_password('foobar')
+esme_no_pwd.connect()
+esme_no_pwd.disconnect()
+
+log('Test with correct credentials')
+esme.connect()
+esme.disconnect()
+
+log('Test with bad password, checking for failure')
+correct_password = esme.password
+new_password = 'barfoo' if correct_password == 'foobar' else 'foobar'
+esme.set_password(new_password)
+esme.run_method_expect_failure(SMPP_ESME_RINVPASWD, esme.connect)
+esme.set_password(correct_password)
+
+log('Test with bad system_id, checking for failure')
+correct_system_id = esme.system_id
+new_system_id = 'barfoo' if correct_system_id == 'foobar' else 'foobar'
+esme.set_system_id(new_system_id)
+esme.run_method_expect_failure(SMPP_ESME_RINVSYSID, esme.connect)
+esme.set_system_id(correct_system_id)
diff --git a/suites/nitb_smpp/esme_ms_sms_storeforward.py b/suites/nitb_smpp/esme_ms_sms_storeforward.py
new file mode 100755
index 0000000..391a040
--- /dev/null
+++ b/suites/nitb_smpp/esme_ms_sms_storeforward.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python3
+
+# This test checks following use-cases:
+# * SMPP interface of SMSC accepts SMPP clients (ESMEs) with password previously
+# defined in its configuration file.
+# * When SMS is sent in 'store & forward' mode, ESME fails to send an SMS to non registered MS.
+# * When SMS is sent in 'store & forward' mode, ESME can send an SMS to a not yet registered MS.
+# * When SMS is sent in 'store & forward' mode, ESME can send an SMS to an already registered MS.
+# * When SMS is sent in 'store & forward' mode, ESME receives an SMS receipt if it asked for it.
+
+from osmo_gsm_tester.testenv import *
+
+SMPP_ESME_RINVDSTADR = 0x0000000B
+
+nitb = suite.nitb()
+bts = suite.bts()
+ms = suite.modem()
+esme = suite.esme()
+
+print('start nitb and bts...')
+nitb.bts_add(bts)
+nitb.smsc.esme_add(esme)
+nitb.start()
+bts.start()
+wait(nitb.bts_is_connected, bts)
+
+esme.connect()
+nitb.subscriber_add(ms)
+
+wrong_msisdn = ms.msisdn + esme.msisdn
+print('sending sms with wrong msisdn %s, it will fail' % wrong_msisdn)
+msg = Sms(esme.msisdn, wrong_msisdn, 'smpp message with wrong dest')
+esme.run_method_expect_failure(SMPP_ESME_RINVDSTADR, esme.sms_send_wait_resp, msg, esme.MSGMODE_STOREFORWARD)
+
+print('sending sms, it will be stored...')
+msg = Sms(esme.msisdn, ms.msisdn, 'smpp send not-yet-registered message')
+umref = esme.sms_send_wait_resp(msg, esme.MSGMODE_STOREFORWARD, receipt=True)
+
+print('MS registers and will receive the SMS...')
+ms.connect(nitb.mcc_mnc())
+wait(ms.is_connected, nitb.mcc_mnc())
+wait(nitb.subscriber_attached, ms)
+wait(ms.sms_was_received, msg)
+print('Waiting to receive and consume sms receipt with reference', umref)
+wait(esme.receipt_was_received, umref)
+
+print('checking MS can receive SMS while registered...')
+msg = Sms(esme.msisdn, ms.msisdn, 'smpp send already-registered message')
+umref = esme.sms_send_wait_resp(msg, esme.MSGMODE_STOREFORWARD, receipt=True)
+wait(ms.sms_was_received, msg)
+print('Waiting to receive and consume sms receipt with reference', umref)
+wait(esme.receipt_was_received, umref)
+esme.disconnect()
diff --git a/suites/nitb_smpp/esme_ms_sms_transaction.py b/suites/nitb_smpp/esme_ms_sms_transaction.py
new file mode 100755
index 0000000..adc9dae
--- /dev/null
+++ b/suites/nitb_smpp/esme_ms_sms_transaction.py
@@ -0,0 +1,49 @@
+#!/usr/bin/env python3
+
+# This test checks following use-cases:
+# * SMPP interface of SMSC accepts SMPP clients (ESMEs) with password previously
+# defined in its configuration file.
+# * When SMS is sent in 'transaction' mode, ESME can send an SMS to an already registered MS.
+# * When SMS is sent in 'transaction' mode, ESME fails to send an SMS to non registered MS.
+
+from osmo_gsm_tester.testenv import *
+
+SMPP_ESME_RINVDSTADR = 0x0000000B
+
+nitb = suite.nitb()
+bts = suite.bts()
+ms = suite.modem()
+esme = suite.esme()
+
+print('start nitb and bts...')
+nitb.bts_add(bts)
+nitb.smsc.esme_add(esme)
+nitb.start()
+bts.start()
+wait(nitb.bts_is_connected, bts)
+
+esme.connect()
+nitb.subscriber_add(ms)
+ms.connect(nitb.mcc_mnc())
+
+ms.log_info()
+print('waiting for modem to attach...')
+wait(ms.is_connected, nitb.mcc_mnc())
+wait(nitb.subscriber_attached, ms)
+
+print('sending first sms...')
+msg = Sms(esme.msisdn, ms.msisdn, 'smpp send message')
+esme.sms_send(msg, esme.MSGMODE_TRANSACTION)
+wait(ms.sms_was_received, msg)
+
+print('sending second sms (unicode chars not in gsm aplhabet)...')
+msg = Sms(esme.msisdn, ms.msisdn, 'chars:[кизаçйж]')
+esme.sms_send(msg, esme.MSGMODE_TRANSACTION)
+wait(ms.sms_was_received, msg)
+
+wrong_msisdn = ms.msisdn + esme.msisdn
+print('sending third sms (with wrong msisdn %s)' % wrong_msisdn)
+msg = Sms(esme.msisdn, wrong_msisdn, 'smpp message with wrong dest')
+esme.run_method_expect_failure(SMPP_ESME_RINVDSTADR, esme.sms_send_wait_resp, msg, esme.MSGMODE_TRANSACTION)
+
+esme.disconnect()
diff --git a/suites/nitb_smpp/suite.conf b/suites/nitb_smpp/suite.conf
new file mode 100644
index 0000000..eb5dc01
--- /dev/null
+++ b/suites/nitb_smpp/suite.conf
@@ -0,0 +1,12 @@
+resources:
+ ip_address:
+ - times: 1
+ bts:
+ - times: 1
+ modem:
+ - times: 1
+ features:
+ - sms
+
+defaults:
+ timeout: 60s
diff --git a/suites/nitb_sms/mo_mt_sms.py b/suites/nitb_sms/mo_mt_sms.py
new file mode 100755
index 0000000..10897ff
--- /dev/null
+++ b/suites/nitb_sms/mo_mt_sms.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python3
+from osmo_gsm_tester.testenv import *
+
+nitb = suite.nitb()
+bts = suite.bts()
+ms_mo = suite.modem()
+ms_mt = suite.modem()
+
+print('start nitb and bts...')
+nitb.bts_add(bts)
+nitb.start()
+bts.start()
+wait(nitb.bts_is_connected, bts)
+
+nitb.subscriber_add(ms_mo)
+nitb.subscriber_add(ms_mt)
+
+ms_mo.connect(nitb.mcc_mnc())
+ms_mt.connect(nitb.mcc_mnc())
+
+ms_mo.log_info()
+ms_mt.log_info()
+
+print('waiting for modems to attach...')
+wait(ms_mo.is_connected, nitb.mcc_mnc())
+wait(ms_mt.is_connected, nitb.mcc_mnc())
+wait(nitb.subscriber_attached, ms_mo, ms_mt)
+
+sms = ms_mo.sms_send(ms_mt)
+wait(ms_mt.sms_was_received, sms)
diff --git a/suites/nitb_sms/suite.conf b/suites/nitb_sms/suite.conf
new file mode 100644
index 0000000..485402b
--- /dev/null
+++ b/suites/nitb_sms/suite.conf
@@ -0,0 +1,12 @@
+resources:
+ ip_address:
+ - times: 1
+ bts:
+ - times: 1
+ modem:
+ - times: 2
+ features:
+ - sms
+
+defaults:
+ timeout: 60s
diff --git a/suites/nitb_ussd/assert_extension.py b/suites/nitb_ussd/assert_extension.py
new file mode 100755
index 0000000..8ccab2d
--- /dev/null
+++ b/suites/nitb_ussd/assert_extension.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python3
+from osmo_gsm_tester.testenv import *
+
+USSD_COMMAND_GET_EXTENSION = '*#100#'
+
+nitb = suite.nitb()
+bts = suite.bts()
+ms = suite.modem()
+
+print('start nitb and bts...')
+nitb.bts_add(bts)
+nitb.start()
+bts.start()
+wait(nitb.bts_is_connected, bts)
+
+nitb.subscriber_add(ms)
+
+ms.connect(nitb.mcc_mnc())
+ms.log_info()
+
+print('waiting for modems to attach...')
+wait(ms.is_connected, nitb.mcc_mnc())
+wait(nitb.subscriber_attached, ms)
+
+# ofono (qmi) currently changes state to 'registered' just after sending
+# 'Location Update Request', but before receiving 'Location Updating Accept'.
+# Which means we can reach lines below and send USSD code while still not being
+# attached, which will then fail. See OsmoGsmTester #2239 for more detailed
+# information.
+# Until we find an ofono fix or a better way to workaround this, let's just
+# sleep for a while in order to receive the 'Location Updating Accept' message
+# before attempting to send the USSD.
+sleep(10)
+
+print('Sending ussd code %s' % USSD_COMMAND_GET_EXTENSION)
+response = ms.ussd_send(USSD_COMMAND_GET_EXTENSION)
+assert ' ' + ms.msisdn + '\r' in response
diff --git a/suites/nitb_ussd/suite.conf b/suites/nitb_ussd/suite.conf
new file mode 100644
index 0000000..232a5d8
--- /dev/null
+++ b/suites/nitb_ussd/suite.conf
@@ -0,0 +1,12 @@
+resources:
+ ip_address:
+ - times: 1
+ bts:
+ - times: 1
+ modem:
+ - times: 1
+ features:
+ - ussd
+
+defaults:
+ timeout: 60s
diff --git a/suites/smpp/esme_connect_policy_acceptall.py b/suites/smpp/esme_connect_policy_acceptall.py
new file mode 100755
index 0000000..168b4f3
--- /dev/null
+++ b/suites/smpp/esme_connect_policy_acceptall.py
@@ -0,0 +1,38 @@
+#!/usr/bin/env python3
+
+# This test checks following use-cases while in 'accept-all' policy:
+# * SMPP interface of SMSC accepts SMPP clients (ESMEs) which do not appear on
+# the config file
+
+from osmo_gsm_tester.testenv import *
+
+hlr = suite.hlr()
+mgw_msc = suite.mgw()
+stp = suite.stp()
+msc = suite.msc(hlr, mgw_msc, stp)
+smsc = msc.smsc
+esme = suite.esme()
+
+# Here we deliberately omit calling smsc.esme_add() to avoid having it included
+# in the smsc config.
+smsc.set_smsc_policy(smsc.SMSC_POLICY_ACCEPT_ALL)
+esme.set_smsc(smsc)
+
+stp.start()
+hlr.start()
+msc.start()
+mgw_msc.start()
+
+# Due to accept-all policy, connect() should work even if we didn't previously
+# configure the esme in the smsc, no matter the system_id / password we use.
+log('Test connect with non-empty values in system_id and password')
+esme.set_system_id('foo')
+esme.set_password('bar')
+esme.connect()
+esme.disconnect()
+
+log('Test connect with empty values in system_id and password')
+esme.set_system_id('')
+esme.set_password('')
+esme.connect()
+esme.disconnect()
diff --git a/suites/smpp/esme_connect_policy_closed.py b/suites/smpp/esme_connect_policy_closed.py
new file mode 100755
index 0000000..487e5a4
--- /dev/null
+++ b/suites/smpp/esme_connect_policy_closed.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python3
+
+# This test checks following use-cases while in 'closed' policy:
+# * SMPP interface of SMSC accepts SMPP clients (ESMEs) with password previously
+# defined in its configuration file.
+# * SMPP interface of SMSC rejects ESMEs with known system id but wrong password.
+# * SMPP interface of SMSC rejects ESMEs with unknown system id
+
+from osmo_gsm_tester.testenv import *
+
+SMPP_ESME_RINVPASWD = 0x0000000E
+SMPP_ESME_RINVSYSID = 0x0000000F
+
+hlr = suite.hlr()
+bts = suite.bts()
+mgw_msc = suite.mgw()
+stp = suite.stp()
+msc = suite.msc(hlr, mgw_msc, stp)
+smsc = msc.smsc
+
+esme = suite.esme()
+esme_no_pwd = suite.esme()
+esme_no_pwd.set_password('')
+
+smsc.set_smsc_policy(smsc.SMSC_POLICY_CLOSED)
+smsc.esme_add(esme)
+smsc.esme_add(esme_no_pwd)
+
+stp.start()
+hlr.start()
+msc.start()
+mgw_msc.start()
+
+log('Test with correct credentials (no password)')
+esme_no_pwd.connect()
+esme_no_pwd.disconnect()
+
+log('Test with correct credentials (no password, non empty)')
+esme_no_pwd.set_password('foobar')
+esme_no_pwd.connect()
+esme_no_pwd.disconnect()
+
+log('Test with correct credentials')
+esme.connect()
+esme.disconnect()
+
+log('Test with bad password, checking for failure')
+correct_password = esme.password
+new_password = 'barfoo' if correct_password == 'foobar' else 'foobar'
+esme.set_password(new_password)
+esme.run_method_expect_failure(SMPP_ESME_RINVPASWD, esme.connect)
+esme.set_password(correct_password)
+
+log('Test with bad system_id, checking for failure')
+correct_system_id = esme.system_id
+new_system_id = 'barfoo' if correct_system_id == 'foobar' else 'foobar'
+esme.set_system_id(new_system_id)
+esme.run_method_expect_failure(SMPP_ESME_RINVSYSID, esme.connect)
+esme.set_system_id(correct_system_id)
diff --git a/suites/smpp/esme_ms_sms_storeforward.py b/suites/smpp/esme_ms_sms_storeforward.py
new file mode 100755
index 0000000..681bc29
--- /dev/null
+++ b/suites/smpp/esme_ms_sms_storeforward.py
@@ -0,0 +1,69 @@
+#!/usr/bin/env python3
+
+# This test checks following use-cases:
+# * SMPP interface of SMSC accepts SMPP clients (ESMEs) with password previously
+# defined in its configuration file.
+# * When SMS is sent in 'store & forward' mode, ESME fails to send an SMS to non registered MS.
+# * When SMS is sent in 'store & forward' mode, ESME can send an SMS to a not yet registered MS.
+# * When SMS is sent in 'store & forward' mode, ESME can send an SMS to an already registered MS.
+# * When SMS is sent in 'store & forward' mode, ESME receives an SMS receipt if it asked for it.
+
+from osmo_gsm_tester.testenv import *
+
+SMPP_ESME_RINVDSTADR = 0x0000000B
+
+hlr = suite.hlr()
+bts = suite.bts()
+mgw_msc = suite.mgw()
+mgw_bsc = suite.mgw()
+stp = suite.stp()
+msc = suite.msc(hlr, mgw_msc, stp)
+bsc = suite.bsc(msc, mgw_bsc, stp)
+bsc.bts_add(bts)
+
+ms = suite.modem()
+esme = suite.esme()
+msc.smsc.esme_add(esme)
+
+hlr.start()
+stp.start()
+msc.start()
+mgw_msc.start()
+mgw_bsc.start()
+bsc.start()
+bts.start()
+wait(bsc.bts_is_connected, bts)
+
+esme.connect()
+hlr.subscriber_add(ms)
+
+wrong_msisdn = ms.msisdn + esme.msisdn
+print('sending sms with wrong msisdn %s, it will be stored but not delivered' % wrong_msisdn)
+msg = Sms(esme.msisdn, wrong_msisdn, 'smpp message with wrong dest')
+# Since osmo-msc 1e67fea7ba5c6336, we accept all sms in store&forward mode without looking at HLR
+# esme.run_method_expect_failure(SMPP_ESME_RINVDSTADR, esme.sms_send_wait_resp, msg, esme.MSGMODE_STOREFORWARD)
+umref_wrong = esme.sms_send_wait_resp(msg, esme.MSGMODE_STOREFORWARD, receipt=True)
+
+print('sending sms, it will be stored...')
+msg = Sms(esme.msisdn, ms.msisdn, 'smpp send not-yet-registered message')
+umref = esme.sms_send_wait_resp(msg, esme.MSGMODE_STOREFORWARD, receipt=True)
+
+print('MS registers and will receive the SMS...')
+ms.connect(msc.mcc_mnc())
+wait(ms.is_connected, msc.mcc_mnc())
+wait(msc.subscriber_attached, ms)
+wait(ms.sms_was_received, msg)
+print('Waiting to receive and consume sms receipt with reference', umref)
+wait(esme.receipt_was_received, umref)
+
+print('checking MS can receive SMS while registered...')
+msg = Sms(esme.msisdn, ms.msisdn, 'smpp send already-registered message')
+umref = esme.sms_send_wait_resp(msg, esme.MSGMODE_STOREFORWARD, receipt=True)
+wait(ms.sms_was_received, msg)
+print('Waiting to receive and consume sms receipt with reference', umref)
+wait(esme.receipt_was_received, umref)
+
+print('Asserting the sms with wrong msisdn was not delivered', umref_wrong)
+assert not esme.receipt_was_received(umref_wrong)
+
+esme.disconnect()
diff --git a/suites/smpp/esme_ms_sms_transaction.py b/suites/smpp/esme_ms_sms_transaction.py
new file mode 100755
index 0000000..16b01cc
--- /dev/null
+++ b/suites/smpp/esme_ms_sms_transaction.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python3
+
+# This test checks following use-cases:
+# * SMPP interface of SMSC accepts SMPP clients (ESMEs) with password previously
+# defined in its configuration file.
+# * When SMS is sent in 'transaction' mode, ESME can send an SMS to an already registered MS.
+# * When SMS is sent in 'transaction' mode, ESME fails to send an SMS to non registered MS.
+
+from osmo_gsm_tester.testenv import *
+
+SMPP_ESME_RINVDSTADR = 0x0000000B
+
+hlr = suite.hlr()
+bts = suite.bts()
+mgw_msc = suite.mgw()
+mgw_bsc = suite.mgw()
+stp = suite.stp()
+msc = suite.msc(hlr, mgw_msc, stp)
+bsc = suite.bsc(msc, mgw_bsc, stp)
+bsc.bts_add(bts)
+
+ms = suite.modem()
+esme = suite.esme()
+msc.smsc.esme_add(esme)
+
+hlr.start()
+stp.start()
+msc.start()
+mgw_msc.start()
+mgw_bsc.start()
+bsc.start()
+bts.start()
+wait(bsc.bts_is_connected, bts)
+
+esme.connect()
+hlr.subscriber_add(ms)
+ms.connect(msc.mcc_mnc())
+
+ms.log_info()
+print('waiting for modem to attach...')
+wait(ms.is_connected, msc.mcc_mnc())
+wait(msc.subscriber_attached, ms)
+
+print('sending first sms...')
+msg = Sms(esme.msisdn, ms.msisdn, 'smpp send message')
+esme.sms_send(msg, esme.MSGMODE_TRANSACTION)
+wait(ms.sms_was_received, msg)
+
+print('sending second sms (unicode chars not in gsm aplhabet)...')
+msg = Sms(esme.msisdn, ms.msisdn, 'chars:[кизаçйж]')
+esme.sms_send(msg, esme.MSGMODE_TRANSACTION)
+wait(ms.sms_was_received, msg)
+
+wrong_msisdn = ms.msisdn + esme.msisdn
+print('sending third sms (with wrong msisdn %s)' % wrong_msisdn)
+msg = Sms(esme.msisdn, wrong_msisdn, 'smpp message with wrong dest')
+esme.run_method_expect_failure(SMPP_ESME_RINVDSTADR, esme.sms_send_wait_resp, msg, esme.MSGMODE_TRANSACTION)
+
+esme.disconnect()
diff --git a/suites/smpp/suite.conf b/suites/smpp/suite.conf
new file mode 100644
index 0000000..61e7015
--- /dev/null
+++ b/suites/smpp/suite.conf
@@ -0,0 +1,12 @@
+resources:
+ ip_address:
+ - times: 6 # msc, bsc, hlr, stp, mgw*2
+ bts:
+ - times: 1
+ modem:
+ - times: 1
+ features:
+ - sms
+
+defaults:
+ timeout: 60s
diff --git a/suites/sms/mo_mt_sms.py b/suites/sms/mo_mt_sms.py
new file mode 100755
index 0000000..7654ea6
--- /dev/null
+++ b/suites/sms/mo_mt_sms.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python3
+from osmo_gsm_tester.testenv import *
+
+hlr = suite.hlr()
+bts = suite.bts()
+mgw_msc = suite.mgw()
+mgw_bsc = suite.mgw()
+stp = suite.stp()
+msc = suite.msc(hlr, mgw_msc, stp)
+bsc = suite.bsc(msc, mgw_bsc, stp)
+ms_mo = suite.modem()
+ms_mt = suite.modem()
+
+hlr.start()
+stp.start()
+msc.start()
+mgw_msc.start()
+mgw_bsc.start()
+
+bsc.bts_add(bts)
+bsc.start()
+
+bts.start()
+wait(bsc.bts_is_connected, bts)
+
+hlr.subscriber_add(ms_mo)
+hlr.subscriber_add(ms_mt)
+
+ms_mo.connect(msc.mcc_mnc())
+ms_mt.connect(msc.mcc_mnc())
+
+ms_mo.log_info()
+ms_mt.log_info()
+
+print('waiting for modems to attach...')
+wait(ms_mo.is_connected, msc.mcc_mnc())
+wait(ms_mt.is_connected, msc.mcc_mnc())
+wait(msc.subscriber_attached, ms_mo, ms_mt)
+
+sms = ms_mo.sms_send(ms_mt)
+wait(ms_mt.sms_was_received, sms)
diff --git a/suites/sms/suite.conf b/suites/sms/suite.conf
new file mode 100644
index 0000000..28a81ea
--- /dev/null
+++ b/suites/sms/suite.conf
@@ -0,0 +1,9 @@
+resources:
+ ip_address:
+ - times: 6 # msc, bsc, hlr, stp, mgw*2
+ bts:
+ - times: 1
+ modem:
+ - times: 2
+ features:
+ - sms
diff --git a/suites/ussd/assert_extension.py b/suites/ussd/assert_extension.py
new file mode 100755
index 0000000..475de09
--- /dev/null
+++ b/suites/ussd/assert_extension.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python3
+from osmo_gsm_tester.testenv import *
+
+USSD_COMMAND_GET_EXTENSION = '*#100#'
+
+hlr = suite.hlr()
+bts = suite.bts()
+mgw_msc = suite.mgw()
+mgw_bsc = suite.mgw()
+stp = suite.stp()
+msc = suite.msc(hlr, mgw_msc, stp)
+bsc = suite.bsc(msc, mgw_bsc, stp)
+ms = suite.modem()
+
+hlr.start()
+stp.start()
+msc.start()
+mgw_msc.start()
+mgw_bsc.start()
+
+bsc.bts_add(bts)
+bsc.start()
+
+bts.start()
+wait(bsc.bts_is_connected, bts)
+
+hlr.subscriber_add(ms)
+
+ms.connect(msc.mcc_mnc())
+
+ms.log_info()
+
+print('waiting for modems to attach...')
+wait(ms.is_connected, msc.mcc_mnc())
+wait(msc.subscriber_attached, ms)
+
+# ofono (qmi) currently changes state to 'registered' just after sending
+# 'Location Update Request', but before receiving 'Location Updating Accept'.
+# Which means we can reach lines below and send USSD code while still not being
+# attached, which will then fail. See OsmoGsmTester #2239 for more detailed
+# information.
+# Until we find an ofono fix or a better way to workaround this, let's just
+# sleep for a while in order to receive the 'Location Updating Accept' message
+# before attempting to send the USSD.
+sleep(10)
+
+print('Sending ussd code %s' % USSD_COMMAND_GET_EXTENSION)
+response = ms.ussd_send(USSD_COMMAND_GET_EXTENSION)
+log('got ussd response: %r' % repr(response))
+assert response.endswith(' ' + ms.msisdn)
diff --git a/suites/ussd/suite.conf b/suites/ussd/suite.conf
new file mode 100644
index 0000000..460147a
--- /dev/null
+++ b/suites/ussd/suite.conf
@@ -0,0 +1,9 @@
+resources:
+ ip_address:
+ - times: 6 # msc, bsc, hlr, stp, mgw*2
+ bts:
+ - times: 1
+ modem:
+ - times: 1
+ features:
+ - ussd
diff --git a/suites/voice/mo_mt_call.py b/suites/voice/mo_mt_call.py
new file mode 100755
index 0000000..aeda80d
--- /dev/null
+++ b/suites/voice/mo_mt_call.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python3
+from osmo_gsm_tester.testenv import *
+
+hlr = suite.hlr()
+bts = suite.bts()
+mgw_msc = suite.mgw()
+mgw_bsc = suite.mgw()
+stp = suite.stp()
+msc = suite.msc(hlr, mgw_msc, stp)
+bsc = suite.bsc(msc, mgw_bsc, stp)
+ms_mo = suite.modem()
+ms_mt = suite.modem()
+
+hlr.start()
+stp.start()
+msc.start()
+mgw_msc.start()
+mgw_bsc.start()
+
+bsc.bts_add(bts)
+bsc.start()
+
+bts.start()
+wait(bsc.bts_is_connected, bts)
+
+hlr.subscriber_add(ms_mo)
+hlr.subscriber_add(ms_mt)
+
+ms_mo.connect(msc.mcc_mnc())
+ms_mt.connect(msc.mcc_mnc())
+
+ms_mo.log_info()
+ms_mt.log_info()
+
+print('waiting for modems to attach...')
+wait(ms_mo.is_connected, msc.mcc_mnc())
+wait(ms_mt.is_connected, msc.mcc_mnc())
+wait(msc.subscriber_attached, ms_mo, ms_mt)
+
+assert len(ms_mo.call_id_list()) == 0 and len(ms_mt.call_id_list()) == 0
+mo_cid = ms_mo.call_dial(ms_mt)
+mt_cid = ms_mt.call_wait_incoming(ms_mo)
+print('dial success')
+
+assert not ms_mo.call_is_active(mo_cid) and not ms_mt.call_is_active(mt_cid)
+ms_mt.call_answer(mt_cid)
+wait(ms_mo.call_is_active, mo_cid)
+wait(ms_mt.call_is_active, mt_cid)
+print('answer success, call established and ongoing')
+
+sleep(5) # maintain the call active for 5 seconds
+
+assert ms_mo.call_is_active(mo_cid) and ms_mt.call_is_active(mt_cid)
+ms_mt.call_hangup(mt_cid)
+wait(lambda: len(ms_mo.call_id_list()) == 0 and len(ms_mt.call_id_list()) == 0)
+print('hangup success')
diff --git a/suites/voice/suite.conf b/suites/voice/suite.conf
new file mode 100644
index 0000000..522b1ca
--- /dev/null
+++ b/suites/voice/suite.conf
@@ -0,0 +1,9 @@
+resources:
+ ip_address:
+ - times: 6 # msc, bsc, hlr, stp, mgw*2
+ bts:
+ - times: 1
+ modem:
+ - times: 2
+ features:
+ - voice
diff --git a/ttcn3/README.txt b/ttcn3/README.txt
new file mode 100644
index 0000000..3886c80
--- /dev/null
+++ b/ttcn3/README.txt
@@ -0,0 +1,20 @@
+This directory contains a set of scripts and osmo-gsm-tester testsuites to run
+osmo-ttcn3-hacks.git BTS_Tests.ttcn (https://git.osmocom.org/osmo-ttcn3-hacks/tree/bts).
+
+The idea is to set up automatically the following components:
+TTCN3 <-> osmocon (osmocom-bb) <-> Motorola C123 <-> RF network <-> BTS_TO_TEST <-> TTCN3 + osmo-bsc
+
+* A jenkins job builds a docker image containing a built BTS_Tests TTCN testsuite.
+* Another jenkins job retrieves the artifacts from osmo-gsm-tester-build jobs
+ plus one for required osmocon binary. This job then calls osmo-gsm-tester/ttcn3/jenkins-run.sh, which will:
+** Pull the above mentioned docker image containing BTS_Tests.
+** Start osmo-gsm-tester with OSMO_GSM_TESTER_OPTS=osmo-gsm-tester/ttcn3/paths.conf,
+ that contains mostly same stuff as regular osmo-gsm-tester jobs, but with a
+ different testsuite containing 1 test "ttcn3_bts_tests.py".
+** The test "ttcn3_bts_tests.py" does the following:
+*** Start and manage all osmocom required components to run BTS_Tests: osmo-bts, osmo-bsc, osmocon, etc.
+*** Generate the BTS_Tests.cfg required by BTS_Tests from a template to adapt to dynamic bits set by osmo-gsm-tester.
+*** Launch script osmo-gsm-tester/ttcn3/suites/ttcn3_bts_tests/scripts/run_ttcn3_docker.sh with parameters and wait for it to finish.
+ This script will start and manage the lifecycle of the docker container running BTS_Tests
+
+See OS#3155 for more information regarding this topic.
diff --git a/ttcn3/default-suites.conf b/ttcn3/default-suites.conf
new file mode 100644
index 0000000..cfa46b4
--- /dev/null
+++ b/ttcn3/default-suites.conf
@@ -0,0 +1,2 @@
+- ttcn3_bts_tests:trx
+- ttcn3_bts_tests:sysmo
diff --git a/ttcn3/defaults.conf b/ttcn3/defaults.conf
new file mode 120000
index 0000000..e47699d
--- /dev/null
+++ b/ttcn3/defaults.conf
@@ -0,0 +1 @@
+../example/defaults.conf
\ No newline at end of file
diff --git a/ttcn3/jenkins-run.sh b/ttcn3/jenkins-run.sh
new file mode 100755
index 0000000..c744606
--- /dev/null
+++ b/ttcn3/jenkins-run.sh
@@ -0,0 +1,58 @@
+#!/bin/sh
+set -e -x
+base="$PWD"
+
+time_start="$(date '+%F %T')"
+
+prepare_docker() {
+ OLDPWD=$PWD
+
+ # update docker-playground and update the BSC and bsc-test containers (if needed)
+ DIR=~/jenkins/docker-playground
+ if [ ! -d "$DIR" ]; then
+ mkdir -p ~/jenkins/ && cd ~/jenkins
+ git clone git://git.osmocom.org/docker-playground
+ fi
+ cd $DIR
+ git remote prune origin; git fetch; git checkout -f -B master origin/master
+ cd $DIR/debian-stretch-titan && make
+ docker pull laforge/debian-stretch-titan:latest # HACK
+ cd $DIR/ttcn3-bts-test && make
+ # execute the script to start containers, read results, ...
+ #cd $DIR/ttcn3-bts-test && sh -x ./jenkins.sh
+ PWD=$OLDPWD
+}
+
+docker pull registry.sysmocom.de/ttcn3-bts-test
+
+# remove older trial dirs and *-run.tgz, if any
+trial_dir_prefix="trial-"
+rm -rf "$trial_dir_prefix"* || true
+
+# Expecting *.tgz artifacts to be copied to this workspace from the various
+# jenkins-*.sh runs, via jenkins job configuration. Compose a trial dir:
+trial_dir="${trial_dir_prefix}$BUILD_NUMBER"
+mkdir -p "$trial_dir"
+
+mv *.tgz "$trial_dir"
+cat *.md5 >> "$trial_dir/checksums.md5"
+rm *.md5
+
+# OSMO_GSM_TESTER_OPTS is a way to pass in e.g. logging preferences from the
+# jenkins build job.
+# On failure, first clean up below and then return the exit code.
+exit_code="1"
+if python3 -u "$(which osmo-gsm-tester.py)" "$trial_dir" $OSMO_GSM_TESTER_OPTS ; then
+ exit_code="0"
+fi
+
+# no need to keep extracted binaries
+rm -rf "$trial_dir/inst" || true
+
+# tar up all results for archiving (optional)
+cd "$trial_dir"
+journalctl -u ofono -o short-precise --since "${time_start}" > "$(readlink last_run)/ofono.log"
+tar czf "$base/${trial_dir}-run.tgz" "$(readlink last_run)"
+tar czf "$base/${trial_dir}-bin.tgz" *.md5 *.tgz
+
+exit $exit_code
diff --git a/ttcn3/paths.conf b/ttcn3/paths.conf
new file mode 100644
index 0000000..27c5818
--- /dev/null
+++ b/ttcn3/paths.conf
@@ -0,0 +1,3 @@
+state_dir: '/var/tmp/osmo-gsm-tester/state'
+suites_dir: './suites'
+scenarios_dir: './scenarios'
diff --git a/ttcn3/resources.conf.prod b/ttcn3/resources.conf.prod
new file mode 120000
index 0000000..3e40e89
--- /dev/null
+++ b/ttcn3/resources.conf.prod
@@ -0,0 +1 @@
+../example/resources.conf.prod
\ No newline at end of file
diff --git a/ttcn3/resources.conf.rnd b/ttcn3/resources.conf.rnd
new file mode 120000
index 0000000..6f98474
--- /dev/null
+++ b/ttcn3/resources.conf.rnd
@@ -0,0 +1 @@
+../example/resources.conf.rnd
\ No newline at end of file
diff --git a/ttcn3/scenarios/sysmo.conf b/ttcn3/scenarios/sysmo.conf
new file mode 120000
index 0000000..5374081
--- /dev/null
+++ b/ttcn3/scenarios/sysmo.conf
@@ -0,0 +1 @@
+../../example/scenarios/sysmo.conf
\ No newline at end of file
diff --git a/ttcn3/scenarios/trx.conf b/ttcn3/scenarios/trx.conf
new file mode 120000
index 0000000..d72ddb2
--- /dev/null
+++ b/ttcn3/scenarios/trx.conf
@@ -0,0 +1 @@
+../../example/scenarios/trx.conf
\ No newline at end of file
diff --git a/ttcn3/suites/ttcn3_bts_tests/scripts/BTS_Tests.cfg.tmpl b/ttcn3/suites/ttcn3_bts_tests/scripts/BTS_Tests.cfg.tmpl
new file mode 100644
index 0000000..03d2721
--- /dev/null
+++ b/ttcn3/suites/ttcn3_bts_tests/scripts/BTS_Tests.cfg.tmpl
@@ -0,0 +1,29 @@
+[ORDERED_INCLUDE]
+"/osmo-ttcn3-hacks/Common.cfg"
+"/osmo-ttcn3-hacks/bts/BTS_Tests.default"
+
+[LOGGING]
+
+[TESTPORT_PARAMETERS]
+*.BTSVTY.CTRL_HOSTNAME := "${btsvty_ctrl_hostname}"
+
+[MODULE_PARAMETERS]
+BTS_Tests.mp_rsl_ip := "172.18.9.10"
+BTS_Tests.mp_bb_trxc_ip := "127.0.0.1"
+%if pcu_available:
+BTS_Tests.mp_pcu_socket := "/data/unix_pcu/pcu_bts"
+%else:
+BTS_Tests.mp_pcu_socket := ""
+%endif
+BTS_Tests.mp_bb_trxc_port := -1
+L1CTL_PortType.m_l1ctl_sock_path := "/data/unix_l2/osmocom_l2"
+BTS_Tests.mp_ctrl_ip := "${btsvty_ctrl_hostname}"
+BTS_Tests.mp_rxlev_exp := 1
+BTS_Tests.mp_tolerance_rxlev := 10;
+BTS_Tests.mp_tolerance_rxqual := 1;
+BTS_Tests.mp_trx0_arfcn := 868
+
+[MAIN_CONTROLLER]
+
+[EXECUTE]
+BTS_Tests.control
diff --git a/ttcn3/suites/ttcn3_bts_tests/scripts/run_ttcn3_docker.sh b/ttcn3/suites/ttcn3_bts_tests/scripts/run_ttcn3_docker.sh
new file mode 100755
index 0000000..daac3e0
--- /dev/null
+++ b/ttcn3/suites/ttcn3_bts_tests/scripts/run_ttcn3_docker.sh
@@ -0,0 +1,93 @@
+#!/bin/sh
+set -x
+
+RUNDIR="$1"
+JUNIT_TTCN3_DST_FILE="$2"
+BSC_RSL_ADDR="$3"
+L2_SOCKET_PATH="$4"
+PCU_SOCKET_PATH="$5"
+
+# Absolute path to this script
+SCRIPT=$(readlink -f "$0")
+# Absolute path this script is in
+SCRIPTPATH=$(dirname "$SCRIPT")
+
+VOL_BASE_DIR="$RUNDIR/logs"
+rm -rf "$VOL_BASE_DIR"
+mkdir -p "$VOL_BASE_DIR"
+
+if [ "x$BUILD_TAG" = "x" ]; then
+ BUILD_TAG=nonjenkins
+fi
+
+REPO_USER="registry.sysmocom.de"
+SUITE_NAME="ttcn3-bts-test"
+NET_NAME=$SUITE_NAME
+DOCKER_NAME="$BUILD_TAG-$SUITE_NAME"
+
+network_create() {
+ NET=$1
+ echo Creating network $NET_NAME
+ docker network create --subnet $NET $NET_NAME
+}
+
+network_remove() {
+ echo Removing network $NET_NAME
+ docker network remove $NET_NAME
+}
+
+child_ps=0
+forward_kill() {
+ sig="$1"
+ echo "Caught signal SIG$sig!"
+ if [ "$child_ps" != "0" ]; then
+ echo "Killing $child_ps with SIG$sig!"
+ docker kill ${DOCKER_NAME}
+ fi
+ exit 130
+}
+forward_kill_int() {
+ forward_kill "INT"
+}
+forward_kill_term() {
+ forward_kill "TERM"
+}
+# Don't use 'set -e', otherwise traps are not triggered!
+trap forward_kill_int INT
+trap forward_kill_term TERM
+
+network_create 172.18.9.0/24
+
+mkdir $VOL_BASE_DIR/bts-tester
+echo "SCRIPTPATH=$SCRIPTPATH PWD=$PWD"
+cp $RUNDIR/BTS_Tests.cfg $VOL_BASE_DIR/bts-tester/
+
+echo Starting container with BTS testsuite
+docker kill ${DOCKER_NAME}
+if [ "x$PCU_SOCKET_PATH" != "x" ]; then
+ MOUNT_PCU_SOCKET_OPT="--mount type=bind,source=$(dirname "$PCU_SOCKET_PATH"),destination=/data/unix_pcu"
+else
+ MOUNT_PCU_SOCKET_OPT=""
+fi
+docker run --rm \
+ --network $NET_NAME --ip 172.18.9.10 \
+ -p ${BSC_RSL_ADDR}:3003:3003 \
+ -e "TTCN3_PCAP_PATH=/data" \
+ --mount type=bind,source=$VOL_BASE_DIR/bts-tester,destination=/data \
+ --mount type=bind,source="$(dirname "$L2_SOCKET_PATH")",destination=/data/unix_l2 \
+ $MOUNT_PCU_SOCKET_OPT \
+ --name ${DOCKER_NAME} \
+ $REPO_USER/${SUITE_NAME} &
+child_ps=$!
+echo "$$: waiting for $child_ps"
+wait "$child_ps"
+child_exit_code="$?"
+echo "ttcn3 docker exited with code $child_exit_code"
+
+network_remove
+
+echo "Copying TTCN3 junit file to $JUNIT_TTCN3_DST_FILE"
+cp $VOL_BASE_DIR/bts-tester/junit-xml-*.log $JUNIT_TTCN3_DST_FILE
+sed -i "s#classname='BTS_Tests'#classname='$(basename $JUNIT_TTCN3_DST_FILE)'#g" $JUNIT_TTCN3_DST_FILE
+
+exit $child_exit_code
diff --git a/ttcn3/suites/ttcn3_bts_tests/suite.conf b/ttcn3/suites/ttcn3_bts_tests/suite.conf
new file mode 100644
index 0000000..2bb0cac
--- /dev/null
+++ b/ttcn3/suites/ttcn3_bts_tests/suite.conf
@@ -0,0 +1,7 @@
+resources:
+ ip_address:
+ - times: 8 # msc, bsc, hlr, stp, mgw, sgsn, ggsn, rsl nat ip
+ bts:
+ - times: 1
+ osmocon_phone:
+ - times: 1
diff --git a/ttcn3/suites/ttcn3_bts_tests/ttcn3_bts_tests.py b/ttcn3/suites/ttcn3_bts_tests/ttcn3_bts_tests.py
new file mode 100755
index 0000000..2684bf7
--- /dev/null
+++ b/ttcn3/suites/ttcn3_bts_tests/ttcn3_bts_tests.py
@@ -0,0 +1,78 @@
+#!/usr/bin/env python3
+import os
+from mako.template import Template
+
+from osmo_gsm_tester.testenv import *
+
+hlr_dummy = suite.hlr()
+mgw_dummy = suite.mgw()
+stp_dummy = suite.stp()
+msc_dummy = suite.msc(hlr_dummy, mgw_dummy, stp_dummy)
+ggsn_dummy = suite.ggsn()
+sgsn_dummy = suite.sgsn(hlr_dummy, ggsn_dummy)
+bsc = suite.bsc(msc_dummy, mgw_dummy, stp_dummy)
+bts = suite.bts()
+osmocon = suite.osmocon()
+
+bts.set_num_trx(1)
+bts.set_trx_phy_channel(0, 0, 'CCCH+SDCCH4')
+bts.set_trx_phy_channel(0, 1, 'TCH/F')
+bts.set_trx_phy_channel(0, 2, 'TCH/F')
+bts.set_trx_phy_channel(0, 3, 'TCH/F_PDCH')
+bts.set_trx_phy_channel(0, 4, 'TCH/F_TCH/H_PDCH')
+bts.set_trx_phy_channel(0, 5, 'TCH/H')
+bts.set_trx_phy_channel(0, 6, 'SDCCH8')
+bts.set_trx_phy_channel(0, 7, 'PDCH')
+
+print('Starting CNI')
+hlr_dummy.start()
+stp_dummy.start()
+msc_dummy.start()
+mgw_dummy.start()
+
+nat_rsl_ip = suite.ip_address().get('addr')
+bsc.set_rsl_ip(nat_rsl_ip)
+bsc.bts_add(bts)
+sgsn_dummy.bts_add(bts)
+
+bsc.start()
+bts.start(keepalive=True)
+
+print('Starting osmocon')
+osmocon.start()
+
+own_dir = os.path.dirname(os.path.realpath(__file__))
+script_file = os.path.join(own_dir, 'scripts', 'run_ttcn3_docker.sh')
+bts_tmpl_file = os.path.join(own_dir, 'scripts', 'BTS_Tests.cfg.tmpl')
+script_run_dir = test.get_run_dir().new_dir('ttcn3')
+bts_cfg_file = os.path.join(str(script_run_dir), 'BTS_Tests.cfg')
+junit_ttcn3_dst_file = os.path.join(str(suite.trial.get_run_dir()), 'trial-') + suite.name() + '.xml'
+if bts.bts_type() == 'osmo-bts-trx':
+ pcu_available = True
+ pcu_sk = bts.pcu_socket_path()
+else: # PCU unix socket not available locally
+ pcu_available = False
+ pcu_sk = ''
+docker_cmd = (script_file, str(script_run_dir), junit_ttcn3_dst_file, nat_rsl_ip, osmocon.l2_socket_path(), pcu_sk)
+
+print('Creating template')
+mytemplate = Template(filename=bts_tmpl_file)
+r = mytemplate.render(btsvty_ctrl_hostname=bts.remote_addr(), pcu_available=pcu_available)
+with open(bts_cfg_file, 'w') as f:
+ f.write(r)
+
+
+print('Starting TTCN3 tests')
+proc = process.Process('ttcn3', script_run_dir, docker_cmd)
+try:
+ proc.launch()
+ print('Starting TTCN3 launched, waiting until it finishes')
+ proc.wait(timeout=3600)
+except Exception as e:
+ proc.terminate()
+ raise e
+
+if proc.result != 0:
+ raise RuntimeError("run_ttcn3_docker.sh exited with error code %d" % proc.result)
+
+print('Done')
diff --git a/update_version.sh b/update_version.sh
new file mode 100755
index 0000000..3d5fe42
--- /dev/null
+++ b/update_version.sh
@@ -0,0 +1,10 @@
+#!/bin/sh
+set -e
+git describe --abbrev=8 --dirty | sed 's/v\([^-]*\)-\([^-]*\)-\(.*\)/\1.dev\2.\3/' > version
+cat version
+echo "# osmo-gsm-tester version.
+# Automatically generated by update_version.sh.
+# Gets imported by __init__.py.
+
+_version = '$(cat version)'" \
+ > src/osmo_gsm_tester/_version.py
diff --git a/utils/modem-netns-setup.py b/utils/modem-netns-setup.py
new file mode 100755
index 0000000..e0645d1
--- /dev/null
+++ b/utils/modem-netns-setup.py
@@ -0,0 +1,87 @@
+#!/usr/bin/env python3
+# Pau Espin Pedrol <pespin@sysmocom.de>
+# MIT
+
+# manage netns for ofono modems
+
+import os
+import sys
+import subprocess
+import usb.core
+import usb.util
+from pprint import pprint
+
+def get_path_ids(bus, port_numbers):
+ port_numbers = [str(port) for port in port_numbers]
+ ports = '.'.join(port_numbers)
+ return '{}-{}'.format(bus, ports)
+
+def get_usb_dir(bus, port_numbers):
+ return '/sys/bus/usb/devices/' + get_path_ids(bus, port_numbers) + '/'
+
+def get_net_from_usb(bus, port_numbers):
+ net_ifaces = []
+ path = get_usb_dir(bus, port_numbers)
+ path_ids = get_path_ids(bus, port_numbers)
+
+ usb_interfaces = [f for f in os.listdir(path) if f.startswith(path_ids)]
+ for usb_iface in usb_interfaces:
+ listdir = [f for f in os.listdir(path + usb_iface) if f == ('net')]
+ if listdir:
+ # found a net iface
+ net_ifaces += os.listdir(path + usb_iface + '/net/')
+ return net_ifaces
+
+def move_modem_to_netns(usb_path_id, net_li):
+
+ if len(net_li) == 0:
+ print("%s: Device has no net ifaces, skipping" %(usb_path_id))
+ return
+
+ if not os.path.exists("/var/run/netns/%s" % usb_path_id):
+ print("%s: Creating netns" % (usb_path_id))
+ subprocess.check_call(["ip", "netns", "add", usb_path_id])
+ else:
+ print("%s: netns already exists" % (usb_path_id))
+
+ for netif in net_li:
+ print("%s: Moving iface %s to netns" % (usb_path_id, netif))
+ subprocess.check_call(["ip", "link", "set", netif, "netns", usb_path_id])
+ # The iface must be set up AFTER the PDP context is activated, otherwise we get no DHCP response.
+ #print("%s: Setting up iface %s" % (usb_path_id, netif))
+ #subprocess.check_call(["ip", "netns", "exec", usb_path_id, "ip", "link", "set", "dev", netif, "up"])
+ #subprocess.check_call(["ip", "netns", "exec", usb_path_id, "udhcpc", "-i", netif])
+
+def delete_modem_netns(usb_path_id):
+ if os.path.exists("/var/run/netns/%s" % usb_path_id):
+ print("%s: Deleting netns" % (usb_path_id))
+ subprocess.check_call(["ip", "netns", "delete", usb_path_id])
+ else:
+ print("%s: netns doesn't exist" % (usb_path_id))
+
+def print_help():
+ print("Usage: %s start|stop" % sys.argv[0])
+ exit(1)
+
+
+if __name__ == '__main__':
+
+ if len(sys.argv) != 2:
+ print_help()
+
+ USB_DEVS = [dev for dev in usb.core.find(find_all=True)]
+ RESULT = {}
+ for device in USB_DEVS:
+ result = {}
+ if not device.port_numbers:
+ continue
+
+ usb_path_id = get_path_ids(device.bus, device.port_numbers)
+ net_li = get_net_from_usb(device.bus, device.port_numbers)
+
+ if sys.argv[1] == "start":
+ move_modem_to_netns(usb_path_id, net_li)
+ elif sys.argv[1] == "stop":
+ delete_modem_netns(usb_path_id)
+ else:
+ print_help()
diff --git a/utils/osmo-gsm-tester_netns_exec.sh b/utils/osmo-gsm-tester_netns_exec.sh
new file mode 100755
index 0000000..336b746
--- /dev/null
+++ b/utils/osmo-gsm-tester_netns_exec.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+netns="$1"
+shift
+# TODO: later on this script may re-exec itself with a specific ENV and sudo, in order to run inside the netns but with dropped privileges
+ip netns exec $netns "$@"
diff --git a/utils/osmo-gsm-tester_setcap_net_admin.sh b/utils/osmo-gsm-tester_setcap_net_admin.sh
new file mode 100755
index 0000000..60e527a
--- /dev/null
+++ b/utils/osmo-gsm-tester_setcap_net_admin.sh
@@ -0,0 +1,2 @@
+#!/bin/sh
+/sbin/setcap cap_net_admin+ep "$1"
diff --git a/utils/osmo-gsm-tester_setcap_net_raw.sh b/utils/osmo-gsm-tester_setcap_net_raw.sh
new file mode 100755
index 0000000..1f3a727
--- /dev/null
+++ b/utils/osmo-gsm-tester_setcap_net_raw.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+/sbin/setcap cap_net_raw+ep "$1"
diff --git a/utils/show_usb_device.py b/utils/show_usb_device.py
new file mode 100755
index 0000000..9136234
--- /dev/null
+++ b/utils/show_usb_device.py
@@ -0,0 +1,97 @@
+#!/usr/bin/env python3
+# Alexander Couzens <lynxis@fe80.eu>
+# MIT
+
+# show usb device with their net and serial devices
+
+import os
+import usb.core
+import usb.util
+from pprint import pprint
+
+def get_path_ids(bus, port_numbers):
+ port_numbers = [str(port) for port in port_numbers]
+ ports = '.'.join(port_numbers)
+ return '{}-{}'.format(bus, ports)
+
+def get_usb_dir(bus, port_numbers):
+ return '/sys/bus/usb/devices/' + get_path_ids(bus, port_numbers) + '/'
+
+def get_usbmisc_from_usb(bus, port_numbers):
+ usbmisc_ifaces = []
+ path = get_usb_dir(bus, port_numbers)
+ path_ids = get_path_ids(bus, port_numbers)
+
+ usb_interfaces = [f for f in os.listdir(path) if f.startswith(path_ids)]
+ for usb_iface in usb_interfaces:
+ listdir = [f for f in os.listdir(path + usb_iface) if f == ('usbmisc')]
+ if listdir:
+ # found a usbmisc iface
+ usbmisc_ifaces += os.listdir(path + usb_iface + '/usbmisc/')
+ return usbmisc_ifaces
+
+def get_net_from_usb(bus, port_numbers):
+ net_ifaces = []
+ path = get_usb_dir(bus, port_numbers)
+ path_ids = get_path_ids(bus, port_numbers)
+
+ usb_interfaces = [f for f in os.listdir(path) if f.startswith(path_ids)]
+ for usb_iface in usb_interfaces:
+ listdir = [f for f in os.listdir(path + usb_iface) if f == ('net')]
+ if listdir:
+ # found a net iface
+ net_ifaces += os.listdir(path + usb_iface + '/net/')
+ return net_ifaces
+
+def get_serial_from_usb(bus, port_numbers):
+ serial_ifaces = []
+ path = get_usb_dir(bus, port_numbers)
+ path_ids = get_path_ids(bus, port_numbers)
+
+ usb_interfaces = [f for f in os.listdir(path) if f.startswith(path_ids)]
+ for usb_iface in usb_interfaces:
+ serial_ifaces += [f for f in os.listdir(path + usb_iface) if f.startswith('tty')]
+ return serial_ifaces
+
+def get_product(bus, port_numbers):
+ usb_dir = get_usb_dir(bus, port_numbers)
+ try:
+ product = open(os.path.join(usb_dir, 'product')).read().strip()
+ except OSError as exp:
+ product = "Unknown"
+ return product
+
+def get_manuf(bus, port_numbers):
+ usb_dir = get_usb_dir(bus, port_numbers)
+ try:
+ manuf = open(os.path.join(usb_dir, 'manufacturer')).read().strip()
+ except OSError:
+ manuf = "Unknown"
+ return manuf
+
+def get_name(bus, port_numbers):
+ manuf = get_manuf(bus, port_numbers)
+ product = get_product(bus, port_numbers)
+ return "%s %s" % (manuf, product)
+
+if __name__ == '__main__':
+ USB_DEVS = [dev for dev in usb.core.find(find_all=True)]
+ RESULT = {}
+ for device in USB_DEVS:
+ result = {}
+ if not device.port_numbers:
+ continue
+
+ # retrieve manuf + product from /sys because non-root user can not ask the usb device
+ result['name'] = get_name(device.bus, device.port_numbers)
+ result['path'] = get_usb_dir(device.bus, device.port_numbers)
+ result['net'] = get_net_from_usb(device.bus, device.port_numbers)
+ result['cdc'] = get_usbmisc_from_usb(device.bus, device.port_numbers)
+ result['serial'] = get_serial_from_usb(device.bus, device.port_numbers)
+
+ # only show device which have serial or net devices
+ if result['net'] or result['serial']:
+ RESULT[device] = result
+
+ pprint(RESULT)
+