diff -Nru testrepository-0.0.5/BSD testrepository-0.0.18/BSD --- testrepository-0.0.5/BSD 2009-12-18 04:04:01.000000000 +0000 +++ testrepository-0.0.18/BSD 2013-01-12 10:14:25.000000000 +0000 @@ -9,18 +9,18 @@ 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. -3. Neither the name of Robert Collins nor the names of Subunit contributors - may be used to endorse or promote products derived from this software - without specific prior written permission. +3. Neither the name of Robert Collins nor the names of Testrepository + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. -THIS SOFTWARE IS PROVIDED BY ROBERT COLLINS AND SUBUNIT CONTRIBUTORS ``AS IS'' -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS -OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF -SUCH DAMAGE. +THIS SOFTWARE IS PROVIDED BY ROBERT COLLINS AND TESTREPOSITORY CONTRIBUTORS +``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. 
diff -Nru testrepository-0.0.5/.bzrignore testrepository-0.0.18/.bzrignore --- testrepository-0.0.5/.bzrignore 2010-01-10 11:15:10.000000000 +0000 +++ testrepository-0.0.18/.bzrignore 2013-01-12 10:29:01.000000000 +0000 @@ -3,3 +3,6 @@ test.xml build .testrepository +__pycache__ +testrepository.egg-info +./doc/_build diff -Nru testrepository-0.0.5/COPYING testrepository-0.0.18/COPYING --- testrepository-0.0.5/COPYING 2010-01-10 11:13:50.000000000 +0000 +++ testrepository-0.0.18/COPYING 2013-07-15 11:43:40.000000000 +0000 @@ -28,6 +28,8 @@ for distributions such as Debian that wish to list all the copyright holders in their metadata: * Robert Collins , 2009 +* Hewlett-Packard Development Company, L.P., 2013 +* IBM Corp., 2013 Code that has been incorporated into Testrepository from other projects will diff -Nru testrepository-0.0.5/debian/changelog testrepository-0.0.18/debian/changelog --- testrepository-0.0.5/debian/changelog 2014-07-19 23:30:40.000000000 +0000 +++ testrepository-0.0.18/debian/changelog 2014-07-17 16:27:15.000000000 +0000 @@ -1,3 +1,74 @@ +testrepository (0.0.18-1ubuntu2) precise; urgency=low + + * No-change backport to precise + + -- Lars Butler (larsbutler) Thu, 17 Jul 2014 16:25:35 +0000 + +testrepository (0.0.18-1ubuntu1) trusty; urgency=low + + * New upstream release. + + -- Chuck Short Wed, 27 Nov 2013 18:48:51 -0500 + +testrepository (0.0.17-1ubuntu1) trusty; urgency=low + + * Merge from Debian unstable. Remaining changes: + - Add python3 support. + + -- Chuck Short Mon, 28 Oct 2013 11:37:29 -0400 + +testrepository (0.0.17-1) unstable; urgency=low + + * New upstream release. + + -- Thomas Goirand Sat, 20 Jul 2013 06:09:43 +0000 + +testrepository (0.0.17-0ubuntu1) saucy; urgency=low + + * New upstream release. + + -- Chuck Short Fri, 06 Sep 2013 22:50:17 -0400 + +testrepository (0.0.15-0ubuntu3) saucy; urgency=low + + * debian/patches/fix-python3-print.patch: Fix python3 bug. + + -- Chuck Short Tue, 09 Jul 2013 10:39:51 -0500 + +testrepository (0.0.15-0ubuntu2) saucy; urgency=low + + * Build for python2/python3. + + -- Chuck Short Thu, 13 Jun 2013 08:56:31 -0500 + +testrepository (0.0.15-0ubuntu1) saucy; urgency=low + + * New upstream release. + + -- Chuck Short Thu, 06 Jun 2013 10:51:25 -0500 + +testrepository (0.0.14-2) unstable; urgency=low + + * Added missing build-depends: python-setuptools. + * Ran wrap-and-sort to clean debian/control. + * Removed X-Python-Version: >= 2.6 which now makes no sense. + + -- Thomas Goirand Sat, 01 Jun 2013 06:27:20 +0000 + +testrepository (0.0.14-1) unstable; urgency=low + + * New upstream release. + * Now using format 1.0 parsable debian/copyright. + * Switched from CDBS to dh_python2. + * Now using compat and debhelper 9. + * Now using python module team as maintainer. Added myself as uploader. + * Switching to 3.0 (quilt) format. + * Bumped Standard-Version to 3.9.4. + * Fixed Homepage: filed to use the pypi website. + * Added a watch file. + + -- Thomas Goirand Fri, 22 Feb 2013 14:28:42 +0000 + testrepository (0.0.5-1.1) unstable; urgency=low * Non-maintainer upload. @@ -42,3 +113,4 @@ * New upstream release. 
-- Robert Collins Sun, 10 Jan 2010 22:16:09 +1100 + diff -Nru testrepository-0.0.5/debian/compat testrepository-0.0.18/debian/compat --- testrepository-0.0.5/debian/compat 2014-07-19 23:30:40.000000000 +0000 +++ testrepository-0.0.18/debian/compat 2013-09-07 05:43:40.000000000 +0000 @@ -1 +1 @@ -7 +9 diff -Nru testrepository-0.0.5/debian/control testrepository-0.0.18/debian/control --- testrepository-0.0.5/debian/control 2014-07-19 23:30:40.000000000 +0000 +++ testrepository-0.0.18/debian/control 2013-09-07 05:43:40.000000000 +0000 @@ -1,27 +1,34 @@ Source: testrepository -Maintainer: Robert Collins +Maintainer: Ubuntu Developers +XSBC-Original-Maintainer: Debian Python Modules Team +Uploaders: Robert Collins , + Thomas Goirand Section: python Priority: optional -Standards-Version: 3.8.4 -Build-Depends-Indep: - python-central (>= 0.6.7) -Build-Depends: - cdbs (>= 0.4.51), - debhelper (>= 7), - python (>= 2.4), - python-testtools, - python-testresources, - python-testscenarios, - python-subunit -XS-Python-Version: all -Homepage: https://launchpad.net/testrepository +Build-Depends: debhelper (>= 9), + python-all (>= 2.6.6-3~), + python-fixtures, + python-setuptools, + python-subunit, + python-testresources, + python-testscenarios, + python-testtools, + python3-all, + python3-fixtures, + python3-setuptools, + python3-testresources, + python3-testscenarios, + python3-testtools, + python3-subunit +Standards-Version: 3.9.4 +Homepage: https://pypi.python.org/pypi/testrepository Package: testrepository Architecture: all XB-Python-Version: ${python:Versions} -Depends: ${python:Depends}, - ${misc:Depends}, - python-testrepository (>= ${source:Version}) +Depends: python-testrepository (>= ${source:Version}) | python3-testrepository (>= ${source:Version}), + ${misc:Depends}, + ${python:Depends} Provides: ${python:Provides} Description: Test result manager Testrepository provides a database of test results and supports easy workflows @@ -36,11 +43,25 @@ Package: python-testrepository Architecture: all XB-Python-Version: ${python:Versions} -Depends: ${python:Depends}, - ${misc:Depends}, - python-subunit +Depends: python-subunit, ${misc:Depends}, ${python:Depends} Provides: ${python:Provides} -Description: Database of test results - python library +Description: Database of test results - python library (python2) + Testrepository provides a database of test results and supports easy workflows + to be built on top of that database. For instance, running just failing tests + or getting the last test run back to examine again (without running the tests + again). Testrepository is compatible with any test suite that can output + subunit. This includes any TAP test suite and any pyunit compatible test + suite. + . + The python-testrepository package contains the Python testrepository + library, which can be used for programmatic access to the database. + +Package: python3-testrepository +Architecture: all +XB-Python-Version: ${python3:Versions} +Depends: python3-subunit, ${misc:Depends}, ${python3:Depends} +Provides: ${python3:Provides} +Description: Database of test results - python library (python3) Testrepository provides a database of test results and supports easy workflows to be built on top of that database. 
For instance, running just failing tests or getting the last test run back to examine again (without running the tests diff -Nru testrepository-0.0.5/debian/copyright testrepository-0.0.18/debian/copyright --- testrepository-0.0.5/debian/copyright 2014-07-19 23:30:40.000000000 +0000 +++ testrepository-0.0.18/debian/copyright 2013-09-07 05:43:40.000000000 +0000 @@ -1,16 +1,64 @@ -This is python-testrepository, packaged for debian by Robert Collins. +Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ +Upstream-Name: testrepository +Upstream-Contact: Robert Collins +Source: https://launchpad.net/testrepository -Homepage is https://launchpad.net/testrepository +Files: debian/* +Copyright: (c) 2010-2011, Robert Collins + (c) 2011, Jakub Wilk + (c) 2013, Thomas Goirand +License: Apache-2.0-or-BSD-3-clauses -Copyright (c) 2009, 2010 Robert Collins +Files: * +Copyright: (c) 2009-2013, Robert Collins +License: Apache-2.0 -Licensed under either the Apache License, Version 2.0 or the BSD 3-clause -license at the users choice. A copy of both licenses are available in the -project source as Apache-2.0 and BSD. You may not use this file except in -compliance with one of these two licences. - -Unless required by applicable law or agreed to in writing, software -distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT -WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -license you chose for the specific language governing permissions and -limitations under that license. +License: Apache-2.0-or-BSD-3-clauses + Licensed under either the Apache License, Version 2.0 or the BSD 3-clause + license at the users choice. A copy of both licenses are available in the + project source as Apache-2.0 and BSD. + . + Apache-2.0 license: + . + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + . + http://www.apache.org/licenses/LICENSE-2.0 + . + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + . + On Debian-based systems the full text of the Apache version 2.0 license + can be found in `/usr/share/common-licenses/Apache-2.0'. + . + BSD License: + . + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + . + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + . + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + . + 3. Neither the name of Robert Collins nor the names of Subunit contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + . + THIS SOFTWARE IS PROVIDED BY ROBERT COLLINS AND SUBUNIT CONTRIBUTORS ``AS IS'' + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + SUCH DAMAGE. diff -Nru testrepository-0.0.5/debian/pycompat testrepository-0.0.18/debian/pycompat --- testrepository-0.0.5/debian/pycompat 2014-07-19 23:30:40.000000000 +0000 +++ testrepository-0.0.18/debian/pycompat 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -2 diff -Nru testrepository-0.0.5/debian/python3-testrepository.install testrepository-0.0.18/debian/python3-testrepository.install --- testrepository-0.0.5/debian/python3-testrepository.install 1970-01-01 00:00:00.000000000 +0000 +++ testrepository-0.0.18/debian/python3-testrepository.install 2013-06-13 13:53:32.000000000 +0000 @@ -0,0 +1 @@ +usr/lib/python3* diff -Nru testrepository-0.0.5/debian/python-testrepository.install testrepository-0.0.18/debian/python-testrepository.install --- testrepository-0.0.5/debian/python-testrepository.install 2014-07-19 23:30:40.000000000 +0000 +++ testrepository-0.0.18/debian/python-testrepository.install 2013-09-07 05:43:40.000000000 +0000 @@ -1 +1 @@ -usr/lib/* +usr/lib/python2* diff -Nru testrepository-0.0.5/debian/rules testrepository-0.0.18/debian/rules --- testrepository-0.0.5/debian/rules 2014-07-19 23:30:40.000000000 +0000 +++ testrepository-0.0.18/debian/rules 2013-09-07 05:43:40.000000000 +0000 @@ -1,5 +1,33 @@ #!/usr/bin/make -f -include /usr/share/cdbs/1/rules/debhelper.mk -DEB_PYTHON_SYSTEM = pycentral -include /usr/share/cdbs/1/class/python-distutils.mk +PYTHONS:=$(shell pyversions -vr) +PYTHON3S:=$(shell py3versions -vr) + +%: + dh $@ --with python2,python3 + +override_dh_auto_build: + set -e && for pyvers in $(PYTHONS); do \ + python$$pyvers setup.py build; \ + done + set -e && for pyvers in $(PYTHON3S); do \ + python$$pyvers setup.py build; \ + done + +override_dh_clean: + dh_clean + find . 
-iname '*.pyc' -delete + rm -rf .testrepository .testr.conf build + cp debian/SOURCES.txt.backup testrepository.egg-info/SOURCES.txt + +override_dh_auto_test: + echo "Nothing to do" + +override_dh_install: + set -e && for pyvers in $(PYTHONS); do \ + python$$pyvers setup.py install --install-layout=deb --root=debian/tmp; \ + done + set -e && for pyvers in $(PYTHON3S); do \ + python$$pyvers setup.py install --install-layout=deb --root=debian/tmp; \ + done + dh_install diff -Nru testrepository-0.0.5/debian/source/format testrepository-0.0.18/debian/source/format --- testrepository-0.0.5/debian/source/format 1970-01-01 00:00:00.000000000 +0000 +++ testrepository-0.0.18/debian/source/format 2013-09-07 05:43:40.000000000 +0000 @@ -0,0 +1 @@ +3.0 (quilt) diff -Nru testrepository-0.0.5/debian/SOURCES.txt.backup testrepository-0.0.18/debian/SOURCES.txt.backup --- testrepository-0.0.5/debian/SOURCES.txt.backup 1970-01-01 00:00:00.000000000 +0000 +++ testrepository-0.0.18/debian/SOURCES.txt.backup 2013-09-07 05:43:40.000000000 +0000 @@ -0,0 +1,89 @@ +.bzrignore +.testr.conf +Apache-2.0 +BSD +COPYING +INSTALL.txt +MANIFEST.in +Makefile +NEWS +README.txt +setup.py +testr +doc/DESIGN.txt +doc/DEVELOPERS.txt +doc/MANUAL.txt +doc/index.txt +testrepository/__init__.py +testrepository/results.py +testrepository/setuptools_command.py +testrepository/testcommand.py +testrepository/testlist.py +testrepository/utils.py +testrepository.egg-info/PKG-INFO +testrepository.egg-info/SOURCES.txt +testrepository.egg-info/dependency_links.txt +testrepository.egg-info/entry_points.txt +testrepository.egg-info/requires.txt +testrepository.egg-info/top_level.txt +testrepository/arguments/__init__.py +testrepository/arguments/command.py +testrepository/arguments/doubledash.py +testrepository/arguments/path.py +testrepository/arguments/string.py +testrepository/commands/__init__.py +testrepository/commands/commands.py +testrepository/commands/failing.py +testrepository/commands/help.py +testrepository/commands/init.py +testrepository/commands/last.py +testrepository/commands/list_tests.py +testrepository/commands/load.py +testrepository/commands/quickstart.py +testrepository/commands/run.py +testrepository/commands/slowest.py +testrepository/commands/stats.py +testrepository/repository/__init__.py +testrepository/repository/file.py +testrepository/repository/memory.py +testrepository/repository/samba_buildfarm.py +testrepository/tests/__init__.py +testrepository/tests/monkeypatch.py +testrepository/tests/stubpackage.py +testrepository/tests/test_arguments.py +testrepository/tests/test_commands.py +testrepository/tests/test_matchers.py +testrepository/tests/test_monkeypatch.py +testrepository/tests/test_repository.py +testrepository/tests/test_results.py +testrepository/tests/test_setup.py +testrepository/tests/test_stubpackage.py +testrepository/tests/test_testcommand.py +testrepository/tests/test_testr.py +testrepository/tests/test_ui.py +testrepository/tests/arguments/__init__.py +testrepository/tests/arguments/test_command.py +testrepository/tests/arguments/test_doubledash.py +testrepository/tests/arguments/test_path.py +testrepository/tests/arguments/test_string.py +testrepository/tests/commands/__init__.py +testrepository/tests/commands/test_commands.py +testrepository/tests/commands/test_failing.py +testrepository/tests/commands/test_help.py +testrepository/tests/commands/test_init.py +testrepository/tests/commands/test_last.py +testrepository/tests/commands/test_list_tests.py 
+testrepository/tests/commands/test_load.py +testrepository/tests/commands/test_quickstart.py +testrepository/tests/commands/test_run.py +testrepository/tests/commands/test_slowest.py +testrepository/tests/commands/test_stats.py +testrepository/tests/repository/__init__.py +testrepository/tests/repository/test_file.py +testrepository/tests/ui/__init__.py +testrepository/tests/ui/test_cli.py +testrepository/tests/ui/test_decorator.py +testrepository/ui/__init__.py +testrepository/ui/cli.py +testrepository/ui/decorator.py +testrepository/ui/model.py \ No newline at end of file diff -Nru testrepository-0.0.5/debian/testrepository.install testrepository-0.0.18/debian/testrepository.install --- testrepository-0.0.5/debian/testrepository.install 2014-07-19 23:30:40.000000000 +0000 +++ testrepository-0.0.18/debian/testrepository.install 2013-09-07 05:43:40.000000000 +0000 @@ -1 +1 @@ -usr/bin/testr usr/bin/ +testr usr/bin/ diff -Nru testrepository-0.0.5/debian/undirty.diff testrepository-0.0.18/debian/undirty.diff --- testrepository-0.0.5/debian/undirty.diff 1970-01-01 00:00:00.000000000 +0000 +++ testrepository-0.0.18/debian/undirty.diff 2013-09-07 05:43:40.000000000 +0000 @@ -0,0 +1,14 @@ +Description: Removes modification in egginfo folder after build + This patch helps to build twice. +Author: Thomas Goirand + +--- testrepository-0.0.9.orig/testrepository.egg-info/SOURCES.txt ++++ testrepository-0.0.9/testrepository.egg-info/SOURCES.txt +@@ -7,6 +7,7 @@ MANIFEST.in + Makefile + NEWS + README.txt ++setup.cfg + setup.py + testr + doc/DESIGN.txt diff -Nru testrepository-0.0.5/debian/watch testrepository-0.0.18/debian/watch --- testrepository-0.0.5/debian/watch 1970-01-01 00:00:00.000000000 +0000 +++ testrepository-0.0.18/debian/watch 2013-09-07 05:43:40.000000000 +0000 @@ -0,0 +1,2 @@ +version=3 +https://pypi.python.org/packages/source/t/testrepository/testrepository-(.*).tar.gz diff -Nru testrepository-0.0.5/doc/DESIGN.txt testrepository-0.0.18/doc/DESIGN.txt --- testrepository-0.0.5/doc/DESIGN.txt 2010-09-11 20:48:03.000000000 +0000 +++ testrepository-0.0.18/doc/DESIGN.txt 2012-12-28 19:53:00.000000000 +0000 @@ -47,5 +47,6 @@ Threads/concurrency ~~~~~~~~~~~~~~~~~~~ -In general using any public interface +In general using any public interface is fine, but keeping syncronisation +needs to a minimum for code readability. diff -Nru testrepository-0.0.5/doc/DEVELOPERS.txt testrepository-0.0.18/doc/DEVELOPERS.txt --- testrepository-0.0.5/doc/DEVELOPERS.txt 2010-01-10 11:13:50.000000000 +0000 +++ testrepository-0.0.18/doc/DEVELOPERS.txt 2013-04-08 10:36:03.000000000 +0000 @@ -40,3 +40,15 @@ Generally just ``make`` is all that is needed to run all the tests. However if dropping into pdb, it is currently more convenient to use ``python -m testtools.run testrepository.tests.test_suite``. + +Diagnosing issues +----------------- + +The cli UI will drop into pdb when an error is thrown if TESTR_PDB is set in +the environment. This can be very useful for diagnosing problems. + +Releasing +--------- + +Update NEWS and testrepository/__init__.py version numbers. Release to pypi. +Pivot the next milestone on LP to version, and make a new next milestone. diff -Nru testrepository-0.0.5/doc/index.txt testrepository-0.0.18/doc/index.txt --- testrepository-0.0.5/doc/index.txt 1970-01-01 00:00:00.000000000 +0000 +++ testrepository-0.0.18/doc/index.txt 2012-12-18 22:28:01.000000000 +0000 @@ -0,0 +1,25 @@ +.. Test Repository documentation master file, created by + sphinx-quickstart on Mon Dec 3 23:24:00 2012. 
+ You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +Welcome to Test Repository's documentation! +=========================================== + +Contents: + +.. toctree:: + :maxdepth: 2 + + MANUAL + DESIGN + DEVELOPERS + + + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`search` + diff -Nru testrepository-0.0.5/doc/MANUAL.txt testrepository-0.0.18/doc/MANUAL.txt --- testrepository-0.0.5/doc/MANUAL.txt 2010-12-07 03:16:22.000000000 +0000 +++ testrepository-0.0.18/doc/MANUAL.txt 2013-11-03 19:58:46.000000000 +0000 @@ -8,9 +8,12 @@ that can be represented as a subunit stream can be inserted into a repository. Typical workflow is to have a repository into which test runs are inserted, and -then to query the repository to find out about issues that need addressing. For -instance, using the sample subunit stream included with Test repository:: +then to query the repository to find out about issues that need addressing. +testr can fully automate this, but lets start with the low level facilities, +using the sample subunit stream included with testr:: + # Note that there is a .testr.conf already: + ls .testr.conf # Create a store to manage test results in. $ testr init # add a test result (shows failures) @@ -23,31 +26,57 @@ $ testr failing Most commands in testr have comprehensive online help, and the commands:: - $ testr help + + $ testr help [command] $ testr commands Will be useful to explore the system. +Configuration +~~~~~~~~~~~~~ + +testr is configured via the '.testr.conf' file which needs to be in the same +directory that testr is run from. testr includes online help for all the +options that can be set within it:: + + $ testr help run + +Python +------ + +If your test suite is written in Python, the simplest - and usually correct +configuration is:: + + [DEFAULT] + test_command=python -m subunit.run discover . $LISTOPT $IDOPTION + test_id_option=--load-list $IDFILE + test_list_option=--list + Running tests ~~~~~~~~~~~~~ -Test Repository can be taught how to run your tests by setting up a .testr.conf -file in your cwd. A file like:: +testr is taught how to run your tests by interepreting your .testr.conf file. +For instance:: [DEFAULT] test_command=foo $IDOPTION test_id_option=--bar $IDFILE will cause 'testr run' to run 'foo' and process it as 'testr load' would. -Likewise 'testr run --failing' will run 'foo --bar failing.list' and process it -as 'testr load' would. failing.list will be a newline separated list of the -test ids that your test runner outputs. Arguments passed to run are passed -through to your test runner command line. To pass options through to your test -running, use a ``--`` before your options. For instance, -``testr run quux -- bar --no-plugins`` would run -``foo quux bar --no-plugins`` using the above config example. Shell variables -are expanded in these commands on platforms that have a shell. The command -help for ``testr run`` describes the available options for .testr.conf. +Likewise 'testr run --failing' will automatically create a list file listing +just the failing tests, and then run 'foo --bar failing.list' and process it as +'testr load' would. failing.list will be a newline separated list of the +test ids that your test runner outputs. If there are no failing tests, no test +execution will happen at all. 
+ +Arguments passed to 'testr run' are used to filter test ids that will be run - +testr will query the runner for test ids and then apply each argument as a +regex filter. Tests that match any of the given filters will be run. Arguments +passed to run after a ``--`` are passed through to your test runner command +line. For instance, using the above config example ``testr run quux -- bar +--no-plugins`` would query for test ids, filter for those that match 'quux' and +then run ``foo bar --load-list tempfile.list --no-plugins``. Shell variables +are expanded in these commands on platforms that have a shell. Having setup a .testr.conf, a common workflow then becomes:: @@ -62,6 +91,35 @@ partial test run were to be interrupted, the failing tests that aren't run are not lost). +Another common use case is repeating a failure that occured on a remote +machine (e.g. during a jenkins test run). There are two common ways to do +approach this. + +Firstly, if you have a subunit stream from the run you can just load it:: + + $ testr load < failing-stream + # Run the failed tests + $ testr run --failing + +The streams generated by test runs are in .testrepository/ named for their test +id - e.g. .testrepository/0 is the first stream. + +If you do not have a stream (because the test runner didn't output subunit or +you don't have access to the .testrepository) you may be able to use a list +file. If you can get a file that contains one test id per line, you can run +the named tests like this: + + $ testr run --load-list FILENAME + +This can also be useful when dealing with sporadically failing tests, or tests +that only fail in combination with some other test - you can bisect the tests +that were run to get smaller and smaller (or larger and larger) test subsets +until the error is pinpointed. + +``testr run --until-failure`` will run your test suite again and again and +again stopping only when interrupted or a failure occurs. This is useful +for repeating timing-related test failures. + Listing tests ~~~~~~~~~~~~~ @@ -79,11 +137,19 @@ will be passed through, if a test list is being supplied test_option can be used via $IDOPTION. -The output of the test command when this option is supplied should be a series -of test ids, in any order, `\n' separated on stdout. +The output of the test command when this option is supplied should be a subunit +test enumeration. For subunit v1 that is a series of test ids, in any order, +``\n`` separated on stdout. For v2 use the subunit protocol and emit one event +per test with each test having status 'exists'. To test whether this is working the `testr list-tests` command can be useful. +You can also use this to see what tests will be run by a given testr run +command. For instance, the tests that ``testr run myfilter`` will run are shown +by ``testr list-tests myfilter``. As with 'run', arguments to 'list-tests' are +used to regex filter the tests of the test runner, and arguments after a '--' +are passed to the test runner. + Parallel testing ~~~~~~~~~~~~~~~~ @@ -95,7 +161,225 @@ This will first list the tests, partition the tests into one partition per CPU on the machine, and then invoke multiple test runners at the same time, with each test runner getting one partition. Currently the partitioning algorithm -is a simple round-robin, and the CPU detection is only implemented for Linux. +is simple round-robin for tests that testr has not seen run before, and +equal-time buckets for tests that testr has seen run. 
NB: This uses the anydbm +Python module to store the duration of each test. On some platforms (to date +only OSX) there is no bulk-update API and performance may be impacted if you +have many (10's of thousands) of tests. + +To determine how many CPUs are present in the machine, testrepository will +use the multiprocessing Python module (present since 2.6). On operating systems +where this is not implemented, or if you need to control the number of workers +that are used, the --concurrency option will let you do so:: + + $ testr run --parallel --concurrency=2 + +A more granular interface is available too - if you insert into .testr.conf:: + + test_run_concurrency=foo bar + +Then when testr needs to determine concurrency, it will run that command and +read the first line from stdout, cast that to an int, and use that as the +number of partitions to create. A count of 0 is interpreted to mean one +partition per test. For instance in .test.conf:: + + test_run_concurrency=echo 2 + +Would tell testr to use concurrency of 2. + +When running tests in parallel, testrepository tags each test with a tag for +the worker that executed the test. The tags are of the form ``worker-%d`` +and are usually used to reproduce test isolation failures, where knowing +exactly what test ran on a given backend is important. The %d that is +substituted in is the partition number of tests from the test run - all tests +in a single run with the same worker-N ran in the same test runner instance. + +To find out which slave a failing test ran on just look at the 'tags' line in +its test error:: + + ====================================================================== + label: testrepository.tests.ui.TestDemo.test_methodname + tags: foo worker-0 + ---------------------------------------------------------------------- + error text + +And then find tests with that tag:: + + $ testr last --subunit | subunit-filter -s --xfail --with-tag=worker-3 | subunit-ls > slave-3.list + +Grouping Tests +~~~~~~~~~~~~~~ + +In certain scenarios you may want to group tests of a certain type together +so that they will be run by the same backend. The group_regex option in +.testr.conf permits this. When set, tests are grouped by the group(0) of any +regex match. Tests with no match are not grouped. + +For example, extending the python sample .testr.conf from the configuration +section with a group regex that will group python tests cases together by +class (the last . splits the class and test method):: + + [DEFAULT] + test_command=python -m subunit.run discover . $LISTOPT $IDOPTION + test_id_option=--load-list $IDFILE + test_list_option=--list + group_regex=([^\.]+\.)+ + + +Remote or isolated test environments +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +A common problem with parallel test running is test runners that use global +resources such as well known ports, well known database names or predictable +directories on disk. + +One way to solve this is to setup isolated environments such as chroots, +containers or even separate machines. Such environments typically require +some coordination when being used to run tests, so testr provides an explicit +model for working with them. + +The model testr has is intended to support both developers working +incrementally on a change and CI systems running tests in a one-off setup, +for both statically and dynamically provisioned environments. + +The process testr follows is: + +1. The user should perform any one-time or once-per-session setup. 
For instance, + checking out source code, creating a template container, sourcing your cloud + credentials. +2. Execute testr run. +3. testr queries for concurrency. +4. testr will make a callout request to provision that many instances. + The provisioning callout needs to synchronise source code and do any other + per-instance setup at this stage. +5. testr will make callouts to execute tests, supplying files that should be + copied into the execution environment. Note that instances may be used for + more than one command execution. +6. testr will callout to dispose of the instances after the test run completes. + +Instances may be expensive to create and dispose of. testr does not perform +any caching, but the callout pattern is intended to facilitate external +caching - the provisioning callout can be used to pull environments out of +a cache, and the dispose to just return it to the cache. + +Configuring environment support +------------------------------- + +There are three callouts that testrepository depends on - configured in +.testr.conf as usual. For instance:: + + instance_provision=foo -c $INSTANCE_COUNT + instance_dispose=bar $INSTANCE_IDS + instance_execute=quux $INSTANCE_ID $FILES -- $COMMAND + +These should operate as follows: + +* instance_provision should start up the number of instances provided in the + $INSTANCE_COUNT parameter. It should print out on stdout the instance ids + that testr should supply to the dispose and execute commands. There should + be no other output on stdout (stderr is entirely up for grabs). An exit code + of non-zero will cause testr to consider the command to have failed. A + provisioned instance should be able to execute the list tests command and + execute tests commands that testr will run via the instance_execute callout. + Its possible to lazy-provision things if you desire - testr doesn't care - + but to reduce latency we suggest performing any rsync or other code + synchronisation steps during the provision step, as testr may make multiple + calls to one environment, and re-doing costly operations on each command + execution would impair performance. + +* instance_dispose should take a list of instance ids and get rid of them + this might mean putting them back in a pool of instances, or powering them + off, or terminating them - whatever makes sense for your project. + +* instance_execute should accept an instance id, a list of files that need to + be copied into the instance and a command to run within the instance. It + needs to copy those files into the instance (it may adjust their paths if + desired). If the paths are adjusted, the same paths within $COMMAND should be + adjusted to match. Execution that takes place with a shared filesystem can + obviously skip file copying or adjusting (and the $FILES parameter). When the + instance_execute terminates, it should use the exit code that the command + used within the instance. Stdout and stderr from instance_execute are + presumed to be that of $COMMAND. In particular, stdout is where the subunit + test output, and subunit test listing output, are expected, and putting other + output into stdout can lead to surprising results - such as corrupting the + subunit stream. + instance_execute is invoked for both test listing and test executing + callouts. + +Hiding tests +~~~~~~~~~~~~ + +Some test runners (for instance, zope.testrunner) report pseudo tests having to +do with bringing up the test environment rather than being actual tests that +can be executed. 
These are only relevant to a test run when they fail - the +rest of the time they tend to be confusing. For instance, the same 'test' may +show up on multiple parallel test runs, which will inflate the 'executed tests' +count depending on the number of worker threads that were used. Scheduling such +'tests' to run is also a bit pointless, as they are only ever executed +implicitly when preparing (or finishing with) a test environment to run other +tests in. + +testr can ignore such tests if they are tagged, using the filter_tags +configuration option. Tests tagged with any tag in that (space separated) list +will only be included in counts and reports if the test failed (or errored). + +Automated test isolation bisection +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +As mentioned above, its possible to manually analyze test isolation issues by +interrogating the repository for which tests ran on which worker, and then +creating a list file with those tests, re-running only half of them, checking +the error still happens, rinse and repeat. + +However that is tedious. testr can perform this analysis for you:: + + $ testr run --analyze-isolation + +will perform that analysis for you. (This requires that your test runner is +(mostly) deterministic on test ordering). The process is: + +1. The last run in the repository is used as a basis for analysing against - + tests are only cross checked against tests run in the same worker in that + run. This means that failures accrued from several different runs would not + be processed with the right basis tests - you should do a full test run to + seed your repository. This can be local, or just testr load a full run from + your Jenkins or other remote run environment. + +2. Each test that is currently listed as a failure is run in a test process + given just that id to run. + +3. Tests that fail are excluded from analysis - they are broken on their own. + +4. The remaining failures are then individually analysed one by one. + +5. For each failing, it gets run in one work along with the first 1/2 of the + tests that were previously run prior to it. + +6. If the test now passes, that set of prior tests are discarded, and the + other half of the tests is promoted to be the full list. If the test fails + then other other half of the tests are discarded and the current set + promoted. + +7. Go back to running the failing test along with 1/2 of the current list of + priors unless the list only has 1 test in it. If the failing test still + failed with that test, we have found the isolation issue. If it did not + then either the isolation issue is racy, or it is a 3-or-more test + isolation issue. Neither of those cases are automated today. + +Forcing isolation +~~~~~~~~~~~~~~~~~ + +Sometimes it is useful to force a separate test runner instance for each test +executed. The ``--isolated`` flag will cause testr to execute a separate runner +per test:: + + $ testr run --isolated + +In this mode testr first determines tests to run (either automatically listed, +using the failing set, or a user supplied load-list), and then spawns one test +runner per test it runs. To avoid cross-test-runner interactions concurrency +is disabled in this mode. ``--analyze-isolation`` supercedes ``--isolated`` if +they are both supplied. Repositories ~~~~~~~~~~~~ @@ -118,3 +402,21 @@ * repo.conf: This file contains user configuration settings for the repository. 
``testr repo-config`` will dump a repo configration and ``test help repo-config`` has online help for all the repository settings. + +setuptools integration +~~~~~~~~~~~~~~~~~~~~~~ + +testrepository provides a setuptools commands for ease of integration with +setuptools-based workflows: + +* testr: + ``python setup.py testr`` will run testr in parallel mode + Options that would normally be passed to testr run can be added to the + testr-options argument. + ``python setup.py testr --testr-options="--failing"`` will append --failing + to the test run. +* testr --coverage: + ``python setup.py testr --coverage`` will run testr in code coverage mode. This + assumes the installation of the python coverage module. +* ``python testr --coverage --omit=ModuleThatSucks.py`` will append + --omit=ModuleThatSucks.py to the coverage report command. diff -Nru testrepository-0.0.5/MANIFEST.in testrepository-0.0.18/MANIFEST.in --- testrepository-0.0.5/MANIFEST.in 2010-02-28 10:59:30.000000000 +0000 +++ testrepository-0.0.18/MANIFEST.in 2013-01-08 20:17:25.000000000 +0000 @@ -1,4 +1,5 @@ include .bzrignore +include .testr.conf include Apache-2.0 include BSD include COPYING diff -Nru testrepository-0.0.5/NEWS testrepository-0.0.18/NEWS --- testrepository-0.0.5/NEWS 2011-02-26 07:12:07.000000000 +0000 +++ testrepository-0.0.18/NEWS 2013-11-04 23:52:12.000000000 +0000 @@ -5,6 +5,338 @@ NEXT (In development) +++++++++++++++++++++ +0.0.18 +++++++ + +CHANGES +------- + +* ``run`` now accepts ``--isolated`` as a parameter, which will cause each + selected test to be run independently. This can be useful to both workaround + isolation bugs and detect tests that can not be run independently. + (Robert Collins) + +INTERNALS +--------- + +* ``capture_ids`` in test_run now returns a list of captures, permitting tests + that need to test multiple runs to do so. (Robert Collins) + +0.0.17 +++++++ + +CHANGES +------- + +* Restore the ability to import testrepository.repository.memory on Python 2.6. + (Robert Collins) + +0.0.16 +++++++ + +CHANGES +------- + +* A new testr.conf option ``group_regex`` can be used for grouping + tests so that they get run in the same backend runner. (Matthew Treinish) + +* Fix Python 3.* support for entrypoints; the initial code was Python3 + incompatible. (Robert Collins, Clark Boylan, #1187192) + +* Switch to using multiprocessing to determine CPU counts. + (Chris Jones, #1092276) + +* The cli UI now has primitive differentiation between multiple stream types. + This is not yet exposed to the end user, but is sufficient to enable the + load command to take interactive input without it reading from the raw + subunit stream on stdin. (Robert Collins) + +* The scheduler can now groups tests together permitting co-dependent tests to + always be scheduled onto the same backend. Note that this does not force + co-dependent tests to be executed, so partial test runs (e.g. --failing) + may still fail. (Matthew Treinish, Robert Collins) + +* When test listing fails, testr will now report an error rather than + incorrectly trying to run zero tests. A test listing failure is detected by + the returncode of the test listing process. (Robert Collins, #1185231) + +0.0.15 +++++++ + +CHANGES +------- + +* Expects subunit v2 if the local library has v2 support in the subunit + library. This should be seamless if the system under test shares the + Python libraries. If it doesn't, either arrange to use ``subunit-2to1`` + or upgrade the subunit libraries for the system under test. 
+ (Robert Collins) + +* ``--full-results`` is now a no-op, use ``--subunit`` to get unfiltered + output. (Robert Collins) + +0.0.14 +++++++ + +IMPROVEMENTS +------------ + +* First cut at full Python 3 support. The 'works for me' release. + (Robert Collins) + +0.0.13 +++++++ + +IMPROVEMENTS +------------ + +* ``setup.py testr`` was not indicating test failures via it's return code. + (Monty Taylor) + +0.0.12 +++++++ + +IMPROVEMENTS +------------ + +* There is now a setuptools extension provided by ``testrespository`` making it + easy to invoke testr from setup.py driven workflows. + (Monty Taylor, Robert Collins) + +INTERNALS +--------- + +* BSD license file incorrectly claimed copyright by subunit contributors. + (Monty Taylor) + +* .testr.conf is now shipped in the source distribution to aid folk wanting to + validate that testrepository works correctly on their machine. + (Robert Collins) + +0.0.11 +++++++ + +IMPROVEMENTS +------------ + +* Fix another incompatability with Mac OS X - gdbm dbm modules don't support + get. (Robert Collins, #1094330) + +0.0.10 +++++++ + +IMPROVEMENTS +------------ + +* It's now possible to configure ``test_run_concurrency`` in ``.testr.conf`` + to have concurrency defined by a callout. (Robert Collins) + +* Testr supports running tests in arbitrary environments. See ``Remote or + isolated test environments`` in MANUAL.txt / ``testr help run`` + (Robert Collins) + +INTERNALS +--------- + +* TestCommand is now a fixture. This is used to ensure cached test instances + are disposed of - if using the object to run or list tests, you will need + to adjust your calls. (Robert Collins) + +* ``TestCommand`` now offers, and ``TestListingFixture`` consumes a small + protocol for obtaining and releasing test execution instances. + (Robert Collins) + +0.0.9 ++++++ + +IMPROVEMENTS +------------ + +* On OSX the ``anydbm`` module by default returns an implementation that + doesn't support update(). Workaround that by falling back to a loop. + (Robert Collins, #1091500) + +* ``testr --analyze-improvements`` now honours test regex filters and only + analyzes matching tests. (Robert Collins) + +0.0.8 ++++++ + +CHANGES +------- + +* As a side effect of fixing bug #597060 additional arguments passed to testr + run or testr list are only passed to the underlying test runner if they are + preceeded by '--'. (Robert Collins, #597060) + +* ``testr run --failing`` will no longer run any tests at all if there are + no failing tests. (Robert Collins, #904400) + +IMPROVEMENTS +------------ + +* ``AbstractArgument`` now handles the case where additional arguments are + present that the argument type cannot parse, but enough have been parsed for + it to be valid. This allows optional arguments to be in the middle of a + grammar. (Robert Collins) + +* ``cli.UI`` now passed '--' down to the argument layer for handling rather + than implicitly stripping it. (Robert Collins) + +* ``DoubledashArgument`` added to allow fine grained control over the impact of + -- in command lines. (Robert Collins) + +* New argument type ``ExistingPathArgument`` for use when commands want to take + the name of a file. (Robert Collins) + +* ``testr`` will now show the version. (Robert Collins) + +* ``testr last`` when just one test run has been run works again. + (Robert Collins) + +* ``testr help command`` now shows the docstring for commands (Robert Collins) + +* ``testr --help command`` or ``testr command --help`` now shows the options + for the command. 
(Robert Collins) + +* ``testr run --analyze-isolation`` will search the current failing tests for + spurious failures caused by interactions with other tests. + (Robert Collins, #684069) + +* ``testr run --until-failure`` will repeat a test run until interrupted by + ctrl-C or until a failure occurs. (Robert Collins, #680995) + +* ``Repository.get_test_run`` now raises KeyError if asked for a missing or + nonexistant test run. (Robert Collins) + +* Sphinx has been added to tie the documentation toghether (And it is available + on testrepository.readthedocs.org). (Robert Collins) + +* ``StringArgument`` now rejects '--' - it should be handled by the use of a + ``DoubledashArgument`` where one is expected. This is a bit awkward and does + not permit passing '--' down to a child process, so further work may be + needed - file a bug if this affects you. (Robert Collins) + +* ``test failing --subunit`` now exits 0 unless there was a problem generating + the stream. This is consistent with the general processing model of subunit + generators. (Robert Collins) + +* ``testr last`` now supports ``--subunit`` and when passed will output + the stored subunit stream. Note that the exit code is always 0 when this + is done (unless an exception occurs reading the stream) - subunit consumers + should parse the subunit to determine success/failure. (Robert Collins) + +* ``testr load`` now supports passing filenames to subunit streams to load. + (Robert Collins, #620386) + +* ``testr run`` will now fail a test run if the test process exits non-zero. + As a side effect of this change, if the test program closes its stdout but + does not exit, ``testr run`` will hang (waiting for the test program to + exit). (Robert Collins) + +* ``testr run --load-list FILENAME`` will limit the tests run to the test ids + supplied in the list file FILENAME. This is useful for manually specifying + the tests to run, or running testr subordinate to testr (e.g. on remote + machines). (Robert Collins, partial fix for #597060) + +* ``testr run foo`` now applies foo as a regex filter against the tests + found by doing a listing of the test runners tests. Likewise + ``testr list-tests foo`` will apply foo as a filter against the found tests. + This makes it easy to limit the tests that will be requested for running by + the backend test process - simply pass one or more regex filters into testr + run. (Robert Collins, #597060) + +* Test tags are now shown in failures. Of particular interest for folk debgging + cross-test interactions will be the worker-N tags which indicate which + backend test process executed a given test. (Robert Collins) + +0.0.7 ++++++ + +CHANGES +------- + +* testrepository is now distributed via distribute rather than distutils, + allowing installation via pip into virtualenv environments. (Robert Collins) + +IMPROVEMENTS +------------ + +* stream loading will now synthesise datestamps before demultiplexing rather + than on insertion into the repository. This fixes erroneously short times + being recorded on non timestamped streams. Additionally, moving the automatic + addition of timestamp material in front of the demuxer has removed the skew + that caused test times to be reported as longer than the stream could + indicate (by the amount of time the test runner took to start outputting + subunit). This time may be something we want to track later, but the prior + mechanism was inconsistent between the current run and reporting on prior + runs, which lead to a very confusing UI. 
Now it is consistent, but totally + ignores that overhead. + (Robert Collins, #1048126, #980950) + +* ``testr run`` now accepts a --concurrency option, allowing command line + override of the number of workers spawned. This allows conccurency on + operating systems where autodetection is not yet implemented, or just + debugging problems with concurrent test suites. (Robert Collins, #957145) + +* ''test_id_list_default'' would prevent ''test_list_option'' being used in + previous releases. For Python environments where the context to load tests + from is always needed this was not an issue (and thus not uncovered). However + given a test runner which wants either a global context or a list of specific + tests with no global context, there was no way to achieve that with this bug. + (Robert Collins, #1027042) + + +0.0.6 ++++++ + +CHANGES +------- + +* Now relies on subunit 0.0.8 or better and testtools 0.9.15 or better. + +IMPROVEMENTS +------------ + +* Much better handling of unicode input from subunit streams. Specifically, + we won't crash when we can't figure something out. (Francesco Banconi, + Martin Packman, #955006) + +* Parallel tests now record their worker thread number as tags in tests. + This makes identifying test ordering problems much easier. + (Benji York, #974622) + +* Python2.7 changed the interface for DBM, this has been worked around. + (Robert Collins, #775214, #961103) + +* Subunit 0.0.7 Changes its TestResultFilter implementation, requiring the + subclass in testrepository.filter to be come more robust. + (Robert Collins) + +* A horrible thinko in the testrepository test suite came to light and has been + fixed. How the tests ever ran is a mystery. (Robert Collins, #881497) + +* ''failing'', ''run'' and ''load'' now both take a ''--subunit'' option, which + displays output in raw subunit format. If ''--full-results'' is passed too, + then all subunit information is displayed. (Brad Crittenden, #949950) + +* Setting ''filter_tags'' in ''.testr.conf'' will cause tests tagged with those + tags to be hidden unless the fail/error. This requires Subunit 0.0.8. If + an older version of subunit is configured, testr will return an error. + (Robert Collins, #914166) + +* ``testr`` will drop into PDB from its command line UI if the environment + variable TESTR_PDB is set. (Robert Collins) + +* Test partitioning now handles a corner case where multiple tests have a + reported duration of 0. Previously they could all accumulate into one + partition, now they split across partitions (the length of a partition is + used as a tie breaker if two partitions have the same duration). + (Robert Collins, #914359) + +* The test 'test_outputs_results_to_stdout' was sensitive to changes in + testtools and has been made more generic. (Robert Collins) + 0.0.5 +++++ diff -Nru testrepository-0.0.5/PKG-INFO testrepository-0.0.18/PKG-INFO --- testrepository-0.0.5/PKG-INFO 2011-02-26 07:18:09.000000000 +0000 +++ testrepository-0.0.18/PKG-INFO 2013-11-05 14:40:31.000000000 +0000 @@ -1,6 +1,6 @@ -Metadata-Version: 1.0 +Metadata-Version: 1.1 Name: testrepository -Version: 0.0.5 +Version: 0.0.18 Summary: A repository of test results. 
Home-page: https://launchpad.net/testrepository Author: Robert Collins @@ -37,6 +37,9 @@ Quick Start ~~~~~~~~~~~ + Create a config file:: + $ touch .testr.conf + Create a repository:: $ testr init @@ -49,13 +52,24 @@ $ testr failing Delete a repository:: - $ testr delete + $ rm -rf .testrepository Documentation ~~~~~~~~~~~~~ More detailed documentation including design and implementation details, a user manual, and guidelines for development of Test Repository itself can be - found in the doc/ directory. + found at https://testrepository.readthedocs.org/en/latest, or in the source + tree at doc/ (run make -C doc html). +Keywords: subunit unittest testrunner Platform: UNKNOWN +Classifier: Development Status :: 6 - Mature +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Topic :: Software Development :: Quality Assurance +Classifier: Topic :: Software Development :: Testing diff -Nru testrepository-0.0.5/README.txt testrepository-0.0.18/README.txt --- testrepository-0.0.5/README.txt 2010-01-10 11:32:56.000000000 +0000 +++ testrepository-0.0.18/README.txt 2013-01-12 10:27:36.000000000 +0000 @@ -29,6 +29,9 @@ Quick Start ~~~~~~~~~~~ +Create a config file:: + $ touch .testr.conf + Create a repository:: $ testr init @@ -41,11 +44,12 @@ $ testr failing Delete a repository:: - $ testr delete + $ rm -rf .testrepository Documentation ~~~~~~~~~~~~~ More detailed documentation including design and implementation details, a user manual, and guidelines for development of Test Repository itself can be -found in the doc/ directory. +found at https://testrepository.readthedocs.org/en/latest, or in the source +tree at doc/ (run make -C doc html). diff -Nru testrepository-0.0.5/setup.cfg testrepository-0.0.18/setup.cfg --- testrepository-0.0.5/setup.cfg 1970-01-01 00:00:00.000000000 +0000 +++ testrepository-0.0.18/setup.cfg 2013-11-05 14:40:31.000000000 +0000 @@ -0,0 +1,5 @@ +[egg_info] +tag_build = +tag_date = 0 +tag_svn_revision = 0 + diff -Nru testrepository-0.0.5/setup.py testrepository-0.0.18/setup.py --- testrepository-0.0.5/setup.py 2010-11-24 18:01:54.000000000 +0000 +++ testrepository-0.0.18/setup.py 2013-04-08 10:37:19.000000000 +0000 @@ -1,6 +1,6 @@ #!/usr/bin/env python # -# Copyright (c) 2009, 2010 Testrepository Contributors +# Copyright (c) 2009-2013 Testrepository Contributors # # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the @@ -13,7 +13,7 @@ # license you chose for the specific language governing permissions and # limitations under that license. 
-from distutils.core import setup +from setuptools import setup import email import os @@ -41,7 +41,7 @@ def get_version(): - """Return the version of testtools that we are building.""" + """Return the version of testrepository that we are building.""" version = '.'.join( str(component) for component in testrepository.__version__[0:3]) phase = testrepository.__version__[3] @@ -59,7 +59,7 @@ return version + '-r%s' % revno -description = file(os.path.join(os.path.dirname(__file__), 'README.txt'), 'rb').read() +description = open(os.path.join(os.path.dirname(__file__), 'README.txt'), 'rt').read() setup(name='testrepository', @@ -68,6 +68,18 @@ url='https://launchpad.net/testrepository', description='A repository of test results.', long_description=description, + keywords="subunit unittest testrunner", + classifiers = [ + 'Development Status :: 6 - Mature', + 'Intended Audience :: Developers', + 'License :: OSI Approved :: BSD License', + 'License :: OSI Approved :: Apache Software License', + 'Operating System :: OS Independent', + 'Programming Language :: Python', + 'Programming Language :: Python :: 3', + 'Topic :: Software Development :: Quality Assurance', + 'Topic :: Software Development :: Testing', + ], scripts=['testr'], version=get_version(), packages=['testrepository', @@ -80,4 +92,23 @@ 'testrepository.tests.repository', 'testrepository.tests.ui', 'testrepository.ui', - ]) + ], + install_requires=[ + 'fixtures', + 'python-subunit >= 0.0.10', + 'testtools >= 0.9.30', + ], + extras_require = dict( + test=[ + 'bzr', + 'pytz', + 'testresources', + 'testscenarios', + ] + ), + entry_points={ + 'distutils.commands': [ + 'testr = testrepository.setuptools_command:Testr', + ], + }, + ) diff -Nru testrepository-0.0.5/.testr.conf testrepository-0.0.18/.testr.conf --- testrepository-0.0.5/.testr.conf 1970-01-01 00:00:00.000000000 +0000 +++ testrepository-0.0.18/.testr.conf 2013-05-29 04:01:02.000000000 +0000 @@ -0,0 +1,5 @@ +[DEFAULT] +test_command=${PYTHON:-python} -m subunit.run $LISTOPT $IDOPTION testrepository.tests.test_suite +test_id_option=--load-list $IDFILE +test_list_option=--list +;filter_tags=worker-0 diff -Nru testrepository-0.0.5/testrepository/arguments/doubledash.py testrepository-0.0.18/testrepository/arguments/doubledash.py --- testrepository-0.0.5/testrepository/arguments/doubledash.py 1970-01-01 00:00:00.000000000 +0000 +++ testrepository-0.0.18/testrepository/arguments/doubledash.py 2012-12-18 22:28:01.000000000 +0000 @@ -0,0 +1,29 @@ +# +# Copyright (c) 2012 Testrepository Contributors +# +# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause +# license at the users choice. A copy of both licenses are available in the +# project source as Apache-2.0 and BSD. You may not use this file except in +# compliance with one of these two licences. +# +# Unless required by applicable law or agreed to in writing, software +# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# license you chose for the specific language governing permissions and +# limitations under that license. 
+ +"""An Argument that checks for '--'.""" + +from testrepository.arguments import AbstractArgument + + +class DoubledashArgument(AbstractArgument): + """An argument that captures '--'.""" + + def __init__(self): + super(DoubledashArgument, self).__init__('doubledash', min=0) + + def _parse_one(self, arg): + if arg != '--': + raise ValueError('not a doubledash %r' % (arg,)) + return arg diff -Nru testrepository-0.0.5/testrepository/arguments/__init__.py testrepository-0.0.18/testrepository/arguments/__init__.py --- testrepository-0.0.5/testrepository/arguments/__init__.py 2010-02-28 10:59:30.000000000 +0000 +++ testrepository-0.0.18/testrepository/arguments/__init__.py 2013-02-06 09:16:47.000000000 +0000 @@ -29,6 +29,10 @@ containing their argument types - no __init__ is needed in that directory.) """ +import sys + +from testtools.compat import reraise + class AbstractArgument(object): """A argument that a command may need. @@ -83,13 +87,22 @@ """ count = 0 result = [] + error = None while len(argv) > count and ( - count < self.maximum_count or self.maximum_count is None): + self.maximum_count is None or count < self.maximum_count): arg = argv[count] count += 1 - result.append(self._parse_one(arg)) + try: + result.append(self._parse_one(arg)) + except ValueError: + # argument rejected this element + error = sys.exc_info() + count -= 1 + break if count < self.minimum_count: - raise ValueError('not enough arguments present in %s' % argv) + if error is not None: + reraise(error[0], error[1], error[2]) + raise ValueError('not enough arguments present/matched in %s' % argv) del argv[:count] return result diff -Nru testrepository-0.0.5/testrepository/arguments/path.py testrepository-0.0.18/testrepository/arguments/path.py --- testrepository-0.0.5/testrepository/arguments/path.py 1970-01-01 00:00:00.000000000 +0000 +++ testrepository-0.0.18/testrepository/arguments/path.py 2012-12-18 22:28:01.000000000 +0000 @@ -0,0 +1,30 @@ +# +# Copyright (c) 2012 Testrepository Contributors +# +# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause +# license at the users choice. A copy of both licenses are available in the +# project source as Apache-2.0 and BSD. You may not use this file except in +# compliance with one of these two licences. +# +# Unless required by applicable law or agreed to in writing, software +# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# license you chose for the specific language governing permissions and +# limitations under that license. 
+ +"""An Argument that gets the name of an existing path.""" + +import os.path + +from testrepository.arguments import AbstractArgument + + +class ExistingPathArgument(AbstractArgument): + """An argument that stores a string verbatim.""" + + def _parse_one(self, arg): + if arg == '--': + raise ValueError('-- is not a valid argument') + if not os.path.exists(arg): + raise ValueError('No such path %r' % (arg,)) + return arg diff -Nru testrepository-0.0.5/testrepository/arguments/string.py testrepository-0.0.18/testrepository/arguments/string.py --- testrepository-0.0.5/testrepository/arguments/string.py 2010-02-28 10:59:30.000000000 +0000 +++ testrepository-0.0.18/testrepository/arguments/string.py 2012-12-18 22:28:01.000000000 +0000 @@ -21,4 +21,6 @@ """An argument that stores a string verbatim.""" def _parse_one(self, arg): + if arg == '--': + raise ValueError('-- is not a valid argument') return arg diff -Nru testrepository-0.0.5/testrepository/commands/failing.py testrepository-0.0.18/testrepository/commands/failing.py --- testrepository-0.0.5/testrepository/commands/failing.py 2010-11-11 01:58:47.000000000 +0000 +++ testrepository-0.0.18/testrepository/commands/failing.py 2013-04-10 10:14:01.000000000 +0000 @@ -16,10 +16,11 @@ import optparse -from testtools import MultiTestResult, TestResult +import testtools +from testtools import ExtendedToStreamDecorator, MultiTestResult from testrepository.commands import Command -from testrepository.results import TestResultFilter +from testrepository.testcommand import TestCommand class failing(Command): @@ -30,6 +31,10 @@ full run combined with any failures in subsequent partial runs, minus any passes that have occured in a run more recent than a given failure. Deleted tests will only be detected on full runs with this approach. + + Without --subunit, the process exit code will be non-zero if the test run + was not successful. With --subunit, the process exit code is non-zero if + the subunit stream could not be generated successfully. """ options = [ @@ -40,44 +45,42 @@ "--list", action="store_true", default=False, help="Show only a list of failing tests."), ] + # Can be assigned to to inject a custom command factory. + command_factory = TestCommand - def _list_subunit(self, run): - # TODO only failing tests. 
+ def _show_subunit(self, run): stream = run.get_subunit_stream() self.ui.output_stream(stream) - if stream: - return 1 - else: - return 0 + return 0 - def _make_result(self, repo, list_result): + def _make_result(self, repo): + testcommand = self.command_factory(self.ui, repo) if self.ui.options.list: - return list_result - output_result = self.ui.make_result(repo.latest_id) - filtered = TestResultFilter(output_result, filter_skip=True) - return MultiTestResult(list_result, filtered) + list_result = testtools.StreamSummary() + return list_result, list_result + else: + return self.ui.make_result(repo.latest_id, testcommand) def run(self): repo = self.repository_factory.open(self.ui.here) run = repo.get_failing() if self.ui.options.subunit: - return self._list_subunit(run) + return self._show_subunit(run) case = run.get_test() failed = False - list_result = TestResult() - result = self._make_result(repo, list_result) + result, summary = self._make_result(repo) result.startTestRun() try: case.run(result) finally: result.stopTestRun() - failed = not list_result.wasSuccessful() + failed = not summary.wasSuccessful() if failed: result = 1 else: result = 0 if self.ui.options.list: failing_tests = [ - test for test, _ in list_result.errors + list_result.failures] + test for test, _ in summary.errors + summary.failures] self.ui.output_tests(failing_tests) return result diff -Nru testrepository-0.0.5/testrepository/commands/help.py testrepository-0.0.18/testrepository/commands/help.py --- testrepository-0.0.5/testrepository/commands/help.py 2010-01-10 11:32:56.000000000 +0000 +++ testrepository-0.0.18/testrepository/commands/help.py 2012-12-18 22:28:01.000000000 +0000 @@ -14,8 +14,12 @@ """Get help on a command.""" +import testrepository from testrepository.arguments import command -from testrepository.commands import Command +from testrepository.commands import ( + Command, + get_command_parser, + ) class help(Command): """Get help on a command.""" @@ -24,15 +28,17 @@ def run(self): if not self.ui.arguments['command_name']: - help = """testr -- a free test repository + version = '.'.join(map(str, testrepository.__version__)) + help = """testr %s -- a free test repository https://launchpad.net/testrepository/ testr commands -- list commands testr quickstart -- starter documentation testr help [command] -- help system -""" +""" % version else: cmd = self.ui.arguments['command_name'][0] - help = cmd.__doc__ + parser = get_command_parser(cmd) + help = parser.format_help() self.ui.output_rest(help) return 0 diff -Nru testrepository-0.0.5/testrepository/commands/__init__.py testrepository-0.0.18/testrepository/commands/__init__.py --- testrepository-0.0.5/testrepository/commands/__init__.py 2010-12-06 06:53:32.000000000 +0000 +++ testrepository-0.0.18/testrepository/commands/__init__.py 2013-01-26 20:01:41.000000000 +0000 @@ -32,10 +32,13 @@ needed in that directory.) """ +from inspect import getdoc +from optparse import OptionParser import os import sys import subunit +from testtools.compat import _u from testrepository.repository import file @@ -73,6 +76,7 @@ name = name.replace('_', '-') names.add(name) names.discard('--init--') + names.discard('--pycache--') names = sorted(names) for name in names: yield _find_command(name) @@ -185,3 +189,25 @@ if not result: return 0 return result + + +def get_command_parser(cmd): + """Return an OptionParser for cmd. + + This populates the parser with the commands options and sets its usage + string based on the arguments and docstring the command has. 
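
failing (and last, below) now delegate success/failure bookkeeping to testtools.StreamSummary and exit non-zero when the summary reports the run as unsuccessful, exactly as the new docstrings describe. A minimal illustration of that contract, feeding StreamResult events in by hand::

    from testtools import StreamSummary

    summary = StreamSummary()
    summary.startTestRun()
    summary.status(test_id='demo.test_ok', test_status='inprogress')
    summary.status(test_id='demo.test_ok', test_status='success')
    summary.status(test_id='demo.test_broken', test_status='inprogress')
    summary.status(test_id='demo.test_broken', test_status='fail')
    summary.stopTestRun()

    exit_code = 0 if summary.wasSuccessful() else 1
    print(exit_code)   # 1: one test failed
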
+ + Global options are not provided (as they are UI specific). + + :return: An OptionParser instance. + """ + parser = OptionParser() + for option in cmd.options: + parser.add_option(option) + usage = _u('%%prog %(cmd)s [options] %(args)s\n\n%(help)s') % { + 'args': _u(' ').join(map(lambda x:x.summary(), cmd.args)), + 'cmd': getattr(cmd, 'name', cmd), + 'help': getdoc(cmd), + } + parser.set_usage(usage) + return parser diff -Nru testrepository-0.0.5/testrepository/commands/last.py testrepository-0.0.18/testrepository/commands/last.py --- testrepository-0.0.5/testrepository/commands/last.py 2010-11-11 01:58:47.000000000 +0000 +++ testrepository-0.0.18/testrepository/commands/last.py 2013-03-15 16:31:12.000000000 +0000 @@ -14,8 +14,12 @@ """Show the last run loaded into a repository.""" +import optparse + +import testtools + from testrepository.commands import Command -from testrepository.results import TestResultFilter +from testrepository.testcommand import TestCommand class last(Command): @@ -23,21 +27,43 @@ Failing tests are shown on the console and a summary of the run is printed at the end. + + Without --subunit, the process exit code will be non-zero if the test run + was not successful. With --subunit, the process exit code is non-zero if + the subunit stream could not be generated successfully. """ + options = [ + optparse.Option( + "--subunit", action="store_true", + default=False, help="Show output as a subunit stream."), + ] + # Can be assigned to to inject a custom command factory. + command_factory = TestCommand + def run(self): repo = self.repository_factory.open(self.ui.here) - run_id = repo.latest_id() - case = repo.get_test_run(run_id).get_test() + testcommand = self.command_factory(self.ui, repo) + latest_run = repo.get_latest_run() + if self.ui.options.subunit: + stream = latest_run.get_subunit_stream() + self.ui.output_stream(stream) + # Exits 0 if we successfully wrote the stream. + return 0 + case = latest_run.get_test() + try: + previous_run = repo.get_test_run(repo.latest_id() - 1) + except KeyError: + previous_run = None failed = False - output_result = self.ui.make_result(lambda: run_id) - result = TestResultFilter(output_result, filter_skip=True) + result, summary = self.ui.make_result( + latest_run.get_id, testcommand, previous_run=previous_run) result.startTestRun() try: case.run(result) finally: result.stopTestRun() - failed = not result.wasSuccessful() + failed = not summary.wasSuccessful() if failed: return 1 else: diff -Nru testrepository-0.0.5/testrepository/commands/list_tests.py testrepository-0.0.18/testrepository/commands/list_tests.py --- testrepository-0.0.5/testrepository/commands/list_tests.py 2010-12-19 09:11:49.000000000 +0000 +++ testrepository-0.0.18/testrepository/commands/list_tests.py 2013-02-07 08:23:49.000000000 +0000 @@ -14,10 +14,12 @@ """List the tests from a project and show them.""" -from cStringIO import StringIO +from io import BytesIO from testtools import TestResult +from testtools.compat import _b +from testrepository.arguments.doubledash import DoubledashArgument from testrepository.arguments.string import StringArgument from testrepository.commands import Command from testrepository.testcommand import testrconf_help, TestCommand @@ -27,22 +29,36 @@ __doc__ = """Lists the tests for a project. """ + testrconf_help - args = [StringArgument('testargs', 0, None)] + args = [StringArgument('testfilters', 0, None), DoubledashArgument(), + StringArgument('testargs', 0, None)] # Can be assigned to to inject a custom command factory. 
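
get_command_parser above is what `testr help <command>` now renders: the command's optparse options plus its docstring folded into the usage string. The same assembly for an invented stand-in command (fake_command and its single option are made up for the example)::

    from inspect import getdoc
    from optparse import Option, OptionParser

    class fake_command(object):
        """Show how testr builds per-command help.

        The options list and this docstring are invented for the example.
        """
        name = 'fake-command'
        options = [Option('--dry-run', action='store_true', default=False,
                          help='Do everything except the work.')]

    parser = OptionParser()
    for option in fake_command.options:
        parser.add_option(option)
    parser.set_usage('%%prog %s [options]\n\n%s' % (fake_command.name,
                                                    getdoc(fake_command)))
    print(parser.format_help())
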
command_factory = TestCommand def run(self): testcommand = self.command_factory(self.ui, None) ids = None - cmd = testcommand.get_run_command(ids, self.ui.arguments['testargs']) - cmd.setUp() + filters = None + if self.ui.arguments['testfilters']: + filters = self.ui.arguments['testfilters'] + testcommand.setUp() try: - ids = cmd.list_tests() - stream = StringIO() - for id in ids: - stream.write('%s\n' % id) - stream.seek(0) - self.ui.output_stream(stream) - return 0 + cmd = testcommand.get_run_command( + ids, self.ui.arguments['testargs'], test_filters=filters) + cmd.setUp() + try: + # Ugh. + # List tests if the fixture has not already needed to to filter. + if filters is None: + ids = cmd.list_tests() + else: + ids = cmd.test_ids + stream = BytesIO() + for id in ids: + stream.write(('%s\n' % id).encode('utf8')) + stream.seek(0) + self.ui.output_stream(stream) + return 0 + finally: + cmd.cleanUp() finally: - cmd.cleanUp() + testcommand.cleanUp() diff -Nru testrepository-0.0.5/testrepository/commands/load.py testrepository-0.0.18/testrepository/commands/load.py --- testrepository-0.0.5/testrepository/commands/load.py 2011-01-14 00:14:13.000000000 +0000 +++ testrepository-0.0.18/testrepository/commands/load.py 2013-07-13 11:46:33.000000000 +0000 @@ -14,14 +14,41 @@ """Load data into a repository.""" +from functools import partial +from operator import methodcaller import optparse +import threading -import subunit -from testtools import ConcurrentTestSuite, MultiTestResult +from extras import try_import +v2_avail = try_import('subunit.ByteStreamToStreamResult') +import subunit.test_results +import testtools + +from testrepository.arguments.path import ExistingPathArgument from testrepository.commands import Command from testrepository.repository import RepositoryNotFound -from testrepository.results import TestResultFilter +from testrepository.testcommand import TestCommand + +class InputToStreamResult(object): + """Generate Stream events from stdin. + + Really a UI responsibility? + """ + + def __init__(self, stream): + self.source = stream + self.stop = False + + def run(self, result): + while True: + if self.stop: + return + char = self.source.read(1) + if not char: + return + if char == b'a': + result.status(test_id='stdin', test_status='fail') class load(Command): @@ -33,8 +60,9 @@ Unless the stream is a partial stream, any existing failures are discarded. """ - input_streams = ['subunit+'] + input_streams = ['subunit+', 'interactive?'] + args = [ExistingPathArgument('streams', min=0, max=None)] options = [ optparse.Option("--partial", action="store_true", default=False, help="The stream being loaded was a partial run."), @@ -42,7 +70,14 @@ "--force-init", action="store_true", default=False, help="Initialise the repository if it does not exist already"), + optparse.Option("--subunit", action="store_true", + default=False, help="Display results in subunit format."), + optparse.Option("--full-results", action="store_true", + default=False, + help="No-op - deprecated and kept only for backwards compat."), ] + # Can be assigned to to inject a custom command factory. + command_factory = TestCommand def run(self): path = self.ui.here @@ -53,28 +88,73 @@ repo = self.repository_factory.initialise(path) else: raise - run_id = None + testcommand = self.command_factory(self.ui, repo) # Not a full implementation of TestCase, but we only need to iterate # back to it. Needs to be a callable - its a head fake for # testsuite.add. 
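
load now takes optional file paths and a --force-init flag that initialises the repository when open raises RepositoryNotFound. Roughly, the open-or-initialise step looks like this against the repository factory API; a sketch, assuming the default file-based factory in testrepository.repository.file::

    from testrepository.repository import RepositoryNotFound
    from testrepository.repository import file as file_repo

    def open_or_init(path, force_init=False):
        factory = file_repo.RepositoryFactory()
        try:
            return factory.open(path)
        except RepositoryNotFound:
            if not force_init:
                raise
            return factory.initialise(path)

    repo = open_or_init('.', force_init=True)
    try:
        print(repo.latest_id())
    except KeyError:
        # A freshly initialised repository has no runs yet.
        print('no runs loaded yet')
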
- cases = lambda:self.ui.iter_streams('subunit') - def make_tests(suite): - streams = list(suite)[0] - for stream in streams(): - yield subunit.ProtocolTestCase(stream) - case = ConcurrentTestSuite(cases, make_tests) + # XXX: Be nice if we could declare that the argument, which is a path, + # is to be an input stream - and thus push this conditional down into + # the UI object. + if self.ui.arguments.get('streams'): + opener = partial(open, mode='rb') + streams = map(opener, self.ui.arguments['streams']) + else: + streams = self.ui.iter_streams('subunit') + def make_tests(): + for pos, stream in enumerate(streams): + if v2_avail: + # Calls StreamResult API. + case = subunit.ByteStreamToStreamResult( + stream, non_subunit_name='stdout') + else: + # Calls TestResult API. + case = subunit.ProtocolTestCase(stream) + def wrap_result(result): + # Wrap in a router to mask out startTestRun/stopTestRun from the + # ExtendedToStreamDecorator. + result = testtools.StreamResultRouter( + result, do_start_stop_run=False) + # Wrap that in ExtendedToStreamDecorator to convert v1 calls to + # StreamResult. + return testtools.ExtendedToStreamDecorator(result) + # Now calls StreamResult API :). + case = testtools.DecorateTestCaseResult(case, wrap_result, + methodcaller('startTestRun'), + methodcaller('stopTestRun')) + case = testtools.DecorateTestCaseResult(case, + lambda result:testtools.StreamTagger( + [result], add=['worker-%d' % pos])) + yield (case, str(pos)) + case = testtools.ConcurrentStreamTestSuite(make_tests) + # One unmodified copy of the stream to repository storage inserter = repo.get_inserter(partial=self.ui.options.partial) - output_result = self.ui.make_result(lambda: run_id) - # XXX: We want to *count* skips, but not show them. - filtered = TestResultFilter(output_result, filter_skip=False) - filtered.startTestRun() - inserter.startTestRun() + # One copy of the stream to the UI layer after performing global + # filters. 
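
make_tests above prefers the subunit v2 StreamResult path (ByteStreamToStreamResult) when it is importable and tags each stream with a worker-N tag before feeding everything through ConcurrentStreamTestSuite. The v2 plumbing on its own, as a round trip from bytes back to a summary, assuming a python-subunit new enough to provide the v2 classes (load falls back to ProtocolTestCase otherwise)::

    from io import BytesIO

    import subunit
    import testtools

    # Write a tiny v2 byte stream.
    buf = BytesIO()
    writer = subunit.StreamResultToBytes(buf)
    writer.status(test_id='demo.test_ok', test_status='inprogress')
    writer.status(test_id='demo.test_ok', test_status='success')
    buf.seek(0)

    # Parse it back and summarise, the same shape load uses per input stream.
    case = subunit.ByteStreamToStreamResult(buf, non_subunit_name='stdout')
    summary = testtools.StreamSummary()
    summary.startTestRun()
    case.run(summary)
    summary.stopTestRun()
    print(summary.wasSuccessful())   # True
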
+ try: + previous_run = repo.get_latest_run() + except KeyError: + previous_run = None + output_result, summary_result = self.ui.make_result( + inserter.get_id, testcommand, previous_run=previous_run) + result = testtools.CopyStreamResult([inserter, output_result]) + runner_thread = None + result.startTestRun() try: - case.run(MultiTestResult(inserter, filtered)) + # Convert user input into a stdin event stream + interactive_streams = list(self.ui.iter_streams('interactive')) + if interactive_streams: + case = InputToStreamResult(interactive_streams[0]) + runner_thread = threading.Thread( + target=case.run, args=(result,)) + runner_thread.daemon = True + runner_thread.start() + case.run(result) finally: - run_id = inserter.stopTestRun() - filtered.stopTestRun() - if not filtered.wasSuccessful(): + result.stopTestRun() + if interactive_streams and runner_thread: + runner_thread.stop = True + runner_thread.join(10) + if not summary_result.wasSuccessful(): return 1 else: return 0 diff -Nru testrepository-0.0.5/testrepository/commands/quickstart.py testrepository-0.0.18/testrepository/commands/quickstart.py --- testrepository-0.0.5/testrepository/commands/quickstart.py 2010-01-10 11:32:56.000000000 +0000 +++ testrepository-0.0.18/testrepository/commands/quickstart.py 2012-12-28 05:14:06.000000000 +0000 @@ -52,6 +52,9 @@ Quick Start ~~~~~~~~~~~ +Create a config file:: + $ touch .testr.conf + Create a repository:: $ testr init @@ -64,14 +67,15 @@ $ testr failing Delete a repository:: - $ testr delete + $ rm -rf .testrepository Documentation ~~~~~~~~~~~~~ More detailed documentation including design and implementation details, a user manual, and guidelines for development of Test Repository itself can be -found in the doc/ directory. +found at https://testrepository.readthedocs.org/en/latest, or in the source +tree at doc/ (run make -C doc html). """ self.ui.output_rest(help) return 0 diff -Nru testrepository-0.0.5/testrepository/commands/run.py testrepository-0.0.18/testrepository/commands/run.py --- testrepository-0.0.5/testrepository/commands/run.py 2010-12-19 09:08:32.000000000 +0000 +++ testrepository-0.0.18/testrepository/commands/run.py 2013-11-03 19:58:46.000000000 +0000 @@ -1,11 +1,11 @@ # -# Copyright (c) 2010 Testrepository Contributors -# +# Copyright (c) 2010-2012 Testrepository Contributors +# # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the # project source as Apache-2.0 and BSD. You may not use this file except in # compliance with one of these two licences. -# +# # Unless required by applicable law or agreed to in writing, software # distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the @@ -14,19 +14,106 @@ """Run a projects tests and load them into testrepository.""" -import ConfigParser -from cStringIO import StringIO +from io import BytesIO +from math import ceil import optparse -import os.path -import string +import re -from testtools import TestResult +from extras import try_import +import subunit +v2_avail = try_import('subunit.ByteStreamToStreamResult') +import testtools +from testtools import ( + TestByTestResult, + ) +from testtools.compat import _b +from testrepository.arguments.doubledash import DoubledashArgument from testrepository.arguments.string import StringArgument from testrepository.commands import Command from testrepository.commands.load import load from testrepository.ui import decorator from testrepository.testcommand import TestCommand, testrconf_help +from testrepository.testlist import parse_list + + +LINEFEED = _b('\n')[0] + + +class ReturnCodeToSubunit(object): + """Converts a process return code to a subunit error on the process stdout. + + The ReturnCodeToSubunit object behaves as a readonly stream, supplying + the read, readline and readlines methods. If the process exits non-zero a + synthetic test is added to the output, making the error accessible to + subunit stream consumers. If the process closes its stdout and then does + not terminate, reading from the ReturnCodeToSubunit stream will hang. + + This class will be deleted at some point, allowing parsing to read from the + actual fd and benefit from select for aggregating non-subunit output. + """ + + def __init__(self, process): + """Adapt a process to a readable stream. + + :param process: A subprocess.Popen object that is + generating subunit. + """ + self.proc = process + self.done = False + self.source = self.proc.stdout + self.lastoutput = LINEFEED + + def _append_return_code_as_test(self): + if self.done is True: + return + self.source = BytesIO() + returncode = self.proc.wait() + if returncode != 0: + if self.lastoutput != LINEFEED: + # Subunit V1 is line orientated, it has to start on a fresh + # line. V2 needs to start on any fresh utf8 character border + # - which is not guaranteed in an arbitrary stream endpoint, so + # injecting a \n gives us such a guarantee. 
+ self.source.write(_b('\n')) + if v2_avail: + stream = subunit.StreamResultToBytes(self.source) + stream.status(test_id='process-returncode', test_status='fail', + file_name='traceback', mime_type='test/plain;charset=utf8', + file_bytes=('returncode %d' % returncode).encode('utf8')) + else: + self.source.write(_b('test: process-returncode\n' + 'failure: process-returncode [\n' + ' returncode %d\n' + ']\n' % returncode)) + self.source.seek(0) + self.done = True + + def read(self, count=-1): + if count == 0: + return _b('') + result = self.source.read(count) + if result: + self.lastoutput = result[-1] + return result + self._append_return_code_as_test() + return self.source.read(count) + + def readline(self): + result = self.source.readline() + if result: + self.lastoutput = result[-1] + return result + self._append_return_code_as_test() + return self.source.readline() + + def readlines(self): + result = self.source.readlines() + if result: + self.lastoutput = result[-1][-1] + self._append_return_code_as_test() + result.extend(self.source.readlines()) + return result class run(Command): @@ -38,40 +125,241 @@ default=False, help="Run only tests known to be failing."), optparse.Option("--parallel", action="store_true", default=False, help="Run tests in parallel processes."), + optparse.Option("--concurrency", action="store", type="int", default=0, + help="How many processes to use. The default (0) autodetects your CPU count."), + optparse.Option("--load-list", default=None, + help="Only run tests listed in the named file."), optparse.Option("--partial", action="store_true", - default=False, help="Only some tests will be run. Implied by --failing."), + default=False, + help="Only some tests will be run. Implied by --failing."), + optparse.Option("--subunit", action="store_true", + default=False, help="Display results in subunit format."), + optparse.Option("--full-results", action="store_true", + default=False, + help="No-op - deprecated and kept only for backwards compat."), + optparse.Option("--until-failure", action="store_true", + default=False, + help="Repeat the run again and again until failure occurs."), + optparse.Option("--analyze-isolation", action="store_true", + default=False, + help="Search the last test run for 2-test test isolation interactions."), + optparse.Option("--isolated", action="store_true", + default=False, + help="Run each test id in a separate test runner."), ] - args = [StringArgument('testargs', 0, None)] + args = [StringArgument('testfilters', 0, None), DoubledashArgument(), + StringArgument('testargs', 0, None)] # Can be assigned to to inject a custom command factory. command_factory = TestCommand + def _find_failing(self, repo): + run = repo.get_failing() + case = run.get_test() + ids = [] + def gather_errors(test_dict): + if test_dict['status'] == 'fail': + ids.append(test_dict['id']) + result = testtools.StreamToDict(gather_errors) + result.startTestRun() + try: + case.run(result) + finally: + result.stopTestRun() + return ids + def run(self): repo = self.repository_factory.open(self.ui.here) + if self.ui.options.failing or self.ui.options.analyze_isolation: + ids = self._find_failing(repo) + else: + ids = None + if self.ui.options.load_list: + list_ids = set() + # Should perhaps be text.. currently does its own decode. 
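
_find_failing above leans on testtools.StreamToDict, which buffers the StreamResult events for each test and hands the callback one dictionary per test; keeping the ids whose status is 'fail' is then trivial. The same pattern in isolation, with events supplied by hand instead of by repo.get_failing()::

    import testtools

    failing_ids = []

    def gather_errors(test_dict):
        # test_dict carries 'id', 'status', 'tags' and 'timestamps' among its keys.
        if test_dict['status'] == 'fail':
            failing_ids.append(test_dict['id'])

    result = testtools.StreamToDict(gather_errors)
    result.startTestRun()
    result.status(test_id='demo.test_ok', test_status='inprogress')
    result.status(test_id='demo.test_ok', test_status='success')
    result.status(test_id='demo.test_broken', test_status='inprogress')
    result.status(test_id='demo.test_broken', test_status='fail')
    result.stopTestRun()

    print(failing_ids)   # ['demo.test_broken']
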
+ with open(self.ui.options.load_list, 'rb') as list_file: + list_ids = set(parse_list(list_file.read())) + if ids is None: + # Use the supplied list verbatim + ids = list_ids + else: + # We have some already limited set of ids, just reduce to ids + # that are both failing and listed. + ids = list_ids.intersection(ids) + if self.ui.arguments['testfilters']: + filters = self.ui.arguments['testfilters'] + else: + filters = None testcommand = self.command_factory(self.ui, repo) - if self.ui.options.failing: - # Run only failing tests - run = repo.get_failing() + testcommand.setUp() + try: + if not self.ui.options.analyze_isolation: + cmd = testcommand.get_run_command(ids, self.ui.arguments['testargs'], + test_filters = filters) + if self.ui.options.isolated: + result = 0 + cmd.setUp() + try: + ids = cmd.list_tests() + finally: + cmd.cleanUp() + for test_id in ids: + cmd = testcommand.get_run_command([test_id], + self.ui.arguments['testargs'], test_filters=filters) + run_result = self._run_tests(cmd) + if run_result > result: + result = run_result + return result + else: + return self._run_tests(cmd) + else: + # Where do we source data about the cause of conflicts. + # XXX: Should instead capture the run id in with the failing test + # data so that we can deal with failures split across many partial + # runs. + latest_run = repo.get_latest_run() + # Stage one: reduce the list of failing tests (possibly further + # reduced by testfilters) to eliminate fails-on-own tests. + spurious_failures = set() + for test_id in ids: + cmd = testcommand.get_run_command([test_id], + self.ui.arguments['testargs'], test_filters = filters) + if not self._run_tests(cmd): + # If the test was filtered, it won't have been run. + if test_id in repo.get_test_ids(repo.latest_id()): + spurious_failures.add(test_id) + # This is arguably ugly, why not just tell the system that + # a pass here isn't a real pass? [so that when we find a + # test that is spuriously failing, we don't forget + # that it is actually failng. + # Alternatively, perhaps this is a case for data mining: + # when a test starts passing, keep a journal, and allow + # digging back in time to see that it was a failure, + # what it failed with etc... + # The current solution is to just let it get marked as + # a pass temporarily. + if not spurious_failures: + # All done. + return 0 + # spurious-failure -> cause. + test_conflicts = {} + for spurious_failure in spurious_failures: + candidate_causes = self._prior_tests( + latest_run, spurious_failure) + bottom = 0 + top = len(candidate_causes) + width = top - bottom + while width: + check_width = int(ceil(width / 2.0)) + cmd = testcommand.get_run_command( + candidate_causes[bottom:bottom + check_width] + + [spurious_failure], + self.ui.arguments['testargs']) + self._run_tests(cmd) + # check that the test we're probing still failed - still + # awkward. + found_fail = [] + def find_fail(test_dict): + if test_dict['id'] == spurious_failure: + found_fail.append(True) + checker = testtools.StreamToDict(find_fail) + checker.startTestRun() + try: + repo.get_failing().get_test().run(checker) + finally: + checker.stopTestRun() + if found_fail: + # Our conflict is in bottom - clamp the range down. + top = bottom + check_width + if width == 1: + # found the cause + test_conflicts[ + spurious_failure] = candidate_causes[bottom] + width = 0 + else: + width = top - bottom + else: + # Conflict in the range we did not run: discard bottom. 
+ bottom = bottom + check_width + if width == 1: + # there will be no more to check, so we didn't + # reproduce the failure. + width = 0 + else: + width = top - bottom + if spurious_failure not in test_conflicts: + # Could not determine cause + test_conflicts[spurious_failure] = 'unknown - no conflicts' + if test_conflicts: + table = [('failing test', 'caused by test')] + for failure, causes in test_conflicts.items(): + table.append((failure, causes)) + self.ui.output_table(table) + return 3 + return 0 + finally: + testcommand.cleanUp() + + def _prior_tests(self, run, failing_id): + """Calculate what tests from the test run run ran before test_id. + + Tests that ran in a different worker are not included in the result. + """ + if not getattr(self, '_worker_to_test', False): + # TODO: switch to route codes? case = run.get_test() - result = TestResult() - result.startTestRun() + # Use None if there is no worker-N tag + # If there are multiple, map them all. + # (worker-N -> [testid, ...]) + worker_to_test = {} + # (testid -> [workerN, ...]) + test_to_worker = {} + def map_test(test_dict): + tags = test_dict['tags'] + id = test_dict['id'] + workers = [] + for tag in tags: + if tag.startswith('worker-'): + workers.append(tag) + if not workers: + workers = [None] + for worker in workers: + worker_to_test.setdefault(worker, []).append(id) + test_to_worker.setdefault(id, []).extend(workers) + mapper = testtools.StreamToDict(map_test) + mapper.startTestRun() try: - case.run(result) + case.run(mapper) finally: - result.stopTestRun() - ids = [failure[0].id() for failure in result.failures] - ids.extend([error[0].id() for error in result.errors]) - else: - ids = None - cmd = testcommand.get_run_command(ids, self.ui.arguments['testargs']) + mapper.stopTestRun() + self._worker_to_test = worker_to_test + self._test_to_worker = test_to_worker + failing_workers = self._test_to_worker[failing_id] + prior_tests = [] + for worker in failing_workers: + worker_tests = self._worker_to_test[worker] + prior_tests.extend(worker_tests[:worker_tests.index(failing_id)]) + return prior_tests + + def _run_tests(self, cmd): + """Run the tests cmd was parameterised with.""" cmd.setUp() try: - run_procs = [('subunit', proc.stdout) for proc in cmd.run_tests()] - options = {} - if self.ui.options.failing: - options['partial'] = True - load_ui = decorator.UI(input_streams=run_procs, options=options, - decorated=self.ui) - load_cmd = load(load_ui) - return load_cmd.execute() + def run_tests(): + run_procs = [('subunit', ReturnCodeToSubunit(proc)) for proc in cmd.run_tests()] + options = {} + if (self.ui.options.failing or self.ui.options.analyze_isolation + or self.ui.options.isolated): + options['partial'] = True + load_ui = decorator.UI(input_streams=run_procs, options=options, + decorated=self.ui) + load_cmd = load(load_ui) + return load_cmd.execute() + if not self.ui.options.until_failure: + return run_tests() + else: + result = run_tests() + while not result: + result = run_tests() + return result finally: cmd.cleanUp() diff -Nru testrepository-0.0.5/testrepository/commands/slowest.py testrepository-0.0.18/testrepository/commands/slowest.py --- testrepository-0.0.5/testrepository/commands/slowest.py 1970-01-01 00:00:00.000000000 +0000 +++ testrepository-0.0.18/testrepository/commands/slowest.py 2013-02-07 03:34:22.000000000 +0000 @@ -0,0 +1,71 @@ +# +# Copyright (c) 2010 Testrepository Contributors +# +# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause +# license at the users choice. 
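
--analyze-isolation narrows each spurious failure down to a single earlier test by bisection: run the suspect after half of the candidate causes, and keep whichever half still makes it fail. The selection logic from the loop above, extracted into a self-contained sketch where still_fails stands in for "run these candidates plus the suspect and report whether the suspect failed"::

    from math import ceil

    def find_cause(candidate_causes, still_fails):
        """Return the single candidate that makes the suspect fail, or None.

        candidate_causes are the tests that ran before the suspect, in order.
        still_fails(candidates) runs those candidates followed by the suspect
        and returns True if the suspect failed in that run.
        """
        bottom = 0
        top = len(candidate_causes)
        while top - bottom:
            check_width = int(ceil((top - bottom) / 2.0))
            if still_fails(candidate_causes[bottom:bottom + check_width]):
                # The culprit is inside the half we just ran.
                top = bottom + check_width
                if check_width == 1:
                    return candidate_causes[bottom]
            else:
                # The culprit (if any) is in the half we did not run.
                bottom = bottom + check_width
        return None   # never reproduced: no single-test cause found

    # Toy predicate: pretend running after 'c' is what breaks the suspect.
    print(find_cause(['a', 'b', 'c', 'd'], lambda cands: 'c' in cands))   # c
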
A copy of both licenses are available in the +# project source as Apache-2.0 and BSD. You may not use this file except in +# compliance with one of these two licences. +# +# Unless required by applicable law or agreed to in writing, software +# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# license you chose for the specific language governing permissions and +# limitations under that license. + +"""Show the longest running tests in the repository.""" + +import math +from operator import itemgetter +import optparse + +from testrepository.commands import Command + + +class slowest(Command): + """Show the slowest tests from the last test run. + + This command shows a table, with the longest running + tests at the top. + """ + + DEFAULT_ROWS_SHOWN = 10 + TABLE_HEADER = ('Test id', 'Runtime (s)') + + options = [ + optparse.Option( + "--all", action="store_true", + default=False, help="Show timing for all tests."), + ] + + @staticmethod + def format_times(times): + times = list(times) + precision = 3 + digits_before_point = int( + math.log10(times[0][1])) + 1 + min_length = digits_before_point + precision + 1 + def format_time(time): + # Limit the number of digits after the decimal + # place, and also enforce a minimum width + # based on the longest duration + return "%*.*f" % (min_length, precision, time) + times = [(name, format_time(time)) for name, time in times] + return times + + def run(self): + repo = self.repository_factory.open(self.ui.here) + try: + latest_id = repo.latest_id() + except KeyError: + return 3 + # what happens when there is no timing info? + test_times = repo.get_test_times(repo.get_test_ids(latest_id)) + known_times =list( test_times['known'].items()) + known_times.sort(key=itemgetter(1), reverse=True) + if len(known_times) > 0: + if not self.ui.options.all: + known_times = known_times[:self.DEFAULT_ROWS_SHOWN] + known_times = self.format_times(known_times) + rows = [self.TABLE_HEADER] + known_times + self.ui.output_table(rows) + return 0 diff -Nru testrepository-0.0.5/testrepository/commands/stats.py testrepository-0.0.18/testrepository/commands/stats.py --- testrepository-0.0.5/testrepository/commands/stats.py 2010-01-10 11:32:56.000000000 +0000 +++ testrepository-0.0.18/testrepository/commands/stats.py 2012-01-10 13:30:21.000000000 +0000 @@ -14,7 +14,6 @@ """Report stats about a repository. Current vestigial.""" -from cStringIO import StringIO from testrepository.commands import Command diff -Nru testrepository-0.0.5/testrepository/__init__.py testrepository-0.0.18/testrepository/__init__.py --- testrepository-0.0.5/testrepository/__init__.py 2011-02-26 07:12:21.000000000 +0000 +++ testrepository-0.0.18/testrepository/__init__.py 2013-11-04 23:55:48.000000000 +0000 @@ -33,4 +33,4 @@ # established at this point, and setup.py will use a version of next-$(revno). # If the releaselevel is 'final', then the tarball will be major.minor.micro. # Otherwise it is major.minor.micro~$(revno). 
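
slowest, added above, sorts the recorded durations, keeps the top DEFAULT_ROWS_SHOWN unless --all is passed, and pads the formatted times based on the longest duration so the columns line up. The same table construction outside the command; the test ids and timings are made up::

    import math
    from operator import itemgetter

    # Invented timings for the illustration.
    times = {'pkg.tests.TestApi.test_bulk_load': 12.345678,
             'pkg.tests.TestApi.test_lookup': 0.01234,
             'pkg.tests.TestApi.test_update': 1.5}

    known_times = sorted(times.items(), key=itemgetter(1), reverse=True)
    precision = 3
    digits_before_point = int(math.log10(known_times[0][1])) + 1
    min_length = digits_before_point + precision + 1

    rows = [('Test id', 'Runtime (s)')]
    rows.extend((name, '%*.*f' % (min_length, precision, duration))
                for name, duration in known_times[:10])
    for test_id, runtime in rows:
        print('%-40s %s' % (test_id, runtime))
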
-__version__ = (0, 0, 5, 'final', 0) +__version__ = (0, 0, 18, 'final', 0) diff -Nru testrepository-0.0.5/testrepository/repository/file.py testrepository-0.0.18/testrepository/repository/file.py --- testrepository-0.0.5/testrepository/repository/file.py 2010-12-27 03:50:06.000000000 +0000 +++ testrepository-0.0.18/testrepository/repository/file.py 2013-04-01 09:18:36.000000000 +0000 @@ -14,18 +14,21 @@ """Persistent storage of test results.""" -from cStringIO import StringIO +from io import BytesIO try: import anydbm as dbm except ImportError: import dbm import errno +from operator import methodcaller import os.path import sys import tempfile import subunit from subunit import TestProtocolClient +import testtools +from testtools.compat import _b from testrepository.repository import ( AbstractRepository, @@ -33,6 +36,7 @@ AbstractTestRun, RepositoryNotFound, ) +from testrepository.utils import timedelta_to_seconds def atomicish_rename(source, target): @@ -47,7 +51,7 @@ """Create a repository at url/path.""" base = os.path.join(os.path.expanduser(url), '.testrepository') os.mkdir(base) - stream = file(os.path.join(base, 'format'), 'wb') + stream = open(os.path.join(base, 'format'), 'wt') try: stream.write('1\n') finally: @@ -60,8 +64,8 @@ path = os.path.expanduser(url) base = os.path.join(path, '.testrepository') try: - stream = file(os.path.join(base, 'format'), 'rb') - except (IOError, OSError), e: + stream = open(os.path.join(base, 'format'), 'rt') + except (IOError, OSError) as e: if e.errno == errno.ENOENT: raise RepositoryNotFound(url) raise @@ -95,7 +99,7 @@ return value def _next_stream(self): - next_content = file(os.path.join(self.base, 'next-stream'), 'rb').read() + next_content = open(os.path.join(self.base, 'next-stream'), 'rt').read() try: return int(next_content) except ValueError: @@ -112,20 +116,24 @@ def get_failing(self): try: - run_subunit_content = file( + run_subunit_content = open( os.path.join(self.base, "failing"), 'rb').read() except IOError: err = sys.exc_info()[1] if err.errno == errno.ENOENT: - run_subunit_content = '' + run_subunit_content = _b('') else: raise - return _DiskRun(run_subunit_content) + return _DiskRun(None, run_subunit_content) def get_test_run(self, run_id): - run_subunit_content = file( - os.path.join(self.base, str(run_id)), 'rb').read() - return _DiskRun(run_subunit_content) + try: + run_subunit_content = open( + os.path.join(self.base, str(run_id)), 'rb').read() + except IOError as e: + if e.errno == errno.ENOENT: + raise KeyError("No such run.") + return _DiskRun(run_id, run_subunit_content) def _get_inserter(self, partial): return _Inserter(self, partial) @@ -137,7 +145,13 @@ try: result = {} for test_id in test_ids: - duration = db.get(test_id, None) + if type(test_id) != str: + test_id = test_id.encode('utf8') + # gdbm does not support get(). + try: + duration = db[test_id] + except KeyError: + duration = None if duration is not None: result[test_id] = float(duration) return result @@ -153,7 +167,7 @@ # term. Likewise we don't fsync - this data isn't valuable enough to # force disk IO. 
prefix = self._path('next-stream') - stream = file(prefix + '.new', 'wb') + stream = open(prefix + '.new', 'wt') try: stream.write('%d\n' % value) finally: @@ -164,18 +178,33 @@ class _DiskRun(AbstractTestRun): """A test run that was inserted into the repository.""" - def __init__(self, subunit_content): + def __init__(self, run_id, subunit_content): """Create a _DiskRun with the content subunit_content.""" + self._run_id = run_id self._content = subunit_content + assert type(subunit_content) is bytes + + def get_id(self): + return self._run_id def get_subunit_stream(self): - return StringIO(self._content) + return BytesIO(self._content) def get_test(self): - return subunit.ProtocolTestCase(self.get_subunit_stream()) + case = subunit.ProtocolTestCase(self.get_subunit_stream()) + def wrap_result(result): + # Wrap in a router to mask out startTestRun/stopTestRun from the + # ExtendedToStreamDecorator. + result = testtools.StreamResultRouter(result, do_start_stop_run=False) + # Wrap that in ExtendedToStreamDecorator to convert v1 calls to + # StreamResult. + return testtools.ExtendedToStreamDecorator(result) + return testtools.DecorateTestCaseResult( + case, wrap_result, methodcaller('startTestRun'), + methodcaller('stopTestRun')) -class _SafeInserter(TestProtocolClient): +class _SafeInserter(object): def __init__(self, repository, partial=False): # XXX: Perhaps should factor into a decorator and use an unaltered @@ -189,13 +218,25 @@ self._times = {} self._test_start = None self._time = None - TestProtocolClient.__init__(self, stream) + subunit_client = testtools.StreamToExtendedDecorator( + TestProtocolClient(stream)) + self.hook = testtools.CopyStreamResult([ + subunit_client, + testtools.StreamToDict(self._handle_test)]) + self._stream = stream + + def _handle_test(self, test_dict): + start, stop = test_dict['timestamps'] + if None in (start, stop): + return + self._times[test_dict['id']] = str(timedelta_to_seconds(stop - start)) def startTestRun(self): - pass + self.hook.startTestRun() + self._run_id = None def stopTestRun(self): - # TestProtocolClient.stopTestRun(self) + self.hook.stopTestRun() self._stream.flush() self._stream.close() run_id = self._name() @@ -204,36 +245,30 @@ # May be too slow, but build and iterate. 
db = dbm.open(self._repository._path('times.dbm'), 'c') try: - db.update(self._times) + db_times = {} + for key, value in self._times.items(): + if type(key) != str: + key = key.encode('utf8') + db_times[key] = value + if getattr(db, 'update', None): + db.update(db_times) + else: + for key, value in db_times.items(): + db[key] = value finally: db.close() - return run_id + self._run_id = run_id + + def status(self, *args, **kwargs): + self.hook.status(*args, **kwargs) def _cancel(self): """Cancel an insertion.""" self._stream.close() os.unlink(self.fname) - def startTest(self, test): - result = TestProtocolClient.startTest(self, test) - self._test_start = self._time - return result - - def stopTest(self, test): - result = TestProtocolClient.stopTest(self, test) - if None in (self._test_start, self._time): - return result - duration_delta = self._time - self._test_start - duration_seconds = ((duration_delta.microseconds + - (duration_delta.seconds + duration_delta.days * 24 * 3600) - * 10**6) / float(10**6)) - self._times[test.id()] = str(duration_seconds) - return result - - def time(self, timestamp): - result = TestProtocolClient.time(self, timestamp) - self._time = timestamp - return result + def get_id(self): + return self._run_id class _FailingInserter(_SafeInserter): @@ -249,34 +284,36 @@ return self._repository._allocate() def stopTestRun(self): - run_id = _SafeInserter.stopTestRun(self) + super(_Inserter, self).stopTestRun() # XXX: locking (other inserts may happen while we update the failing # file). # Combine failing + this run : strip passed tests, add failures. # use memory repo to aggregate. a bit awkward on layering ;). - import memory + # Should just pull the failing items aside as they happen perhaps. + # Or use a router and avoid using a memory object at all. + from testrepository.repository import memory repo = memory.Repository() if self.partial: # Seed with current failing - inserter = repo.get_inserter() + inserter = testtools.ExtendedToStreamDecorator(repo.get_inserter()) inserter.startTestRun() failing = self._repository.get_failing() failing.get_test().run(inserter) inserter.stopTestRun() - inserter= repo.get_inserter(partial=True) + inserter= testtools.ExtendedToStreamDecorator(repo.get_inserter(partial=True)) inserter.startTestRun() - run = self._repository.get_test_run(run_id) + run = self._repository.get_test_run(self.get_id()) run.get_test().run(inserter) inserter.stopTestRun() # and now write to failing inserter = _FailingInserter(self._repository) - inserter.startTestRun() + _inserter = testtools.ExtendedToStreamDecorator(inserter) + _inserter.startTestRun() try: - try: - repo.get_failing().get_test().run(inserter) - except: - inserter._cancel() - raise - finally: - inserter.stopTestRun() - return run_id + repo.get_failing().get_test().run(_inserter) + except: + inserter._cancel() + raise + else: + _inserter.stopTestRun() + return self.get_id() diff -Nru testrepository-0.0.5/testrepository/repository/__init__.py testrepository-0.0.18/testrepository/repository/__init__.py --- testrepository-0.0.5/testrepository/repository/__init__.py 2010-12-14 07:12:02.000000000 +0000 +++ testrepository-0.0.18/testrepository/repository/__init__.py 2013-04-01 09:05:39.000000000 +0000 @@ -27,7 +27,8 @@ the initialize function in the appropriate repository module. 
""" -import subunit.test_results +from testtools import StreamToDict, TestResult + class AbstractRepositoryFactory(object): """Interface for making or opening repositories.""" @@ -72,14 +73,16 @@ Repository implementations should implement _get_inserter. + get_inserter() does not add timing data to streams: it should be + provided by the caller of get_inserter (e.g. commands.load). + :param partial: If True, the stream being inserted only executed some tests rather than all the projects tests. :return an inserter: Inserters meet the extended TestResult protocol that testtools 0.9.2 and above offer. The startTestRun and stopTestRun methods in particular must be called. """ - return subunit.test_results.AutoTimingTestResultDecorator( - self._get_inserter(partial)) + return self._get_inserter(partial) def _get_inserter(self): """Get an inserter for get_inserter. @@ -88,6 +91,13 @@ """ raise NotImplementedError(self._get_inserter) + def get_latest_run(self): + """Return the latest run. + + Equivalent to get_test_run(latest_id()). + """ + return self.get_test_run(self.latest_id()) + def get_test_run(self, run_id): """Retrieve a TestRun object for run_id. @@ -124,9 +134,40 @@ """Return the run id for the most recently inserted test run.""" raise NotImplementedError(self.latest_id) + def get_test_ids(self, run_id): + """Return the test ids from the specified run. + + :param run_id: the id of the test run to query. + :return: a list of test ids for the tests that + were part of the specified test run. + """ + run = self.get_test_run(run_id) + ids = [] + def gather(test_dict): + ids.append(test_dict['id']) + result = StreamToDict(gather) + result.startTestRun() + try: + run.get_test().run(result) + finally: + result.stopTestRun() + return ids + class AbstractTestRun(object): - """A test run that has been stored in a repository.""" + """A test run that has been stored in a repository. + + Should implement the StreamResult protocol as well + as the testrepository specific methods documented here. + """ + + def get_id(self): + """Get the id of the test run. + + Sometimes test runs will not have an id, e.g. test runs for + 'failing'. In that case, this should return None. + """ + raise NotImplementedError(self.get_id) def get_subunit_stream(self): """Get a subunit stream for this test run.""" @@ -136,7 +177,9 @@ """Get a testtools.TestCase-like object that can be run. :return: A TestCase like object which can be run to get the individual - tests reported to a testtools.TestResult. + tests reported to a testtools.StreamResult/TestResult. + (Clients of repository should provide an ExtendedToStreamDecorator + decorator to permit either API to be used). 
""" raise NotImplementedError(self.get_test) diff -Nru testrepository-0.0.5/testrepository/repository/memory.py testrepository-0.0.18/testrepository/repository/memory.py --- testrepository-0.0.5/testrepository/repository/memory.py 2010-12-14 07:12:02.000000000 +0000 +++ testrepository-0.0.18/testrepository/repository/memory.py 2013-07-17 09:03:54.000000000 +0000 @@ -14,9 +14,14 @@ """In memory storage of test results.""" -from cStringIO import StringIO +from extras import try_import + +OrderedDict = try_import('collections.OrderedDict', dict) +from io import BytesIO +from operator import methodcaller import subunit +import testtools from testrepository.repository import ( AbstractRepository, @@ -53,7 +58,7 @@ def __init__(self): # Test runs: self._runs = [] - self._failing = {} # id -> test + self._failing = OrderedDict() # id -> test self._times = {} # id -> duration def count(self): @@ -63,6 +68,8 @@ return _Failures(self) def get_test_run(self, run_id): + if run_id < 0: + raise KeyError("No such run.") return self._runs[run_id] def latest_id(self): @@ -90,21 +97,32 @@ def __init__(self, repository): self._repository = repository + def get_id(self): + return None + def get_subunit_stream(self): - result = StringIO() + result = BytesIO() serialiser = subunit.TestProtocolClient(result) self.run(serialiser) result.seek(0) return result def get_test(self): - return self + def wrap_result(result): + # Wrap in a router to mask out startTestRun/stopTestRun from the + # ExtendedToStreamDecorator. + result = testtools.StreamResultRouter(result, do_start_stop_run=False) + # Wrap that in ExtendedToStreamDecorator to convert v1 calls to + # StreamResult. + return testtools.ExtendedToStreamDecorator(result) + return testtools.DecorateTestCaseResult( + self, wrap_result, methodcaller('startTestRun'), + methodcaller('stopTestRun')) def run(self, result): - for outcome, test, details in self._repository._failing.itervalues(): - result.startTest(test) - getattr(result, 'add' + outcome)(test, details=details) - result.stopTest(test) + # Speaks original. 
+ for case in self._repository._failing.values(): + case.run(result) class _Inserter(AbstractTestRun): @@ -113,78 +131,69 @@ def __init__(self, repository, partial): self._repository = repository self._partial = partial - self._outcomes = [] - self._time = None - self._test_start = None + self._tests = [] + # Subunit V1 stream for get_subunit_stream + self._subunit = None def startTestRun(self): - pass + self._subunit = BytesIO() + serialiser = subunit.TestProtocolClient(self._subunit) + serialiser = testtools.StreamToExtendedDecorator(serialiser) + self._hook = testtools.CopyStreamResult([ + testtools.StreamToDict(self._handle_test), + serialiser]) + self._hook.startTestRun() + + def _handle_test(self, test_dict): + self._tests.append(test_dict) + start, stop = test_dict['timestamps'] + if None in (start, stop): + return + duration_delta = stop - start + duration_seconds = ((duration_delta.microseconds + + (duration_delta.seconds + duration_delta.days * 24 * 3600) + * 10**6) / 10.0**6) + self._repository._times[test_dict['id']] = duration_seconds def stopTestRun(self): + self._hook.stopTestRun() self._repository._runs.append(self) + self._run_id = len(self._repository._runs) - 1 if not self._partial: - self._repository._failing = {} - for record in self._outcomes: - test_id = record[1].id() - if record[0] in ('Failure', 'Error'): - self._repository._failing[test_id] = record + self._repository._failing = OrderedDict() + for test_dict in self._tests: + test_id = test_dict['id'] + if test_dict['status'] == 'fail': + case = testtools.testresult.real.test_dict_to_case(test_dict) + self._repository._failing[test_id] = case else: self._repository._failing.pop(test_id, None) - return len(self._repository._runs) - 1 - - def startTest(self, test): - self._test_start = self._time - - def stopTest(self, test): - if None in (self._test_start, self._time): - return - duration_delta = self._time - self._test_start - duration_seconds = ((duration_delta.microseconds + - (duration_delta.seconds + duration_delta.days * 24 * 3600) - * 10**6) / 10**6) - self._repository._times[test.id()] = duration_seconds + return self._run_id - def _addOutcome(self, outcome, test, details): - self._outcomes.append((outcome, test, details)) + def status(self, *args, **kwargs): + self._hook.status(*args, **kwargs) - def addSuccess(self, test, details=None): - self._addOutcome('Success', test, details) - - def addFailure(self, test, err=None, details=None): - # Don't support old interface for now. - assert err is None - self._addOutcome('Failure', test, details) - - def addError(self, test, err=None, details=None): - assert err is None - self._addOutcome('Error', test, details) - - def addExpectedFailure(self, test, err=None, details=None): - assert err is None - self._addOutcome('ExpectedFailure', test, details) - - def addUnexpectedSuccess(self, details=None): - self._addOutcome('UnexpectedSccess', test, details) - - def addSkip(self, test, reason=None, details=None): - assert reason is None - self._addOutcome('Skip', test, details) + def get_id(self): + return self._run_id def get_subunit_stream(self): - result = StringIO() - serialiser = subunit.TestProtocolClient(result) - self.run(serialiser) - result.seek(0) - return result + self._subunit.seek(0) + return self._subunit def get_test(self): - return self + def wrap_result(result): + # Wrap in a router to mask out startTestRun/stopTestRun from the + # ExtendedToStreamDecorator. 
+ result = testtools.StreamResultRouter(result, do_start_stop_run=False) + # Wrap that in ExtendedToStreamDecorator to convert v1 calls to + # StreamResult. + return testtools.ExtendedToStreamDecorator(result) + return testtools.DecorateTestCaseResult( + self, wrap_result, methodcaller('startTestRun'), + methodcaller('stopTestRun')) def run(self, result): - for outcome, test, details in self._outcomes: - result.startTest(test) - getattr(result, 'add' + outcome)(test, details=details) - result.stopTest(test) - - def time(self, timestamp): - self._time = timestamp + # Speaks original. + for test_dict in self._tests: + case = testtools.testresult.real.test_dict_to_case(test_dict) + case.run(result) diff -Nru testrepository-0.0.5/testrepository/results.py testrepository-0.0.18/testrepository/results.py --- testrepository-0.0.5/testrepository/results.py 2010-11-11 01:58:47.000000000 +0000 +++ testrepository-0.0.18/testrepository/results.py 2013-07-10 20:28:21.000000000 +0000 @@ -1,13 +1,49 @@ -from subunit import test_results +# +# Copyright (c) 2010 Testrepository Contributors +# +# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause +# license at the users choice. A copy of both licenses are available in the +# project source as Apache-2.0 and BSD. You may not use this file except in +# compliance with one of these two licences. +# +# Unless required by applicable law or agreed to in writing, software +# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# license you chose for the specific language governing permissions and +# limitations under that license. -class TestResultFilter(test_results.TestResultFilter): - """Test result filter.""" +from testtools import StreamSummary - def _filtered(self): - super(TestResultFilter, self)._filtered() - # XXX: This is really crappy. It assumes that the test result we - # actually care about is decorated twice. Probably the more correct - # thing to do is fix subunit so that incrementing 'testsRun' on a test - # result increments them on the decorated test result. - self.decorated.decorated.testsRun += 1 +from testrepository.utils import timedelta_to_seconds + + +class SummarizingResult(StreamSummary): + + def __init__(self): + super(SummarizingResult, self).__init__() + + def startTestRun(self): + super(SummarizingResult, self).startTestRun() + self._first_time = None + self._last_time = None + + def status(self, *args, **kwargs): + if kwargs.get('timestamp') is not None: + timestamp = kwargs['timestamp'] + if self._last_time is None: + self._first_time = timestamp + self._last_time = timestamp + if timestamp < self._first_time: + self._first_time = timestamp + if timestamp > self._last_time: + self._last_time = timestamp + super(SummarizingResult, self).status(*args, **kwargs) + + def get_num_failures(self): + return len(self.failures) + len(self.errors) + + def get_time_taken(self): + if None in (self._last_time, self._first_time): + return None + return timedelta_to_seconds(self._last_time - self._first_time) diff -Nru testrepository-0.0.5/testrepository/setuptools_command.py testrepository-0.0.18/testrepository/setuptools_command.py --- testrepository-0.0.5/testrepository/setuptools_command.py 1970-01-01 00:00:00.000000000 +0000 +++ testrepository-0.0.18/testrepository/setuptools_command.py 2013-07-10 08:17:11.000000000 +0000 @@ -0,0 +1,94 @@ +# +# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. 
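
SummarizingResult.get_time_taken above, like the repository inserters, calls testrepository.utils.timedelta_to_seconds, which this diff references but never shows. Judging from the equivalent inline arithmetic in repository/memory.py it is presumably along these lines (a guess at the helper; on Python 2.7+ delta.total_seconds() gives the same answer)::

    import datetime

    def timedelta_to_seconds(delta):
        """Return the total seconds in a datetime.timedelta as a float."""
        # Mirrors the inline computation in repository/memory.py; equivalent
        # to delta.total_seconds() where that method exists.
        return (delta.microseconds +
                (delta.seconds + delta.days * 24 * 3600) * 10.0 ** 6) / 10.0 ** 6

    print(timedelta_to_seconds(datetime.timedelta(seconds=1, microseconds=250000)))
    # 1.25
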
+# Copyright (c) 2013 Testrepository Contributors +# +# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause +# license at the users choice. A copy of both licenses are available in the +# project source as Apache-2.0 and BSD. You may not use this file except in +# compliance with one of these two licences. +# +# Unless required by applicable law or agreed to in writing, software +# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# license you chose for the specific language governing permissions and +# limitations under that license. + +"""setuptools/distutils commands to run testr via setup.py + +Currently provides 'testr' which runs tests using testr. You can pass +--coverage which will also export PYTHON='coverage run --source ' +and automatically combine the coverage from each testr backend test runner +after the run completes. + +To use, just use setuptools/distribute and depend on testr, and it should be +picked up automatically (as the commands are exported in the testrepository +package metadata. +""" + +from distutils import cmd +import distutils.errors +import os +import sys + +from testrepository import commands + + +class Testr(cmd.Command): + + description = "Run unit tests using testr" + + user_options = [ + ('coverage', None, "Replace PYTHON with coverage and merge coverage " + "from each testr worker."), + ('testr-args=', 't', "Run 'testr' with these args"), + ('omit=', 'o', 'Files to omit from coverage calculations'), + ('slowest', None, "Show slowest test times after tests complete."), + ] + + boolean_options = ['coverage', 'slowest'] + + def _run_testr(self, *args): + return commands.run_argv([sys.argv[0]] + list(args), + sys.stdin, sys.stdout, sys.stderr) + + def initialize_options(self): + self.testr_args = None + self.coverage = None + self.omit = "" + self.slowest = None + + def finalize_options(self): + if self.testr_args is None: + self.testr_args = [] + else: + self.testr_args = self.testr_args.split() + if self.omit: + self.omit = "--omit=%s" % self.omit + + def run(self): + """Set up testr repo, then run testr""" + if not os.path.isdir(".testrepository"): + self._run_testr("init") + + if self.coverage: + self._coverage_before() + testr_ret = self._run_testr("run", "--parallel", *self.testr_args) + if testr_ret: + raise distutils.errors.DistutilsError( + "testr failed (%d)" % testr_ret) + if self.slowest: + print ("Slowest Tests") + self._run_testr("slowest") + if self.coverage: + self._coverage_after() + + def _coverage_before(self): + package = self.distribution.get_name() + if package.startswith('python-'): + package = package[7:] + options = "--source %s --parallel-mode" % package + os.environ['PYTHON'] = ("coverage run %s" % options) + + def _coverage_after(self): + os.system("coverage combine") + os.system("coverage html -d ./cover %s" % self.omit) diff -Nru testrepository-0.0.5/testrepository/testcommand.py testrepository-0.0.18/testrepository/testcommand.py --- testrepository-0.0.5/testrepository/testcommand.py 2010-12-27 18:35:56.000000000 +0000 +++ testrepository-0.0.18/testrepository/testcommand.py 2013-07-17 02:50:44.000000000 +0000 @@ -14,18 +14,27 @@ """The test command that test repository knows how to run.""" -import ConfigParser -from fixtures import Fixture +from extras import try_imports + +from collections import defaultdict +ConfigParser = try_imports(['ConfigParser', 'configparser']) import itertools import operator 
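
Because setup.py registers Testr under the 'distutils.commands' entry point group, a project only needs testrepository installed in its environment for `python setup.py testr` to appear, as the module docstring above says; no cmdclass wiring is required. A hypothetical downstream setup.py (the project and package names are invented)::

    from setuptools import setup

    setup(
        name='example-project',           # invented project name
        version='0.1',
        packages=['example_project'],     # invented package
        # testrepository itself must be installed in the environment for the
        # entry point (and therefore the 'testr' command) to be discovered;
        # listing it here simply records the test-time dependency.
        tests_require=['testrepository'],
    )

With that in place, `python setup.py testr` runs the suite in parallel via the project's .testr.conf, and `python setup.py testr --coverage --testr-args="--until-failure"` wraps each backend in coverage and repeats until a failure appears.
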
import os.path import re -import string import subprocess import sys import tempfile +import multiprocessing from textwrap import dedent +from fixtures import Fixture + +from testrepository.testlist import ( + parse_enumeration, + write_list, + ) + testrconf_help = dedent(""" Configuring via .testr.conf: --- @@ -39,6 +48,10 @@ that have a shell. The full list of options and variables for .testr.conf: + * filter_tags -- a list of tags which should be used to filter test counts. + This is useful for stripping out non-test results from the subunit stream + such as Zope test layers. These filtered items are still considered for + test failures. * test_command -- command line to run to execute tests. * test_id_option -- the value to substitute into test_command when specific test ids should be run. @@ -49,6 +62,18 @@ test_command should output on stdout all the test ids that would have been run if every other option and argument was honoured, one per line. This is required for parallel testing, and is substituted into $LISTOPT. + * test_run_concurrency -- Optional call out to establish concurrency. + Should return one line containing the number of concurrent test runner + processes to run. + * instance_provision -- provision one or more test run environments. + Accepts $INSTANCE_COUNT for the number of instances desired. + * instance_execute -- execute a test runner process in a given environment. + Accepts $INSTANCE_ID, $FILES and $COMMAND. Paths in $FILES should be + synchronised into the test runner environment filesystem. $COMMAND can + be adjusted if the paths are synched with different names. + * instance_dispose -- dispose of one or more test running environments. + Accepts $INSTANCE_IDS. + * group_regex -- If set group tests by the matched section of the test id. * $IDOPTION -- the variable to use to trigger running some specific tests. * $IDFILE -- A file created before the test command is run and deleted afterwards which contains a list of test ids, one per line. This can @@ -56,19 +81,70 @@ * $IDLIST -- A list of the test ids to run, separated by spaces. IDLIST defaults to an empty string when no test ids are known and no explicit default is provided. This will not handle test ids with spaces. + + See the testrepository manual for example .testr.conf files in different + programming languages. + """) +class CallWhenProcFinishes(object): + """Convert a process object to trigger a callback when returncode is set. + + This just wraps the entire object and when the returncode attribute access + finds a set value, calls the callback. + """ + + def __init__(self, process, callback): + """Adapt process + + :param process: A subprocess.Popen object. + :param callback: The process to call when the process completes. 
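A concrete configuration makes the option list above easier to follow. The sketch below writes a small `.testr.conf` the same way the test suite's `set_config` helper does; the particular test_command and group_regex values are illustrative assumptions, not taken from this patch:

    # Illustrative [DEFAULT] section; only test_command is mandatory.
    config_text = (
        '[DEFAULT]\n'
        'test_command=python -m subunit.run discover . $LISTOPT $IDOPTION\n'
        'test_id_option=--load-list $IDFILE\n'
        'test_list_option=--list\n'
        'group_regex=([^\\.]*\\.)*\n'
    )
    with open('.testr.conf', 'wt') as stream:
        stream.write(config_text)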
+ """ + self._proc = process + self._callback = callback + self._done = False + + @property + def stdin(self): + return self._proc.stdin + + @property + def stdout(self): + return self._proc.stdout + + @property + def stderr(self): + return self._proc.stderr + + @property + def returncode(self): + result = self._proc.returncode + if not self._done and result is not None: + self._done = True + self._callback() + return result + + def wait(self): + return self._proc.wait() + + +compiled_re_type = type(re.compile('')) + class TestListingFixture(Fixture): """Write a temporary file to disk with test ids in it.""" def __init__(self, test_ids, cmd_template, listopt, idoption, ui, - repository, parallel=True, listpath=None): + repository, parallel=True, listpath=None, parser=None, + test_filters=None, instance_source=None, group_callback=None): """Create a TestListingFixture. :param test_ids: The test_ids to use. May be None indicating that - no ids filtering is requested: run whatever the test program - chooses to. + no ids are known and they should be discovered by listing or + configuration if they must be known to run tests. Test ids are + needed to run tests when filtering or partitioning is needed: if + the run concurrency is > 1 partitioning is needed, and filtering is + needed if the user has passed in filters. :param cmd_template: string to be filled out with IDFILE. :param listopt: Option to substitute into LISTOPT to cause test listing @@ -81,6 +157,22 @@ --parallel run recursively. :param listpath: The file listing path to use. If None, a unique path is created. + :param parser: An options parser for reading options from. + :param test_filters: An optional list of test filters to apply. Each + filter should be a string suitable for passing to re.compile. + filters are applied using search() rather than match(), so if + anchoring is needed it should be included in the regex. + The test ids used for executing are the union of all the individual + filters: to take the intersection instead, craft a single regex that + matches all your criteria. Filters are automatically applied by + run_tests(), or can be applied by calling filter_tests(test_ids). + :param instance_source: A source of test run instances. Must support + obtain_instance(max_concurrency) -> id and release_instance(id) + calls. + :param group_callback: If supplied, should be a function that accepts a + test id and returns a group id. A group id is an arbitrary value + used as a dictionary key in the scheduler. All test ids with the + same group id are scheduled onto the same backend test process. """ self.test_ids = test_ids self.template = cmd_template @@ -90,33 +182,70 @@ self.repository = repository self.parallel = parallel self._listpath = listpath + self._parser = parser + self.test_filters = test_filters + self._group_callback = group_callback + self._instance_source = instance_source def setUp(self): super(TestListingFixture, self).setUp() variable_regex = '\$(IDOPTION|IDFILE|IDLIST|LISTOPT)' variables = {} + list_variables = {'LISTOPT': self.listopt} cmd = self.template + try: + default_idstr = self._parser.get('DEFAULT', 'test_id_list_default') + list_variables['IDLIST'] = default_idstr + # In theory we should also support casting this into IDFILE etc - + # needs this horrible class refactored. 
+ except ConfigParser.NoOptionError as e: + if e.message != "No option 'test_id_list_default' in section: 'DEFAULT'": + raise + default_idstr = None + def list_subst(match): + return list_variables.get(match.groups(1)[0], '') + self.list_cmd = re.sub(variable_regex, list_subst, cmd) + nonparallel = (not self.parallel or not + getattr(self.ui, 'options', None) or not + getattr(self.ui.options, 'parallel', None)) + if nonparallel: + self.concurrency = 1 + else: + self.concurrency = self.ui.options.concurrency + if not self.concurrency: + self.concurrency = self.callout_concurrency() + if not self.concurrency: + self.concurrency = self.local_concurrency() + if not self.concurrency: + self.concurrency = 1 if self.test_ids is None: + if self.concurrency == 1: + if default_idstr: + self.test_ids = default_idstr.split() + if self.concurrency != 1 or self.test_filters is not None: + # Have to be able to tell each worker what to run / filter + # tests. + self.test_ids = self.list_tests() + if self.test_ids is None: + # No test ids to supply to the program. self.list_file_name = None name = '' - self.test_ids = [] + idlist = '' else: + self.test_ids = self.filter_tests(self.test_ids) name = self.make_listfile() variables['IDFILE'] = name - idlist = ' '.join(self.test_ids) + idlist = ' '.join(self.test_ids) variables['IDLIST'] = idlist def subst(match): return variables.get(match.groups(1)[0], '') - if not self.test_ids: + if self.test_ids is None: # No test ids, no id option. idoption = '' else: idoption = re.sub(variable_regex, subst, self.idoption) variables['IDOPTION'] = idoption self.cmd = re.sub(variable_regex, subst, cmd) - # and once more with list option support. - variables['LISTOPT'] = self.listopt - self.list_cmd = re.sub(variable_regex, subst, cmd) def make_listfile(self): name = None @@ -128,7 +257,7 @@ fd, name = tempfile.mkstemp() stream = os.fdopen(fd, 'wb') self.list_file_name = name - stream.write('\n'.join(list(self.test_ids) + [''])) + write_list(stream, self.test_ids) stream.close() except: if name: @@ -137,56 +266,103 @@ self.addCleanup(os.unlink, name) return name + def filter_tests(self, test_ids): + """Filter test_ids by the test_filters. + + :return: A list of test ids. + """ + if self.test_filters is None: + return test_ids + filters = map(re.compile, self.test_filters) + def include(test_id): + for pred in filters: + if pred.search(test_id): + return True + return list(filter(include, test_ids)) + def list_tests(self): """List the tests returned by list_cmd. :return: A list of test ids. """ if '$LISTOPT' not in self.template: - raise ValueError("LISTOPT not configured in .testr.conf.") - self.ui.output_values([('running', self.list_cmd)]) - run_proc = self.ui.subprocess_Popen(self.list_cmd, shell=True, - stdout=subprocess.PIPE, stdin=subprocess.PIPE) - out, err = run_proc.communicate() - # Should we raise on non-zero exit? - ids = [id for id in out.split('\n') if id] - return ids + raise ValueError("LISTOPT not configured in .testr.conf") + instance, list_cmd = self._per_instance_command(self.list_cmd) + try: + self.ui.output_values([('running', list_cmd)]) + run_proc = self.ui.subprocess_Popen(list_cmd, shell=True, + stdout=subprocess.PIPE, stdin=subprocess.PIPE) + out, err = run_proc.communicate() + if run_proc.returncode != 0: + raise ValueError( + "Non-zero exit code (%d) from test listing." 
+ " stdout=%r, stderr=%r" % (run_proc.returncode, out, err)) + ids = parse_enumeration(out) + return ids + finally: + if instance: + self._instance_source.release_instance(instance) + + def _per_instance_command(self, cmd): + """Customise cmd to with an instance-id. + + :param concurrency: The number of instances to ask for (used to avoid + death-by-1000 cuts of latency. + """ + if self._instance_source is None: + return None, cmd + instance = self._instance_source.obtain_instance(self.concurrency) + if instance is not None: + try: + instance_prefix = self._parser.get( + 'DEFAULT', 'instance_execute') + variables = { + 'INSTANCE_ID': instance.decode('utf8'), + 'COMMAND': cmd, + # --list-tests cannot use FILES, so handle it being unset. + 'FILES': getattr(self, 'list_file_name', None) or '', + } + variable_regex = '\$(INSTANCE_ID|COMMAND|FILES)' + def subst(match): + return variables.get(match.groups(1)[0], '') + cmd = re.sub(variable_regex, subst, instance_prefix) + except ConfigParser.NoOptionError: + # Per-instance execution environment not configured. + pass + return instance, cmd def run_tests(self): """Run the tests defined by the command and ui. :return: A list of spawned processes. """ - if not self.ui.options.parallel or not self.parallel: - concurrency = 1 - else: - concurrency = self.local_concurrency() - if not concurrency: - concurrency = 1 - if concurrency == 1: - self.ui.output_values([('running', self.cmd)]) - run_proc = self.ui.subprocess_Popen(self.cmd, shell=True, + result = [] + test_ids = self.test_ids + if self.concurrency == 1 and (test_ids is None or test_ids): + # Have to customise cmd here, as instances are allocated + # just-in-time. XXX: Indicates this whole region needs refactoring. + instance, cmd = self._per_instance_command(self.cmd) + self.ui.output_values([('running', cmd)]) + run_proc = self.ui.subprocess_Popen(cmd, shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE) # Prevent processes stalling if they read from stdin; we could # pass this through in future, but there is no point doing that # until we have a working can-run-debugger-inline story. run_proc.stdin.close() - return [run_proc] - result = [] - if not self.test_ids: - # Discover the tests to run - test_ids = self.list_tests() - else: - # Use the already requested tests. - test_ids = self.test_ids - test_id_groups = self.partition_tests(test_ids, concurrency) + if instance: + return [CallWhenProcFinishes(run_proc, + lambda:self._instance_source.release_instance(instance))] + else: + return [run_proc] + test_id_groups = self.partition_tests(test_ids, self.concurrency) for test_ids in test_id_groups: if not test_ids: # No tests in this partition continue fixture = self.useFixture(TestListingFixture(test_ids, self.template, self.listopt, self.idoption, self.ui, - self.repository, parallel=False)) + self.repository, parallel=False, parser=self._parser, + instance_source=self._instance_source)) result.extend(fixture.run_tests()) return result @@ -204,40 +380,92 @@ partitions = [list() for i in range(concurrency)] timed_partitions = [[0.0, partition] for partition in partitions] time_data = self.repository.get_test_times(test_ids) - timed = time_data['known'] - unknown = time_data['unknown'] + timed_tests = time_data['known'] + unknown_tests = time_data['unknown'] + # Group tests: generate group_id -> test_ids. 
+ group_ids = defaultdict(list) + if self._group_callback is None: + group_callback = lambda _:None + else: + group_callback = self._group_callback + for test_id in test_ids: + group_id = group_callback(test_id) or test_id + group_ids[group_id].append(test_id) + # Time groups: generate three sets of groups: + # - fully timed dict(group_id -> time), + # - partially timed dict(group_id -> time) and + # - unknown (set of group_id) + # We may in future treat partially timed different for scheduling, but + # at least today we just schedule them after the fully timed groups. + timed = {} + partial = {} + unknown = [] + for group_id, group_tests in group_ids.items(): + untimed_ids = unknown_tests.intersection(group_tests) + group_time = sum([timed_tests[test_id] + for test_id in untimed_ids.symmetric_difference(group_tests)]) + if not untimed_ids: + timed[group_id] = group_time + elif group_time: + partial[group_id] = group_time + else: + unknown.append(group_id) # Scheduling is NP complete in general, so we avoid aiming for # perfection. A quick approximation that is sufficient for our general # needs: - # sort the tests by time - # allocate to partitions by putting each test in to the partition with - # the current lowest time. - queue = sorted(timed.items(), key=operator.itemgetter(1), reverse=True) - for test_id, duration in queue: - timed_partitions[0][0] = timed_partitions[0][0] + duration - timed_partitions[0][1].append(test_id) - timed_partitions.sort(key=operator.itemgetter(0)) - # Assign tests with unknown times in round robin fashion to the partitions. - for partition, test_id in itertools.izip(itertools.cycle(partitions), unknown): - partition.append(test_id) + # sort the groups by time + # allocate to partitions by putting each group in to the partition with + # the current (lowest time, shortest length[in tests]) + def consume_queue(groups): + queue = sorted( + groups.items(), key=operator.itemgetter(1), reverse=True) + for group_id, duration in queue: + timed_partitions[0][0] = timed_partitions[0][0] + duration + timed_partitions[0][1].extend(group_ids[group_id]) + timed_partitions.sort(key=lambda item:(item[0], len(item[1]))) + consume_queue(timed) + consume_queue(partial) + # Assign groups with entirely unknown times in round robin fashion to + # the partitions. + for partition, group_id in zip(itertools.cycle(partitions), unknown): + partition.extend(group_ids[group_id]) return partitions + def callout_concurrency(self): + """Callout for user defined concurrency.""" + try: + concurrency_cmd = self._parser.get( + 'DEFAULT', 'test_run_concurrency') + except ConfigParser.NoOptionError: + return None + run_proc = self.ui.subprocess_Popen(concurrency_cmd, shell=True, + stdout=subprocess.PIPE, stdin=subprocess.PIPE) + out, err = run_proc.communicate() + if run_proc.returncode: + raise ValueError( + "test_run_concurrency failed: exit code %d, stderr=%r" % ( + run_proc.returncode, err)) + return int(out.strip()) + def local_concurrency(self): - if sys.platform == 'linux2': - concurrency = None - for line in file('/proc/cpuinfo', 'rb'): - if line.startswith('processor'): - concurrency = int(line[line.find(':')+1:]) + 1 - return concurrency - # No concurrency logic known. - return None + try: + return multiprocessing.cpu_count() + except NotImplementedError: + # No concurrency logic known. + return None -class TestCommand(object): +class TestCommand(Fixture): """Represents the test command defined in .testr.conf. :ivar run_factory: The fixture to use to execute a command. 
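The rewritten partition_tests above schedules whole groups rather than individual tests: groups are sorted by known duration, heaviest first, and each is appended to whichever partition currently has the lowest (time, test count). A reduced sketch of that greedy allocation, with made-up group timings:

    import operator

    def greedy_partition(timed_groups, concurrency):
        """Assign group ids to the currently cheapest of `concurrency` partitions."""
        partitions = [[0.0, []] for _ in range(concurrency)]
        queue = sorted(timed_groups.items(), key=operator.itemgetter(1), reverse=True)
        for group_id, duration in queue:
            partitions[0][0] += duration
            partitions[0][1].append(group_id)
            partitions.sort(key=lambda item: (item[0], len(item[1])))
        return [ids for _, ids in partitions]

    print(greedy_partition({'a': 9.0, 'b': 5.0, 'c': 4.0, 'd': 1.0}, 2))
    # -> [['b', 'c'], ['a', 'd']]   (totals 9.0 and 10.0 seconds)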
:ivar oldschool: Use failing.list rather than a unique file path. + + TestCommand is a Fixture. Many uses of it will not require it to be setUp, + but calling get_run_command does require it: the fixture state is used to + track test environment instances, which are disposed of when cleanUp + happens. This is not done per-run-command, because test bisection (amongst + other things) uses multiple get_run_command configurations. """ run_factory = TestListingFixture @@ -251,36 +479,70 @@ :param repository: A testrepository.repository.Repository used for determining test times when partitioning tests. """ + super(TestCommand, self).__init__() self.ui = ui self.repository = repository + self._instances = None + self._allocated_instances = None + + def setUp(self): + super(TestCommand, self).setUp() + self._instances = set() + self._allocated_instances = set() + self.addCleanup(self._dispose_instances) + + def _dispose_instances(self): + instances = self._instances + if instances is None: + return + self._instances = None + self._allocated_instances = None + try: + dispose_cmd = self.get_parser().get('DEFAULT', 'instance_dispose') + except (ValueError, ConfigParser.NoOptionError): + return + variable_regex = '\$INSTANCE_IDS' + dispose_cmd = re.sub(variable_regex, ' '.join(sorted(instance.decode('utf') for instance in instances)), + dispose_cmd) + self.ui.output_values([('running', dispose_cmd)]) + run_proc = self.ui.subprocess_Popen(dispose_cmd, shell=True) + run_proc.communicate() + if run_proc.returncode: + raise ValueError('Disposing of instances failed, return %d' % + run_proc.returncode) - def get_run_command(self, test_ids=None, testargs=()): - """Get the command that would be run to run tests.""" + def get_parser(self): + """Get a parser with the .testr.conf in it.""" parser = ConfigParser.ConfigParser() + # This possibly should push down into UI. + if self.ui.here == 'memory:': + return parser if not parser.read(os.path.join(self.ui.here, '.testr.conf')): raise ValueError("No .testr.conf config file") + return parser + + def get_run_command(self, test_ids=None, testargs=(), test_filters=None): + """Get the command that would be run to run tests. + + See TestListingFixture for the definition of test_ids and test_filters. + """ + if self._instances is None: + raise TypeError('TestCommand not setUp') + parser = self.get_parser() try: command = parser.get('DEFAULT', 'test_command') - except ConfigParser.NoOptionError, e: + except ConfigParser.NoOptionError as e: if e.message != "No option 'test_command' in section: 'DEFAULT'": raise raise ValueError("No test_command option present in .testr.conf") elements = [command] + list(testargs) cmd = ' '.join(elements) - if test_ids is None: - try: - idlist = parser.get('DEFAULT', 'test_id_list_default') - test_ids = idlist.split() - except ConfigParser.NoOptionError, e: - if e.message != "No option 'test_id_list_default' in section: 'DEFAULT'": - raise - test_ids = None idoption = '' if '$IDOPTION' in command: # IDOPTION is used, we must have it configured. try: idoption = parser.get('DEFAULT', 'test_id_option') - except ConfigParser.NoOptionError, e: + except ConfigParser.NoOptionError as e: if e.message != "No option 'test_id_option' in section: 'DEFAULT'": raise raise ValueError("No test_id_option option present in .testr.conf") @@ -289,15 +551,75 @@ # LISTOPT is used, test_list_option must be configured. 
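Because TestCommand is now a Fixture, it must be set up before get_run_command is called, and cleaning it up is what disposes of any provisioned instances. A hedged usage sketch, where ui, repository and the filter value stand in for real objects:

    from testrepository.testcommand import TestCommand

    command = TestCommand(ui, repository)   # ui/repository supplied by the caller
    with command:                           # Fixture protocol: setUp on enter, cleanUp on exit
        fixture = command.get_run_command(test_ids=None, test_filters=['^mypkg\\.'])
        with fixture:                       # resolves concurrency and builds the command line
            procs = fixture.run_tests()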
try: listopt = parser.get('DEFAULT', 'test_list_option') - except ConfigParser.NoOptionError, e: + except ConfigParser.NoOptionError as e: if e.message != "No option 'test_list_option' in section: 'DEFAULT'": raise raise ValueError("No test_list_option option present in .testr.conf") + try: + group_regex = parser.get('DEFAULT', 'group_regex') + except ConfigParser.NoOptionError: + group_regex = None + if group_regex: + def group_callback(test_id, regex=re.compile(group_regex)): + match = regex.match(test_id) + if match: + return match.group(0) + else: + group_callback = None if self.oldschool: listpath = os.path.join(self.ui.here, 'failing.list') result = self.run_factory(test_ids, cmd, listopt, idoption, - self.ui, self.repository, listpath=listpath) + self.ui, self.repository, listpath=listpath, parser=parser, + test_filters=test_filters, instance_source=self, + group_callback=group_callback) else: result = self.run_factory(test_ids, cmd, listopt, idoption, - self.ui, self.repository) + self.ui, self.repository, parser=parser, + test_filters=test_filters, instance_source=self, + group_callback=group_callback) return result + + def get_filter_tags(self): + parser = self.get_parser() + try: + tags = parser.get('DEFAULT', 'filter_tags') + except ConfigParser.NoOptionError as e: + if e.message != "No option 'filter_tags' in section: 'DEFAULT'": + raise + return set() + return set([tag.strip() for tag in tags.split()]) + + def obtain_instance(self, concurrency): + """If possible, get one or more test run environment instance ids. + + Note this is not threadsafe: calling it from multiple threads would + likely result in shared results. + """ + while len(self._instances) < concurrency: + try: + cmd = self.get_parser().get('DEFAULT', 'instance_provision') + except ConfigParser.NoOptionError: + # Instance allocation not configured + return None + variable_regex = '\$INSTANCE_COUNT' + cmd = re.sub(variable_regex, + str(concurrency - len(self._instances)), cmd) + self.ui.output_values([('running', cmd)]) + proc = self.ui.subprocess_Popen( + cmd, shell=True, stdout=subprocess.PIPE) + out, _ = proc.communicate() + if proc.returncode: + raise ValueError('Provisioning instances failed, return %d' % + proc.returncode) + new_instances = set([item.strip() for item in out.split()]) + self._instances.update(new_instances) + # Cached first. + available_instances = self._instances - self._allocated_instances + # We only ask for instances when one should be available. + result = available_instances.pop() + self._allocated_instances.add(result) + return result + + def release_instance(self, instance_id): + """Return instance_ids to the pool for reuse.""" + self._allocated_instances.remove(instance_id) diff -Nru testrepository-0.0.5/testrepository/testlist.py testrepository-0.0.18/testrepository/testlist.py --- testrepository-0.0.5/testrepository/testlist.py 1970-01-01 00:00:00.000000000 +0000 +++ testrepository-0.0.18/testrepository/testlist.py 2013-03-09 10:32:05.000000000 +0000 @@ -0,0 +1,60 @@ +# +# Copyright (c) 2012 Testrepository Contributors +# +# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause +# license at the users choice. A copy of both licenses are available in the +# project source as Apache-2.0 and BSD. You may not use this file except in +# compliance with one of these two licences. 
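When group_regex is set, get_run_command builds a group_callback that returns match.group(0), so every test id sharing the matched prefix lands in the same backend process. A small sketch of the common group-by-class pattern; the regex value is an assumption, not part of this patch:

    import re

    regex = re.compile(r'([^\.]*\.)*')   # everything up to the final dotted segment

    def group_callback(test_id):
        match = regex.match(test_id)
        if match:
            return match.group(0)

    print(group_callback('pkg.module.TestClass.test_one'))   # pkg.module.TestClass.
    print(group_callback('pkg.module.TestClass.test_two'))   # same group, same worker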
+# +# Unless required by applicable law or agreed to in writing, software +# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# license you chose for the specific language governing permissions and +# limitations under that license. + +"""Handling of lists of tests - common code to --load-list etc.""" + +from io import BytesIO + +from extras import try_import +bytestream_to_streamresult = try_import('subunit.ByteStreamToStreamResult') +stream_result = try_import('testtools.testresult.doubles.StreamResult') + +from testtools.compat import _b, _u + + +def write_list(stream, test_ids): + """Write test_ids out to stream. + + :param stream: A file-like object. + :param test_ids: An iterable of test ids. + """ + # May need utf8 explicitly? + stream.write(_b('\n'.join(list(test_ids) + ['']))) + + +def parse_list(list_bytes): + """Parse list_bytes into a list of test ids.""" + return _v1(list_bytes) + + +def parse_enumeration(enumeration_bytes): + """Parse enumeration_bytes into a list of test_ids.""" + # If subunit v2 is available, use it. + if bytestream_to_streamresult is not None: + return _v2(enumeration_bytes) + else: + return _v1(enumeration_bytes) + + +def _v1(list_bytes): + return [id.strip() for id in list_bytes.decode('utf8').split(_u('\n')) + if id.strip()] + + +def _v2(list_bytes): + parser = bytestream_to_streamresult(BytesIO(list_bytes), + non_subunit_name='stdout') + result = stream_result() + parser.run(result) + return [event[1] for event in result._events if event[2]=='exists'] diff -Nru testrepository-0.0.5/testrepository/tests/arguments/__init__.py testrepository-0.0.18/testrepository/tests/arguments/__init__.py --- testrepository-0.0.5/testrepository/tests/arguments/__init__.py 2010-02-28 10:59:30.000000000 +0000 +++ testrepository-0.0.18/testrepository/tests/arguments/__init__.py 2012-12-18 22:28:01.000000000 +0000 @@ -19,6 +19,8 @@ def test_suite(): names = [ 'command', + 'doubledash', + 'path', 'string', ] module_names = ['testrepository.tests.arguments.test_' + name for name in diff -Nru testrepository-0.0.5/testrepository/tests/arguments/test_doubledash.py testrepository-0.0.18/testrepository/tests/arguments/test_doubledash.py --- testrepository-0.0.5/testrepository/tests/arguments/test_doubledash.py 1970-01-01 00:00:00.000000000 +0000 +++ testrepository-0.0.18/testrepository/tests/arguments/test_doubledash.py 2012-12-18 22:28:01.000000000 +0000 @@ -0,0 +1,43 @@ +# +# Copyright (c) 2012 Testrepository Contributors +# +# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause +# license at the users choice. A copy of both licenses are available in the +# project source as Apache-2.0 and BSD. You may not use this file except in +# compliance with one of these two licences. +# +# Unless required by applicable law or agreed to in writing, software +# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# license you chose for the specific language governing permissions and +# limitations under that license. 
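testlist.py accepts either a plain newline-separated id list or, when python-subunit v2 is importable, a byte stream of 'exists' events. A sketch exercising both paths plus write_list (assumes subunit and testtools are installed):

    from io import BytesIO

    from subunit import StreamResultToBytes

    from testrepository.testlist import parse_enumeration, parse_list, write_list

    # v1: newline-separated ids.
    assert parse_list(b'pkg.test_a\npkg.test_b\n') == ['pkg.test_a', 'pkg.test_b']

    # v2: subunit stream of 'exists' status events.
    buffer = BytesIO()
    stream = StreamResultToBytes(buffer)
    stream.status(test_id='pkg.test_a', test_status='exists')
    assert parse_enumeration(buffer.getvalue()) == ['pkg.test_a']

    # write_list expects a binary stream.
    out = BytesIO()
    write_list(out, ['pkg.test_a'])
    assert out.getvalue() == b'pkg.test_a\n'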
+ +"""Tests for the doubledash argument type.""" + +from testrepository.arguments import doubledash +from testrepository.tests import ResourcedTestCase + + +class TestArgument(ResourcedTestCase): + + def test_parses_as_string(self): + arg = doubledash.DoubledashArgument() + result = arg.parse(['--']) + self.assertEqual(['--'], result) + + def test_fixed_name(self): + arg = doubledash.DoubledashArgument() + self.assertEqual('doubledash', arg.name) + + def test_fixed_min_max(self): + arg = doubledash.DoubledashArgument() + self.assertEqual(0, arg.minimum_count) + self.assertEqual(1, arg.maximum_count) + + def test_parses_non_dash_dash_as_nothing(self): + arg = doubledash.DoubledashArgument() + args = ['foo', '--'] + result = arg.parse(args) + self.assertEqual([], result) + self.assertEqual(['foo', '--'], args) + diff -Nru testrepository-0.0.5/testrepository/tests/arguments/test_path.py testrepository-0.0.18/testrepository/tests/arguments/test_path.py --- testrepository-0.0.5/testrepository/tests/arguments/test_path.py 1970-01-01 00:00:00.000000000 +0000 +++ testrepository-0.0.18/testrepository/tests/arguments/test_path.py 2013-02-07 04:22:11.000000000 +0000 @@ -0,0 +1,49 @@ +# +# Copyright (c) 2012 Testrepository Contributors +# +# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause +# license at the users choice. A copy of both licenses are available in the +# project source as Apache-2.0 and BSD. You may not use this file except in +# compliance with one of these two licences. +# +# Unless required by applicable law or agreed to in writing, software +# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# license you chose for the specific language governing permissions and +# limitations under that license. 
+ +"""Tests for the path argument type.""" + +import os +from os.path import join +import tempfile + +from fixtures import TempDir +from testtools.matchers import raises + +from testrepository.arguments import path +from testrepository.tests import ResourcedTestCase + + +class TestArgument(ResourcedTestCase): + + def test_parses_as_string(self): + existingfile = tempfile.NamedTemporaryFile() + self.addCleanup(existingfile.close) + arg = path.ExistingPathArgument('path') + result = arg.parse([existingfile.name]) + self.assertEqual([existingfile.name], result) + + def test_rejects_doubledash(self): + base = self.useFixture(TempDir()).path + arg = path.ExistingPathArgument('path') + self.addCleanup(os.chdir, os.getcwd()) + os.chdir(base) + with open('--', 'wt') as f:pass + self.assertThat(lambda: arg.parse(['--']), raises(ValueError)) + + def test_rejects_missing_file(self): + base = self.useFixture(TempDir()).path + arg = path.ExistingPathArgument('path') + self.assertThat(lambda: arg.parse([join(base, 'foo')]), + raises(ValueError)) diff -Nru testrepository-0.0.5/testrepository/tests/arguments/test_string.py testrepository-0.0.18/testrepository/tests/arguments/test_string.py --- testrepository-0.0.5/testrepository/tests/arguments/test_string.py 2010-02-28 10:59:30.000000000 +0000 +++ testrepository-0.0.18/testrepository/tests/arguments/test_string.py 2012-12-18 22:28:01.000000000 +0000 @@ -14,6 +14,8 @@ """Tests for the string argument type.""" +from testtools.matchers import raises + from testrepository.arguments import string from testrepository.tests import ResourcedTestCase @@ -24,3 +26,7 @@ arg = string.StringArgument('name') result = arg.parse(['load']) self.assertEqual(['load'], result) + + def test_rejects_doubledash(self): + arg = string.StringArgument('name') + self.assertThat(lambda: arg.parse(['--']), raises(ValueError)) diff -Nru testrepository-0.0.5/testrepository/tests/commands/__init__.py testrepository-0.0.18/testrepository/tests/commands/__init__.py --- testrepository-0.0.5/testrepository/tests/commands/__init__.py 2010-12-06 05:35:17.000000000 +0000 +++ testrepository-0.0.18/testrepository/tests/commands/__init__.py 2012-01-10 13:30:21.000000000 +0000 @@ -27,6 +27,7 @@ 'load', 'quickstart', 'run', + 'slowest', 'stats', ] module_names = ['testrepository.tests.commands.test_' + name for name in diff -Nru testrepository-0.0.5/testrepository/tests/commands/test_failing.py testrepository-0.0.18/testrepository/tests/commands/test_failing.py --- testrepository-0.0.5/testrepository/tests/commands/test_failing.py 2010-11-11 01:58:47.000000000 +0000 +++ testrepository-0.0.18/testrepository/tests/commands/test_failing.py 2013-04-11 11:19:07.000000000 +0000 @@ -17,12 +17,20 @@ import doctest import testtools -from testtools.matchers import DocTestMatches +from testtools.compat import _b +from testtools.matchers import ( + DocTestMatches, + Equals, + ) from testrepository.commands import failing from testrepository.ui.model import UI from testrepository.repository import memory -from testrepository.tests import ResourcedTestCase, Wildcard +from testrepository.tests import ( + ResourcedTestCase, + StubTestCommand, + Wildcard, + ) class TestCommand(ResourcedTestCase): @@ -39,29 +47,24 @@ repo = cmd.repository_factory.initialise(ui.here) inserter = repo.get_inserter() inserter.startTestRun() - class Cases(ResourcedTestCase): - def failing(self): - self.fail('foo') - def ok(self): - pass - Cases('failing').run(inserter) - Cases('ok').run(inserter) + inserter.status(test_id='failing', 
test_status='fail') + inserter.status(test_id='ok', test_status='success') inserter.stopTestRun() self.assertEqual(1, cmd.execute()) # We should have seen test outputs (of the failure) and summary data. self.assertEqual([ ('results', Wildcard), - ('values', [('id', 0), ('tests', 1), ('failures', 1)])], + ('summary', False, 1, None, Wildcard, None, [('id', 0, None), ('failures', 1, None)])], ui.outputs) suite = ui.outputs[0][1] - result = testtools.TestResult() + result = testtools.StreamSummary() result.startTestRun() try: suite.run(result) finally: result.stopTestRun() self.assertEqual(1, result.testsRun) - self.assertEqual(1, len(result.failures)) + self.assertEqual(1, len(result.errors)) def test_with_subunit_shows_subunit_stream(self): ui, cmd = self.get_test_ui_and_cmd(options=[('subunit', True)]) @@ -69,42 +72,44 @@ repo = cmd.repository_factory.initialise(ui.here) inserter = repo.get_inserter() inserter.startTestRun() - class Cases(ResourcedTestCase): - def failing(self): - self.fail('foo') - def ok(self): - pass - Cases('failing').run(inserter) - Cases('ok').run(inserter) + inserter.status(test_id='failing', test_status='fail') + inserter.status(test_id='ok', test_status='success') inserter.stopTestRun() - self.assertEqual(1, cmd.execute()) + self.assertEqual(0, cmd.execute()) self.assertEqual(1, len(ui.outputs)) self.assertEqual('stream', ui.outputs[0][0]) - self.assertThat(ui.outputs[0][1], DocTestMatches("""...test: ...failing + self.assertThat(ui.outputs[0][1].decode('utf8'), + DocTestMatches("""...test: ...failing ...failure: ...failing...""", doctest.ELLIPSIS)) + def test_with_subunit_no_failures_exit_0(self): + ui, cmd = self.get_test_ui_and_cmd(options=[('subunit', True)]) + cmd.repository_factory = memory.RepositoryFactory() + repo = cmd.repository_factory.initialise(ui.here) + inserter = repo.get_inserter() + inserter.startTestRun() + inserter.status(test_id='ok', test_status='success') + inserter.stopTestRun() + self.assertEqual(0, cmd.execute()) + self.assertEqual(1, len(ui.outputs)) + self.assertEqual('stream', ui.outputs[0][0]) + self.assertThat(ui.outputs[0][1], Equals(_b(''))) + def test_with_list_shows_list_of_tests(self): ui, cmd = self.get_test_ui_and_cmd(options=[('list', True)]) cmd.repository_factory = memory.RepositoryFactory() repo = cmd.repository_factory.initialise(ui.here) inserter = repo.get_inserter() inserter.startTestRun() - class Cases(ResourcedTestCase): - def failing1(self): - self.fail('foo') - def failing2(self): - self.fail('bar') - def ok(self): - pass - Cases('failing1').run(inserter) - Cases('ok').run(inserter) - Cases('failing2').run(inserter) + inserter.status(test_id='failing1', test_status='fail') + inserter.status(test_id='ok', test_status='success') + inserter.status(test_id='failing2', test_status='fail') inserter.stopTestRun() self.assertEqual(1, cmd.execute(), ui.outputs) self.assertEqual(1, len(ui.outputs)) self.assertEqual('tests', ui.outputs[0][0]) self.assertEqual( - set([Cases('failing1').id(), Cases('failing2').id()]), + set(['failing1', 'failing2']), set([test.id() for test in ui.outputs[0][1]])) def test_uses_get_failing(self): @@ -116,13 +121,8 @@ repo = open(url) inserter = repo.get_inserter() inserter.startTestRun() - class Cases(ResourcedTestCase): - def failing(self): - self.fail('foo') - def ok(self): - pass - Cases('failing').run(inserter) - Cases('ok').run(inserter) + inserter.status(test_id='failing', test_status='fail') + inserter.status(test_id='ok', test_status='success') inserter.stopTestRun() orig = 
repo.get_failing def get_failing(): @@ -134,4 +134,3 @@ cmd.repository_factory.initialise(ui.here) self.assertEqual(1, cmd.execute()) self.assertEqual([True], calls) - diff -Nru testrepository-0.0.5/testrepository/tests/commands/test_help.py testrepository-0.0.18/testrepository/tests/commands/test_help.py --- testrepository-0.0.5/testrepository/tests/commands/test_help.py 2010-01-10 11:32:56.000000000 +0000 +++ testrepository-0.0.18/testrepository/tests/commands/test_help.py 2012-12-18 22:28:01.000000000 +0000 @@ -14,6 +14,10 @@ """Tests for the help command.""" +from inspect import getdoc + +from testtools.matchers import Contains + from testrepository.commands import help, load from testrepository.ui.model import UI from testrepository.tests import ResourcedTestCase @@ -30,7 +34,18 @@ def test_shows_rest_of__doc__(self): ui, cmd = self.get_test_ui_and_cmd(args=['load']) cmd.execute() - self.assertEqual([('rest', load.load.__doc__)], ui.outputs) + expected_doc = getdoc(load.load) + self.assertThat(ui.outputs[-1][1], Contains(expected_doc)) + + def test_shows_cmd_arguments(self): + ui, cmd = self.get_test_ui_and_cmd(args=['load']) + cmd.execute() + self.assertThat(ui.outputs[-1][1], Contains("streams*")) + + def test_shows_cmd_partial(self): + ui, cmd = self.get_test_ui_and_cmd(args=['load']) + cmd.execute() + self.assertThat(ui.outputs[-1][1], Contains("--partial")) def test_shows_general_help_with_no_args(self): ui, cmd = self.get_test_ui_and_cmd() diff -Nru testrepository-0.0.5/testrepository/tests/commands/test_last.py testrepository-0.0.18/testrepository/tests/commands/test_last.py --- testrepository-0.0.5/testrepository/tests/commands/test_last.py 2010-11-11 01:58:47.000000000 +0000 +++ testrepository-0.0.18/testrepository/tests/commands/test_last.py 2013-04-11 11:18:22.000000000 +0000 @@ -15,47 +15,100 @@ """Tests for the last command.""" import testtools +from testtools.compat import _b +from testtools.matchers import Equals from testrepository.commands import last from testrepository.ui.model import UI from testrepository.repository import memory -from testrepository.tests import ResourcedTestCase, Wildcard +from testrepository.tests import ( + ResourcedTestCase, + StubTestCommand, + Wildcard, + ) class TestCommand(ResourcedTestCase): - def get_test_ui_and_cmd(self,args=()): - ui = UI(args=args) + def get_test_ui_and_cmd(self, args=(), options=()): + ui = UI(args=args, options=options) cmd = last.last(ui) ui.set_command(cmd) return ui, cmd - def test_shows_last_run(self): + def test_shows_last_run_first_run(self): ui, cmd = self.get_test_ui_and_cmd() cmd.repository_factory = memory.RepositoryFactory() repo = cmd.repository_factory.initialise(ui.here) inserter = repo.get_inserter() inserter.startTestRun() - class Cases(ResourcedTestCase): - def failing(self): - self.fail('foo') - def ok(self): - pass - Cases('failing').run(inserter) - Cases('ok').run(inserter) - id = inserter.stopTestRun() + inserter.status(test_id='failing', test_status='fail') + inserter.status(test_id='ok', test_status='success') + inserter.stopTestRun() + id = inserter.get_id() self.assertEqual(1, cmd.execute()) # We should have seen test outputs (of the failure) and summary data. 
self.assertEqual([ ('results', Wildcard), - ('values', [('id', id), ('tests', 2), ('failures', 1)])], + ('summary', False, 2, None, Wildcard, Wildcard, + [('id', id, None), ('failures', 1, None)])], ui.outputs) suite = ui.outputs[0][1] - result = testtools.TestResult() + result = testtools.StreamSummary() result.startTestRun() try: suite.run(result) finally: result.stopTestRun() - self.assertEqual(1, result.testsRun) - self.assertEqual(1, len(result.failures)) + self.assertEqual(1, len(result.errors)) + self.assertEqual(2, result.testsRun) + + def _add_run(self, repo): + inserter = repo.get_inserter() + inserter.startTestRun() + inserter.status(test_id='failing', test_status='fail') + inserter.status(test_id='ok', test_status='success') + inserter.stopTestRun() + return inserter.get_id() + + def test_shows_last_run(self): + ui, cmd = self.get_test_ui_and_cmd() + cmd.repository_factory = memory.RepositoryFactory() + repo = cmd.repository_factory.initialise(ui.here) + self._add_run(repo) + id = self._add_run(repo) + self.assertEqual(1, cmd.execute()) + # We should have seen test outputs (of the failure) and summary data. + self.assertEqual([ + ('results', Wildcard), + ('summary', False, 2, 0, Wildcard, Wildcard, + [('id', id, None), ('failures', 1, 0)])], + ui.outputs) + suite = ui.outputs[0][1] + result = testtools.StreamSummary() + result.startTestRun() + try: + suite.run(result) + finally: + result.stopTestRun() + self.assertEqual(1, len(result.errors)) + self.assertEqual(2, result.testsRun) + + def test_shows_subunit_stream(self): + ui, cmd = self.get_test_ui_and_cmd(options=[('subunit', True)]) + cmd.repository_factory = memory.RepositoryFactory() + repo = cmd.repository_factory.initialise(ui.here) + self._add_run(repo) + self.assertEqual(0, cmd.execute()) + # We should have seen test outputs (of the failure) and summary data. 
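The 'summary' tuples asserted throughout these tests follow the model UI's summary output. The field order below is an assumption inferred from these expectations rather than documented API, and the numeric values are invented:

    # ('summary', successful, tests run, delta vs previous run, run time,
    #  time delta, [(name, value, delta), ...])
    ('summary', False, 2, 0, 3.0, -1.0, [('id', 1, None), ('failures', 1, 0)])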
+ self.assertEqual([ + ('stream', Wildcard), + ], ui.outputs) + self.assertThat(ui.outputs[0][1], Equals(_b("""\ +test: failing +failure: failing [ multipart +] +test: ok +successful: ok [ multipart +] +"""))) diff -Nru testrepository-0.0.5/testrepository/tests/commands/test_list_tests.py testrepository-0.0.18/testrepository/tests/commands/test_list_tests.py --- testrepository-0.0.5/testrepository/tests/commands/test_list_tests.py 2010-12-06 06:21:07.000000000 +0000 +++ testrepository-0.0.18/testrepository/tests/commands/test_list_tests.py 2013-03-15 07:16:02.000000000 +0000 @@ -14,9 +14,14 @@ """Tests for the list_tests command.""" +from io import BytesIO import os.path from subprocess import PIPE +from extras import try_import +import subunit +v2_avail = try_import('subunit.ByteStreamToStreamResult') +from testtools.compat import _b from testtools.matchers import MatchesException from testrepository.commands import list_tests @@ -47,19 +52,16 @@ def config_path(self): return os.path.join(self.tempdir, '.testr.conf') - def set_config(self, bytes): - stream = file(self.config_path(), 'wb') - try: - stream.write(bytes) - finally: - stream.close() + def set_config(self, text): + with open(self.config_path(), 'wt') as stream: + stream.write(text) def setup_repo(self, cmd, ui): repo = cmd.repository_factory.initialise(ui.here) inserter = repo.get_inserter() inserter.startTestRun() - make_test('passing', True).run(inserter) - make_test('failing', False).run(inserter) + inserter.status(test_id='passing', test_status='success') + inserter.status(test_id='failing', test_status='fail') inserter.stopTestRun() def test_no_config_file_errors(self): @@ -71,9 +73,17 @@ MatchesException(ValueError('No .testr.conf config file'))) def test_calls_list_tests(self): - ui, cmd = self.get_test_ui_and_cmd(args=('bar', 'quux')) + ui, cmd = self.get_test_ui_and_cmd(args=('--', 'bar', 'quux')) cmd.repository_factory = memory.RepositoryFactory() - ui.proc_outputs = ['returned\n\nvalues\n'] + if v2_avail: + buffer = BytesIO() + stream = subunit.StreamResultToBytes(buffer) + stream.status(test_id='returned', test_status='exists') + stream.status(test_id='values', test_status='exists') + subunit_bytes = buffer.getvalue() + else: + subunit_bytes = _b('returned\n\nvalues\n') + ui.proc_outputs = [subunit_bytes] self.setup_repo(cmd, ui) self.set_config( '[DEFAULT]\ntest_command=foo $LISTOPT $IDOPTION\n' @@ -86,5 +96,34 @@ ('popen', (expected_cmd,), {'shell': True, 'stdout': PIPE, 'stdin': PIPE}), ('communicate',), - ('stream', 'returned\nvalues\n'), + ('stream', _b('returned\nvalues\n')), ], ui.outputs) + + def test_filters_use_filtered_list(self): + ui, cmd = self.get_test_ui_and_cmd( + args=('returned', '--', 'bar', 'quux')) + cmd.repository_factory = memory.RepositoryFactory() + if v2_avail: + buffer = BytesIO() + stream = subunit.StreamResultToBytes(buffer) + stream.status(test_id='returned', test_status='exists') + stream.status(test_id='values', test_status='exists') + subunit_bytes = buffer.getvalue() + else: + subunit_bytes = _b('returned\nvalues\n') + ui.proc_outputs = [subunit_bytes] + self.setup_repo(cmd, ui) + self.set_config( + '[DEFAULT]\ntest_command=foo $LISTOPT $IDOPTION\n' + 'test_id_option=--load-list $IDFILE\n' + 'test_list_option=--list\n') + retcode = cmd.execute() + expected_cmd = 'foo --list bar quux' + self.assertEqual([ + ('values', [('running', expected_cmd)]), + ('popen', (expected_cmd,), + {'shell': True, 'stdout': PIPE, 'stdin': PIPE}), + ('communicate',), + ('stream', _b('returned\n')), + ], 
ui.outputs) + self.assertEqual(0, retcode) diff -Nru testrepository-0.0.5/testrepository/tests/commands/test_load.py testrepository-0.0.18/testrepository/tests/commands/test_load.py --- testrepository-0.0.5/testrepository/tests/commands/test_load.py 2011-01-14 00:14:13.000000000 +0000 +++ testrepository-0.0.18/testrepository/tests/commands/test_load.py 2013-07-13 11:46:33.000000000 +0000 @@ -1,5 +1,5 @@ # -# Copyright (c) 2009 Testrepository Contributors +# Copyright (c) 2009, 2012 Testrepository Contributors # # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the @@ -14,20 +14,37 @@ """Tests for the load command.""" +from datetime import datetime, timedelta +from io import BytesIO +from tempfile import NamedTemporaryFile + +from extras import try_import +v2_avail = try_import('subunit.ByteStreamToStreamResult') +import subunit +from subunit import iso8601 + import testtools +from testtools.compat import _b +from testtools.content import text_content from testtools.matchers import MatchesException +from testtools.tests.helpers import LoggingResult from testrepository.commands import load from testrepository.ui.model import UI -from testrepository.tests import ResourcedTestCase, Wildcard +from testrepository.tests import ( + ResourcedTestCase, + StubTestCommand, + Wildcard, + ) from testrepository.tests.test_repository import RecordingRepositoryFactory +from testrepository.tests.repository.test_file import HomeDirTempDir from testrepository.repository import memory, RepositoryNotFound class TestCommandLoad(ResourcedTestCase): def test_load_loads_subunit_stream_to_default_repository(self): - ui = UI([('subunit', '')]) + ui = UI([('subunit', _b(''))]) cmd = load.load(ui) ui.set_command(cmd) calls = [] @@ -43,8 +60,29 @@ # Results loaded self.assertEqual(1, repo.count()) + def test_load_loads_named_file_if_given(self): + datafile = NamedTemporaryFile() + self.addCleanup(datafile.close) + ui = UI([('subunit', _b(''))], args=[datafile.name]) + cmd = load.load(ui) + ui.set_command(cmd) + calls = [] + cmd.repository_factory = RecordingRepositoryFactory(calls, + memory.RepositoryFactory()) + repo = cmd.repository_factory.initialise(ui.here) + del calls[:] + self.assertEqual(0, cmd.execute()) + # Right repo + self.assertEqual([('open', ui.here)], calls) + # Stream not consumed - otherwise CLI would block when someone runs + # 'testr load foo'. XXX: Be nice if we could declare that the argument, + # which is a path, is to be an input stream. 
+ self.assertTrue('subunit' in ui.input_streams) + # Results loaded + self.assertEqual(1, repo.count()) + def test_load_initialises_repo_if_doesnt_exist_and_init_forced(self): - ui = UI([('subunit', '')], options=[('force_init', True)]) + ui = UI([('subunit', _b(''))], options=[('force_init', True)]) cmd = load.load(ui) ui.set_command(cmd) calls = [] @@ -55,7 +93,7 @@ self.assertEqual([('open', ui.here), ('initialise', ui.here)], calls) def test_load_errors_if_repo_doesnt_exist(self): - ui = UI([('subunit', '')]) + ui = UI([('subunit', _b(''))]) cmd = load.load(ui) ui.set_command(cmd) calls = [] @@ -69,7 +107,7 @@ ui.outputs[0][1], MatchesException(RepositoryNotFound('memory:'))) def test_load_returns_0_normally(self): - ui = UI([('subunit', '')]) + ui = UI([('subunit', _b(''))]) cmd = load.load(ui) ui.set_command(cmd) cmd.repository_factory = memory.RepositoryFactory() @@ -77,7 +115,15 @@ self.assertEqual(0, cmd.execute()) def test_load_returns_1_on_failed_stream(self): - ui = UI([('subunit', 'test: foo\nfailure: foo\n')]) + if v2_avail: + buffer = BytesIO() + stream = subunit.StreamResultToBytes(buffer) + stream.status(test_id='foo', test_status='inprogress') + stream.status(test_id='foo', test_status='fail') + subunit_bytes = buffer.getvalue() + else: + subunit_bytes = _b('test: foo\nfailure: foo\n') + ui = UI([('subunit', subunit_bytes)]) cmd = load.load(ui) ui.set_command(cmd) cmd.repository_factory = memory.RepositoryFactory() @@ -85,18 +131,37 @@ self.assertEqual(1, cmd.execute()) def test_load_new_shows_test_failures(self): - ui = UI([('subunit', 'test: foo\nfailure: foo\n')]) + if v2_avail: + buffer = BytesIO() + stream = subunit.StreamResultToBytes(buffer) + stream.status(test_id='foo', test_status='inprogress') + stream.status(test_id='foo', test_status='fail') + subunit_bytes = buffer.getvalue() + else: + subunit_bytes = b'test: foo\nfailure: foo\n' + ui = UI([('subunit', subunit_bytes)]) cmd = load.load(ui) ui.set_command(cmd) cmd.repository_factory = memory.RepositoryFactory() cmd.repository_factory.initialise(ui.here) self.assertEqual(1, cmd.execute()) self.assertEqual( - [('values', [('id', 0), ('tests', 1), ('failures', 1)])], + [('summary', False, 1, None, Wildcard, None, + [('id', 0, None), ('failures', 1, None)])], ui.outputs[1:]) def test_load_new_shows_test_failure_details(self): - ui = UI([('subunit', 'test: foo\nfailure: foo [\narg\n]\n')]) + if v2_avail: + buffer = BytesIO() + stream = subunit.StreamResultToBytes(buffer) + stream.status(test_id='foo', test_status='inprogress') + stream.status(test_id='foo', test_status='fail', + file_name="traceback", mime_type='text/plain;charset=utf8', + file_bytes=b'arg\n') + subunit_bytes = buffer.getvalue() + else: + subunit_bytes = b'test: foo\nfailure: foo [\narg\n]\n' + ui = UI([('subunit', subunit_bytes)]) cmd = load.load(ui) ui.set_command(cmd) cmd.repository_factory = memory.RepositoryFactory() @@ -105,19 +170,28 @@ suite = ui.outputs[0][1] self.assertEqual([ ('results', Wildcard), - ('values', [('id', 0), ('tests', 1), ('failures', 1)])], + ('summary', False, 1, None, Wildcard, None, + [('id', 0, None), ('failures', 1, None)])], ui.outputs) - result = testtools.TestResult() + result = testtools.StreamSummary() result.startTestRun() try: suite.run(result) finally: result.stopTestRun() self.assertEqual(1, result.testsRun) - self.assertEqual(1, len(result.failures)) + self.assertEqual(1, len(result.errors)) def test_load_new_shows_test_skips(self): - ui = UI([('subunit', 'test: foo\nskip: foo\n')]) + if v2_avail: + buffer 
= BytesIO() + stream = subunit.StreamResultToBytes(buffer) + stream.status(test_id='foo', test_status='inprogress') + stream.status(test_id='foo', test_status='skip') + subunit_bytes = buffer.getvalue() + else: + subunit_bytes = b'test: foo\nskip: foo\n' + ui = UI([('subunit', subunit_bytes)]) cmd = load.load(ui) ui.set_command(cmd) cmd.repository_factory = memory.RepositoryFactory() @@ -125,22 +199,24 @@ self.assertEqual(0, cmd.execute()) self.assertEqual( [('results', Wildcard), - ('values', [('id', 0), ('tests', 1), ('skips', 1)])], + ('summary', True, 1, None, Wildcard, None, + [('id', 0, None), ('skips', 1, None)])], ui.outputs) def test_load_new_shows_test_summary_no_tests(self): - ui = UI([('subunit', '')]) + ui = UI([('subunit', _b(''))]) cmd = load.load(ui) ui.set_command(cmd) cmd.repository_factory = memory.RepositoryFactory() cmd.repository_factory.initialise(ui.here) self.assertEqual(0, cmd.execute()) self.assertEqual( - [('results', Wildcard), ('values', [('id', 0), ('tests', 0)])], + [('results', Wildcard), + ('summary', True, 0, None, None, None, [('id', 0, None)])], ui.outputs) def test_load_quiet_shows_nothing(self): - ui = UI([('subunit', '')], [('quiet', True)]) + ui = UI([('subunit', _b(''))], [('quiet', True)]) cmd = load.load(ui) ui.set_command(cmd) cmd.repository_factory = memory.RepositoryFactory() @@ -148,13 +224,106 @@ self.assertEqual(0, cmd.execute()) self.assertEqual([], ui.outputs) + def test_load_abort_over_interactive_stream(self): + ui = UI([('subunit', b''), ('interactive', b'a\n')]) + cmd = load.load(ui) + ui.set_command(cmd) + cmd.repository_factory = memory.RepositoryFactory() + cmd.repository_factory.initialise(ui.here) + ret = cmd.execute() + self.assertEqual( + [('results', Wildcard), + ('summary', False, 1, None, None, None, + [('id', 0, None), ('failures', 1, None)])], + ui.outputs) + self.assertEqual(1, ret) + def test_partial_passed_to_repo(self): - ui = UI([('subunit', '')], [('quiet', True), ('partial', True)]) + ui = UI([('subunit', _b(''))], [('quiet', True), ('partial', True)]) cmd = load.load(ui) ui.set_command(cmd) cmd.repository_factory = memory.RepositoryFactory() cmd.repository_factory.initialise(ui.here) - self.assertEqual(0, cmd.execute()) + retcode = cmd.execute() self.assertEqual([], ui.outputs) + self.assertEqual(0, retcode) self.assertEqual(True, cmd.repository_factory.repos[ui.here].get_test_run(0)._partial) + + def test_load_timed_run(self): + if v2_avail: + buffer = BytesIO() + stream = subunit.StreamResultToBytes(buffer) + time = datetime(2011, 1, 1, 0, 0, 1, tzinfo=iso8601.Utc()) + stream.status(test_id='foo', test_status='inprogress', timestamp=time) + stream.status(test_id='foo', test_status='success', + timestamp=time+timedelta(seconds=2)) + timed_bytes = buffer.getvalue() + else: + timed_bytes = _b('time: 2011-01-01 00:00:01.000000Z\n' + 'test: foo\n' + 'time: 2011-01-01 00:00:03.000000Z\n' + 'success: foo\n' + 'time: 2011-01-01 00:00:06.000000Z\n') + ui = UI( + [('subunit', timed_bytes)]) + cmd = load.load(ui) + ui.set_command(cmd) + cmd.repository_factory = memory.RepositoryFactory() + cmd.repository_factory.initialise(ui.here) + self.assertEqual(0, cmd.execute()) + # Note that the time here is 2.0, the difference between first and + # second time: directives. That's because 'load' uses a + # ThreadsafeForwardingResult (via ConcurrentTestSuite) that suppresses + # time information not involved in the start or stop of a test. 
+ self.assertEqual( + [('summary', True, 1, None, 2.0, None, [('id', 0, None)])], + ui.outputs[1:]) + + def test_load_second_run(self): + # If there's a previous run in the database, then show information + # about the high level differences in the test run: how many more + # tests, how many more failures, how much longer it takes. + if v2_avail: + buffer = BytesIO() + stream = subunit.StreamResultToBytes(buffer) + time = datetime(2011, 1, 2, 0, 0, 1, tzinfo=iso8601.Utc()) + stream.status(test_id='foo', test_status='inprogress', timestamp=time) + stream.status(test_id='foo', test_status='fail', + timestamp=time+timedelta(seconds=2)) + stream.status(test_id='bar', test_status='inprogress', + timestamp=time+timedelta(seconds=4)) + stream.status(test_id='bar', test_status='fail', + timestamp=time+timedelta(seconds=6)) + timed_bytes = buffer.getvalue() + else: + timed_bytes = _b('time: 2011-01-02 00:00:01.000000Z\n' + 'test: foo\n' + 'time: 2011-01-02 00:00:03.000000Z\n' + 'error: foo\n' + 'time: 2011-01-02 00:00:05.000000Z\n' + 'test: bar\n' + 'time: 2011-01-02 00:00:07.000000Z\n' + 'error: bar\n') + ui = UI( + [('subunit', timed_bytes)]) + cmd = load.load(ui) + ui.set_command(cmd) + cmd.repository_factory = memory.RepositoryFactory() + repo = cmd.repository_factory.initialise(ui.here) + # XXX: Circumvent the AutoTimingTestResultDecorator so we can get + # predictable times, rather than ones based on the system + # clock. (Would normally expect to use repo.get_inserter()) + inserter = repo._get_inserter(False) + # Insert a run with different results. + inserter.startTestRun() + inserter.status(test_id=self.id(), test_status='inprogress', + timestamp=datetime(2011, 1, 1, 0, 0, 1, tzinfo=iso8601.Utc())) + inserter.status(test_id=self.id(), test_status='fail', + timestamp=datetime(2011, 1, 1, 0, 0, 10, tzinfo=iso8601.Utc())) + inserter.stopTestRun() + self.assertEqual(1, cmd.execute()) + self.assertEqual( + [('summary', False, 2, 1, 6.0, -3.0, + [('id', 1, None), ('failures', 2, 1)])], + ui.outputs[1:]) diff -Nru testrepository-0.0.5/testrepository/tests/commands/test_run.py testrepository-0.0.18/testrepository/tests/commands/test_run.py --- testrepository-0.0.5/testrepository/tests/commands/test_run.py 2010-12-19 09:14:34.000000000 +0000 +++ testrepository-0.0.18/testrepository/tests/commands/test_run.py 2013-11-03 19:58:46.000000000 +0000 @@ -14,14 +14,32 @@ """Tests for the run command.""" +from io import BytesIO import os.path from subprocess import PIPE +import tempfile -from testtools.matchers import MatchesException +from extras import try_import +from fixtures import ( + Fixture, + MonkeyPatch, + ) +import subunit +v2_avail = try_import('subunit.ByteStreamToStreamResult') +from subunit import RemotedTestCase +from testscenarios.scenarios import multiply_scenarios +from testtools.compat import _b +from testtools.matchers import ( + Equals, + HasLength, + MatchesException, + MatchesListwise, + ) from testrepository.commands import run -from testrepository.ui.model import UI +from testrepository.ui.model import UI, ProcessModel from testrepository.repository import memory +from testrepository.testlist import write_list from testrepository.tests import ResourcedTestCase, Wildcard from testrepository.tests.stubpackage import TempDirResource from testrepository.tests.test_testcommand import FakeTestCommand @@ -32,9 +50,11 @@ resources = [('tempdir', TempDirResource())] - def get_test_ui_and_cmd(self, options=(), args=()): + def get_test_ui_and_cmd(self, options=(), args=(), proc_outputs=(), + 
proc_results=()): self.dirty() - ui = UI(options=options, args=args) + ui = UI(options=options, args=args, proc_outputs=proc_outputs, + proc_results=proc_results) ui.here = self.tempdir cmd = run.run(ui) ui.set_command(cmd) @@ -47,24 +67,23 @@ def config_path(self): return os.path.join(self.tempdir, '.testr.conf') - def set_config(self, bytes): - stream = file(self.config_path(), 'wb') - try: - stream.write(bytes) - finally: - stream.close() + def set_config(self, text): + with open(self.config_path(), 'wt') as stream: + stream.write(text) - def setup_repo(self, cmd, ui): + def setup_repo(self, cmd, ui, failures=True): repo = cmd.repository_factory.initialise(ui.here) inserter = repo.get_inserter() inserter.startTestRun() - make_test('passing', True).run(inserter) - make_test('failing', False).run(inserter) + inserter.status(test_id='passing', test_status='success') + if failures: + inserter.status(test_id='failing1', test_status='fail') + inserter.status(test_id='failing2', test_status='fail') inserter.stopTestRun() def test_no_config_file_errors(self): ui, cmd = self.get_test_ui_and_cmd() - repo = cmd.repository_factory.initialise(ui.here) + cmd.repository_factory.initialise(ui.here) self.assertEqual(3, cmd.execute()) self.assertEqual(1, len(ui.outputs)) self.assertEqual('error', ui.outputs[0][0]) @@ -73,7 +92,7 @@ def test_no_config_settings_errors(self): ui, cmd = self.get_test_ui_and_cmd() - repo = cmd.repository_factory.initialise(ui.here) + cmd.repository_factory.initialise(ui.here) self.set_config('') self.assertEqual(3, cmd.execute()) self.assertEqual(1, len(ui.outputs)) @@ -96,7 +115,7 @@ ('popen', (expected_cmd,), {'shell': True, 'stdin': PIPE, 'stdout': PIPE}), ('results', Wildcard), - ('values', [('id', 1), ('tests', 0)]) + ('summary', True, 0, -3, None, None, [('id', 1, None)]) ], ui.outputs) # TODO: check the list file is written, and deleted. self.assertEqual(0, result) @@ -108,13 +127,13 @@ self.set_config( '[DEFAULT]\ntest_command=foo $IDLIST\n') self.assertEqual(0, cmd.execute()) - expected_cmd = 'foo failing' + expected_cmd = 'foo failing1 failing2' self.assertEqual([ ('values', [('running', expected_cmd)]), ('popen', (expected_cmd,), {'shell': True, 'stdin': PIPE, 'stdout': PIPE}), ('results', Wildcard), - ('values', [('id', 1), ('tests', 0)]) + ('summary', True, 0, -3, None, None, [('id', 1, None)]), ], ui.outputs) # Failing causes partial runs to be used. 
self.assertEqual(True, @@ -133,7 +152,7 @@ ('popen', (expected_cmd,), {'shell': True, 'stdin': PIPE, 'stdout': PIPE}), ('results', Wildcard), - ('values', [('id', 1), ('tests', 0)]) + ('summary', True, 0, -3, None, None, [('id', 1, None)]) ], ui.outputs) def test_IDLIST_default_passed_normally(self): @@ -149,7 +168,7 @@ ('popen', (expected_cmd,), {'shell': True, 'stdin': PIPE, 'stdout': PIPE}), ('results', Wildcard), - ('values', [('id', 1), ('tests', 0)]) + ('summary', True, 0, -3, None, None, [('id', 1, None)]) ], ui.outputs) def test_IDFILE_not_passed_normally(self): @@ -165,11 +184,72 @@ ('popen', (expected_cmd,), {'shell': True, 'stdin': PIPE, 'stdout': PIPE}), ('results', Wildcard), - ('values', [('id', 1), ('tests', 0)]) + ('summary', True, 0, -3, None, None, [('id', 1, None)]), + ], ui.outputs) + + def capture_ids(self, list_result=None): + params = [] + def capture_ids(self, ids, args, test_filters=None): + params.append([self, ids, args, test_filters]) + result = Fixture() + result.run_tests = lambda:[] + if list_result is not None: + result.list_tests = lambda:list(list_result) + return result + return params, capture_ids + + def test_load_list_failing_takes_id_intersection(self): + list_file = tempfile.NamedTemporaryFile() + self.addCleanup(list_file.close) + write_list(list_file, ['foo', 'quux', 'failing1']) + # The extra tests - foo, quux - won't match known failures, and the + # unlisted failure failing2 won't match the list. + expected_ids = set(['failing1']) + list_file.flush() + ui, cmd = self.get_test_ui_and_cmd( + options=[('load_list', list_file.name), ('failing', True)]) + cmd.repository_factory = memory.RepositoryFactory() + self.setup_repo(cmd, ui) + self.set_config( + '[DEFAULT]\ntest_command=foo $IDOPTION\ntest_id_option=--load-list $IDFILE\n') + params, capture_ids = self.capture_ids() + self.useFixture(MonkeyPatch( + 'testrepository.testcommand.TestCommand.get_run_command', + capture_ids)) + cmd_result = cmd.execute() + self.assertEqual([ + ('results', Wildcard), + ('summary', True, 0, -3, None, None, [('id', 1, None)]) + ], ui.outputs) + self.assertEqual(0, cmd_result) + self.assertEqual([[Wildcard, expected_ids, [], None]], params) + + def test_load_list_passes_ids(self): + list_file = tempfile.NamedTemporaryFile() + self.addCleanup(list_file.close) + expected_ids = set(['foo', 'quux', 'bar']) + write_list(list_file, expected_ids) + list_file.flush() + ui, cmd = self.get_test_ui_and_cmd( + options=[('load_list', list_file.name)]) + cmd.repository_factory = memory.RepositoryFactory() + self.setup_repo(cmd, ui) + self.set_config( + '[DEFAULT]\ntest_command=foo $IDOPTION\ntest_id_option=--load-list $IDFILE\n') + params, capture_ids = self.capture_ids() + self.useFixture(MonkeyPatch( + 'testrepository.testcommand.TestCommand.get_run_command', + capture_ids)) + cmd_result = cmd.execute() + self.assertEqual([ + ('results', Wildcard), + ('summary', True, 0, -3, None, None, [('id', 1, None)]) ], ui.outputs) + self.assertEqual(0, cmd_result) + self.assertEqual([[Wildcard, expected_ids, [], None]], params) def test_extra_options_passed_in(self): - ui, cmd = self.get_test_ui_and_cmd(args=('bar', 'quux')) + ui, cmd = self.get_test_ui_and_cmd(args=('--', 'bar', 'quux')) cmd.repository_factory = memory.RepositoryFactory() self.setup_repo(cmd, ui) self.set_config( @@ -181,7 +261,7 @@ ('popen', (expected_cmd,), {'shell': True, 'stdin': PIPE, 'stdout': PIPE}), ('results', Wildcard), - ('values', [('id', 1), ('tests', 0)]) + ('summary', True, 0, -3, None, None, [('id', 1, 
None)]) ], ui.outputs) def test_quiet_passed_down(self): @@ -216,3 +296,246 @@ self.assertEqual(0, result) self.assertEqual(True, cmd.repository_factory.repos[ui.here].get_test_run(1)._partial) + + def test_load_failure_exposed(self): + if v2_avail: + buffer = BytesIO() + stream = subunit.StreamResultToBytes(buffer) + stream.status(test_id='foo', test_status='inprogress') + stream.status(test_id='foo', test_status='fail') + subunit_bytes = buffer.getvalue() + else: + subunit_bytes = b'test: foo\nfailure: foo\n' + ui, cmd = self.get_test_ui_and_cmd(options=[('quiet', True),], + proc_outputs=[subunit_bytes]) + cmd.repository_factory = memory.RepositoryFactory() + self.setup_repo(cmd, ui) + self.set_config('[DEFAULT]\ntest_command=foo\n') + result = cmd.execute() + cmd.repository_factory.repos[ui.here].get_test_run(1) + self.assertEqual(1, result) + + def test_process_exit_code_nonzero_causes_synthetic_error_test(self): + if v2_avail: + buffer = BytesIO() + stream = subunit.StreamResultToBytes(buffer) + stream.status(test_id='foo', test_status='inprogress') + stream.status(test_id='foo', test_status='success') + subunit_bytes = buffer.getvalue() + else: + subunit_bytes = b'test: foo\nsuccess: foo\n' + ui, cmd = self.get_test_ui_and_cmd(options=[('quiet', True),], + proc_outputs=[subunit_bytes], + proc_results=[2]) + # 2 is non-zero, and non-zero triggers the behaviour of exiting + # with 1 - but we want to see that it doesn't pass-through the + # value literally. + cmd.repository_factory = memory.RepositoryFactory() + self.setup_repo(cmd, ui) + self.set_config('[DEFAULT]\ntest_command=foo\n') + result = cmd.execute() + self.assertEqual(1, result) + run = cmd.repository_factory.repos[ui.here].get_test_run(1) + self.assertEqual([Wildcard, 'fail'], + [test['status'] for test in run._tests]) + + def test_regex_test_filter(self): + ui, cmd = self.get_test_ui_and_cmd(args=('ab.*cd', '--', 'bar', 'quux')) + cmd.repository_factory = memory.RepositoryFactory() + self.setup_repo(cmd, ui) + self.set_config( + '[DEFAULT]\ntest_command=foo $IDLIST $LISTOPT\n' + 'test_id_option=--load-list $IDFILE\n' + 'test_list_option=--list\n') + params, capture_ids = self.capture_ids() + self.useFixture(MonkeyPatch( + 'testrepository.testcommand.TestCommand.get_run_command', + capture_ids)) + cmd_result = cmd.execute() + self.assertEqual([ + ('results', Wildcard), + ('summary', True, 0, -3, None, None, [('id', 1, None)]) + ], ui.outputs) + self.assertEqual(0, cmd_result) + self.assertThat(params[0][1], Equals(None)) + self.assertThat( + params[0][2], MatchesListwise([Equals('bar'), Equals('quux')])) + self.assertThat(params[0][3], MatchesListwise([Equals('ab.*cd')])) + self.assertThat(params, HasLength(1)) + + def test_regex_test_filter_with_explicit_ids(self): + ui, cmd = self.get_test_ui_and_cmd( + args=('g1', '--', 'bar', 'quux'),options=[('failing', True)]) + cmd.repository_factory = memory.RepositoryFactory() + self.setup_repo(cmd, ui) + self.set_config( + '[DEFAULT]\ntest_command=foo $IDLIST $LISTOPT\n' + 'test_id_option=--load-list $IDFILE\n' + 'test_list_option=--list\n') + params, capture_ids = self.capture_ids() + self.useFixture(MonkeyPatch( + 'testrepository.testcommand.TestCommand.get_run_command', + capture_ids)) + cmd_result = cmd.execute() + self.assertEqual([ + ('results', Wildcard), + ('summary', True, 0, -3, None, None, [('id', 1, None)]) + ], ui.outputs) + self.assertEqual(0, cmd_result) + self.assertThat(params[0][1], Equals(['failing1', 'failing2'])) + self.assertThat( + params[0][2], 
MatchesListwise([Equals('bar'), Equals('quux')])) + self.assertThat(params[0][3], MatchesListwise([Equals('g1')])) + self.assertThat(params, HasLength(1)) + + def test_until_failure(self): + ui, cmd = self.get_test_ui_and_cmd(options=[('until_failure', True)]) + if v2_avail: + buffer = BytesIO() + stream = subunit.StreamResultToBytes(buffer) + stream.status(test_id='foo', test_status='inprogress') + stream.status(test_id='foo', test_status='success') + subunit_bytes1 = buffer.getvalue() + buffer.seek(0) + buffer.truncate() + stream.status(test_id='foo', test_status='inprogress') + stream.status(test_id='foo', test_status='fail') + subunit_bytes2 = buffer.getvalue() + else: + subunit_bytes1 = b'test: foo\nsuccess: foo\n' + subunit_bytes2 = b'test: foo\nfailure: foo\n' + ui.proc_outputs = [ + subunit_bytes1, # stream one, works + subunit_bytes2, # stream two, fails + ] + ui.require_proc_stdout = True + cmd.repository_factory = memory.RepositoryFactory() + self.setup_repo(cmd, ui) + self.set_config( + '[DEFAULT]\ntest_command=foo $IDLIST $LISTOPT\n' + 'test_id_option=--load-list $IDFILE\n' + 'test_list_option=--list\n') + cmd_result = cmd.execute() + expected_cmd = 'foo ' + self.assertEqual([ + ('values', [('running', expected_cmd)]), + ('popen', (expected_cmd,), + {'shell': True, 'stdin': PIPE, 'stdout': PIPE}), + ('results', Wildcard), + ('summary', True, 1, -2, Wildcard, Wildcard, [('id', 1, None)]), + ('values', [('running', expected_cmd)]), + ('popen', (expected_cmd,), + {'shell': True, 'stdin': PIPE, 'stdout': PIPE}), + ('results', Wildcard), + ('summary', False, 1, 0, Wildcard, Wildcard, + [('id', 2, None), ('failures', 1, 1)]) + ], ui.outputs) + self.assertEqual(1, cmd_result) + + def test_failure_no_tests_run_when_no_failures_failures(self): + ui, cmd = self.get_test_ui_and_cmd(options=[('failing', True)]) + cmd.repository_factory = memory.RepositoryFactory() + self.setup_repo(cmd, ui, failures=False) + self.set_config( + '[DEFAULT]\ntest_command=foo $IDOPTION\ntest_id_option=--load-list $IDFILE\n') + cmd.command_factory = FakeTestCommand + result = cmd.execute() + self.assertEqual([ + ('results', Wildcard), + ('summary', True, 0, -1, None, None, [('id', 1, None)]) + ], ui.outputs) + self.assertEqual(0, result) + + def test_isolated_runs_multiple_processes(self): + ui, cmd = self.get_test_ui_and_cmd(options=[('isolated', True)]) + cmd.repository_factory = memory.RepositoryFactory() + self.setup_repo(cmd, ui) + self.set_config( + '[DEFAULT]\ntest_command=foo $IDLIST $LISTOPT\n' + 'test_id_option=--load-list $IDFILE\n' + 'test_list_option=--list\n') + params, capture_ids = self.capture_ids(list_result=['ab', 'cd', 'ef']) + self.useFixture(MonkeyPatch( + 'testrepository.testcommand.TestCommand.get_run_command', + capture_ids)) + cmd_result = cmd.execute() + self.assertEqual([ + ('results', Wildcard), + ('summary', True, 0, -3, None, None, [('id', 1, None)]), + ('results', Wildcard), + ('summary', True, 0, 0, None, None, [('id', 2, None)]), + ('results', Wildcard), + ('summary', True, 0, 0, None, None, [('id', 3, None)]), + ], ui.outputs) + self.assertEqual(0, cmd_result) + # once to list, then 3 each executing one test. 
+ self.assertThat(params, HasLength(4)) + self.assertThat(params[0][1], Equals(None)) + self.assertThat(params[1][1], Equals(['ab'])) + self.assertThat(params[2][1], Equals(['cd'])) + self.assertThat(params[3][1], Equals(['ef'])) + + +def read_all(stream): + return stream.read() + + +def read_single(stream): + return stream.read(1) + + +def readline(stream): + return stream.readline() + + +def readlines(stream): + return _b('').join(stream.readlines()) + + +def accumulate(stream, reader): + accumulator = [] + content = reader(stream) + while content: + accumulator.append(content) + content = reader(stream) + return _b('').join(accumulator) + + +class TestReturnCodeToSubunit(ResourcedTestCase): + + scenarios = multiply_scenarios( + [('readdefault', dict(reader=read_all)), + ('readsingle', dict(reader=read_single)), + ('readline', dict(reader=readline)), + ('readlines', dict(reader=readlines)), + ], + [('noeol', dict(stdout=_b('foo\nbar'))), + ('trailingeol', dict(stdout=_b('foo\nbar\n')))]) + + def test_returncode_0_no_change(self): + proc = ProcessModel(None) + proc.stdout.write(self.stdout) + proc.stdout.seek(0) + stream = run.ReturnCodeToSubunit(proc) + content = accumulate(stream, self.reader) + self.assertEqual(self.stdout, content) + + def test_returncode_nonzero_fail_appended_to_content(self): + proc = ProcessModel(None) + proc.stdout.write(self.stdout) + proc.stdout.seek(0) + proc.returncode = 1 + stream = run.ReturnCodeToSubunit(proc) + content = accumulate(stream, self.reader) + if v2_avail: + buffer = BytesIO() + buffer.write(b'foo\nbar\n') + stream = subunit.StreamResultToBytes(buffer) + stream.status(test_id='process-returncode', test_status='fail', + file_name='traceback', mime_type='test/plain;charset=utf8', + file_bytes=b'returncode 1') + expected_content = buffer.getvalue() + else: + expected_content = _b('foo\nbar\ntest: process-returncode\n' + 'failure: process-returncode [\n returncode 1\n]\n') + self.assertEqual(expected_content, content) diff -Nru testrepository-0.0.5/testrepository/tests/commands/test_slowest.py testrepository-0.0.18/testrepository/tests/commands/test_slowest.py --- testrepository-0.0.5/testrepository/tests/commands/test_slowest.py 1970-01-01 00:00:00.000000000 +0000 +++ testrepository-0.0.18/testrepository/tests/commands/test_slowest.py 2013-03-15 06:57:22.000000000 +0000 @@ -0,0 +1,147 @@ +# +# Copyright (c) 2010 Testrepository Contributors +# +# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause +# license at the users choice. A copy of both licenses are available in the +# project source as Apache-2.0 and BSD. You may not use this file except in +# compliance with one of these two licences. +# +# Unless required by applicable law or agreed to in writing, software +# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# license you chose for the specific language governing permissions and +# limitations under that license. 
+ +"""Tests for the "slowest" command.""" + +from datetime import ( + datetime, + timedelta, +) +import pytz + +from testtools import PlaceHolder + +from testrepository.commands import slowest +from testrepository.ui.model import UI +from testrepository.repository import memory +from testrepository.tests import ResourcedTestCase + + +class TestCommand(ResourcedTestCase): + + def get_test_ui_and_cmd(self, options=(), args=()): + ui = UI(options=options, args=args) + cmd = slowest.slowest(ui) + ui.set_command(cmd) + return ui, cmd + + def test_shows_nothing_for_no_tests(self): + """Having no tests leads to an error and no output.""" + ui, cmd = self.get_test_ui_and_cmd() + cmd.repository_factory = memory.RepositoryFactory() + repo = cmd.repository_factory.initialise(ui.here) + self.assertEqual(3, cmd.execute()) + self.assertEqual([], ui.outputs) + + def insert_one_test_with_runtime(self, inserter, runtime): + """Insert one test, with the specified run time. + + :param inserter: the inserter to use to insert the + test. + :param runtime: the runtime (in seconds) that the + test should appear to take. + :return: the name of the test that was added. + """ + test_id = self.getUniqueString() + start_time = datetime.now(pytz.UTC) + inserter.status(test_id=test_id, test_status='inprogress', + timestamp=start_time) + inserter.status(test_id=test_id, test_status='success', + timestamp=start_time + timedelta(seconds=runtime)) + return test_id + + def test_shows_one_test_when_one_test(self): + """When there is one test it is shown.""" + ui, cmd = self.get_test_ui_and_cmd() + cmd.repository_factory = memory.RepositoryFactory() + repo = cmd.repository_factory.initialise(ui.here) + inserter = repo.get_inserter() + inserter.startTestRun() + runtime = 0.1 + test_id = self.insert_one_test_with_runtime( + inserter, runtime) + inserter.stopTestRun() + retcode = cmd.execute() + self.assertEqual( + [('table', + [slowest.slowest.TABLE_HEADER] + + slowest.slowest.format_times([(test_id, runtime)]))], + ui.outputs) + self.assertEqual(0, retcode) + + def test_orders_tests_based_on_runtime(self): + """Longer running tests are shown first.""" + ui, cmd = self.get_test_ui_and_cmd() + cmd.repository_factory = memory.RepositoryFactory() + repo = cmd.repository_factory.initialise(ui.here) + inserter = repo.get_inserter() + inserter.startTestRun() + runtime1 = 1.1 + test_id1 = self.insert_one_test_with_runtime( + inserter, runtime1) + runtime2 = 0.1 + test_id2 = self.insert_one_test_with_runtime( + inserter, runtime2) + inserter.stopTestRun() + retcode = cmd.execute() + rows = [(test_id1, runtime1), + (test_id2, runtime2)] + rows = slowest.slowest.format_times(rows) + self.assertEqual(0, retcode) + self.assertEqual( + [('table', + [slowest.slowest.TABLE_HEADER] + rows)], + ui.outputs) + + def insert_lots_of_tests_with_timing(self, repo): + inserter = repo.get_inserter() + inserter.startTestRun() + runtimes = [float(r) for r in range(slowest.slowest.DEFAULT_ROWS_SHOWN + 1)] + test_ids = [ + self.insert_one_test_with_runtime( + inserter, runtime) + for runtime in runtimes] + inserter.stopTestRun() + return test_ids, runtimes + + def test_limits_output_by_default(self): + """Only the first 10 tests are shown by default.""" + ui, cmd = self.get_test_ui_and_cmd() + cmd.repository_factory = memory.RepositoryFactory() + repo = cmd.repository_factory.initialise(ui.here) + test_ids, runtimes = self.insert_lots_of_tests_with_timing(repo) + retcode = cmd.execute() + rows = list(zip(reversed(test_ids), reversed(runtimes)) + 
)[:slowest.slowest.DEFAULT_ROWS_SHOWN] + rows = slowest.slowest.format_times(rows) + self.assertEqual(0, retcode) + self.assertEqual( + [('table', + [slowest.slowest.TABLE_HEADER] + rows)], + ui.outputs) + + def test_option_to_show_all_rows_does_so(self): + """When the all option is given all rows are shown.""" + ui, cmd = self.get_test_ui_and_cmd(options=[('all', True)]) + cmd.repository_factory = memory.RepositoryFactory() + repo = cmd.repository_factory.initialise(ui.here) + test_ids, runtimes = self.insert_lots_of_tests_with_timing(repo) + retcode = cmd.execute() + rows = zip(reversed(test_ids), reversed(runtimes)) + rows = slowest.slowest.format_times(rows) + self.assertEqual(0, retcode) + self.assertEqual( + [('table', + [slowest.slowest.TABLE_HEADER] + rows)], + ui.outputs) diff -Nru testrepository-0.0.5/testrepository/tests/__init__.py testrepository-0.0.18/testrepository/tests/__init__.py --- testrepository-0.0.5/testrepository/tests/__init__.py 2010-11-30 23:48:18.000000000 +0000 +++ testrepository-0.0.18/testrepository/tests/__init__.py 2013-04-09 11:54:33.000000000 +0000 @@ -20,15 +20,10 @@ from testscenarios import generate_scenarios from testtools import TestCase + class ResourcedTestCase(TestCase, testresources.ResourcedTestCase): """Make all testrepository tests have resource support.""" - def setUp(self): - TestCase.setUp(self) - testresources.ResourcedTestCase.setUpResources(self) - self.addCleanup(testresources.ResourcedTestCase.tearDownResources, - self) - class _Wildcard(object): """Object that is equal to everything.""" @@ -46,6 +41,19 @@ Wildcard = _Wildcard() +class StubTestCommand: + + def __init__(self, filter_tags=None): + self.results = [] + self.filter_tags = filter_tags or set() + + def __call__(self, ui, repo): + return self + + def get_filter_tags(self): + return self.filter_tags + + def test_suite(): packages = [ 'arguments', diff -Nru testrepository-0.0.5/testrepository/tests/monkeypatch.py testrepository-0.0.18/testrepository/tests/monkeypatch.py --- testrepository-0.0.5/testrepository/tests/monkeypatch.py 2010-01-10 11:13:50.000000000 +0000 +++ testrepository-0.0.18/testrepository/tests/monkeypatch.py 2012-12-18 22:28:01.000000000 +0000 @@ -14,7 +14,7 @@ """Monkeypatch helper function for tests. -This should be moved to testtools or something, its very generic. +This has been moved to fixtures, and should be removed from here. 
""" def monkeypatch(name, new_value): diff -Nru testrepository-0.0.5/testrepository/tests/repository/test_file.py testrepository-0.0.18/testrepository/tests/repository/test_file.py --- testrepository-0.0.5/testrepository/tests/repository/test_file.py 2010-11-28 18:48:00.000000000 +0000 +++ testrepository-0.0.18/testrepository/tests/repository/test_file.py 2013-03-15 07:04:40.000000000 +0000 @@ -56,13 +56,13 @@ def test_initialise(self): self.useFixture(FileRepositoryFixture(self)) base = os.path.join(self.tempdir, '.testrepository') - stream = open(os.path.join(base, 'format'), 'rb') + stream = open(os.path.join(base, 'format'), 'rt') try: contents = stream.read() finally: stream.close() self.assertEqual("1\n", contents) - stream = open(os.path.join(base, 'next-stream'), 'rb') + stream = open(os.path.join(base, 'next-stream'), 'rt') try: contents = stream.read() finally: @@ -86,7 +86,8 @@ repo = self.useFixture(FileRepositoryFixture(self)).repo result = repo.get_inserter() result.startTestRun() - self.assertEqual(0, result.stopTestRun()) + result.stopTestRun() + self.assertEqual(0, result.get_id()) def test_open_expands_user_directory(self): short_path = self.useFixture(HomeDirTempDir()).short_path diff -Nru testrepository-0.0.5/testrepository/tests/stubpackage.py testrepository-0.0.18/testrepository/tests/stubpackage.py --- testrepository-0.0.5/testrepository/tests/stubpackage.py 2010-01-10 11:13:50.000000000 +0000 +++ testrepository-0.0.18/testrepository/tests/stubpackage.py 2013-02-06 09:07:32.000000000 +0000 @@ -56,7 +56,7 @@ os.mkdir(root) init_seen = not self.init for modulename, contents in self.modulelist: - stream = file(os.path.join(root, modulename), 'wb') + stream = open(os.path.join(root, modulename), 'wt') try: stream.write(contents) finally: @@ -64,5 +64,5 @@ if modulename == '__init__.py': init_seen = True if not init_seen: - file(os.path.join(root, '__init__.py'), 'wb').close() + open(os.path.join(root, '__init__.py'), 'wt').close() return result diff -Nru testrepository-0.0.5/testrepository/tests/test_arguments.py testrepository-0.0.18/testrepository/tests/test_arguments.py --- testrepository-0.0.5/testrepository/tests/test_arguments.py 2010-12-03 03:29:10.000000000 +0000 +++ testrepository-0.0.18/testrepository/tests/test_arguments.py 2012-12-18 22:28:01.000000000 +0000 @@ -14,7 +14,10 @@ """Tests for the arguments package.""" -from testtools.matchers import raises +from testtools.matchers import ( + Equals, + raises, + ) from testrepository import arguments from testrepository.tests import ResourcedTestCase @@ -84,6 +87,15 @@ argument = AnArgument('foo') self.assertThat(lambda: argument.parse([]), raises(ValueError)) + def test_parsing_optional_not_matching(self): + class AnArgument(arguments.AbstractArgument): + def _parse_one(self, arg): + raise ValueError('not an argument') + argument = AnArgument('foo', min=0) + args = ['a', 'b'] + self.assertThat(argument.parse(args), Equals([])) + self.assertThat(args, Equals(['a', 'b'])) + # No interface tests for now, because the interface we expect is really just # _parse_one; however if bugs or issues show up... then we should add them. 
diff -Nru testrepository-0.0.5/testrepository/tests/test_commands.py testrepository-0.0.18/testrepository/tests/test_commands.py --- testrepository-0.0.5/testrepository/tests/test_commands.py 2010-12-06 05:58:58.000000000 +0000 +++ testrepository-0.0.18/testrepository/tests/test_commands.py 2012-12-18 22:28:01.000000000 +0000 @@ -14,11 +14,16 @@ """Tests for the commands module.""" +import optparse import os.path import sys from testresources import TestResource -from testtools.matchers import MatchesException, raises +from testtools.matchers import ( + IsInstance, + MatchesException, + raises, + ) from testrepository import commands from testrepository.repository import file @@ -163,6 +168,14 @@ 'err')) +class TestGetCommandParser(ResourcedTestCase): + + def test_trivial(self): + cmd = InstrumentedCommand(model.UI()) + parser = commands.get_command_parser(cmd) + self.assertThat(parser, IsInstance(optparse.OptionParser)) + + class InstrumentedCommand(commands.Command): """A command which records methods called on it. diff -Nru testrepository-0.0.5/testrepository/tests/test_repository.py testrepository-0.0.18/testrepository/tests/test_repository.py --- testrepository-0.0.5/testrepository/tests/test_repository.py 2010-12-19 10:32:28.000000000 +0000 +++ testrepository-0.0.18/testrepository/tests/test_repository.py 2013-04-01 09:31:50.000000000 +0000 @@ -14,7 +14,10 @@ """Tests for Repository support logic and the Repository contract.""" -import datetime +from datetime import ( + datetime, + timedelta, + ) import doctest from subunit import iso8601 @@ -22,13 +25,19 @@ from testresources import TestResource from testtools import ( clone_test_with_new_id, - TestResult, + PlaceHolder, ) +import testtools +from testtools.compat import _b +from testtools.testresult.doubles import ExtendedTestResult from testtools.matchers import DocTestMatches, raises from testrepository import repository from testrepository.repository import file, memory -from testrepository.tests import ResourcedTestCase +from testrepository.tests import ( + ResourcedTestCase, + Wildcard, + ) from testrepository.tests.stubpackage import ( TempDirResource, ) @@ -91,6 +100,9 @@ def failing(self): self.fail("oops") + def unexpected_success(self): + self.expectFailure("unexpected success", self.assertTrue, True) + def make_test(id, should_pass): """Make a test.""" @@ -103,13 +115,10 @@ def run_timed(id, duration, result): """Make and run a test taking duration seconds.""" - start = datetime.datetime.now(tz=iso8601.Utc()) - result.time(start) - test = make_test(id, True) - result.startTest(test) - result.time(start + datetime.timedelta(seconds=duration)) - result.addSuccess(test) - result.stopTest(test) + start = datetime.now(tz=iso8601.Utc()) + result.status(test_id=id, test_status='inprogress', timestamp=start) + result.status(test_id=id, test_status='success', + timestamp=start + timedelta(seconds=duration)) class TestRepositoryErrors(ResourcedTestCase): @@ -129,8 +138,23 @@ def get_failing(self, repo): """Analyze a failing stream from repo and return it.""" run = repo.get_failing() - analyzer = TestResult() - run.get_test().run(analyzer) + analyzer = testtools.StreamSummary() + analyzer.startTestRun() + try: + run.get_test().run(analyzer) + finally: + analyzer.stopTestRun() + return analyzer + + def get_last_run(self, repo): + """Return the results from a stream.""" + run = repo.get_test_run(repo.latest_id()) + analyzer = testtools.StreamSummary() + analyzer.startTestRun() + try: + run.get_test().run(analyzer) + finally: + 
analyzer.stopTestRun() return analyzer def test_can_initialise_with_param(self): @@ -150,10 +174,12 @@ pass case = Case('method') result = repo.get_inserter() - result.startTestRun() - case.run(result) - result.stopTestRun() + legacy_result = testtools.ExtendedToStreamDecorator(result) + legacy_result.startTestRun() + case.run(legacy_result) + legacy_result.stopTestRun() self.assertEqual(1, repo.count()) + self.assertNotEqual(None, result.get_id()) def test_open(self): self.repo_impl.initialise(self.sample_url) @@ -166,6 +192,7 @@ def test_inserting_creates_id(self): # When inserting a stream, an id is returned from stopTestRun. + # Note that this is no longer recommended - but kept for compatibility. repo = self.repo_impl.initialise(self.sample_url) result = repo.get_inserter() result.startTestRun() @@ -192,7 +219,8 @@ repo = self.repo_impl.initialise(self.sample_url) result = repo.get_inserter() result.startTestRun() - inserted = result.stopTestRun() + result.stopTestRun() + inserted = result.get_id() self.assertEqual(inserted, repo.latest_id()) def test_get_failing_empty(self): @@ -205,54 +233,83 @@ # repositories can return a TestRun with just latest failures in it. repo = self.repo_impl.initialise(self.sample_url) result = repo.get_inserter() - result.startTestRun() - make_test('passing', True).run(result) - make_test('failing', False).run(result) - result.stopTestRun() + legacy_result = testtools.ExtendedToStreamDecorator(result) + legacy_result.startTestRun() + make_test('passing', True).run(legacy_result) + make_test('failing', False).run(legacy_result) + legacy_result.stopTestRun() analyzed = self.get_failing(repo) self.assertEqual(1, analyzed.testsRun) - self.assertEqual(1, len(analyzed.failures)) - self.assertEqual('failing', analyzed.failures[0][0].id()) + self.assertEqual(1, len(analyzed.errors)) + self.assertEqual('failing', analyzed.errors[0][0].id()) + + def test_unexpected_success(self): + # Unexpected successes get forwarded too. (Test added because of a + # NameError in memory repo). + repo = self.repo_impl.initialise(self.sample_url) + result = repo.get_inserter() + legacy_result = testtools.ExtendedToStreamDecorator(result) + legacy_result.startTestRun() + test = clone_test_with_new_id(Case('unexpected_success'), 'unexpected_success') + test.run(legacy_result) + legacy_result.stopTestRun() + analyzed = self.get_last_run(repo) + self.assertEqual(1, analyzed.testsRun) + self.assertEqual(1, len(analyzed.unexpectedSuccesses)) + self.assertEqual('unexpected_success', analyzed.unexpectedSuccesses[0].id()) def test_get_failing_complete_runs_delete_missing_failures(self): # failures from complete runs replace all failures. 
repo = self.repo_impl.initialise(self.sample_url) result = repo.get_inserter() - result.startTestRun() - make_test('passing', True).run(result) - make_test('failing', False).run(result) - make_test('missing', False).run(result) - result.stopTestRun() - result = repo.get_inserter() - result.startTestRun() - make_test('passing', False).run(result) - make_test('failing', True).run(result) - result.stopTestRun() + legacy_result = testtools.ExtendedToStreamDecorator(result) + legacy_result.startTestRun() + make_test('passing', True).run(legacy_result) + make_test('failing', False).run(legacy_result) + make_test('missing', False).run(legacy_result) + legacy_result.stopTestRun() + result = repo.get_inserter() + legacy_result = testtools.ExtendedToStreamDecorator(result) + legacy_result.startTestRun() + make_test('passing', False).run(legacy_result) + make_test('failing', True).run(legacy_result) + legacy_result.stopTestRun() analyzed = self.get_failing(repo) self.assertEqual(1, analyzed.testsRun) - self.assertEqual(1, len(analyzed.failures)) - self.assertEqual('passing', analyzed.failures[0][0].id()) + self.assertEqual(1, len(analyzed.errors)) + self.assertEqual('passing', analyzed.errors[0][0].id()) def test_get_failing_partial_runs_preserve_missing_failures(self): # failures from two runs add to existing failures, and successes remove # from them. repo = self.repo_impl.initialise(self.sample_url) result = repo.get_inserter() - result.startTestRun() - make_test('passing', True).run(result) - make_test('failing', False).run(result) - make_test('missing', False).run(result) - result.stopTestRun() + legacy_result = testtools.ExtendedToStreamDecorator(result) + legacy_result.startTestRun() + make_test('passing', True).run(legacy_result) + make_test('failing', False).run(legacy_result) + make_test('missing', False).run(legacy_result) + legacy_result.stopTestRun() result = repo.get_inserter(partial=True) - result.startTestRun() - make_test('passing', False).run(result) - make_test('failing', True).run(result) - result.stopTestRun() + legacy_result = testtools.ExtendedToStreamDecorator(result) + legacy_result.startTestRun() + make_test('passing', False).run(legacy_result) + make_test('failing', True).run(legacy_result) + legacy_result.stopTestRun() analyzed = self.get_failing(repo) self.assertEqual(2, analyzed.testsRun) - self.assertEqual(2, len(analyzed.failures)) + self.assertEqual(2, len(analyzed.errors)) self.assertEqual(set(['passing', 'missing']), - set([test[0].id() for test in analyzed.failures])) + set([test[0].id() for test in analyzed.errors])) + + def test_get_test_run_missing_keyerror(self): + repo = self.repo_impl.initialise(self.sample_url) + result = repo.get_inserter() + result.startTestRun() + result.stopTestRun() + inserted = result.get_id() + self.assertThat(lambda:repo.get_test_run(inserted - 1), + raises(KeyError)) def test_get_test_run(self): repo = self.repo_impl.initialise(self.sample_url) @@ -262,27 +319,87 @@ run = repo.get_test_run(inserted) self.assertNotEqual(None, run) - def test_get_subunit_from_test_run(self): + def test_get_latest_run(self): + repo = self.repo_impl.initialise(self.sample_url) + result = repo.get_inserter() + result.startTestRun() + inserted = result.stopTestRun() + run = repo.get_latest_run() + self.assertEqual(inserted, run.get_id()) + + def test_get_latest_run_empty_repo(self): + repo = self.repo_impl.initialise(self.sample_url) + self.assertRaises(KeyError, repo.get_latest_run) + + def test_get_test_run_get_id(self): repo = 
self.repo_impl.initialise(self.sample_url) result = repo.get_inserter() result.startTestRun() - make_test('testrepository.tests.test_repository.Case.method', True).run(result) inserted = result.stopTestRun() run = repo.get_test_run(inserted) + self.assertEqual(inserted, run.get_id()) + + def test_get_test_run_preserves_time(self): + self.skip('Fix me before releasing.') + # The test run outputs the time events that it received. + now = datetime(2001, 1, 1, 0, 0, 0, tzinfo=iso8601.Utc()) + second = timedelta(seconds=1) + repo = self.repo_impl.initialise(self.sample_url) + test_id = self.getUniqueString() + test = make_test(test_id, True) + result = repo.get_inserter() + result.startTestRun() + result.status(timestamp=now, test_id=test_id, test_status='inprogress') + result.status(timestamp=(now + 1 * second), test_id=test_id, test_status='success') + inserted = result.stopTestRun() + run = repo.get_test_run(inserted) + result = ExtendedTestResult() + run.get_test().run(result) + self.assertEqual( + [('time', now), + ('tags', set(), set()), + ('startTest', Wildcard), + ('time', now + 1 * second), + ('addSuccess', Wildcard), + ('stopTest', Wildcard), + ('tags', set(), set()), + ], + result._events) + + def test_get_failing_get_id(self): + repo = self.repo_impl.initialise(self.sample_url) + result = repo.get_inserter() + result.startTestRun() + result.stopTestRun() + run = repo.get_failing() + self.assertEqual(None, run.get_id()) + + def test_get_subunit_from_test_run(self): + repo = self.repo_impl.initialise(self.sample_url) + result = repo.get_inserter() + legacy_result = testtools.ExtendedToStreamDecorator(result) + legacy_result.startTestRun() + make_test('testrepository.tests.test_repository.Case.method', True).run(legacy_result) + legacy_result.stopTestRun() + inserted = result.get_id() + run = repo.get_test_run(inserted) as_subunit = run.get_subunit_stream() - self.assertThat(as_subunit.read(), DocTestMatches("""...test: testrepository.tests.test_repository.Case.method... + self.assertThat(as_subunit.read().decode('utf8'), + DocTestMatches("""...test: testrepository.tests.test_repository.Case.method... successful: testrepository.tests.test_repository.Case.method... 
""", doctest.ELLIPSIS)) def test_get_test_from_test_run(self): repo = self.repo_impl.initialise(self.sample_url) result = repo.get_inserter() - result.startTestRun() - make_test('testrepository.tests.test_repository.Case.method', True).run(result) - inserted = result.stopTestRun() + legacy_result = testtools.ExtendedToStreamDecorator(result) + legacy_result.startTestRun() + make_test('testrepository.tests.test_repository.Case.method', True).run(legacy_result) + legacy_result.stopTestRun() + inserted = result.get_id() run = repo.get_test_run(inserted) test = run.get_test() - result = TestResult() + result = testtools.StreamSummary() result.startTestRun() try: test.run(result) @@ -298,9 +415,24 @@ def test_inserted_test_times_known(self): repo = self.repo_impl.initialise(self.sample_url) result = repo.get_inserter() - result.startTestRun() + legacy_result = testtools.ExtendedToStreamDecorator(result) + legacy_result.startTestRun() test_name = 'testrepository.tests.test_repository.Case.method' - run_timed(test_name, 1, result) - result.stopTestRun() - self.assertEqual({test_name: 1.0}, + run_timed(test_name, 0.1, legacy_result) + legacy_result.stopTestRun() + self.assertEqual({test_name: 0.1}, repo.get_test_times([test_name])['known']) + + def test_get_test_ids(self): + repo = self.repo_impl.initialise(self.sample_url) + inserter = repo.get_inserter() + legacy_result = testtools.ExtendedToStreamDecorator(inserter) + legacy_result.startTestRun() + test_cases = [PlaceHolder(self.getUniqueString()) for r in range(5)] + for test_case in test_cases: + test_case.run(legacy_result) + legacy_result.stopTestRun() + run_id = inserter.get_id() + self.assertEqual(run_id, repo.latest_id()) + returned_ids = repo.get_test_ids(run_id) + self.assertEqual([test.id() for test in test_cases], returned_ids) diff -Nru testrepository-0.0.5/testrepository/tests/test_results.py testrepository-0.0.18/testrepository/tests/test_results.py --- testrepository-0.0.5/testrepository/tests/test_results.py 2010-11-11 01:58:47.000000000 +0000 +++ testrepository-0.0.18/testrepository/tests/test_results.py 2013-04-10 10:25:35.000000000 +0000 @@ -12,17 +12,52 @@ # license you chose for the specific language governing permissions and # limitations under that license. 
-from testtools import TestCase, TestResult +from datetime import ( + datetime, + timedelta, + ) +import sys -from testrepository.results import TestResultFilter +from testtools import TestCase +from testrepository.results import SummarizingResult -class ResultFilter(TestCase): - def test_addSuccess_increases_count(self): - result = TestResult() - filtered = TestResultFilter(result) - filtered.startTest(self) - filtered.addSuccess(self) - filtered.stopTest(self) - self.assertEqual(1, result.testsRun) +class TestSummarizingResult(TestCase): + + def test_empty(self): + result = SummarizingResult() + result.startTestRun() + result.stopTestRun() + self.assertEqual(0, result.testsRun) + self.assertEqual(0, result.get_num_failures()) + self.assertIs(None, result.get_time_taken()) + + def test_time_taken(self): + result = SummarizingResult() + now = datetime.now() + result.startTestRun() + result.status(timestamp=now) + result.status(timestamp=now + timedelta(seconds=5)) + result.stopTestRun() + self.assertEqual(5.0, result.get_time_taken()) + + def test_num_failures(self): + result = SummarizingResult() + result.startTestRun() + try: + 1/0 + except ZeroDivisionError: + error = sys.exc_info() + result.status(test_id='foo', test_status='fail') + result.status(test_id='foo', test_status='fail') + result.stopTestRun() + self.assertEqual(2, result.get_num_failures()) + + def test_tests_run(self): + result = SummarizingResult() + result.startTestRun() + for i in range(5): + result.status(test_id='foo', test_status='success') + result.stopTestRun() + self.assertEqual(5, result.testsRun) diff -Nru testrepository-0.0.5/testrepository/tests/test_setup.py testrepository-0.0.18/testrepository/tests/test_setup.py --- testrepository-0.0.5/testrepository/tests/test_setup.py 2010-12-27 03:53:33.000000000 +0000 +++ testrepository-0.0.18/testrepository/tests/test_setup.py 2013-02-06 10:01:04.000000000 +0000 @@ -36,7 +36,6 @@ stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True) output, _ = proc.communicate() - self.assertEqual(0, proc.returncode) self.assertThat(output, MatchesAny( # win32 DocTestMatches("""... @@ -49,3 +48,4 @@ ...bin/testr ... 
""", doctest.ELLIPSIS) )) + self.assertEqual(0, proc.returncode) diff -Nru testrepository-0.0.5/testrepository/tests/test_stubpackage.py testrepository-0.0.18/testrepository/tests/test_stubpackage.py --- testrepository-0.0.5/testrepository/tests/test_stubpackage.py 2010-02-12 03:13:49.000000000 +0000 +++ testrepository-0.0.18/testrepository/tests/test_stubpackage.py 2013-02-07 06:38:28.000000000 +0000 @@ -34,9 +34,9 @@ resource = StubPackageResource('foo', [('bar.py', 'woo')]) pkg = resource.getResource() self.addCleanup(resource.finishedWith, pkg) - self.assertEqual('', file(os.path.join(pkg.base, 'foo', + self.assertEqual('', open(os.path.join(pkg.base, 'foo', '__init__.py')).read()) - self.assertEqual('woo', file(os.path.join(pkg.base, 'foo', + self.assertEqual('woo', open(os.path.join(pkg.base, 'foo', 'bar.py')).read()) def test_no__init__(self): diff -Nru testrepository-0.0.5/testrepository/tests/test_testcommand.py testrepository-0.0.18/testrepository/tests/test_testcommand.py --- testrepository-0.0.5/testrepository/tests/test_testcommand.py 2010-12-19 11:00:03.000000000 +0000 +++ testrepository-0.0.18/testrepository/tests/test_testcommand.py 2013-07-17 02:53:08.000000000 +0000 @@ -14,18 +14,30 @@ """Tests for the testcommand module.""" +from io import BytesIO import os.path +import optparse +import re -from fixtures import Fixture -from testtools.matchers import MatchesException, Raises +from extras import try_import +import subunit +v2_avail = try_import('subunit.ByteStreamToStreamResult') +from testtools.compat import _b +from testtools.matchers import ( + Equals, + MatchesAny, + MatchesException, + raises, + ) +from testtools.testresult.doubles import ExtendedTestResult from testrepository.commands import run from testrepository.ui.model import UI from testrepository.repository import memory from testrepository.testcommand import TestCommand -from testrepository.tests import ResourcedTestCase +from testrepository.tests import ResourcedTestCase, Wildcard from testrepository.tests.stubpackage import TempDirResource -from testrepository.tests.test_repository import make_test, run_timed +from testrepository.tests.test_repository import run_timed class FakeTestCommand(TestCommand): @@ -43,7 +55,7 @@ self.dirty() ui = UI(options=options, args=args) ui.here = self.tempdir - return ui, TestCommand(ui, repository) + return ui, self.useFixture(TestCommand(ui, repository)) def get_test_ui_and_cmd2(self, options=(), args=()): self.dirty() @@ -60,38 +72,72 @@ def config_path(self): return os.path.join(self.tempdir, '.testr.conf') - def set_config(self, bytes): - stream = file(self.config_path(), 'wb') + def set_config(self, text): + stream = open(self.config_path(), 'wt') try: - stream.write(bytes) + stream.write(text) finally: stream.close() - def setup_repo(self, cmd, ui): - repo = cmd.repository_factory.initialise(ui.here) - inserter = repo.get_inserter() - inserter.startTestRun() - make_test('passing', True).run(inserter) - make_test('failing', False).run(inserter) - inserter.stopTestRun() - def test_takes_ui(self): ui = UI() ui.here = self.tempdir command = TestCommand(ui, None) self.assertEqual(command.ui, ui) + def test_TestCommand_is_a_fixture(self): + ui = UI() + ui.here = self.tempdir + command = TestCommand(ui, None) + command.setUp() + command.cleanUp() + + def test_TestCommand_get_run_command_outside_setUp_fails(self): + self.dirty() + ui = UI() + ui.here = self.tempdir + command = TestCommand(ui, None) + self.set_config('[DEFAULT]\ntest_command=foo\n') + 
self.assertThat(command.get_run_command, raises(TypeError)) + command.setUp() + command.cleanUp() + self.assertThat(command.get_run_command, raises(TypeError)) + + def test_TestCommand_cleanUp_disposes_instances(self): + ui, command = self.get_test_ui_and_cmd() + self.set_config( + '[DEFAULT]\ntest_command=foo\n' + 'instance_dispose=bar $INSTANCE_IDS\n') + command._instances.update([_b('baz'), _b('quux')]) + command.cleanUp() + command.setUp() + self.assertEqual([ + ('values', [('running', 'bar baz quux')]), + ('popen', ('bar baz quux',), {'shell': True}), + ('communicate',)], ui.outputs) + + def test_TestCommand_cleanUp_disposes_instances_fail_raises(self): + ui, command = self.get_test_ui_and_cmd() + ui.proc_results = [1] + self.set_config( + '[DEFAULT]\ntest_command=foo\n' + 'instance_dispose=bar $INSTANCE_IDS\n') + command._instances.update([_b('baz'), _b('quux')]) + self.assertThat(command.cleanUp, + raises(ValueError('Disposing of instances failed, return 1'))) + command.setUp() + def test_get_run_command_no_config_file_errors(self): ui, command = self.get_test_ui_and_cmd() self.assertThat(command.get_run_command, - Raises(MatchesException(ValueError('No .testr.conf config file')))) + raises(ValueError('No .testr.conf config file'))) def test_get_run_command_no_config_settings_errors(self): ui, command = self.get_test_ui_and_cmd() self.set_config('') self.assertThat(command.get_run_command, - Raises(MatchesException(ValueError( - 'No test_command option present in .testr.conf')))) + raises(ValueError( + 'No test_command option present in .testr.conf'))) def test_get_run_command_returns_fixture_makes_IDFILE(self): ui, command = self.get_test_ui_and_cmd() @@ -101,7 +147,7 @@ try: fixture.setUp() list_file_path = fixture.list_file_name - source = open(list_file_path, 'rb') + source = open(list_file_path, 'rt') try: list_file_content = source.read() finally: @@ -137,6 +183,28 @@ expected_cmd = 'foo ' self.assertEqual(expected_cmd, fixture.cmd) + def test_get_run_command_default_and_list_expands(self): + ui, command = self.get_test_ui_and_cmd() + if v2_avail: + buffer = BytesIO() + stream = subunit.StreamResultToBytes(buffer) + stream.status(test_id='returned', test_status='exists') + stream.status(test_id='ids', test_status='exists') + subunit_bytes = buffer.getvalue() + else: + subunit_bytes = _b('returned\nids\n') + ui.proc_outputs = [subunit_bytes] + ui.options = optparse.Values() + ui.options.parallel = True + ui.options.concurrency = 2 + self.set_config( + '[DEFAULT]\ntest_command=foo $IDLIST $LISTOPT\n' + 'test_id_list_default=whoo yea\n' + 'test_list_option=--list\n') + fixture = self.useFixture(command.get_run_command()) + expected_cmd = 'foo returned ids ' + self.assertEqual(expected_cmd, fixture.cmd) + def test_get_run_command_IDLIST_default_passed_normally(self): ui, command = self.get_test_ui_and_cmd() self.set_config( @@ -153,6 +221,16 @@ expected_cmd = 'foo ' self.assertEqual(expected_cmd, fixture.cmd) + def test_group_regex_option(self): + ui, command = self.get_test_ui_and_cmd() + self.set_config( + '[DEFAULT]\ntest_command=foo $IDOPTION\n' + 'test_id_option=--load-list $IDFILE\n' + 'group_regex=([^\\.]+\\.)+\n') + fixture = self.useFixture(command.get_run_command()) + self.assertEqual( + 'pkg.class.', fixture._group_callback('pkg.class.test_method')) + def test_extra_args_passed_in(self): ui, command = self.get_test_ui_and_cmd() self.set_config( @@ -162,6 +240,61 @@ expected_cmd = 'foo bar quux' self.assertEqual(expected_cmd, fixture.cmd) + def 
test_list_tests_requests_concurrency_instances(self): + # testr list-tests is non-parallel, so needs 1 instance. + # testr run triggering list-tests will want to run parallel on all, so + # avoid latency by asking for whatever concurrency is up front. + # This covers the case for non-listing runs as well, as the code path + # is common. + self.dirty() + ui = UI(options= [('concurrency', 2), ('parallel', True)]) + ui.here = self.tempdir + cmd = run.run(ui) + ui.set_command(cmd) + ui.proc_outputs = [_b('returned\ninstances\n')] + command = self.useFixture(TestCommand(ui, None)) + self.set_config( + '[DEFAULT]\ntest_command=foo $LISTOPT $IDLIST\ntest_id_list_default=whoo yea\n' + 'test_list_option=--list\n' + 'instance_provision=provision -c $INSTANCE_COUNT\n' + 'instance_execute=quux $INSTANCE_ID -- $COMMAND\n') + fixture = self.useFixture(command.get_run_command(test_ids=['1'])) + fixture.list_tests() + self.assertEqual(set([_b('returned'), _b('instances')]), command._instances) + self.assertEqual(set([]), command._allocated_instances) + self.assertThat(ui.outputs, MatchesAny(Equals([ + ('values', [('running', 'provision -c 2')]), + ('popen', ('provision -c 2',), {'shell': True, 'stdout': -1}), + ('communicate',), + ('values', [('running', 'quux instances -- foo --list whoo yea')]), + ('popen',('quux instances -- foo --list whoo yea',), + {'shell': True, 'stdin': -1, 'stdout': -1}), + ('communicate',)]), Equals([ + ('values', [('running', 'provision -c 2')]), + ('popen', ('provision -c 2',), {'shell': True, 'stdout': -1}), + ('communicate',), + ('values', [('running', 'quux returned -- foo --list whoo yea')]), + ('popen',('quux returned -- foo --list whoo yea',), + {'shell': True, 'stdin': -1, 'stdout': -1}), + ('communicate',)]))) + + def test_list_tests_uses_instances(self): + ui, command = self.get_test_ui_and_cmd() + self.set_config( + '[DEFAULT]\ntest_command=foo $LISTOPT $IDLIST\ntest_id_list_default=whoo yea\n' + 'test_list_option=--list\n' + 'instance_execute=quux $INSTANCE_ID -- $COMMAND\n') + fixture = self.useFixture(command.get_run_command()) + command._instances.add(_b('bar')) + fixture.list_tests() + self.assertEqual(set([_b('bar')]), command._instances) + self.assertEqual(set([]), command._allocated_instances) + self.assertEqual([ + ('values', [('running', 'quux bar -- foo --list whoo yea')]), + ('popen', ('quux bar -- foo --list whoo yea',), + {'shell': True, 'stdin': -1, 'stdout': -1}), ('communicate',)], + ui.outputs) + def test_list_tests_cmd(self): ui, command = self.get_test_ui_and_cmd() self.set_config( @@ -172,14 +305,31 @@ self.assertEqual(expected_cmd, fixture.list_cmd) def test_list_tests_parsing(self): + if v2_avail: + buffer = BytesIO() + stream = subunit.StreamResultToBytes(buffer) + stream.status(test_id='returned', test_status='exists') + stream.status(test_id='ids', test_status='exists') + subunit_bytes = buffer.getvalue() + else: + subunit_bytes = _b('returned\nids\n') ui, command = self.get_test_ui_and_cmd() - ui.proc_outputs = ['returned\nids\n'] + ui.proc_outputs = [subunit_bytes] self.set_config( '[DEFAULT]\ntest_command=foo $LISTOPT $IDLIST\ntest_id_list_default=whoo yea\n' 'test_list_option=--list\n') fixture = self.useFixture(command.get_run_command()) self.assertEqual(set(['returned', 'ids']), set(fixture.list_tests())) + def test_list_tests_nonzero_exit(self): + ui, command = self.get_test_ui_and_cmd() + ui.proc_results = [1] + self.set_config( + '[DEFAULT]\ntest_command=foo $LISTOPT $IDLIST\ntest_id_list_default=whoo yea\n' + 
'test_list_option=--list\n') + fixture = self.useFixture(command.get_run_command()) + self.assertThat(lambda:fixture.list_tests(), raises(ValueError)) + def test_partition_tests_smoke(self): repo = memory.RepositoryFactory().initialise('memory:') # Seed with 1 slow and 2 tests making up 2/3 the time. @@ -191,7 +341,8 @@ result.stopTestRun() ui, command = self.get_test_ui_and_cmd(repository=repo) self.set_config( - '[DEFAULT]\ntest_command=foo $IDLIST\n') + '[DEFAULT]\ntest_command=foo $IDLIST $LISTOPT\n' + 'test_list_option=--list\n') fixture = self.useFixture(command.get_run_command()) # partitioning by two generates 'slow' and the two fast ones as partitions # flushed out by equal numbers of unknown duration tests. @@ -206,3 +357,221 @@ self.assertTrue('fast2' in partitions[1]) self.assertEqual(3, len(partitions[0])) self.assertEqual(4, len(partitions[1])) + + def test_partition_tests_914359(self): + # When two partitions have the same duration, timed tests should be + # appended to the shortest partition. In theory this doesn't matter, + # but in practice, if a test is recorded with 0 duration (e.g. due to a + # bug), it is better to have them split out rather than all in one + # partition. 0 duration tests are unlikely to really be 0 duration. + repo = memory.RepositoryFactory().initialise('memory:') + # Seed with two 0-duration tests. + result = repo.get_inserter() + result.startTestRun() + run_timed("zero1", 0, result) + run_timed("zero2", 0, result) + result.stopTestRun() + ui, command = self.get_test_ui_and_cmd(repository=repo) + self.set_config( + '[DEFAULT]\ntest_command=foo $IDLIST\n') + fixture = self.useFixture(command.get_run_command()) + # partitioning by two should generate two one-entry partitions. + test_ids = frozenset(['zero1', 'zero2']) + partitions = fixture.partition_tests(test_ids, 2) + self.assertEqual(1, len(partitions[0])) + self.assertEqual(1, len(partitions[1])) + + def test_partition_tests_with_grouping(self): + repo = memory.RepositoryFactory().initialise('memory:') + result = repo.get_inserter() + result.startTestRun() + run_timed("TestCase1.slow", 3, result) + run_timed("TestCase2.fast1", 1, result) + run_timed("TestCase2.fast2", 1, result) + result.stopTestRun() + ui, command = self.get_test_ui_and_cmd(repository=repo) + self.set_config( + '[DEFAULT]\ntest_command=foo $IDLIST $LISTOPT\n' + 'test_list_option=--list\n') + fixture = self.useFixture(command.get_run_command()) + test_ids = frozenset(['TestCase1.slow', 'TestCase1.fast', + 'TestCase1.fast2', 'TestCase2.fast1', + 'TestCase3.test1', 'TestCase3.test2', + 'TestCase2.fast2', 'TestCase4.test', + 'testdir.testfile.TestCase5.test']) + regex = 'TestCase[0-5]' + def group_id(test_id, regex=re.compile('TestCase[0-5]')): + match = regex.match(test_id) + if match: + return match.group(0) + # There isn't a public way to define a group callback [as yet]. 
+ fixture._group_callback = group_id + partitions = fixture.partition_tests(test_ids, 2) + # Timed groups are deterministic: + self.assertTrue('TestCase2.fast1' in partitions[0]) + self.assertTrue('TestCase2.fast2' in partitions[0]) + self.assertTrue('TestCase1.slow' in partitions[1]) + self.assertTrue('TestCase1.fast' in partitions[1]) + self.assertTrue('TestCase1.fast2' in partitions[1]) + # Untimed groups just need to be kept together: + if 'TestCase3.test1' in partitions[0]: + self.assertTrue('TestCase3.test2' in partitions[0]) + if 'TestCase4.test' not in partitions[0]: + self.assertTrue('TestCase4.test' in partitions[1]) + if 'testdir.testfile.TestCase5.test' not in partitions[0]: + self.assertTrue('testdir.testfile.TestCase5.test' in partitions[1]) + + def test_run_tests_with_instances(self): + # when there are instances and no instance_execute, run_tests acts as + # normal. + ui, command = self.get_test_ui_and_cmd() + self.set_config( + '[DEFAULT]\ntest_command=foo $IDLIST\n') + command._instances.update([_b('foo'), _b('bar')]) + fixture = self.useFixture(command.get_run_command()) + procs = fixture.run_tests() + self.assertEqual([ + ('values', [('running', 'foo ')]), + ('popen', ('foo ',), {'shell': True, 'stdin': -1, 'stdout': -1})], + ui.outputs) + + def test_run_tests_with_existing_instances_configured(self): + # when there are instances present, they are pulled out for running + # tests. + ui, command = self.get_test_ui_and_cmd() + self.set_config( + '[DEFAULT]\ntest_command=foo $IDLIST\n' + 'instance_execute=quux $INSTANCE_ID -- $COMMAND\n') + command._instances.add(_b('bar')) + fixture = self.useFixture(command.get_run_command(test_ids=['1'])) + procs = fixture.run_tests() + self.assertEqual([ + ('values', [('running', 'quux bar -- foo 1')]), + ('popen', ('quux bar -- foo 1',), + {'shell': True, 'stdin': -1, 'stdout': -1})], + ui.outputs) + # No --parallel, so the one instance should have been allocated. + self.assertEqual(set([_b('bar')]), command._instances) + self.assertEqual(set([_b('bar')]), command._allocated_instances) + # And after the process is run, bar is returned for re-use. + procs[0].stdout.read() + procs[0].wait() + self.assertEqual(0, procs[0].returncode) + self.assertEqual(set([_b('bar')]), command._instances) + self.assertEqual(set(), command._allocated_instances) + + def test_run_tests_allocated_instances_skipped(self): + ui, command = self.get_test_ui_and_cmd() + self.set_config( + '[DEFAULT]\ntest_command=foo $IDLIST\n' + 'instance_execute=quux $INSTANCE_ID -- $COMMAND\n') + command._instances.update([_b('bar'), _b('baz')]) + command._allocated_instances.add(_b('baz')) + fixture = self.useFixture(command.get_run_command(test_ids=['1'])) + procs = fixture.run_tests() + self.assertEqual([ + ('values', [('running', 'quux bar -- foo 1')]), + ('popen', ('quux bar -- foo 1',), + {'shell': True, 'stdin': -1, 'stdout': -1})], + ui.outputs) + # No --parallel, so the one instance should have been allocated. + self.assertEqual(set([_b('bar'), _b('baz')]), command._instances) + self.assertEqual(set([_b('bar'), _b('baz')]), command._allocated_instances) + # And after the process is run, bar is returned for re-use. 
+ procs[0].wait() + procs[0].stdout.read() + self.assertEqual(0, procs[0].returncode) + self.assertEqual(set([_b('bar'), _b('baz')]), command._instances) + self.assertEqual(set([_b('baz')]), command._allocated_instances) + + def test_run_tests_list_file_in_FILES(self): + ui, command = self.get_test_ui_and_cmd() + self.set_config( + '[DEFAULT]\ntest_command=foo $IDFILE\n' + 'instance_execute=quux $INSTANCE_ID $FILES -- $COMMAND\n') + command._instances.add(_b('bar')) + fixture = self.useFixture(command.get_run_command(test_ids=['1'])) + list_file = fixture.list_file_name + procs = fixture.run_tests() + expected_cmd = 'quux bar %s -- foo %s' % (list_file, list_file) + self.assertEqual([ + ('values', [('running', expected_cmd)]), + ('popen', (expected_cmd,), + {'shell': True, 'stdin': -1, 'stdout': -1})], + ui.outputs) + # No --parallel, so the one instance should have been allocated. + self.assertEqual(set([_b('bar')]), command._instances) + self.assertEqual(set([_b('bar')]), command._allocated_instances) + # And after the process is run, bar is returned for re-use. + procs[0].stdout.read() + self.assertEqual(0, procs[0].returncode) + self.assertEqual(set([_b('bar')]), command._instances) + self.assertEqual(set(), command._allocated_instances) + + def test_filter_tags_parsing(self): + ui, command = self.get_test_ui_and_cmd() + self.set_config('[DEFAULT]\nfilter_tags=foo bar\n') + self.assertEqual(set(['foo', 'bar']), command.get_filter_tags()) + + def test_callout_concurrency(self): + ui, command = self.get_test_ui_and_cmd() + ui.proc_outputs = [_b('4')] + self.set_config( + '[DEFAULT]\ntest_run_concurrency=probe\n' + 'test_command=foo\n') + fixture = self.useFixture(command.get_run_command()) + self.assertEqual(4, fixture.callout_concurrency()) + self.assertEqual([ + ('popen', ('probe',), {'shell': True, 'stdin': -1, 'stdout': -1}), + ('communicate',)], ui.outputs) + + def test_callout_concurrency_failed(self): + ui, command = self.get_test_ui_and_cmd() + ui.proc_results = [1] + self.set_config( + '[DEFAULT]\ntest_run_concurrency=probe\n' + 'test_command=foo\n') + fixture = self.useFixture(command.get_run_command()) + self.assertThat(lambda:fixture.callout_concurrency(), raises( + ValueError("test_run_concurrency failed: exit code 1, stderr=''"))) + self.assertEqual([ + ('popen', ('probe',), {'shell': True, 'stdin': -1, 'stdout': -1}), + ('communicate',)], ui.outputs) + + def test_callout_concurrency_not_set(self): + ui, command = self.get_test_ui_and_cmd() + self.set_config( + '[DEFAULT]\n' + 'test_command=foo\n') + fixture = self.useFixture(command.get_run_command()) + self.assertEqual(None, fixture.callout_concurrency()) + self.assertEqual([], ui.outputs) + + def test_filter_tests_by_regex_only(self): + if v2_avail: + buffer = BytesIO() + stream = subunit.StreamResultToBytes(buffer) + stream.status(test_id='returned', test_status='exists') + stream.status(test_id='ids', test_status='exists') + subunit_bytes = buffer.getvalue() + else: + subunit_bytes = _b('returned\nids\n') + ui, command = self.get_test_ui_and_cmd() + ui.proc_outputs = [subunit_bytes] + self.set_config( + '[DEFAULT]\ntest_command=foo $LISTOPT $IDLIST\ntest_id_list_default=whoo yea\n' + 'test_list_option=--list\n') + filters = ['return'] + fixture = self.useFixture(command.get_run_command(test_filters=filters)) + self.assertEqual(['returned'], fixture.test_ids) + + def test_filter_tests_by_regex_supplied_ids(self): + ui, command = self.get_test_ui_and_cmd() + ui.proc_outputs = [_b('returned\nids\n')] + self.set_config( + 
'[DEFAULT]\ntest_command=foo $LISTOPT $IDLIST\ntest_id_list_default=whoo yea\n' + 'test_list_option=--list\n') + filters = ['return'] + fixture = self.useFixture(command.get_run_command( + test_ids=['return', 'of', 'the', 'king'], test_filters=filters)) + self.assertEqual(['return'], fixture.test_ids) diff -Nru testrepository-0.0.5/testrepository/tests/test_testr.py testrepository-0.0.18/testrepository/tests/test_testr.py --- testrepository-0.0.5/testrepository/tests/test_testr.py 2010-12-26 18:05:22.000000000 +0000 +++ testrepository-0.0.18/testrepository/tests/test_testr.py 2013-02-06 09:08:46.000000000 +0000 @@ -61,12 +61,12 @@ # Make a copy of the testr script as running in place uses the current # library, not the stub library. execpath = os.path.join(stub.base, 'testr') - source = file(path, 'rb') + source = open(path, 'rb') try: testr_contents = source.read() finally: source.close() - target = file(execpath, 'wb') + target = open(execpath, 'wb') try: target.write(testr_contents) finally: diff -Nru testrepository-0.0.5/testrepository/tests/test_ui.py testrepository-0.0.18/testrepository/tests/test_ui.py --- testrepository-0.0.5/testrepository/tests/test_ui.py 2010-12-26 18:05:22.000000000 +0000 +++ testrepository-0.0.18/testrepository/tests/test_ui.py 2013-04-11 11:13:52.000000000 +0000 @@ -1,11 +1,11 @@ # # Copyright (c) 2009, 2010 Testrepository Contributors -# +# # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the # project source as Apache-2.0 and BSD. You may not use this file except in # compliance with one of these two licences. -# +# # Unless required by applicable law or agreed to in writing, software # distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the @@ -14,18 +14,21 @@ """Tests for UI support logic and the UI contract.""" -from cStringIO import StringIO +from io import BytesIO, TextIOWrapper import optparse import subprocess import sys +from fixtures import EnvironmentVariable +from testtools.compat import _b, _u +from testtools.content import text_content from testtools.matchers import raises from testrepository import arguments, commands -import testrepository.arguments.command from testrepository.commands import load +from testrepository.repository import memory +from testrepository.tests import ResourcedTestCase, StubTestCommand from testrepository.ui import cli, decorator, model -from testrepository.tests import ResourcedTestCase def cli_ui_factory(input_streams=None, options=(), args=()): @@ -34,12 +37,12 @@ # something - however, may need to be cli specific tests at that # point. 
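The two regex-filter tests just above pin down how test_filters interact with listing: each filter is a regular expression and only ids it matches survive, whether the ids came from --list output or were supplied explicitly. A compact sketch of that behaviour; the helper name is made up, and using re.search for the matching semantics is an assumption:

    import re

    def filter_test_ids(test_ids, test_filters):
        if not test_filters:
            return list(test_ids)
        patterns = [re.compile(expr) for expr in test_filters]
        return [test_id for test_id in test_ids
                if any(pattern.search(test_id) for pattern in patterns)]

filter_test_ids(['returned', 'ids'], ['return']) gives ['returned'], and filtering the supplied ids ['return', 'of', 'the', 'king'] leaves just ['return'], matching both assertions above.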
raise NotImplementedError(cli_ui_factory) - stdout = StringIO() + stdout = TextIOWrapper(BytesIO(), line_buffering=True) if input_streams: - stdin = StringIO(input_streams[0][1]) + stdin = TextIOWrapper(BytesIO(input_streams[0][1])) else: - stdin = StringIO() - stderr = StringIO() + stdin = TextIOWrapper(BytesIO()) + stderr = TextIOWrapper(BytesIO(), line_buffering=True) argv = list(args) for option, value in options: # only bool handled so far @@ -75,7 +78,7 @@ ui = self.ui_factory() def test_factory_input_stream_args(self): - ui = self.ui_factory([('subunit', 'value')]) + ui = self.ui_factory([('subunit', _b('value'))]) def test_here(self): ui = self.get_test_ui() @@ -84,20 +87,21 @@ def test_iter_streams_load_stdin_use_case(self): # A UI can be asked for the streams that a command has indicated it # accepts, which is what load < foo will require. - ui = self.ui_factory([('subunit', 'test: foo\nsuccess: foo\n')]) + ui = self.ui_factory([('subunit', _b('test: foo\nsuccess: foo\n'))]) cmd = commands.Command(ui) cmd.input_streams = ['subunit+'] ui.set_command(cmd) results = [] for result in ui.iter_streams('subunit'): results.append(result.read()) - self.assertEqual(['test: foo\nsuccess: foo\n'], results) + self.assertEqual([_b('test: foo\nsuccess: foo\n')], results) def test_iter_streams_unexpected_type_raises(self): ui = self.get_test_ui() self.assertThat(lambda: ui.iter_streams('subunit'), raises(KeyError)) def test_output_error(self): + self.useFixture(EnvironmentVariable('TESTR_PDB')) try: raise Exception('fooo') except Exception: @@ -108,18 +112,18 @@ def test_output_rest(self): # output some ReST - used for help and docs. ui = self.get_test_ui() - ui.output_rest('') + ui.output_rest(_u('')) def test_output_stream(self): # a stream of bytes can be output. ui = self.get_test_ui() - ui.output_stream(StringIO()) + ui.output_stream(BytesIO()) def test_output_table(self): # output_table shows a table. ui = self.get_test_ui() ui.output_table([('col1', 'col2'), ('row1c1','row1c2')]) - + def test_output_tests(self): # output_tests can be called, and takes a list of tests to output. ui = self.get_test_ui() @@ -130,6 +134,12 @@ ui = self.get_test_ui() ui.output_values([('foo', 1), ('bar', 'quux')]) + def test_output_summary(self): + # output_summary can be called, takes success boolean and list of + # things to output. + ui = self.get_test_ui() + ui.output_summary(True, 1, None, 1, None, []) + def test_set_command(self): # All ui objects can be given their command. ui = self.ui_factory() @@ -160,6 +170,12 @@ self.assertEqual(True, ui.set_command(cmd)) self.assertEqual({'foo':[load.load]}, ui.arguments) + def test_set_command_with_no_name_works(self): + # Degrade gracefully if the name attribute has not been set. + ui = self.ui_factory() + cmd = commands.Command(ui) + self.assertEqual(True, ui.set_command(cmd)) + def test_options_at_options(self): ui = self.get_test_ui() self.assertEqual(False, ui.options.quiet) @@ -210,10 +226,23 @@ out, err = proc.communicate() def test_make_result(self): - # make_result should return a TestResult. + # make_result should return a StreamResult and a summary result. + ui = self.ui_factory() + ui.set_command(commands.Command(ui)) + result, summary = ui.make_result(lambda: None, StubTestCommand()) + result.startTestRun() + result.status() + result.stopTestRun() + summary.wasSuccessful() + + def test_make_result_previous_run(self): + # make_result can take a previous run. 
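The make_result tests above capture the new contract: instead of a single TestResult, a UI now hands back a pair of (stream result, summary); the caller feeds status events to the first and asks the second wasSuccessful() once the run has stopped. A minimal sketch of a caller written against that contract (the helper name and the events iterable are illustrative):

    def run_and_summarise(ui, test_command, events, previous_run=None):
        result, summary = ui.make_result(lambda: None, test_command,
                                         previous_run=previous_run)
        result.startTestRun()
        try:
            for event in events:
                result.status(**event)   # e.g. {'test_id': 'foo', 'test_status': 'fail'}
        finally:
            result.stopTestRun()
        return summary.wasSuccessful()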
ui = self.ui_factory() ui.set_command(commands.Command(ui)) - result = ui.make_result(lambda: None) + result, summary = ui.make_result( + lambda: None, StubTestCommand(), + previous_run=memory.Repository().get_failing()) result.startTestRun() + result.status() result.stopTestRun() - self.assertEqual(0, result.testsRun) + summary.wasSuccessful() diff -Nru testrepository-0.0.5/testrepository/tests/ui/test_cli.py testrepository-0.0.18/testrepository/tests/ui/test_cli.py --- testrepository-0.0.5/testrepository/tests/ui/test_cli.py 2010-11-11 01:58:47.000000000 +0000 +++ testrepository-0.0.18/testrepository/tests/ui/test_cli.py 2013-07-13 11:55:12.000000000 +0000 @@ -16,39 +16,60 @@ """Tests for UI support logic and the UI contract.""" import doctest -from cStringIO import StringIO +from io import BytesIO, StringIO, TextIOWrapper +import optparse +import os import sys +from textwrap import dedent +from fixtures import EnvironmentVariable +import subunit +import testtools from testtools import TestCase -from testtools.matchers import DocTestMatches +from testtools.compat import _b, _u +from testtools.matchers import ( + DocTestMatches, + MatchesException, + ) from testrepository import arguments from testrepository import commands +from testrepository.commands import run from testrepository.ui import cli -from testrepository.tests import ResourcedTestCase +from testrepository.tests import ResourcedTestCase, StubTestCommand + + +def get_test_ui_and_cmd(options=(), args=()): + stdout = TextIOWrapper(BytesIO(), 'utf8', line_buffering=True) + stdin = StringIO() + stderr = StringIO() + argv = list(args) + for option, value in options: + # only bool handled so far + if value: + argv.append('--%s' % option) + ui = cli.UI(argv, stdin, stdout, stderr) + cmd = run.run(ui) + ui.set_command(cmd) + return ui, cmd class TestCLIUI(ResourcedTestCase): - def get_test_ui_and_cmd(self): - stdout = StringIO() - stdin = StringIO() - stderr = StringIO() - ui = cli.UI([], stdin, stdout, stderr) - cmd = commands.Command(ui) - ui.set_command(cmd) - return ui, cmd + def setUp(self): + super(TestCLIUI, self).setUp() + self.useFixture(EnvironmentVariable('TESTR_PDB')) def test_construct(self): - stdout = StringIO() - stdin = StringIO() - stderr = StringIO() + stdout = BytesIO() + stdin = BytesIO() + stderr = BytesIO() cli.UI([], stdin, stdout, stderr) def test_stream_comes_from_stdin(self): - stdout = StringIO() - stdin = StringIO('foo\n') - stderr = StringIO() + stdout = BytesIO() + stdin = BytesIO(_b('foo\n')) + stderr = BytesIO() ui = cli.UI([], stdin, stdout, stderr) cmd = commands.Command(ui) cmd.input_streams = ['subunit'] @@ -56,12 +77,27 @@ results = [] for stream in ui.iter_streams('subunit'): results.append(stream.read()) - self.assertEqual(['foo\n'], results) + self.assertEqual([_b('foo\n')], results) + + def test_stream_type_honoured(self): + # The CLI UI has only one stdin, so when a command asks for a stream + # type it didn't declare, no streams are found. 
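The test beginning just above relies on one rule in the CLI UI (implemented later in this diff in cli.py): only the first stream type a command declares can be satisfied from stdin, and the '+'/'?' cardinality suffix on that declaration is ignored when comparing. The predicate below restates that rule on its own; the function name is invented:

    def accepts_from_stdin(declared_streams, requested_type):
        # Only the first declared input stream can come from stdin; a trailing
        # '+' or '?' on the declaration ('subunit+', 'interactive?') is ignored.
        if not declared_streams:
            return False
        first = declared_streams[0]
        return requested_type in (first, first.rstrip('+?'))

With input_streams ['subunit+', 'interactive?'], asking for 'interactive' returns False, so iter_streams('interactive') yields nothing, which is exactly what the test asserts.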
+ stdout = BytesIO() + stdin = BytesIO(_b('foo\n')) + stderr = BytesIO() + ui = cli.UI([], stdin, stdout, stderr) + cmd = commands.Command(ui) + cmd.input_streams = ['subunit+', 'interactive?'] + ui.set_command(cmd) + results = [] + for stream in ui.iter_streams('interactive'): + results.append(stream.read()) + self.assertEqual([], results) def test_dash_d_sets_here_option(self): - stdout = StringIO() - stdin = StringIO('foo\n') - stderr = StringIO() + stdout = BytesIO() + stdin = BytesIO(_b('foo\n')) + stderr = BytesIO() ui = cli.UI(['-d', '/nowhere/'], stdin, stdout, stderr) cmd = commands.Command(ui) ui.set_command(cmd) @@ -80,57 +116,84 @@ ui.output_error(err_tuple) self.assertThat(stderr.getvalue(), DocTestMatches(expected)) + def test_error_enters_pdb_when_TESTR_PDB_set(self): + os.environ['TESTR_PDB'] = '1' + try: + raise Exception('fooo') + except Exception: + err_tuple = sys.exc_info() + expected = dedent("""\ + File "...test_cli.py", line ..., in ...pdb_when_TESTR_PDB_set + raise Exception('fooo') + + fooo + """) + stdout = StringIO() + stdin = StringIO(_u('c\n')) + stderr = StringIO() + ui = cli.UI([], stdin, stdout, stderr) + ui.output_error(err_tuple) + self.assertThat(stderr.getvalue(), + DocTestMatches(expected, doctest.ELLIPSIS)) + def test_outputs_rest_to_stdout(self): - ui, cmd = self.get_test_ui_and_cmd() - ui.output_rest('topic\n=====\n') - self.assertEqual('topic\n=====\n', ui._stdout.getvalue()) + ui, cmd = get_test_ui_and_cmd() + ui.output_rest(_u('topic\n=====\n')) + self.assertEqual(_b('topic\n=====\n'), ui._stdout.buffer.getvalue()) def test_outputs_results_to_stdout(self): - ui, cmd = self.get_test_ui_and_cmd() + ui, cmd = get_test_ui_and_cmd() class Case(ResourcedTestCase): def method(self): self.fail('quux') - result = ui.make_result(lambda: None) - Case('method').run(result) - self.assertThat(ui._stdout.getvalue(),DocTestMatches( - """====================================================================== + result, summary = ui.make_result(lambda: None, StubTestCommand()) + result.startTestRun() + Case('method').run(testtools.ExtendedToStreamDecorator(result)) + result.stopTestRun() + self.assertThat(ui._stdout.buffer.getvalue().decode('utf8'), + DocTestMatches("""\ +====================================================================== FAIL: testrepository.tests.ui.test_cli.Case.method ---------------------------------------------------------------------- -Text attachment: traceback ------------- -Traceback (most recent call last): -... +...Traceback (most recent call last):... File "...test_cli.py", line ..., in method - self.fail(\'quux\') -AssertionError: quux ------------- + self.fail(\'quux\')... +AssertionError: quux... 
""", doctest.ELLIPSIS)) def test_outputs_stream_to_stdout(self): - ui, cmd = self.get_test_ui_and_cmd() - stream = StringIO("Foo \n bar") + ui, cmd = get_test_ui_and_cmd() + stream = BytesIO(_b("Foo \n bar")) ui.output_stream(stream) - self.assertEqual("Foo \n bar", ui._stdout.getvalue()) + self.assertEqual(_b("Foo \n bar"), ui._stdout.buffer.getvalue()) def test_outputs_tables_to_stdout(self): - ui, cmd = self.get_test_ui_and_cmd() + ui, cmd = get_test_ui_and_cmd() ui.output_table([('foo', 1), ('b', 'quux')]) - self.assertEqual('foo 1\n--- ----\nb quux\n', - ui._stdout.getvalue()) + self.assertEqual(_b('foo 1\n--- ----\nb quux\n'), + ui._stdout.buffer.getvalue()) def test_outputs_tests_to_stdout(self): - ui, cmd = self.get_test_ui_and_cmd() + ui, cmd = get_test_ui_and_cmd() ui.output_tests([self, self.__class__('test_construct')]) self.assertThat( - ui._stdout.getvalue(), + ui._stdout.buffer.getvalue().decode('utf8'), DocTestMatches( '...TestCLIUI.test_outputs_tests_to_stdout\n' '...TestCLIUI.test_construct\n', doctest.ELLIPSIS)) def test_outputs_values_to_stdout(self): - ui, cmd = self.get_test_ui_and_cmd() + ui, cmd = get_test_ui_and_cmd() ui.output_values([('foo', 1), ('bar', 'quux')]) - self.assertEqual('foo=1, bar=quux\n', ui._stdout.getvalue()) + self.assertEqual(_b('foo=1, bar=quux\n'), ui._stdout.buffer.getvalue()) + + def test_outputs_summary_to_stdout(self): + ui, cmd = get_test_ui_and_cmd() + summary = [True, 1, None, 2, None, []] + expected_summary = ui._format_summary(*summary) + ui.output_summary(*summary) + self.assertEqual(_b("%s\n" % (expected_summary,)), + ui._stdout.buffer.getvalue()) def test_parse_error_goes_to_stderr(self): stdout = StringIO() @@ -151,15 +214,116 @@ ui.set_command(cmd) self.assertEqual("Unexpected arguments: ['one']\n", stderr.getvalue()) - def test_parse_after_double_dash_are_arguments(self): + def test_parse_options_after_double_dash_are_arguments(self): + stdout = BytesIO() + stdin = BytesIO() + stderr = BytesIO() + ui = cli.UI(['one', '--', '--two', 'three'], stdin, stdout, stderr) + cmd = commands.Command(ui) + cmd.args = [arguments.string.StringArgument('myargs', max=None), + arguments.doubledash.DoubledashArgument(), + arguments.string.StringArgument('subargs', max=None)] + ui.set_command(cmd) + self.assertEqual({ + 'doubledash': ['--'], + 'myargs': ['one'], + 'subargs': ['--two', 'three']}, + ui.arguments) + + def test_double_dash_passed_to_arguments(self): + class CaptureArg(arguments.AbstractArgument): + def _parse_one(self, arg): + return arg + stdout = BytesIO() + stdin = BytesIO() + stderr = BytesIO() + ui = cli.UI(['one', '--', '--two', 'three'], stdin, stdout, stderr) + cmd = commands.Command(ui) + cmd.args = [CaptureArg('args', max=None)] + ui.set_command(cmd) + self.assertEqual({'args':['one', '--', '--two', 'three']}, ui.arguments) + + def test_run_subunit_option(self): + ui, cmd = get_test_ui_and_cmd(options=[('subunit', True)]) + self.assertEqual(True, ui.options.subunit) + + def test_dash_dash_help_shows_help(self): stdout = StringIO() stdin = StringIO() stderr = StringIO() - ui = cli.UI(['one', '--', '--two', 'three'], stdin, stdout, stderr) + ui = cli.UI(['--help'], stdin, stdout, stderr) cmd = commands.Command(ui) - cmd.args = [arguments.string.StringArgument('args', max=None)] - ui.set_command(cmd) - self.assertEqual({'args':['one', '--two', 'three']}, ui.arguments) + cmd.args = [arguments.string.StringArgument('foo')] + cmd.name = "bar" + # By definition SystemExit is not caught by 'except Exception'. 
+ try: + ui.set_command(cmd) + except SystemExit: + exc_info = sys.exc_info() + self.assertThat(exc_info, MatchesException(SystemExit(0))) + else: + self.fail('ui.set_command did not raise') + self.assertThat(stdout.getvalue(), + DocTestMatches("""Usage: run.py bar [options] foo +... +A command that can be run... +... + -d HERE, --here=HERE... +...""", doctest.ELLIPSIS)) + +class TestCLISummary(TestCase): + + def get_summary(self, successful, tests, tests_delta, time, time_delta, values): + """Get the summary that would be output for successful & values.""" + ui, cmd = get_test_ui_and_cmd() + return ui._format_summary( + successful, tests, tests_delta, time, time_delta, values) + + def test_success_only(self): + x = self.get_summary(True, None, None, None, None, []) + self.assertEqual('PASSED', x) + + def test_failure_only(self): + x = self.get_summary(False, None, None, None, None, []) + self.assertEqual('FAILED', x) + + def test_time(self): + x = self.get_summary(True, None, None, 3.4, None, []) + self.assertEqual('Ran tests in 3.400s\nPASSED', x) + + def test_time_with_delta(self): + x = self.get_summary(True, None, None, 3.4, 0.1, []) + self.assertEqual('Ran tests in 3.400s (+0.100s)\nPASSED', x) + + def test_tests_run(self): + x = self.get_summary(True, 34, None, None, None, []) + self.assertEqual('Ran 34 tests\nPASSED', x) + + def test_tests_run_with_delta(self): + x = self.get_summary(True, 34, 5, None, None, []) + self.assertEqual('Ran 34 (+5) tests\nPASSED', x) + + def test_tests_and_time(self): + x = self.get_summary(True, 34, -5, 3.4, 0.1, []) + self.assertEqual('Ran 34 (-5) tests in 3.400s (+0.100s)\nPASSED', x) + + def test_other_values(self): + x = self.get_summary( + True, None, None, None, None, [('failures', 12, -1), ('errors', 13, 2)]) + self.assertEqual('PASSED (failures=12 (-1), errors=13 (+2))', x) + + def test_values_no_delta(self): + x = self.get_summary( + True, None, None, None, None, + [('failures', 12, None), ('errors', 13, None)]) + self.assertEqual('PASSED (failures=12, errors=13)', x) + + def test_combination(self): + x = self.get_summary( + True, 34, -5, 3.4, 0.1, [('failures', 12, -1), ('errors', 13, 2)]) + self.assertEqual( + ('Ran 34 (-5) tests in 3.400s (+0.100s)\n' + 'PASSED (failures=12 (-1), errors=13 (+2))'), x) class TestCLITestResult(TestCase): @@ -171,11 +335,19 @@ except ZeroDivisionError: return sys.exc_info() - def make_result(self, stream=None): + def make_result(self, stream=None, argv=None, filter_tags=None): if stream is None: - stream = StringIO() - ui = cli.UI([], None, stream, None) - return ui.make_result(lambda: None) + stream = BytesIO() + argv = argv or [] + ui = cli.UI(argv, None, stream, None) + cmd = commands.Command(ui) + cmd.options = [ + optparse.Option("--subunit", action="store_true", + default=False, help="Display results in subunit format."), + ] + ui.set_command(cmd) + return ui.make_result( + lambda: None, StubTestCommand(filter_tags=filter_tags)) def test_initial_stream(self): # CLITestResult.__init__ does not do anything to the stream it is @@ -188,32 +360,83 @@ # CLITestResult formats errors by giving them a big fat line, a title # made up of their 'label' and the name of the test, another different # big fat line, and then the actual error itself. 
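The _format_error expectations that follow are purely about layout: a 70-character '=' rule, 'LABEL: test id', an optional 'tags: ...' line, a 70-character '-' rule, then the error text itself. A standalone sketch of that layout (the real method lives on CLITestResult later in this diff):

    def format_error(label, test_id, error_text, test_tags=()):
        sep1 = '=' * 70 + '\n'
        sep2 = '-' * 70 + '\n'
        tags = ' '.join(test_tags)
        tag_line = 'tags: %s\n' % tags if tags else ''
        return ''.join([sep1, '%s: %s\n' % (label, test_id), tag_line, sep2, error_text])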
- result = self.make_result() + result = self.make_result()[0] error = result._format_error('label', self, 'error text') expected = '%s%s: %s\n%s%s' % ( result.sep1, 'label', self.id(), result.sep2, 'error text') self.assertThat(error, DocTestMatches(expected)) - def test_addError_outputs_error(self): - # CLITestResult.addError outputs the given error immediately to the - # stream. - stream = StringIO() - result = self.make_result(stream) - error = self.make_exc_info() - error_text = result._err_details_to_string(self, error) - result.addError(self, error) - self.assertThat( - stream.getvalue(), - DocTestMatches(result._format_error('ERROR', self, error_text))) + def test_format_error_includes_tags(self): + result = self.make_result()[0] + error = result._format_error('label', self, 'error text', set(['foo'])) + expected = '%s%s: %s\ntags: foo\n%s%s' % ( + result.sep1, 'label', self.id(), result.sep2, 'error text') + self.assertThat(error, DocTestMatches(expected)) - def test_addFailure_outputs_failure(self): - # CLITestResult.addError outputs the given error immediately to the - # stream. + def test_addFail_outputs_error(self): + # CLITestResult.status test_status='fail' outputs the given error + # immediately to the stream. stream = StringIO() - result = self.make_result(stream) + result = self.make_result(stream)[0] error = self.make_exc_info() - error_text = result._err_details_to_string(self, error) - result.addFailure(self, error) + error_text = 'foo\nbar\n' + result.startTestRun() + result.status(test_id=self.id(), test_status='fail', eof=True, + file_name='traceback', mime_type='text/plain;charset=utf8', + file_bytes=error_text.encode('utf8')) self.assertThat( stream.getvalue(), DocTestMatches(result._format_error('FAIL', self, error_text))) + + def test_addFailure_handles_string_encoding(self): + # CLITestResult.addFailure outputs the given error handling non-ascii + # characters. + # Lets say we have bytes output, not string for some reason. + stream = BytesIO() + result = self.make_result(stream)[0] + result.startTestRun() + result.status(test_id='foo', test_status='fail', file_name='traceback', + mime_type='text/plain;charset=utf8', + file_bytes=b'-->\xe2\x80\x9c<--', eof=True) + pattern = _u("...-->?<--...") + self.assertThat( + stream.getvalue().decode('utf8'), + DocTestMatches(pattern, doctest.ELLIPSIS)) + + def test_subunit_output(self): + bytestream = BytesIO() + stream = TextIOWrapper(bytestream, 'utf8', line_buffering=True) + result = self.make_result(stream, argv=['--subunit'])[0] + result.startTestRun() + result.stopTestRun() + self.assertEqual(b'', bytestream.getvalue()) + + def test_make_result_tag_filter(self): + stream = StringIO() + result, summary = self.make_result( + stream, filter_tags=set(['worker-0'])) + # Generate a bunch of results with tags in the same events that + # testtools generates them. 
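The tag-filter test being set up here checks one counting rule: a failure is always printed and counted, while "uninteresting" outcomes (success, xfail, uxsuccess, skip) that carry a filtered tag such as worker-0 are dropped from the 'Ran N tests' figure. That rule on its own, as a sketch (the function name is invented; the state set mirrors filterable_states in cli.py below):

    FILTERABLE_STATES = frozenset(['success', 'uxsuccess', 'xfail', 'skip'])

    def counts_towards_run(test_status, test_tags, filter_tags):
        if test_status not in FILTERABLE_STATES:
            return True              # failures and errors always count
        if test_tags and set(test_tags) & set(filter_tags or ()):
            return False             # filtered worker housekeeping outcome
        return True

Applying that to the five tagged events in the test leaves only the failing test counted, hence 'Ran 1 tests' in the expected output.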
+ tags = set(['worker-0']) + result.startTestRun() + result.status(test_id='pass', test_status='inprogress') + result.status(test_id='pass', test_status='success', test_tags=tags) + result.status(test_id='fail', test_status='inprogress') + result.status(test_id='fail', test_status='fail', test_tags=tags) + result.status(test_id='xfail', test_status='inprogress') + result.status(test_id='xfail', test_status='xfail', test_tags=tags) + result.status(test_id='uxsuccess', test_status='inprogress') + result.status( + test_id='uxsuccess', test_status='uxsuccess', test_tags=tags) + result.status(test_id='skip', test_status='inprogress') + result.status(test_id='skip', test_status='skip', test_tags=tags) + result.stopTestRun() + self.assertEqual("""\ +====================================================================== +FAIL: fail +tags: worker-0 +---------------------------------------------------------------------- +Ran 1 tests +FAILED (id=None, failures=1, skips=1) +""", stream.getvalue()) + diff -Nru testrepository-0.0.5/testrepository/ui/cli.py testrepository-0.0.18/testrepository/ui/cli.py --- testrepository-0.0.5/testrepository/ui/cli.py 2010-12-26 18:05:22.000000000 +0000 +++ testrepository-0.0.18/testrepository/ui/cli.py 2013-04-13 16:32:47.000000000 +0000 @@ -1,11 +1,11 @@ # # Copyright (c) 2009 Testrepository Contributors -# +# # Licensed under either the Apache License, Version 2.0 or the BSD 3-clause # license at the users choice. A copy of both licenses are available in the # project source as Apache-2.0 and BSD. You may not use this file except in # compliance with one of these two licences. -# +# # Unless required by applicable law or agreed to in writing, software # distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the @@ -14,39 +14,68 @@ """A command line UI for testrepository.""" -from optparse import OptionParser +import io import os import signal +import subunit import sys +from extras import try_import +v2_avail = try_import('subunit.ByteStreamToStreamResult') +import testtools +from testtools import ExtendedToStreamDecorator, StreamToExtendedDecorator +from testtools.compat import unicode_output_stream, _u + from testrepository import ui +from testrepository.commands import get_command_parser class CLITestResult(ui.BaseUITestResult): """A TestResult for the CLI.""" - def __init__(self, ui, get_id, stream): - """Construct a CLITestResult writing to stream.""" - super(CLITestResult, self).__init__(ui, get_id) - self.stream = stream - self.sep1 = u'=' * 70 + '\n' - self.sep2 = u'-' * 70 + '\n' - - def _format_error(self, label, test, error_text): - return u''.join([ + def __init__(self, ui, get_id, stream, previous_run=None, filter_tags=None): + """Construct a CLITestResult writing to stream. + + :param filter_tags: Tags that should be used to filter tests out. When + a tag in this set is present on a test outcome, the test is not + counted towards the test run count. If the test errors, then it is + still counted and the error is still shown. 
+ """ + super(CLITestResult, self).__init__(ui, get_id, previous_run) + self.stream = unicode_output_stream(stream) + self.sep1 = _u('=' * 70 + '\n') + self.sep2 = _u('-' * 70 + '\n') + self.filter_tags = filter_tags or frozenset() + self.filterable_states = set(['success', 'uxsuccess', 'xfail', 'skip']) + + def _format_error(self, label, test, error_text, test_tags=None): + test_tags = test_tags or () + tags = _u(' ').join(test_tags) + if tags: + tags = _u('tags: %s\n') % tags + return _u('').join([ self.sep1, - u'%s: %s\n' % (label, test.id()), + _u('%s: %s\n') % (label, test.id()), + tags, self.sep2, error_text, ]) - def addError(self, test, err=None, details=None): - super(CLITestResult, self).addError(test, err=err, details=details) - self.stream.write(self._format_error(u'ERROR', *(self.errors[-1]))) - - def addFailure(self, test, err=None, details=None): - super(CLITestResult, self).addFailure(test, err=err, details=details) - self.stream.write(self._format_error(u'FAIL', *(self.failures[-1]))) + def status(self, test_id=None, test_status=None, test_tags=None, + runnable=True, file_name=None, file_bytes=None, eof=False, + mime_type=None, route_code=None, timestamp=None): + super(CLITestResult, self).status(test_id=test_id, + test_status=test_status, test_tags=test_tags, runnable=runnable, + file_name=file_name, file_bytes=file_bytes, eof=eof, + mime_type=mime_type, route_code=route_code, timestamp=timestamp) + if test_status == 'fail': + self.stream.write( + self._format_error(_u('FAIL'), *(self._summary.errors[-1]), + test_tags=test_tags)) + if test_status not in self.filterable_states: + return + if test_tags and test_tags.intersection(self.filter_tags): + self._summary.testsRun -= 1 class UI(ui.AbstractUI): @@ -66,23 +95,72 @@ self._stderr = stderr def _iter_streams(self, stream_type): - yield self._stdin + # Only the first stream declared in a command can be accepted at the + # moment - as there is only one stdin and alternate streams are not yet + # configurable in the CLI. + first_stream_type = self.cmd.input_streams[0] + if (stream_type != first_stream_type + and stream_type != first_stream_type[:-1]): + return + yield subunit.make_stream_binary(self._stdin) - def make_result(self, get_id): - return CLITestResult(self, get_id, self._stdout) + def make_result(self, get_id, test_command, previous_run=None): + if getattr(self.options, 'subunit', False): + if v2_avail: + serializer = subunit.StreamResultToBytes(self._stdout) + else: + serializer = StreamToExtendedDecorator( + subunit.TestProtocolClient(self._stdout)) + # By pass user transforms - just forward it all, + result = serializer + # and interpret everything as success. + summary = testtools.StreamSummary() + summary.startTestRun() + summary.stopTestRun() + return result, summary + else: + # Apply user defined transforms. + filter_tags = test_command.get_filter_tags() + output = CLITestResult(self, get_id, self._stdout, previous_run, + filter_tags=filter_tags) + summary = output._summary + return output, summary def output_error(self, error_tuple): - self._stderr.write(str(error_tuple[1]) + '\n') + if 'TESTR_PDB' in os.environ: + import traceback + self._stderr.write(_u('').join(traceback.format_tb(error_tuple[2]))) + self._stderr.write(_u('\n')) + # This is terrible: it is because on Python2.x pdb writes bytes to + # its pipes, and the test suite uses io.StringIO that refuse bytes. 
+ import pdb; + if sys.version_info[0]==2: + if isinstance(self._stdout, io.StringIO): + write = self._stdout.write + def _write(text): + return write(text.decode('utf8')) + self._stdout.write = _write + p = pdb.Pdb(stdin=self._stdin, stdout=self._stdout) + p.reset() + p.interaction(None, error_tuple[2]) + error_type = str(error_tuple[1]) + # XX: Python2. + if type(error_type) is bytes: + error_type = error_type.decode('utf8') + self._stderr.write(error_type + _u('\n')) def output_rest(self, rest_string): self._stdout.write(rest_string) if not rest_string.endswith('\n'): - self._stdout.write('\n') + self._stdout.write(_u('\n')) def output_stream(self, stream): contents = stream.read(65536) + assert type(contents) is bytes, \ + "Bad stream contents %r" % type(contents) + # Outputs bytes, treat them as utf8. Probably needs fixing. while contents: - self._stdout.write(contents) + self._stdout.write(contents.decode('utf8')) contents = stream.read(65536) def output_table(self, table): @@ -121,21 +199,67 @@ outputs.append(' ') for row in contents[1:]: show_row(row) - self._stdout.write(''.join(outputs)) + self._stdout.write(_u('').join(outputs)) def output_tests(self, tests): for test in tests: - self._stdout.write(test.id()) - self._stdout.write('\n') + # On Python 2.6 id() returns bytes. + id_str = test.id() + if type(id_str) is bytes: + id_str = id_str.decode('utf8') + self._stdout.write(id_str) + self._stdout.write(_u('\n')) def output_values(self, values): outputs = [] for label, value in values: outputs.append('%s=%s' % (label, value)) - self._stdout.write('%s\n' % ', '.join(outputs)) + self._stdout.write(_u('%s\n' % ', '.join(outputs))) + + def _format_summary(self, successful, tests, tests_delta, + time, time_delta, values): + # We build the string by appending to a list of strings and then + # joining trivially at the end. Avoids expensive string concatenation. + summary = [] + a = summary.append + if tests: + a("Ran %s" % (tests,)) + if tests_delta: + a(" (%+d)" % (tests_delta,)) + a(" tests") + if time: + if not summary: + a("Ran tests") + a(" in %0.3fs" % (time,)) + if time_delta: + a(" (%+0.3fs)" % (time_delta,)) + if summary: + a("\n") + if successful: + a('PASSED') + else: + a('FAILED') + if values: + a(' (') + values_strings = [] + for name, value, delta in values: + value_str = '%s=%s' % (name, value) + if delta: + value_str += ' (%+d)' % (delta,) + values_strings.append(value_str) + a(', '.join(values_strings)) + a(')') + return _u('').join(summary) + + def output_summary(self, successful, tests, tests_delta, + time, time_delta, values): + self._stdout.write( + self._format_summary( + successful, tests, tests_delta, time, time_delta, values)) + self._stdout.write(_u('\n')) def _check_cmd(self): - parser = OptionParser() + parser = get_command_parser(self.cmd) parser.add_option("-d", "--here", dest="here", help="Set the directory or url that a command should run from. " "This affects all default path lookups but does not affect paths " @@ -143,9 +267,21 @@ parser.add_option("-q", "--quiet", action="store_true", default=False, help="Turn off output other than the primary output for a command " "and any errors.") - for option in self.cmd.options: - parser.add_option(option) - options, args = parser.parse_args(self._argv) + # yank out --, as optparse makes it silly hard to just preserve it. 
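The option-parsing change tested earlier, and implemented in the lines that follow, hinges on splitting argv at the first '--' before optparse sees it, then appending everything from the '--' onwards back onto the positional arguments so DoubledashArgument can consume it. Reduced to a helper (name invented here):

    def split_argv_at_double_dash(argv):
        try:
            pos = argv.index('--')
        except ValueError:
            return argv, []
        return argv[:pos], argv[pos:]

split_argv_at_double_dash(['one', '--', '--two', 'three']) returns (['one'], ['--', '--two', 'three']); only the first list is handed to optparse, which is why '--two' survives as an argument in test_parse_options_after_double_dash_are_arguments.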
+ try: + where_dashdash = self._argv.index('--') + opt_argv = self._argv[:where_dashdash] + other_args = self._argv[where_dashdash:] + except ValueError: + opt_argv = self._argv + other_args = [] + if '-h' in opt_argv or '--help' in opt_argv or '-?' in opt_argv: + self.output_rest(parser.format_help()) + # Fugly, but its what optparse does: we're just overriding the + # output path. + raise SystemExit(0) + options, args = parser.parse_args(opt_argv) + args += other_args self.here = options.here self.options = options parsed_args = {} @@ -156,12 +292,12 @@ except ValueError: exc_info = sys.exc_info() failed = True - self._stderr.write("%s\n" % str(exc_info[1])) + self._stderr.write(_u("%s\n") % str(exc_info[1])) break if not failed: self.arguments = parsed_args if args != []: - self._stderr.write("Unexpected arguments: %r\n" % args) + self._stderr.write(_u("Unexpected arguments: %r\n") % args) return not failed and args == [] def _clear_SIGPIPE(self): diff -Nru testrepository-0.0.5/testrepository/ui/decorator.py testrepository-0.0.18/testrepository/ui/decorator.py --- testrepository-0.0.5/testrepository/ui/decorator.py 2010-12-07 01:28:35.000000000 +0000 +++ testrepository-0.0.18/testrepository/ui/decorator.py 2013-02-06 08:34:17.000000000 +0000 @@ -14,7 +14,7 @@ """A decorator for UIs to allow use of additional command objects in-process.""" -from StringIO import StringIO +from io import BytesIO import optparse from testrepository import ui @@ -60,10 +60,11 @@ if getattr(stream_value, 'read', None): yield stream_value else: - yield StringIO(stream_value) + yield BytesIO(stream_value) - def make_result(self, get_id): - return self._decorated.make_result(get_id) + def make_result(self, get_id, test_command, previous_run=None): + return self._decorated.make_result( + get_id, test_command, previous_run=previous_run) def output_error(self, error_tuple): return self._decorated.output_error(error_tuple) @@ -83,6 +84,10 @@ def output_values(self, values): return self._decorated.output_values(values) + def output_summary(self, successful, tests, tests_delta, time, time_delta, values): + return self._decorated.output_summary( + successful, tests, tests_delta, time, time_delta, values) + def set_command(self, cmd): self.cmd = cmd result = True @@ -93,6 +98,8 @@ # Merge options self.options = optparse.Values() for option in dir(self._decorated.options): + if option.startswith('_'): + continue setattr(self.options, option, getattr(self._decorated.options, option)) for option, value in self._options.items(): diff -Nru testrepository-0.0.5/testrepository/ui/__init__.py testrepository-0.0.18/testrepository/ui/__init__.py --- testrepository-0.0.5/testrepository/ui/__init__.py 2010-11-11 01:58:47.000000000 +0000 +++ testrepository-0.0.18/testrepository/ui/__init__.py 2013-04-11 11:05:27.000000000 +0000 @@ -22,8 +22,10 @@ for. """ -from testtools import TestResult +from testtools import StreamResult +from testrepository.results import SummarizingResult +from testrepository.utils import timedelta_to_seconds class AbstractUI(object): """The base class for UI objects, this providers helpers and the interface. @@ -85,11 +87,19 @@ """Helper for iter_streams which subclasses should implement.""" raise NotImplementedError(self._iter_streams) - def make_result(self, get_id): - """Make a `TestResult` that can be used to display test results. + def make_result(self, get_id, test_command, previous_run=None): + """Make a `StreamResult` that can be used to display test results. 
+ + This will also support the `TestResult` API until at least + testrepository 0.0.16 to permit clients to migrate gracefully. :param get_id: A nullary callable that returns the id of the test run when called. + :param test_command: A TestCommand object used to configure user + transforms. + :param previous_run: An optional previous test run. + :return: A two-tuple with the stream to forward events to, and a + StreamSummary for querying success after the stream is finished. """ raise NotImplementedError(self.make_result) @@ -140,6 +150,21 @@ """ raise NotImplementedError(self.output_values) + def output_summary(self, successful, tests, tests_delta, time, time_delta, values): + """Output a summary of a test run. + + An example summary might look like: + Run 565 (+2) tests in 2.968s + FAILED (errors=13 (-2), succeesses=31 (+2)) + + :param successful: A boolean indicating whether the result was + successful. + :param values: List of tuples in the form ``(name, value, delta)``. + e.g. ``('failures', 5, -1)``. ``delta`` is None means that either + the delta is unknown or inappropriate. + """ + raise NotImplementedError(self.output_summary) + def set_command(self, cmd): """Inform the UI what command it is running. @@ -164,13 +189,13 @@ raise NotImplementedError(self.subprocess_Popen) -class BaseUITestResult(TestResult): +class BaseUITestResult(StreamResult): """An abstract test result used with the UI. AbstractUI.make_result probably wants to return an object like this. """ - def __init__(self, ui, get_id): + def __init__(self, ui, get_id, previous_run=None): """Construct an `AbstractUITestResult`. :param ui: The UI this result is associated with. @@ -179,6 +204,18 @@ super(BaseUITestResult, self).__init__() self.ui = ui self.get_id = get_id + self._previous_run = previous_run + self._summary = SummarizingResult() + + def _get_previous_summary(self): + if self._previous_run is None: + return None + previous_summary = SummarizingResult() + previous_summary.startTestRun() + test = self._previous_run.get_test() + test.run(previous_summary) + previous_summary.stopTestRun() + return previous_summary def _output_summary(self, run_id): """Output a test run. 
@@ -187,16 +224,39 @@ """ if self.ui.options.quiet: return - values = [('id', run_id), ('tests', self.testsRun)] - failures = len(self.failures) + len(self.errors) + time = self._summary.get_time_taken() + time_delta = None + num_tests_run_delta = None + num_failures_delta = None + values = [('id', run_id, None)] + failures = self._summary.get_num_failures() + previous_summary = self._get_previous_summary() if failures: - values.append(('failures', failures)) - skips = sum(map(len, self.skip_reasons.itervalues())) + if previous_summary: + num_failures_delta = failures - previous_summary.get_num_failures() + values.append(('failures', failures, num_failures_delta)) + if previous_summary: + num_tests_run_delta = self._summary.testsRun - previous_summary.testsRun + if time: + previous_time_taken = previous_summary.get_time_taken() + if previous_time_taken: + time_delta = time - previous_time_taken + skips = len(self._summary.skipped) if skips: - values.append(('skips', skips)) - self.ui.output_values(values) + values.append(('skips', skips, None)) + self.ui.output_summary( + not bool(failures), self._summary.testsRun, num_tests_run_delta, + time, time_delta, values) + + def startTestRun(self): + super(BaseUITestResult, self).startTestRun() + self._summary.startTestRun() def stopTestRun(self): super(BaseUITestResult, self).stopTestRun() run_id = self.get_id() + self._summary.stopTestRun() self._output_summary(run_id) + + def status(self, *args, **kwargs): + self._summary.status(*args, **kwargs) diff -Nru testrepository-0.0.5/testrepository/ui/model.py testrepository-0.0.18/testrepository/ui/model.py --- testrepository-0.0.5/testrepository/ui/model.py 2010-12-06 06:09:54.000000000 +0000 +++ testrepository-0.0.18/testrepository/ui/model.py 2013-04-11 11:14:26.000000000 +0000 @@ -14,9 +14,11 @@ """Am object based UI for testrepository.""" -from cStringIO import StringIO +from io import BytesIO import optparse +import testtools + from testrepository import ui @@ -26,13 +28,16 @@ def __init__(self, ui): self.ui = ui self.returncode = 0 - self.stdin = StringIO() - self.stdout = StringIO() + self.stdin = BytesIO() + self.stdout = BytesIO() def communicate(self): self.ui.outputs.append(('communicate',)) return self.stdout.getvalue(), '' + def wait(self): + return self.returncode + class TestSuiteModel(object): @@ -49,24 +54,18 @@ class TestResultModel(ui.BaseUITestResult): - def __init__(self, ui, get_id): - super(TestResultModel, self).__init__(ui, get_id) + def __init__(self, ui, get_id, previous_run=None): + super(TestResultModel, self).__init__(ui, get_id, previous_run) self._suite = TestSuiteModel() - def startTest(self, test): - super(TestResultModel, self).startTest(test) - self._suite.recordResult('startTest', test) - - def stopTest(self, test): - self._suite.recordResult('stopTest', test) - - def addError(self, test, *args): - super(TestResultModel, self).addError(test, *args) - self._suite.recordResult('addError', test, *args) - - def addFailure(self, test, *args): - super(TestResultModel, self).addFailure(test, *args) - self._suite.recordResult('addFailure', test, *args) + def status(self, test_id=None, test_status=None, test_tags=None, + runnable=True, file_name=None, file_bytes=None, eof=False, + mime_type=None, route_code=None, timestamp=None): + super(TestResultModel, self).status(test_id=test_id, + test_status=test_status, test_tags=test_tags, runnable=runnable, + file_name=file_name, file_bytes=file_bytes, eof=eof, + mime_type=mime_type, route_code=route_code, timestamp=timestamp) + 
self._suite.recordResult('status', test_id, test_status) def stopTestRun(self): if self.ui.options.quiet: @@ -83,8 +82,8 @@ testing testrepository commands. """ - def __init__(self, input_streams=None, options=(), args={}, - here='memory:', proc_outputs=()): + def __init__(self, input_streams=None, options=(), args=(), + here='memory:', proc_outputs=(), proc_results=()): """Create a model UI. :param input_streams: A list of stream name, (file or bytes) tuples to @@ -94,10 +93,14 @@ :param here: Set the here value for the UI. :param proc_outputs: byte strings to be returned in the stdout from created processes. + :param proc_results: numeric exit code to be set in each created + process. """ self.input_streams = {} if input_streams: for stream_type, stream_value in input_streams: + if isinstance(stream_value, str) and str is not bytes: + raise Exception('bad stream_value') self.input_streams.setdefault(stream_type, []).append( stream_value) self.here = here @@ -106,6 +109,8 @@ # Could take parsed args, but for now this is easier. self.unparsed_args = args self.proc_outputs = list(proc_outputs) + self.require_proc_stdout = False + self.proc_results = list(proc_results) def _check_cmd(self): options = list(self.unparsed_opts) @@ -137,10 +142,11 @@ if getattr(stream_value, 'read', None): yield stream_value else: - yield StringIO(stream_value) + yield BytesIO(stream_value) - def make_result(self, get_id): - return TestResultModel(self, get_id) + def make_result(self, get_id, test_command, previous_run=None): + result = TestResultModel(self, get_id, previous_run) + return result, result._summary def output_error(self, error_tuple): self.outputs.append(('error', error_tuple)) @@ -161,10 +167,18 @@ def output_values(self, values): self.outputs.append(('values', values)) + def output_summary(self, successful, tests, tests_delta, time, time_delta, values): + self.outputs.append( + ('summary', successful, tests, tests_delta, time, time_delta, values)) + def subprocess_Popen(self, *args, **kwargs): # Really not an output - outputs should be renamed to events. self.outputs.append(('popen', args, kwargs)) result = ProcessModel(self) if self.proc_outputs: - result.stdout = StringIO(self.proc_outputs.pop(0)) + result.stdout = BytesIO(self.proc_outputs.pop(0)) + elif self.require_proc_stdout: + raise Exception("No process output available") + if self.proc_results: + result.returncode = self.proc_results.pop(0) return result diff -Nru testrepository-0.0.5/testrepository/utils.py testrepository-0.0.18/testrepository/utils.py --- testrepository-0.0.5/testrepository/utils.py 1970-01-01 00:00:00.000000000 +0000 +++ testrepository-0.0.18/testrepository/utils.py 2012-01-10 13:30:21.000000000 +0000 @@ -0,0 +1,7 @@ + +def timedelta_to_seconds(delta): + """Return the number of seconds that make up the duration of a timedelta. 
+ """ + return ( + (delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10**6) + / float(10**6)) diff -Nru testrepository-0.0.5/testrepository.egg-info/dependency_links.txt testrepository-0.0.18/testrepository.egg-info/dependency_links.txt --- testrepository-0.0.5/testrepository.egg-info/dependency_links.txt 1970-01-01 00:00:00.000000000 +0000 +++ testrepository-0.0.18/testrepository.egg-info/dependency_links.txt 2013-11-05 14:40:31.000000000 +0000 @@ -0,0 +1 @@ + diff -Nru testrepository-0.0.5/testrepository.egg-info/entry_points.txt testrepository-0.0.18/testrepository.egg-info/entry_points.txt --- testrepository-0.0.5/testrepository.egg-info/entry_points.txt 1970-01-01 00:00:00.000000000 +0000 +++ testrepository-0.0.18/testrepository.egg-info/entry_points.txt 2013-11-05 14:40:31.000000000 +0000 @@ -0,0 +1,3 @@ +[distutils.commands] +testr = testrepository.setuptools_command:Testr + diff -Nru testrepository-0.0.5/testrepository.egg-info/PKG-INFO testrepository-0.0.18/testrepository.egg-info/PKG-INFO --- testrepository-0.0.5/testrepository.egg-info/PKG-INFO 1970-01-01 00:00:00.000000000 +0000 +++ testrepository-0.0.18/testrepository.egg-info/PKG-INFO 2013-11-05 14:40:31.000000000 +0000 @@ -0,0 +1,75 @@ +Metadata-Version: 1.1 +Name: testrepository +Version: 0.0.18 +Summary: A repository of test results. +Home-page: https://launchpad.net/testrepository +Author: Robert Collins +Author-email: robertc@robertcollins.net +License: UNKNOWN +Description: Test Repository + +++++++++++++++ + + Overview + ~~~~~~~~ + + This project provides a database of test results which can be used as part of + developer workflow to ensure/check things like: + + * No commits without having had a test failure, test fixed cycle. + * No commits without new tests being added. + * What tests have failed since the last commit (to run just a subset). + * What tests are currently failing and need work. + + Test results are inserted using subunit (and thus anything that can output + subunit or be converted into a subunit stream can be accepted). + + A mailing list for discussion, usage and development is at + https://launchpad.net/~testrepository-dev - all are welcome to join. Some folk + hang out on #testrepository on irc.freenode.net. + + CI for the project is at http://build.robertcollins.net/job/testrepository-default/. + + Licensing + ~~~~~~~~~ + + Test Repository is under BSD / Apache 2.0 licences. See the file COPYING in the source for details. + + Quick Start + ~~~~~~~~~~~ + + Create a config file:: + $ touch .testr.conf + + Create a repository:: + $ testr init + + Load a test run into the repository:: + $ testr load < testrun + + Query the repository:: + $ testr stats + $ testr last + $ testr failing + + Delete a repository:: + $ rm -rf .testrepository + + Documentation + ~~~~~~~~~~~~~ + + More detailed documentation including design and implementation details, a + user manual, and guidelines for development of Test Repository itself can be + found at https://testrepository.readthedocs.org/en/latest, or in the source + tree at doc/ (run make -C doc html). 
+ +Keywords: subunit unittest testrunner +Platform: UNKNOWN +Classifier: Development Status :: 6 - Mature +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Topic :: Software Development :: Quality Assurance +Classifier: Topic :: Software Development :: Testing diff -Nru testrepository-0.0.5/testrepository.egg-info/requires.txt testrepository-0.0.18/testrepository.egg-info/requires.txt --- testrepository-0.0.5/testrepository.egg-info/requires.txt 1970-01-01 00:00:00.000000000 +0000 +++ testrepository-0.0.18/testrepository.egg-info/requires.txt 2013-11-05 14:40:31.000000000 +0000 @@ -0,0 +1,9 @@ +fixtures +python-subunit >= 0.0.10 +testtools >= 0.9.30 + +[test] +bzr +pytz +testresources +testscenarios \ No newline at end of file diff -Nru testrepository-0.0.5/testrepository.egg-info/SOURCES.txt testrepository-0.0.18/testrepository.egg-info/SOURCES.txt --- testrepository-0.0.5/testrepository.egg-info/SOURCES.txt 1970-01-01 00:00:00.000000000 +0000 +++ testrepository-0.0.18/testrepository.egg-info/SOURCES.txt 2013-11-05 14:40:31.000000000 +0000 @@ -0,0 +1,89 @@ +.bzrignore +.testr.conf +Apache-2.0 +BSD +COPYING +INSTALL.txt +MANIFEST.in +Makefile +NEWS +README.txt +setup.py +testr +doc/DESIGN.txt +doc/DEVELOPERS.txt +doc/MANUAL.txt +doc/index.txt +testrepository/__init__.py +testrepository/results.py +testrepository/setuptools_command.py +testrepository/testcommand.py +testrepository/testlist.py +testrepository/utils.py +testrepository.egg-info/PKG-INFO +testrepository.egg-info/SOURCES.txt +testrepository.egg-info/dependency_links.txt +testrepository.egg-info/entry_points.txt +testrepository.egg-info/requires.txt +testrepository.egg-info/top_level.txt +testrepository/arguments/__init__.py +testrepository/arguments/command.py +testrepository/arguments/doubledash.py +testrepository/arguments/path.py +testrepository/arguments/string.py +testrepository/commands/__init__.py +testrepository/commands/commands.py +testrepository/commands/failing.py +testrepository/commands/help.py +testrepository/commands/init.py +testrepository/commands/last.py +testrepository/commands/list_tests.py +testrepository/commands/load.py +testrepository/commands/quickstart.py +testrepository/commands/run.py +testrepository/commands/slowest.py +testrepository/commands/stats.py +testrepository/repository/__init__.py +testrepository/repository/file.py +testrepository/repository/memory.py +testrepository/repository/samba_buildfarm.py +testrepository/tests/__init__.py +testrepository/tests/monkeypatch.py +testrepository/tests/stubpackage.py +testrepository/tests/test_arguments.py +testrepository/tests/test_commands.py +testrepository/tests/test_matchers.py +testrepository/tests/test_monkeypatch.py +testrepository/tests/test_repository.py +testrepository/tests/test_results.py +testrepository/tests/test_setup.py +testrepository/tests/test_stubpackage.py +testrepository/tests/test_testcommand.py +testrepository/tests/test_testr.py +testrepository/tests/test_ui.py +testrepository/tests/arguments/__init__.py +testrepository/tests/arguments/test_command.py +testrepository/tests/arguments/test_doubledash.py +testrepository/tests/arguments/test_path.py +testrepository/tests/arguments/test_string.py +testrepository/tests/commands/__init__.py 
+testrepository/tests/commands/test_commands.py +testrepository/tests/commands/test_failing.py +testrepository/tests/commands/test_help.py +testrepository/tests/commands/test_init.py +testrepository/tests/commands/test_last.py +testrepository/tests/commands/test_list_tests.py +testrepository/tests/commands/test_load.py +testrepository/tests/commands/test_quickstart.py +testrepository/tests/commands/test_run.py +testrepository/tests/commands/test_slowest.py +testrepository/tests/commands/test_stats.py +testrepository/tests/repository/__init__.py +testrepository/tests/repository/test_file.py +testrepository/tests/ui/__init__.py +testrepository/tests/ui/test_cli.py +testrepository/tests/ui/test_decorator.py +testrepository/ui/__init__.py +testrepository/ui/cli.py +testrepository/ui/decorator.py +testrepository/ui/model.py \ No newline at end of file diff -Nru testrepository-0.0.5/testrepository.egg-info/top_level.txt testrepository-0.0.18/testrepository.egg-info/top_level.txt --- testrepository-0.0.5/testrepository.egg-info/top_level.txt 1970-01-01 00:00:00.000000000 +0000 +++ testrepository-0.0.18/testrepository.egg-info/top_level.txt 2013-11-05 14:40:31.000000000 +0000 @@ -0,0 +1 @@ +testrepository
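The entry_points.txt above registers a 'testr' distutils command, so a downstream project that depends on testrepository can, assuming setuptools' usual handling of distutils.commands entry points, drive its suite from its own setup.py. A hypothetical consumer, for illustration only:

    # setup.py of some downstream project (hypothetical example)
    from setuptools import setup

    setup(
        name='example-project',
        version='0.1',
        packages=['example_project'],
        tests_require=['testrepository'],
    )

    # afterwards, with a .testr.conf in place:
    #   $ python setup.py testr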