Author: mattip <matti.pi...@gmail.com>
Branch: ufuncapi
Changeset: r73729:2cfa83babbb9
Date: 2014-09-28 00:25 +0300
http://bitbucket.org/pypy/pypy/changeset/2cfa83babbb9/

Log:    merge default into branch

diff too long, truncating to 2000 out of 67964 lines

diff --git a/LICENSE b/LICENSE
--- a/LICENSE
+++ b/LICENSE
@@ -35,280 +35,290 @@
 the beginning of each file) the files in the 'pypy' directory are each
 copyrighted by one or more of the following people and organizations:    
 
-    Armin Rigo
-    Maciej Fijalkowski
-    Carl Friedrich Bolz
-    Antonio Cuni
-    Amaury Forgeot d'Arc
-    Samuele Pedroni
-    Alex Gaynor
-    Michael Hudson
-    David Schneider
-    Matti Picus
-    Brian Kearns
-    Philip Jenvey
-    Holger Krekel
-    Christian Tismer
-    Hakan Ardo
-    Benjamin Peterson
-    Manuel Jacob
-    Anders Chrigstrom
-    Eric van Riet Paap
-    Wim Lavrijsen
-    Ronan Lamy
-    Richard Emslie
-    Alexander Schremmer
-    Dan Villiom Podlaski Christiansen
-    Lukas Diekmann
-    Sven Hager
-    Anders Lehmann
-    Aurelien Campeas
-    Niklaus Haldimann
-    Camillo Bruni
-    Laura Creighton
-    Toon Verwaest
-    Remi Meier
-    Leonardo Santagada
-    Seo Sanghyeon
-    Romain Guillebert
-    Justin Peel
-    Ronny Pfannschmidt
-    David Edelsohn
-    Anders Hammarquist
-    Jakub Gustak
-    Guido Wesdorp
-    Lawrence Oluyede
-    Bartosz Skowron
-    Daniel Roberts
-    Niko Matsakis
-    Adrien Di Mascio
-    Alexander Hesse
-    Ludovic Aubry
-    Jacob Hallen
-    Jason Creighton
-    Alex Martelli
-    Michal Bendowski
-    Jan de Mooij
-    stian
-    Michael Foord
-    Stephan Diehl
-    Stefan Schwarzer
-    Valentino Volonghi
-    Tomek Meka
-    Patrick Maupin
-    Bob Ippolito
-    Bruno Gola
-    Jean-Paul Calderone
-    Timo Paulssen
-    Squeaky
-    Alexandre Fayolle
-    Simon Burton
-    Marius Gedminas
-    John Witulski
-    Konstantin Lopuhin
-    Greg Price
-    Dario Bertini
-    Mark Pearse
-    Simon Cross
-    Andreas Stührk
-    Jean-Philippe St. Pierre
-    Guido van Rossum
-    Pavel Vinogradov
-    Paweł Piotr Przeradowski
-    Paul deGrandis
-    Ilya Osadchiy
-    Tobias Oberstein
-    Adrian Kuhn
-    Boris Feigin
-    Stefano Rivera
-    tav
-    Taavi Burns
-    Georg Brandl
-    Bert Freudenberg
-    Stian Andreassen
-    Laurence Tratt
-    Wanja Saatkamp
-    Ivan Sichmann Freitas
-    Gerald Klix
-    Mike Blume
-    Oscar Nierstrasz
-    Stefan H. Muller
-    Jeremy Thurgood
-    Gregor Wegberg
-    Rami Chowdhury
-    Tobias Pape
-    Edd Barrett
-    David Malcolm
-    Eugene Oden
-    Henry Mason
-    Preston Timmons
-    Jeff Terrace
-    David Ripton
-    Dusty Phillips
-    Lukas Renggli
-    Guenter Jantzen
-    Ned Batchelder
-    Amit Regmi
-    Ben Young
-    Nicolas Chauvat
-    Andrew Durdin
-    Andrew Chambers
-    Michael Schneider
-    Nicholas Riley
-    Jason Chu
-    Igor Trindade Oliveira
-    Rocco Moretti
-    Gintautas Miliauskas
-    Michael Twomey
-    Lucian Branescu Mihaila
-    Tim Felgentreff
-    Tyler Wade
-    Gabriel Lavoie
-    Olivier Dormond
-    Jared Grubb
-    Karl Bartel
-    Brian Dorsey
-    Victor Stinner
-    Andrews Medina
-    Stuart Williams
-    Jasper Schulz
-    Christian Hudon
-    Toby Watson
-    Antoine Pitrou
-    Aaron Iles
-    Michael Cheng
-    Justas Sadzevicius
-    Mikael Schönenberg
-    Gasper Zejn
-    Neil Shepperd
-    Elmo Mäntynen
-    Jonathan David Riehl
-    Stanislaw Halik
-    Anders Qvist
-    Chirag Jadwani
-    Beatrice During
-    Alex Perry
-    Vincent Legoll
-    Alan McIntyre
-    Alexander Sedov
-    Corbin Simpson
-    Christopher Pope
-    wenzhuman
-    Christian Tismer 
-    Marc Abramowitz
-    Dan Stromberg
-    Stefano Parmesan
-    Alexis Daboville
-    Jens-Uwe Mager
-    Carl Meyer
-    Karl Ramm
-    Pieter Zieschang
-    Gabriel
-    Lukas Vacek
-    Andrew Dalke
-    Sylvain Thenault
-    Nathan Taylor
-    Vladimir Kryachko
-    Jacek Generowicz
-    Alejandro J. Cura
-    Jacob Oscarson
-    Travis Francis Athougies
-    Ryan Gonzalez
-    Kristjan Valur Jonsson
-    Sebastian Pawluś
-    Neil Blakey-Milner
-    anatoly techtonik
-    Lutz Paelike
-    Lucio Torre
-    Lars Wassermann
-    Henrik Vendelbo
-    Dan Buch
-    Miguel de Val Borro
-    Artur Lisiecki
-    Sergey Kishchenko
-    Ignas Mikalajunas
-    Christoph Gerum
-    Martin Blais
-    Lene Wagner
-    Tomo Cocoa
-    roberto@goyle
-    Yury V. Zaytsev
-    Anna Katrina Dominguez
-    William Leslie
-    Bobby Impollonia
-    t...@eistee.fritz.box
-    Andrew Thompson
-    Ben Darnell
-    Roberto De Ioris
-    Juan Francisco Cantero Hurtado
-    Godefroid Chappelle
-    Joshua Gilbert
-    Dan Colish
-    Christopher Armstrong
-    Michael Hudson-Doyle
-    Anders Sigfridsson
-    Yasir Suhail
-    rafalgalczyn...@gmail.com
-    Floris Bruynooghe
-    Laurens Van Houtven
-    Akira Li
-    Gustavo Niemeyer
-    Stephan Busemann
-    Rafał Gałczyński
-    Yusei Tahara
-    Christian Muirhead
-    James Lan
-    shoma hosaka
-    Daniel Neuhäuser
-    Matthew Miller
-    Buck Golemon
-    Konrad Delong
-    Dinu Gherman
-    Chris Lambacher
-    coolbutusel...@gmail.com
-    Rodrigo Araújo
-    w31rd0
-    Jim Baker
-    James Robert
-    Armin Ronacher
-    Brett Cannon
-    yrttyr
-    aliceinwire
-    OlivierBlanvillain
-    Zooko Wilcox-O Hearn
-    Tomer Chachamu
-    Christopher Groskopf
-    Asmo Soinio
-    Stefan Marr
-    jiaaro
-    opassembler.py
-    Antony Lee
-    Jim Hunziker
-    Markus Unterwaditzer
-    Even Wiik Thomassen
-    jbs
-    soareschen
-    Kurt Griffiths
-    Mike Bayer
-    Flavio Percoco
-    Kristoffer Kleine
-    yasirs
-    Michael Chermside
-    Anna Ravencroft
-    Julien Phalip
-    Dan Loewenherz
+  Armin Rigo
+  Maciej Fijalkowski
+  Carl Friedrich Bolz
+  Antonio Cuni
+  Amaury Forgeot d'Arc
+  Samuele Pedroni
+  Alex Gaynor
+  Michael Hudson
+  David Schneider
+  Matti Picus
+  Brian Kearns
+  Philip Jenvey
+  Holger Krekel
+  Christian Tismer
+  Hakan Ardo
+  Benjamin Peterson
+  Manuel Jacob
+  Anders Chrigstrom
+  Eric van Riet Paap
+  Ronan Lamy
+  Wim Lavrijsen
+  Richard Emslie
+  Alexander Schremmer
+  Dan Villiom Podlaski Christiansen
+  Lukas Diekmann
+  Sven Hager
+  Anders Lehmann
+  Aurelien Campeas
+  Niklaus Haldimann
+  Remi Meier
+  Camillo Bruni
+  Laura Creighton
+  Toon Verwaest
+  Leonardo Santagada
+  Seo Sanghyeon
+  Romain Guillebert
+  Justin Peel
+  Ronny Pfannschmidt
+  David Edelsohn
+  Anders Hammarquist
+  Jakub Gustak
+  Guido Wesdorp
+  Lawrence Oluyede
+  Bartosz Skowron
+  Gregor Wegberg
+  Daniel Roberts
+  Niko Matsakis
+  Adrien Di Mascio
+  Alexander Hesse
+  Ludovic Aubry
+  Jacob Hallen
+  Jason Creighton
+  Alex Martelli
+  Michal Bendowski
+  Jan de Mooij
+  stian
+  Michael Foord
+  Stephan Diehl
+  Tyler Wade
+  Stefan Schwarzer
+  Valentino Volonghi
+  Tomek Meka
+  Patrick Maupin
+  Bob Ippolito
+  Bruno Gola
+  Jean-Paul Calderone
+  Timo Paulssen
+  Squeaky
+  Alexandre Fayolle
+  Simon Burton
+  Marius Gedminas
+  Martin Matusiak
+  Konstantin Lopuhin
+  John Witulski
+  Wenzhu Man
+  Greg Price
+  Dario Bertini
+  Mark Pearse
+  Simon Cross
+  Ivan Sichmann Freitas
+  Andreas Stührk
+  Jean-Philippe St. Pierre
+  Guido van Rossum
+  Pavel Vinogradov
+  Stefano Rivera
+  Paweł Piotr Przeradowski
+  Paul deGrandis
+  Ilya Osadchiy
+  Tobias Oberstein
+  Adrian Kuhn
+  Boris Feigin
+  tav
+  Taavi Burns
+  Georg Brandl
+  Laurence Tratt
+  Bert Freudenberg
+  Stian Andreassen
+  Wanja Saatkamp
+  Gerald Klix
+  Mike Blume
+  Oscar Nierstrasz
+  Stefan H. Muller
+  Edd Barrett
+  Jeremy Thurgood
+  Rami Chowdhury
+  Tobias Pape
+  David Malcolm
+  Eugene Oden
+  Henry Mason
+  Vasily Kuznetsov
+  Preston Timmons
+  Jeff Terrace
+  David Ripton
+  Dusty Phillips
+  Lukas Renggli
+  Guenter Jantzen
+  Ned Batchelder
+  Amit Regmi
+  Ben Young
+  Nicolas Chauvat
+  Andrew Durdin
+  Andrew Chambers
+  Michael Schneider
+  Nicholas Riley
+  Jason Chu
+  Igor Trindade Oliveira
+  Tim Felgentreff
+  Rocco Moretti
+  Gintautas Miliauskas
+  Michael Twomey
+  Lucian Branescu Mihaila
+  Gabriel Lavoie
+  Olivier Dormond
+  Jared Grubb
+  Karl Bartel
+  Brian Dorsey
+  Victor Stinner
+  Andrews Medina
+  Stuart Williams
+  Jasper Schulz
+  Christian Hudon
+  Toby Watson
+  Antoine Pitrou
+  Aaron Iles
+  Michael Cheng
+  Justas Sadzevicius
+  Gasper Zejn
+  anatoly techtonik
+  Neil Shepperd
+  Mikael Schönenberg
+  Elmo Mäntynen
+  Jonathan David Riehl
+  Stanislaw Halik
+  Anders Qvist
+  Corbin Simpson
+  Chirag Jadwani
+  Beatrice During
+  Alex Perry
+  Vincent Legoll
+  Alan McIntyre
+  Alexander Sedov
+  Christopher Pope
+  Christian Tismer 
+  Marc Abramowitz
+  Dan Stromberg
+  Stefano Parmesan
+  Alexis Daboville
+  Jens-Uwe Mager
+  Carl Meyer
+  Karl Ramm
+  Pieter Zieschang
+  Sebastian Pawluś
+  Gabriel
+  Lukas Vacek
+  Andrew Dalke
+  Sylvain Thenault
+  Nathan Taylor
+  Vladimir Kryachko
+  Arjun Naik
+  Attila Gobi
+  Jacek Generowicz
+  Alejandro J. Cura
+  Jacob Oscarson
+  Travis Francis Athougies
+  Ryan Gonzalez
+  Ian Foote
+  Kristjan Valur Jonsson
+  Neil Blakey-Milner
+  Lutz Paelike
+  Lucio Torre
+  Lars Wassermann
+  Valentina Mukhamedzhanova
+  Henrik Vendelbo
+  Dan Buch
+  Miguel de Val Borro
+  Artur Lisiecki
+  Sergey Kishchenko
+  Yichao Yu
+  Ignas Mikalajunas
+  Christoph Gerum
+  Martin Blais
+  Lene Wagner
+  Tomo Cocoa
+  roberto@goyle
+  Yury V. Zaytsev
+  Anna Katrina Dominguez
+  William Leslie
+  Bobby Impollonia
+  t...@eistee.fritz.box
+  Andrew Thompson
+  Yusei Tahara
+  Ben Darnell
+  Roberto De Ioris
+  Juan Francisco Cantero Hurtado
+  Godefroid Chappelle
+  Joshua Gilbert
+  Dan Colish
+  Christopher Armstrong
+  Michael Hudson-Doyle
+  Anders Sigfridsson
+  Yasir Suhail
+  Jason Michalski
+  rafalgalczyn...@gmail.com
+  Floris Bruynooghe
+  Laurens Van Houtven
+  Akira Li
+  Gustavo Niemeyer
+  Stephan Busemann
+  Rafał Gałczyński
+  Christian Muirhead
+  James Lan
+  shoma hosaka
+  Daniel Neuhäuser
+  Matthew Miller
+  Buck Golemon
+  Konrad Delong
+  Dinu Gherman
+  Chris Lambacher
+  coolbutusel...@gmail.com
+  Rodrigo Araújo
+  Jim Baker
+  James Robert
+  Armin Ronacher
+  Brett Cannon
+  yrttyr
+  aliceinwire
+  OlivierBlanvillain
+  Zooko Wilcox-O Hearn
+  Tomer Chachamu
+  Christopher Groskopf
+  Asmo Soinio
+  Stefan Marr
+  jiaaro
+  Mads Kiilerich
+  opassembler.py
+  Antony Lee
+  Jim Hunziker
+  Markus Unterwaditzer
+  Even Wiik Thomassen
+  jbs
+  soareschen
+  Kurt Griffiths
+  Mike Bayer
+  Matthew Miller
+  Flavio Percoco
+  Kristoffer Kleine
+  yasirs
+  Michael Chermside
+  Anna Ravencroft
+  Dan Crosta
+  Julien Phalip
+  Dan Loewenherz
 
-    Heinrich-Heine University, Germany 
-    Open End AB (formerly AB Strakt), Sweden
-    merlinux GmbH, Germany 
-    tismerysoft GmbH, Germany 
-    Logilab Paris, France 
-    DFKI GmbH, Germany 
-    Impara, Germany
-    Change Maker, Sweden 
-    University of California Berkeley, USA
-    Google Inc.
-    King's College London
+  Heinrich-Heine University, Germany 
+  Open End AB (formerly AB Strakt), Sweden
+  merlinux GmbH, Germany 
+  tismerysoft GmbH, Germany 
+  Logilab Paris, France 
+  DFKI GmbH, Germany 
+  Impara, Germany
+  Change Maker, Sweden 
+  University of California Berkeley, USA
+  Google Inc.
+  King's College London
 
 The PyPy Logo as used by http://speed.pypy.org and others was created
 by Samuel Reis and is distributed on terms of Creative Commons Share Alike
@@ -354,6 +364,6 @@
 See the License for the specific language governing permissions and
 limitations under the License.
 
-Detailled license information is contained in the NOTICE file in the
+Detailed license information is contained in the NOTICE file in the
 directory.
 
diff --git a/_pytest/README-BEFORE-UPDATING b/_pytest/README-BEFORE-UPDATING
new file mode 100644
--- /dev/null
+++ b/_pytest/README-BEFORE-UPDATING
@@ -0,0 +1,17 @@
+This is PyPy's code of the pytest lib.  We don't expect to upgrade it
+very often, but once we do:
+
+    WARNING!
+
+    WE HAVE MADE A FEW TWEAKS HERE!
+
+Please be sure that you don't just copy the newer version from
+upstream without checking the few changes that we did.  This
+can be done like this:
+
+    cd <this directory>
+    hg log . -v | less
+
+then search for all " _pytest/" in that list to know which are the
+relevant checkins.  (Look for the checkins that only edit one
+or two files in this directory.)
diff --git a/_pytest/__init__.py b/_pytest/__init__.py
--- a/_pytest/__init__.py
+++ b/_pytest/__init__.py
@@ -1,2 +1,2 @@
 #
-__version__ = '2.2.4.dev2'
+__version__ = '2.5.2'
diff --git a/_pytest/_argcomplete.py b/_pytest/_argcomplete.py
new file mode 100644
--- /dev/null
+++ b/_pytest/_argcomplete.py
@@ -0,0 +1,104 @@
+
+"""allow bash-completion for argparse with argcomplete if installed
+needs argcomplete>=0.5.6 for python 3.2/3.3 (older versions fail
+to find the magic string, so _ARGCOMPLETE env. var is never set, and
+this does not need special code.
+
+argcomplete does not support python 2.5 (although the changes for that
+are minor).
+
+Function try_argcomplete(parser) should be called directly before
+the call to ArgumentParser.parse_args().
+
+The filescompleter is what you normally would use on the positional
+arguments specification, in order to get "dirname/" after "dirn<TAB>"
+instead of the default "dirname ":
+
+   optparser.add_argument(Config._file_or_dir, nargs='*'
+                               ).completer=filescompleter
+
+Other, application specific, completers should go in the file
+doing the add_argument calls as they need to be specified as .completer
+attributes as well. (If argcomplete is not installed, the function the
+attribute points to will not be used).
+
+SPEEDUP
+=======
+The generic argcomplete script for bash-completion
+(/etc/bash_completion.d/python-argcomplete.sh )
+uses a python program to determine startup script generated by pip.
+You can speed up completion somewhat by changing this script to include
+  # PYTHON_ARGCOMPLETE_OK
+so the the python-argcomplete-check-easy-install-script does not
+need to be called to find the entry point of the code and see if that is
+marked  with PYTHON_ARGCOMPLETE_OK
+
+INSTALL/DEBUGGING
+=================
+To include this support in another application that has setup.py generated
+scripts:
+- add the line:
+    # PYTHON_ARGCOMPLETE_OK
+  near the top of the main python entry point
+- include in the file calling parse_args():
+    from _argcomplete import try_argcomplete, filescompleter
+   , call try_argcomplete just before parse_args(), and optionally add
+   filescompleter to the positional arguments' add_argument()
+If things do not work right away:
+- switch on argcomplete debugging with (also helpful when doing custom
+  completers):
+    export _ARC_DEBUG=1
+- run:
+    python-argcomplete-check-easy-install-script $(which appname)
+    echo $?
+  will echo 0 if the magic line has been found, 1 if not
+- sometimes it helps to find early on errors using:
+    _ARGCOMPLETE=1 _ARC_DEBUG=1 appname
+  which should throw a KeyError: 'COMPLINE' (which is properly set by the
+  global argcomplete script).
+"""
+
+import sys
+import os
+from glob import glob
+
+class FastFilesCompleter:
+    'Fast file completer class'
+    def __init__(self, directories=True):
+        self.directories = directories
+
+    def __call__(self, prefix, **kwargs):
+        """only called on non option completions"""
+        if os.path.sep in prefix[1:]: #
+            prefix_dir = len(os.path.dirname(prefix) + os.path.sep)
+        else:
+            prefix_dir = 0
+        completion = []
+        globbed = []
+        if '*' not in prefix and '?' not in prefix:
+            if prefix[-1] == os.path.sep:  # we are on unix, otherwise no bash
+                globbed.extend(glob(prefix + '.*'))
+            prefix += '*'
+        globbed.extend(glob(prefix))
+        for x in sorted(globbed):
+            if os.path.isdir(x):
+                x += '/'
+            # append stripping the prefix (like bash, not like compgen)
+            completion.append(x[prefix_dir:])
+        return completion
+
+if os.environ.get('_ARGCOMPLETE'):
+    # argcomplete 0.5.6 is not compatible with python 2.5.6: print/with/format
+    if sys.version_info[:2] < (2, 6):
+        sys.exit(1)
+    try:
+        import argcomplete.completers
+    except ImportError:
+        sys.exit(-1)
+    filescompleter = FastFilesCompleter()
+
+    def try_argcomplete(parser):
+        argcomplete.autocomplete(parser)
+else:
+    def try_argcomplete(parser): pass
+    filescompleter = None
diff --git a/_pytest/assertion/__init__.py b/_pytest/assertion/__init__.py
--- a/_pytest/assertion/__init__.py
+++ b/_pytest/assertion/__init__.py
@@ -3,7 +3,6 @@
 """
 import py
 import sys
-import pytest
 from _pytest.monkeypatch import monkeypatch
 from _pytest.assertion import util
 
@@ -19,8 +18,8 @@
 to provide assert expression information. """)
     group.addoption('--no-assert', action="store_true", default=False,
         dest="noassert", help="DEPRECATED equivalent to --assert=plain")
-    group.addoption('--nomagic', action="store_true", default=False,
-        dest="nomagic", help="DEPRECATED equivalent to --assert=plain")
+    group.addoption('--nomagic', '--no-magic', action="store_true",
+        default=False, help="DEPRECATED equivalent to --assert=plain")
 
 class AssertionState:
     """State for the assertion plugin."""
@@ -35,22 +34,25 @@
         mode = "plain"
     if mode == "rewrite":
         try:
-            import ast
+            import ast  # noqa
         except ImportError:
             mode = "reinterp"
         else:
-            if sys.platform.startswith('java'):
+            # Both Jython and CPython 2.6.0 have AST bugs that make the
+            # assertion rewriting hook malfunction.
+            if (sys.platform.startswith('java') or
+                sys.version_info[:3] == (2, 6, 0)):
                 mode = "reinterp"
     if mode != "plain":
         _load_modules(mode)
         m = monkeypatch()
         config._cleanup.append(m.undo)
         m.setattr(py.builtin.builtins, 'AssertionError',
-                  reinterpret.AssertionError)
+                  reinterpret.AssertionError)  # noqa
     hook = None
     if mode == "rewrite":
-        hook = rewrite.AssertionRewritingHook()
-        sys.meta_path.append(hook)
+        hook = rewrite.AssertionRewritingHook()  # noqa
+        sys.meta_path.insert(0, hook)
     warn_about_missing_assertion(mode)
     config._assertstate = AssertionState(config, mode)
     config._assertstate.hook = hook
@@ -73,9 +75,16 @@
     def callbinrepr(op, left, right):
         hook_result = item.ihook.pytest_assertrepr_compare(
             config=item.config, op=op, left=left, right=right)
+
         for new_expl in hook_result:
             if new_expl:
-                res = '\n~'.join(new_expl)
+                # Don't include pageloads of data unless we are very
+                # verbose (-vv)
+                if (sum(len(p) for p in new_expl[1:]) > 80*8
+                        and item.config.option.verbose < 2):
+                    new_expl[1:] = [py.builtin._totext(
+                        'Detailed information truncated, use "-vv" to show')]
+                res = py.builtin._totext('\n~').join(new_expl)
                 if item.config.getvalue("assertmode") == "rewrite":
                     # The result will be fed back a python % formatting
                     # operation, which will fail if there are extraneous
@@ -95,9 +104,9 @@
 def _load_modules(mode):
     """Lazily import assertion related code."""
     global rewrite, reinterpret
-    from _pytest.assertion import reinterpret
+    from _pytest.assertion import reinterpret  # noqa
     if mode == "rewrite":
-        from _pytest.assertion import rewrite
+        from _pytest.assertion import rewrite  # noqa
 
 def warn_about_missing_assertion(mode):
     try:
diff --git a/_pytest/assertion/newinterpret.py b/_pytest/assertion/newinterpret.py
--- a/_pytest/assertion/newinterpret.py
+++ b/_pytest/assertion/newinterpret.py
@@ -11,7 +11,7 @@
 from _pytest.assertion.reinterpret import BuiltinAssertionError
 
 
-if sys.platform.startswith("java") and sys.version_info < (2, 5, 2):
+if sys.platform.startswith("java"):
     # See http://bugs.jython.org/issue1497
     _exprs = ("BoolOp", "BinOp", "UnaryOp", "Lambda", "IfExp", "Dict",
               "ListComp", "GeneratorExp", "Yield", "Compare", "Call",
diff --git a/_pytest/assertion/oldinterpret.py b/_pytest/assertion/oldinterpret.py
--- a/_pytest/assertion/oldinterpret.py
+++ b/_pytest/assertion/oldinterpret.py
@@ -526,10 +526,13 @@
     # example:
     def f():
         return 5
+
     def g():
         return 3
+
     def h(x):
         return 'never'
+
     check("f() * g() == 5")
     check("not f()")
     check("not (f() and g() or 0)")
diff --git a/_pytest/assertion/reinterpret.py b/_pytest/assertion/reinterpret.py
--- a/_pytest/assertion/reinterpret.py
+++ b/_pytest/assertion/reinterpret.py
@@ -1,18 +1,26 @@
 import sys
 import py
 from _pytest.assertion.util import BuiltinAssertionError
+u = py.builtin._totext
+
 
 class AssertionError(BuiltinAssertionError):
     def __init__(self, *args):
         BuiltinAssertionError.__init__(self, *args)
         if args:
+            # on Python2.6 we get len(args)==2 for: assert 0, (x,y)
+            # on Python2.7 and above we always get len(args) == 1
+            # with args[0] being the (x,y) tuple.
+            if len(args) > 1:
+                toprint = args
+            else:
+                toprint = args[0]
             try:
-                self.msg = str(args[0])
-            except py.builtin._sysex:
-                raise
-            except:
-                self.msg = "<[broken __repr__] %s at %0xd>" %(
-                    args[0].__class__, id(args[0]))
+                self.msg = u(toprint)
+            except Exception:
+                self.msg = u(
+                    "<[broken __repr__] %s at %0xd>"
+                    % (toprint.__class__, id(toprint)))
         else:
             f = py.code.Frame(sys._getframe(1))
             try:
@@ -44,4 +52,3 @@
     from _pytest.assertion.newinterpret import interpret as reinterpret
 else:
     reinterpret = reinterpret_old
-
diff --git a/_pytest/assertion/rewrite.py b/_pytest/assertion/rewrite.py
--- a/_pytest/assertion/rewrite.py
+++ b/_pytest/assertion/rewrite.py
@@ -6,6 +6,7 @@
 import imp
 import marshal
 import os
+import re
 import struct
 import sys
 import types
@@ -14,13 +15,7 @@
 from _pytest.assertion import util
 
 
-# Windows gives ENOENT in places *nix gives ENOTDIR.
-if sys.platform.startswith("win"):
-    PATH_COMPONENT_NOT_DIR = errno.ENOENT
-else:
-    PATH_COMPONENT_NOT_DIR = errno.ENOTDIR
-
-# py.test caches rewritten pycs in __pycache__.
+# pytest caches rewritten pycs in __pycache__.
 if hasattr(imp, "get_tag"):
     PYTEST_TAG = imp.get_tag() + "-PYTEST"
 else:
@@ -34,17 +29,19 @@
     PYTEST_TAG = "%s-%s%s-PYTEST" % (impl, ver[0], ver[1])
     del ver, impl
 
-PYC_EXT = ".py" + "c" if __debug__ else "o"
+PYC_EXT = ".py" + (__debug__ and "c" or "o")
 PYC_TAIL = "." + PYTEST_TAG + PYC_EXT
 
 REWRITE_NEWLINES = sys.version_info[:2] != (2, 7) and sys.version_info < (3, 2)
+ASCII_IS_DEFAULT_ENCODING = sys.version_info[0] < 3
 
 class AssertionRewritingHook(object):
-    """Import hook which rewrites asserts."""
+    """PEP302 Import hook which rewrites asserts."""
 
     def __init__(self):
         self.session = None
         self.modules = {}
+        self._register_with_pkg_resources()
 
     def set_session(self, session):
         self.fnpats = session.config.getini("python_files")
@@ -59,8 +56,12 @@
         names = name.rsplit(".", 1)
         lastname = names[-1]
         pth = None
-        if path is not None and len(path) == 1:
-            pth = path[0]
+        if path is not None:
+            # Starting with Python 3.3, path is a _NamespacePath(), which
+            # causes problems if not converted to list.
+            path = list(path)
+            if len(path) == 1:
+                pth = path[0]
         if pth is None:
             try:
                 fd, fn, desc = imp.find_module(lastname, path)
@@ -95,12 +96,13 @@
             finally:
                 self.session = sess
         else:
-            state.trace("matched test file (was specified on cmdline): %r" % 
(fn,))
+            state.trace("matched test file (was specified on cmdline): %r" %
+                        (fn,))
         # The requested module looks like a test file, so rewrite it. This is
         # the most magical part of the process: load the source, rewrite the
         # asserts, and load the rewritten source. We also cache the rewritten
         # module code in a special pyc. We must be aware of the possibility of
-        # concurrent py.test processes rewriting and loading pycs. To avoid
+        # concurrent pytest processes rewriting and loading pycs. To avoid
         # tricky race conditions, we maintain the following invariant: The
         # cached pyc is always a complete, valid pyc. Operations on it must be
         # atomic. POSIX's atomic rename comes in handy.
@@ -116,19 +118,19 @@
                     # common case) or it's blocked by a non-dir node. In the
                     # latter case, we'll ignore it in _write_pyc.
                     pass
-                elif e == PATH_COMPONENT_NOT_DIR:
+                elif e in [errno.ENOENT, errno.ENOTDIR]:
                     # One of the path components was not a directory, likely
                     # because we're in a zip file.
                     write = False
                 elif e == errno.EACCES:
-                    state.trace("read only directory: %r" % 
(fn_pypath.dirname,))
+                    state.trace("read only directory: %r" % fn_pypath.dirname)
                     write = False
                 else:
                     raise
         cache_name = fn_pypath.basename[:-3] + PYC_TAIL
         pyc = os.path.join(cache_dir, cache_name)
-        # Notice that even if we're in a read-only directory, I'm going to check
-        # for a cached pyc. This may not be optimal...
+        # Notice that even if we're in a read-only directory, I'm going
+        # to check for a cached pyc. This may not be optimal...
         co = _read_pyc(fn_pypath, pyc)
         if co is None:
             state.trace("rewriting %r" % (fn,))
@@ -153,27 +155,59 @@
             mod.__file__ = co.co_filename
             # Normally, this attribute is 3.2+.
             mod.__cached__ = pyc
+            mod.__loader__ = self
             py.builtin.exec_(co, mod.__dict__)
         except:
             del sys.modules[name]
             raise
         return sys.modules[name]
 
-def _write_pyc(co, source_path, pyc):
-    # Technically, we don't have to have the same pyc format as (C)Python, since
-    # these "pycs" should never be seen by builtin import. However, there's
-    # little reason deviate, and I hope sometime to be able to use
-    # imp.load_compiled to load them. (See the comment in load_module above.)
+
+
+    def is_package(self, name):
+        try:
+            fd, fn, desc = imp.find_module(name)
+        except ImportError:
+            return False
+        if fd is not None:
+            fd.close()
+        tp = desc[2]
+        return tp == imp.PKG_DIRECTORY
+
+    @classmethod
+    def _register_with_pkg_resources(cls):
+        """
+        Ensure package resources can be loaded from this loader. May be called
+        multiple times, as the operation is idempotent.
+        """
+        try:
+            import pkg_resources
+            # access an attribute in case a deferred importer is present
+            pkg_resources.__name__
+        except ImportError:
+            return
+
+        # Since pytest tests are always located in the file system, the
+        #  DefaultProvider is appropriate.
+        pkg_resources.register_loader_type(cls, pkg_resources.DefaultProvider)
+
+
+def _write_pyc(state, co, source_path, pyc):
+    # Technically, we don't have to have the same pyc format as
+    # (C)Python, since these "pycs" should never be seen by builtin
+    # import. However, there's little reason deviate, and I hope
+    # sometime to be able to use imp.load_compiled to load them. (See
+    # the comment in load_module above.)
     mtime = int(source_path.mtime())
     try:
         fp = open(pyc, "wb")
     except IOError:
         err = sys.exc_info()[1].errno
-        if err == PATH_COMPONENT_NOT_DIR:
-            # This happens when we get a EEXIST in find_module creating the
-            # __pycache__ directory and __pycache__ is by some non-dir node.
-            return False
-        raise
+        state.trace("error writing pyc file at %s: errno=%s" %(pyc, err))
+        # we ignore any failure to write the cache file
+        # there are many reasons, permission-denied, __pycache__ being a
+        # file etc.
+        return False
     try:
         fp.write(imp.get_magic())
         fp.write(struct.pack("<l", mtime))
@@ -185,12 +219,43 @@
 RN = "\r\n".encode("utf-8")
 N = "\n".encode("utf-8")
 
+cookie_re = re.compile(r"^[ \t\f]*#.*coding[:=][ \t]*[-\w.]+")
+BOM_UTF8 = '\xef\xbb\xbf'
+
 def _rewrite_test(state, fn):
     """Try to read and rewrite *fn* and return the code object."""
     try:
         source = fn.read("rb")
     except EnvironmentError:
         return None
+    if ASCII_IS_DEFAULT_ENCODING:
+        # ASCII is the default encoding in Python 2. Without a coding
+        # declaration, Python 2 will complain about any bytes in the file
+        # outside the ASCII range. Sadly, this behavior does not extend to
+        # compile() or ast.parse(), which prefer to interpret the bytes as
+        # latin-1. (At least they properly handle explicit coding cookies.) To
+        # preserve this error behavior, we could force ast.parse() to use ASCII
+        # as the encoding by inserting a coding cookie. Unfortunately, that
+        # messes up line numbers. Thus, we have to check ourselves if anything
+        # is outside the ASCII range in the case no encoding is explicitly
+        # declared. For more context, see issue #269. Yay for Python 3 which
+        # gets this right.
+        end1 = source.find("\n")
+        end2 = source.find("\n", end1 + 1)
+        if (not source.startswith(BOM_UTF8) and
+            cookie_re.match(source[0:end1]) is None and
+            cookie_re.match(source[end1 + 1:end2]) is None):
+            if hasattr(state, "_indecode"):
+                return None  # encodings imported us again, we don't rewrite
+            state._indecode = True
+            try:
+                try:
+                    source.decode("ascii")
+                except UnicodeDecodeError:
+                    # Let it fail in real import.
+                    return None
+            finally:
+                del state._indecode
     # On Python versions which are not 2.7 and less than or equal to 3.1, the
     # parser expects *nix newlines.
     if REWRITE_NEWLINES:
@@ -216,16 +281,16 @@
     if sys.platform.startswith("win"):
         # Windows grants exclusive access to open files and doesn't have atomic
         # rename, so just write into the final file.
-        _write_pyc(co, fn, pyc)
+        _write_pyc(state, co, fn, pyc)
     else:
         # When not on windows, assume rename is atomic. Dump the code object
         # into a file specific to this process and atomically replace it.
         proc_pyc = pyc + "." + str(os.getpid())
-        if _write_pyc(co, fn, proc_pyc):
+        if _write_pyc(state, co, fn, proc_pyc):
             os.rename(proc_pyc, pyc)
 
 def _read_pyc(source, pyc):
-    """Possibly read a py.test pyc containing rewritten code.
+    """Possibly read a pytest pyc containing rewritten code.
 
     Return rewritten code if successful or None if not.
     """
@@ -240,9 +305,8 @@
         except EnvironmentError:
             return None
         # Check for invalid or out of date pyc file.
-        if (len(data) != 8 or
-            data[:4] != imp.get_magic() or
-            struct.unpack("<l", data[4:])[0] != mtime):
+        if (len(data) != 8 or data[:4] != imp.get_magic() or
+                struct.unpack("<l", data[4:])[0] != mtime):
             return None
         co = marshal.load(fp)
         if not isinstance(co, types.CodeType):
@@ -259,7 +323,10 @@
 
 
 _saferepr = py.io.saferepr
-from _pytest.assertion.util import format_explanation as _format_explanation
+from _pytest.assertion.util import format_explanation as _format_explanation # noqa
+
+def _should_repr_global_name(obj):
+    return not hasattr(obj, "__name__") and not py.builtin.callable(obj)
 
 def _format_boolop(explanations, is_or):
     return "(" + (is_or and " or " or " and ").join(explanations) + ")"
@@ -280,35 +347,35 @@
 
 
 unary_map = {
-    ast.Not : "not %s",
-    ast.Invert : "~%s",
-    ast.USub : "-%s",
-    ast.UAdd : "+%s"
+    ast.Not: "not %s",
+    ast.Invert: "~%s",
+    ast.USub: "-%s",
+    ast.UAdd: "+%s"
 }
 
 binop_map = {
-    ast.BitOr : "|",
-    ast.BitXor : "^",
-    ast.BitAnd : "&",
-    ast.LShift : "<<",
-    ast.RShift : ">>",
-    ast.Add : "+",
-    ast.Sub : "-",
-    ast.Mult : "*",
-    ast.Div : "/",
-    ast.FloorDiv : "//",
-    ast.Mod : "%",
-    ast.Eq : "==",
-    ast.NotEq : "!=",
-    ast.Lt : "<",
-    ast.LtE : "<=",
-    ast.Gt : ">",
-    ast.GtE : ">=",
-    ast.Pow : "**",
-    ast.Is : "is",
-    ast.IsNot : "is not",
-    ast.In : "in",
-    ast.NotIn : "not in"
+    ast.BitOr: "|",
+    ast.BitXor: "^",
+    ast.BitAnd: "&",
+    ast.LShift: "<<",
+    ast.RShift: ">>",
+    ast.Add: "+",
+    ast.Sub: "-",
+    ast.Mult: "*",
+    ast.Div: "/",
+    ast.FloorDiv: "//",
+    ast.Mod: "%%", # escaped for string formatting
+    ast.Eq: "==",
+    ast.NotEq: "!=",
+    ast.Lt: "<",
+    ast.LtE: "<=",
+    ast.Gt: ">",
+    ast.GtE: ">=",
+    ast.Pow: "**",
+    ast.Is: "is",
+    ast.IsNot: "is not",
+    ast.In: "in",
+    ast.NotIn: "not in"
 }
 
 
@@ -341,7 +408,7 @@
         lineno = 0
         for item in mod.body:
             if (expect_docstring and isinstance(item, ast.Expr) and
-                isinstance(item.value, ast.Str)):
+                    isinstance(item.value, ast.Str)):
                 doc = item.value.s
                 if "PYTEST_DONT_REWRITE" in doc:
                     # The module has disabled assertion rewriting.
@@ -462,7 +529,8 @@
         body.append(raise_)
         # Clear temporary variables by setting them to None.
         if self.variables:
-            variables = [ast.Name(name, ast.Store()) for name in self.variables]
+            variables = [ast.Name(name, ast.Store())
+                         for name in self.variables]
             clear = ast.Assign(variables, ast.Name("None", ast.Load()))
             self.statements.append(clear)
         # Fix line numbers.
@@ -471,11 +539,12 @@
         return self.statements
 
     def visit_Name(self, name):
-        # Check if the name is local or not.
+        # Display the repr of the name if it's a local variable or
+        # _should_repr_global_name() thinks it's acceptable.
         locs = ast.Call(self.builtin("locals"), [], [], None, None)
-        globs = ast.Call(self.builtin("globals"), [], [], None, None)
-        ops = [ast.In(), ast.IsNot()]
-        test = ast.Compare(ast.Str(name.id), ops, [locs, globs])
+        inlocs = ast.Compare(ast.Str(name.id), [ast.In()], [locs])
+        dorepr = self.helper("should_repr_global_name", name)
+        test = ast.BoolOp(ast.Or(), [inlocs, dorepr])
         expr = ast.IfExp(test, self.display(name), ast.Str(name.id))
         return name, self.explanation_param(expr)
 
@@ -492,7 +561,8 @@
         for i, v in enumerate(boolop.values):
             if i:
                 fail_inner = []
-                self.on_failure.append(ast.If(cond, fail_inner, []))
+                # cond is set in a prior loop iteration below
+                self.on_failure.append(ast.If(cond, fail_inner, [])) # noqa
                 self.on_failure = fail_inner
             self.push_format_context()
             res, expl = self.visit(v)
@@ -548,7 +618,8 @@
             new_kwarg, expl = self.visit(call.kwargs)
             arg_expls.append("**" + expl)
         expl = "%s(%s)" % (func_expl, ', '.join(arg_expls))
-        new_call = ast.Call(new_func, new_args, new_kwargs, new_star, new_kwarg)
+        new_call = ast.Call(new_func, new_args, new_kwargs,
+                            new_star, new_kwarg)
         res = self.assign(new_call)
         res_expl = self.explanation_param(self.display(res))
         outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl)
@@ -584,7 +655,7 @@
             res_expr = ast.Compare(left_res, [op], [next_res])
             self.statements.append(ast.Assign([store_names[i]], res_expr))
             left_res, left_expl = next_res, next_expl
-        # Use py.code._reprcompare if that's available.
+        # Use pytest.assertion.util._reprcompare if that's available.
         expl_call = self.helper("call_reprcompare",
                                 ast.Tuple(syms, ast.Load()),
                                 ast.Tuple(load_names, ast.Load()),
diff --git a/_pytest/assertion/util.py b/_pytest/assertion/util.py
--- a/_pytest/assertion/util.py
+++ b/_pytest/assertion/util.py
@@ -1,8 +1,13 @@
 """Utilities for assertion debugging"""
 
 import py
+try:
+    from collections import Sequence
+except ImportError:
+    Sequence = list
 
 BuiltinAssertionError = py.builtin.builtins.AssertionError
+u = py.builtin._totext
 
 # The _reprcompare attribute on the util module is used by the new assertion
 # interpretation code and assertion rewriter to detect this plugin was
@@ -10,6 +15,7 @@
 # DebugInterpreter.
 _reprcompare = None
 
+
 def format_explanation(explanation):
     """This formats an explanation
 
@@ -20,7 +26,18 @@
     for when one explanation needs to span multiple lines, e.g. when
     displaying diffs.
     """
-    # simplify 'assert False where False = ...'
+    explanation = _collapse_false(explanation)
+    lines = _split_explanation(explanation)
+    result = _format_lines(lines)
+    return u('\n').join(result)
+
+
+def _collapse_false(explanation):
+    """Collapse expansions of False
+
+    So this strips out any "assert False\n{where False = ...\n}"
+    blocks.
+    """
     where = 0
     while True:
         start = where = explanation.find("False\n{False = ", where)
@@ -42,28 +59,48 @@
             explanation = (explanation[:start] + explanation[start+15:end-1] +
                            explanation[end+1:])
             where -= 17
-    raw_lines = (explanation or '').split('\n')
-    # escape newlines not followed by {, } and ~
+    return explanation
+
+
+def _split_explanation(explanation):
+    """Return a list of individual lines in the explanation
+
+    This will return a list of lines split on '\n{', '\n}' and '\n~'.
+    Any other newlines will be escaped and appear in the line as the
+    literal '\n' characters.
+    """
+    raw_lines = (explanation or u('')).split('\n')
     lines = [raw_lines[0]]
     for l in raw_lines[1:]:
         if l.startswith('{') or l.startswith('}') or l.startswith('~'):
             lines.append(l)
         else:
             lines[-1] += '\\n' + l
+    return lines
 
+
+def _format_lines(lines):
+    """Format the individual lines
+
+    This will replace the '{', '}' and '~' characters of our mini
+    formatting language with the proper 'where ...', 'and ...' and ' +
+    ...' text, taking care of indentation along the way.
+
+    Return a list of formatted lines.
+    """
     result = lines[:1]
     stack = [0]
     stackcnt = [0]
     for line in lines[1:]:
         if line.startswith('{'):
             if stackcnt[-1]:
-                s = 'and   '
+                s = u('and   ')
             else:
-                s = 'where '
+                s = u('where ')
             stack.append(len(result))
             stackcnt[-1] += 1
             stackcnt.append(0)
-            result.append(' +' + '  '*(len(stack)-1) + s + line[1:])
+            result.append(u(' +') + u('  ')*(len(stack)-1) + s + line[1:])
         elif line.startswith('}'):
             assert line.startswith('}')
             stack.pop()
@@ -71,9 +108,9 @@
             result[stack[-1]] += line[1:]
         else:
             assert line.startswith('~')
-            result.append('  '*len(stack) + line[1:])
+            result.append(u('  ')*len(stack) + line[1:])
     assert len(stack) == 1
-    return '\n'.join(result)
+    return result
 
 
 # Provide basestring in python3
@@ -83,132 +120,163 @@
     basestring = str
 
 
-def assertrepr_compare(op, left, right):
-    """return specialised explanations for some operators/operands"""
-    width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op
+def assertrepr_compare(config, op, left, right):
+    """Return specialised explanations for some operators/operands"""
+    width = 80 - 15 - len(op) - 2  # 15 chars indentation, 1 space around op
     left_repr = py.io.saferepr(left, maxsize=int(width/2))
     right_repr = py.io.saferepr(right, maxsize=width-len(left_repr))
-    summary = '%s %s %s' % (left_repr, op, right_repr)
+    summary = u('%s %s %s') % (left_repr, op, right_repr)
 
-    issequence = lambda x: isinstance(x, (list, tuple))
+    issequence = lambda x: (isinstance(x, (list, tuple, Sequence))
+                            and not isinstance(x, basestring))
     istext = lambda x: isinstance(x, basestring)
     isdict = lambda x: isinstance(x, dict)
-    isset = lambda x: isinstance(x, set)
+    isset = lambda x: isinstance(x, (set, frozenset))
 
+    verbose = config.getoption('verbose')
     explanation = None
     try:
         if op == '==':
             if istext(left) and istext(right):
-                explanation = _diff_text(left, right)
+                explanation = _diff_text(left, right, verbose)
             elif issequence(left) and issequence(right):
-                explanation = _compare_eq_sequence(left, right)
+                explanation = _compare_eq_sequence(left, right, verbose)
             elif isset(left) and isset(right):
-                explanation = _compare_eq_set(left, right)
+                explanation = _compare_eq_set(left, right, verbose)
             elif isdict(left) and isdict(right):
-                explanation = _diff_text(py.std.pprint.pformat(left),
-                                         py.std.pprint.pformat(right))
+                explanation = _compare_eq_dict(left, right, verbose)
         elif op == 'not in':
             if istext(left) and istext(right):
-                explanation = _notin_text(left, right)
-    except py.builtin._sysex:
-        raise
-    except:
+                explanation = _notin_text(left, right, verbose)
+    except Exception:
         excinfo = py.code.ExceptionInfo()
-        explanation = ['(pytest_assertion plugin: representation of '
-            'details failed. Probably an object has a faulty __repr__.)',
-            str(excinfo)
-            ]
-
+        explanation = [
+            u('(pytest_assertion plugin: representation of details failed.  '
+              'Probably an object has a faulty __repr__.)'),
+            u(excinfo)]
 
     if not explanation:
         return None
 
-    # Don't include pageloads of data, should be configurable
-    if len(''.join(explanation)) > 80*8:
-        explanation = ['Detailed information too verbose, truncated']
-
     return [summary] + explanation
 
 
-def _diff_text(left, right):
-    """Return the explanation for the diff between text
+def _diff_text(left, right, verbose=False):
+    """Return the explanation for the diff between text or bytes
 
-    This will skip leading and trailing characters which are
-    identical to keep the diff minimal.
+    Unless --verbose is used this will skip leading and trailing
+    characters which are identical to keep the diff minimal.
+
+    If the input are bytes they will be safely converted to text.
     """
     explanation = []
-    i = 0 # just in case left or right has zero length
-    for i in range(min(len(left), len(right))):
-        if left[i] != right[i]:
-            break
-    if i > 42:
-        i -= 10                 # Provide some context
-        explanation = ['Skipping %s identical '
-                       'leading characters in diff' % i]
-        left = left[i:]
-        right = right[i:]
-    if len(left) == len(right):
-        for i in range(len(left)):
-            if left[-i] != right[-i]:
+    if isinstance(left, py.builtin.bytes):
+        left = u(repr(left)[1:-1]).replace(r'\n', '\n')
+    if isinstance(right, py.builtin.bytes):
+        right = u(repr(right)[1:-1]).replace(r'\n', '\n')
+    if not verbose:
+        i = 0  # just in case left or right has zero length
+        for i in range(min(len(left), len(right))):
+            if left[i] != right[i]:
                 break
         if i > 42:
-            i -= 10     # Provide some context
-            explanation += ['Skipping %s identical '
-                            'trailing characters in diff' % i]
-            left = left[:-i]
-            right = right[:-i]
+            i -= 10                 # Provide some context
+            explanation = [u('Skipping %s identical leading '
+                             'characters in diff, use -v to show') % i]
+            left = left[i:]
+            right = right[i:]
+        if len(left) == len(right):
+            for i in range(len(left)):
+                if left[-i] != right[-i]:
+                    break
+            if i > 42:
+                i -= 10     # Provide some context
+                explanation += [u('Skipping %s identical trailing '
+                                  'characters in diff, use -v to show') % i]
+                left = left[:-i]
+                right = right[:-i]
     explanation += [line.strip('\n')
                     for line in py.std.difflib.ndiff(left.splitlines(),
                                                      right.splitlines())]
     return explanation
 
 
-def _compare_eq_sequence(left, right):
+def _compare_eq_sequence(left, right, verbose=False):
     explanation = []
     for i in range(min(len(left), len(right))):
         if left[i] != right[i]:
-            explanation += ['At index %s diff: %r != %r' %
-                            (i, left[i], right[i])]
+            explanation += [u('At index %s diff: %r != %r')
+                            % (i, left[i], right[i])]
             break
     if len(left) > len(right):
-        explanation += ['Left contains more items, '
-            'first extra item: %s' % py.io.saferepr(left[len(right)],)]
+        explanation += [u('Left contains more items, first extra item: %s')
+                        % py.io.saferepr(left[len(right)],)]
     elif len(left) < len(right):
-        explanation += ['Right contains more items, '
-            'first extra item: %s' % py.io.saferepr(right[len(left)],)]
-    return explanation # + _diff_text(py.std.pprint.pformat(left),
-                       #             py.std.pprint.pformat(right))
+        explanation += [
+            u('Right contains more items, first extra item: %s') %
+            py.io.saferepr(right[len(left)],)]
+    return explanation  # + _diff_text(py.std.pprint.pformat(left),
+                        #             py.std.pprint.pformat(right))
 
 
-def _compare_eq_set(left, right):
+def _compare_eq_set(left, right, verbose=False):
     explanation = []
     diff_left = left - right
     diff_right = right - left
     if diff_left:
-        explanation.append('Extra items in the left set:')
+        explanation.append(u('Extra items in the left set:'))
         for item in diff_left:
             explanation.append(py.io.saferepr(item))
     if diff_right:
-        explanation.append('Extra items in the right set:')
+        explanation.append(u('Extra items in the right set:'))
         for item in diff_right:
             explanation.append(py.io.saferepr(item))
     return explanation
 
 
-def _notin_text(term, text):
+def _compare_eq_dict(left, right, verbose=False):
+    explanation = []
+    common = set(left).intersection(set(right))
+    same = dict((k, left[k]) for k in common if left[k] == right[k])
+    if same and not verbose:
+        explanation += [u('Omitting %s identical items, use -v to show') %
+                        len(same)]
+    elif same:
+        explanation += [u('Common items:')]
+        explanation += py.std.pprint.pformat(same).splitlines()
+    diff = set(k for k in common if left[k] != right[k])
+    if diff:
+        explanation += [u('Differing items:')]
+        for k in diff:
+            explanation += [py.io.saferepr({k: left[k]}) + ' != ' +
+                            py.io.saferepr({k: right[k]})]
+    extra_left = set(left) - set(right)
+    if extra_left:
+        explanation.append(u('Left contains more items:'))
+        explanation.extend(py.std.pprint.pformat(
+            dict((k, left[k]) for k in extra_left)).splitlines())
+    extra_right = set(right) - set(left)
+    if extra_right:
+        explanation.append(u('Right contains more items:'))
+        explanation.extend(py.std.pprint.pformat(
+            dict((k, right[k]) for k in extra_right)).splitlines())
+    return explanation
+
+
+def _notin_text(term, text, verbose=False):
     index = text.find(term)
     head = text[:index]
     tail = text[index+len(term):]
     correct_text = head + tail
-    diff = _diff_text(correct_text, text)
-    newdiff = ['%s is contained here:' % py.io.saferepr(term, maxsize=42)]
+    diff = _diff_text(correct_text, text, verbose)
+    newdiff = [u('%s is contained here:') % py.io.saferepr(term, maxsize=42)]
     for line in diff:
-        if line.startswith('Skipping'):
+        if line.startswith(u('Skipping')):
             continue
-        if line.startswith('- '):
+        if line.startswith(u('- ')):
             continue
-        if line.startswith('+ '):
-            newdiff.append('  ' + line[2:])
+        if line.startswith(u('+ ')):
+            newdiff.append(u('  ') + line[2:])
         else:
             newdiff.append(line)
     return newdiff
diff --git a/_pytest/capture.py b/_pytest/capture.py
--- a/_pytest/capture.py
+++ b/_pytest/capture.py
@@ -1,43 +1,114 @@
-""" per-test stdout/stderr capturing mechanisms, ``capsys`` and ``capfd`` 
function arguments.  """
+"""
+    per-test stdout/stderr capturing mechanisms,
+    ``capsys`` and ``capfd`` function arguments.
+"""
+# note: py.io capture was where copied from
+# pylib 1.4.20.dev2 (rev 13d9af95547e)
+import sys
+import os
+import tempfile
 
-import pytest, py
-import os
+import py
+import pytest
+
+try:
+    from io import StringIO
+except ImportError:
+    from StringIO import StringIO
+
+try:
+    from io import BytesIO
+except ImportError:
+    class BytesIO(StringIO):
+        def write(self, data):
+            if isinstance(data, unicode):
+                raise TypeError("not a byte value: %r" % (data,))
+            StringIO.write(self, data)
+
+if sys.version_info < (3, 0):
+    class TextIO(StringIO):
+        def write(self, data):
+            if not isinstance(data, unicode):
+                enc = getattr(self, '_encoding', 'UTF-8')
+                data = unicode(data, enc, 'replace')
+            StringIO.write(self, data)
+else:
+    TextIO = StringIO
+
+
+patchsysdict = {0: 'stdin', 1: 'stdout', 2: 'stderr'}
+
 
 def pytest_addoption(parser):
     group = parser.getgroup("general")
-    group._addoption('--capture', action="store", default=None,
-        metavar="method", type="choice", choices=['fd', 'sys', 'no'],
+    group._addoption(
+        '--capture', action="store", default=None,
+        metavar="method", choices=['fd', 'sys', 'no'],
         help="per-test capturing method: one of fd (default)|sys|no.")
-    group._addoption('-s', action="store_const", const="no", dest="capture",
+    group._addoption(
+        '-s', action="store_const", const="no", dest="capture",
         help="shortcut for --capture=no.")
 
+
 @pytest.mark.tryfirst
-def pytest_cmdline_parse(pluginmanager, args):
-    # we want to perform capturing already for plugin/conftest loading
-    if '-s' in args or "--capture=no" in args:
-        method = "no"
-    elif hasattr(os, 'dup') and '--capture=sys' not in args:
+def pytest_load_initial_conftests(early_config, parser, args, __multicall__):
+    ns = parser.parse_known_args(args)
+    method = ns.capture
+    if not method:
         method = "fd"
-    else:
+    if method == "fd" and not hasattr(os, "dup"):
         method = "sys"
     capman = CaptureManager(method)
-    pluginmanager.register(capman, "capturemanager")
+    early_config.pluginmanager.register(capman, "capturemanager")
+
+    # make sure that capturemanager is properly reset at final shutdown
+    def teardown():
+        try:
+            capman.reset_capturings()
+        except ValueError:
+            pass
+
+    early_config.pluginmanager.add_shutdown(teardown)
+
+    # make sure logging does not raise exceptions at the end
+    def silence_logging_at_shutdown():
+        if "logging" in sys.modules:
+            sys.modules["logging"].raiseExceptions = False
+    early_config.pluginmanager.add_shutdown(silence_logging_at_shutdown)
+
+    # finally trigger conftest loading but while capturing (issue93)
+    capman.resumecapture()
+    try:
+        try:
+            return __multicall__.execute()
+        finally:
+            out, err = capman.suspendcapture()
+    except:
+        sys.stdout.write(out)
+        sys.stderr.write(err)
+        raise
+
 
 def addouterr(rep, outerr):
     for secname, content in zip(["out", "err"], outerr):
         if content:
             rep.sections.append(("Captured std%s" % secname, content))
 
+
 class NoCapture:
     def startall(self):
         pass
+
     def resume(self):
         pass
+
     def reset(self):
         pass
+
     def suspend(self):
         return "", ""
 
+
 class CaptureManager:
     def __init__(self, defaultmethod=None):
         self._method2capture = {}
@@ -45,21 +116,23 @@
 
     def _maketempfile(self):
         f = py.std.tempfile.TemporaryFile()
-        newf = py.io.dupfile(f, encoding="UTF-8")
+        newf = dupfile(f, encoding="UTF-8")
         f.close()
         return newf
 
     def _makestringio(self):
-        return py.io.TextIO()
+        return TextIO()
 
     def _getcapture(self, method):
         if method == "fd":
-            return py.io.StdCaptureFD(now=False,
-                out=self._maketempfile(), err=self._maketempfile()
+            return StdCaptureFD(
+                out=self._maketempfile(),
+                err=self._maketempfile(),
             )
         elif method == "sys":
-            return py.io.StdCapture(now=False,
-                out=self._makestringio(), err=self._makestringio()
+            return StdCapture(
+                out=self._makestringio(),
+                err=self._makestringio(),
             )
         elif method == "no":
             return NoCapture()
@@ -74,23 +147,24 @@
                 method = config._conftest.rget("option_capture", path=fspath)
             except KeyError:
                 method = "fd"
-        if method == "fd" and not hasattr(os, 'dup'): # e.g. jython
+        if method == "fd" and not hasattr(os, 'dup'):  # e.g. jython
             method = "sys"
         return method
 
     def reset_capturings(self):
-        for name, cap in self._method2capture.items():
+        for cap in self._method2capture.values():
             cap.reset()
 
     def resumecapture_item(self, item):
         method = self._getmethod(item.config, item.fspath)
         if not hasattr(item, 'outerr'):
-            item.outerr = ('', '') # we accumulate outerr on the item
+            item.outerr = ('', '')  # we accumulate outerr on the item
         return self.resumecapture(method)
 
     def resumecapture(self, method=None):
         if hasattr(self, '_capturing'):
-            raise ValueError("cannot resume, already capturing with %r" %
+            raise ValueError(
+                "cannot resume, already capturing with %r" %
                 (self._capturing,))
         if method is None:
             method = self._defaultmethod
@@ -119,30 +193,29 @@
         return "", ""
 
     def activate_funcargs(self, pyfuncitem):
-        if not hasattr(pyfuncitem, 'funcargs'):
-            return
-        assert not hasattr(self, '_capturing_funcargs')
-        self._capturing_funcargs = capturing_funcargs = []
-        for name, capfuncarg in pyfuncitem.funcargs.items():
-            if name in ('capsys', 'capfd'):
-                capturing_funcargs.append(capfuncarg)
-                capfuncarg._start()
+        funcargs = getattr(pyfuncitem, "funcargs", None)
+        if funcargs is not None:
+            for name, capfuncarg in funcargs.items():
+                if name in ('capsys', 'capfd'):
+                    assert not hasattr(self, '_capturing_funcarg')
+                    self._capturing_funcarg = capfuncarg
+                    capfuncarg._start()
 
     def deactivate_funcargs(self):
-        capturing_funcargs = getattr(self, '_capturing_funcargs', None)
-        if capturing_funcargs is not None:
-            while capturing_funcargs:
-                capfuncarg = capturing_funcargs.pop()
-                capfuncarg._finalize()
-            del self._capturing_funcargs
+        capturing_funcarg = getattr(self, '_capturing_funcarg', None)
+        if capturing_funcarg:
+            outerr = capturing_funcarg._finalize()
+            del self._capturing_funcarg
+            return outerr
 
     def pytest_make_collect_report(self, __multicall__, collector):
         method = self._getmethod(collector.config, collector.fspath)
         try:
             self.resumecapture(method)
         except ValueError:
-            return # recursive collect, XXX refactor capturing
-                   # to allow for more lightweight recursive capturing
+            # recursive collect, XXX refactor capturing
+            # to allow for more lightweight recursive capturing
+            return
         try:
             rep = __multicall__.execute()
         finally:
@@ -169,46 +242,371 @@
 
     @pytest.mark.tryfirst
     def pytest_runtest_makereport(self, __multicall__, item, call):
-        self.deactivate_funcargs()
+        funcarg_outerr = self.deactivate_funcargs()
         rep = __multicall__.execute()
         outerr = self.suspendcapture(item)
-        if not rep.passed:
-            addouterr(rep, outerr)
+        if funcarg_outerr is not None:
+            outerr = (outerr[0] + funcarg_outerr[0],
+                      outerr[1] + funcarg_outerr[1])
+        addouterr(rep, outerr)
         if not rep.passed or rep.when == "teardown":
             outerr = ('', '')
         item.outerr = outerr
         return rep
 
+error_capsysfderror = "cannot use capsys and capfd at the same time"
+
+
 def pytest_funcarg__capsys(request):
     """enables capturing of writes to sys.stdout/sys.stderr and makes
     captured output available via ``capsys.readouterr()`` method calls
     which return a ``(out, err)`` tuple.
     """
-    return CaptureFuncarg(py.io.StdCapture)
+    if "capfd" in request._funcargs:
+        raise request.raiseerror(error_capsysfderror)
+    return CaptureFixture(StdCapture)
+
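As documented above, capsys hands the test whatever was written to sys.stdout/sys.stderr since the last call. A small usage sketch (the test name and strings are made up for illustration):

    def test_print_is_captured(capsys):
        print("hello")
        out, err = capsys.readouterr()   # (out, err) tuple of captured text
        assert out == "hello\n"
        assert err == ""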
 
 def pytest_funcarg__capfd(request):
     """enables capturing of writes to file descriptors 1 and 2 and makes
     captured output available via ``capfd.readouterr()`` method calls
     which return a ``(out, err)`` tuple.
     """
+    if "capsys" in request._funcargs:
+        request.raiseerror(error_capsysfderror)
     if not hasattr(os, 'dup'):
-        py.test.skip("capfd funcarg needs os.dup")
-    return CaptureFuncarg(py.io.StdCaptureFD)
+        pytest.skip("capfd funcarg needs os.dup")
+    return CaptureFixture(StdCaptureFD)
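capfd works at the file-descriptor level, so even output that bypasses sys.stdout, such as a direct os.write() to fd 1, is captured. A small usage sketch (again, names are illustrative only):

    import os
    import sys

    def test_fd_level_write_is_captured(capfd):
        os.write(1, b"low-level out\n")      # bypasses sys.stdout entirely
        sys.stderr.write("high-level err\n")
        out, err = capfd.readouterr()
        assert out == "low-level out\n"
        assert err == "high-level err\n"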
 
-class CaptureFuncarg:
+
+class CaptureFixture:
     def __init__(self, captureclass):
-        self.capture = captureclass(now=False)
+        self._capture = captureclass()
 
     def _start(self):
-        self.capture.startall()
+        self._capture.startall()
 
     def _finalize(self):
-        if hasattr(self, 'capture'):
-            self.capture.reset()
-            del self.capture
+        if hasattr(self, '_capture'):
+            outerr = self._outerr = self._capture.reset()
+            del self._capture
+            return outerr
 
     def readouterr(self):
-        return self.capture.readouterr()
+        try:
+            return self._capture.readouterr()
+        except AttributeError:
+            return self._outerr
 
     def close(self):
         self._finalize()
+
+
+class FDCapture:
+    """ Capture IO to/from a given os-level filedescriptor. """
+
+    def __init__(self, targetfd, tmpfile=None, patchsys=False):
+        """ save targetfd descriptor, and open a new
+            temporary file there.  If no tmpfile is
+            specified a tempfile.Tempfile() will be opened
+            in text mode.
+        """
+        self.targetfd = targetfd
+        if tmpfile is None and targetfd != 0:
+            f = tempfile.TemporaryFile('wb+')
+            tmpfile = dupfile(f, encoding="UTF-8")
+            f.close()
+        self.tmpfile = tmpfile
+        self._savefd = os.dup(self.targetfd)
+        if patchsys:
+            self._oldsys = getattr(sys, patchsysdict[targetfd])
+
+    def start(self):
+        try:
+            os.fstat(self._savefd)
+        except OSError:
+            raise ValueError(
+                "saved filedescriptor not valid, "
+                "did you call start() twice?")
+        if self.targetfd == 0 and not self.tmpfile:
+            fd = os.open(os.devnull, os.O_RDONLY)
+            os.dup2(fd, 0)
+            os.close(fd)
+            if hasattr(self, '_oldsys'):
+                setattr(sys, patchsysdict[self.targetfd], DontReadFromInput())
+        else:
+            os.dup2(self.tmpfile.fileno(), self.targetfd)
+            if hasattr(self, '_oldsys'):
+                setattr(sys, patchsysdict[self.targetfd], self.tmpfile)
+
+    def done(self):
+        """ unpatch and clean up, returns the self.tmpfile (file object)
+        """
+        os.dup2(self._savefd, self.targetfd)
+        os.close(self._savefd)
+        if self.targetfd != 0:
+            self.tmpfile.seek(0)
+        if hasattr(self, '_oldsys'):
+            setattr(sys, patchsysdict[self.targetfd], self._oldsys)
+        return self.tmpfile
+
+    def writeorg(self, data):
+        """ write a string to the original file descriptor
+        """
+        # repoint a scratch temporary file's descriptor at the saved
+        # original descriptor, so the write reaches the original stream
+        tempfp = tempfile.TemporaryFile()
+        try:
+            os.dup2(self._savefd, tempfp.fileno())
+            tempfp.write(data)
+        finally:
+            tempfp.close()
+
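A rough usage sketch for FDCapture (not part of the changeset; it relies on the os/tempfile imports at the top of this module), capturing writes made directly to file descriptor 1:

    cap = FDCapture(1)            # dup() fd 1 and prepare a temp file
    cap.start()                   # fd 1 now points at the temp file
    os.write(1, b"captured\n")    # low-level write lands in the temp file
    f = cap.done()                # restore fd 1; temp file is rewound
    assert f.read() == "captured\n"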
+
+def dupfile(f, mode=None, buffering=0, raising=False, encoding=None):
+    """ return a new open file object that's a duplicate of f
+
+        mode is duplicated if not given, 'buffering' controls
+        buffer size (defaulting to no buffering) and 'raising'
+        defines whether an exception is raised when an incompatible
+        file object is passed in (if raising is False, the file
+        object itself will be returned)
+    """
+    try:
+        fd = f.fileno()
+        mode = mode or f.mode
+    except AttributeError:
+        if raising:
+            raise
+        return f
+    newfd = os.dup(fd)
+    if sys.version_info >= (3, 0):
+        if encoding is not None:
+            mode = mode.replace("b", "")
+            buffering = True
+        return os.fdopen(newfd, mode, buffering, encoding, closefd=True)
+    else:
+        f = os.fdopen(newfd, mode, buffering)
+        if encoding is not None:
+            return EncodedFile(f, encoding)
+        return f
+
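dupfile() can also be exercised on an ordinary file object; a brief sketch (the file name is made up for illustration):

    f = open("example.txt", "w")
    g = dupfile(f, encoding="UTF-8")   # new file object on a dup()'ed fd
    f.close()                          # the duplicate keeps its own descriptor
    g.write("still writable after the original was closed\n")
    g.close()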
+
+class EncodedFile(object):
+    def __init__(self, _stream, encoding):
+        self._stream = _stream
+        self.encoding = encoding
+