Author: Wim Lavrijsen <[email protected]>
Branch: cppyy-packaging
Changeset: r92055:5fb13e8e0a8f
Date: 2017-08-01 09:23 -0700
http://bitbucket.org/pypy/pypy/changeset/5fb13e8e0a8f/

Log:    merge default into branch

diff too long, truncating to 2000 out of 5319 lines

diff --git a/lib_pypy/_tkinter/tklib_build.py b/lib_pypy/_tkinter/tklib_build.py
--- a/lib_pypy/_tkinter/tklib_build.py
+++ b/lib_pypy/_tkinter/tklib_build.py
@@ -22,12 +22,27 @@
     linklibs = ['tcl', 'tk']
     libdirs = []
 else:
-    for _ver in ['', '8.6', '8.5', '']:
+    # On some Linux distributions, the tcl and tk libraries are
+    # stored in /usr/include, so we must check this case also
+    libdirs = []
+    found = False
+    for _ver in ['', '8.6', '8.5']:
         incdirs = ['/usr/include/tcl' + _ver]
         linklibs = ['tcl' + _ver, 'tk' + _ver]
-        libdirs = []
         if os.path.isdir(incdirs[0]):
+            found = True
             break
+    if not found:
+        for _ver in ['8.6', '8.5', '']:
+            incdirs = []
+            linklibs = ['tcl' + _ver, 'tk' + _ver]
+            if os.path.isfile(''.join(['/usr/lib/lib', linklibs[1], '.so'])):
+                found = True
+                break
+    if not found:
+        sys.stderr.write("*** TCL libraries not found!  Falling back...\n")
+        incdirs = []
+        linklibs = ['tcl', 'tk']
 
 config_ffi = FFI()
 config_ffi.cdef("""
diff --git a/lib_pypy/cffi/_cffi_include.h b/lib_pypy/cffi/_cffi_include.h
--- a/lib_pypy/cffi/_cffi_include.h
+++ b/lib_pypy/cffi/_cffi_include.h
@@ -95,6 +95,7 @@
 #define _cffi_from_c_ulong PyLong_FromUnsignedLong
 #define _cffi_from_c_longlong PyLong_FromLongLong
 #define _cffi_from_c_ulonglong PyLong_FromUnsignedLongLong
+#define _cffi_from_c__Bool PyBool_FromLong
 
 #define _cffi_to_c_double PyFloat_AsDouble
 #define _cffi_to_c_float PyFloat_AsDouble
diff --git a/lib_pypy/cffi/_embedding.h b/lib_pypy/cffi/_embedding.h
--- a/lib_pypy/cffi/_embedding.h
+++ b/lib_pypy/cffi/_embedding.h
@@ -1,7 +1,12 @@
 
 /***** Support code for embedding *****/
 
-#if defined(_MSC_VER)
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#if defined(_WIN32)
 #  define CFFI_DLLEXPORT  __declspec(dllexport)
 #elif defined(__GNUC__)
 #  define CFFI_DLLEXPORT  __attribute__((visibility("default")))
@@ -525,3 +530,7 @@
 #undef cffi_compare_and_swap
 #undef cffi_write_barrier
 #undef cffi_read_barrier
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py
--- a/lib_pypy/cffi/recompiler.py
+++ b/lib_pypy/cffi/recompiler.py
@@ -412,6 +412,9 @@
             prnt('    }')
         prnt('    p[0] = (const void *)0x%x;' % self._version)
         prnt('    p[1] = &_cffi_type_context;')
+        prnt('#if PY_MAJOR_VERSION >= 3')
+        prnt('    return NULL;')
+        prnt('#endif')
         prnt('}')
         # on Windows, distutils insists on putting init_cffi_xyz in
         # 'export_symbols', so instead of fighting it, just give up and
@@ -578,7 +581,7 @@
 
     def _convert_expr_from_c(self, tp, var, context):
         if isinstance(tp, model.BasePrimitiveType):
-            if tp.is_integer_type():
+            if tp.is_integer_type() and tp.name != '_Bool':
                 return '_cffi_from_c_int(%s, %s)' % (var, tp.name)
             elif isinstance(tp, model.UnknownFloatType):
                 return '_cffi_from_c_double(%s)' % (var,)
diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py
--- a/lib_pypy/cffi/vengine_cpy.py
+++ b/lib_pypy/cffi/vengine_cpy.py
@@ -296,7 +296,7 @@
 
     def _convert_expr_from_c(self, tp, var, context):
         if isinstance(tp, model.PrimitiveType):
-            if tp.is_integer_type():
+            if tp.is_integer_type() and tp.name != '_Bool':
                 return '_cffi_from_c_int(%s, %s)' % (var, tp.name)
             elif tp.name != 'long double':
                 return '_cffi_from_c_%s(%s)' % (tp.name.replace(' ', '_'), var)
@@ -872,6 +872,7 @@
 #define _cffi_from_c_ulong PyLong_FromUnsignedLong
 #define _cffi_from_c_longlong PyLong_FromLongLong
 #define _cffi_from_c_ulonglong PyLong_FromUnsignedLongLong
+#define _cffi_from_c__Bool PyBool_FromLong
 
 #define _cffi_to_c_double PyFloat_AsDouble
 #define _cffi_to_c_float PyFloat_AsDouble
diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst
--- a/pypy/doc/build.rst
+++ b/pypy/doc/build.rst
@@ -10,6 +10,18 @@
 minutes on a fast machine -- and RAM-hungry.  You will need **at least** 2 GB
 of memory on a 32-bit machine and 4GB on a 64-bit machine.
 
+Before you start
+----------------
+
+Our normal development workflow avoids a full translation by using test-driven
+development. You can read more about how to develop PyPy here_, and latest
+translated (hopefully functional) binary packages are available on our
+buildbot's `nightly builds`_
+
+.. _here: getting-started-dev.html
+.. _`nightly builds`: http://buildbot.pypy.org/nightly
+
+You will need the build dependencies below to run the tests.
 
 Clone the repository
 --------------------
diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst
--- a/pypy/doc/cpython_differences.rst
+++ b/pypy/doc/cpython_differences.rst
@@ -330,6 +330,8 @@
 
  - ``frozenset`` (empty frozenset only)
 
+ - unbound method objects (for Python 2 only)
+
 This change requires some changes to ``id`` as well. ``id`` fulfills the
 following condition: ``x is y <=> id(x) == id(y)``. Therefore ``id`` of the
 above types will return a value that is computed from the argument, and can
diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst
--- a/pypy/doc/getting-started-dev.rst
+++ b/pypy/doc/getting-started-dev.rst
@@ -35,8 +35,8 @@
 
 * Edit things.  Use ``hg diff`` to see what you changed.  Use ``hg add``
   to make Mercurial aware of new files you added, e.g. new test files.
-  Use ``hg status`` to see if there are such files.  Run tests!  (See
-  the rest of this page.)
+  Use ``hg status`` to see if there are such files.  Write and run tests!
+  (See the rest of this page.)
 
 * Commit regularly with ``hg commit``.  A one-line commit message is
   fine.  We love to have tons of commits; make one as soon as you have
@@ -113,6 +113,10 @@
 make sure you have the correct version installed which
 you can find out with the ``--version`` switch.
 
+You will need the `build requirements`_ to run tests successfully, since many of
+them compile little pieces of PyPy and then run the tests inside that minimal
+interpreter
+
 Now on to running some tests.  PyPy has many different test directories
 and you can use shell completion to point at directories or files::
 
@@ -141,7 +145,7 @@
 
 .. _py.test testing tool: http://pytest.org
 .. _py.test usage and invocations: http://pytest.org/latest/usage.html#usage
-
+.. _`build requirements`: build.html#install-build-time-dependencies
 
 Special Introspection Features of the Untranslated Python Interpreter
 ---------------------------------------------------------------------
diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst
--- a/pypy/doc/how-to-release.rst
+++ b/pypy/doc/how-to-release.rst
@@ -40,6 +40,9 @@
   sure things are ported back to the trunk and to the branch as
   necessary.
 
+* Maybe bump the SOABI number in module/imp/importing. This has many
+  implications, so make sure the PyPy community agrees to the change.
+
 * Update and write documentation
 
   * update pypy/doc/contributor.rst (and possibly LICENSE)
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -5,6 +5,14 @@
 .. this is a revision shortly after release-pypy2.7-v5.8.0
 .. startrev: 558bd00b3dd8
 
+In previous versions of PyPy, ``instance.method`` would return always
+the same bound method object, when gotten out of the same instance (as
+far as ``is`` and ``id()`` can tell).  CPython doesn't do that.  Now
+PyPy, like CPython, returns a different bound method object every time.
+For ``type.method``, PyPy2 still returns always the same *unbound*
+method object; CPython does it for built-in types but not for
+user-defined types.
+
 .. branch: cffi-complex
 .. branch: cffi-char16-char32
 
@@ -30,3 +38,20 @@
 
 Renaming of ``cppyy`` to ``_cppyy``.
 The former is now an external package installable with ``pip install cppyy``.
+
+.. branch: Enable_PGO_for_clang
+
+.. branch: nopax
+
+At the end of translation, run ``attr -q -s pax.flags -V m`` on
+PAX-enabled systems on the produced binary.  This seems necessary
+because PyPy uses a JIT.
+
+.. branch: pypy_bytearray
+
+Improve ``bytearray`` performance (backported from py3.5)
+
+.. branch: gc-del-limit-growth
+
+Fix the bounds in the GC when allocating a lot of objects with finalizers,
+fixes issue #2590
diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py
--- a/pypy/interpreter/function.py
+++ b/pypy/interpreter/function.py
@@ -559,21 +559,29 @@
         return space.newbool(space.eq_w(self.w_function, w_other.w_function))
 
     def is_w(self, space, other):
+        if self.w_instance is not None:
+            return W_Root.is_w(self, space, other)
+        # The following special-case is only for *unbound* method objects.
+        # Motivation: in CPython, it seems that no strange internal type
+        # exists where the equivalent of ``x.method is x.method`` would
+        # return True.  This is unlike unbound methods, where e.g.
+        # ``list.append is list.append`` returns True.  The following code
+        # is here to emulate that behaviour.  Unlike CPython, we return
+        # True for all equal unbound methods, not just for built-in types.
         if not isinstance(other, Method):
             return False
-        return (self.w_instance is other.w_instance and
+        return (other.w_instance is None and
                 self.w_function is other.w_function and
                 self.w_class is other.w_class)
 
     def immutable_unique_id(self, space):
-        from pypy.objspace.std.util import IDTAG_METHOD as tag
+        if self.w_instance is not None:
+            return W_Root.immutable_unique_id(self, space)
+        # the special-case is only for *unbound* method objects
+        #
+        from pypy.objspace.std.util import IDTAG_UNBOUND_METHOD as tag
         from pypy.objspace.std.util import IDTAG_SHIFT
-        if self.w_instance is not None:
-            id = space.bigint_w(space.id(self.w_instance))
-            id = id.lshift(LONG_BIT)
-        else:
-            id = rbigint.fromint(0)
-        id = id.or_(space.bigint_w(space.id(self.w_function)))
+        id = space.bigint_w(space.id(self.w_function))
         id = id.lshift(LONG_BIT).or_(space.bigint_w(space.id(self.w_class)))
         id = id.lshift(IDTAG_SHIFT).int_or_(tag)
         return space.newlong_from_rbigint(id)
diff --git a/pypy/interpreter/test/test_function.py b/pypy/interpreter/test/test_function.py
--- a/pypy/interpreter/test/test_function.py
+++ b/pypy/interpreter/test/test_function.py
@@ -1,4 +1,4 @@
-import pytest
+import pytest, sys
 from pypy.interpreter import eval
 from pypy.interpreter.function import Function, Method, descr_function_get
 from pypy.interpreter.pycode import PyCode
@@ -342,6 +342,11 @@
         raises(ValueError, type(f).__setstate__, f, (1, 2, 3))
 
 class AppTestMethod:
+    def setup_class(cls):
+        cls.w_runappdirect_on_cpython = cls.space.wrap(
+            cls.runappdirect and
+            '__pypy__' not in sys.builtin_module_names)
+
     def test_simple_call(self):
         class A(object):
             def func(self, arg2):
@@ -572,7 +577,6 @@
         assert meth == meth
         assert meth == MethodType(func, object)
 
-    @pytest.mark.skipif("config.option.runappdirect")
     def test_method_identity(self):
         class A(object):
             def m(self):
@@ -589,19 +593,24 @@
 
         a = A()
         a2 = A()
-        assert a.m is a.m
-        assert id(a.m) == id(a.m)
-        assert a.m is not a.n
-        assert id(a.m) != id(a.n)
-        assert a.m is not a2.m
-        assert id(a.m) != id(a2.m)
+        x = a.m; y = a.m
+        assert x is not y
+        assert id(x) != id(y)
+        assert x == y
+        assert x is not a.n
+        assert id(x) != id(a.n)
+        assert x is not a2.m
+        assert id(x) != id(a2.m)
 
-        assert A.m is A.m
-        assert id(A.m) == id(A.m)
-        assert A.m is not A.n
-        assert id(A.m) != id(A.n)
-        assert A.m is not B.m
-        assert id(A.m) != id(B.m)
+        if not self.runappdirect_on_cpython:
+            assert A.m is A.m
+            assert id(A.m) == id(A.m)
+        assert A.m == A.m
+        x = A.m
+        assert x is not A.n
+        assert id(x) != id(A.n)
+        assert x is not B.m
+        assert id(x) != id(B.m)
 
 
 class TestMethod:
diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py
--- a/pypy/module/_cffi_backend/test/_backend_test_c.py
+++ b/pypy/module/_cffi_backend/test/_backend_test_c.py
@@ -3838,6 +3838,7 @@
         assert result == samples
         for i in range(len(samples)):
             assert result[i] == p[i] and type(result[i]) is type(p[i])
+            assert (type(result[i]) is bool) == (type(samples[i]) is bool)
     #
     BInt = new_primitive_type("int")
     py.test.raises(TypeError, unpack, p)
diff --git a/pypy/module/_vmprof/__init__.py b/pypy/module/_vmprof/__init__.py
--- a/pypy/module/_vmprof/__init__.py
+++ b/pypy/module/_vmprof/__init__.py
@@ -11,7 +11,6 @@
     interpleveldefs = {
         'enable': 'interp_vmprof.enable',
         'disable': 'interp_vmprof.disable',
-        'write_all_code_objects': 'interp_vmprof.write_all_code_objects',
         'is_enabled': 'interp_vmprof.is_enabled',
         'get_profile_path': 'interp_vmprof.get_profile_path',
         'stop_sampling': 'interp_vmprof.stop_sampling',
diff --git a/pypy/module/_vmprof/interp_vmprof.py b/pypy/module/_vmprof/interp_vmprof.py
--- a/pypy/module/_vmprof/interp_vmprof.py
+++ b/pypy/module/_vmprof/interp_vmprof.py
@@ -70,11 +70,6 @@
     except rvmprof.VMProfError as e:
         raise VMProfError(space, e)
 
-def write_all_code_objects(space):
-    """ Needed on cpython, just empty function here
-    """
-    pass
-
 def disable(space):
     """Disable vmprof.  Remember to close the file descriptor afterwards
     if necessary.
diff --git a/pypy/module/_vmprof/test/test__vmprof.py b/pypy/module/_vmprof/test/test__vmprof.py
--- a/pypy/module/_vmprof/test/test__vmprof.py
+++ b/pypy/module/_vmprof/test/test__vmprof.py
@@ -1,3 +1,4 @@
+import sys
 from rpython.tool.udir import udir
 from pypy.tool.pytest.objspace import gettestobjspace
 
@@ -7,6 +8,8 @@
     def setup_class(cls):
         cls.w_tmpfilename = cls.space.wrap(str(udir.join('test__vmprof.1')))
         cls.w_tmpfilename2 = cls.space.wrap(str(udir.join('test__vmprof.2')))
+        cls.w_plain = cls.space.wrap(not cls.runappdirect and
+            '__pypy__' not in sys.builtin_module_names)
 
     def test_import_vmprof(self):
         tmpfile = open(self.tmpfilename, 'wb')
@@ -115,3 +118,33 @@
                 assert fd1.read() == tmpfile.read()
         _vmprof.disable()
         assert _vmprof.get_profile_path() is None
+
+    def test_stop_sampling(self):
+        if not self.plain:
+            skip("unreliable test except on CPython without -A")
+        import os
+        import _vmprof
+        tmpfile = open(self.tmpfilename, 'wb')
+        native = 1
+        def f():
+            import sys
+            import math
+            j = sys.maxsize
+            for i in range(500):
+                j = math.sqrt(j)
+        _vmprof.enable(tmpfile.fileno(), 0.01, 0, native, 0, 0)
+        # get_vmprof_stack() always returns 0 here!
+        # see vmprof_common.c and assume RPYTHON_LL2CTYPES is defined!
+        f()
+        fileno = _vmprof.stop_sampling()
+        pos = os.lseek(fileno, 0, os.SEEK_CUR)
+        f()
+        pos2 = os.lseek(fileno, 0, os.SEEK_CUR)
+        assert pos == pos2
+        _vmprof.start_sampling()
+        f()
+        fileno = _vmprof.stop_sampling()
+        pos3 = os.lseek(fileno, 0, os.SEEK_CUR)
+        assert pos3 > pos
+        _vmprof.disable()
+
diff --git a/pypy/module/cppyy/test/test_cint.py b/pypy/module/cppyy/test/test_cint.py
deleted file mode 100644
--- a/pypy/module/cppyy/test/test_cint.py
+++ /dev/null
@@ -1,710 +0,0 @@
-import py, os, sys
-
-# These tests are for the CINT backend only (they exercise ROOT features
-# and classes that are not loaded/available with the Reflex backend). At
-# some point, these tests are likely covered by the CLang/LLVM backend.
-from pypy.module.cppyy import capi
-if capi.identify() != 'CINT':
-    py.test.skip("backend-specific: CINT-only tests")
-
-# load _cffi_backend early, or its global vars are counted as leaks in the
-# test (note that the module is not otherwise used in the test itself)
-from pypy.module._cffi_backend import newtype
-
-currpath = py.path.local(__file__).dirpath()
-iotypes_dct = str(currpath.join("iotypesDict.so"))
-
-def setup_module(mod):
-    if sys.platform == 'win32':
-        py.test.skip("win32 not supported so far")
-    err = os.system("cd '%s' && make CINT=t iotypesDict.so" % currpath)
-    if err:
-        raise OSError("'make' failed (see stderr)")
-
-class AppTestCINT:
-    spaceconfig = dict(usemodules=['cppyy', '_rawffi', 'itertools'])
-
-    def test01_globals(self):
-        """Test the availability of ROOT globals"""
-
-        import cppyy
-
-        assert cppyy.gbl.gROOT
-        assert cppyy.gbl.gApplication
-        assert cppyy.gbl.gSystem
-        assert cppyy.gbl.TInterpreter.Instance()           # compiled
-        assert cppyy.gbl.TInterpreter                      # interpreted
-        assert cppyy.gbl.TDirectory.CurrentDirectory()     # compiled
-        assert cppyy.gbl.TDirectory                        # interpreted
-
-    def test02_write_access_to_globals(self):
-        """Test overwritability of ROOT globals"""
-
-        import cppyy
-
-        oldval = cppyy.gbl.gDebug
-        assert oldval != 3
-
-        proxy = cppyy.gbl.__class__.__dict__['gDebug']
-        cppyy.gbl.gDebug = 3
-        assert proxy.__get__(proxy, None) == 3
-
-        # this is where this test differs from test03_write_access_to_globals
-        # in test_pythonify.py
-        cppyy.gbl.gROOT.ProcessLine('int gDebugCopy = gDebug;')
-        assert cppyy.gbl.gDebugCopy == 3
-
-        cppyy.gbl.gDebug = oldval
-
-    def test03_create_access_to_globals(self):
-        """Test creation and access of new ROOT globals"""
-
-        import cppyy
-
-        cppyy.gbl.gROOT.ProcessLine('double gMyOwnGlobal = 3.1415')
-        assert cppyy.gbl.gMyOwnGlobal == 3.1415
-
-        proxy = cppyy.gbl.__class__.__dict__['gMyOwnGlobal']
-        assert proxy.__get__(proxy, None) == 3.1415
-
-    def test04_auto_loading(self):
-        """Test auto-loading by retrieving a non-preloaded class"""
-
-        import cppyy
-
-        l = cppyy.gbl.TLorentzVector()
-        assert isinstance(l, cppyy.gbl.TLorentzVector)
-
-    def test05_macro_loading(self):
-        """Test accessibility to macro classes"""
-
-        import cppyy
-
-        loadres = cppyy.gbl.gROOT.LoadMacro('simple_class.C')
-        assert loadres == 0
-
-        base = cppyy.gbl.MySimpleBase
-        simple = cppyy.gbl.MySimpleDerived
-        simple_t = cppyy.gbl.MySimpleDerived_t
-
-        assert issubclass(simple, base)
-        assert simple is simple_t
-
-        c = simple()
-        assert isinstance(c, simple)
-        assert c.m_data == c.get_data()
-
-        c.set_data(13)
-        assert c.m_data == 13
-        assert c.get_data() == 13
-
-
-class AppTestCINTPYTHONIZATIONS:
-    spaceconfig = dict(usemodules=['cppyy', '_rawffi', 'itertools'])
-
-    def test01_strings(self):
-        """Test TString/TObjString compatibility"""
-
-        import cppyy
-
-        pyteststr = "aap noot mies"
-        def test_string(s1, s2):
-            assert len(s1) == len(s2)
-            assert s1 == s1
-            assert s1 == s2
-            assert s1 == str(s1)
-            assert s1 == pyteststr
-            assert s1 != "aap"
-            assert s1 != ""
-            assert s1 < "noot"
-            assert repr(s1) == repr(s2)
-
-        s1 = cppyy.gbl.TString(pyteststr)
-        test_string(s1, pyteststr)
-
-        s3 = cppyy.gbl.TObjString(pyteststr)
-        test_string(s3, pyteststr)
-
-    def test03_TVector(self):
-        """Test TVector2/3/T behavior"""
-
-        import cppyy, math
-
-        N = 51
-
-        # TVectorF is a typedef of floats
-        v = cppyy.gbl.TVectorF(N)
-        for i in range(N):
-            v[i] = i*i
-
-        assert len(v) == N
-        for j in v:
-            assert round(v[int(math.sqrt(j)+0.5)]-j, 5) == 0.
-
-    def test04_TStringTObjString(self):
-        """Test string/TString interchangebility"""
-
-        import cppyy
-
-        test = "aap noot mies"
-
-        s1 = cppyy.gbl.TString(test )
-        s2 = str(s1)
-
-        assert s1 == test
-        assert test == s2
-        assert s1 == s2
-
-        s3 = cppyy.gbl.TObjString(s2)
-        assert s3 == test
-        assert s2 == s3
-
-        # force use of: TNamed(const TString &name, const TString &title)
-        n = cppyy.gbl.TNamed(test, cppyy.gbl.TString("title"))
-        assert n.GetTitle() == "title"
-        assert n.GetName() == test
-
-
-class AppTestCINTTTREE:
-    spaceconfig = dict(usemodules=['cppyy', '_rawffi', 'itertools'])
-
-    def setup_class(cls):
-        cls.w_N = cls.space.newint(5)
-        cls.w_M = cls.space.newint(10)
-        cls.w_fname = cls.space.newtext("test.root")
-        cls.w_tname = cls.space.newtext("test")
-        cls.w_title = cls.space.newtext("test tree")
-        cls.w_iotypes = cls.space.appexec([], """():
-            import cppyy
-            return cppyy.load_reflection_info(%r)""" % (iotypes_dct,))
-
-    def test01_write_stdvector(self):
-        """Test writing of a single branched TTree with an 
std::vector<double>"""
-
-        from cppyy import gbl               # bootstraps, only needed for tests
-        from cppyy.gbl import TFile, TTree
-        from cppyy.gbl.std import vector
-
-        f = TFile(self.fname, "RECREATE")
-        mytree = TTree(self.tname, self.title)
-        mytree._python_owns = False
-
-        v = vector("double")()
-        raises(TypeError, TTree.Branch, None, "mydata", v.__class__.__name__, v)
-        raises(TypeError, TTree.Branch, v, "mydata", v.__class__.__name__, v)
-
-        mytree.Branch("mydata", v.__class__.__name__, v)
-
-        for i in range(self.N):
-            for j in range(self.M):
-                v.push_back(i*self.M+j)
-            mytree.Fill()
-            v.clear()
-        f.Write()
-        f.Close()
-
-    def test02_file_open(self):
-
-        from cppyy import gbl
-
-        f = gbl.TFile.Open(self.fname)
-        s = str(f)            # should not raise
-        r = repr(f)
-
-        f.Close()
-
-    def test03_read_stdvector(self):
-        """Test reading of a single branched TTree with an 
std::vector<double>"""
-
-        from cppyy import gbl
-        from cppyy.gbl import TFile
-
-        f = TFile(self.fname)
-        mytree = f.Get(self.tname)
-
-        i = 0
-        for event in mytree:
-            assert len(event.mydata) == self.M
-            for entry in event.mydata:
-                assert i == int(entry)
-                i += 1
-        assert i == self.N * self.M
-
-        f.Close()
-
-    def test04_write_some_data_object(self):
-        """Test writing of a complex data object"""
-
-        from cppyy import gbl
-        from cppyy.gbl import TFile, TTree, IO
-        from cppyy.gbl.IO import SomeDataObject
-
-        f = TFile(self.fname, "RECREATE")
-        mytree = TTree(self.tname, self.title)
-
-        d = SomeDataObject()
-        b = mytree.Branch("data", d)
-        mytree._python_owns = False
-        assert b
-
-        for i in range(self.N):
-            for j in range(self.M):
-                d.add_float(i*self.M+j)
-            d.add_tuple(d.get_floats())
-
-            mytree.Fill()
-
-        f.Write()
-        f.Close()
-
-    def test05_read_some_data_object(self):
-        """Test reading of a complex data object"""
-
-        from cppyy import gbl
-        from cppyy.gbl import TFile
-
-        f = TFile(self.fname)
-        mytree = f.Get(self.tname)
-
-        j = 1
-        for event in mytree:
-            i = 0
-            assert len(event.data.get_floats()) == j*self.M
-            for entry in event.data.get_floats():
-                assert i == int(entry)
-                i += 1
-
-            k = 1
-            assert len(event.data.get_tuples()) == j
-            for mytuple in event.data.get_tuples():
-                i = 0
-                assert len(mytuple) == k*self.M
-                for entry in mytuple:
-                    assert i == int(entry)
-                    i += 1
-                k += 1
-            j += 1
-        assert j-1 == self.N
-        #
-        f.Close()
-
-    def test06_branch_activation(self):
-        """Test of automatic branch activation"""
-
-        from cppyy import gbl
-        from cppyy.gbl import TFile, TTree
-        from cppyy.gbl.std import vector
-
-        L = 5
-
-        # writing
-        f = TFile(self.fname, "RECREATE")
-        mytree = TTree(self.tname, self.title)
-        mytree._python_owns = False
-
-        for i in range(L):
-            v = vector("double")()
-            mytree.Branch("mydata_%d"%i, v.__class__.__name__, v)
-            mytree.__dict__["v_%d"%i] = v
-
-        for i in range(self.N):
-            for k in range(L):
-                v = mytree.__dict__["v_%d"%k]
-                for j in range(self.M):
-                    mytree.__dict__["v_%d"%k].push_back(i*self.M+j*L+k)
-            mytree.Fill()
-            for k in range(L):
-                v = mytree.__dict__["v_%d"%k]
-                v.clear()
-        f.Write()
-        f.Close()
-
-        del mytree, f
-        import gc
-        gc.collect()
-
-        # reading
-        f = TFile(self.fname)
-        mytree = f.Get(self.tname)
-
-        # force (initial) disabling of all branches
-        mytree.SetBranchStatus("*",0);
-
-        i = 0
-        for event in mytree:
-            for k in range(L):
-                j = 0
-                data = getattr(mytree, "mydata_%d"%k)
-                assert len(data) == self.M
-                for entry in data:
-                    assert entry == i*self.M+j*L+k
-                    j += 1
-                assert j == self.M
-            i += 1
-        assert i == self.N
-
-        f.Close()
-
-    def test07_write_builtin(self):
-        """Test writing of builtins"""
-
-        from cppyy import gbl               # bootstraps, only needed for tests
-        from cppyy.gbl import TFile, TTree
-        from cppyy.gbl.std import vector
-
-        f = TFile(self.fname, "RECREATE")
-        mytree = TTree(self.tname, self.title)
-        mytree._python_owns = False
-
-        import array
-        mytree.ba = array.array('c', [chr(0)])
-        mytree.ia = array.array('i', [0])
-        mytree.da = array.array('d', [0.])
-
-        mytree.Branch("my_bool",   mytree.ba, "my_bool/O")
-        mytree.Branch("my_int",    mytree.ia, "my_int/I")
-        mytree.Branch("my_int2",   mytree.ia, "my_int2/I")
-        mytree.Branch("my_double", mytree.da, "my_double/D")
-
-        for i in range(self.N):
-            # make sure value is different from default (0)
-            mytree.ba[0] = i%2 and chr(0) or chr(1)
-            mytree.ia[0] = i+1
-            mytree.da[0] = (i+1)/2.
-            mytree.Fill()
-        f.Write()
-        f.Close()
-
-    def test08_read_builtin(self):
-        """Test reading of builtins"""
-
-        from cppyy import gbl
-        from cppyy.gbl import TFile
-
-        f = TFile(self.fname)
-        mytree = f.Get(self.tname)
-
-        raises(AttributeError, getattr, mytree, "does_not_exist")
-
-        i = 1
-        for event in mytree:
-            assert event.my_bool   == (i-1)%2 and 0 or 1
-            assert event.my_int    == i
-            assert event.my_double == i/2.
-            i += 1
-        assert (i-1) == self.N
-
-        f.Close()
-
-    def test09_user_read_builtin(self):
-        """Test user-directed reading of builtins"""
-
-        from cppyy import gbl
-        from cppyy.gbl import TFile
-
-        f = TFile(self.fname)
-        mytree = f.Get(self.tname)
-
-        # note, this is an old, annoted tree from test08
-        for i in range(3, mytree.GetEntriesFast()):
-            mytree.GetEntry(i)
-            assert mytree.my_int  == i+1
-            assert mytree.my_int2 == i+1
-
-        f.Close()
-
-class AppTestCINTREGRESSION:
-    spaceconfig = dict(usemodules=['cppyy', '_rawffi', 'itertools'])
-
-    # these are tests that at some point in the past resulted in failures on
-    # PyROOT; kept here to confirm no regression from PyROOT
-
-    def test01_regression(self):
-        """TPaveText::AddText() used to result in KeyError"""
-
-        # This is where the original problem was discovered, and the test is
-        # left in. However, the detailed underlying problem, as well as the
-        # solution to it, is tested in test_fragile.py
-
-        from cppyy import gbl
-        from cppyy.gbl import TPaveText
-
-        hello = TPaveText( .1, .8, .9, .97 )
-        hello.AddText( 'Hello, World!' )
-
-
-class AppTestCINTFUNCTION:
-    spaceconfig = dict(usemodules=['cppyy', '_rawffi', 'itertools'])
-    _pypytest_leaks = None   # TODO: figure out the false positives
-
-    # test the function callbacks; this does not work with Reflex, as it can
-    # not generate functions on the fly (it might with cffi?)
-
-    @py.test.mark.dont_track_allocations("TODO: understand; initialization 
left-over?")
-    def test01_global_function_callback(self):
-        """Test callback of a python global function"""
-
-        import cppyy, gc
-        TF1 = cppyy.gbl.TF1
-
-        def identity(x):
-            return x[0]
-
-        f = TF1("pyf1", identity, -1., 1., 0)
-
-        assert f.Eval(0.5)  == 0.5
-        assert f.Eval(-10.) == -10.
-        assert f.Eval(1.0)  == 1.0
-
-        # check proper propagation of default value
-        f = TF1("pyf1d", identity, -1., 1.)
-
-        assert f.Eval(0.5) == 0.5
-
-        del f      # force here, to prevent leak-check complaints
-        gc.collect()
-
-    def test02_callable_object_callback(self):
-        """Test callback of a python callable object"""
-
-        import cppyy, gc
-        TF1 = cppyy.gbl.TF1
-
-        class Linear:
-            def __call__(self, x, par):
-                return par[0] + x[0]*par[1]
-
-        f = TF1("pyf2", Linear(), -1., 1., 2)
-        f.SetParameters(5., 2.)
-
-        assert f.Eval(-0.1) == 4.8
-        assert f.Eval(1.3)  == 7.6
-
-        del f      # force here, to prevent leak-check complaints
-        gc.collect()
-
-    def test03_fit_with_python_gaussian(self):
-        """Test fitting with a python global function"""
-
-        # note: this function is dread-fully slow when running testing un-translated
-
-        import cppyy, gc, math
-        TF1, TH1F = cppyy.gbl.TF1, cppyy.gbl.TH1F
-
-        def pygaus(x, par):
-            arg1 = 0
-            scale1 = 0
-            ddx = 0.01
-
-            if (par[2] != 0.0):
-                arg1 = (x[0]-par[1])/par[2]
-                scale1 = (ddx*0.39894228)/par[2]
-                h1 = par[0]/(1+par[3])
-
-                gauss = h1*scale1*math.exp(-0.5*arg1*arg1)
-            else:
-                gauss = 0.
-            return gauss
-
-        f = TF1("pygaus", pygaus, -4, 4, 4)
-        f.SetParameters(600, 0.43, 0.35, 600)
-
-        h = TH1F("h", "test", 100, -4, 4)
-        h.FillRandom("gaus", 200000)
-        h.Fit(f, "0Q")
-
-        assert f.GetNDF() == 96
-        result = f.GetParameters()
-        assert round(result[1] - 0., 1) == 0  # mean
-        assert round(result[2] - 1., 1) == 0  # s.d.
-
-        del f      # force here, to prevent leak-check complaints
-        gc.collect()
-
-
-class AppTestSURPLUS:
-    spaceconfig = dict(usemodules=['cppyy', '_rawffi', 'itertools'])
-
-    # these are tests that were historically exercised on ROOT classes and
-    # have twins on custom classes; kept here just in case differences crop
-    # up between the ROOT classes and the custom ones
-
-    def test01_class_enum(self):
-        """Test class enum access and values"""
-
-        import cppyy
-        TObject = cppyy.gbl.TObject
-        gROOT = cppyy.gbl.gROOT
-
-        assert TObject.kBitMask    == gROOT.ProcessLine("return TObject::kBitMask;")
-        assert TObject.kIsOnHeap   == gROOT.ProcessLine("return TObject::kIsOnHeap;")
-        assert TObject.kNotDeleted == gROOT.ProcessLine("return TObject::kNotDeleted;")
-        assert TObject.kZombie     == gROOT.ProcessLine("return TObject::kZombie;")
-
-        t = TObject()
-
-        assert TObject.kBitMask    == t.kBitMask
-        assert TObject.kIsOnHeap   == t.kIsOnHeap
-        assert TObject.kNotDeleted == t.kNotDeleted
-        assert TObject.kZombie     == t.kZombie
-
-    def test02_global_enum(self):
-        """Test global enums access and values"""
-
-        import cppyy
-        from cppyy import gbl
-
-        assert gbl.kRed   == gbl.gROOT.ProcessLine("return kRed;")
-        assert gbl.kGreen == gbl.gROOT.ProcessLine("return kGreen;")
-        assert gbl.kBlue  == gbl.gROOT.ProcessLine("return kBlue;")
-
-    def test03_copy_contructor(self):
-        """Test copy constructor"""
-
-        import cppyy
-        TLorentzVector = cppyy.gbl.TLorentzVector
-
-        t1 = TLorentzVector(1., 2., 3., -4.)
-        t2 = TLorentzVector(0., 0., 0.,  0.)
-        t3 = TLorentzVector(t1)
-
-        assert t1 == t3
-        assert t1 != t2
-
-        for i in range(4):
-            assert t1[i] == t3[i]
-
-    def test04_object_validity(self):
-        """Test object validity checking"""
-
-        import cppyy
-
-        t1 = cppyy.gbl.TObject()
-
-        assert t1
-        assert not not t1
-
-        t2 = cppyy.gbl.gROOT.FindObject("Nah, I don't exist")
-
-        assert not t2
-
-    def test05_element_access(self):
-        """Test access to elements in matrix and array objects."""
-
-        from cppyy import gbl
-
-        N = 3
-        v = gbl.TVectorF(N)
-        m = gbl.TMatrixD(N, N)
-
-        for i in range(N):
-            assert v[i] == 0.0
-
-            for j in range(N):
-                assert m[i][j] == 0.0
-
-    def test06_static_function_call( self ):
-        """Test call to static function."""
-
-        import cppyy
-        TROOT, gROOT = cppyy.gbl.TROOT, cppyy.gbl.gROOT
-
-        c1 = TROOT.Class()
-        assert not not c1
-
-        c2 = gROOT.Class()
-
-        assert c1 == c2
-
-        old = gROOT.GetDirLevel()
-        TROOT.SetDirLevel(2)
-        assert 2 == gROOT.GetDirLevel()
-        gROOT.SetDirLevel(old)
-
-        old = TROOT.GetDirLevel()
-        gROOT.SetDirLevel(3)
-        assert 3 == TROOT.GetDirLevel()
-        TROOT.SetDirLevel(old)
-
-    def test07_macro(self):
-        """Test access to cpp macro's"""
-
-        from cppyy import gbl
-
-        assert gbl.NULL == 0
-
-        gbl.gROOT.ProcessLine('#define aap "aap"')
-        gbl.gROOT.ProcessLine('#define noot 1')
-        gbl.gROOT.ProcessLine('#define mies 2.0')
-
-        # TODO: macro's assumed to always be of long type ...
-        #assert gbl.aap  == "aap"
-        assert gbl.noot == 1
-        #assert gbl.mies == 2.0
-
-    def test08_opaque_pointer_passing(self):
-        """Test passing around of opaque pointers"""
-
-        import cppyy
-
-        # TODO: figure out CObject (see also test_advanced.py)
-
-        s = cppyy.gbl.TString("Hello World!")
-        #cobj = cppyy.as_cobject(s)
-        addr = cppyy.addressof(s)
-
-        #assert s == cppyy.bind_object(cobj, s.__class__)
-        #assert s == cppyy.bind_object(cobj, "TString")
-        assert s == cppyy.bind_object(addr, s.__class__)
-        assert s == cppyy.bind_object(addr, "TString")
-
-    def test09_object_and_pointer_comparisons(self):
-        """Verify object and pointer comparisons"""
-
-        import cppyy
-        gbl = cppyy.gbl
-
-        c1 = cppyy.bind_object(0, gbl.TCanvas)
-        assert c1 == None
-        assert None == c1
-
-        c2 = cppyy.bind_object(0, gbl.TCanvas)
-        assert c1 == c2
-        assert c2 == c1
-
-        # TLorentzVector overrides operator==
-        l1 = cppyy.bind_object(0, gbl.TLorentzVector)
-        assert l1 == None
-        assert None == l1
-
-        assert c1 != l1
-        assert l1 != c1
-
-        l2 = cppyy.bind_object(0, gbl.TLorentzVector)
-        assert l1 == l2
-        assert l2 == l1 
-
-        l3 = gbl.TLorentzVector(1, 2, 3, 4)
-        l4 = gbl.TLorentzVector(1, 2, 3, 4)
-        l5 = gbl.TLorentzVector(4, 3, 2, 1)
-        assert l3 == l4
-        assert l4 == l3
-
-        assert l3 != None                 # like this to ensure __ne__ is called
-        assert None != l3                 # id.
-        assert l3 != l5
-        assert l5 != l3
-
-    def test10_recursive_remove(self):
-        """Verify that objects are recursively removed when destroyed"""
-
-        import cppyy
-
-        c = cppyy.gbl.TClass.GetClass("TObject")
-
-        o = cppyy.gbl.TObject()
-        assert o
-
-        o.SetBit(cppyy.gbl.TObject.kMustCleanup)
-        c.Destructor(o)
-        assert not o
diff --git a/pypy/module/cpyext/test/test_api.py b/pypy/module/cpyext/test/test_api.py
--- a/pypy/module/cpyext/test/test_api.py
+++ b/pypy/module/cpyext/test/test_api.py
@@ -64,14 +64,7 @@
         except OperationError as e:
             print e.errorstr(self.space)
             raise
-
-        try:
-            self.space.getexecutioncontext().cleanup_cpyext_state()
-        except AttributeError:
-            pass
-
-        if self.check_and_print_leaks():
-            assert False, "Test leaks or loses object(s)."
+        self.cleanup()
 
 @slot_function([PyObject], lltype.Void)
 def PyPy_GetWrapped(space, w_arg):
diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py
--- a/pypy/module/cpyext/test/test_cpyext.py
+++ b/pypy/module/cpyext/test/test_cpyext.py
@@ -1,16 +1,12 @@
 import sys
-import weakref
 
 import pytest
 
-from pypy.tool.cpyext.extbuild import (
-    SystemCompilationInfo, HERE, get_sys_info_app)
+from pypy.tool.cpyext.extbuild import SystemCompilationInfo, HERE
 from pypy.interpreter.gateway import unwrap_spec, interp2app
-from rpython.rtyper.lltypesystem import lltype, ll2ctypes
+from rpython.rtyper.lltypesystem import lltype
 from pypy.module.cpyext import api
 from pypy.module.cpyext.state import State
-from pypy.module.cpyext.pyobject import Py_DecRef
-from rpython.tool.identity_dict import identity_dict
 from rpython.tool import leakfinder
 from rpython.rlib import rawrefcount
 from rpython.tool.udir import udir
@@ -76,13 +72,6 @@
 
 def freeze_refcnts(self):
     rawrefcount._dont_free_any_more()
-    return #ZZZ
-    state = self.space.fromcache(RefcountState)
-    self.frozen_refcounts = {}
-    for w_obj, obj in state.py_objects_w2r.iteritems():
-        self.frozen_refcounts[w_obj] = obj.c_ob_refcnt
-    #state.print_refcounts()
-    self.frozen_ll2callocations = set(ll2ctypes.ALLOCATED.values())
 
 class LeakCheckingTest(object):
     """Base class for all cpyext tests."""
@@ -91,78 +80,13 @@
                                    'micronumpy', 'mmap'
                                    ])
 
-    enable_leak_checking = True
+    def cleanup(self):
+        self.space.getexecutioncontext().cleanup_cpyext_state()
+        rawrefcount._collect()
+        self.space.user_del_action._run_finalizers()
+        leakfinder.stop_tracking_allocations(check=False)
+        assert not self.space.finalizer_queue.next_dead()
 
-    @staticmethod
-    def cleanup_references(space):
-        return #ZZZ
-        state = space.fromcache(RefcountState)
-
-        import gc; gc.collect()
-        # Clear all lifelines, objects won't resurrect
-        for w_obj, obj in state.lifeline_dict._dict.items():
-            if w_obj not in state.py_objects_w2r:
-                state.lifeline_dict.set(w_obj, None)
-            del obj
-        import gc; gc.collect()
-
-
-        for w_obj in state.non_heaptypes_w:
-            Py_DecRef(space, w_obj)
-        state.non_heaptypes_w[:] = []
-        state.reset_borrowed_references()
-
-    def check_and_print_leaks(self):
-        rawrefcount._collect()
-        # check for sane refcnts
-        import gc
-
-        if 1:  #ZZZ  not self.enable_leak_checking:
-            leakfinder.stop_tracking_allocations(check=False)
-            return False
-
-        leaking = False
-        state = self.space.fromcache(RefcountState)
-        gc.collect()
-        lost_objects_w = identity_dict()
-    lost_objects_w.update((key, None) for key in self.frozen_refcounts.keys())
-
-        for w_obj, obj in state.py_objects_w2r.iteritems():
-            base_refcnt = self.frozen_refcounts.get(w_obj)
-            delta = obj.c_ob_refcnt
-            if base_refcnt is not None:
-                delta -= base_refcnt
-                lost_objects_w.pop(w_obj)
-            if delta != 0:
-                leaking = True
-                print >>sys.stderr, "Leaking %r: %i references" % (w_obj, delta)
-                try:
-                    weakref.ref(w_obj)
-                except TypeError:
-                    lifeline = None
-                else:
-                    lifeline = state.lifeline_dict.get(w_obj)
-                if lifeline is not None:
-                    refcnt = lifeline.pyo.c_ob_refcnt
-                    if refcnt > 0:
-                        print >>sys.stderr, "\tThe object also held by C code."
-                    else:
-                        referrers_repr = []
-                        for o in gc.get_referrers(w_obj):
-                            try:
-                                repr_str = repr(o)
-                            except TypeError as e:
-                                repr_str = "%s (type of o is %s)" % (str(e), type(o))
-                            referrers_repr.append(repr_str)
-                        referrers = ", ".join(referrers_repr)
-                        print >>sys.stderr, "\tThe object is referenced by these objects:", \
-                                referrers
-        for w_obj in lost_objects_w:
-            print >>sys.stderr, "Lost object %r" % (w_obj, )
-            leaking = True
-        # the actual low-level leak checking is done by pypy.tool.leakfinder,
-        # enabled automatically by pypy.conftest.
-        return leaking
 
 class AppTestApi(LeakCheckingTest):
     def setup_class(cls):
@@ -179,15 +103,7 @@
     def teardown_method(self, meth):
         if self.runappdirect:
             return
-        self.space.getexecutioncontext().cleanup_cpyext_state()
-        self.cleanup_references(self.space)
-        # XXX: like AppTestCpythonExtensionBase.teardown_method:
-        # find out how to disable check_and_print_leaks() if the
-        # test failed
-        assert not self.check_and_print_leaks(), (
-            "Test leaks or loses object(s).  You should also check if "
-            "the test actually passed in the first place; if it failed "
-            "it is likely to reach this place.")
+        self.cleanup()
 
     @pytest.mark.skipif(only_pypy, reason='pypy only test')
     def test_only_import(self):
@@ -355,7 +271,6 @@
         self.space.call_method(self.space.sys.get("stdout"), "flush")
 
         freeze_refcnts(self)
-        #self.check_and_print_leaks()
 
     def unimport_module(self, name):
         """
@@ -367,17 +282,12 @@
 
     def teardown_method(self, func):
         if self.runappdirect:
+            self.w_debug_collect()
             return
+        debug_collect(self.space)
         for name in self.imported_module_names:
             self.unimport_module(name)
-        self.space.getexecutioncontext().cleanup_cpyext_state()
-        self.cleanup_references(self.space)
-        # XXX: find out how to disable check_and_print_leaks() if the
-        # test failed...
-        assert not self.check_and_print_leaks(), (
-            "Test leaks or loses object(s).  You should also check if "
-            "the test actually passed in the first place; if it failed "
-            "it is likely to reach this place.")
+        self.cleanup()
 
 
 class AppTestCpythonExtension(AppTestCpythonExtensionBase):
@@ -415,7 +325,6 @@
 
 
     def test_export_docstring(self):
-        import sys
         init = """
         if (Py_IsInitialized())
             Py_InitModule("foo", methods);
@@ -534,7 +443,6 @@
 
 
     def test_export_function2(self):
-        import sys
         init = """
         if (Py_IsInitialized())
             Py_InitModule("foo", methods);
diff --git a/pypy/module/gc/referents.py b/pypy/module/gc/referents.py
--- a/pypy/module/gc/referents.py
+++ b/pypy/module/gc/referents.py
@@ -47,57 +47,6 @@
 
 # ____________________________________________________________
 
-class PathEntry(object):
-    # PathEntries are nodes of a complete tree of all objects, but
-    # built lazily (there is only one branch alive at any time).
-    # Each node has a 'gcref' and the list of referents from this gcref.
-    def __init__(self, prev, gcref, referents):
-        self.prev = prev
-        self.gcref = gcref
-        self.referents = referents
-        self.remaining = len(referents)
-
-    def get_most_recent_w_obj(self):
-        entry = self
-        while entry is not None:
-            if entry.gcref:
-                w_obj = try_cast_gcref_to_w_root(entry.gcref)
-                if w_obj is not None:
-                    return w_obj
-            entry = entry.prev
-        return None
-
-def do_get_referrers(w_arg):
-    result_w = []
-    gcarg = rgc.cast_instance_to_gcref(w_arg)
-    roots = [gcref for gcref in rgc.get_rpy_roots() if gcref]
-    head = PathEntry(None, rgc.NULL_GCREF, roots)
-    while True:
-        head.remaining -= 1
-        if head.remaining >= 0:
-            gcref = head.referents[head.remaining]
-            if not rgc.get_gcflag_extra(gcref):
-                # not visited so far
-                if gcref == gcarg:
-                    w_obj = head.get_most_recent_w_obj()
-                    if w_obj is not None:
-                        result_w.append(w_obj)   # found!
-                        rgc.toggle_gcflag_extra(gcref)  # toggle twice
-                rgc.toggle_gcflag_extra(gcref)
-                head = PathEntry(head, gcref, rgc.get_rpy_referents(gcref))
-        else:
-            # no more referents to visit
-            head = head.prev
-            if head is None:
-                break
-    # done.  Clear flags carefully
-    rgc.toggle_gcflag_extra(gcarg)
-    rgc.clear_gcflag_extra(roots)
-    rgc.clear_gcflag_extra([gcarg])
-    return result_w
-
-# ____________________________________________________________
-
 def _list_w_obj_referents(gcref, result_w):
     # Get all W_Root reachable directly from gcref, and add them to
     # the list 'result_w'.
@@ -184,9 +133,22 @@
     """Return the list of objects that directly refer to any of objs."""
     if not rgc.has_gcflag_extra():
         raise missing_operation(space)
+    # xxx uses a lot of memory to make the list of all W_Root objects,
+    # but it's simpler this way and more correct than the previous
+    # version of this code (issue #2612).  It is potentially very slow
+    # because each of the n calls to _list_w_obj_referents() could take
+    # O(n) time as well, in theory, but I hope in practice the whole
+    # thing takes much less than O(n^2).  We could re-add an algorithm
+    # that visits most objects only once, if needed...
+    all_objects_w = rgc.do_get_objects(try_cast_gcref_to_w_root)
     result_w = []
-    for w_arg in args_w:
-        result_w += do_get_referrers(w_arg)
+    for w_obj in all_objects_w:
+        refs_w = []
+        gcref = rgc.cast_instance_to_gcref(w_obj)
+        _list_w_obj_referents(gcref, refs_w)
+        for w_arg in args_w:
+            if w_arg in refs_w:
+                result_w.append(w_obj)
     rgc.assert_no_more_gcflags()
     return space.newlist(result_w)
 
diff --git a/pypy/module/gc/test/test_referents.py b/pypy/module/gc/test/test_referents.py
--- a/pypy/module/gc/test/test_referents.py
+++ b/pypy/module/gc/test/test_referents.py
@@ -116,3 +116,37 @@
                 break   # found
         else:
             assert 0, "the tuple (7,) is not found as gc.get_referrers(7)"
+
+
+class AppTestReferentsMore(object):
+
+    def setup_class(cls):
+        from rpython.rlib import rgc
+        cls._backup = [rgc.get_rpy_roots]
+        l4 = cls.space.newlist([])
+        cls.ALL_ROOTS = [l4]
+        cls.w_ALL_ROOTS = cls.space.newlist(cls.ALL_ROOTS)
+        rgc.get_rpy_roots = lambda: (
+            map(rgc._GcRef, cls.ALL_ROOTS) + [rgc.NULL_GCREF]*2)
+        cls.w_runappdirect = cls.space.wrap(option.runappdirect)
+
+    def teardown_class(cls):
+        from rpython.rlib import rgc
+        rgc.get_rpy_roots = cls._backup[0]
+
+    def test_get_referrers(self):
+        import gc
+        class A(object):
+            pass
+        a = A()
+        if not self.runappdirect:
+            l4 = self.ALL_ROOTS[0]
+            l4.append(a)              # add 'a' to the list which is in roots
+        lst = gc.get_referrers(A)
+        assert a in lst
+        lst = gc.get_referrers(A)
+        assert a in lst
+        lst = gc.get_referrers(A)
+        assert a in lst
+        lst = gc.get_referrers(A)
+        assert a in lst
diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py
--- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py
+++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py
@@ -1450,20 +1450,30 @@
         py.test.skip("_Bool not in MSVC")
     ffi = FFI()
     ffi.cdef("struct foo_s { _Bool x; };"
-             "_Bool foo(_Bool);")
+             "_Bool foo(_Bool); _Bool (*foop)(_Bool);")
     lib = ffi.verify("""
         struct foo_s { _Bool x; };
         int foo(int arg) {
             return !arg;
         }
+        _Bool _foofunc(_Bool x) {
+            return !x;
+        }
+        _Bool (*foop)(_Bool) = _foofunc;
     """)
     p = ffi.new("struct foo_s *")
     p.x = 1
-    assert p.x == 1
+    assert p.x is True
     py.test.raises(OverflowError, "p.x = -1")
     py.test.raises(TypeError, "p.x = 0.0")
-    assert lib.foo(1) == 0
-    assert lib.foo(0) == 1
+    assert lib.foop(1) is False
+    assert lib.foop(True) is False
+    assert lib.foop(0) is True
+    py.test.raises(OverflowError, lib.foop, 42)
+    py.test.raises(TypeError, lib.foop, 0.0)
+    assert lib.foo(1) is False
+    assert lib.foo(True) is False
+    assert lib.foo(0) is True
     py.test.raises(OverflowError, lib.foo, 42)
     py.test.raises(TypeError, lib.foo, 0.0)
     assert int(ffi.cast("_Bool", long(1))) == 1
diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py
--- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py
+++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py
@@ -1939,7 +1939,7 @@
     ffi = FFI()
     ffi.cdef("bool f(void);")
     lib = verify(ffi, "test_bool_in_cpp", "char f(void) { return 2; }")
-    assert lib.f() == 1
+    assert lib.f() is True
 
 def test_bool_in_cpp_2():
     ffi = FFI()
diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py
--- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py
+++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py
@@ -1419,20 +1419,30 @@
         py.test.skip("_Bool not in MSVC")
     ffi = FFI()
     ffi.cdef("struct foo_s { _Bool x; };"
-             "_Bool foo(_Bool);")
+             "_Bool foo(_Bool); _Bool (*foop)(_Bool);")
     lib = ffi.verify("""
         struct foo_s { _Bool x; };
         int foo(int arg) {
             return !arg;
         }
+        _Bool _foofunc(_Bool x) {
+            return !x;
+        }
+        _Bool (*foop)(_Bool) = _foofunc;
     """)
     p = ffi.new("struct foo_s *")
     p.x = 1
-    assert p.x == 1
+    assert p.x is True
     py.test.raises(OverflowError, "p.x = -1")
     py.test.raises(TypeError, "p.x = 0.0")
-    assert lib.foo(1) == 0
-    assert lib.foo(0) == 1
+    assert lib.foop(1) is False
+    assert lib.foop(True) is False
+    assert lib.foop(0) is True
+    py.test.raises(OverflowError, lib.foop, 42)
+    py.test.raises(TypeError, lib.foop, 0.0)
+    assert lib.foo(1) is False
+    assert lib.foo(True) is False
+    assert lib.foo(0) is True
     py.test.raises(OverflowError, lib.foo, 42)
     py.test.raises(TypeError, lib.foo, 0.0)
     assert int(ffi.cast("_Bool", long(1))) == 1
diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py
--- a/pypy/objspace/std/bytearrayobject.py
+++ b/pypy/objspace/std/bytearrayobject.py
@@ -567,13 +567,38 @@
             raise
     else:
         return list(buf.as_str())
+    return _from_byte_sequence(space, w_source)
 
-    # sequence of bytes
+def _get_printable_location(w_type):
+    return ('bytearray_from_byte_sequence [w_type=%s]' %
+            w_type.getname(w_type.space))
+
+_byteseq_jitdriver = jit.JitDriver(
+    name='bytearray_from_byte_sequence',
+    greens=['w_type'],
+    reds=['w_iter', 'data'],
+    get_printable_location=_get_printable_location)
+
+def _from_byte_sequence(space, w_source):
+    # Split off in a separate function for the JIT's benefit
+    # and add a jitdriver with the type of w_iter as the green key
     w_iter = space.iter(w_source)
     length_hint = space.length_hint(w_source, 0)
     data = newlist_hint(length_hint)
-    extended = 0
+    #
+    _from_byte_sequence_loop(space, w_iter, data)
+    #
+    extended = len(data)
+    if extended < length_hint:
+        resizelist_hint(data, extended)
+    return data
+
+def _from_byte_sequence_loop(space, w_iter, data):
+    w_type = space.type(w_iter)
     while True:
+        _byteseq_jitdriver.jit_merge_point(w_type=w_type,
+                                           w_iter=w_iter,
+                                           data=data)
         try:
             w_item = space.next(w_iter)
         except OperationError as e:
@@ -581,10 +606,6 @@
                 raise
             break
         data.append(space.byte_w(w_item))
-        extended += 1
-    if extended < length_hint:
-        resizelist_hint(data, extended)
-    return data
 
 
 def _hex_digit_to_int(d):
diff --git a/pypy/objspace/std/test/test_bytearrayobject.py b/pypy/objspace/std/test/test_bytearrayobject.py
--- a/pypy/objspace/std/test/test_bytearrayobject.py
+++ b/pypy/objspace/std/test/test_bytearrayobject.py
@@ -448,6 +448,13 @@
         raises(TypeError, b.extend, [object()])
         raises(TypeError, b.extend, u"unicode")
 
+    def test_extend_calls_len_or_lengthhint(self):
+        class BadLen(object):
+            def __iter__(self): return iter(range(10))
+            def __len__(self): raise RuntimeError('hello')
+        b = bytearray()
+        raises(RuntimeError, b.extend, BadLen())
+
     def test_setitem_from_front(self):
         b = bytearray(b'abcdefghij')
         b[:2] = b''
diff --git a/pypy/objspace/std/util.py b/pypy/objspace/std/util.py
--- a/pypy/objspace/std/util.py
+++ b/pypy/objspace/std/util.py
@@ -8,7 +8,7 @@
 IDTAG_LONG    = 3
 IDTAG_FLOAT   = 5
 IDTAG_COMPLEX = 7
-IDTAG_METHOD  = 9
+IDTAG_UNBOUND_METHOD = 9
 IDTAG_SPECIAL = 11    # -1 - (-maxunicode-1): unichar
                       # 0 - 255: char
                       # 256: empty string
diff --git a/rpython/config/support.py b/rpython/config/support.py
--- a/rpython/config/support.py
+++ b/rpython/config/support.py
@@ -35,3 +35,15 @@
         return int(count)
     except (OSError, ValueError):
         return 1
+
+def detect_pax():
+    """
+    Function to determine if your system comes with PAX protection.
+    """
+    if sys.platform.startswith('linux'):
+        # we need a running process PID and 1 is always running
+        with open("/proc/1/status") as fd:
+            data = fd.read()
+        if 'PaX' in data:
+            return True
+    return False
diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py
--- a/rpython/memory/gc/incminimark.py
+++ b/rpython/memory/gc/incminimark.py
@@ -2308,6 +2308,7 @@
                 ll_assert(not (self.probably_young_objects_with_finalizers
                                .non_empty()),
                     "probably_young_objects_with_finalizers should be empty")
+                self.kept_alive_by_finalizer = r_uint(0)
                 if self.old_objects_with_finalizers.non_empty():
                     self.deal_with_objects_with_finalizers()
                 elif self.old_objects_with_weakrefs.non_empty():
@@ -2380,6 +2381,9 @@
                 # we currently have -- but no more than 'max_delta' more than
                 # we currently have.
                 total_memory_used = float(self.get_total_memory_used())
+                total_memory_used -= float(self.kept_alive_by_finalizer)
+                if total_memory_used < 0:
+                    total_memory_used = 0
                 bounded = self.set_major_threshold_from(
                     min(total_memory_used * self.major_collection_threshold,
                         total_memory_used + self.max_delta),
@@ -2418,7 +2422,7 @@
             self.execute_finalizers()
             #END FINALIZING
         else:
-            pass #XXX which exception to raise here. Should be unreachable.
+            ll_assert(False, "bogus gc_state")
 
         debug_print("stopping, now in gc state: ", GC_STATES[self.gc_state])
         debug_stop("gc-collect-step")
@@ -2784,8 +2788,17 @@
     def _bump_finalization_state_from_0_to_1(self, obj):
         ll_assert(self._finalization_state(obj) == 0,
                   "unexpected finalization state != 0")
+        size_gc_header = self.gcheaderbuilder.size_gc_header
+        totalsize = size_gc_header + self.get_size(obj)
         hdr = self.header(obj)
         hdr.tid |= GCFLAG_FINALIZATION_ORDERING
+        # A bit hackish, but we will not count these objects as "alive"
+        # for the purpose of computing when the next major GC should
+        # occur.  This is done for issue #2590: without this, if we
+        # allocate mostly objects with finalizers, the
+        # next_major_collection_threshold grows forever and actual
+        # memory usage is not bounded.
+        self.kept_alive_by_finalizer += raw_malloc_usage(totalsize)
 
     def _recursively_bump_finalization_state_from_2_to_3(self, obj):
         ll_assert(self._finalization_state(obj) == 2,
diff --git a/rpython/memory/gc/minimark.py b/rpython/memory/gc/minimark.py
--- a/rpython/memory/gc/minimark.py
+++ b/rpython/memory/gc/minimark.py
@@ -1636,6 +1636,7 @@
         # with a finalizer and all objects reachable from there (and also
         # moves some objects from 'objects_with_finalizers' to
         # 'run_finalizers').
+        self.kept_alive_by_finalizer = r_uint(0)
         if self.old_objects_with_finalizers.non_empty():
             self.deal_with_objects_with_finalizers()
         #
@@ -1678,6 +1679,9 @@
         # we currently have -- but no more than 'max_delta' more than
         # we currently have.
         total_memory_used = float(self.get_total_memory_used())
+        total_memory_used -= float(self.kept_alive_by_finalizer)
+        if total_memory_used < 0:
+            total_memory_used = 0
         bounded = self.set_major_threshold_from(
             min(total_memory_used * self.major_collection_threshold,
                 total_memory_used + self.max_delta),
@@ -1999,8 +2003,11 @@
     def _bump_finalization_state_from_0_to_1(self, obj):
         ll_assert(self._finalization_state(obj) == 0,
                   "unexpected finalization state != 0")
+        size_gc_header = self.gcheaderbuilder.size_gc_header
+        totalsize = size_gc_header + self.get_size(obj)
         hdr = self.header(obj)
         hdr.tid |= GCFLAG_FINALIZATION_ORDERING
+        self.kept_alive_by_finalizer += raw_malloc_usage(totalsize)
 
     def _recursively_bump_finalization_state_from_2_to_3(self, obj):
         ll_assert(self._finalization_state(obj) == 2,
diff --git a/rpython/memory/test/test_minimark_gc.py b/rpython/memory/test/test_minimark_gc.py
--- a/rpython/memory/test/test_minimark_gc.py
+++ b/rpython/memory/test/test_minimark_gc.py
@@ -1,3 +1,4 @@
+from rpython.rlib import rgc
 from rpython.rlib.rarithmetic import LONG_BIT
 
 from rpython.memory.test import test_semispace_gc
@@ -9,3 +10,39 @@
     GC_CAN_SHRINK_BIG_ARRAY = False
     GC_CAN_MALLOC_NONMOVABLE = True
     BUT_HOW_BIG_IS_A_BIG_STRING = 11*WORD
+
+    def test_bounded_memory_when_allocating_with_finalizers(self):
+        # Issue #2590: when allocating a lot of objects with a finalizer
+        # and little else, the bounds in the (inc)minimark GC are not
+        # set up reasonably and the total memory usage grows without
+        # limit.
+        class B(object):
+            pass
+        b = B()
+        b.num_deleted = 0
+        class A(object):
+            def __init__(self):
+                fq.register_finalizer(self)
+        class FQ(rgc.FinalizerQueue):
+            Class = A
+            def finalizer_trigger(self):
+                while True:
+                    a = self.next_dead()
+                    if a is None:
+                        break
+                    b.num_deleted += 1
+        fq = FQ()
+        def f(x, y):
+            i = 0
+            alive_max = 0
+            while i < x:
+                i += 1
+                a = A()
+                a.x = a.y = a.z = i
+                #print i - b.num_deleted, b.num_deleted
+                alive = i - b.num_deleted
+                assert alive >= 0
+                alive_max = max(alive_max, alive)
+            return alive_max
+        res = self.interpret(f, [1000, 0])
+        assert res < 100
diff --git a/rpython/rlib/_rsocket_rffi.py b/rpython/rlib/_rsocket_rffi.py
--- a/rpython/rlib/_rsocket_rffi.py
+++ b/rpython/rlib/_rsocket_rffi.py
@@ -20,7 +20,7 @@
     includes = ('sys/types.h',
                 'sys/socket.h',
                 'sys/un.h',
-                'sys/poll.h',
+                'poll.h',
                 'sys/select.h',
                 'sys/types.h',
                 'netinet/in.h',
diff --git a/rpython/rlib/rsre/rpy/_sre.py b/rpython/rlib/rsre/rpy/_sre.py
--- a/rpython/rlib/rsre/rpy/_sre.py
+++ b/rpython/rlib/rsre/rpy/_sre.py
@@ -16,6 +16,8 @@
 
 
 def get_code(regexp, flags=0, allargs=False):
+    """NOT_RPYTHON: you can't compile new regexps in an RPython program,
+    you can only use precompiled ones"""
     from . import sre_compile
     try:
         sre_compile.compile(regexp, flags)
diff --git a/rpython/rlib/runicode.py b/rpython/rlib/runicode.py
--- a/rpython/rlib/runicode.py
+++ b/rpython/rlib/runicode.py
@@ -16,6 +16,8 @@
     allow_surrogate_by_default = True
 
 BYTEORDER = sys.byteorder
+BYTEORDER2 = BYTEORDER[0] + 'e'      # either "le" or "be"
+assert BYTEORDER2 in ('le', 'be')
 
 # python 2.7 has a preview of py3k behavior, so those functions
 # are used either when we're testing wide pypy on narrow cpython
@@ -486,9 +488,31 @@
                                                         errorhandler, "little")
     return result, length
 
+def py3k_str_decode_utf_16(s, size, errors, final=True,
+                      errorhandler=None):
+    result, length, byteorder = str_decode_utf_16_helper(s, size, errors, final,
+                                                         errorhandler, "native",
+                                                         'utf-16-' + BYTEORDER2)
+    return result, length
+
+def py3k_str_decode_utf_16_be(s, size, errors, final=True,
+                         errorhandler=None):
+    result, length, byteorder = str_decode_utf_16_helper(s, size, errors, final,
+                                                         errorhandler, "big",
+                                                         'utf-16-be')
+    return result, length
+
+def py3k_str_decode_utf_16_le(s, size, errors, final=True,
+                         errorhandler=None):
+    result, length, byteorder = str_decode_utf_16_helper(s, size, errors, final,
+                                                         errorhandler, "little",
+                                                         'utf-16-le')
+    return result, length
+
 def str_decode_utf_16_helper(s, size, errors, final=True,
                              errorhandler=None,
-                             byteorder="native"):
+                             byteorder="native",
+                             public_encoding_name='utf16'):
     if errorhandler is None:
         errorhandler = default_unicode_error_decode
     bo = 0
@@ -546,7 +570,8 @@
         if len(s) - pos < 2:
             if not final:
                 break
-            r, pos = errorhandler(errors, 'utf16', "truncated data",
+            r, pos = errorhandler(errors, public_encoding_name,
+                                  "truncated data",
                                   s, pos, len(s))
             result.append(r)
             if len(s) - pos < 2:
@@ -562,7 +587,8 @@
             if not final:
                 break
             errmsg = "unexpected end of data"
-            r, pos = errorhandler(errors, 'utf16', errmsg, s, pos, len(s))
+            r, pos = errorhandler(errors, public_encoding_name,
+                                  errmsg, s, pos, len(s))
             result.append(r)
             if len(s) - pos < 2:
                 break
@@ -578,12 +604,12 @@
                                            (ch2 & 0x3FF)) + 0x10000))
                 continue
             else:
-                r, pos = errorhandler(errors, 'utf16',
+                r, pos = errorhandler(errors, public_encoding_name,
                                       "illegal UTF-16 surrogate",
                                       s, pos - 4, pos - 2)
                 result.append(r)
         else:
-            r, pos = errorhandler(errors, 'utf16',
+            r, pos = errorhandler(errors, public_encoding_name,
                                   "illegal encoding",
                                   s, pos - 2, pos)
             result.append(r)
@@ -592,7 +618,8 @@
 def unicode_encode_utf_16_helper(s, size, errors,
                                  errorhandler=None,
                                  allow_surrogates=True,
-                                 byteorder='little'):
+                                 byteorder='little',
+                                 public_encoding_name='utf16'):
     if errorhandler is None:
         errorhandler = default_unicode_error_encode
     if size == 0:
@@ -620,13 +647,13 @@
         elif ch >= 0xE000 or allow_surrogates:
             _STORECHAR(result, ch, byteorder)
         else:
-            ru, rs, pos = errorhandler(errors, 'utf16',
+            ru, rs, pos = errorhandler(errors, public_encoding_name,
                                        'surrogates not allowed',
                                        s, pos-1, pos)
             if rs is not None:
                 # py3k only
                 if len(rs) % 2 != 0:
-                    errorhandler('strict', 'utf16',
+                    errorhandler('strict', public_encoding_name,
                                  'surrogates not allowed',
                                  s, pos-1, pos)
                 result.append(rs)
@@ -635,7 +662,7 @@
                 if ord(ch) < 0xD800:
                     _STORECHAR(result, ord(ch), byteorder)
                 else:
-                    errorhandler('strict', 'utf16',
+                    errorhandler('strict', public_encoding_name,
                                  'surrogates not allowed',
                                  s, pos-1, pos)
             continue
@@ -648,20 +675,39 @@
     return unicode_encode_utf_16_helper(s, size, errors, errorhandler,
                                         allow_surrogates, "native")
 
-
 def unicode_encode_utf_16_be(s, size, errors,
                              errorhandler=None,
                              allow_surrogates=True):
     return unicode_encode_utf_16_helper(s, size, errors, errorhandler,
                                         allow_surrogates, "big")
 
-
 def unicode_encode_utf_16_le(s, size, errors,
                              errorhandler=None,
                              allow_surrogates=True):
     return unicode_encode_utf_16_helper(s, size, errors, errorhandler,
                                         allow_surrogates, "little")
 
+def py3k_unicode_encode_utf_16(s, size, errors,
+                          errorhandler=None,
+                          allow_surrogates=True):
+    return unicode_encode_utf_16_helper(s, size, errors, errorhandler,
+                                        allow_surrogates, "native",
+                                        'utf-16-' + BYTEORDER2)
+
+def py3k_unicode_encode_utf_16_be(s, size, errors,
+                             errorhandler=None,
+                             allow_surrogates=True):
+    return unicode_encode_utf_16_helper(s, size, errors, errorhandler,
+                                        allow_surrogates, "big",
+                                        'utf-16-be')
+
+def py3k_unicode_encode_utf_16_le(s, size, errors,
+                             errorhandler=None,
+                             allow_surrogates=True):
+    return unicode_encode_utf_16_helper(s, size, errors, errorhandler,
+                                        allow_surrogates, "little",
+                                        'utf-16-le')
+
 
 # ____________________________________________________________
 # utf-32
@@ -684,12 +730,34 @@
                                                         errorhandler, "little")
     return result, length
 
+def py3k_str_decode_utf_32(s, size, errors, final=True,
+                           errorhandler=None):
+    result, length, byteorder = str_decode_utf_32_helper(s, size, errors, final,
+                                                         errorhandler, "native",
+                                                         'utf-32-' + BYTEORDER2)
+    return result, length
+
+def py3k_str_decode_utf_32_be(s, size, errors, final=True,
+                              errorhandler=None):
+    result, length, byteorder = str_decode_utf_32_helper(s, size, errors, final,
+                                                         errorhandler, "big",
+                                                         'utf-32-be')
+    return result, length
+
+def py3k_str_decode_utf_32_le(s, size, errors, final=True,
+                              errorhandler=None):
+    result, length, byteorder = str_decode_utf_32_helper(s, size, errors, final,
+                                                         errorhandler, "little",
+                                                         'utf-32-le')
+    return result, length
+
 BOM32_DIRECT  = intmask(0x0000FEFF)
 BOM32_REVERSE = intmask(0xFFFE0000)
 
 def str_decode_utf_32_helper(s, size, errors, final=True,
_______________________________________________
pypy-commit mailing list
[email protected]
https://mail.python.org/mailman/listinfo/pypy-commit

Reply via email to