Author: mattip <[email protected]>
Branch: object-dtype2
Changeset: r76646:6e392eb89276
Date: 2015-03-29 22:46 +0300
http://bitbucket.org/pypy/pypy/changeset/6e392eb89276/

Log:    merge default into branch

diff --git a/.hgtags b/.hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -3,20 +3,10 @@
 d8ac7d23d3ec5f9a0fa1264972f74a010dbfd07f release-1.6
 ff4af8f318821f7f5ca998613a60fca09aa137da release-1.7
 07e08e9c885ca67d89bcc304e45a32346daea2fa release-2.0-beta-1
-9b623bc48b5950cf07184462a0e48f2c4df0d720 pypy-2.1-beta1-arm
-9b623bc48b5950cf07184462a0e48f2c4df0d720 pypy-2.1-beta1-arm
 ab0dd631c22015ed88e583d9fdd4c43eebf0be21 pypy-2.1-beta1-arm
 20e51c4389ed4469b66bb9d6289ce0ecfc82c4b9 release-2.3.0
 394146e9bb673514c61f0150ab2013ccf78e8de7 release-2.3
 32f35069a16d819b58c1b6efb17c44e3e53397b2 release-2.2=3.1
 32f35069a16d819b58c1b6efb17c44e3e53397b2 release-2.3.1
 10f1b29a2bd21f837090286174a9ca030b8680b2 release-2.5.0
-8e24dac0b8e2db30d46d59f2c4daa3d4aaab7861 release-2.5.1
-8e24dac0b8e2db30d46d59f2c4daa3d4aaab7861 release-2.5.1
-0000000000000000000000000000000000000000 release-2.5.1
-0000000000000000000000000000000000000000 release-2.5.1
-e3d046c43451403f5969580fc1c41d5df6c4082a release-2.5.1
-e3d046c43451403f5969580fc1c41d5df6c4082a release-2.5.1
-0000000000000000000000000000000000000000 release-2.5.1
-0000000000000000000000000000000000000000 release-2.5.1
 9c4588d731b7fe0b08669bd732c2b676cb0a8233 release-2.5.1
diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst
--- a/pypy/doc/stm.rst
+++ b/pypy/doc/stm.rst
@@ -29,7 +29,8 @@
 ====================
 
 ``pypy-stm`` is a variant of the regular PyPy interpreter.  (This
-version supports Python 2.7; see below for `Python 3`_.)  With caveats_
+version supports Python 2.7; see below for `Python 3, CPython,
+and others`_.)  With caveats_
 listed below, it should be in theory within 20%-50% slower than a
 regular PyPy, comparing the JIT version in both cases (but see below!).
 It is called
@@ -178,8 +179,8 @@
 
 
 
-Python 3
-========
+Python 3, CPython, and others
+=============================
 
 In this document I describe "pypy-stm", which is based on PyPy's Python
 2.7 interpreter.  Supporting Python 3 should take about half an
@@ -194,6 +195,29 @@
 framework, although the amount of work to put there might vary, because
 the STM framework within RPython is currently targeting the PyPy
 interpreter and other ones might have slightly different needs.
+But in general, all the tedious transformations are done by RPython
+and you're only left with the (hopefully few) hard and interesting bits.
+
+The core of STM works as a library written in C (see `reference to
+implementation details`_ below).  It means that it can be used on
+other interpreters than the ones produced by RPython.  Duhton_ is an
+early example of that.  At this point, you might think about adapting
+this library for CPython.  You're warned, though: as far as I can
+tell, it is a doomed idea.  I had a hard time debugging Duhton, and
+that's infinitely simpler than CPython.  Even ignoring that, you can
+see in the C sources of Duhton that many core design decisions are
+different than in CPython: no refcounting; limited support for
+prebuilt "static" objects; ``stm_read()`` and ``stm_write()`` macro
+calls everywhere (and getting very rare and very obscure bugs if you
+forget one); and so on.  You could imagine some custom special-purpose
+extension of the C language, which you would preprocess to regular C.
+In my opinion that's starting to look a lot like RPython itself, but
+maybe you'd prefer this approach.  Of course you still have to worry
+about each and every C extension module you need, but maybe you'd have
+a way forward.
+
+.. _Duhton: https://bitbucket.org/pypy/duhton
+
 
 
 User Guide
@@ -372,18 +396,49 @@
   and ``y`` that are thread-local: reading or writing them from
   concurrently-running transactions will return independent results.
   (Any other attributes of ``Foo`` instances will be globally visible
-  from all threads, as usual.)  The optional argument to
-  ``threadlocalproperty()`` is the default value factory: in case no
-  value was assigned in the current thread yet, the factory is called
-  and its result becomes the value in that thread (like
-  ``collections.defaultdict``).  If no default value factory is
-  specified, uninitialized reads raise ``AttributeError``.  Note that
-  with ``TransactionQueue`` you get a pool of a fixed number of
-  threads, each running the transactions one after the other; such
-  thread-local properties will have the value last stored in them in
-  the same thread,, which may come from a random previous transaction.
-  This means that ``threadlocalproperty`` is useful mainly to avoid
-  conflicts from cache-like data structures.
+  from all threads, as usual.)  This is useful together with
+  ``TransactionQueue`` for these two cases:
+
+  - For attributes of long-lived objects that change during one
+    transaction, but should always be reset to some initial value
+    around each transaction (for example, initialized to 0 at the start of
+    a transaction; or, if used for a list of pending things to do
+    within this transaction, it will always be empty at the end of one
+    transaction).
+
+  - For general caches across transactions.  With ``TransactionQueue``
+    you get a pool of a fixed number N of threads, each running the
+    transactions serially.  A thread-local property will have the
+    value last stored in it by the same thread, which may come from a
+    random previous transaction.  Basically, you get N copies of the
+    property's value, and each transaction accesses a random copy.  It
+    works fine for caches.
+
+  In more details, the optional argument to ``threadlocalproperty()``
+  is the default value factory: in case no value was assigned in the
+  current thread yet, the factory is called and its result becomes the
+  value in that thread (like ``collections.defaultdict``).  If no
+  default value factory is specified, uninitialized reads raise
+  ``AttributeError``.
+
+* In addition to all of the above, there are cases where write-write
+  conflicts are caused by writing the same value to an attribute again
+  and again.  See for example ea2e519614ab_: this fixes two such
+  issues where we write an object field without first checking if we
+  already did it.  The ``dont_change_any_more`` field is a flag set to
+  ``True`` in that part of the code, but usually this
+  ``rtyper_makekey()`` method will be called many times for the same
+  object; the code used to repeatedly set the flag to ``True``, but
+  now it first checks and only does the write if it is ``False``.
+  Similarly, in the second half of the checkin, the method
+  ``setup_block_entry()`` used to both assign the ``concretetype``
+  fields and return a list, but its two callers were different: one
+  would really need the ``concretetype`` fields initialized, whereas
+  the other would only need to get its result list --- the
+  ``concretetype`` field in that case might already be set or not, but
+  that would not matter.
+
+.. _ea2e519614ab: https://bitbucket.org/pypy/pypy/commits/ea2e519614ab
 
 Note that Python is a complicated language; there are a number of less
 common cases that may cause conflict (of any kind) where we might not
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -5,3 +5,9 @@
 .. this is a revision shortly after release-2.5.1
 .. startrev: 397b96217b85
 
+.. branch: gc-incminimark-pinning-improve
+Object Pinning is now used in `bz2` and `rzlib` (therefore also affects
+Python's `zlib`). In case the data to compress/decompress is inside the nursery
+(incminimark) it no longer needs to create a non-moving copy of it. This saves
+one `malloc` and a copy of the data.  Additionally, a new GC environment variable
+is introduced (`PYPY_GC_MAX_PINNED`) primarily for debugging purposes.
diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py
--- a/pypy/goal/targetpypystandalone.py
+++ b/pypy/goal/targetpypystandalone.py
@@ -141,7 +141,7 @@
         res = _pypy_execute_source(source)
         before = rffi.aroundstate.before
         if before: before()
-        return rffi.cast(rffi.INT, res)        
+        return rffi.cast(rffi.INT, res)
 
     @entrypoint('main', [], c_name='pypy_init_threads')
     def pypy_init_threads():
@@ -312,7 +312,7 @@
         w_dict = app.getwdict(space)
         entry_point, _ = create_entry_point(space, w_dict)
 
-        return entry_point, None, PyPyAnnotatorPolicy(single_space = space)
+        return entry_point, None, PyPyAnnotatorPolicy()
 
     def interface(self, ns):
         for name in ['take_options', 'handle_config', 'print_help', 'target',
diff --git a/pypy/module/_hashlib/interp_hashlib.py b/pypy/module/_hashlib/interp_hashlib.py
--- a/pypy/module/_hashlib/interp_hashlib.py
+++ b/pypy/module/_hashlib/interp_hashlib.py
@@ -144,10 +144,10 @@
             with self.lock:
                 ropenssl.EVP_MD_CTX_copy(ctx, self.ctx)
             digest_size = self.digest_size
-            with lltype.scoped_alloc(rffi.CCHARP.TO, digest_size) as digest:
-                ropenssl.EVP_DigestFinal(ctx, digest, None)
+            with rffi.scoped_alloc_buffer(digest_size) as buf:
+                ropenssl.EVP_DigestFinal(ctx, buf.raw, None)
                 ropenssl.EVP_MD_CTX_cleanup(ctx)
-                return rffi.charpsize2str(digest, digest_size)
+                return buf.str(digest_size)
 
 
 W_Hash.typedef = TypeDef(
diff --git a/pypy/module/_minimal_curses/interp_curses.py b/pypy/module/_minimal_curses/interp_curses.py
--- a/pypy/module/_minimal_curses/interp_curses.py
+++ b/pypy/module/_minimal_curses/interp_curses.py
@@ -13,7 +13,7 @@
     def __init__(self, msg):
         self.msg = msg
 
-from rpython.annotator.classdef import FORCE_ATTRIBUTES_INTO_CLASSES
+from rpython.annotator.description import FORCE_ATTRIBUTES_INTO_CLASSES
 from rpython.annotator.model import SomeString
 
 # this is necessary due to annmixlevel
diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py
--- a/pypy/module/_ssl/interp_ssl.py
+++ b/pypy/module/_ssl/interp_ssl.py
@@ -33,7 +33,8 @@
 PY_SSL_CLIENT, PY_SSL_SERVER = 0, 1
 
 (PY_SSL_VERSION_SSL2, PY_SSL_VERSION_SSL3,
- PY_SSL_VERSION_SSL23, PY_SSL_VERSION_TLS1) = range(4)
+ PY_SSL_VERSION_SSL23, PY_SSL_VERSION_TLS1, PY_SSL_VERSION_TLS1_1,
+ PY_SSL_VERSION_TLS1_2) = range(6)
 
 SOCKET_IS_NONBLOCKING, SOCKET_IS_BLOCKING = 0, 1
 SOCKET_HAS_TIMED_OUT, SOCKET_HAS_BEEN_CLOSED = 2, 3
@@ -72,6 +73,9 @@
     constants["PROTOCOL_SSLv3"]  = PY_SSL_VERSION_SSL3
 constants["PROTOCOL_SSLv23"] = PY_SSL_VERSION_SSL23
 constants["PROTOCOL_TLSv1"]  = PY_SSL_VERSION_TLS1
+if HAVE_TLSv1_2:
+    constants["PROTOCOL_TLSv1_1"] = PY_SSL_VERSION_TLS1_1
+    constants["PROTOCOL_TLSv1_2"] = PY_SSL_VERSION_TLS1_2
 
 constants["OP_ALL"] = SSL_OP_ALL &~SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS
 constants["OP_NO_SSLv2"] = SSL_OP_NO_SSLv2
@@ -140,7 +144,7 @@
 
     def __del__(self):
         rffi.free_nonmovingbuffer(
-            self.protos, self.buf, self.pinned, self.is_raw)    
+            self.protos, self.buf, self.pinned, self.is_raw)
 
     @staticmethod
     def advertiseNPN_cb(s, data_ptr, len_ptr, args):
@@ -162,7 +166,7 @@
             client_len = len(npn.protos)
         else:
             client = lltype.nullptr(rffi.CCHARP.TO)
-            client_len = 0            
+            client_len = 0
 
         libssl_SSL_select_next_proto(out_ptr, outlen_ptr,
                                      server, server_len,
@@ -593,14 +597,14 @@
         CB_MAXLEN = 128
 
         with lltype.scoped_alloc(rffi.CCHARP.TO, CB_MAXLEN) as buf:
-            if (libssl_SSL_session_reused(self.ssl) ^ 
+            if (libssl_SSL_session_reused(self.ssl) ^
                 (self.socket_type == PY_SSL_CLIENT)):
                 # if session is resumed XOR we are the client
                 length = libssl_SSL_get_finished(self.ssl, buf, CB_MAXLEN)
             else:
                 # if a new session XOR we are the server
                 length = libssl_SSL_get_peer_finished(self.ssl, buf, CB_MAXLEN)
-            
+
             if length > 0:
                 return space.wrap(rffi.charpsize2str(buf, intmask(length)))
 
@@ -1107,7 +1111,7 @@
             except OperationError as e:
                 if not e.match(space, space.w_TypeError):
                     raise
-                raise oefmt(space.w_TypeError, 
+                raise oefmt(space.w_TypeError,
                             "password callback must return a string")
         except OperationError as e:
             pw_info.operationerror = e
@@ -1196,6 +1200,10 @@
             method = libssl_SSLv2_method()
         elif protocol == PY_SSL_VERSION_SSL23:
             method = libssl_SSLv23_method()
+        elif protocol == PY_SSL_VERSION_TLS1_1 and HAVE_TLSv1_2:
+            method = libssl_TLSv1_1_method()
+        elif protocol == PY_SSL_VERSION_TLS1_2 and HAVE_TLSv1_2:
+            method = libssl_TLSv1_2_method()
         else:
             raise oefmt(space.w_ValueError, "invalid protocol version")
         ctx = libssl_SSL_CTX_new(method)
@@ -1348,7 +1356,7 @@
                 except OperationError as e:
                     if not e.match(space, space.w_TypeError):
                         raise
-                    raise oefmt(space.w_TypeError, 
+                    raise oefmt(space.w_TypeError,
                                 "password should be a string or callable")
 
             libssl_SSL_CTX_set_default_passwd_cb(
@@ -1452,7 +1460,7 @@
         if cadata is not None:
             with rffi.scoped_nonmovingbuffer(cadata) as buf:
                 self._add_ca_certs(space, buf, len(cadata), ca_file_type)
-            
+
         # load cafile or capath
         if cafile is not None or capath is not None:
             ret = libssl_SSL_CTX_load_verify_locations(
diff --git a/pypy/module/bz2/interp_bz2.py b/pypy/module/bz2/interp_bz2.py
--- a/pypy/module/bz2/interp_bz2.py
+++ b/pypy/module/bz2/interp_bz2.py
@@ -562,10 +562,7 @@
         in_bufsize = datasize
 
         with OutBuffer(self.bzs) as out:
-            with lltype.scoped_alloc(rffi.CCHARP.TO, in_bufsize) as in_buf:
-
-                for i in range(datasize):
-                    in_buf[i] = data[i]
+            with rffi.scoped_nonmovingbuffer(data) as in_buf:
 
                 self.bzs.c_next_in = in_buf
                 rffi.setintfield(self.bzs, 'c_avail_in', in_bufsize)
@@ -663,9 +660,7 @@
 
         in_bufsize = len(data)
 
-        with lltype.scoped_alloc(rffi.CCHARP.TO, in_bufsize) as in_buf:
-            for i in range(in_bufsize):
-                in_buf[i] = data[i]
+        with rffi.scoped_nonmovingbuffer(data) as in_buf:
             self.bzs.c_next_in = in_buf
             rffi.setintfield(self.bzs, 'c_avail_in', in_bufsize)
 
@@ -716,9 +711,7 @@
     with lltype.scoped_alloc(bz_stream.TO, zero=True) as bzs:
         in_bufsize = len(data)
 
-        with lltype.scoped_alloc(rffi.CCHARP.TO, in_bufsize) as in_buf:
-            for i in range(in_bufsize):
-                in_buf[i] = data[i]
+        with rffi.scoped_nonmovingbuffer(data) as in_buf:
             bzs.c_next_in = in_buf
             rffi.setintfield(bzs, 'c_avail_in', in_bufsize)
 
@@ -758,9 +751,7 @@
         return space.wrap("")
 
     with lltype.scoped_alloc(bz_stream.TO, zero=True) as bzs:
-        with lltype.scoped_alloc(rffi.CCHARP.TO, in_bufsize) as in_buf:
-            for i in range(in_bufsize):
-                in_buf[i] = data[i]
+        with rffi.scoped_nonmovingbuffer(data) as in_buf:
             bzs.c_next_in = in_buf
             rffi.setintfield(bzs, 'c_avail_in', in_bufsize)
 
diff --git a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py
--- a/pypy/module/pypyjit/test_pypy_c/model.py
+++ b/pypy/module/pypyjit/test_pypy_c/model.py
@@ -271,6 +271,7 @@
     @classmethod
     def parse_ops(cls, src):
         ops = [cls.parse_op(line) for line in src.splitlines()]
+        ops.append(('--end--', None, [], '...', True))
         return [op for op in ops if op is not None]
 
     @classmethod
@@ -403,6 +404,10 @@
             raise InvalidMatch(message, frame=sys._getframe(1))
 
     def match_op(self, op, (exp_opname, exp_res, exp_args, exp_descr, _)):
+        if exp_opname == '--end--':
+            self._assert(op == '--end--', 'got more ops than expected')
+            return
+        self._assert(op != '--end--', 'got less ops than expected')
         self._assert(op.name == exp_opname, "operation mismatch")
         self.match_var(op.res, exp_res)
         if exp_args[-1:] == ['...']:      # exp_args ends with '...'
@@ -415,18 +420,15 @@
         self.match_descr(op.descr, exp_descr)
 
 
-    def _next_op(self, iter_ops, assert_raises=False, ignore_ops=set()):
+    def _next_op(self, iter_ops, ignore_ops=set()):
         try:
             while True:
                 op = iter_ops.next()
                 if op.name not in ignore_ops:
                     break
         except StopIteration:
-            self._assert(assert_raises, "not enough operations")
-            return
-        else:
-            self._assert(not assert_raises, "operation list too long")
-            return op
+            return '--end--'
+        return op
 
     def try_match(self, op, exp_op):
         try:
@@ -493,16 +495,17 @@
                     continue
                 else:
                     op = self._next_op(iter_ops, ignore_ops=ignore_ops)
-                self.match_op(op, exp_op)
-            except InvalidMatch, e:
-                if type(exp_op) is not str and exp_op[4] is False:    # optional operation
+                try:
+                    self.match_op(op, exp_op)
+                except InvalidMatch:
+                    if type(exp_op) is str or exp_op[4] is not False:
+                        raise
+                    #else: optional operation
                     iter_ops.revert_one()
                     continue       # try to match with the next exp_op
+            except InvalidMatch, e:
                 e.opindex = iter_ops.index - 1
                 raise
-        #
-        # make sure we exhausted iter_ops
-        self._next_op(iter_ops, assert_raises=True, ignore_ops=ignore_ops)
 
     def match(self, expected_src, ignore_ops=[]):
         def format(src, opindex=None):
@@ -545,9 +548,9 @@
         return self
     def next(self):
         index = self.index
-        if index == len(self.sequence):
+        self.index = index + 1
+        if index >= len(self.sequence):
             raise StopIteration
-        self.index = index + 1
         return self.sequence[index]
     def revert_one(self):
         self.index -= 1
diff --git a/pypy/module/pypyjit/test_pypy_c/test_00_model.py b/pypy/module/pypyjit/test_pypy_c/test_00_model.py
--- a/pypy/module/pypyjit/test_pypy_c/test_00_model.py
+++ b/pypy/module/pypyjit/test_pypy_c/test_00_model.py
@@ -384,6 +384,25 @@
         """
         assert not self.match(loop, expected)
 
+    def test_match_optional_op(self):
+        loop = """
+            i1 = int_add(i0, 1)
+        """
+        expected = """
+            guard_not_invalidated?
+            i1 = int_add(i0, 1)
+        """
+        assert self.match(loop, expected)
+        #
+        loop = """
+            i1 = int_add(i0, 1)
+        """
+        expected = """
+            i1 = int_add(i0, 1)
+            guard_not_invalidated?
+        """
+        assert self.match(loop, expected)
+
 
 class TestRunPyPyC(BaseTestPyPyC):
     def test_run_function(self):
diff --git a/pypy/module/pypyjit/test_pypy_c/test_array.py b/pypy/module/pypyjit/test_pypy_c/test_array.py
--- a/pypy/module/pypyjit/test_pypy_c/test_array.py
+++ b/pypy/module/pypyjit/test_pypy_c/test_array.py
@@ -94,13 +94,25 @@
                 guard_not_invalidated(descr=...)
             # the bound check guard on img has been killed (thanks to the asserts)
                 i14 = getarrayitem_raw(i10, i8, descr=<ArrayS .>)
+            # advanced: the following int_add cannot overflow, because:
+            # - i14 fits inside 32 bits
+            # - i9 fits inside 33 bits, because:
+            #     - it comes from the previous iteration's i15
+            #     - prev i19 = prev i18 + prev i15
+            #         - prev i18 fits inside 32 bits
+            #         - prev i19 is guarded to fit inside 32 bits
+            #         - so as a consequence, prev i15 fits inside 33 bits
+            # the new i15 thus fits inside "33.5" bits, which is enough to
+            # guarantee that the next int_add(i18, i15) cannot overflow either...
                 i15 = int_add(i9, i14)
                 i17 = int_sub(i8, 640)
             # the bound check guard on intimg has been killed (thanks to the asserts)
                 i18 = getarrayitem_raw(i11, i17, descr=<ArrayS .>)
                 i19 = int_add(i18, i15)
-            # on 64bit, there is a guard checking that i19 actually fits into 32bit
-                ...
+            # guard checking that i19 actually fits into 32bit
+                i20 = int_signext(i19, 4)
+                i65 = int_ne(i20, i19)
+                guard_false(i65, descr=...)
                 setarrayitem_raw(i11, i8, _, descr=<ArrayS .>)
                 i28 = int_add(i8, 1)
                 --TICK--
diff --git a/pypy/tool/ann_override.py b/pypy/tool/ann_override.py
--- a/pypy/tool/ann_override.py
+++ b/pypy/tool/ann_override.py
@@ -13,13 +13,12 @@
 
 
 class PyPyAnnotatorPolicy(AnnotatorPolicy):
-    def __init__(pol, single_space=None):
-        pol.lookups = {}
-        pol.lookups_where = {}
-        pol.pypytypes = {}
-        pol.single_space = single_space
+    def __init__(self):
+        self.lookups = {}
+        self.lookups_where = {}
+        self.pypytypes = {}
 
-    def specialize__wrap(pol,  funcdesc, args_s):
+    def specialize__wrap(self,  funcdesc, args_s):
         from pypy.interpreter.baseobjspace import W_Root
         from rpython.annotator.classdef import ClassDef
         W_Root_def = funcdesc.bookkeeper.getuniqueclassdef(W_Root)
@@ -51,102 +50,102 @@
                 typ = (None, str)
         return funcdesc.cachedgraph(typ)
 
-    def _remember_immutable(pol, t, cached):
+    def _remember_immutable(self, t, cached):
         # for jit benefit
         if cached not in t._immutable_fields_: # accessed this way just
                                                # for convenience
             t._immutable_fields_.append(cached)
 
-    def attach_lookup(pol, t, attr):
+    def attach_lookup(self, t, attr):
         cached = "cached_%s" % attr
         if not t.is_heaptype() and not t.is_cpytype():
-            pol._remember_immutable(t, cached)
+            self._remember_immutable(t, cached)
             setattr(t, cached, t._lookup(attr))
             return True
         return False
 
-    def attach_lookup_in_type_where(pol, t, attr):
+    def attach_lookup_in_type_where(self, t, attr):
         cached = "cached_where_%s" % attr
         if not t.is_heaptype() and not t.is_cpytype():
-            pol._remember_immutable(t, cached)
+            self._remember_immutable(t, cached)
             setattr(t, cached, t._lookup_where(attr))
             return True
         return False
 
-    def consider_lookup(pol, bookkeeper, attr):
+    def consider_lookup(self, bookkeeper, attr):
         from rpython.annotator.classdef import InstanceSource
-        assert attr not in pol.lookups
+        assert attr not in self.lookups
         from pypy.objspace.std import typeobject
         cached = "cached_%s" % attr
         clsdef = bookkeeper.getuniqueclassdef(typeobject.W_TypeObject)
         classdesc = clsdef.classdesc
         classdesc.classdict[cached] = Constant(None)
         clsdef.add_source_for_attribute(cached, classdesc)
-        for t in pol.pypytypes:
-            if pol.attach_lookup(t, attr):
+        for t in self.pypytypes:
+            if self.attach_lookup(t, attr):
                 source = InstanceSource(bookkeeper, t)
                 clsdef.add_source_for_attribute(cached, source)
-        pol.lookups[attr] = True
+        self.lookups[attr] = True
 
-    def consider_lookup_in_type_where(pol, bookkeeper, attr):
+    def consider_lookup_in_type_where(self, bookkeeper, attr):
         from rpython.annotator.classdef import InstanceSource
-        assert attr not in pol.lookups_where
+        assert attr not in self.lookups_where
         from pypy.objspace.std import typeobject
         cached = "cached_where_%s" % attr
         clsdef = bookkeeper.getuniqueclassdef(typeobject.W_TypeObject)
         classdesc = clsdef.classdesc
         classdesc.classdict[cached] = Constant((None, None))
         clsdef.add_source_for_attribute(cached, classdesc)
-        for t in pol.pypytypes:
-            if pol.attach_lookup_in_type_where(t, attr):
+        for t in self.pypytypes:
+            if self.attach_lookup_in_type_where(t, attr):
                 source = InstanceSource(bookkeeper, t)
                 clsdef.add_source_for_attribute(cached, source)
-        pol.lookups_where[attr] = True
+        self.lookups_where[attr] = True
 
-    def specialize__lookup(pol, funcdesc, args_s):
+    def specialize__lookup(self, funcdesc, args_s):
         s_space, s_obj, s_name = args_s
         if s_name.is_constant():
             attr = s_name.const
             def builder(translator, func):
                 #print "LOOKUP", attr
-                pol.consider_lookup(funcdesc.bookkeeper, attr)
+                self.consider_lookup(funcdesc.bookkeeper, attr)
                 d = {'__name__': '<ann_override_lookup>'}
                 exec CACHED_LOOKUP % {'attr': attr} in d
                 return translator.buildflowgraph(d['lookup_'+attr])
             return funcdesc.cachedgraph(attr, builder=builder)
         else:
-            pol.lookups[None] = True
+            self.lookups[None] = True
             return funcdesc.cachedgraph(None) # don't specialize
 
-    def specialize__lookup_in_type_where(pol, funcdesc, args_s):
+    def specialize__lookup_in_type_where(self, funcdesc, args_s):
         s_space, s_obj, s_name = args_s
         if s_name.is_constant():
             attr = s_name.const
             def builder(translator, func):
                 #print "LOOKUP_IN_TYPE_WHERE", attr
-                pol.consider_lookup_in_type_where(funcdesc.bookkeeper, attr)
+                self.consider_lookup_in_type_where(funcdesc.bookkeeper, attr)
                 d = {'__name__': '<ann_override_lookup>'}
                 exec CACHED_LOOKUP_IN_TYPE_WHERE % {'attr': attr} in d
             return translator.buildflowgraph(d['lookup_in_type_where_'+attr])
             return funcdesc.cachedgraph(attr, builder=builder)
         else:
-            pol.lookups_where[None] = True
+            self.lookups_where[None] = True
             return funcdesc.cachedgraph(None)
 
-    def event(pol, bookkeeper, what, x):
+    def event(self, bookkeeper, what, x):
         from pypy.objspace.std import typeobject
         if isinstance(x, typeobject.W_TypeObject):
             from rpython.annotator.classdef import InstanceSource
             clsdef = bookkeeper.getuniqueclassdef(typeobject.W_TypeObject)
-            pol.pypytypes[x] = True
+            self.pypytypes[x] = True
             #print "TYPE", x
-            for attr in pol.lookups:
-                if attr and pol.attach_lookup(x, attr):
+            for attr in self.lookups:
+                if attr and self.attach_lookup(x, attr):
                     cached = "cached_%s" % attr
                     source = InstanceSource(bookkeeper, x)
                     clsdef.add_source_for_attribute(cached, source)
-            for attr in pol.lookups_where:
-                if attr and pol.attach_lookup_in_type_where(x, attr):
+            for attr in self.lookups_where:
+                if attr and self.attach_lookup_in_type_where(x, attr):
                     cached = "cached_where_%s" % attr
                     source = InstanceSource(bookkeeper, x)
                     clsdef.add_source_for_attribute(cached, source)
diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py
--- a/rpython/annotator/annrpython.py
+++ b/rpython/annotator/annrpython.py
@@ -12,6 +12,7 @@
 from rpython.annotator import model as annmodel, signature
 from rpython.annotator.argument import simple_args
 from rpython.annotator.bookkeeper import Bookkeeper
+from rpython.rtyper.normalizecalls import perform_normalizations
 
 import py
 log = py.log.Producer("annrpython")
@@ -317,6 +318,8 @@
                     graphs[graph] = True
         for graph in graphs:
             simplify.eliminate_empty_blocks(graph)
+        if block_subset is None:
+            perform_normalizations(self)
 
 
     #___ flowing annotations in blocks _____________________
diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py
--- a/rpython/annotator/bookkeeper.py
+++ b/rpython/annotator/bookkeeper.py
@@ -42,7 +42,7 @@
 
     def __setstate__(self, dic):
         self.__dict__.update(dic) # normal action
-        delayed_imports()
+        self.register_builtins()
 
     def __init__(self, annotator):
         self.annotator = annotator
@@ -67,7 +67,13 @@
         self.needs_generic_instantiate = {}
         self.thread_local_fields = set()
 
-        delayed_imports()
+        self.register_builtins()
+
+    def register_builtins(self):
+        import rpython.annotator.builtin  # for side-effects
+        from rpython.annotator.exception import standardexceptions
+        for cls in standardexceptions:
+            self.getuniqueclassdef(cls)
 
     def enter(self, position_key):
         """Start of an operation.
@@ -105,8 +111,7 @@
 
             for pbc, args_s in self.emulated_pbc_calls.itervalues():
                 args = simple_args(args_s)
-                self.consider_call_site_for_pbc(pbc, args,
-                                                s_ImpossibleValue, None)
+                pbc.consider_call_site(args, s_ImpossibleValue, None)
             self.emulated_pbc_calls = {}
         finally:
             self.leave()
@@ -157,15 +162,7 @@
             if s_result is None:
                 s_result = s_ImpossibleValue
             args = call_op.build_args(args_s)
-            self.consider_call_site_for_pbc(s_callable, args,
-                                            s_result, call_op)
-
-    def consider_call_site_for_pbc(self, s_callable, args, s_result,
-                                   call_op):
-        descs = list(s_callable.descriptions)
-        family = descs[0].getcallfamily()
-        s_callable.getKind().consider_call_site(self, family, descs, args,
-                                                s_result, call_op)
+            s_callable.consider_call_site(args, s_result, call_op)
 
     def getuniqueclassdef(self, cls):
         """Get the ClassDef associated with the given user cls.
@@ -605,6 +602,3 @@
 
 def immutablevalue(x):
     return getbookkeeper().immutablevalue(x)
-
-def delayed_imports():
-    import rpython.annotator.builtin
diff --git a/rpython/annotator/classdef.py b/rpython/annotator/classdef.py
--- a/rpython/annotator/classdef.py
+++ b/rpython/annotator/classdef.py
@@ -2,8 +2,7 @@
 Type inference for user-defined classes.
 """
 from rpython.annotator.model import (
-    SomePBC, s_ImpossibleValue, unionof, s_None, SomeInteger,
-    SomeTuple, SomeString, AnnotatorError, SomeUnicodeString)
+    SomePBC, s_ImpossibleValue, unionof, s_None, AnnotatorError)
 from rpython.annotator import description
 
 
@@ -437,18 +436,3 @@
 class NoSuchAttrError(AnnotatorError):
     """Raised when an attribute is found on a class where __slots__
      or _attrs_ forbits it."""
-
-# ____________________________________________________________
-
-FORCE_ATTRIBUTES_INTO_CLASSES = {
-    EnvironmentError: {'errno': SomeInteger(),
-                       'strerror': SomeString(can_be_None=True),
-                       'filename': SomeString(can_be_None=True)},
-}
-
-try:
-    WindowsError
-except NameError:
-    pass
-else:
-    FORCE_ATTRIBUTES_INTO_CLASSES[WindowsError] = {'winerror': SomeInteger()}
diff --git a/rpython/annotator/description.py b/rpython/annotator/description.py
--- a/rpython/annotator/description.py
+++ b/rpython/annotator/description.py
@@ -7,7 +7,7 @@
 from rpython.annotator.argument import rawshape, ArgErr
 from rpython.tool.sourcetools import valid_identifier, func_with_new_name
 from rpython.tool.pairtype import extendabletype
-from rpython.annotator.model import AnnotatorError
+from rpython.annotator.model import AnnotatorError, SomeInteger, SomeString
 
 class CallFamily(object):
     """A family of Desc objects that could be called from common call sites.
@@ -329,9 +329,10 @@
                                              name)
 
     @staticmethod
-    def consider_call_site(bookkeeper, family, descs, args, s_result, op):
+    def consider_call_site(descs, args, s_result, op):
         shape = rawshape(args)
         row = FunctionDesc.row_to_consider(descs, args, op)
+        family = descs[0].getcallfamily()
         family.calltable_add_row(shape, row)
 
     @staticmethod
@@ -404,6 +405,8 @@
                  name=None, basedesc=None, classdict=None,
                  specialize=None):
         super(ClassDesc, self).__init__(bookkeeper, cls)
+        if '__NOT_RPYTHON__' in cls.__dict__:
+            raise AnnotatorError('Bad class')
 
         if name is None:
             name = cls.__module__ + '.' + cls.__name__
@@ -477,8 +480,7 @@
 
         if (self.is_builtin_exception_class() and
                 self.all_enforced_attrs is None):
-            from rpython.annotator import classdef
-            if cls not in classdef.FORCE_ATTRIBUTES_INTO_CLASSES:
+            if cls not in FORCE_ATTRIBUTES_INTO_CLASSES:
                 self.all_enforced_attrs = []    # no attribute allowed
 
     def add_source_attribute(self, name, value, mixin=False):
@@ -573,8 +575,7 @@
         try:
             return self._classdefs[key]
         except KeyError:
-            from rpython.annotator.classdef import (
-                ClassDef, FORCE_ATTRIBUTES_INTO_CLASSES)
+            from rpython.annotator.classdef import ClassDef
             classdef = ClassDef(self.bookkeeper, self)
             self.bookkeeper.classdefs.append(classdef)
             self._classdefs[key] = classdef
@@ -760,7 +761,7 @@
         return s_result     # common case
 
     @staticmethod
-    def consider_call_site(bookkeeper, family, descs, args, s_result, op):
+    def consider_call_site(descs, args, s_result, op):
         from rpython.annotator.model import SomeInstance, SomePBC, s_None
         if len(descs) == 1:
             # call to a single class, look at the result annotation
@@ -795,17 +796,14 @@
                     "unexpected dynamic __init__?")
                 initfuncdesc, = s_init.descriptions
                 if isinstance(initfuncdesc, FunctionDesc):
-                    initmethdesc = bookkeeper.getmethoddesc(initfuncdesc,
-                                                            classdef,
-                                                            classdef,
-                                                            '__init__')
+                    from rpython.annotator.bookkeeper import getbookkeeper
+                    initmethdesc = getbookkeeper().getmethoddesc(
+                        initfuncdesc, classdef, classdef, '__init__')
                     initdescs.append(initmethdesc)
         # register a call to exactly these __init__ methods
         if initdescs:
             initdescs[0].mergecallfamilies(*initdescs[1:])
-            initfamily = initdescs[0].getcallfamily()
-            MethodDesc.consider_call_site(bookkeeper, initfamily, initdescs,
-                                          args, s_None, op)
+            MethodDesc.consider_call_site(initdescs, args, s_None, op)
 
     def getallbases(self):
         desc = self
@@ -897,10 +895,11 @@
                                              flags)
 
     @staticmethod
-    def consider_call_site(bookkeeper, family, descs, args, s_result, op):
+    def consider_call_site(descs, args, s_result, op):
         cnt, keys, star = rawshape(args)
         shape = cnt + 1, keys, star  # account for the extra 'self'
         row = FunctionDesc.row_to_consider(descs, args, op)
+        family = descs[0].getcallfamily()
         family.calltable_add_row(shape, row)
 
     def rowkey(self):
@@ -1058,10 +1057,11 @@
         return self.funcdesc.pycall(schedule, args, s_previous_result, op)
 
     @staticmethod
-    def consider_call_site(bookkeeper, family, descs, args, s_result, op):
+    def consider_call_site(descs, args, s_result, op):
         cnt, keys, star = rawshape(args)
         shape = cnt + 1, keys, star  # account for the extra 'self'
         row = FunctionDesc.row_to_consider(descs, args, op)
+        family = descs[0].getcallfamily()
         family.calltable_add_row(shape, row)
 
     def rowkey(self):
@@ -1077,3 +1077,18 @@
     MemberDescriptorTypes.append(type(OSError.errno))
 except AttributeError:    # on CPython <= 2.4
     pass
+
+# ____________________________________________________________
+
+FORCE_ATTRIBUTES_INTO_CLASSES = {
+    EnvironmentError: {'errno': SomeInteger(),
+                       'strerror': SomeString(can_be_None=True),
+                       'filename': SomeString(can_be_None=True)},
+}
+
+try:
+    WindowsError
+except NameError:
+    pass
+else:
+    FORCE_ATTRIBUTES_INTO_CLASSES[WindowsError] = {'winerror': SomeInteger()}
diff --git a/rpython/annotator/exception.py b/rpython/annotator/exception.py
new file mode 100644
--- /dev/null
+++ b/rpython/annotator/exception.py
@@ -0,0 +1,7 @@
+from rpython.rlib import rstackovf
+
+# the exceptions that can be implicitely raised by some operations
+standardexceptions = set([TypeError, OverflowError, ValueError,
+    ZeroDivisionError, MemoryError, IOError, OSError, StopIteration, KeyError,
+    IndexError, AssertionError, RuntimeError, UnicodeDecodeError,
+    UnicodeEncodeError, NotImplementedError, rstackovf._StackOverflow])
diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py
--- a/rpython/annotator/model.py
+++ b/rpython/annotator/model.py
@@ -495,6 +495,10 @@
         if len(self.descriptions) > 1:
             kind.simplify_desc_set(self.descriptions)
 
+    def consider_call_site(self, args, s_result, call_op):
+        descs = list(self.descriptions)
+        self.getKind().consider_call_site(descs, args, s_result, call_op)
+
     def can_be_none(self):
         return self.can_be_None
 
@@ -588,7 +592,7 @@
 
 
 class SomeProperty(SomeObject):
-    # used for union error only 
+    # used for union error only
     immutable = True
     knowntype = type(property)
 
diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py
--- a/rpython/memory/gc/incminimark.py
+++ b/rpython/memory/gc/incminimark.py
@@ -47,6 +47,11 @@
                          too slow for normal use.  Values are 0 (off),
                          1 (on major collections) or 2 (also on minor
                          collections).
+
+ PYPY_GC_MAX_PINNED      The maximal number of pinned objects at any point
+                         in time.  Defaults to a conservative value depending
+                         on nursery size and maximum object size inside the
+                         nursery.  Useful for debugging by setting it to 0.
 """
 # XXX Should find a way to bound the major collection threshold by the
 # XXX total addressable size.  Maybe by keeping some minimarkpage arenas
@@ -56,6 +61,7 @@
 # XXX try merging old_objects_pointing_to_pinned into
 # XXX old_objects_pointing_to_young (IRC 2014-10-22, fijal and gregor_w)
 import sys
+import os
 from rpython.rtyper.lltypesystem import lltype, llmemory, llarena, llgroup
 from rpython.rtyper.lltypesystem.lloperation import llop
 from rpython.rtyper.lltypesystem.llmemory import raw_malloc_usage
@@ -463,9 +469,19 @@
             self.nursery_size = newsize
             self.allocate_nursery()
         #
-        # Estimate this number conservatively
-        bigobj = self.nonlarge_max + 1
-        self.max_number_of_pinned_objects = self.nursery_size / (bigobj * 2)
+        env_max_number_of_pinned_objects = os.environ.get('PYPY_GC_MAX_PINNED')
+        if env_max_number_of_pinned_objects:
+            try:
+                env_max_number_of_pinned_objects = int(env_max_number_of_pinned_objects)
+            except ValueError:
+                env_max_number_of_pinned_objects = 0
+            #
+            if env_max_number_of_pinned_objects >= 0: # 0 allows to disable pinning completely
+                self.max_number_of_pinned_objects = env_max_number_of_pinned_objects
+        else:
+            # Estimate this number conservatively
+            bigobj = self.nonlarge_max + 1
+            self.max_number_of_pinned_objects = self.nursery_size / (bigobj * 2)
 
     def _nursery_memory_size(self):
         extra = self.nonlarge_max + 1
diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py
--- a/rpython/memory/gctransform/framework.py
+++ b/rpython/memory/gctransform/framework.py
@@ -36,7 +36,10 @@
         return graphanalyze.BoolGraphAnalyzer.analyze_direct_call(self, graph,
                                                                   seen)
     def analyze_external_call(self, op, seen=None):
-        funcobj = op.args[0].value._obj
+        try:
+            funcobj = op.args[0].value._obj
+        except lltype.DelayedPointer:
+            return True
         if getattr(funcobj, 'random_effects_on_gcobjs', False):
             return True
         return graphanalyze.BoolGraphAnalyzer.analyze_external_call(self, op,
@@ -248,6 +251,8 @@
         annhelper.finish()   # at this point, annotate all mix-level helpers
         annhelper.backend_optimize()
 
+        self.check_custom_trace_funcs(gcdata.gc, translator.rtyper)
+
         self.collect_analyzer = CollectAnalyzer(self.translator)
         self.collect_analyzer.analyze_all()
 
@@ -537,6 +542,24 @@
             self.gcdata._has_got_custom_trace(self.get_type_id(TP))
             specialize.arg(2)(func)
 
+    def check_custom_trace_funcs(self, gc, rtyper):
+        # detect if one of the custom trace functions uses the GC
+        # (it must not!)
+        for TP, func in rtyper.custom_trace_funcs:
+            def no_op_callback(obj, arg):
+                pass
+            def ll_check_no_collect(obj):
+                func(gc, obj, no_op_callback, None)
+            annhelper = annlowlevel.MixLevelHelperAnnotator(rtyper)
+            graph1 = annhelper.getgraph(ll_check_no_collect, [SomeAddress()],
+                                        annmodel.s_None)
+            annhelper.finish()
+            collect_analyzer = CollectAnalyzer(self.translator)
+            if collect_analyzer.analyze_direct_call(graph1):
+                raise Exception(
+                    "the custom trace hook %r for %r can cause "
+                    "the GC to be called!" % (func, TP))
+
     def consider_constant(self, TYPE, value):
         self.layoutbuilder.consider_constant(TYPE, value, self.gcdata.gc)
 
diff --git a/rpython/memory/gctransform/test/test_framework.py b/rpython/memory/gctransform/test/test_framework.py
--- a/rpython/memory/gctransform/test/test_framework.py
+++ b/rpython/memory/gctransform/test/test_framework.py
@@ -143,6 +143,31 @@
     expected = "'no_collect' function can trigger collection: <function g at "
     assert str(f.value).startswith(expected)
 
+def test_custom_trace_function_no_collect():
+    from rpython.rlib import rgc
+    from rpython.translator.c.genc import CStandaloneBuilder
+
+    S = lltype.GcStruct("MyStructure")
+    class Glob:
+        pass
+    glob = Glob()
+    def trace_func(gc, obj, callback, arg):
+        glob.foo = (gc, obj)
+    lambda_trace_func = lambda: trace_func
+    def entrypoint(argv):
+        lltype.malloc(S)
+        rgc.register_custom_trace_hook(S, lambda_trace_func)
+        return 0
+
+    t = rtype(entrypoint, [s_list_of_strings])
+    t.config.translation.gc = "minimark"
+    cbuild = CStandaloneBuilder(t, entrypoint, t.config,
+                                gcpolicy=FrameworkGcPolicy2)
+    f = py.test.raises(Exception, cbuild.generate_graphs_for_llinterp)
+    assert 'can cause the GC to be called' in str(f.value)
+    assert 'trace_func' in str(f.value)
+    assert 'MyStructure' in str(f.value)
+ 
 class WriteBarrierTransformer(ShadowStackFrameworkGCTransformer):
     clean_sets = {}
     GC_PARAMS = {}
diff --git a/rpython/rlib/ropenssl.py b/rpython/rlib/ropenssl.py
--- a/rpython/rlib/ropenssl.py
+++ b/rpython/rlib/ropenssl.py
@@ -255,6 +255,8 @@
                              OPENSSL_VERSION_NUMBER != 0x00909000
 if OPENSSL_VERSION_NUMBER < 0x0090800f and not OPENSSL_NO_ECDH:
     OPENSSL_NO_ECDH = True
+HAVE_TLSv1_2 = OPENSSL_VERSION_NUMBER >= 0x10001000
+
 
 def external(name, argtypes, restype, **kw):
     kw['compilation_info'] = eci
@@ -284,6 +286,9 @@
 ssl_external('SSL_get_SSL_CTX', [SSL], SSL_CTX)
 ssl_external('SSL_set_SSL_CTX', [SSL, SSL_CTX], SSL_CTX)
 ssl_external('TLSv1_method', [], SSL_METHOD)
+if HAVE_TLSv1_2:
+    ssl_external('TLSv1_1_method', [], SSL_METHOD)
+    ssl_external('TLSv1_2_method', [], SSL_METHOD)
 ssl_external('SSLv2_method', [], SSL_METHOD)
 ssl_external('SSLv3_method', [], SSL_METHOD)
 ssl_external('SSLv23_method', [], SSL_METHOD)
diff --git a/rpython/rlib/rzlib.py b/rpython/rlib/rzlib.py
--- a/rpython/rlib/rzlib.py
+++ b/rpython/rlib/rzlib.py
@@ -365,10 +365,8 @@
     """Common code for compress() and decompress().
     """
     # Prepare the input buffer for the stream
-    with lltype.scoped_alloc(rffi.CCHARP.TO, len(data)) as inbuf:
-        # XXX (groggi) should be possible to improve this with pinning by
-        # not performing the 'copy_string_to_raw' if non-movable/pinned
-        copy_string_to_raw(llstr(data), inbuf, 0, len(data))
+    assert data is not None # XXX seems to be sane assumption, however not for sure
+    with rffi.scoped_nonmovingbuffer(data) as inbuf:
         stream.c_next_in = rffi.cast(Bytefp, inbuf)
         rffi.setintfield(stream, 'c_avail_in', len(data))
 
diff --git a/rpython/rtyper/annlowlevel.py b/rpython/rtyper/annlowlevel.py
--- a/rpython/rtyper/annlowlevel.py
+++ b/rpython/rtyper/annlowlevel.py
@@ -41,9 +41,10 @@
     __repr__ = __str__
 
 class LowLevelAnnotatorPolicy(AnnotatorPolicy):
-    def __init__(pol, rtyper=None):
-        pol.rtyper = rtyper
+    def __init__(self, rtyper=None):
+        self.rtyper = rtyper
 
+    @staticmethod
     def lowlevelspecialize(funcdesc, args_s, key_for_args):
         args_s, key1, builder = flatten_star_args(funcdesc, args_s)
         key = []
@@ -73,21 +74,20 @@
         flowgraph = funcdesc.cachedgraph(key, builder=builder)
         args_s[:] = new_args_s
         return flowgraph
-    lowlevelspecialize = staticmethod(lowlevelspecialize)
 
+    @staticmethod
     def default_specialize(funcdesc, args_s):
         return LowLevelAnnotatorPolicy.lowlevelspecialize(funcdesc, args_s, {})
-    default_specialize = staticmethod(default_specialize)
 
     specialize__ll = default_specialize
 
+    @staticmethod
     def specialize__ll_and_arg(funcdesc, args_s, *argindices):
         keys = {}
         for i in argindices:
             keys[i] = args_s[i].const
         return LowLevelAnnotatorPolicy.lowlevelspecialize(funcdesc, args_s,
                                                           keys)
-    specialize__ll_and_arg = staticmethod(specialize__ll_and_arg)
 
 def annotate_lowlevel_helper(annotator, ll_function, args_s, policy=None):
     if policy is None:
@@ -99,24 +99,23 @@
 
 class MixLevelAnnotatorPolicy(LowLevelAnnotatorPolicy):
 
-    def __init__(pol, annhelper):
-        pol.annhelper = annhelper
-        pol.rtyper = annhelper.rtyper
+    def __init__(self, annhelper):
+        self.rtyper = annhelper.rtyper
 
-    def default_specialize(pol, funcdesc, args_s):
+    def default_specialize(self, funcdesc, args_s):
         name = funcdesc.name
         if name.startswith('ll_') or name.startswith('_ll_'): # xxx can we do better?
-            return super(MixLevelAnnotatorPolicy, pol).default_specialize(
+            return super(MixLevelAnnotatorPolicy, self).default_specialize(
                 funcdesc, args_s)
         else:
             return AnnotatorPolicy.default_specialize(funcdesc, args_s)
 
-    def specialize__arglltype(pol, funcdesc, args_s, i):
-        key = pol.rtyper.getrepr(args_s[i]).lowleveltype
+    def specialize__arglltype(self, funcdesc, args_s, i):
+        key = self.rtyper.getrepr(args_s[i]).lowleveltype
         alt_name = funcdesc.name+"__for_%sLlT" % key._short_name()
         return funcdesc.cachedgraph(key, alt_name=valid_identifier(alt_name))
 
-    def specialize__genconst(pol, funcdesc, args_s, i):
+    def specialize__genconst(self, funcdesc, args_s, i):
         # XXX this is specific to the JIT
         TYPE = annotation_to_lltype(args_s[i], 'genconst')
         args_s[i] = lltype_to_annotation(TYPE)
diff --git a/rpython/rtyper/exceptiondata.py b/rpython/rtyper/exceptiondata.py
--- a/rpython/rtyper/exceptiondata.py
+++ b/rpython/rtyper/exceptiondata.py
@@ -1,15 +1,9 @@
 from rpython.annotator import model as annmodel
+from rpython.annotator.exception import standardexceptions
 from rpython.rtyper.llannotation import SomePtr
-from rpython.rlib import rstackovf
 from rpython.rtyper.rclass import (
     ll_issubclass, ll_type, ll_cast_to_object, getclassrepr, getinstancerepr)
 
-# the exceptions that can be implicitely raised by some operations
-standardexceptions = set([TypeError, OverflowError, ValueError,
-    ZeroDivisionError, MemoryError, IOError, OSError, StopIteration, KeyError,
-    IndexError, AssertionError, RuntimeError, UnicodeDecodeError,
-    UnicodeEncodeError, NotImplementedError, rstackovf._StackOverflow])
-
 class UnknownException(Exception):
     pass
 
@@ -20,7 +14,6 @@
     standardexceptions = standardexceptions
 
     def __init__(self, rtyper):
-        self.make_standard_exceptions(rtyper)
         # (NB. rclass identifies 'Exception' and 'object')
         r_type = rtyper.rootclass_repr
         r_instance = getinstancerepr(rtyper, None)
@@ -32,11 +25,6 @@
         self.lltype_of_exception_value = r_instance.lowleveltype
         self.rtyper = rtyper
 
-    def make_standard_exceptions(self, rtyper):
-        bk = rtyper.annotator.bookkeeper
-        for cls in self.standardexceptions:
-            bk.getuniqueclassdef(cls)
-
     def finish(self, rtyper):
         bk = rtyper.annotator.bookkeeper
         for cls in self.standardexceptions:
diff --git a/rpython/rtyper/extfunc.py b/rpython/rtyper/extfunc.py
--- a/rpython/rtyper/extfunc.py
+++ b/rpython/rtyper/extfunc.py
@@ -157,12 +157,11 @@
         r_result = rtyper.getrepr(s_result)
         ll_result = r_result.lowleveltype
         name = getattr(self, 'name', None) or self.instance.__name__
-        method_name = rtyper.type_system.name[:2] + 'typeimpl'
         fake_method_name = rtyper.type_system.name[:2] + 'typefakeimpl'
-        impl = getattr(self, method_name, None)
-        fakeimpl = getattr(self, fake_method_name, self.instance)
+        impl = getattr(self, 'lltypeimpl', None)
+        fakeimpl = getattr(self, 'lltypefakeimpl', self.instance)
         if impl:
-            if hasattr(self, fake_method_name):
+            if hasattr(self, 'lltypefakeimpl'):
                 # If we have both an llimpl and an llfakeimpl,
                 # we need a wrapper that selects the proper one and calls it
                 from rpython.tool.sourcetools import func_with_new_name
diff --git a/rpython/rtyper/lltypesystem/rbuilder.py b/rpython/rtyper/lltypesystem/rbuilder.py
--- a/rpython/rtyper/lltypesystem/rbuilder.py
+++ b/rpython/rtyper/lltypesystem/rbuilder.py
@@ -401,18 +401,6 @@
     def empty(self):
         return nullptr(self.lowleveltype.TO)
 
-    @classmethod
-    def ll_new(cls, init_size):
-        # Clamp 'init_size' to be a value between 0 and 1280.
-        # Negative values are mapped to 1280.
-        init_size = intmask(min(r_uint(init_size), r_uint(1280)))
-        ll_builder = lltype.malloc(cls.lowleveltype.TO)
-        ll_builder.current_buf = ll_builder.mallocfn(init_size)
-        ll_builder.current_pos = 0
-        ll_builder.current_end = init_size
-        ll_builder.total_size = init_size
-        return ll_builder
-
     ll_append               = staticmethod(ll_append)
     ll_append_char          = staticmethod(ll_append_char)
     ll_append_slice         = staticmethod(ll_append_slice)
@@ -431,6 +419,19 @@
         lltype.Ptr(lltype.Array(lltype.Char, hints={'nolength': True}))
     )
 
+    @staticmethod
+    def ll_new(init_size):
+        # Clamp 'init_size' to be a value between 0 and 1280.
+        # Negative values are mapped to 1280.
+        init_size = intmask(min(r_uint(init_size), r_uint(1280)))
+        ll_builder = lltype.malloc(STRINGBUILDER)
+        ll_builder.current_buf = ll_builder.mallocfn(init_size)
+        ll_builder.current_pos = 0
+        ll_builder.current_end = init_size
+        ll_builder.total_size = init_size
+        return ll_builder
+
+
 class UnicodeBuilderRepr(BaseStringBuilderRepr):
     lowleveltype = lltype.Ptr(UNICODEBUILDER)
     basetp = UNICODE
@@ -440,5 +441,18 @@
         lltype.Ptr(lltype.Array(lltype.UniChar, hints={'nolength': True}))
     )
 
+    @staticmethod
+    def ll_new(init_size):
+        # Clamp 'init_size' to be a value between 0 and 1280.
+        # Negative values are mapped to 1280.
+        init_size = intmask(min(r_uint(init_size), r_uint(1280)))
+        ll_builder = lltype.malloc(UNICODEBUILDER)
+        ll_builder.current_buf = ll_builder.mallocfn(init_size)
+        ll_builder.current_pos = 0
+        ll_builder.current_end = init_size
+        ll_builder.total_size = init_size
+        return ll_builder
+
+
 unicodebuilder_repr = UnicodeBuilderRepr()
 stringbuilder_repr = StringBuilderRepr()
diff --git a/rpython/rtyper/rmodel.py b/rpython/rtyper/rmodel.py
--- a/rpython/rtyper/rmodel.py
+++ b/rpython/rtyper/rmodel.py
@@ -25,6 +25,7 @@
     """
     __metaclass__ = extendabletype
     _initialized = setupstate.NOTINITIALIZED
+    __NOT_RPYTHON__ = True
 
     def __repr__(self):
         return '<%s %s>' % (self.__class__.__name__, self.lowleveltype)
diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py
--- a/rpython/rtyper/rtyper.py
+++ b/rpython/rtyper/rtyper.py
@@ -26,7 +26,6 @@
     attachRuntimeTypeInfo, Primitive)
 from rpython.rtyper.rmodel import Repr, inputconst, BrokenReprTyperError
 from rpython.rtyper.typesystem import LowLevelTypeSystem, getfunctionptr
-from rpython.rtyper.normalizecalls import perform_normalizations
 from rpython.rtyper import rclass
 from rpython.rtyper.rclass import RootClassRepr
 from rpython.tool.pairtype import pair
@@ -55,8 +54,6 @@
         self.concrete_calltables = {}
         self.cache_dummy_values = {}
         self.lltype2vtable = {}
-        self.typererrors = []
-        self.typererror_count = 0
         # make the primitive_to_repr constant mapping
         self.primitive_to_repr = {}
         self.isinstance_helpers = {}
@@ -169,22 +166,16 @@
     def specialize(self, dont_simplify_again=False):
         """Main entry point: specialize all annotated blocks of the program."""
         # specialize depends on annotator simplifications
-        assert dont_simplify_again in (False, True)  # safety check
         if not dont_simplify_again:
             self.annotator.simplify()
-
-        # first make sure that all functions called in a group have exactly
-        # the same signature, by hacking their flow graphs if needed
-        perform_normalizations(self.annotator)
         self.exceptiondata.finish(self)
 
         # new blocks can be created as a result of specialize_block(), so
         # we need to be careful about the loop here.
         self.already_seen = {}
         self.specialize_more_blocks()
-        if self.exceptiondata is not None:
-            self.exceptiondata.make_helpers(self)
-            self.specialize_more_blocks()   # for the helpers just made
+        self.exceptiondata.make_helpers(self)
+        self.specialize_more_blocks()   # for the helpers just made
 
     def getannmixlevel(self):
         if self.annmixlevel is not None:
@@ -231,18 +222,11 @@
                     percentage = 100 * n // total
                     if percentage >= previous_percentage + 5:
                         previous_percentage = percentage
-                        if self.typererror_count:
-                            error_report = " but %d errors" % 
self.typererror_count
-                        else:
-                            error_report = ''
-                        self.log.event('specializing: %d / %d blocks   (%d%%)%s' %
-                                       (n, total, percentage, error_report))
+                        self.log.event('specializing: %d / %d blocks   (%d%%)' %
+                                       (n, total, percentage))
             # make sure all reprs so far have had their setup() called
             self.call_all_setups()
 
-        if self.typererrors:
-            self.dump_typererrors(to_log=True)
-            raise TyperError("there were %d error" % len(self.typererrors))
         self.log.event('-=- specialized %d%s blocks -=-' % (
             blockcount, newtext))
         annmixlevel = self.annmixlevel
@@ -250,29 +234,6 @@
         if annmixlevel is not None:
             annmixlevel.finish()
 
-    def dump_typererrors(self, num=None, minimize=True, to_log=False):
-        c = 0
-        bc = 0
-        for err in self.typererrors[:num]:
-            c += 1
-            if minimize and isinstance(err, BrokenReprTyperError):
-                bc += 1
-                continue
-            graph, block, position = err.where
-            errmsg = ("TyperError-%d: %s\n" % (c, graph) +
-                      str(err) +
-                      "\n")
-            if to_log:
-                self.log.ERROR(errmsg)
-            else:
-                print errmsg
-        if bc:
-            minmsg = "(minimized %d errors away for this dump)" % (bc,)
-            if to_log:
-                self.log.ERROR(minmsg)
-            else:
-                print minmsg
-
     def call_all_setups(self):
         # make sure all reprs so far have had their setup() called
         must_setup_more = []
@@ -324,9 +285,9 @@
         # give the best possible types to the input args
         try:
             self.setup_block_entry(block)
-        except TyperError, e:
-            self.gottypererror(e, block, "block-entry", None)
-            return  # cannot continue this block
+        except TyperError as e:
+            self.gottypererror(e, block, "block-entry")
+            raise
 
 
         # specialize all the operations, as far as possible
@@ -341,9 +302,9 @@
             try:
                 hop.setup()  # this is called from here to catch TyperErrors...
                 self.translate_hl_to_ll(hop, varmapping)
-            except TyperError, e:
-                self.gottypererror(e, block, hop.spaceop, newops)
-                return  # cannot continue this block: no op.result.concretetype
+            except TyperError as e:
+                self.gottypererror(e, block, hop.spaceop)
+                raise
 
         block.operations[:] = newops
         block.renamevariables(varmapping)
@@ -432,9 +393,9 @@
                     continue   # no conversion needed
                 try:
                     new_a1 = newops.convertvar(a1, r_a1, r_a2)
-                except TyperError, e:
-                    self.gottypererror(e, block, link, newops)
-                    continue # try other args
+                except TyperError as e:
+                    self.gottypererror(e, block, link)
+                    raise
                 if new_a1 != a1:
                     newlinkargs[i] = new_a1
 
@@ -516,14 +477,10 @@
                              "has no return value" % op.opname)
         op.result.concretetype = Void
 
-    def gottypererror(self, e, block, position, llops):
-        """Record a TyperError without crashing immediately.
-        Put a 'TyperError' operation in the graph instead.
-        """
+    def gottypererror(self, exc, block, position):
+        """Record information about the location of a TyperError"""
         graph = self.annotator.annotated.get(block)
-        e.where = (graph, block, position)
-        self.typererror_count += 1
-        raise
+        exc.where = (graph, block, position)
 
     # __________ regular operations __________
 
diff --git a/rpython/rtyper/test/test_llinterp.py b/rpython/rtyper/test/test_llinterp.py
--- a/rpython/rtyper/test/test_llinterp.py
+++ b/rpython/rtyper/test/test_llinterp.py
@@ -25,22 +25,12 @@
     py.log._setstate(mod.logstate)
 
 
-
-def timelog(prefix, call, *args, **kwds):
-    #import time
-    #print prefix, "...",
-    #start = time.time()
-    res = call(*args, **kwds)
-    #elapsed = time.time() - start
-    #print "%.2f secs" % (elapsed,)
-    return res
-
 def gengraph(func, argtypes=[], viewbefore='auto', policy=None,
              backendopt=False, config=None, **extraconfigopts):
     t = TranslationContext(config=config)
     t.config.set(**extraconfigopts)
     a = t.buildannotator(policy=policy)
-    timelog("annotating", a.build_types, func, argtypes, main_entry_point=True)
+    a.build_types(func, argtypes, main_entry_point=True)
     a.validate()
     if viewbefore == 'auto':
         viewbefore = getattr(option, 'view', False)
@@ -49,13 +39,13 @@
         t.view()
     global typer # we need it for find_exception
     typer = t.buildrtyper()
-    timelog("rtyper-specializing", typer.specialize)
+    typer.specialize()
     #t.view()
-    timelog("checking graphs", t.checkgraphs)
+    t.checkgraphs()
     if backendopt:
         from rpython.translator.backendopt.all import backend_optimizations
         backend_optimizations(t)
-        timelog("checking graphs", t.checkgraphs)
+        t.checkgraphs()
         if viewbefore:
             t.view()
     desc = t.annotator.bookkeeper.getdesc(func)
diff --git a/rpython/translator/backendopt/all.py b/rpython/translator/backendopt/all.py
--- a/rpython/translator/backendopt/all.py
+++ b/rpython/translator/backendopt/all.py
@@ -151,8 +151,6 @@
                                 inline_heuristic,
                                 call_count_pred=None,
                                 inline_graph_from_anywhere=False):
-
-    type_system = translator.rtyper.type_system.name
     # inline functions in each other
     if inline_threshold:
         log.inlining("phase with threshold factor: %s" % inline_threshold)
@@ -171,7 +169,7 @@
     # vaporize mallocs
     if config.mallocs:
         log.malloc("starting malloc removal")
-        remove_mallocs(translator, graphs, type_system)
+        remove_mallocs(translator, graphs)
 
         if config.print_statistics:
             print "after malloc removal:"
diff --git a/rpython/translator/backendopt/malloc.py b/rpython/translator/backendopt/malloc.py
--- a/rpython/translator/backendopt/malloc.py
+++ b/rpython/translator/backendopt/malloc.py
@@ -536,17 +536,17 @@
             raise AssertionError(op.opname)
 
 
-def remove_simple_mallocs(graph, type_system='lltypesystem', verbose=True):
+def remove_simple_mallocs(graph, verbose=True):
     remover = LLTypeMallocRemover(verbose)
     return remover.remove_simple_mallocs(graph)
 
 
-def remove_mallocs(translator, graphs=None, type_system="lltypesystem"):
+def remove_mallocs(translator, graphs=None):
     if graphs is None:
         graphs = translator.graphs
     tot = 0
     for graph in graphs:
-        count = remove_simple_mallocs(graph, type_system=type_system, verbose=translator.config.translation.verbose)
+        count = remove_simple_mallocs(graph, verbose=translator.config.translation.verbose)
         if count:
             # remove typical leftovers from malloc removal
             removenoops.remove_same_as(graph)
diff --git a/rpython/translator/backendopt/test/test_all.py b/rpython/translator/backendopt/test/test_all.py
--- a/rpython/translator/backendopt/test/test_all.py
+++ b/rpython/translator/backendopt/test/test_all.py
@@ -42,7 +42,6 @@
 HUGE_THRESHOLD  = 100*INLINE_THRESHOLD_FOR_TEST
 
 class TestLLType(object):
-    type_system = 'lltype'
     check_malloc_removed = MallocRemovalTest.check_malloc_removed
 
     def translateopt(self, func, sig, **optflags):
diff --git a/rpython/translator/backendopt/test/test_inline.py b/rpython/translator/backendopt/test/test_inline.py
--- a/rpython/translator/backendopt/test/test_inline.py
+++ b/rpython/translator/backendopt/test/test_inline.py
@@ -47,8 +47,6 @@
         self.data2 = 456
 
 class TestInline(BaseRtypingTest):
-    type_system = 'lltype'
-
     def translate(self, func, argtypes):
         t = TranslationContext()
         t.buildannotator().build_types(func, argtypes)
diff --git a/rpython/translator/backendopt/test/test_malloc.py b/rpython/translator/backendopt/test/test_malloc.py
--- a/rpython/translator/backendopt/test/test_malloc.py
+++ b/rpython/translator/backendopt/test/test_malloc.py
@@ -10,7 +10,6 @@
 from rpython.conftest import option
 
 class TestMallocRemoval(object):
-    type_system = 'lltype'
     MallocRemover = LLTypeMallocRemover
 
     def check_malloc_removed(cls, graph):
diff --git a/rpython/translator/backendopt/test/test_mallocv.py b/rpython/translator/backendopt/test/test_mallocv.py
--- a/rpython/translator/backendopt/test/test_mallocv.py
+++ b/rpython/translator/backendopt/test/test_mallocv.py
@@ -17,8 +17,6 @@
 
 
 class TestMallocRemoval(object):
-    type_system = 'lltype'
-
     def check_malloc_removed(cls, graph, expected_mallocs, expected_calls):
         count_mallocs = 0
         count_calls = 0
diff --git a/rpython/translator/backendopt/test/test_storesink.py b/rpython/translator/backendopt/test/test_storesink.py
--- a/rpython/translator/backendopt/test/test_storesink.py
+++ b/rpython/translator/backendopt/test/test_storesink.py
@@ -7,8 +7,6 @@
 from rpython.conftest import option
 
 class TestStoreSink(object):
-    type_system = 'lltype'
-
     def translate(self, func, argtypes):
         t = TranslationContext()
         t.buildannotator().build_types(func, argtypes)
diff --git a/rpython/translator/backendopt/test/test_writeanalyze.py b/rpython/translator/backendopt/test/test_writeanalyze.py
--- a/rpython/translator/backendopt/test/test_writeanalyze.py
+++ b/rpython/translator/backendopt/test/test_writeanalyze.py
@@ -7,8 +7,6 @@
 
 
 class BaseTest(object):
-
-    type_system = 'lltype'
     Analyzer = WriteAnalyzer
 
     def translate(self, func, sig):
diff --git a/rpython/translator/test/test_exceptiontransform.py b/rpython/translator/test/test_exceptiontransform.py
--- a/rpython/translator/test/test_exceptiontransform.py
+++ b/rpython/translator/test/test_exceptiontransform.py
@@ -27,8 +27,6 @@
     return interp.eval_graph(graph, values)
 
 class TestExceptionTransform:
-    type_system = 'lltype'
-
     def compile(self, fn, inputargs):
         from rpython.translator.c.test.test_genc import compile
         return compile(fn, inputargs)
@@ -239,7 +237,7 @@
         etrafo.create_exception_handling(g)
         ops = dict.fromkeys([o.opname for b, o in g.iterblockops()])
         assert 'zero_gc_pointers_inside' in ops
-    
+
     def test_llexternal(self):
         from rpython.rtyper.lltypesystem.rffi import llexternal
         from rpython.rtyper.lltypesystem import lltype
_______________________________________________
pypy-commit mailing list
[email protected]
https://mail.python.org/mailman/listinfo/pypy-commit

Reply via email to