Author: Richard Plangger <[email protected]>
Branch: s390x-backend
Changeset: r80842:7372febfd770
Date: 2015-11-23 08:17 +0100
http://bitbucket.org/pypy/pypy/changeset/7372febfd770/

Log:    merged default

diff too long, truncating to 2000 out of 2102 lines

diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py
--- a/lib_pypy/greenlet.py
+++ b/lib_pypy/greenlet.py
@@ -88,9 +88,19 @@
         #
         try:
             unbound_method = getattr(_continulet, methodname)
+            _tls.leaving = current
             args, kwds = unbound_method(current, *baseargs, to=target)
-        finally:
             _tls.current = current
+        except:
+            _tls.current = current
+            if hasattr(_tls, 'trace'):
+                _run_trace_callback('throw')
+            _tls.leaving = None
+            raise
+        else:
+            if hasattr(_tls, 'trace'):
+                _run_trace_callback('switch')
+            _tls.leaving = None
         #
         if kwds:
             if args:
@@ -122,6 +132,34 @@
         return f.f_back.f_back.f_back   # go past start(), __switch(), switch()
 
 # ____________________________________________________________
+# Recent additions
+
+GREENLET_USE_GC = True
+GREENLET_USE_TRACING = True
+
+def gettrace():
+    return getattr(_tls, 'trace', None)
+
+def settrace(callback):
+    try:
+        prev = _tls.trace
+        del _tls.trace
+    except AttributeError:
+        prev = None
+    if callback is not None:
+        _tls.trace = callback
+    return prev
+
+def _run_trace_callback(event):
+    try:
+        _tls.trace(event, (_tls.leaving, _tls.current))
+    except:
+        # In case of exceptions trace function is removed
+        if hasattr(_tls, 'trace'):
+            del _tls.trace
+        raise
+
+# ____________________________________________________________
 # Internal stuff
 
 try:
@@ -143,22 +181,32 @@
     _tls.current = gmain
 
 def _greenlet_start(greenlet, args):
-    args, kwds = args
-    _tls.current = greenlet
     try:
-        res = greenlet.run(*args, **kwds)
-    except GreenletExit, e:
-        res = e
+        args, kwds = args
+        _tls.current = greenlet
+        try:
+            if hasattr(_tls, 'trace'):
+                _run_trace_callback('switch')
+            res = greenlet.run(*args, **kwds)
+        except GreenletExit, e:
+            res = e
+        finally:
+            _continuation.permute(greenlet, greenlet.parent)
+        return ((res,), None)
     finally:
-        _continuation.permute(greenlet, greenlet.parent)
-    return ((res,), None)
+        _tls.leaving = greenlet
 
 def _greenlet_throw(greenlet, exc, value, tb):
-    _tls.current = greenlet
     try:
-        raise exc, value, tb
-    except GreenletExit, e:
-        res = e
+        _tls.current = greenlet
+        try:
+            if hasattr(_tls, 'trace'):
+                _run_trace_callback('throw')
+            raise exc, value, tb
+        except GreenletExit, e:
+            res = e
+        finally:
+            _continuation.permute(greenlet, greenlet.parent)
+        return ((res,), None)
     finally:
-        _continuation.permute(greenlet, greenlet.parent)
-    return ((res,), None)
+        _tls.leaving = greenlet
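
The greenlet.py hunk above adds the gettrace()/settrace() tracing hooks known from
upstream greenlet: the installed callback is invoked as callback(event, (origin, target)),
with event being 'switch' or 'throw'.  A minimal usage sketch in Python 2 (not part of
this changeset; the new test_greenlet_tracing.py further below exercises the same API):

    import greenlet

    def trace(event, args):
        # event is 'switch' or 'throw'; args is the (origin, target) pair
        origin, target = args
        print event, origin, target

    old = greenlet.settrace(trace)       # install; returns the previous callback
    try:
        g = greenlet.greenlet(lambda: None)
        g.switch()                       # emits 'switch' events via trace()
    finally:
        greenlet.settrace(old)           # restore the previous tracer
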
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -1,7 +1,18 @@
 =========================
-What's new in PyPy 4.0.+
+What's new in PyPy 4.1.+
 =========================
 
 .. this is a revision shortly after release-4.0.1
 .. startrev: 4b5c840d0da2
 
+.. branch: numpy-1.10
+
+Fix tests to run cleanly with -A and start to fix micronumpy for upstream numpy
+which is now 1.10.2
+
+.. branch: osx-flat-namespace
+
+Fix the cpyext tests on OSX by linking with -flat_namespace
+
+.. branch: anntype
+Refactor and improve exception analysis in the annotator.
diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py
--- a/pypy/goal/targetpypystandalone.py
+++ b/pypy/goal/targetpypystandalone.py
@@ -302,7 +302,7 @@
     
     def hack_for_cffi_modules(self, driver):
         # HACKHACKHACK
-        # ugly hack to modify target goal from compile_c to build_cffi_imports
+        # ugly hack to modify target goal from compile_* to build_cffi_imports
         # this should probably get cleaned up and merged with driver.create_exe
         from rpython.translator.driver import taskdef
         import types
@@ -316,7 +316,8 @@
                 name = name.new(ext='exe')
             return name
 
-        @taskdef(['compile_c'], "Create cffi bindings for modules")
+        compile_goal, = driver.backend_select_goals(['compile'])
+        @taskdef([compile_goal], "Create cffi bindings for modules")
         def task_build_cffi_imports(self):
             from pypy.tool.build_cffi_imports import create_cffi_import_libraries
             ''' Use cffi to compile cffi interfaces to modules'''
@@ -335,7 +336,7 @@
             # if failures, they were already printed
             print  >> sys.stderr, str(exename),'successfully built, but errors while building the above modules will be ignored'
         driver.task_build_cffi_imports = types.MethodType(task_build_cffi_imports, driver)
-        driver.tasks['build_cffi_imports'] = driver.task_build_cffi_imports, ['compile_c']
+        driver.tasks['build_cffi_imports'] = driver.task_build_cffi_imports, [compile_goal]
         driver.default_goal = 'build_cffi_imports'
         # HACKHACKHACK end
 
diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h
--- a/pypy/module/cpyext/include/patchlevel.h
+++ b/pypy/module/cpyext/include/patchlevel.h
@@ -29,7 +29,7 @@
 #define PY_VERSION             "2.7.10"
 
 /* PyPy version as a string */
-#define PYPY_VERSION "4.0.1-alpha0"
+#define PYPY_VERSION "4.1.0-alpha0"
 
 /* Subversion Revision number of this file (not of the repository).
  * Empty since Mercurial migration. */
diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py
--- a/pypy/module/micronumpy/__init__.py
+++ b/pypy/module/micronumpy/__init__.py
@@ -34,6 +34,7 @@
         'nditer': 'nditer.W_NDIter',
 
         'set_docstring': 'support.descr_set_docstring',
+        'VisibleDeprecationWarning': 'support.W_VisibleDeprecationWarning',
     }
     for c in ['MAXDIMS', 'CLIP', 'WRAP', 'RAISE']:
         interpleveldefs[c] = 'space.wrap(constants.%s)' % c
@@ -42,6 +43,7 @@
         from pypy.module.micronumpy.concrete import _setup
         _setup()
 
+
 class UMathModule(MixedModule):
     appleveldefs = {}
     interpleveldefs = {
@@ -138,3 +140,9 @@
         'multiarray': MultiArrayModule,
         'umath': UMathModule,
     }
+
+    def setup_after_space_initialization(self):
+        from pypy.module.micronumpy.support import W_VisibleDeprecationWarning
+        for name, w_type in {'VisibleDeprecationWarning': W_VisibleDeprecationWarning}.items():
+            setattr(self.space, 'w_' + name, self.space.gettypefor(w_type))
+
diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py
--- a/pypy/module/micronumpy/base.py
+++ b/pypy/module/micronumpy/base.py
@@ -44,7 +44,7 @@
         from pypy.module.micronumpy.strides import calc_strides
         if len(shape) > NPY.MAXDIMS:
             raise oefmt(space.w_ValueError,
-                "sequence too large; must be smaller than %d", NPY.MAXDIMS)
+                "sequence too large; cannot be greater than %d", NPY.MAXDIMS)
         try:
             ovfcheck(support.product_check(shape) * dtype.elsize)
         except OverflowError as e:
@@ -69,7 +69,7 @@
         isize = dtype.elsize
         if len(shape) > NPY.MAXDIMS:
             raise oefmt(space.w_ValueError,
-                "sequence too large; must be smaller than %d", NPY.MAXDIMS)
+                "sequence too large; cannot be greater than %d", NPY.MAXDIMS)
         try:
             totalsize = ovfcheck(support.product_check(shape) * isize)
         except OverflowError as e:
diff --git a/pypy/module/micronumpy/boxes.py b/pypy/module/micronumpy/boxes.py
--- a/pypy/module/micronumpy/boxes.py
+++ b/pypy/module/micronumpy/boxes.py
@@ -444,7 +444,7 @@
 
     @unwrap_spec(axis1=int, axis2=int)
     def descr_swapaxes(self, space, axis1, axis2):
-        return self
+        raise oefmt(space.w_ValueError, 'bad axis1 argument to swapaxes')
 
     def descr_fill(self, space, w_value):
         self.get_dtype(space).coerce(space, w_value)
@@ -573,7 +573,7 @@
         try:
             ofs, dtype = self.dtype.fields[item]
         except KeyError:
-            raise oefmt(space.w_IndexError, "invalid index")
+            raise oefmt(space.w_ValueError, "no field of name %s", item)
 
         from pypy.module.micronumpy.types import VoidType
         if isinstance(dtype.itemtype, VoidType):
diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py
--- a/pypy/module/micronumpy/compile.py
+++ b/pypy/module/micronumpy/compile.py
@@ -65,6 +65,7 @@
     w_KeyError = W_TypeObject("KeyError")
     w_SystemExit = W_TypeObject("SystemExit")
     w_KeyboardInterrupt = W_TypeObject("KeyboardInterrupt")
+    w_VisibleDeprecationWarning = W_TypeObject("VisibleDeprecationWarning")
     w_None = None
 
     w_bool = W_TypeObject("bool")
@@ -402,6 +403,9 @@
         assert isinstance(w_check_class, W_TypeObject)
         return w_exc_type.name == w_check_class.name
 
+    def warn(self, w_msg, w_warn_type):
+        pass
+
 class FloatObject(W_Root):
     tp = FakeSpace.w_float
     def __init__(self, floatval):
diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py
--- a/pypy/module/micronumpy/concrete.py
+++ b/pypy/module/micronumpy/concrete.py
@@ -457,7 +457,7 @@
     def set_shape(self, space, orig_array, new_shape):
         if len(new_shape) > NPY.MAXDIMS:
             raise oefmt(space.w_ValueError,
-                "sequence too large; must be smaller than %d", NPY.MAXDIMS)
+                "sequence too large; cannot be greater than %d", NPY.MAXDIMS)
         try:
             ovfcheck(support.product_check(new_shape) * self.dtype.elsize)
         except OverflowError as e:
@@ -601,7 +601,7 @@
     def set_shape(self, space, orig_array, new_shape):
         if len(new_shape) > NPY.MAXDIMS:
             raise oefmt(space.w_ValueError,
-                "sequence too large; must be smaller than %d", NPY.MAXDIMS)
+                "sequence too large; cannot be greater than %d", NPY.MAXDIMS)
         try:
             ovfcheck(support.product_check(new_shape) * self.dtype.elsize)
         except OverflowError as e:
diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py
--- a/pypy/module/micronumpy/ctors.py
+++ b/pypy/module/micronumpy/ctors.py
@@ -18,7 +18,7 @@
         raise oefmt(space.w_TypeError,
                     "argument 1 must be numpy.dtype, not %T", w_dtype)
     if w_dtype.elsize == 0:
-        raise oefmt(space.w_ValueError, "itemsize cannot be zero")
+        raise oefmt(space.w_TypeError, "Empty data-type")
     if not space.isinstance_w(w_state, space.w_str):
         raise oefmt(space.w_TypeError, "initializing object must be a string")
     if space.len_w(w_state) != w_dtype.elsize:
diff --git a/pypy/module/micronumpy/descriptor.py b/pypy/module/micronumpy/descriptor.py
--- a/pypy/module/micronumpy/descriptor.py
+++ b/pypy/module/micronumpy/descriptor.py
@@ -217,6 +217,8 @@
             endian = ignore
         if self.num == NPY.UNICODE:
             size >>= 2
+        if self.num == NPY.OBJECT:
+            return "%s%s" %(endian, basic)
         return "%s%s%s" % (endian, basic, size)
 
     def descr_get_descr(self, space, style='descr', force_dict=False):
@@ -420,6 +422,10 @@
         if space.is_w(self, w_other):
             return True
         if isinstance(w_other, W_Dtype):
+            if self.is_object() and w_other.is_object():
+                # ignore possible 'record' unions
+                # created from dtype(('O', spec))
+                return True
             return space.eq_w(self.descr_reduce(space),
                               w_other.descr_reduce(space))
         return False
@@ -485,7 +491,12 @@
 
     def descr_str(self, space):
         if self.fields:
-            return space.str(self.descr_get_descr(space, style='str'))
+            r = self.descr_get_descr(space, style='str')
+            name = space.str_w(space.str(self.w_box_type))
+            if name != "<type 'numpy.void'>":
+                boxname = space.str(self.w_box_type)
+                r = space.newtuple([self.w_box_type, r])
+            return space.str(r)
         elif self.subdtype is not None:
             return space.str(space.newtuple([
                 self.subdtype.descr_get_str(space),
@@ -497,8 +508,13 @@
                 return self.descr_get_name(space)
 
     def descr_repr(self, space):
+        if isinstance(self.itemtype, types.CharType):
+            return space.wrap("dtype('S1')")
         if self.fields:
             r = self.descr_get_descr(space, style='repr')
+            name = space.str_w(space.str(self.w_box_type))
+            if name != "<type 'numpy.void'>":
+                r = space.newtuple([space.wrap(self.w_box_type), r])
         elif self.subdtype is not None:
             r = space.newtuple([self.subdtype.descr_get_str(space),
                                 self.descr_get_shape(space)])
@@ -800,8 +816,8 @@
 def _usefields(space, w_dict, align):
     # Only for testing, a shortened version of the real _usefields
     allfields = []
-    for fname in w_dict.iterkeys().iterator:
-        obj = _get_list_or_none(space, w_dict, fname)
+    for fname_w in space.unpackiterable(w_dict):
+        obj = _get_list_or_none(space, w_dict, space.str_w(fname_w))
         num = space.int_w(obj[1])
         if align:
             alignment = 0
@@ -812,8 +828,8 @@
             title = space.wrap(obj[2])
         else:
             title = space.w_None
-        allfields.append((space.wrap(fname), format, num, title))
-    allfields.sort(key=lambda x: x[2])
+        allfields.append((fname_w, format, num, title))
+    #allfields.sort(key=lambda x: x[2])
     names   = [space.newtuple([x[0], x[3]]) for x in allfields]
     formats = [x[1] for x in allfields]
     offsets = [x[2] for x in allfields]
@@ -837,12 +853,14 @@
     aligned_w = _get_val_or_none(space, w_dict, 'aligned')
     itemsize_w = _get_val_or_none(space, w_dict, 'itemsize')
     if names_w is None or formats_w is None:
-        if we_are_translated():
+        try:
             return get_appbridge_cache(space).call_method(space,
                 'numpy.core._internal', '_usefields', Arguments(space, 
                                 [w_dict, space.wrap(alignment >= 0)]))
-        else:
-            return _usefields(space, w_dict, alignment >= 0)
+        except OperationError as e:
+            if e.match(space, space.w_ImportError):
+                return _usefields(space, w_dict, alignment >= 0)
+            raise
     n = len(names_w)
     if (n != len(formats_w) or 
         (offsets_w is not None and n != len(offsets_w)) or
@@ -882,16 +900,17 @@
 
 def dtype_from_spec(space, w_spec, alignment):
 
-    if we_are_translated():
+    w_lst = w_spec
+    try:
         w_lst = get_appbridge_cache(space).call_method(space,
             'numpy.core._internal', '_commastring', Arguments(space, [w_spec]))
-    else:
+    except OperationError as e:
+        if not e.match(space, space.w_ImportError):
+            raise
         # handle only simple cases for testing
         if space.isinstance_w(w_spec, space.w_str):
             spec = [s.strip() for s in space.str_w(w_spec).split(',')]
             w_lst = space.newlist([space.wrap(s) for s in spec]) 
-        elif space.isinstance_w(w_spec, space.w_list):
-            w_lst = w_spec
     if not space.isinstance_w(w_lst, space.w_list) or space.len_w(w_lst) < 1:
         raise oefmt(space.w_RuntimeError,
                     "_commastring is not returning a list with len >= 1")
@@ -942,7 +961,7 @@
     shape_w = space.fixedview(w_shape)
     if len(shape_w) < 1:
         return None
-    elif len(shape_w) == 1 and space.isinstance_w(shape_w[0], space.w_tuple):
+    elif space.isinstance_w(shape_w[0], space.w_tuple):
         # (base_dtype, new_dtype) dtype spectification
         return None
     shape = []
@@ -997,12 +1016,17 @@
         if len(spec) > 0:
             # this is (base_dtype, new_dtype) so just make it a union by setting both
             # parts' offset to 0
-            try:
-                dtype1 = make_new_dtype(space, w_subtype, w_shape, alignment)
-            except:
-                raise
-            raise oefmt(space.w_NotImplementedError, 
-                "(base_dtype, new_dtype) dtype spectification discouraged, not 
implemented")
+            w_dtype1 = make_new_dtype(space, w_subtype, w_shape, alignment)
+            assert isinstance(w_dtype, W_Dtype)
+            assert isinstance(w_dtype1, W_Dtype)
+            if (w_dtype.elsize != 0 and w_dtype1.elsize != 0 and 
+                    w_dtype1.elsize != w_dtype.elsize):
+                raise oefmt(space.w_ValueError,
+                    'mismatch in size of old and new data-descriptor')
+            retval = W_Dtype(w_dtype.itemtype, w_dtype.w_box_type,
+                    names=w_dtype1.names[:], fields=w_dtype1.fields.copy(),
+                    elsize=w_dtype1.elsize)
+            return retval
     if space.is_none(w_dtype):
         return cache.w_float64dtype
     if space.isinstance_w(w_dtype, w_subtype):
@@ -1032,19 +1056,22 @@
     elif space.isinstance_w(w_dtype, space.w_tuple):
         w_dtype0 = space.getitem(w_dtype, space.wrap(0))
         w_dtype1 = space.getitem(w_dtype, space.wrap(1))
-        if space.isinstance_w(w_dtype0, space.w_type) and \
-           space.isinstance_w(w_dtype1, space.w_list):
-            #obscure api - (subclass, spec). Ignore the subclass
-            return make_new_dtype(space, w_subtype, w_dtype1, alignment, 
-                        copy=copy, w_shape=w_shape, w_metadata=w_metadata)
-        subdtype = make_new_dtype(space, w_subtype, w_dtype0, alignment, copy)
-        assert isinstance(subdtype, W_Dtype)
-        if subdtype.elsize == 0:
-            name = "%s%d" % (subdtype.kind, space.int_w(w_dtype1))
+        # create a new dtype object
+        l_side = make_new_dtype(space, w_subtype, w_dtype0, alignment, copy)
+        assert isinstance(l_side, W_Dtype)
+        if l_side.elsize == 0 and space.isinstance_w(w_dtype1, space.w_int):
+            #(flexible_dtype, itemsize)
+            name = "%s%d" % (l_side.kind, space.int_w(w_dtype1))
             retval = make_new_dtype(space, w_subtype, space.wrap(name), alignment, copy)
-        else:
-            retval = make_new_dtype(space, w_subtype, w_dtype0, alignment, copy, w_shape=w_dtype1)
-        return _set_metadata_and_copy(space, w_metadata, retval, copy)
+            return _set_metadata_and_copy(space, w_metadata, retval, copy)
+        elif (space.isinstance_w(w_dtype1, space.w_int) or
+                space.isinstance_w(w_dtype1, space.w_tuple) or 
+                space.isinstance_w(w_dtype1, space.w_list) or 
+                isinstance(w_dtype1, W_NDimArray)):
+            #(fixed_dtype, shape) or (base_dtype, new_dtype)
+            retval = make_new_dtype(space, w_subtype, l_side, alignment,
+                                    copy, w_shape=w_dtype1)
+            return _set_metadata_and_copy(space, w_metadata, retval, copy)
     elif space.isinstance_w(w_dtype, space.w_dict):
         return _set_metadata_and_copy(space, w_metadata,
                 dtype_from_dict(space, w_dtype, alignment), copy)
@@ -1122,7 +1149,7 @@
             size = int(name[1:])
         except ValueError:
             raise oefmt(space.w_TypeError, "data type not understood")
-    if char == NPY.CHARLTR:
+    if char == NPY.CHARLTR and size == 0:
         return W_Dtype(
             types.CharType(space),
             elsize=1,
@@ -1133,7 +1160,7 @@
         return new_unicode_dtype(space, size)
     elif char == NPY.VOIDLTR:
         return new_void_dtype(space, size)
-    assert False
+    raise oefmt(space.w_TypeError, 'data type "%s" not understood', name)
 
 
 def new_string_dtype(space, size):
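
The descriptor.py changes above make dtype((base_dtype, new_dtype)) build a union dtype
instead of raising NotImplementedError, include the void subclass in str()/repr(), and
reject sized character specs such as 'c1'.  A rough Python 2 sketch of the resulting
behaviour (not part of this changeset; the updated test_dtypes.py below asserts the same):

    import numpy as np

    class xyz(np.void):
        pass

    # (base_dtype, new_dtype): the subclass is kept and shows up in repr()
    a = np.dtype((xyz, [('x', 'int32'), ('y', 'float32')]))
    assert 'xyz' in repr(a)

    # sized 'c' specs no longer alias 'S1'; only bare 'c' remains valid
    try:
        np.dtype('c1')
    except TypeError as e:
        print 'rejected:', e
    assert repr(np.dtype('c')) == "dtype('S1')"
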
diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py
--- a/pypy/module/micronumpy/ndarray.py
+++ b/pypy/module/micronumpy/ndarray.py
@@ -15,7 +15,7 @@
 from pypy.module.micronumpy.arrayops import repeat, choose, put
 from pypy.module.micronumpy.base import W_NDimArray, convert_to_array, \
     ArrayArgumentException, wrap_impl
-from pypy.module.micronumpy.concrete import BaseConcreteArray
+from pypy.module.micronumpy.concrete import BaseConcreteArray, V_OBJECTSTORE
 from pypy.module.micronumpy.converters import (
     multi_axis_converter, order_converter, shape_converter,
     searchside_converter, out_converter)
@@ -75,7 +75,7 @@
         dtype = space.interp_w(descriptor.W_Dtype, space.call_function(
             space.gettypefor(descriptor.W_Dtype), w_dtype))
         if (dtype.elsize != self.get_dtype().elsize or
-                dtype.is_flexible() or self.get_dtype().is_flexible()):
+                (not dtype.is_record() and self.get_dtype().is_flexible())):
             raise OperationError(space.w_ValueError, space.wrap(
                 "new type not compatible with array."))
         self.implementation.set_dtype(space, dtype)
@@ -116,6 +116,13 @@
                 "index out of range for array"))
         size = loop.count_all_true(arr)
         if arr.ndims() == 1:
+            if self.ndims() > 1 and arr.get_shape()[0] != self.get_shape()[0]:
+                msg = ("boolean index did not match indexed array along"
+                      " dimension 0; dimension is %d but corresponding"
+                      " boolean dimension is %d" % (self.get_shape()[0],
+                      arr.get_shape()[0]))
+                #warning = space.gettypefor(support.W_VisibleDeprecationWarning)
+                space.warn(space.wrap(msg), space.w_VisibleDeprecationWarning)
             res_shape = [size] + self.get_shape()[1:]
         else:
             res_shape = [size]
@@ -278,9 +285,12 @@
     def getfield(self, space, field):
         dtype = self.get_dtype()
         if field not in dtype.fields:
-            raise oefmt(space.w_ValueError, "field named %s not found", field)
+            raise oefmt(space.w_ValueError, "no field of name %s", field)
         arr = self.implementation
         ofs, subdtype = arr.dtype.fields[field][:2]
+        if subdtype.is_object() and arr.gcstruct is V_OBJECTSTORE:
+            raise oefmt(space.w_NotImplementedError,
+                "cannot read object from array with no gc hook")
         # ofs only changes start
         # create a view of the original array by extending
         # the shape, strides, backstrides of the array
@@ -489,10 +499,8 @@
         numpy.swapaxes : equivalent function
         """
         if axis1 == axis2:
-            return self
+            return self.descr_view(space)
         n = self.ndims()
-        if n <= 1:
-            return self
         if axis1 < 0:
             axis1 += n
         if axis2 < 0:
@@ -501,6 +509,8 @@
             raise oefmt(space.w_ValueError, "bad axis1 argument to swapaxes")
         if axis2 < 0 or axis2 >= n:
             raise oefmt(space.w_ValueError, "bad axis2 argument to swapaxes")
+        if n <= 1:
+            return self
         return self.implementation.swapaxes(space, self, axis1, axis2)
 
     def descr_nonzero(self, space):
@@ -899,7 +909,7 @@
                     if cur_shape[i] != 1:
                         raise OperationError(space.w_ValueError, space.wrap(
                             "cannot select an axis to squeeze out "
-                            "which has size greater than one"))
+                            "which has size not equal to one"))
                 else:
                     new_shape.append(cur_shape[i])
         else:
@@ -995,7 +1005,7 @@
     # --------------------- operations ----------------------------
     # TODO: support all kwargs like numpy ufunc_object.c
     sig = None
-    cast = 'unsafe'
+    cast = 'safe'
     extobj = None
 
 
@@ -1374,7 +1384,7 @@
     shape = shape_converter(space, w_shape, dtype)
     if len(shape) > NPY.MAXDIMS:
         raise oefmt(space.w_ValueError,
-            "sequence too large; must be smaller than %d", NPY.MAXDIMS)
+            "sequence too large; cannot be greater than %d", NPY.MAXDIMS)
     if not space.is_none(w_buffer):
         if (not space.is_none(w_strides)):
             strides = [space.int_w(w_i) for w_i in
diff --git a/pypy/module/micronumpy/support.py b/pypy/module/micronumpy/support.py
--- a/pypy/module/micronumpy/support.py
+++ b/pypy/module/micronumpy/support.py
@@ -8,6 +8,17 @@
 from pypy.objspace.std.typeobject import W_TypeObject
 from pypy.objspace.std.objspace import StdObjSpace
 from pypy.module.micronumpy import constants as NPY
+from pypy.module.exceptions.interp_exceptions import _new_exception, W_UserWarning
+
+W_VisibleDeprecationWarning = _new_exception('VisibleDeprecationWarning', W_UserWarning,
+    """Visible deprecation warning.
+
+    By default, python will not show deprecation warnings, so this class
+    can be used when a very visible warning is helpful, for example because
+    the usage is most likely a user bug.
+
+    """)
+
 
 def issequence_w(space, w_obj):
     from pypy.module.micronumpy.base import W_NDimArray
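
The support.py hunk above defines numpy.VisibleDeprecationWarning as an app-level
exception deriving from UserWarning; ndarray.py (earlier in this diff) issues it through
space.warn() when a 1-d boolean index does not match the length of dimension 0.  A
minimal app-level sketch of how it surfaces (not part of this changeset; the new
test_deprecations.py below tests the same path):

    import warnings
    import numpy as np

    # promote the warning to an error so it is easy to observe
    warnings.simplefilter('error', np.VisibleDeprecationWarning)
    arr = np.ones((5, 4, 3))
    try:
        arr[np.array([True])]          # boolean index shorter than dimension 0
    except np.VisibleDeprecationWarning as e:
        print 'caught:', e
    finally:
        warnings.simplefilter('default', np.VisibleDeprecationWarning)
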
diff --git a/pypy/module/micronumpy/test/test_deprecations.py b/pypy/module/micronumpy/test/test_deprecations.py
new file mode 100644
--- /dev/null
+++ b/pypy/module/micronumpy/test/test_deprecations.py
@@ -0,0 +1,33 @@
+import py
+import sys
+
+from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest
+
+
+class AppTestDeprecations(BaseNumpyAppTest):
+    spaceconfig = dict(usemodules=["micronumpy", "struct", "binascii"])
+
+    def test_getitem(self):
+        import numpy as np
+        import warnings, sys
+        warnings.simplefilter('error', np.VisibleDeprecationWarning)
+        try:
+            arr = np.ones((5, 4, 3))
+            index = np.array([True])
+            raises(np.VisibleDeprecationWarning, arr.__getitem__, index)
+
+            index = np.array([False] * 6)
+            raises(np.VisibleDeprecationWarning, arr.__getitem__, index)
+
+            index = np.zeros((4, 4), dtype=bool)
+            if '__pypy__' in sys.builtin_module_names:
+                # boolean indexing matches the dims in index
+                # to the first index.ndims in arr, not implemented in pypy yet
+                raises(IndexError, arr.__getitem__, index)
+                raises(TypeError, arr.__getitem__, (slice(None), index))
+            else:
+                raises(np.VisibleDeprecationWarning, arr.__getitem__, index)
+                raises(np.VisibleDeprecationWarning, arr.__getitem__, (slice(None), index))
+        finally:
+            warnings.simplefilter('default', np.VisibleDeprecationWarning)
+
diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py
--- a/pypy/module/micronumpy/test/test_dtypes.py
+++ b/pypy/module/micronumpy/test/test_dtypes.py
@@ -345,14 +345,29 @@
 
     def test_can_subclass(self):
         import numpy as np
+        import sys, pickle
         class xyz(np.void):
             pass
         assert np.dtype(xyz).name == 'xyz'
         # another obscure API, used in numpy record.py
-        # it seems numpy throws away the subclass type and parses the spec
         a = np.dtype((xyz, [('x', 'int32'), ('y', 'float32')]))
-        assert repr(a) == "dtype([('x', '<i4'), ('y', '<f4')])"
-
+        assert "[('x', '<i4'), ('y', '<f4')]" in repr(a)
+        assert 'xyz' in repr(a)
+        data = [(1, 'a'), (2, 'bbb')]
+        b = np.dtype((xyz, [('a', int), ('b', object)]))
+        if '__pypy__' in sys.builtin_module_names:
+            raises(NotImplementedError, np.array, data, dtype=b)
+        else:
+            arr = np.array(data, dtype=b)
+            assert arr[0][0] == 1
+            assert arr[0][1] == 'a'
+        b = np.dtype((xyz, [("col1", "<i4"), ("col2", "<i4"), ("col3", 
"<i4")]))
+        data = [(1, 2,3), (4, 5, 6)]
+        a = np.array(data, dtype=b)
+        x = pickle.loads(pickle.dumps(a))
+        assert (x == a).all()
+        assert x.dtype == a.dtype 
+        
     def test_index(self):
         import numpy as np
         for dtype in [np.int8, np.int16, np.int32, np.int64]:
@@ -486,20 +501,11 @@
         class O(object):
             pass
         for o in [object, O]:
-            if self.ptr_size == 4:
-                assert np.dtype(o).str == '|O4'
-            elif self.ptr_size == 8:
-                assert np.dtype(o).str == '|O8'
-            else:
-                assert False,'self._ptr_size unknown'
+            assert np.dtype(o).str == '|O'
         # Issue gh-2798
-        if '__pypy__' in sys.builtin_module_names:
-            a = np.array(['a'], dtype="O")
-            raises(NotImplementedError, a.astype, ("O", [("name", "O")]))
-            skip("(base_dtype, new_dtype) dtype specification discouraged")
         a = np.array(['a'], dtype="O").astype(("O", [("name", "O")]))
         assert a[0] == 'a'
-        assert a == 'a'
+        assert a != 'a'
         assert a['name'].dtype == a.dtype
 
 class AppTestTypes(BaseAppTestDtypes):
@@ -1038,13 +1044,7 @@
             assert d.name == "string64"
             assert d.num == 18
         for i in [1, 2, 3]:
-            d = dtype('c%d' % i)
-            assert d.itemsize == 1
-            assert d.kind == 'S'
-            assert d.type is str_
-            assert d.name == 'string8'
-            assert d.num == 18
-            assert d.str == '|S1'
+            raises(TypeError, dtype, 'c%d' % i)
 
     def test_unicode_dtype(self):
         from numpy import dtype, unicode_
@@ -1068,6 +1068,7 @@
         assert d.char == 'c'
         assert d.kind == 'S'
         assert d.str == '|S1'
+        assert repr(d) == "dtype('S1')"
 
 class AppTestRecordDtypes(BaseNumpyAppTest):
     spaceconfig = dict(usemodules=["micronumpy", "struct", "binascii"])
diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py
--- a/pypy/module/micronumpy/test/test_ndarray.py
+++ b/pypy/module/micronumpy/test/test_ndarray.py
@@ -269,7 +269,7 @@
         assert (y == x.T).all()
 
         exc = raises(ValueError, ndarray, [1,2,256]*10000)
-        assert exc.value[0] == 'sequence too large; must be smaller than 32'
+        assert exc.value[0] == 'sequence too large; cannot be greater than 32'
         exc = raises(ValueError, ndarray, [1,2,256]*10)
         assert exc.value[0] == 'array is too big.'
 
@@ -838,14 +838,19 @@
 
     def test_build_scalar(self):
         from numpy import dtype
+        import sys
         try:
             from numpy.core.multiarray import scalar
         except ImportError:
             from numpy import scalar
         exc = raises(TypeError, scalar, int, 2)
         assert exc.value[0] == 'argument 1 must be numpy.dtype, not type'
-        exc = raises(ValueError, scalar, dtype('void'), 'abc')
-        assert exc.value[0] == 'itemsize cannot be zero'
+        if '__pypy__' in sys.builtin_module_names:
+            exc = raises(TypeError, scalar, dtype('void'), 'abc')
+        else:
+            a = scalar(dtype('void'), 'abc')
+            exc = raises(TypeError, str, a)
+        assert exc.value[0] == 'Empty data-type'
         exc = raises(TypeError, scalar, dtype(float), 2.5)
         assert exc.value[0] == 'initializing object must be a string'
         exc = raises(ValueError, scalar, dtype(float), 'abc')
@@ -1081,14 +1086,6 @@
             for i in range(5):
                 assert a[i] == getattr(c[i], reg_op).__call__(d[i])
 
-    def test_inplace_cast(self):
-        import numpy as np
-        a = np.zeros(5, dtype=np.float64)
-        b = np.zeros(5, dtype=np.complex64)
-        a += b
-        assert a.sum() == 0
-        assert a.dtype is np.dtype(np.float64)
-
     def test_add_list(self):
         from numpy import array, ndarray
         a = array(range(5))
@@ -1965,7 +1962,7 @@
         assert len(a) == 6
         assert (a == [0,1,2,3,4,5]).all()
         assert a.dtype is dtype(int)
-        a = concatenate((a1, a2), axis=1)
+        a = concatenate((a1, a2), axis=0)
         assert (a == [0,1,2,3,4,5]).all()
         a = concatenate((a1, a2), axis=-1)
         assert (a == [0,1,2,3,4,5]).all()
@@ -2013,7 +2010,7 @@
 
         g1 = array([0,1,2])
         g2 = array([[3,4,5]])
-        exc = raises(ValueError, concatenate, (g1, g2), axis=2)
+        exc = raises(ValueError, concatenate, (g1, g2), axis=0)
         assert str(exc.value) == \
                 "all the input arrays must have same number of dimensions"
 
@@ -2129,16 +2126,16 @@
         assert exc.value.message == "'axis' entry 5 is out of bounds [-4, 4)"
         exc = raises(ValueError, a.squeeze, 0)
         assert exc.value.message == "cannot select an axis to squeeze out " \
-                                    "which has size greater than one"
+                                    "which has size not equal to one"
         exc = raises(ValueError, a.squeeze, (1, 1))
         assert exc.value.message == "duplicate value in 'axis'"
 
     def test_swapaxes(self):
         from numpy import array
         x = array([])
-        assert x.swapaxes(0, 2) is x
+        raises(ValueError, x.swapaxes,0, 2)
         x = array([[1, 2]])
-        assert x.swapaxes(0, 0) is x
+        assert x.swapaxes(0, 0) is not x
         exc = raises(ValueError, x.swapaxes, -3, 0)
         assert exc.value.message == "bad axis1 argument to swapaxes"
         exc = raises(ValueError, x.swapaxes, 0, 3)
@@ -2169,7 +2166,7 @@
         # test virtual
         assert ((x + x).swapaxes(0,1) == array([[[ 2,  4,  6], [14, 16, 18]],
                                          [[ 8, 10, 12], [20, 22, 24]]])).all()
-        assert array(1).swapaxes(10, 12) == 1
+        raises(ValueError, array(1).swapaxes, 10, 12)
 
     def test_filter_bug(self):
         from numpy import array
@@ -2410,6 +2407,7 @@
 
     def test_data(self):
         from numpy import array
+        import sys
         a = array([1, 2, 3, 4], dtype='i4')
         assert a.data[0] == '\x01'
         assert a.data[1] == '\x00'
@@ -2418,7 +2416,8 @@
         assert a[1] == 0xff
         assert len(a.data) == 16
         assert type(a.data) is buffer
-        assert a[1:].data._pypy_raw_address() - a.data._pypy_raw_address() == a.strides[0]
+        if '__pypy__' in sys.builtin_module_names:
+            assert a[1:].data._pypy_raw_address() - a.data._pypy_raw_address() == a.strides[0]
 
     def test_explicit_dtype_conversion(self):
         from numpy import array
@@ -2505,7 +2504,7 @@
 
     def test_string_filling(self):
         import numpy
-        a = numpy.empty((10,10), dtype='c1')
+        a = numpy.empty((10,10), dtype='S1')
         a.fill(12)
         assert (a == '1').all()
 
@@ -3073,7 +3072,8 @@
         assert (b == zeros(10)).all()
 
     def test_array_interface(self):
-        from numpy import array, ones
+        from numpy import array
+        import numpy as np
         a = array(2.5)
         i = a.__array_interface__
         assert isinstance(i['data'][0], int)
@@ -3095,9 +3095,10 @@
         assert b_data + 3 * b.dtype.itemsize == c_data
 
         class Dummy(object):
-            def __init__(self, aif=None):
+            def __init__(self, aif=None, base=None):
                 if aif is not None:
                     self.__array_interface__ = aif
+                self.base = base
 
         a = array(Dummy())
         assert a.dtype == object
@@ -3125,12 +3126,22 @@
         assert b.dtype == 'uint8'
         assert b.shape == (50,)
 
-        a = ones((1,), dtype='float16')
+        a = np.ones((1,), dtype='float16')
         b = Dummy(a.__array_interface__)
         c = array(b)
         assert c.dtype == 'float16'
         assert (a == c).all()
 
+        t = np.dtype([("a", np.float64), ("b", np.float64)], align=True)
+        a = np.zeros(10, dtype=t)
+        a['a'] = range(10, 20)
+        a['b'] = range(20, 30)
+        interface = dict(a.__array_interface__)
+        array = np.array(Dummy(interface))
+        assert array.dtype.kind == 'V'
+        array.dtype = a.dtype
+        assert array[5]['b'] == 25
+
     def test_array_indexing_one_elem(self):
         from numpy import array, arange
         raises(IndexError, 'arange(3)[array([3.5])]')
@@ -3726,7 +3737,7 @@
         assert a[()]['y'] == 0
         assert a.shape == ()
         a = zeros(2, dtype=[('x', int), ('y', float)])
-        raises(IndexError, 'a[0]["xyz"]')
+        raises(ValueError, 'a[0]["xyz"]')
         assert a[0]['x'] == 0
         assert a[0]['y'] == 0
         exc = raises(ValueError, "a[0] = (1, 2, 3)")
@@ -3794,7 +3805,7 @@
         exc = raises(IndexError, "arr[3L]")
         assert exc.value.message == "too many indices for array"
         exc = raises(ValueError, "arr['xx'] = 2")
-        assert exc.value.message == "field named xx not found"
+        assert exc.value.message == "no field of name xx"
         assert arr['y'].dtype == a
         assert arr['y'].shape == ()
         assert arr['y'][()]['x'] == 0
@@ -3969,8 +3980,8 @@
             exc = raises(IndexError, "a[0][%d]" % v)
             assert exc.value.message == "invalid index (%d)" % \
                                         (v + 2 if v < 0 else v)
-        exc = raises(IndexError, "a[0]['z']")
-        assert exc.value.message == "invalid index"
+        exc = raises(ValueError, "a[0]['z']")
+        assert exc.value.message == "no field of name z"
         exc = raises(IndexError, "a[0][None]")
         assert exc.value.message == "invalid index"
 
@@ -4153,7 +4164,7 @@
         d = np.ones(3, dtype=[('a', 'i8'), ('b', 'i8')])
         e = np.ones(3, dtype=[('a', 'i8'), ('b', 'i8'), ('c', 'i8')])
         exc = raises(TypeError, abs, a)
-        assert exc.value[0] == 'Not implemented for this type'
+        assert exc.value[0].startswith("ufunc 'absolute' did not contain a 
loop")
         assert (a == a).all()
         assert not (a != a).any()
         assert (a == b).all()
diff --git a/pypy/module/micronumpy/test/test_object_arrays.py b/pypy/module/micronumpy/test/test_object_arrays.py
--- a/pypy/module/micronumpy/test/test_object_arrays.py
+++ b/pypy/module/micronumpy/test/test_object_arrays.py
@@ -114,9 +114,6 @@
 
     def test_array_interface(self):
         import numpy as np
-        if self.runappdirect:
-            skip('requires numpy.core, test with numpy test suite instead')
-        import sys
         class DummyArray(object):
             def __init__(self, interface, base=None):
                 self.__array_interface__ = interface
@@ -126,8 +123,6 @@
         interface = dict(a.__array_interface__)
         interface['shape'] = tuple([3])
         interface['strides'] = tuple([0])
-        if '__pypy__' in sys.builtin_module_names:
-            skip('not implemented yet')
         c = np.array(DummyArray(interface, base=a))
         c.dtype = a.dtype
         #print c
@@ -160,6 +155,9 @@
         import sys
         ytype = np.object_
         if '__pypy__' in sys.builtin_module_names:
+            dt = np.dtype([('x', int), ('y', ytype)])
+            x = np.empty((4, 0), dtype = dt)
+            raises(NotImplementedError, x.__getitem__, 'y')
             ytype = str
         dt = np.dtype([('x', int), ('y', ytype)])
         # Correct way
diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py
--- a/pypy/module/micronumpy/test/test_scalar.py
+++ b/pypy/module/micronumpy/test/test_scalar.py
@@ -142,7 +142,7 @@
         assert f.round() == 13.
         assert f.round(decimals=-1) == 10.
         assert f.round(decimals=1) == 13.4
-        assert b.round(decimals=5) is b
+        raises(TypeError, b.round, decimals=5)
         assert f.round(decimals=1, out=None) == 13.4
         assert b.round() == 1.0
 
@@ -404,8 +404,8 @@
         def _do_test(np_type, orig_val, exp_val):
             val = np_type(orig_val)
             assert val == orig_val
-            assert val.swapaxes(10, 20) == exp_val
-            assert type(val.swapaxes(0, 1)) is np_type
+            raises(ValueError, val.swapaxes, 10, 20)
+            raises(ValueError, val.swapaxes, 0, 1)
             raises(TypeError, val.swapaxes, 0, ())
 
         for t in int8, int16, int32, int64:
diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py
--- a/pypy/module/micronumpy/test/test_ufuncs.py
+++ b/pypy/module/micronumpy/test/test_ufuncs.py
@@ -86,6 +86,7 @@
 
     def test_frompyfunc_innerloop(self):
         from numpy import ufunc, frompyfunc, arange, dtype
+        import sys
         def adder(a, b):
             return a+b
         def sumdiff(a, b):
@@ -123,7 +124,10 @@
         res = int_func12(a)
         assert len(res) == 2
         assert isinstance(res, tuple)
-        assert (res[0] == a).all()
+        if '__pypy__' in sys.builtin_module_names:
+            assert (res[0] == a).all()
+        else:
+            assert all([r is None for r in res[0]]) # ??? no warning or error, just a fail?
         res = sumdiff(2 * a, a)
         assert (res[0] == 3 * a).all()
         assert (res[1] == a).all()
diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py
--- a/pypy/module/micronumpy/types.py
+++ b/pypy/module/micronumpy/types.py
@@ -450,10 +450,12 @@
 
     @specialize.argtype(1)
     def round(self, v, decimals=0):
-        if decimals != 0:
-            # numpy 1.9.0 compatible
-            return v
-        return Float64(self.space).box(self.unbox(v))
+        if decimals == 0:
+            return Float64(self.space).box(self.unbox(v))
+        # numpy 1.10 compatibility
+        raise oefmt(self.space.w_TypeError, "ufunc casting failure")
+            
+            
 
 class Integer(Primitive):
     _mixin_ = True
@@ -1849,6 +1851,9 @@
                     arr.gcstruct)
 
     def read(self, arr, i, offset, dtype):
+        if arr.gcstruct is V_OBJECTSTORE:
+            raise oefmt(self.space.w_NotImplementedError,
+                "cannot read object from array with no gc hook")
         return self.box(self._read(arr.storage, i, offset))
 
     def byteswap(self, w_v):
@@ -2412,18 +2417,20 @@
                 ofs += size
 
     def coerce(self, space, dtype, w_items):
+        if dtype.is_record():
+            # the dtype is a union of a void and a record,
+            return record_coerce(self, space, dtype, w_items)
         arr = VoidBoxStorage(dtype.elsize, dtype)
         self._coerce(space, arr, 0, dtype, w_items, dtype.shape)
         return boxes.W_VoidBox(arr, 0, dtype)
 
     @jit.unroll_safe
     def store(self, arr, i, offset, box, native):
-        assert i == 0
         assert isinstance(box, boxes.W_VoidBox)
         assert box.dtype is box.arr.dtype
         with arr as arr_storage, box.arr as box_storage:
             for k in range(box.arr.dtype.elsize):
-                arr_storage[k + offset] = box_storage[k + box.ofs]
+                arr_storage[i + k + offset] = box_storage[k + box.ofs]
 
     def readarray(self, arr, i, offset, dtype=None):
         from pypy.module.micronumpy.base import W_NDimArray
@@ -2472,17 +2479,7 @@
 class CharType(StringType):
     char = NPY.CHARLTR
 
-class RecordType(FlexibleType):
-    T = lltype.Char
-    num = NPY.VOID
-    kind = NPY.VOIDLTR
-    char = NPY.VOIDLTR
-
-    def read(self, arr, i, offset, dtype):
-        return boxes.W_VoidBox(arr, i + offset, dtype)
-
-    @jit.unroll_safe
-    def coerce(self, space, dtype, w_item):
+def record_coerce(typ, space, dtype, w_item):
         from pypy.module.micronumpy.base import W_NDimArray
         if isinstance(w_item, boxes.W_VoidBox):
             if dtype == w_item.dtype:
@@ -2520,6 +2517,19 @@
             subdtype.store(arr, 0, ofs, w_box)
         return boxes.W_VoidBox(arr, 0, dtype)
 
+class RecordType(FlexibleType):
+    T = lltype.Char
+    num = NPY.VOID
+    kind = NPY.VOIDLTR
+    char = NPY.VOIDLTR
+
+    def read(self, arr, i, offset, dtype):
+        return boxes.W_VoidBox(arr, i + offset, dtype)
+
+    @jit.unroll_safe
+    def coerce(self, space, dtype, w_item):
+        return record_coerce(self, space, dtype, w_item)
+
     def runpack_str(self, space, s, native):
         raise oefmt(space.w_NotImplementedError,
                     "fromstring not implemented for record types")
diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py
--- a/pypy/module/micronumpy/ufuncs.py
+++ b/pypy/module/micronumpy/ufuncs.py
@@ -471,7 +471,8 @@
 
     def find_specialization(self, space, dtype, out, casting):
         if dtype.is_flexible():
-            raise oefmt(space.w_TypeError, 'Not implemented for this type')
+            raise oefmt(space.w_TypeError, "ufunc '%s' did not contain a loop",
+                        self.name)
         if (not self.allow_bool and dtype.is_bool() or
                 not self.allow_complex and dtype.is_complex()):
             raise oefmt(space.w_TypeError,
@@ -555,7 +556,23 @@
         w_ldtype = w_lhs.get_dtype(space)
         w_rdtype = w_rhs.get_dtype(space)
         if w_ldtype.is_object() or w_rdtype.is_object():
-            pass
+            if ((w_ldtype.is_object() and w_ldtype.is_record()) and
+                (w_rdtype.is_object() and w_rdtype.is_record())):
+                pass
+            elif ((w_ldtype.is_object() and w_ldtype.is_record()) or
+                (w_rdtype.is_object() and w_rdtype.is_record())):
+                if self.name == 'not_equal':
+                    return space.wrap(True)
+                elif self.name == 'equal':
+                    return space.wrap(False)
+                else:
+                    msg = ("ufunc '%s' not supported for the input types, "
+                           "and the inputs could not be safely coerced to "
+                           "any supported types according to the casting "
+                           "rule '%s'")
+                    raise oefmt(space.w_TypeError, msg, self.name, casting)
+            else:
+                pass
         elif w_ldtype.is_str() and w_rdtype.is_str() and \
                 self.bool_result:
             pass
diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py
--- a/pypy/module/sys/version.py
+++ b/pypy/module/sys/version.py
@@ -10,7 +10,7 @@
 #XXX # sync CPYTHON_VERSION with patchlevel.h, package.py
 CPYTHON_API_VERSION        = 1013   #XXX # sync with include/modsupport.h
 
-PYPY_VERSION               = (4, 0, 1, "alpha", 0)    #XXX # sync patchlevel.h
+PYPY_VERSION               = (4, 1, 0, "alpha", 0)    #XXX # sync patchlevel.h
 
 
 import pypy
diff --git a/pypy/module/test_lib_pypy/test_greenlet_tracing.py b/pypy/module/test_lib_pypy/test_greenlet_tracing.py
new file mode 100644
--- /dev/null
+++ b/pypy/module/test_lib_pypy/test_greenlet_tracing.py
@@ -0,0 +1,53 @@
+import py
+try:
+    from lib_pypy import greenlet
+except ImportError, e:
+    py.test.skip(e)
+
+class SomeError(Exception):
+    pass
+
+class TestTracing:
+    def test_greenlet_tracing(self):
+        main = greenlet.getcurrent()
+        actions = []
+        def trace(*args):
+            actions.append(args)
+        def dummy():
+            pass
+        def dummyexc():
+            raise SomeError()
+        oldtrace = greenlet.settrace(trace)
+        try:
+            g1 = greenlet.greenlet(dummy)
+            g1.switch()
+            g2 = greenlet.greenlet(dummyexc)
+            py.test.raises(SomeError, g2.switch)
+        finally:
+            greenlet.settrace(oldtrace)
+        assert actions == [
+            ('switch', (main, g1)),
+            ('switch', (g1, main)),
+            ('switch', (main, g2)),
+            ('throw', (g2, main)),
+        ]
+
+    def test_exception_disables_tracing(self):
+        main = greenlet.getcurrent()
+        actions = []
+        def trace(*args):
+            actions.append(args)
+            raise SomeError()
+        def dummy():
+            main.switch()
+        g = greenlet.greenlet(dummy)
+        g.switch()
+        oldtrace = greenlet.settrace(trace)
+        try:
+            py.test.raises(SomeError, g.switch)
+            assert greenlet.gettrace() is None
+        finally:
+            greenlet.settrace(oldtrace)
+        assert actions == [
+            ('switch', (main, g)),
+        ]
diff --git a/pypy/tool/release/repackage.sh b/pypy/tool/release/repackage.sh
--- a/pypy/tool/release/repackage.sh
+++ b/pypy/tool/release/repackage.sh
@@ -1,10 +1,11 @@
 # Edit these appropriately before running this script
-maj=2
-min=6
+maj=4
+min=0
 rev=1
 # This script will download latest builds from the buildmaster, rename the top
 # level directory, and repackage ready to be uploaded to bitbucket. It will also
 # download source, assuming a tag for the release already exists, and repackage them.
+# The script should be run in an empty directory, i.e. /tmp/release_xxx
 
 for plat in linux linux64 linux-armhf-raspbian linux-armhf-raring linux-armel osx64 freebsd64
   do
diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py
--- a/rpython/annotator/annrpython.py
+++ b/rpython/annotator/annrpython.py
@@ -1,15 +1,17 @@
 from __future__ import absolute_import
 
 import types
+from collections import defaultdict
 
 from rpython.tool.ansi_print import ansi_log
 from rpython.tool.pairtype import pair
 from rpython.tool.error import (format_blocked_annotation_error,
                              gather_error, source_lines)
-from rpython.flowspace.model import (
-    Variable, Constant, FunctionGraph, checkgraph)
+from rpython.flowspace.model import Variable, Constant, checkgraph
 from rpython.translator import simplify, transform
 from rpython.annotator import model as annmodel, signature
+from rpython.annotator.model import (
+        typeof, SomeExceptCase, s_ImpossibleValue)
 from rpython.annotator.bookkeeper import Bookkeeper
 from rpython.rtyper.normalizecalls import perform_normalizations
 
@@ -209,7 +211,7 @@
         for graph in newgraphs:
             v = graph.getreturnvar()
             if v.annotation is None:
-                self.setbinding(v, annmodel.s_ImpossibleValue)
+                self.setbinding(v, s_ImpossibleValue)
 
     def validate(self):
         """Check that the annotation results are valid"""
@@ -281,7 +283,7 @@
         except KeyError:
             # the function didn't reach any return statement so far.
             # (some functions actually never do, they always raise exceptions)
-            return annmodel.s_ImpossibleValue
+            return s_ImpossibleValue
 
     def reflowfromposition(self, position_key):
         graph, block, index = position_key
@@ -387,6 +389,34 @@
         if unions != oldcells:
             self.bindinputargs(graph, block, unions)
 
+    def apply_renaming(self, s_out, renaming):
+        if hasattr(s_out, 'is_type_of'):
+            renamed_is_type_of = []
+            for v in s_out.is_type_of:
+                renamed_is_type_of += renaming[v]
+            assert s_out.knowntype is type
+            newcell = typeof(renamed_is_type_of)
+            if s_out.is_constant():
+                newcell.const = s_out.const
+            s_out = newcell
+
+        if hasattr(s_out, 'knowntypedata'):
+            renamed_knowntypedata = {}
+            for value, constraints in s_out.knowntypedata.items():
+                renamed_knowntypedata[value] = {}
+                for v, s in constraints.items():
+                    new_vs = renaming.get(v, [])
+                    for new_v in new_vs:
+                        renamed_knowntypedata[value][new_v] = s
+            assert isinstance(s_out, annmodel.SomeBool)
+            newcell = annmodel.SomeBool()
+            if s_out.is_constant():
+                newcell.const = s_out.const
+            s_out = newcell
+            s_out.set_knowntypedata(renamed_knowntypedata)
+        return s_out
+
+
     def whereami(self, position_key):
         graph, block, i = position_key
         blk = ""
@@ -456,33 +486,43 @@
                     exits = [link for link in exits
                                   if link.exitcase == s_exitswitch.const]
 
-        # filter out those exceptions which cannot
-        # occour for this specific, typed operation.
         if block.canraise:
             op = block.raising_op
             can_only_throw = op.get_can_only_throw(self)
             if can_only_throw is not None:
-                candidates = can_only_throw
-                candidate_exits = exits
-                exits = []
-                for link in candidate_exits:
+                # filter out those exceptions which cannot
+                # occur for this specific, typed operation.
+                s_exception = self.bookkeeper.new_exception(can_only_throw)
+                for link in exits:
                     case = link.exitcase
                     if case is None:
-                        exits.append(link)
+                        self.follow_link(graph, link, {})
                         continue
-                    covered = [c for c in candidates if issubclass(c, case)]
-                    if covered:
-                        exits.append(link)
-                        candidates = [c for c in candidates if c not in covered]
+                    if s_exception == s_ImpossibleValue:
+                        break
+                    s_case = SomeExceptCase(
+                            self.bookkeeper.getuniqueclassdef(case))
+                    s_matching_exc = s_exception.intersection(s_case)
+                    if s_matching_exc != s_ImpossibleValue:
+                        self.follow_raise_link(graph, link, s_matching_exc)
+                    s_exception = s_exception.difference(s_case)
+            else:
+                for link in exits:
+                    if link.exitcase is None:
+                        self.follow_link(graph, link, {})
+                    else:
+                        s_exception = self.bookkeeper.valueoftype(link.exitcase)
+                        self.follow_raise_link(graph, link, s_exception)
+        else:
+            if isinstance(block.exitswitch, Variable):
+                knowntypedata = getattr(block.exitswitch.annotation,
+                                            "knowntypedata", {})
+            else:
+                knowntypedata = {}
+            for link in exits:
+                constraints = knowntypedata.get(link.exitcase, {})
+                self.follow_link(graph, link, constraints)
 
-        # mapping (exitcase, variable) -> s_annotation
-        # that can be attached to booleans, exitswitches
-        knowntypedata = {}
-        if isinstance(block.exitswitch, Variable):
-            knowntypedata = getattr(self.binding(block.exitswitch),
-                                    "knowntypedata", {})
-        for link in exits:
-            self.follow_link(graph, link, knowntypedata)
         if block in self.notify:
             # reflow from certain positions when this block is done
             for callback in self.notify[block]:
@@ -491,84 +531,66 @@
                 else:
                     callback()
 
-    def follow_link(self, graph, link, knowntypedata):
-        in_except_block = False
-        v_last_exc_type = link.last_exception  # may be None for non-exception link
-        v_last_exc_value = link.last_exc_value  # may be None for non-exception link
 
-        if (isinstance(link.exitcase, (types.ClassType, type)) and
-                issubclass(link.exitcase, BaseException)):
-            assert v_last_exc_type and v_last_exc_value
-            s_last_exc_value = self.bookkeeper.valueoftype(link.exitcase)
-            s_last_exc_type = annmodel.SomeType()
-            if isinstance(v_last_exc_type, Constant):
-                s_last_exc_type.const = v_last_exc_type.value
-            s_last_exc_type.is_type_of = [v_last_exc_value]
-
-            if isinstance(v_last_exc_type, Variable):
-                self.setbinding(v_last_exc_type, s_last_exc_type)
-            if isinstance(v_last_exc_value, Variable):
-                self.setbinding(v_last_exc_value, s_last_exc_value)
-
-            s_last_exc_type = annmodel.SomeType()
-            if isinstance(v_last_exc_type, Constant):
-                s_last_exc_type.const = v_last_exc_type.value
-            last_exc_value_vars = []
-            in_except_block = True
+    def follow_link(self, graph, link, constraints):
+        assert not (isinstance(link.exitcase, (types.ClassType, type)) and
+                issubclass(link.exitcase, BaseException))
 
         ignore_link = False
         inputs_s = []
-        renaming = {}
+        renaming = defaultdict(list)
         for v_out, v_input in zip(link.args, link.target.inputargs):
-            renaming.setdefault(v_out, []).append(v_input)
-        for v_out, v_input in zip(link.args, link.target.inputargs):
-            if v_out == v_last_exc_type:
-                assert in_except_block
-                inputs_s.append(s_last_exc_type)
-            elif v_out == v_last_exc_value:
-                assert in_except_block
-                inputs_s.append(s_last_exc_value)
-                last_exc_value_vars.append(v_input)
-            else:
-                s_out = self.annotation(v_out)
-                if (link.exitcase, v_out) in knowntypedata:
-                    knownvarvalue = knowntypedata[(link.exitcase, v_out)]
-                    s_out = pair(s_out, knownvarvalue).improve()
-                    # ignore links that try to pass impossible values
-                    if s_out == annmodel.s_ImpossibleValue:
-                        ignore_link = True
+            renaming[v_out].append(v_input)
 
-                if hasattr(s_out,'is_type_of'):
-                    renamed_is_type_of = []
-                    for v in s_out.is_type_of:
-                        new_vs = renaming.get(v, [])
-                        renamed_is_type_of += new_vs
-                    assert s_out.knowntype is type
-                    newcell = annmodel.SomeType()
-                    if s_out.is_constant():
-                        newcell.const = s_out.const
-                    s_out = newcell
-                    s_out.is_type_of = renamed_is_type_of
-
-                if hasattr(s_out, 'knowntypedata'):
-                    renamed_knowntypedata = {}
-                    for (value, v), s in s_out.knowntypedata.items():
-                        new_vs = renaming.get(v, [])
-                        for new_v in new_vs:
-                            renamed_knowntypedata[value, new_v] = s
-                    assert isinstance(s_out, annmodel.SomeBool)
-                    newcell = annmodel.SomeBool()
-                    if s_out.is_constant():
-                        newcell.const = s_out.const
-                    s_out = newcell
-                    s_out.set_knowntypedata(renamed_knowntypedata)
-
-                inputs_s.append(s_out)
+        for v_out in link.args:
+            s_out = self.annotation(v_out)
+            if v_out in constraints:
+                s_constraint = constraints[v_out]
+                s_out = pair(s_out, s_constraint).improve()
+                # ignore links that try to pass impossible values
+                if s_out == s_ImpossibleValue:
+                    ignore_link = True
+            s_out = self.apply_renaming(s_out, renaming)
+            inputs_s.append(s_out)
         if ignore_link:
             return
 
-        if in_except_block:
-            s_last_exc_type.is_type_of = last_exc_value_vars
+        self.links_followed[link] = True
+        self.addpendingblock(graph, link.target, inputs_s)
+
+    def follow_raise_link(self, graph, link, s_last_exc_value):
+        v_last_exc_type = link.last_exception
+        v_last_exc_value = link.last_exc_value
+
+        assert (isinstance(link.exitcase, (types.ClassType, type)) and
+                issubclass(link.exitcase, BaseException))
+
+        assert v_last_exc_type and v_last_exc_value
+
+        if isinstance(v_last_exc_value, Variable):
+            self.setbinding(v_last_exc_value, s_last_exc_value)
+
+        if isinstance(v_last_exc_type, Variable):
+            self.setbinding(v_last_exc_type, typeof([v_last_exc_value]))
+
+        inputs_s = []
+        renaming = defaultdict(list)
+        for v_out, v_input in zip(link.args, link.target.inputargs):
+            renaming[v_out].append(v_input)
+
+        for v_out, v_input in zip(link.args, link.target.inputargs):
+            if v_out == v_last_exc_type:
+                s_out = typeof(renaming[v_last_exc_value])
+                if isinstance(v_last_exc_type, Constant):
+                    s_out.const = v_last_exc_type.value
+                elif v_last_exc_type.annotation.is_constant():
+                    s_out.const = v_last_exc_type.annotation.const
+                inputs_s.append(s_out)
+            else:
+                s_out = self.annotation(v_out)
+                s_out = self.apply_renaming(s_out, renaming)
+                inputs_s.append(s_out)
+
         self.links_followed[link] = True
         self.addpendingblock(graph, link.target, inputs_s)
 
@@ -586,8 +608,8 @@
                 raise BlockedInference(self, op, -1)
         resultcell = op.consider(self)
         if resultcell is None:
-            resultcell = annmodel.s_ImpossibleValue
-        elif resultcell == annmodel.s_ImpossibleValue:
+            resultcell = s_ImpossibleValue
+        elif resultcell == s_ImpossibleValue:
             raise BlockedInference(self, op, -1) # the operation cannot succeed
         assert isinstance(resultcell, annmodel.SomeObject)
         assert isinstance(op.result, Variable)
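
The rewritten block-exit handling above drives exception links through set-like operations: the bookkeeper builds a SomeException covering everything the operation can throw, and each except-case link consumes its share via intersection() and difference(), stopping once nothing can still escape. A rough, self-contained sketch of that narrowing, using plain Python sets in place of the annotator types (the helper names below are illustrative, not the RPython API):

    def intersection(classes, case):
        # exceptions from `classes` that an "except case:" clause would catch
        return {c for c in classes if issubclass(c, case)}

    def difference(classes, case):
        # exceptions from `classes` that the clause lets through
        return {c for c in classes if not issubclass(c, case)}

    def narrow(can_raise, except_cases):
        # mimics the loop over exception links: each clause only sees what the
        # earlier clauses did not already catch
        remaining = set(can_raise)
        for case in except_cases:
            if not remaining:
                break                      # nothing left can be raised
            caught = intersection(remaining, case)
            if caught:
                yield case, caught         # a raise link would be followed here
            remaining = difference(remaining, case)

    # An operation that can only throw IndexError or KeyError, wrapped in
    # "except LookupError: ... except ValueError: ..."
    caught = dict(narrow({IndexError, KeyError}, [LookupError, ValueError]))
    assert caught == {LookupError: {IndexError, KeyError}}  # ValueError clause is dead
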
diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py
--- a/rpython/annotator/binaryop.py
+++ b/rpython/annotator/binaryop.py
@@ -1,18 +1,19 @@
 """
 Binary operations between SomeValues.
 """
+from collections import defaultdict
 
 from rpython.tool.pairtype import pair, pairtype
 from rpython.annotator.model import (
     SomeObject, SomeInteger, SomeBool, s_Bool, SomeString, SomeChar, SomeList,
-    SomeDict, SomeUnicodeCodePoint, SomeUnicodeString,
+    SomeDict, SomeUnicodeCodePoint, SomeUnicodeString, SomeException,
     SomeTuple, SomeImpossibleValue, s_ImpossibleValue, SomeInstance,
     SomeBuiltinMethod, SomeIterator, SomePBC, SomeNone, SomeFloat, s_None,
     SomeByteArray, SomeWeakRef, SomeSingleFloat,
-    SomeLongFloat, SomeType, SomeConstantType, unionof, UnionError,
+    SomeLongFloat, SomeType, SomeTypeOf, SomeConstantType, unionof, UnionError,
     read_can_only_throw, add_knowntypedata,
     merge_knowntypedata,)
-from rpython.annotator.bookkeeper import immutablevalue
+from rpython.annotator.bookkeeper import immutablevalue, getbookkeeper
 from rpython.flowspace.model import Variable, Constant, const
 from rpython.flowspace.operation import op
 from rpython.rlib import rarithmetic
@@ -35,7 +36,7 @@
     elif s_obj1.is_constant():
         if s_obj1.const is None and not s_obj2.can_be_none():
             r.const = False
-    knowntypedata = {}
+    knowntypedata = defaultdict(dict)
     bk = annotator.bookkeeper
 
     def bind(src_obj, tgt_obj):
@@ -145,24 +146,18 @@
 
     def union((obj1, obj2)):
         result = SomeType()
-        is_type_of1 = getattr(obj1, 'is_type_of', None)
-        is_type_of2 = getattr(obj2, 'is_type_of', None)
         if obj1.is_immutable_constant() and obj2.is_immutable_constant() and obj1.const == obj2.const:
             result.const = obj1.const
-            is_type_of = {}
-            if is_type_of1:
-                for v in is_type_of1:
-                    is_type_of[v] = True
-            if is_type_of2:
-                for v in is_type_of2:
-                    is_type_of[v] = True
-            if is_type_of:
-                result.is_type_of = is_type_of.keys()
-        else:
-            if is_type_of1 and is_type_of1 == is_type_of2:
-                result.is_type_of = is_type_of1
         return result
 
+class __extend__(pairtype(SomeTypeOf, SomeTypeOf)):
+    def union((s_obj1, s_obj2)):
+        vars = list(set(s_obj1.is_type_of) | set(s_obj2.is_type_of))
+        result = SomeTypeOf(vars)
+        if (s_obj1.is_immutable_constant() and s_obj2.is_immutable_constant()
+                and s_obj1.const == s_obj2.const):
+            result.const = s_obj1.const
+        return result
 
 # cloning a function with identical code, for the can_only_throw attribute
 def _clone(f, can_only_throw = None):
@@ -263,7 +258,7 @@
         if not (rarithmetic.signedtype(s_int1.knowntype) and
                 rarithmetic.signedtype(s_int2.knowntype)):
             return r
-        knowntypedata = {}
+        knowntypedata = defaultdict(dict)
         def tointtype(s_int0):
             if s_int0.knowntype is bool:
                 return int
@@ -682,6 +677,22 @@
             thistype = pairtype(SomeInstance, SomeInstance)
             return super(thistype, pair(ins1, ins2)).improve()
 
+class __extend__(
+        pairtype(SomeException, SomeInstance),
+        pairtype(SomeException, SomeNone)):
+    def union((s_exc, s_inst)):
+        return unionof(s_exc.as_SomeInstance(), s_inst)
+
+class __extend__(
+        pairtype(SomeInstance, SomeException),
+        pairtype(SomeNone, SomeException)):
+    def union((s_inst, s_exc)):
+        return unionof(s_exc.as_SomeInstance(), s_inst)
+
+class __extend__(pairtype(SomeException, SomeException)):
+    def union((s_exc1, s_exc2)):
+        return SomeException(s_exc1.classdefs | s_exc2.classdefs)
+
 
 @op.getitem.register_transform(SomeInstance, SomeObject)
 def getitem_SomeInstance(annotator, v_ins, v_idx):
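
The pairtype extensions just above define how the new exception annotations merge: two SomeException values keep the union of their classdef sets, and a union with an ordinary SomeInstance (or with None) falls back to as_SomeInstance(), i.e. a plain instance annotation. A minimal toy of those merge rules, with sets of classes standing in for classdefs (not the annotator API itself):

    def union_exceptions(classdefs1, classdefs2):
        # SomeException | SomeException keeps the union of both class sets
        return classdefs1 | classdefs2

    def union_with_instance(exc_classes, instance_class):
        # SomeException | SomeInstance degrades to the instance side once the
        # instance class covers every exception class (the common case below)
        assert all(issubclass(c, instance_class) for c in exc_classes)
        return instance_class

    assert union_exceptions({ValueError}, {IndexError}) == {ValueError, IndexError}
    assert union_with_instance({ValueError, IndexError}, Exception) is Exception
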
diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py
--- a/rpython/annotator/bookkeeper.py
+++ b/rpython/annotator/bookkeeper.py
@@ -12,7 +12,7 @@
 from rpython.annotator.model import (
     SomeOrderedDict, SomeString, SomeChar, SomeFloat, unionof, SomeInstance,
     SomeDict, SomeBuiltin, SomePBC, SomeInteger, TLS, SomeUnicodeCodePoint,
-    s_None, s_ImpossibleValue, SomeBool, SomeTuple,
+    s_None, s_ImpossibleValue, SomeBool, SomeTuple, SomeException,
     SomeImpossibleValue, SomeUnicodeString, SomeList, HarmlesslyBlocked,
     SomeWeakRef, SomeByteArray, SomeConstantType, SomeProperty)
 from rpython.annotator.classdesc import ClassDef, ClassDesc
@@ -167,6 +167,10 @@
         desc = self.getdesc(cls)
         return desc.getuniqueclassdef()
 
+    def new_exception(self, exc_classes):
+        clsdefs = {self.getuniqueclassdef(cls) for cls in exc_classes}
+        return SomeException(clsdefs)
+
     def getlistdef(self, **flags_if_new):
         """Get the ListDef associated with the current position."""
         try:
diff --git a/rpython/annotator/builtin.py b/rpython/annotator/builtin.py
--- a/rpython/annotator/builtin.py
+++ b/rpython/annotator/builtin.py
@@ -2,7 +2,7 @@
 Built-in functions.
 """
 import sys
-from collections import OrderedDict
+from collections import OrderedDict, defaultdict
 
 from rpython.annotator.model import (
     SomeInteger, SomeChar, SomeBool, SomeString, SomeTuple,
@@ -188,7 +188,7 @@
             variables = [op.args[1]]
         for variable in variables:
             assert bk.annotator.binding(variable) == s_obj
-        knowntypedata = {}
+        knowntypedata = defaultdict(dict)
         if not hasattr(typ, '_freeze_') and isinstance(s_type, SomePBC):
             add_knowntypedata(knowntypedata, True, variables, 
bk.valueoftype(typ))
         r.set_knowntypedata(knowntypedata)
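
The hunk above switches knowntypedata from a flat dict keyed by (truth, variable) to a defaultdict(dict) keyed by truth value first, matching the new add_knowntypedata in model.py. A small sketch of that nested layout, with strings standing in for flow-graph variables and annotations (only add_knowntypedata mirrors the real helper; the rest is illustrative):

    from collections import defaultdict

    def add_knowntypedata(ktd, truth, variables, annotation):
        # one inner dict per truth value, mapping variables to refined annotations
        for v in variables:
            ktd[truth][v] = annotation

    knowntypedata = defaultdict(dict)
    # "if isinstance(x, MyClass):" lets the True branch assume x is a MyClass
    add_knowntypedata(knowntypedata, True, ['v_x'], 'SomeInstance(MyClass)')

    # follow_link() later picks the constraints matching the branch it follows
    constraints = knowntypedata.get(True, {})
    assert constraints == {'v_x': 'SomeInstance(MyClass)'}
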
diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py
--- a/rpython/annotator/model.py
+++ b/rpython/annotator/model.py
@@ -32,7 +32,7 @@
 import inspect
 import weakref
 from types import BuiltinFunctionType, MethodType
-from collections import OrderedDict
+from collections import OrderedDict, defaultdict
 
 import rpython
 from rpython.tool import descriptor
@@ -138,6 +138,23 @@
     def can_be_none(self):
         return False
 
+class SomeTypeOf(SomeType):
+    """The type of a variable"""
+    def __init__(self, args_v):
+        self.is_type_of = args_v
+
+def typeof(args_v):
+    if args_v:
+        result = SomeTypeOf(args_v)
+        if len(args_v) == 1:
+            s_arg = args_v[0].annotation
+            if isinstance(s_arg, SomeException) and len(s_arg.classdefs) == 1:
+                cdef, = s_arg.classdefs
+                result.const = cdef.classdesc.pyobj
+        return result
+    else:
+        return SomeType()
+
 
 class SomeFloat(SomeObject):
     "Stands for a float or an integer."
@@ -437,6 +454,39 @@
     def noneify(self):
         return SomeInstance(self.classdef, can_be_None=True)
 
+class SomeException(SomeObject):
+    """The set of exceptions obeying type(exc) in self.classes"""
+    def __init__(self, classdefs):
+        self.classdefs = classdefs
+
+    def intersection(self, other):
+        assert isinstance(other, SomeExceptCase)
+        classdefs = {c for c in self.classdefs if c.issubclass(other.case)}
+        if classdefs:
+            return SomeException(classdefs)
+        else:
+            return s_ImpossibleValue
+
+    def difference(self, other):
+        assert isinstance(other, SomeExceptCase)
+        classdefs = {c for c in self.classdefs if not c.issubclass(other.case)}
+        if classdefs:
+            return SomeException(classdefs)
+        else:
+            return s_ImpossibleValue
+
+    def as_SomeInstance(self):
+        return unionof(*[SomeInstance(cdef) for cdef in self.classdefs])
+
+
+class SomeExceptCase(SomeObject):
+    """The set of exceptions that match a given except clause.
+
+    In other words, the set of exceptions that satisfy isinstance(exc, self.case).
+    """
+    def __init__(self, case):
+        self.case = case
+
 
 class SomePBC(SomeObject):
     """Stands for a global user instance, built prior to the analysis,
@@ -682,14 +732,15 @@
 
 def add_knowntypedata(ktd, truth, vars, s_obj):
     for v in vars:
-        ktd[(truth, v)] = s_obj
+        ktd[truth][v] = s_obj
 
 
 def merge_knowntypedata(ktd1, ktd2):
-    r = {}
-    for truth_v in ktd1:
-        if truth_v in ktd2:
-            r[truth_v] = unionof(ktd1[truth_v], ktd2[truth_v])
+    r = defaultdict(dict)
+    for truth, constraints in ktd1.items():
+        for v in constraints:
+            if truth in ktd2 and v in ktd2[truth]:
+                r[truth][v] = unionof(ktd1[truth][v], ktd2[truth][v])
     return r
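
SomeTypeOf and typeof(), introduced in the model.py hunk above, replace the old pattern of attaching an is_type_of list to a fresh SomeType(); when the single argument is known to hold exactly one exception class, typeof() also folds the type to a constant. A rough, plain-Python illustration of that folding (the toy class and helper below are hypothetical, not the annotator's own):

    class TypeOf(object):
        # toy counterpart of SomeTypeOf: "the type of these variables"
        def __init__(self, variables):
            self.is_type_of = variables
            self.const = None              # filled in when the type is unique

    def toy_typeof(variables, possible_exc_classes):
        # possible_exc_classes plays the role of SomeException.classdefs
        result = TypeOf(variables)
        if len(variables) == 1 and len(possible_exc_classes) == 1:
            result.const, = possible_exc_classes   # one class only: a constant
        return result

    s = toy_typeof(['v_exc_value'], {KeyError})
    assert s.const is KeyError and s.is_type_of == ['v_exc_value']
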
 
 
diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py
--- a/rpython/annotator/test/test_annrpython.py
+++ b/rpython/annotator/test/test_annrpython.py
@@ -698,6 +698,56 @@
         s = a.build_types(snippet.exc_deduction_our_excs_plus_others, [])
         assert isinstance(s, annmodel.SomeInteger)
 
+    def test_complex_exception_deduction(self):
+        class InternalError(Exception):
+            def __init__(self, msg):
+                self.msg = msg
+
+        class AppError(Exception):
+            def __init__(self, msg):
+                self.msg = msg
+        def apperror(msg):
+            return AppError(msg)
+
+        def f(string):
+            if not string:
+                raise InternalError('Empty string')
+            return string, None
+        def cleanup():
+            pass
+
+        def g(string):
+            try:
+                try:
+                    string, _ = f(string)
+                except ZeroDivisionError:
+                    raise apperror('ZeroDivisionError')
+                try:
+                    result, _ = f(string)
+                finally:
+                    cleanup()
+            except InternalError as e:
+                raise apperror(e.msg)
+            return result
+
+        a = self.RPythonAnnotator()
+        s_result = a.build_types(g, [str])
+        assert isinstance(s_result, annmodel.SomeString)
+
+    def test_method_exception_specialization(self):
+        def f(l):
+            try:
+                return l.pop()
+            except Exception:
+                raise
+        a = self.RPythonAnnotator()
+        s = a.build_types(f, [[int]])
+        graph = graphof(a, f)
+        etype, evalue = graph.exceptblock.inputargs
+        assert evalue.annotation.classdefs == {
+                a.bookkeeper.getuniqueclassdef(IndexError)}
+        assert etype.annotation.const == IndexError
+
     def test_operation_always_raising(self):
         def operation_always_raising(n):
             lst = []
@@ -1376,11 +1426,11 @@
         a.build_types(f, [somedict(annmodel.s_Int, annmodel.s_Int)])
         fg = graphof(a, f)
         et, ev = fg.exceptblock.inputargs
-        t = annmodel.SomeType()
+        t = annmodel.SomeTypeOf([ev])
         t.const = KeyError
-        t.is_type_of = [ev]
-        assert a.binding(et) == t
-        assert isinstance(a.binding(ev), annmodel.SomeInstance) and a.binding(ev).classdef == a.bookkeeper.getuniqueclassdef(KeyError)
+        assert et.annotation == t
+        s_ev = ev.annotation
+        assert s_ev == a.bookkeeper.new_exception([KeyError])
 
     def test_reraiseAnything(self):
         def f(dic):
@@ -1392,11 +1442,11 @@
         a.build_types(f, [somedict(annmodel.s_Int, annmodel.s_Int)])
         fg = graphof(a, f)
         et, ev = fg.exceptblock.inputargs
-        t = annmodel.SomeType()
-        t.is_type_of = [ev]
-        t.const = KeyError    # IndexError ignored because 'dic' is a dict
-        assert a.binding(et) == t
-        assert isinstance(a.binding(ev), annmodel.SomeInstance) and a.binding(ev).classdef == a.bookkeeper.getuniqueclassdef(KeyError)
+        t = annmodel.SomeTypeOf([ev])
+        t.const = KeyError  # IndexError ignored because 'dic' is a dict
+        assert et.annotation == t
+        s_ev = ev.annotation
+        assert s_ev == a.bookkeeper.new_exception([KeyError])
 
     def test_exception_mixing(self):
         def h():
@@ -1427,10 +1477,11 @@
         a.build_types(f, [int, somelist(annmodel.s_Int)])
         fg = graphof(a, f)
         et, ev = fg.exceptblock.inputargs
-        t = annmodel.SomeType()
-        t.is_type_of = [ev]
-        assert a.binding(et) == t
-        assert isinstance(a.binding(ev), annmodel.SomeInstance) and a.binding(ev).classdef == a.bookkeeper.getuniqueclassdef(Exception)
+        t = annmodel.SomeTypeOf([ev])
+        assert et.annotation == t
+        s_ev = ev.annotation
+        assert (isinstance(s_ev, annmodel.SomeInstance) and
+                s_ev.classdef == a.bookkeeper.getuniqueclassdef(Exception))
 
     def test_try_except_raise_finally1(self):
         def h(): pass
@@ -1449,10 +1500,11 @@
         a.build_types(f, [])
         fg = graphof(a, f)
         et, ev = fg.exceptblock.inputargs
-        t = annmodel.SomeType()
-        t.is_type_of = [ev]
-        assert a.binding(et) == t
-        assert isinstance(a.binding(ev), annmodel.SomeInstance) and a.binding(ev).classdef == a.bookkeeper.getuniqueclassdef(Exception)
+        t = annmodel.SomeTypeOf([ev])
+        assert et.annotation == t
+        s_ev = ev.annotation
+        assert (isinstance(s_ev, annmodel.SomeInstance) and
+                s_ev.classdef == a.bookkeeper.getuniqueclassdef(Exception))
 
     def test_inplace_div(self):
         def f(n):
diff --git a/rpython/annotator/test/test_model.py b/rpython/annotator/test/test_model.py
--- a/rpython/annotator/test/test_model.py
+++ b/rpython/annotator/test/test_model.py
@@ -1,8 +1,14 @@
-import py
+import pytest
 
 from rpython.annotator.model import *
 from rpython.annotator.listdef import ListDef
 from rpython.translator.translator import TranslationContext
+from rpython.annotator import unaryop, binaryop  # for side-effects
+
[email protected]()
+def annotator():
+    t = TranslationContext()
+    return t.buildannotator()
 
 
 listdef1 = ListDef(None, SomeTuple([SomeInteger(nonneg=True), SomeString()]))
@@ -100,19 +106,21 @@
 class AAA(object):
     pass
 
-def test_blocked_inference1():
+def test_blocked_inference1(annotator):
     def blocked_inference():
         return AAA().m()
 
-    py.test.raises(AnnotatorError, compile_function, blocked_inference)
+    with pytest.raises(AnnotatorError):
+        annotator.build_types(blocked_inference, [])
 
-def test_blocked_inference2():
+def test_blocked_inference2(annotator):
     def blocked_inference():
         a = AAA()
         b = a.x
         return b
 
-    py.test.raises(AnnotatorError, compile_function, blocked_inference)
+    with pytest.raises(AnnotatorError):
+        annotator.build_types(blocked_inference, [])
 
 
 def test_not_const():
@@ -129,3 +137,17 @@
     assert s.no_nul is True
     s = SomeChar().nonnulify()
     assert s.no_nul is True
+
+def test_SomeException_union(annotator):
+    bk = annotator.bookkeeper
+    someinst = lambda cls, **kw: SomeInstance(bk.getuniqueclassdef(cls), **kw)
+    s_inst = someinst(Exception)
+    s_exc = bk.new_exception([ValueError, IndexError])
+    assert unionof(s_exc, s_inst) == s_inst
+    assert unionof(s_inst, s_exc) == s_inst
+    s_nullable = unionof(s_None, bk.new_exception([ValueError]))
+    assert isinstance(s_nullable, SomeInstance)
+    assert s_nullable.can_be_None
+    s_exc1 = bk.new_exception([ValueError])
+    s_exc2 = bk.new_exception([IndexError])
+    assert unionof(s_exc1, s_exc2) == unionof(s_exc2, s_exc1)
diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py
--- a/rpython/annotator/unaryop.py
+++ b/rpython/annotator/unaryop.py
@@ -1,8 +1,9 @@
 """
 Unary operations on SomeValues.
 """
+from __future__ import absolute_import
 
-from __future__ import absolute_import
+from collections import defaultdict
 
 from rpython.tool.pairtype import pair
 from rpython.flowspace.operation import op
@@ -11,7 +12,7 @@
 from rpython.annotator.model import (SomeObject, SomeInteger, SomeBool,
     SomeString, SomeChar, SomeList, SomeDict, SomeTuple, SomeImpossibleValue,
     SomeUnicodeCodePoint, SomeInstance, SomeBuiltin, SomeBuiltinMethod,
-    SomeFloat, SomeIterator, SomePBC, SomeNone, SomeType, s_ImpossibleValue,
+    SomeFloat, SomeIterator, SomePBC, SomeNone, SomeTypeOf, s_ImpossibleValue,
     s_Bool, s_None, s_Int, unionof, add_knowntypedata,
     SomeWeakRef, SomeUnicodeString, SomeByteArray)
 from rpython.annotator.bookkeeper import getbookkeeper, immutablevalue
@@ -26,11 +27,11 @@
                         if oper.dispatch == 1])
 UNARY_OPERATIONS.remove('contains')
 
+
 @op.type.register(SomeObject)
-def type_SomeObject(annotator, arg):
-    r = SomeType()
-    r.is_type_of = [arg]
-    return r
+def type_SomeObject(annotator, v_arg):
+    return SomeTypeOf([v_arg])
+
 
 @op.bool.register(SomeObject)
 def bool_SomeObject(annotator, obj):
@@ -39,7 +40,7 @@
     s_nonnone_obj = annotator.annotation(obj)
     if s_nonnone_obj.can_be_none():
         s_nonnone_obj = s_nonnone_obj.nonnoneify()
-    knowntypedata = {}
+    knowntypedata = defaultdict(dict)
     add_knowntypedata(knowntypedata, True, [obj], s_nonnone_obj)
     r.set_knowntypedata(knowntypedata)
     return r
@@ -99,18 +100,17 @@
     callspec = complex_args([annotator.annotation(v_arg) for v_arg in args_v])
_______________________________________________
pypy-commit mailing list
[email protected]
https://mail.python.org/mailman/listinfo/pypy-commit
