Author: mattip <matti.pi...@gmail.com>
Branch: release-2.6.x
Changeset: r77718:295ee98b6928
Date: 2015-05-31 10:19 +0300
http://bitbucket.org/pypy/pypy/changeset/295ee98b6928/

Log:    merge default into release

diff too long, truncating to 2000 out of 4408 lines

diff --git a/.hgtags b/.hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -11,3 +11,5 @@
 10f1b29a2bd21f837090286174a9ca030b8680b2 release-2.5.0
 9c4588d731b7fe0b08669bd732c2b676cb0a8233 release-2.5.1
 fcdb941565156385cbac04cfb891f8f4c7a92ef6 release-2.6.0
+fcdb941565156385cbac04cfb891f8f4c7a92ef6 release-2.6.0
+e03971291f3a0729ecd3ee7fae7ddb0bb82d476c release-2.6.0
diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py
--- a/lib_pypy/_ctypes/function.py
+++ b/lib_pypy/_ctypes/function.py
@@ -278,7 +278,7 @@
                         for argtype, arg in zip(argtypes, args)]
             try:
                 return to_call(*args)
-            except SystemExit, e:
+            except SystemExit as e:
                 handle_system_exit(e)
                 raise
         return f
@@ -306,12 +306,12 @@
 
             try:
                 newargs = self._convert_args_for_callback(argtypes, args)
-            except (UnicodeError, TypeError, ValueError), e:
+            except (UnicodeError, TypeError, ValueError) as e:
                 raise ArgumentError(str(e))
             try:
                 try:
                     res = self.callable(*newargs)
-                except SystemExit, e:
+                except SystemExit as e:
                     handle_system_exit(e)
                     raise
             except:
@@ -575,7 +575,7 @@
             for i, argtype in enumerate(argtypes):
                 try:
                     keepalive, newarg, newargtype = self._conv_param(argtype, 
args[i])
-                except (UnicodeError, TypeError, ValueError), e:
+                except (UnicodeError, TypeError, ValueError) as e:
                     raise ArgumentError(str(e))
                 keepalives.append(keepalive)
                 newargs.append(newarg)
@@ -586,7 +586,7 @@
             for i, arg in enumerate(extra):
                 try:
                     keepalive, newarg, newargtype = self._conv_param(None, arg)
-                except (UnicodeError, TypeError, ValueError), e:
+                except (UnicodeError, TypeError, ValueError) as e:
                     raise ArgumentError(str(e))
                 keepalives.append(keepalive)
                 newargs.append(newarg)
diff --git a/lib_pypy/_curses_build.py b/lib_pypy/_curses_build.py
--- a/lib_pypy/_curses_build.py
+++ b/lib_pypy/_curses_build.py
@@ -47,9 +47,9 @@
 ffi.cdef("""
 typedef ... WINDOW;
 typedef ... SCREEN;
-typedef unsigned long mmask_t;
+typedef unsigned long... mmask_t;
 typedef unsigned char bool;
-typedef unsigned long chtype;
+typedef unsigned long... chtype;
 typedef chtype attr_t;
 
 typedef struct
diff --git a/lib_pypy/_pwdgrp_build.py b/lib_pypy/_pwdgrp_build.py
--- a/lib_pypy/_pwdgrp_build.py
+++ b/lib_pypy/_pwdgrp_build.py
@@ -11,8 +11,8 @@
 
 ffi.cdef("""
 
-typedef int uid_t;
-typedef int gid_t;
+typedef int... uid_t;
+typedef int... gid_t;
 
 struct passwd {
     char *pw_name;
diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO
--- a/lib_pypy/cffi.egg-info/PKG-INFO
+++ b/lib_pypy/cffi.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 1.1
 Name: cffi
-Version: 1.0.4
+Version: 1.1.0
 Summary: Foreign Function Interface for Python calling C code.
 Home-page: http://cffi.readthedocs.org
 Author: Armin Rigo, Maciej Fijalkowski
diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py
--- a/lib_pypy/cffi/__init__.py
+++ b/lib_pypy/cffi/__init__.py
@@ -4,8 +4,8 @@
 from .api import FFI, CDefError, FFIError
 from .ffiplatform import VerificationError, VerificationMissing
 
-__version__ = "1.0.4"
-__version_info__ = (1, 0, 4)
+__version__ = "1.1.0"
+__version_info__ = (1, 1, 0)
 
 # The verifier module file names are based on the CRC32 of a string that
 # contains the following version number.  It may be older than __version__
diff --git a/lib_pypy/cffi/_cffi_include.h b/lib_pypy/cffi/_cffi_include.h
--- a/lib_pypy/cffi/_cffi_include.h
+++ b/lib_pypy/cffi/_cffi_include.h
@@ -208,13 +208,11 @@
 #define _cffi_array_len(array)   (sizeof(array) / sizeof((array)[0]))
 
 #define _cffi_prim_int(size, sign)                                      \
-    ((size) == sizeof(int) ? ((sign) ? _CFFI_PRIM_INT   : _CFFI_PRIM_UINT)   : 
\
-     (size) == sizeof(long)? ((sign) ? _CFFI_PRIM_LONG  : _CFFI_PRIM_ULONG)  : 
\
-     (size) == 1           ? ((sign) ? _CFFI_PRIM_INT8  : _CFFI_PRIM_UINT8)  : 
\
-     (size) == 2           ? ((sign) ? _CFFI_PRIM_INT16 : _CFFI_PRIM_UINT16) : 
\
-     (size) == 4           ? ((sign) ? _CFFI_PRIM_INT32 : _CFFI_PRIM_UINT32) : 
\
-     (size) == 8           ? ((sign) ? _CFFI_PRIM_INT64 : _CFFI_PRIM_UINT64) : 
\
-     0)
+    ((size) == 1 ? ((sign) ? _CFFI_PRIM_INT8  : _CFFI_PRIM_UINT8)  :    \
+     (size) == 2 ? ((sign) ? _CFFI_PRIM_INT16 : _CFFI_PRIM_UINT16) :    \
+     (size) == 4 ? ((sign) ? _CFFI_PRIM_INT32 : _CFFI_PRIM_UINT32) :    \
+     (size) == 8 ? ((sign) ? _CFFI_PRIM_INT64 : _CFFI_PRIM_UINT64) :    \
+     _CFFI__UNKNOWN_PRIM)
 
 #define _cffi_check_int(got, got_nonpos, expected)      \
     ((got_nonpos) == (expected <= 0) &&                 \
diff --git a/lib_pypy/cffi/cffi_opcode.py b/lib_pypy/cffi/cffi_opcode.py
--- a/lib_pypy/cffi/cffi_opcode.py
+++ b/lib_pypy/cffi/cffi_opcode.py
@@ -9,16 +9,16 @@
             assert isinstance(self.arg, str)
             return '(_cffi_opcode_t)(%s)' % (self.arg,)
         classname = CLASS_NAME[self.op]
-        return '_CFFI_OP(_CFFI_OP_%s, %d)' % (classname, self.arg)
+        return '_CFFI_OP(_CFFI_OP_%s, %s)' % (classname, self.arg)
 
     def as_python_bytes(self):
-        if self.op is None:
-            if self.arg.isdigit():
-                value = int(self.arg)     # non-negative: '-' not in self.arg
-                if value >= 2**31:
-                    raise OverflowError("cannot emit %r: limited to 2**31-1"
-                                        % (self.arg,))
-                return format_four_bytes(value)
+        if self.op is None and self.arg.isdigit():
+            value = int(self.arg)     # non-negative: '-' not in self.arg
+            if value >= 2**31:
+                raise OverflowError("cannot emit %r: limited to 2**31-1"
+                                    % (self.arg,))
+            return format_four_bytes(value)
+        if isinstance(self.arg, str):
             from .ffiplatform import VerificationError
             raise VerificationError("cannot emit to Python: %r" % (self.arg,))
         return format_four_bytes((self.arg << 8) | self.op)
@@ -52,6 +52,7 @@
 OP_CONSTANT_INT    = 31
 OP_GLOBAL_VAR      = 33
 OP_DLOPEN_FUNC     = 35
+OP_DLOPEN_CONST    = 37
 
 PRIM_VOID          = 0
 PRIM_BOOL          = 1
@@ -104,6 +105,7 @@
 PRIM_UINTMAX       = 47
 
 _NUM_PRIM          = 48
+_UNKNOWN_PRIM      = -1
 
 PRIMITIVE_TO_INDEX = {
     'char':               PRIM_CHAR,
diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py
--- a/lib_pypy/cffi/cparser.py
+++ b/lib_pypy/cffi/cparser.py
@@ -189,8 +189,8 @@
                         raise api.CDefError("typedef does not declare any 
name",
                                             decl)
                     if (isinstance(decl.type.type, 
pycparser.c_ast.IdentifierType)
-                            and decl.type.type.names == ['__dotdotdot__']):
-                        realtype = model.unknown_type(decl.name)
+                            and decl.type.type.names[-1] == '__dotdotdot__'):
+                        realtype = self._get_unknown_type(decl)
                     elif (isinstance(decl.type, pycparser.c_ast.PtrDecl) and
                           isinstance(decl.type.type, pycparser.c_ast.TypeDecl) 
and
                           isinstance(decl.type.type.type,
@@ -271,14 +271,12 @@
                 if tp.is_raw_function:
                     tp = self._get_type_pointer(tp)
                     self._declare('function ' + decl.name, tp)
-                elif (isinstance(tp, model.PrimitiveType) and
-                        tp.is_integer_type() and
+                elif (tp.is_integer_type() and
                         hasattr(decl, 'init') and
                         hasattr(decl.init, 'value') and
                         _r_int_literal.match(decl.init.value)):
                     self._add_integer_constant(decl.name, decl.init.value)
-                elif (isinstance(tp, model.PrimitiveType) and
-                        tp.is_integer_type() and
+                elif (tp.is_integer_type() and
                         isinstance(decl.init, pycparser.c_ast.UnaryOp) and
                         decl.init.op == '-' and
                         hasattr(decl.init.expr, 'value') and
@@ -338,7 +336,9 @@
             else:
                 length = self._parse_constant(
                     typenode.dim, partial_length_ok=partial_length_ok)
-            return model.ArrayType(self._get_type(typenode.type), length)
+            tp = self._get_type(typenode.type,
+                                partial_length_ok=(length == '...'))
+            return model.ArrayType(tp, length)
         #
         if isinstance(typenode, pycparser.c_ast.PtrDecl):
             # pointer type
@@ -639,3 +639,13 @@
                 self._declare(name, tp, included=True)
         for k, v in other._int_constants.items():
             self._add_constants(k, v)
+
+    def _get_unknown_type(self, decl):
+        typenames = decl.type.type.names
+        assert typenames[-1] == '__dotdotdot__'
+        if len(typenames) == 1:
+            return model.unknown_type(decl.name)
+        for t in typenames[:-1]:
+            if t not in ['int', 'short', 'long', 'signed', 'unsigned', 'char']:
+                raise api.FFIError(':%d: bad usage of "..."' % decl.coord.line)
+        return model.UnknownIntegerType(decl.name)
diff --git a/lib_pypy/cffi/model.py b/lib_pypy/cffi/model.py
--- a/lib_pypy/cffi/model.py
+++ b/lib_pypy/cffi/model.py
@@ -31,7 +31,10 @@
 
     def has_c_name(self):
         return '$' not in self._get_c_name()
-    
+
+    def is_integer_type(self):
+        return False
+
     def sizeof_enabled(self):
         return False
 
@@ -76,7 +79,12 @@
 void_type = VoidType()
 
 
-class PrimitiveType(BaseType):
+class BasePrimitiveType(BaseType):
+    def sizeof_enabled(self):
+        return True
+
+
+class PrimitiveType(BasePrimitiveType):
     _attrs_ = ('name',)
 
     ALL_PRIMITIVE_TYPES = {
@@ -142,11 +150,23 @@
     def is_float_type(self):
         return self.ALL_PRIMITIVE_TYPES[self.name] == 'f'
 
-    def sizeof_enabled(self):
-        return True
+    def build_backend_type(self, ffi, finishlist):
+        return global_cache(self, ffi, 'new_primitive_type', self.name)
+
+
+class UnknownIntegerType(BasePrimitiveType):
+    _attrs_ = ('name',)
+
+    def __init__(self, name):
+        self.name = name
+        self.c_name_with_marker = name + '&'
+
+    def is_integer_type(self):
+        return True    # for now
 
     def build_backend_type(self, ffi, finishlist):
-        return global_cache(self, ffi, 'new_primitive_type', self.name)
+        raise NotImplementedError("integer type '%s' can only be used after "
+                                  "compilation" % self.name)
 
 
 class BaseFunctionType(BaseType):
diff --git a/lib_pypy/cffi/parse_c_type.h b/lib_pypy/cffi/parse_c_type.h
--- a/lib_pypy/cffi/parse_c_type.h
+++ b/lib_pypy/cffi/parse_c_type.h
@@ -25,6 +25,7 @@
 #define _CFFI_OP_CONSTANT_INT   31
 #define _CFFI_OP_GLOBAL_VAR     33
 #define _CFFI_OP_DLOPEN_FUNC    35
+#define _CFFI_OP_DLOPEN_CONST   37
 
 #define _CFFI_PRIM_VOID          0
 #define _CFFI_PRIM_BOOL          1
@@ -77,6 +78,7 @@
 #define _CFFI_PRIM_UINTMAX      47
 
 #define _CFFI__NUM_PRIM         48
+#define _CFFI__UNKNOWN_PRIM    (-1)
 
 
 struct _cffi_global_s {
diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py
--- a/lib_pypy/cffi/recompiler.py
+++ b/lib_pypy/cffi/recompiler.py
@@ -11,7 +11,7 @@
 
 
 class GlobalExpr:
-    def __init__(self, name, address, type_op, size=0, check_value=None):
+    def __init__(self, name, address, type_op, size=0, check_value=0):
         self.name = name
         self.address = address
         self.type_op = type_op
@@ -23,11 +23,6 @@
             self.name, self.address, self.type_op.as_c_expr(), self.size)
 
     def as_python_expr(self):
-        if not isinstance(self.check_value, int_type):
-            raise ffiplatform.VerificationError(
-                "ffi.dlopen() will not be able to figure out the value of "
-                "constant %r (only integer constants are supported, and only "
-                "if their value are specified in the cdef)" % (self.name,))
         return "b'%s%s',%d" % (self.type_op.as_python_bytes(), self.name,
                                self.check_value)
 
@@ -149,7 +144,7 @@
                 self.cffi_types.append(tp)     # placeholder
                 for tp1 in tp.args:
                     assert isinstance(tp1, (model.VoidType,
-                                            model.PrimitiveType,
+                                            model.BasePrimitiveType,
                                             model.PointerType,
                                             model.StructOrUnionOrEnum,
                                             model.FunctionPtrType))
@@ -474,7 +469,7 @@
 
     def _convert_funcarg_to_c(self, tp, fromvar, tovar, errcode):
         extraarg = ''
-        if isinstance(tp, model.PrimitiveType):
+        if isinstance(tp, model.BasePrimitiveType):
             if tp.is_integer_type() and tp.name != '_Bool':
                 converter = '_cffi_to_c_int'
                 extraarg = ', %s' % tp.name
@@ -529,7 +524,7 @@
         self._prnt('  }')
 
     def _convert_expr_from_c(self, tp, var, context):
-        if isinstance(tp, model.PrimitiveType):
+        if isinstance(tp, model.BasePrimitiveType):
             if tp.is_integer_type():
                 return '_cffi_from_c_int(%s, %s)' % (var, tp.name)
             elif tp.name != 'long double':
@@ -747,7 +742,7 @@
             meth_kind = OP_CPYTHON_BLTN_V   # 'METH_VARARGS'
         self._lsts["global"].append(
             GlobalExpr(name, '_cffi_f_%s' % name,
-                       CffiOp(meth_kind, type_index), check_value=0,
+                       CffiOp(meth_kind, type_index),
                        size='_cffi_d_%s' % name))
 
     # ----------
@@ -758,7 +753,9 @@
             ptr_struct_name = tp_struct.get_c_name('*')
             actual_length = '_cffi_array_len(((%s)0)->%s)' % (
                 ptr_struct_name, field_name)
-            tp_field = tp_field.resolve_length(actual_length)
+            tp_item = self._field_type(tp_struct, '%s[0]' % field_name,
+                                       tp_field.item)
+            tp_field = model.ArrayType(tp_item, actual_length)
         return tp_field
 
     def _struct_collecttype(self, tp):
@@ -776,20 +773,19 @@
         prnt('  (void)p;')
         for fname, ftype, fbitsize in tp.enumfields():
             try:
-                if (isinstance(ftype, model.PrimitiveType)
-                    and ftype.is_integer_type()) or fbitsize >= 0:
+                if ftype.is_integer_type() or fbitsize >= 0:
                     # accept all integers, but complain on float or double
                     prnt('  (void)((p->%s) << 1);' % fname)
-                elif (isinstance(ftype, model.ArrayType)
-                      and (ftype.length is None or ftype.length == '...')):
-                    # for C++: "int(*)tmp[] = &p->a;" errors out if p->a is
-                    # declared as "int[5]".  Instead, write "int *tmp = p->a;".
-                    prnt('  { %s = p->%s; (void)tmp; }' % (
-                        ftype.item.get_c_name('*tmp', 'field %r'%fname), 
fname))
-                else:
-                    # only accept exactly the type declared.
-                    prnt('  { %s = &p->%s; (void)tmp; }' % (
-                        ftype.get_c_name('*tmp', 'field %r'%fname), fname))
+                    continue
+                # only accept exactly the type declared, except that '[]'
+                # is interpreted as a '*' and so will match any array length.
+                # (It would also match '*', but that's harder to detect...)
+                while (isinstance(ftype, model.ArrayType)
+                       and (ftype.length is None or ftype.length == '...')):
+                    ftype = ftype.item
+                    fname = fname + '[0]'
+                prnt('  { %s = &p->%s; (void)tmp; }' % (
+                    ftype.get_c_name('*tmp', 'field %r'%fname), fname))
             except ffiplatform.VerificationError as e:
                 prnt('  /* %s */' % str(e))   # cannot verify it, ignore
         prnt('}')
@@ -970,20 +966,28 @@
         prnt()
 
     def _generate_cpy_constant_collecttype(self, tp, name):
-        is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type()
-        if not is_int:
+        is_int = tp.is_integer_type()
+        if not is_int or self.target_is_python:
             self._do_collect_type(tp)
 
     def _generate_cpy_constant_decl(self, tp, name):
-        is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type()
+        is_int = tp.is_integer_type()
         self._generate_cpy_const(is_int, name, tp)
 
     def _generate_cpy_constant_ctx(self, tp, name):
-        if isinstance(tp, model.PrimitiveType) and tp.is_integer_type():
+        if not self.target_is_python and tp.is_integer_type():
             type_op = CffiOp(OP_CONSTANT_INT, -1)
         else:
+            if not tp.sizeof_enabled():
+                raise ffiplatform.VerificationError(
+                    "constant '%s' is of type '%s', whose size is not known"
+                    % (name, tp._get_c_name()))
+            if self.target_is_python:
+                const_kind = OP_DLOPEN_CONST
+            else:
+                const_kind = OP_CONSTANT
             type_index = self._typesdict[tp]
-            type_op = CffiOp(OP_CONSTANT, type_index)
+            type_op = CffiOp(const_kind, type_index)
         self._lsts["global"].append(
             GlobalExpr(name, '_cffi_const_%s' % name, type_op))
 
@@ -1034,6 +1038,10 @@
 
     def _generate_cpy_macro_ctx(self, tp, name):
         if tp == '...':
+            if self.target_is_python:
+                raise ffiplatform.VerificationError(
+                    "cannot use the syntax '...' in '#define %s ...' when "
+                    "using the ABI mode" % (name,))
             check_value = None
         else:
             check_value = tp     # an integer
@@ -1048,7 +1056,8 @@
     def _global_type(self, tp, global_name):
         if isinstance(tp, model.ArrayType) and tp.length == '...':
             actual_length = '_cffi_array_len(%s)' % (global_name,)
-            tp = tp.resolve_length(actual_length)
+            tp_item = self._global_type(tp.item, '%s[0]' % global_name)
+            tp = model.ArrayType(tp_item, actual_length)
         return tp
 
     def _generate_cpy_variable_collecttype(self, tp, name):
@@ -1066,7 +1075,7 @@
         else:
             size = 0
         self._lsts["global"].append(
-            GlobalExpr(name, '&%s' % name, type_op, size, 0))
+            GlobalExpr(name, '&%s' % name, type_op, size))
 
     # ----------
     # emitting the opcodes for individual types
@@ -1078,6 +1087,11 @@
         prim_index = PRIMITIVE_TO_INDEX[tp.name]
         self.cffi_types[index] = CffiOp(OP_PRIMITIVE, prim_index)
 
+    def _emit_bytecode_UnknownIntegerType(self, tp, index):
+        s = '_cffi_prim_int(sizeof(%s), (((%s)-1) << 0) <= 0)' % (
+            tp.name, tp.name)
+        self.cffi_types[index] = CffiOp(OP_PRIMITIVE, s)
+
     def _emit_bytecode_RawFunctionType(self, tp, index):
         self.cffi_types[index] = CffiOp(OP_FUNCTION, 
self._typesdict[tp.result])
         index += 1
diff --git a/lib_pypy/datetime.py b/lib_pypy/datetime.py
--- a/lib_pypy/datetime.py
+++ b/lib_pypy/datetime.py
@@ -816,9 +816,9 @@
             _MONTHNAMES[self._month],
             self._day, self._year)
 
-    def strftime(self, fmt):
+    def strftime(self, format):
         "Format using strftime()."
-        return _wrap_strftime(self, fmt, self.timetuple())
+        return _wrap_strftime(self, format, self.timetuple())
 
     def __format__(self, fmt):
         if not isinstance(fmt, (str, unicode)):
@@ -1308,7 +1308,7 @@
 
     __str__ = isoformat
 
-    def strftime(self, fmt):
+    def strftime(self, format):
         """Format using strftime().  The date part of the timestamp passed
         to underlying strftime should not be used.
         """
@@ -1317,7 +1317,7 @@
         timetuple = (1900, 1, 1,
                      self._hour, self._minute, self._second,
                      0, 1, -1)
-        return _wrap_strftime(self, fmt, timetuple)
+        return _wrap_strftime(self, format, timetuple)
 
     def __format__(self, fmt):
         if not isinstance(fmt, (str, unicode)):
@@ -1497,7 +1497,7 @@
         return self._tzinfo
 
     @classmethod
-    def fromtimestamp(cls, t, tz=None):
+    def fromtimestamp(cls, timestamp, tz=None):
         """Construct a datetime from a POSIX timestamp (like time.time()).
 
         A timezone info object may be passed in as well.
@@ -1507,12 +1507,12 @@
 
         converter = _time.localtime if tz is None else _time.gmtime
 
-        if isinstance(t, int):
+        if isinstance(timestamp, int):
             us = 0
         else:
-            t_full = t
-            t = int(_math.floor(t))
-            frac = t_full - t
+            t_full = timestamp
+            timestamp = int(_math.floor(timestamp))
+            frac = t_full - timestamp
             us = _round(frac * 1e6)
 
         # If timestamp is less than one microsecond smaller than a
@@ -1520,9 +1520,9 @@
         # roll over to seconds, otherwise, ValueError is raised
         # by the constructor.
         if us == 1000000:
-            t += 1
+            timestamp += 1
             us = 0
-        y, m, d, hh, mm, ss, weekday, jday, dst = converter(t)
+        y, m, d, hh, mm, ss, weekday, jday, dst = converter(timestamp)
         ss = min(ss, 59)    # clamp out leap seconds if the platform has them
         result = cls(y, m, d, hh, mm, ss, us, tz)
         if tz is not None:
diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst
--- a/pypy/doc/cpython_differences.rst
+++ b/pypy/doc/cpython_differences.rst
@@ -190,6 +190,11 @@
 just make sure there is a ``__del__`` method in the class to start with
 (even containing only ``pass``; replacing or overriding it later works fine).
 
+Last note: CPython tries to do a ``gc.collect()`` automatically when the
+program finishes; not PyPy.  (It is possible in both CPython and PyPy to
+design a case where several ``gc.collect()`` are needed before all objects
+die.  This makes CPython's approach only work "most of the time" anyway.)
+
 
 Subclasses of built-in types
 ----------------------------
@@ -364,6 +369,18 @@
   wrappers.  On PyPy we can't tell the difference, so
   ``ismethod([].__add__) == ismethod(list.__add__) == True``.
 
+* in pure Python, if you write ``class A(object): def f(self): pass``
+  and have a subclass ``B`` which doesn't override ``f()``, then
+  ``B.f(x)`` still checks that ``x`` is an instance of ``B``.  In
+  CPython, types written in C use a different rule.  If ``A`` is
+  written in C, any instance of ``A`` will be accepted by ``B.f(x)``
+  (and actually, ``B.f is A.f`` in this case).  Some code that could
+  work on CPython but not on PyPy includes:
+  ``datetime.datetime.strftime(datetime.date.today(), ...)`` (here,
+  ``datetime.date`` is the superclass of ``datetime.datetime``).
+  Anyway, the proper fix is arguably to use a regular method call in
+  the first place: ``datetime.date.today().strftime(...)``
+
 * the ``__dict__`` attribute of new-style classes returns a normal dict, as
   opposed to a dict proxy like in CPython. Mutating the dict will change the
   type and vice versa. For builtin types, a dictionary will be returned that
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -5,4 +5,10 @@
 .. this is a revision shortly after release-2.6.0
 .. startrev: 2ac87a870acf562301840cace411e34c1b96589c
 
+.. branch: fix-result-types
 
+branch fix-result-types:
+* Refactor dtype casting and promotion rules for consistency and compatibility
+with CNumPy.
+* Refactor ufunc creation.
+* Implement np.promote_types().
diff --git a/pypy/module/_cffi_backend/__init__.py 
b/pypy/module/_cffi_backend/__init__.py
--- a/pypy/module/_cffi_backend/__init__.py
+++ b/pypy/module/_cffi_backend/__init__.py
@@ -2,7 +2,7 @@
 from pypy.interpreter.mixedmodule import MixedModule
 from rpython.rlib import rdynload
 
-VERSION = "1.0.4"
+VERSION = "1.1.0"
 
 
 class Module(MixedModule):
diff --git a/pypy/module/_cffi_backend/cffi_opcode.py 
b/pypy/module/_cffi_backend/cffi_opcode.py
--- a/pypy/module/_cffi_backend/cffi_opcode.py
+++ b/pypy/module/_cffi_backend/cffi_opcode.py
@@ -52,6 +52,7 @@
 OP_CONSTANT_INT    = 31
 OP_GLOBAL_VAR      = 33
 OP_DLOPEN_FUNC     = 35
+OP_DLOPEN_CONST    = 37
 
 PRIM_VOID          = 0
 PRIM_BOOL          = 1
@@ -104,6 +105,7 @@
 PRIM_UINTMAX       = 47
 
 _NUM_PRIM          = 48
+_UNKNOWN_PRIM      = -1
 
 PRIMITIVE_TO_INDEX = {
     'char':               PRIM_CHAR,
diff --git a/pypy/module/_cffi_backend/lib_obj.py 
b/pypy/module/_cffi_backend/lib_obj.py
--- a/pypy/module/_cffi_backend/lib_obj.py
+++ b/pypy/module/_cffi_backend/lib_obj.py
@@ -122,18 +122,25 @@
                 w_result = realize_c_type.realize_global_int(self.ffi, g,
                                                              index)
                 #
-            elif op == cffi_opcode.OP_CONSTANT:
+            elif (op == cffi_opcode.OP_CONSTANT or
+                  op == cffi_opcode.OP_DLOPEN_CONST):
                 # A constant which is not of integer type
                 w_ct = realize_c_type.realize_c_type(
                     self.ffi, self.ctx.c_types, getarg(g.c_type_op))
                 fetch_funcptr = rffi.cast(
                     realize_c_type.FUNCPTR_FETCH_CHARP,
                     g.c_address)
-                assert fetch_funcptr
-                assert w_ct.size > 0
-                ptr = lltype.malloc(rffi.CCHARP.TO, w_ct.size, flavor='raw')
-                self.ffi._finalizer.free_mems.append(ptr)
-                fetch_funcptr(ptr)
+                if w_ct.size <= 0:
+                    raise oefmt(space.w_SystemError,
+                                "constant has no known size")
+                if not fetch_funcptr:   # for dlopen() style
+                    assert op == cffi_opcode.OP_DLOPEN_CONST
+                    ptr = self.cdlopen_fetch(attr)
+                else:
+                    assert op == cffi_opcode.OP_CONSTANT
+                    ptr = lltype.malloc(rffi.CCHARP.TO, w_ct.size, 
flavor='raw')
+                    self.ffi._finalizer.free_mems.append(ptr)
+                    fetch_funcptr(ptr)
                 w_result = w_ct.convert_to_object(ptr)
                 #
             elif op == cffi_opcode.OP_DLOPEN_FUNC:
diff --git a/pypy/module/_cffi_backend/realize_c_type.py 
b/pypy/module/_cffi_backend/realize_c_type.py
--- a/pypy/module/_cffi_backend/realize_c_type.py
+++ b/pypy/module/_cffi_backend/realize_c_type.py
@@ -69,19 +69,27 @@
         "intmax_t",
         "uintmax_t",
         ]
+    assert len(NAMES) == cffi_opcode._NUM_PRIM
+
     def __init__(self, space):
         self.all_primitives = [None] * cffi_opcode._NUM_PRIM
 
-def get_primitive_type(space, num):
+def get_primitive_type(ffi, num):
+    space = ffi.space
+    if not (0 <= num < cffi_opcode._NUM_PRIM):
+        if num == cffi_opcode._UNKNOWN_PRIM:
+            raise oefmt(ffi.w_FFIError, "primitive integer type with an "
+                        "unexpected size (or not an integer type at all)")
+        else:
+            raise oefmt(space.w_NotImplementedError, "prim=%d", num)
     realize_cache = space.fromcache(RealizeCache)
     w_ctype = realize_cache.all_primitives[num]
     if w_ctype is None:
         if num == cffi_opcode.PRIM_VOID:
             w_ctype = newtype.new_void_type(space)
-        elif 0 <= num < len(RealizeCache.NAMES) and RealizeCache.NAMES[num]:
+        else:
+            assert RealizeCache.NAMES[num]
             w_ctype = newtype.new_primitive_type(space, 
RealizeCache.NAMES[num])
-        else:
-            raise oefmt(space.w_NotImplementedError, "prim=%d", num)
         realize_cache.all_primitives[num] = w_ctype
     return w_ctype
 
@@ -296,7 +304,7 @@
         return ffi.cached_types[type_index] #found already in the "primary" 
slot
 
     space = ffi.space
-    w_basetd = get_primitive_type(space, rffi.getintfield(e, 'c_type_prim'))
+    w_basetd = get_primitive_type(ffi, rffi.getintfield(e, 'c_type_prim'))
 
     enumerators_w = []
     enumvalues_w = []
@@ -344,7 +352,7 @@
     case = getop(op)
 
     if case == cffi_opcode.OP_PRIMITIVE:
-        x = get_primitive_type(ffi.space, getarg(op))
+        x = get_primitive_type(ffi, getarg(op))
 
     elif case == cffi_opcode.OP_POINTER:
         y = realize_c_type_or_func(ffi, opcodes, getarg(op))
diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py 
b/pypy/module/_cffi_backend/test/_backend_test_c.py
--- a/pypy/module/_cffi_backend/test/_backend_test_c.py
+++ b/pypy/module/_cffi_backend/test/_backend_test_c.py
@@ -3335,4 +3335,4 @@
 
 def test_version():
     # this test is here mostly for PyPy
-    assert __version__ == "1.0.4"
+    assert __version__ == "1.1.0"
diff --git a/pypy/module/_cffi_backend/test/test_re_python.py 
b/pypy/module/_cffi_backend/test/test_re_python.py
--- a/pypy/module/_cffi_backend/test/test_re_python.py
+++ b/pypy/module/_cffi_backend/test/test_re_python.py
@@ -22,6 +22,8 @@
         #define BIGNEG -420000000000L
         int add42(int x) { return x + 42; }
         int globalvar42 = 1234;
+        const int globalconst42 = 4321;
+        const char *const globalconsthello = "hello";
         struct foo_s;
         typedef struct bar_s { int x; signed char a[]; } bar_t;
         enum foo_e { AA, BB, CC };
@@ -34,7 +36,8 @@
         c_file = tmpdir.join('_test_re_python.c')
         c_file.write(SRC)
         ext = ffiplatform.get_extension(str(c_file), '_test_re_python',
-                                        export_symbols=['add42', 
'globalvar42'])
+            export_symbols=['add42', 'globalvar42',
+                            'globalconst42', 'globalconsthello'])
         outputfilename = ffiplatform.compile(str(tmpdir), ext)
         cls.w_extmod = space.wrap(outputfilename)
         #mod.tmpdir = tmpdir
@@ -47,6 +50,8 @@
         #define BIGNEG -420000000000L
         int add42(int);
         int globalvar42;
+        const int globalconst42;
+        const char *const globalconsthello = "hello";
         int no_such_function(int);
         int no_such_globalvar;
         struct foo_s;
@@ -157,6 +162,18 @@
         p[0] -= 1
         assert lib.globalvar42 == 1238
 
+    def test_global_const_int(self):
+        from re_python_pysrc import ffi
+        lib = ffi.dlopen(self.extmod)
+        assert lib.globalconst42 == 4321
+        raises(AttributeError, ffi.addressof, lib, 'globalconst42')
+
+    def test_global_const_nonint(self):
+        from re_python_pysrc import ffi
+        lib = ffi.dlopen(self.extmod)
+        assert ffi.string(lib.globalconsthello, 8) == "hello"
+        raises(AttributeError, ffi.addressof, lib, 'globalconsthello')
+
     def test_rtld_constants(self):
         from re_python_pysrc import ffi
         ffi.RTLD_NOW    # check that we have the attributes
diff --git a/pypy/module/_cffi_backend/test/test_recompiler.py 
b/pypy/module/_cffi_backend/test/test_recompiler.py
--- a/pypy/module/_cffi_backend/test/test_recompiler.py
+++ b/pypy/module/_cffi_backend/test/test_recompiler.py
@@ -7,7 +7,8 @@
 
 
 @unwrap_spec(cdef=str, module_name=str, source=str)
-def prepare(space, cdef, module_name, source, w_includes=None):
+def prepare(space, cdef, module_name, source, w_includes=None,
+            w_extra_source=None):
     try:
         import cffi
         from cffi import FFI            # <== the system one, which
@@ -45,9 +46,13 @@
     ffi.emit_c_code(c_file)
 
     base_module_name = module_name.split('.')[-1]
+    sources = []
+    if w_extra_source is not None:
+        sources.append(space.str_w(w_extra_source))
     ext = ffiplatform.get_extension(c_file, module_name,
             include_dirs=[str(rdir)],
-            export_symbols=['_cffi_pypyinit_' + base_module_name])
+            export_symbols=['_cffi_pypyinit_' + base_module_name],
+            sources=sources)
     ffiplatform.compile(str(rdir), ext)
 
     for extension in ['so', 'pyd', 'dylib']:
@@ -79,6 +84,8 @@
         if cls.runappdirect:
             py.test.skip("not a test for -A")
         cls.w_prepare = cls.space.wrap(interp2app(prepare))
+        cls.w_udir = cls.space.wrap(str(udir))
+        cls.w_os_sep = cls.space.wrap(os.sep)
 
     def setup_method(self, meth):
         self._w_modules = self.space.appexec([], """():
@@ -849,3 +856,100 @@
         p = ffi.addressof(lib, 'globvar')
         assert ffi.typeof(p) == ffi.typeof('opaque_t *')
         assert ffi.string(ffi.cast("char *", p), 8) == "hello"
+
+    def test_constant_of_value_unknown_to_the_compiler(self):
+        extra_c_source = self.udir + self.os_sep + (
+            'extra_test_constant_of_value_unknown_to_the_compiler.c')
+        with open(extra_c_source, 'w') as f:
+            f.write('const int external_foo = 42;\n')
+        ffi, lib = self.prepare(
+            "const int external_foo;",
+            'test_constant_of_value_unknown_to_the_compiler',
+            "extern const int external_foo;",
+            extra_source=extra_c_source)
+        assert lib.external_foo == 42
+
+    def test_call_with_incomplete_structs(self):
+        ffi, lib = self.prepare(
+            "typedef struct {...;} foo_t; "
+            "foo_t myglob; "
+            "foo_t increment(foo_t s); "
+            "double getx(foo_t s);",
+            'test_call_with_incomplete_structs', """
+            typedef double foo_t;
+            double myglob = 42.5;
+            double getx(double x) { return x; }
+            double increment(double x) { return x + 1; }
+        """)
+        assert lib.getx(lib.myglob) == 42.5
+        assert lib.getx(lib.increment(lib.myglob)) == 43.5
+
+    def test_struct_array_guess_length_2(self):
+        ffi, lib = self.prepare(
+            "struct foo_s { int a[...][...]; };",
+            'test_struct_array_guess_length_2',
+            "struct foo_s { int x; int a[5][8]; int y; };")
+        assert ffi.sizeof('struct foo_s') == 42 * ffi.sizeof('int')
+        s = ffi.new("struct foo_s *")
+        assert ffi.sizeof(s.a) == 40 * ffi.sizeof('int')
+        assert s.a[4][7] == 0
+        raises(IndexError, 's.a[4][8]')
+        raises(IndexError, 's.a[5][0]')
+        assert ffi.typeof(s.a) == ffi.typeof("int[5][8]")
+        assert ffi.typeof(s.a[0]) == ffi.typeof("int[8]")
+
+    def test_global_var_array_2(self):
+        ffi, lib = self.prepare(
+            "int a[...][...];",
+            'test_global_var_array_2',
+            'int a[10][8];')
+        lib.a[9][7] = 123456
+        assert lib.a[9][7] == 123456
+        raises(IndexError, 'lib.a[0][8]')
+        raises(IndexError, 'lib.a[10][0]')
+        assert ffi.typeof(lib.a) == ffi.typeof("int[10][8]")
+        assert ffi.typeof(lib.a[0]) == ffi.typeof("int[8]")
+
+    def test_some_integer_type(self):
+        ffi, lib = self.prepare("""
+            typedef int... foo_t;
+            typedef unsigned long... bar_t;
+            typedef struct { foo_t a, b; } mystruct_t;
+            foo_t foobar(bar_t, mystruct_t);
+            static const bar_t mu = -20;
+            static const foo_t nu = 20;
+        """, 'test_some_integer_type', """
+            typedef unsigned long long foo_t;
+            typedef short bar_t;
+            typedef struct { foo_t a, b; } mystruct_t;
+            static foo_t foobar(bar_t x, mystruct_t s) {
+                return (foo_t)x + s.a + s.b;
+            }
+            static const bar_t mu = -20;
+            static const foo_t nu = 20;
+        """)
+        assert ffi.sizeof("foo_t") == ffi.sizeof("unsigned long long")
+        assert ffi.sizeof("bar_t") == ffi.sizeof("short")
+        maxulonglong = 2 ** 64 - 1
+        assert int(ffi.cast("foo_t", -1)) == maxulonglong
+        assert int(ffi.cast("bar_t", -1)) == -1
+        assert lib.foobar(-1, [0, 0]) == maxulonglong
+        assert lib.foobar(2 ** 15 - 1, [0, 0]) == 2 ** 15 - 1
+        assert lib.foobar(10, [20, 31]) == 61
+        assert lib.foobar(0, [0, maxulonglong]) == maxulonglong
+        raises(OverflowError, lib.foobar, 2 ** 15, [0, 0])
+        raises(OverflowError, lib.foobar, -(2 ** 15) - 1, [0, 0])
+        raises(OverflowError, ffi.new, "mystruct_t *", [0, -1])
+        assert lib.mu == -20
+        assert lib.nu == 20
+
+    def test_issue200(self):
+        ffi, lib = self.prepare("""
+            typedef void (function_t)(void*);
+            void function(void *);
+        """, 'test_issue200', """
+            static void function(void *p) { (void)p; }
+        """)
+        ffi.typeof('function_t*')
+        lib.function(ffi.NULL)
+        # assert did not crash
diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py
--- a/pypy/module/micronumpy/__init__.py
+++ b/pypy/module/micronumpy/__init__.py
@@ -24,6 +24,7 @@
         'result_type': 'casting.result_type',
         'can_cast': 'casting.can_cast',
         'min_scalar_type': 'casting.min_scalar_type',
+        'promote_types': 'casting.w_promote_types',
 
         'set_string_function': 'appbridge.set_string_function',
         'typeinfo': 'descriptor.get_dtype_cache(space).w_typeinfo',
diff --git a/pypy/module/micronumpy/arrayops.py b/pypy/module/micronumpy/arrayops.py
--- a/pypy/module/micronumpy/arrayops.py
+++ b/pypy/module/micronumpy/arrayops.py
@@ -1,11 +1,12 @@
 from pypy.interpreter.error import OperationError, oefmt
 from pypy.interpreter.gateway import unwrap_spec
-from pypy.module.micronumpy import loop, descriptor, ufuncs, support, \
-    constants as NPY
+from pypy.module.micronumpy import loop, descriptor, support
+from pypy.module.micronumpy import constants as NPY
 from pypy.module.micronumpy.base import convert_to_array, W_NDimArray
 from pypy.module.micronumpy.converters import clipmode_converter
 from pypy.module.micronumpy.strides import (
     Chunk, Chunks, shape_agreement, shape_agreement_multiple)
+from .casting import find_binop_result_dtype, find_result_type
 
 
 def where(space, w_arr, w_x=None, w_y=None):
@@ -84,8 +85,7 @@
         if arr.get_dtype().itemtype.bool(arr.get_scalar_value()):
             return x
         return y
-    dtype = ufuncs.find_binop_result_dtype(space, x.get_dtype(),
-                                                  y.get_dtype())
+    dtype = find_result_type(space, [x, y], [])
     shape = shape_agreement(space, arr.get_shape(), x)
     shape = shape_agreement(space, shape, y)
     out = W_NDimArray.from_shape(space, shape, dtype)
@@ -137,19 +137,8 @@
                 raise OperationError(space.w_ValueError, space.wrap(
                     "all the input array dimensions except for the "
                     "concatenation axis must match exactly"))
-        a_dt = arr.get_dtype()
-        if dtype.is_record() and a_dt.is_record():
-            # Record types must match
-            for f in dtype.fields:
-                if f not in a_dt.fields or \
-                             dtype.fields[f] != a_dt.fields[f]:
-                    raise OperationError(space.w_TypeError,
-                               space.wrap("invalid type promotion"))
-        elif dtype.is_record() or a_dt.is_record():
-            raise OperationError(space.w_TypeError,
-                        space.wrap("invalid type promotion"))
-        dtype = ufuncs.find_binop_result_dtype(space, dtype,
-                                                      arr.get_dtype())
+
+    dtype = find_result_type(space, args_w, [])
     # concatenate does not handle ndarray subtypes, it always returns a ndarray
     res = W_NDimArray.from_shape(space, shape, dtype, 'C')
     chunks = [Chunk(0, i, 1, i) for i in shape]
diff --git a/pypy/module/micronumpy/boxes.py b/pypy/module/micronumpy/boxes.py
--- a/pypy/module/micronumpy/boxes.py
+++ b/pypy/module/micronumpy/boxes.py
@@ -35,8 +35,8 @@
 def new_dtype_getter(num):
     @specialize.memo()
     def _get_dtype(space):
-        from pypy.module.micronumpy.descriptor import get_dtype_cache
-        return get_dtype_cache(space).dtypes_by_num[num]
+        from pypy.module.micronumpy.descriptor import num2dtype
+        return num2dtype(space, num)
 
     def descr__new__(space, w_subtype, w_value=None):
         from pypy.module.micronumpy.ctors import array
@@ -144,7 +144,7 @@
         return self
 
     def get_flags(self):
-        return (NPY.ARRAY_C_CONTIGUOUS | NPY.ARRAY_F_CONTIGUOUS | 
+        return (NPY.ARRAY_C_CONTIGUOUS | NPY.ARRAY_F_CONTIGUOUS |
                 NPY.ARRAY_WRITEABLE | NPY.ARRAY_OWNDATA)
 
     def item(self, space):
@@ -180,10 +180,11 @@
 
     def descr_getitem(self, space, w_item):
         from pypy.module.micronumpy.base import convert_to_array
-        if space.is_w(w_item, space.w_Ellipsis) or \
-                (space.isinstance_w(w_item, space.w_tuple) and
+        if space.is_w(w_item, space.w_Ellipsis):
+            return convert_to_array(space, self)
+        elif (space.isinstance_w(w_item, space.w_tuple) and
                     space.len_w(w_item) == 0):
-            return convert_to_array(space, self)
+            return self
         raise OperationError(space.w_IndexError, space.wrap(
             "invalid index to scalar variable"))
 
@@ -239,7 +240,7 @@
 
     # TODO: support all kwargs in ufuncs like numpy ufunc_object.c
     sig = None
-    cast = None
+    cast = 'unsafe'
     extobj = None
 
     def _unaryop_impl(ufunc_name):
@@ -578,7 +579,9 @@
         try:
             ofs, dtype = self.dtype.fields[item]
         except KeyError:
-            raise oefmt(space.w_ValueError, "field named %s not found", item)
+            raise oefmt(space.w_IndexError, "only integers, slices (`:`), "
+                "ellipsis (`...`), numpy.newaxis (`None`) and integer or "
+                "boolean arrays are valid indices")
         dtype.itemtype.store(self.arr, self.ofs, ofs,
                              dtype.coerce(space, w_value))
 
diff --git a/pypy/module/micronumpy/casting.py b/pypy/module/micronumpy/casting.py
--- a/pypy/module/micronumpy/casting.py
+++ b/pypy/module/micronumpy/casting.py
@@ -1,16 +1,19 @@
 """Functions and helpers for converting between dtypes"""
 
 from rpython.rlib import jit
+from rpython.rlib.signature import signature, types as ann
 from pypy.interpreter.gateway import unwrap_spec
-from pypy.interpreter.error import oefmt
+from pypy.interpreter.error import oefmt, OperationError
 
 from pypy.module.micronumpy.base import W_NDimArray, convert_to_array
 from pypy.module.micronumpy import constants as NPY
-from pypy.module.micronumpy.ufuncs import (
-    find_binop_result_dtype, find_dtype_for_scalar)
 from .types import (
-    Bool, ULong, Long, Float64, Complex64, UnicodeType, VoidType, ObjectType)
-from .descriptor import get_dtype_cache, as_dtype, is_scalar_w
+    BaseType, Bool, ULong, Long, Float64, Complex64,
+    StringType, UnicodeType, VoidType, ObjectType,
+    int_types, float_types, complex_types, number_types, all_types)
+from .descriptor import (
+    W_Dtype, get_dtype_cache, as_dtype, is_scalar_w, variable_dtype,
+    new_string_dtype, new_unicode_dtype, num2dtype)
 
 @jit.unroll_safe
 def result_type(space, __args__):
@@ -21,12 +24,96 @@
     if not args_w:
         raise oefmt(space.w_ValueError,
             "at least one array or dtype is required")
+    arrays_w = []
+    dtypes_w = []
+    for w_arg in args_w:
+        if isinstance(w_arg, W_NDimArray):
+            arrays_w.append(w_arg)
+        elif is_scalar_w(space, w_arg):
+            w_scalar = as_scalar(space, w_arg)
+            w_arr = W_NDimArray.from_scalar(space, w_scalar)
+            arrays_w.append(w_arr)
+        else:
+            dtype = as_dtype(space, w_arg)
+            dtypes_w.append(dtype)
+    return find_result_type(space, arrays_w, dtypes_w)
+
+
+def find_result_type(space, arrays_w, dtypes_w):
+    # equivalent to PyArray_ResultType
+    if len(arrays_w) == 1 and not dtypes_w:
+        return arrays_w[0].get_dtype()
+    elif not arrays_w and len(dtypes_w) == 1:
+        return dtypes_w[0]
     result = None
-    for w_arg in args_w:
-        dtype = as_dtype(space, w_arg)
-        result = find_binop_result_dtype(space, result, dtype)
+    if not _use_min_scalar(arrays_w, dtypes_w):
+        for w_array in arrays_w:
+            if result is None:
+                result = w_array.get_dtype()
+            else:
+                result = promote_types(space, result, w_array.get_dtype())
+        for dtype in dtypes_w:
+            if result is None:
+                result = dtype
+            else:
+                result = promote_types(space, result, dtype)
+    else:
+        small_unsigned = False
+        for w_array in arrays_w:
+            dtype = w_array.get_dtype()
+            small_unsigned_scalar = False
+            if w_array.is_scalar() and dtype.is_number():
+                num, alt_num = w_array.get_scalar_value().min_dtype()
+                small_unsigned_scalar = (num != alt_num)
+                dtype = num2dtype(space, num)
+            if result is None:
+                result = dtype
+                small_unsigned = small_unsigned_scalar
+            else:
+                result, small_unsigned = _promote_types_su(
+                    space, result, dtype,
+                    small_unsigned, small_unsigned_scalar)
+        for dtype in dtypes_w:
+            if result is None:
+                result = dtype
+                small_unsigned = False
+            else:
+                result, small_unsigned = _promote_types_su(
+                    space, result, dtype,
+                    small_unsigned, False)
     return result
 
+simple_kind_ordering = {
+    Bool.kind: 0, ULong.kind: 1, Long.kind: 1,
+    Float64.kind: 2, Complex64.kind: 2,
+    NPY.STRINGLTR: 3, NPY.STRINGLTR2: 3,
+    UnicodeType.kind: 3, VoidType.kind: 3, ObjectType.kind: 3}
+
+def _use_min_scalar(arrays_w, dtypes_w):
+    """Helper for find_result_type()"""
+    if not arrays_w:
+        return False
+    all_scalars = True
+    max_scalar_kind = 0
+    max_array_kind = 0
+    for w_array in arrays_w:
+        if w_array.is_scalar():
+            kind = simple_kind_ordering[w_array.get_dtype().kind]
+            if kind > max_scalar_kind:
+                max_scalar_kind = kind
+        else:
+            all_scalars = False
+            kind = simple_kind_ordering[w_array.get_dtype().kind]
+            if kind > max_array_kind:
+                max_array_kind = kind
+    for dtype in dtypes_w:
+        all_scalars = False
+        kind = simple_kind_ordering[dtype.kind]
+        if kind > max_array_kind:
+            max_array_kind = kind
+    return not all_scalars and max_array_kind >= max_scalar_kind
+
+
 @unwrap_spec(casting=str)
 def can_cast(space, w_from, w_totype, casting='safe'):
     try:
@@ -56,6 +143,11 @@
 
 def can_cast_type(space, origin, target, casting):
     # equivalent to PyArray_CanCastTypeTo
+    if origin == target:
+        return True
+    if origin.is_record() or target.is_record():
+        return can_cast_record(space, origin, target, casting)
+
     if casting == 'no':
         return origin.eq(space, target)
     elif casting == 'equiv':
@@ -63,13 +155,29 @@
     elif casting == 'unsafe':
         return True
     elif casting == 'same_kind':
-        if origin.can_cast_to(target):
+        if can_cast_to(origin, target):
             return True
         if origin.kind in kind_ordering and target.kind in kind_ordering:
             return kind_ordering[origin.kind] <= kind_ordering[target.kind]
         return False
-    else:
-        return origin.can_cast_to(target)
+    else:  # 'safe'
+        return can_cast_to(origin, target)
+
+def can_cast_record(space, origin, target, casting):
+    if origin is target:
+        return True
+    if origin.fields is None or target.fields is None:
+        return False
+    if len(origin.fields) != len(target.fields):
+        return False
+    for name, (offset, orig_field) in origin.fields.iteritems():
+        if name not in target.fields:
+            return False
+        target_field = target.fields[name][1]
+        if not can_cast_type(space, orig_field, target_field, casting):
+            return False
+    return True
+
 
 def can_cast_array(space, w_from, target, casting):
     # equivalent to PyArray_CanCastArrayTo
@@ -91,11 +199,11 @@
     dtypenum, altnum = value.min_dtype()
     if target.is_unsigned():
         dtypenum = altnum
-    dtype = get_dtype_cache(space).dtypes_by_num[dtypenum]
+    dtype = num2dtype(space, dtypenum)
     return can_cast_type(space, dtype, target, casting)
 
 def as_scalar(space, w_obj):
-    dtype = find_dtype_for_scalar(space, w_obj)
+    dtype = scalar2dtype(space, w_obj)
     return dtype.coerce(space, w_obj)
 
 def min_scalar_type(space, w_a):
@@ -103,6 +211,231 @@
     dtype = w_array.get_dtype()
     if w_array.is_scalar() and dtype.is_number():
         num, alt_num = w_array.get_scalar_value().min_dtype()
-        return get_dtype_cache(space).dtypes_by_num[num]
+        return num2dtype(space, num)
     else:
         return dtype
+
+def w_promote_types(space, w_type1, w_type2):
+    dt1 = as_dtype(space, w_type1, allow_None=False)
+    dt2 = as_dtype(space, w_type2, allow_None=False)
+    return promote_types(space, dt1, dt2)
+
+def find_binop_result_dtype(space, dt1, dt2):
+    if dt2 is None:
+        return dt1
+    if dt1 is None:
+        return dt2
+    return promote_types(space, dt1, dt2)
+
+def promote_types(space, dt1, dt2):
+    """Return the smallest dtype to which both input dtypes can be safely cast"""
+    # Equivalent to PyArray_PromoteTypes
+    num = promotion_table[dt1.num][dt2.num]
+    if num != -1:
+        return num2dtype(space, num)
+
+    # dt1.num should be <= dt2.num
+    if dt1.num > dt2.num:
+        dt1, dt2 = dt2, dt1
+
+    if dt2.is_str():
+        if dt1.is_str():
+            if dt1.elsize > dt2.elsize:
+                return dt1
+            else:
+                return dt2
+        else:  # dt1 is numeric
+            dt1_size = dt1.itemtype.strlen
+            if dt1_size > dt2.elsize:
+                return new_string_dtype(space, dt1_size)
+            else:
+                return dt2
+    elif dt2.is_unicode():
+        if dt1.is_unicode():
+            if dt1.elsize > dt2.elsize:
+                return dt1
+            else:
+                return dt2
+        elif dt1.is_str():
+            if dt2.elsize >= 4 * dt1.elsize:
+                return dt2
+            else:
+                return new_unicode_dtype(space, dt1.elsize)
+        else:  # dt1 is numeric
+            dt1_size = dt1.itemtype.strlen
+            if 4 * dt1_size > dt2.elsize:
+                return new_unicode_dtype(space, dt1_size)
+            else:
+                return dt2
+    else:
+        assert dt2.num == NPY.VOID
+        if can_cast_type(space, dt1, dt2, casting='equiv'):
+            return dt1
+    raise oefmt(space.w_TypeError, "invalid type promotion")
+
+def _promote_types_su(space, dt1, dt2, su1, su2):
+    """Like promote_types(), but handles the small_unsigned flag as well"""
+    if su1:
+        if dt2.is_bool() or dt2.is_unsigned():
+            dt1 = dt1.as_unsigned(space)
+        else:
+            dt1 = dt1.as_signed(space)
+    elif su2:
+        if dt1.is_bool() or dt1.is_unsigned():
+            dt2 = dt2.as_unsigned(space)
+        else:
+            dt2 = dt2.as_signed(space)
+    if dt1.elsize < dt2.elsize:
+        su = su2 and (su1 or not dt1.is_signed())
+    elif dt1.elsize == dt2.elsize:
+        su = su1 and su2
+    else:
+        su = su1 and (su2 or not dt2.is_signed())
+    return promote_types(space, dt1, dt2), su
+
+def scalar2dtype(space, w_obj):
+    from .boxes import W_GenericBox
+    bool_dtype = get_dtype_cache(space).w_booldtype
+    long_dtype = get_dtype_cache(space).w_longdtype
+    int64_dtype = get_dtype_cache(space).w_int64dtype
+    uint64_dtype = get_dtype_cache(space).w_uint64dtype
+    complex_dtype = get_dtype_cache(space).w_complex128dtype
+    float_dtype = get_dtype_cache(space).w_float64dtype
+    object_dtype = get_dtype_cache(space).w_objectdtype
+    if isinstance(w_obj, W_GenericBox):
+        return w_obj.get_dtype(space)
+
+    if space.isinstance_w(w_obj, space.w_bool):
+        return bool_dtype
+    elif space.isinstance_w(w_obj, space.w_int):
+        return long_dtype
+    elif space.isinstance_w(w_obj, space.w_long):
+        try:
+            space.int_w(w_obj)
+        except OperationError, e:
+            if e.match(space, space.w_OverflowError):
+                if space.is_true(space.le(w_obj, space.wrap(0))):
+                    return int64_dtype
+                return uint64_dtype
+            raise
+        return int64_dtype
+    elif space.isinstance_w(w_obj, space.w_float):
+        return float_dtype
+    elif space.isinstance_w(w_obj, space.w_complex):
+        return complex_dtype
+    elif space.isinstance_w(w_obj, space.w_str):
+        return variable_dtype(space, 'S%d' % space.len_w(w_obj))
+    return object_dtype
+
+@signature(ann.instance(W_Dtype), ann.instance(W_Dtype), returns=ann.bool())
+def can_cast_to(dt1, dt2):
+    """Return whether dtype `dt1` can be cast safely to `dt2`"""
+    # equivalent to PyArray_CanCastTo
+    from .casting import can_cast_itemtype
+    result = can_cast_itemtype(dt1.itemtype, dt2.itemtype)
+    if result:
+        if dt1.num == NPY.STRING:
+            if dt2.num == NPY.STRING:
+                return dt1.elsize <= dt2.elsize
+            elif dt2.num == NPY.UNICODE:
+                return dt1.elsize * 4 <= dt2.elsize
+        elif dt1.num == NPY.UNICODE and dt2.num == NPY.UNICODE:
+            return dt1.elsize <= dt2.elsize
+        elif dt2.num in (NPY.STRING, NPY.UNICODE):
+            if dt2.num == NPY.STRING:
+                char_size = 1
+            else:  # NPY.UNICODE
+                char_size = 4
+            if dt2.elsize == 0:
+                return True
+            if dt1.is_int():
+                return dt2.elsize >= dt1.itemtype.strlen * char_size
+    return result
+
+
+@signature(ann.instance(BaseType), ann.instance(BaseType), returns=ann.bool())
+def can_cast_itemtype(tp1, tp2):
+    # equivalent to PyArray_CanCastSafely
+    return casting_table[tp1.num][tp2.num]
+
+#_________________________
+
+
+casting_table = [[False] * NPY.NTYPES for _ in range(NPY.NTYPES)]
+
+def enable_cast(type1, type2):
+    casting_table[type1.num][type2.num] = True
+
+def _can_cast(type1, type2):
+    """NOT_RPYTHON: operates on BaseType subclasses"""
+    return casting_table[type1.num][type2.num]
+
+for tp in all_types:
+    enable_cast(tp, tp)
+    if tp.num != NPY.DATETIME:
+        enable_cast(Bool, tp)
+    enable_cast(tp, ObjectType)
+    enable_cast(tp, VoidType)
+enable_cast(StringType, UnicodeType)
+#enable_cast(Bool, TimeDelta)
+
+for tp in number_types:
+    enable_cast(tp, StringType)
+    enable_cast(tp, UnicodeType)
+
+for tp1 in int_types:
+    for tp2 in int_types:
+        if tp1.signed:
+            if tp2.signed and tp1.basesize() <= tp2.basesize():
+                enable_cast(tp1, tp2)
+        else:
+            if tp2.signed and tp1.basesize() < tp2.basesize():
+                enable_cast(tp1, tp2)
+            elif not tp2.signed and tp1.basesize() <= tp2.basesize():
+                enable_cast(tp1, tp2)
+for tp1 in int_types:
+    for tp2 in float_types + complex_types:
+        size1 = tp1.basesize()
+        size2 = tp2.basesize()
+        if (size1 < 8 and size2 > size1) or (size1 >= 8 and size2 >= size1):
+            enable_cast(tp1, tp2)
+for tp1 in float_types:
+    for tp2 in float_types + complex_types:
+        if tp1.basesize() <= tp2.basesize():
+            enable_cast(tp1, tp2)
+for tp1 in complex_types:
+    for tp2 in complex_types:
+        if tp1.basesize() <= tp2.basesize():
+            enable_cast(tp1, tp2)
+
+promotion_table = [[-1] * NPY.NTYPES for _ in range(NPY.NTYPES)]
+def promotes(tp1, tp2, tp3):
+    if tp3 is None:
+        num = -1
+    else:
+        num = tp3.num
+    promotion_table[tp1.num][tp2.num] = num
+
+
+for tp in all_types:
+    promotes(tp, ObjectType, ObjectType)
+    promotes(ObjectType, tp, ObjectType)
+
+for tp1 in [Bool] + number_types:
+    for tp2 in [Bool] + number_types:
+        if tp1 is tp2:
+            promotes(tp1, tp1, tp1)
+        elif _can_cast(tp1, tp2):
+            promotes(tp1, tp2, tp2)
+        elif _can_cast(tp2, tp1):
+            promotes(tp1, tp2, tp1)
+        else:
+            # Brute-force search for the least upper bound
+            result = None
+            for tp3 in number_types:
+                if _can_cast(tp1, tp3) and _can_cast(tp2, tp3):
+                    if result is None:
+                        result = tp3
+                    elif _can_cast(tp3, result) and not _can_cast(result, tp3):
+                        result = tp3
+            promotes(tp1, tp2, result)
diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py
--- a/pypy/module/micronumpy/concrete.py
+++ b/pypy/module/micronumpy/concrete.py
@@ -207,7 +207,7 @@
                     raise ArrayArgumentException
             return self._lookup_by_index(space, view_w)
         if shape_len == 0:
-            raise oefmt(space.w_IndexError, "0-d arrays can't be indexed")
+            raise oefmt(space.w_IndexError, "too many indices for array")
         elif shape_len > 1:
             raise IndexError
         idx = support.index_w(space, w_idx)
@@ -218,7 +218,11 @@
         if space.isinstance_w(w_idx, space.w_str):
             idx = space.str_w(w_idx)
             dtype = self.dtype
-            if not dtype.is_record() or idx not in dtype.fields:
+            if not dtype.is_record():
+                raise oefmt(space.w_IndexError, "only integers, slices (`:`), "
+                    "ellipsis (`...`), numpy.newaxis (`None`) and integer or "
+                    "boolean arrays are valid indices")
+            elif idx not in dtype.fields:
                raise oefmt(space.w_ValueError, "field named %s not found", idx)
             return RecordChunk(idx)
         elif (space.isinstance_w(w_idx, space.w_int) or
diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py
--- a/pypy/module/micronumpy/ctors.py
+++ b/pypy/module/micronumpy/ctors.py
@@ -64,8 +64,8 @@
     #print 'create view from shape',shape,'dtype',dtype,'descr',w_descr,'data',data_w[0],'rw',rw
     raise oefmt(space.w_NotImplementedError,
                 "creating array from __array_interface__ not supported yet")
-    return 
-    
+    return
+
 
 @unwrap_spec(ndmin=int, copy=bool, subok=bool)
 def array(space, w_object, w_dtype=None, copy=True, w_order=None, subok=False,
@@ -114,9 +114,9 @@
             elif not copy and (subok or type(w_object) is W_NDimArray):
                 return w_object
         if subok and not type(w_object) is W_NDimArray:
-            raise oefmt(space.w_NotImplementedError, 
+            raise oefmt(space.w_NotImplementedError,
                 "array(..., subok=True) only partially implemented")
-        # we have a ndarray, but need to copy or change dtype 
+        # we have a ndarray, but need to copy or change dtype
         if dtype is None:
             dtype = w_object.get_dtype()
         if dtype != w_object.get_dtype():
@@ -126,7 +126,7 @@
             shape = w_object.get_shape()
             w_arr = W_NDimArray.from_shape(space, shape, dtype, order=order)
             if support.product(shape) == 1:
-                w_arr.set_scalar_value(dtype.coerce(space, 
+                w_arr.set_scalar_value(dtype.coerce(space,
                         w_object.implementation.getitem(0)))
             else:
                loop.setslice(space, shape, w_arr.implementation, w_object.implementation)
@@ -137,13 +137,13 @@
             with imp as storage:
                 sz = support.product(w_object.get_shape()) * dtype.elsize
                 return W_NDimArray.from_shape_and_storage(space,
-                    w_object.get_shape(), storage, dtype, storage_bytes=sz, 
+                    w_object.get_shape(), storage, dtype, storage_bytes=sz,
                     w_base=w_base, start=imp.start)
     else:
         # not an array
         shape, elems_w = strides.find_shape_and_elems(space, w_object, dtype)
     if dtype is None or (dtype.is_str_or_unicode() and dtype.elsize < 1):
-        dtype = strides.find_dtype_for_seq(space, elems_w, dtype)
+        dtype = find_dtype_for_seq(space, elems_w, dtype)
         if dtype is None:
             dtype = descriptor.get_dtype_cache(space).w_float64dtype
         elif dtype.is_str_or_unicode() and dtype.elsize < 1:
@@ -170,7 +170,7 @@
         return w_array
 
     shape, elems_w = strides.find_shape_and_elems(space, w_object, None)
-    dtype = strides.find_dtype_for_seq(space, elems_w, None)
+    dtype = find_dtype_for_seq(space, elems_w, None)
     if dtype is None:
         dtype = descriptor.get_dtype_cache(space).w_float64dtype
     elif dtype.is_str_or_unicode() and dtype.elsize < 1:
@@ -184,6 +184,21 @@
         loop.assign(space, w_arr, elems_w)
         return w_arr
 
+def _dtype_guess(space, dtype, w_elem):
+    from .casting import scalar2dtype, find_binop_result_dtype
+    if isinstance(w_elem, W_NDimArray) and w_elem.is_scalar():
+        w_elem = w_elem.get_scalar_value()
+    elem_dtype = scalar2dtype(space, w_elem)
+    return find_binop_result_dtype(space, elem_dtype, dtype)
+
+def find_dtype_for_seq(space, elems_w, dtype):
+    if len(elems_w) == 1:
+        w_elem = elems_w[0]
+        return _dtype_guess(space, dtype, w_elem)
+    for w_elem in elems_w:
+        dtype = _dtype_guess(space, dtype, w_elem)
+    return dtype
+
 
 def _zeros_or_empty(space, w_shape, w_dtype, w_order, zero):
     dtype = space.interp_w(descriptor.W_Dtype,
@@ -359,5 +374,5 @@
         return a
     else:
         writable = not buf.readonly
-    return W_NDimArray.from_shape_and_storage(space, [n], storage, storage_bytes=s, 
+    return W_NDimArray.from_shape_and_storage(space, [n], storage, storage_bytes=s,
                                 dtype=dtype, w_base=w_buffer, writable=writable)
diff --git a/pypy/module/micronumpy/descriptor.py b/pypy/module/micronumpy/descriptor.py
--- a/pypy/module/micronumpy/descriptor.py
+++ b/pypy/module/micronumpy/descriptor.py
@@ -8,7 +8,6 @@
 from rpython.rlib import jit
 from rpython.rlib.objectmodel import specialize, compute_hash, we_are_translated
 from rpython.rlib.rarithmetic import r_longlong, r_ulonglong
-from rpython.rlib.signature import finishsigs, signature, types as ann
 from pypy.module.micronumpy import types, boxes, support, constants as NPY
 from .base import W_NDimArray
 from pypy.module.micronumpy.appbridge import get_appbridge_cache
@@ -29,22 +28,18 @@
     """ agree on dtype from a list of arrays. if out is allocated,
     use its dtype, otherwise allocate a new one with agreed dtype
     """
-    from pypy.module.micronumpy.ufuncs import find_binop_result_dtype
+    from .casting import find_result_type
 
     if not space.is_none(out):
         return out
-    dtype = None
-    for w_arr in w_arr_list:
-        if not space.is_none(w_arr):
-            dtype = find_binop_result_dtype(space, dtype, w_arr.get_dtype())
+    arr_w = [w_arr for w_arr in w_arr_list if not space.is_none(w_arr)]
+    dtype = find_result_type(space, arr_w, [])
     assert dtype is not None
     out = W_NDimArray.from_shape(space, shape, dtype)
     return out
 
 
-_REQ_STRLEN = [0, 3, 5, 10, 10, 20, 20, 20, 20]  # data for can_cast_to()
 
-@finishsigs
 class W_Dtype(W_Root):
     _immutable_fields_ = [
         "itemtype?", "w_box_type", "byteorder?", "names?", "fields?",
@@ -98,41 +93,6 @@
     def box_complex(self, real, imag):
         return self.itemtype.box_complex(real, imag)
 
-    @signature(ann.self(), ann.self(), returns=ann.bool())
-    def can_cast_to(self, other):
-        # equivalent to PyArray_CanCastTo
-        result = self.itemtype.can_cast_to(other.itemtype)
-        if result:
-            if self.num == NPY.STRING:
-                if other.num == NPY.STRING:
-                    return self.elsize <= other.elsize
-                elif other.num == NPY.UNICODE:
-                    return self.elsize * 4 <= other.elsize
-            elif self.num == NPY.UNICODE and other.num == NPY.UNICODE:
-                return self.elsize <= other.elsize
-            elif other.num in (NPY.STRING, NPY.UNICODE):
-                if other.num == NPY.STRING:
-                    char_size = 1
-                else:  # NPY.UNICODE
-                    char_size = 4
-                if other.elsize == 0:
-                    return True
-                if self.is_bool():
-                    return other.elsize >= 5 * char_size
-                elif self.is_unsigned():
-                    if self.elsize > 8 or self.elsize < 0:
-                        return False
-                    else:
-                        return (other.elsize >=
-                                _REQ_STRLEN[self.elsize] * char_size)
-                elif self.is_signed():
-                    if self.elsize > 8 or self.elsize < 0:
-                        return False
-                    else:
-                        return (other.elsize >=
-                                (_REQ_STRLEN[self.elsize] + 1) * char_size)
-        return result
-
     def coerce(self, space, w_item):
         return self.itemtype.coerce(space, self, w_item)
 
@@ -161,6 +121,9 @@
     def is_str(self):
         return self.num == NPY.STRING
 
+    def is_unicode(self):
+        return self.num == NPY.UNICODE
+
     def is_object(self):
         return self.num == NPY.OBJECT
 
@@ -176,6 +139,20 @@
     def is_native(self):
         return self.byteorder in (NPY.NATIVE, NPY.NATBYTE)
 
+    def as_signed(self, space):
+        """Convert from an unsigned integer dtype to its signed partner"""
+        if self.is_unsigned():
+            return num2dtype(space, self.num - 1)
+        else:
+            return self
+
+    def as_unsigned(self, space):
+        """Convert from a signed integer dtype to its unsigned partner"""
+        if self.is_signed():
+            return num2dtype(space, self.num + 1)
+        else:
+            return self
+
     def get_float_dtype(self, space):
         assert self.is_complex()
         dtype = get_dtype_cache(space).component_dtypes[self.num]
@@ -309,20 +286,24 @@
         return space.wrap(not self.eq(space, w_other))
 
     def descr_le(self, space, w_other):
+        from .casting import can_cast_to
         w_other = as_dtype(space, w_other)
-        return space.wrap(self.can_cast_to(w_other))
+        return space.wrap(can_cast_to(self, w_other))
 
     def descr_ge(self, space, w_other):
+        from .casting import can_cast_to
         w_other = as_dtype(space, w_other)
-        return space.wrap(w_other.can_cast_to(self))
+        return space.wrap(can_cast_to(w_other, self))
 
     def descr_lt(self, space, w_other):
+        from .casting import can_cast_to
         w_other = as_dtype(space, w_other)
-        return space.wrap(self.can_cast_to(w_other) and not self.eq(space, w_other))
+        return space.wrap(can_cast_to(self, w_other) and not self.eq(space, w_other))
 
     def descr_gt(self, space, w_other):
+        from .casting import can_cast_to
         w_other = as_dtype(space, w_other)
-        return space.wrap(w_other.can_cast_to(self) and not self.eq(space, w_other))
+        return space.wrap(can_cast_to(w_other, self) and not self.eq(space, w_other))
 
     def _compute_hash(self, space, x):
         from rpython.rlib.rarithmetic import intmask
@@ -861,8 +842,8 @@
             NPY.UBYTE:       ['ubyte'],
             NPY.SHORT:       ['short'],
             NPY.USHORT:      ['ushort'],
-            NPY.LONG:        ['int', 'intp', 'p'],
-            NPY.ULONG:       ['uint', 'uintp', 'P'],
+            NPY.LONG:        ['int'],
+            NPY.ULONG:       ['uint'],
             NPY.LONGLONG:    ['longlong'],
             NPY.ULONGLONG:   ['ulonglong'],
             NPY.FLOAT:       ['single'],
@@ -904,17 +885,20 @@
             NPY.CDOUBLE:     self.w_float64dtype,
             NPY.CLONGDOUBLE: self.w_floatlongdtype,
         }
-        self.builtin_dtypes = [
-            self.w_booldtype,
+        integer_dtypes = [
             self.w_int8dtype, self.w_uint8dtype,
             self.w_int16dtype, self.w_uint16dtype,
+            self.w_int32dtype, self.w_uint32dtype,
             self.w_longdtype, self.w_ulongdtype,
-            self.w_int32dtype, self.w_uint32dtype,
-            self.w_int64dtype, self.w_uint64dtype,
-            ] + float_dtypes + complex_dtypes + [
-            self.w_stringdtype, self.w_unicodedtype, self.w_voiddtype,
-            self.w_objectdtype,
-        ]
+            self.w_int64dtype, self.w_uint64dtype]
+        self.builtin_dtypes = ([self.w_booldtype] + integer_dtypes +
+            float_dtypes + complex_dtypes + [
+                self.w_stringdtype, self.w_unicodedtype, self.w_voiddtype,
+                self.w_objectdtype,
+            ])
+        self.integer_dtypes = integer_dtypes
+        self.float_dtypes = float_dtypes
+        self.complex_dtypes = complex_dtypes
         self.float_dtypes_by_num_bytes = sorted(
             (dtype.elsize, dtype)
             for dtype in float_dtypes
@@ -923,7 +907,9 @@
         self.dtypes_by_name = {}
         # we reverse, so the stuff with lower numbers override stuff with
         # higher numbers
-        for dtype in reversed(self.builtin_dtypes):
+        # However, Long/ULong always take precedence over Intxx
+        for dtype in reversed(
+                [self.w_longdtype, self.w_ulongdtype] + self.builtin_dtypes):
             dtype.fields = None  # mark these as builtin
             self.dtypes_by_num[dtype.num] = dtype
             self.dtypes_by_name[dtype.get_name()] = dtype
@@ -936,6 +922,14 @@
             if dtype.num in aliases:
                 for alias in aliases[dtype.num]:
                     self.dtypes_by_name[alias] = dtype
+        if self.w_longdtype.elsize == self.w_int32dtype.elsize:
+            intp_dtype = self.w_int32dtype
+            uintp_dtype = self.w_uint32dtype
+        else:
+            intp_dtype = self.w_longdtype
+            uintp_dtype = self.w_ulongdtype
+        self.dtypes_by_name['p'] = self.dtypes_by_name['intp'] = intp_dtype
+        self.dtypes_by_name['P'] = self.dtypes_by_name['uintp'] = uintp_dtype
 
         typeinfo_full = {
             'LONGLONG': self.w_int64dtype,
@@ -1012,16 +1006,19 @@
 def get_dtype_cache(space):
     return space.fromcache(DtypeCache)
 
+@jit.elidable
+def num2dtype(space, num):
+    return get_dtype_cache(space).dtypes_by_num[num]
+
 def as_dtype(space, w_arg, allow_None=True):
-    from pypy.module.micronumpy.ufuncs import find_dtype_for_scalar
+    from pypy.module.micronumpy.casting import scalar2dtype
     # roughly equivalent to CNumPy's PyArray_DescrConverter2
     if not allow_None and space.is_none(w_arg):
         raise TypeError("Cannot create dtype from None here")
     if isinstance(w_arg, W_NDimArray):
         return w_arg.get_dtype()
     elif is_scalar_w(space, w_arg):
-        result = find_dtype_for_scalar(space, w_arg)
-        assert result is not None  # XXX: not guaranteed
+        result = scalar2dtype(space, w_arg)
         return result
     else:
         return space.interp_w(W_Dtype,
diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py
--- a/pypy/module/micronumpy/loop.py
+++ b/pypy/module/micronumpy/loop.py
@@ -18,35 +18,7 @@
     greens=['shapelen', 'func', 'calc_dtype', 'res_dtype'],
     reds='auto')
 
-def call2(space, shape, func, calc_dtype, res_dtype, w_lhs, w_rhs, out):
-    # handle array_priority
-    # w_lhs and w_rhs could be of different ndarray subtypes. Numpy does:
-    # 1. if __array_priorities__ are equal and one is an ndarray and the
-    #        other is a subtype,  return a subtype
-    # 2. elif rhs.__array_priority__ is higher, return the type of rhs
-
-    w_ndarray = space.gettypefor(W_NDimArray)
-    lhs_type = space.type(w_lhs)
-    rhs_type = space.type(w_rhs)
-    lhs_for_subtype = w_lhs
-    rhs_for_subtype = w_rhs
-    #it may be something like a FlatIter, which is not an ndarray
-    if not space.is_true(space.issubtype(lhs_type, w_ndarray)):
-        lhs_type = space.type(w_lhs.base)
-        lhs_for_subtype = w_lhs.base
-    if not space.is_true(space.issubtype(rhs_type, w_ndarray)):
-        rhs_type = space.type(w_rhs.base)
-        rhs_for_subtype = w_rhs.base
-
-    w_highpriority = w_lhs
-    highpriority_subtype = lhs_for_subtype
-    if space.is_w(lhs_type, w_ndarray) and not space.is_w(rhs_type, w_ndarray):
-        highpriority_subtype = rhs_for_subtype
-        w_highpriority = w_rhs
-    if support.is_rhs_priority_higher(space, w_lhs, w_rhs):
-        highpriority_subtype = rhs_for_subtype
-        w_highpriority = w_rhs
-
+def call2(space, shape, func, calc_dtype, w_lhs, w_rhs, out):
     if w_lhs.get_size() == 1:
         w_left = w_lhs.get_scalar_value().convert_to(space, calc_dtype)
         left_iter = left_state = None
@@ -63,13 +35,9 @@
         right_iter, right_state = w_rhs.create_iter(shape)
         right_iter.track_index = False
 
-    if out is None:
-        w_ret = W_NDimArray.from_shape(space, shape, res_dtype,
-                                     w_instance=highpriority_subtype)
-    else:
-        w_ret = out
-    out_iter, out_state = w_ret.create_iter(shape)
+    out_iter, out_state = out.create_iter(shape)
     shapelen = len(shape)
+    res_dtype = out.get_dtype()
     while not out_iter.done(out_state):
         call2_driver.jit_merge_point(shapelen=shapelen, func=func,
                                      calc_dtype=calc_dtype, res_dtype=res_dtype)
@@ -82,25 +50,19 @@
         out_iter.setitem(out_state, func(calc_dtype, w_left, w_right).convert_to(
             space, res_dtype))
         out_state = out_iter.next(out_state)
-    if out is None:
-        w_ret = space.call_method(w_highpriority, '__array_wrap__', w_ret)
-    return w_ret
+    return out
 
 call1_driver = jit.JitDriver(
     name='numpy_call1',
     greens=['shapelen', 'func', 'calc_dtype', 'res_dtype'],
     reds='auto')
 
-def call1(space, shape, func, calc_dtype, res_dtype, w_obj, out):
+def call1(space, shape, func, calc_dtype, w_obj, w_ret):
     obj_iter, obj_state = w_obj.create_iter(shape)
     obj_iter.track_index = False
-
-    if out is None:
-        w_ret = W_NDimArray.from_shape(space, shape, res_dtype, w_instance=w_obj)
-    else:
-        w_ret = out
     out_iter, out_state = w_ret.create_iter(shape)
     shapelen = len(shape)
+    res_dtype = w_ret.get_dtype()
     while not out_iter.done(out_state):
         call1_driver.jit_merge_point(shapelen=shapelen, func=func,
                                      calc_dtype=calc_dtype, res_dtype=res_dtype)
@@ -108,8 +70,6 @@
         out_iter.setitem(out_state, func(calc_dtype, elem).convert_to(space, res_dtype))
         out_state = out_iter.next(out_state)
         obj_state = obj_iter.next(obj_state)
-    if out is None:
-        w_ret = space.call_method(w_obj, '__array_wrap__', w_ret)
     return w_ret
 
 call_many_to_one_driver = jit.JitDriver(
@@ -181,7 +141,7 @@
             vals[i] = in_iters[i].getitem(in_states[i])
         w_arglist = space.newlist(vals)
         w_outvals = space.call_args(func, Arguments.frompacked(space, w_arglist))
-        # w_outvals should be a tuple, but func can return a single value as well 
+        # w_outvals should be a tuple, but func can return a single value as well
         if space.isinstance_w(w_outvals, space.w_tuple):
             batch = space.listview(w_outvals)
             for i in range(len(batch)):
@@ -254,9 +214,10 @@
         obj_state = obj_iter.next(obj_state)
     return cur_value
 
-reduce_cum_driver = jit.JitDriver(name='numpy_reduce_cum_driver',
-                                  greens = ['shapelen', 'func', 'dtype'],
-                                  reds = 'auto')
+reduce_cum_driver = jit.JitDriver(
+    name='numpy_reduce_cum_driver',
+    greens=['shapelen', 'func', 'dtype', 'out_dtype'],
+    reds='auto')
 
 def compute_reduce_cumulative(space, obj, out, calc_dtype, func, identity):
     obj_iter, obj_state = obj.create_iter()
@@ -270,12 +231,14 @@
     else:
         cur_value = identity.convert_to(space, calc_dtype)
     shapelen = len(obj.get_shape())
+    out_dtype = out.get_dtype()
     while not obj_iter.done(obj_state):
-        reduce_cum_driver.jit_merge_point(shapelen=shapelen, func=func,
-                                          dtype=calc_dtype)
+        reduce_cum_driver.jit_merge_point(
+            shapelen=shapelen, func=func,
+            dtype=calc_dtype, out_dtype=out_dtype)
         rval = obj_iter.getitem(obj_state).convert_to(space, calc_dtype)
         cur_value = func(calc_dtype, cur_value, rval)
-        out_iter.setitem(out_state, cur_value)
+        out_iter.setitem(out_state, out_dtype.coerce(space, cur_value))
         out_state = out_iter.next(out_state)
         obj_state = obj_iter.next(obj_state)
 
diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py
--- a/pypy/module/micronumpy/ndarray.py
+++ b/pypy/module/micronumpy/ndarray.py
@@ -100,10 +100,10 @@
 
     def getitem_filter(self, space, arr):
         if arr.ndims() > 1 and arr.get_shape() != self.get_shape():
-            raise OperationError(space.w_ValueError, space.wrap(
+            raise OperationError(space.w_IndexError, space.wrap(
                 "boolean index array should have 1 dimension"))
         if arr.get_size() > self.get_size():
-            raise OperationError(space.w_ValueError, space.wrap(
+            raise OperationError(space.w_IndexError, space.wrap(
                 "index out of range for array"))
         size = loop.count_all_true(arr)
         if arr.ndims() == 1:
@@ -116,10 +116,10 @@
 
     def setitem_filter(self, space, idx, val):
         if idx.ndims() > 1 and idx.get_shape() != self.get_shape():
-            raise OperationError(space.w_ValueError, space.wrap(
+            raise OperationError(space.w_IndexError, space.wrap(
                 "boolean index array should have 1 dimension"))
         if idx.get_size() > self.get_size():
-            raise OperationError(space.w_ValueError, space.wrap(
+            raise OperationError(space.w_IndexError, space.wrap(
                 "index out of range for array"))
         size = loop.count_all_true(idx)
         if size > val.get_size() and val.get_size() != 1:
@@ -205,9 +205,13 @@
     def descr_getitem(self, space, w_idx):
         if space.is_w(w_idx, space.w_Ellipsis):
             return self
-        elif isinstance(w_idx, W_NDimArray) and w_idx.get_dtype().is_bool() \
-                and w_idx.ndims() > 0:
-            w_ret = self.getitem_filter(space, w_idx)
+        elif isinstance(w_idx, W_NDimArray) and w_idx.get_dtype().is_bool():
+            if w_idx.ndims() > 0:
+                w_ret = self.getitem_filter(space, w_idx)
+            else:
+                raise oefmt(space.w_IndexError,
+                        "in the future, 0-d boolean arrays will be "
+                        "interpreted as a valid boolean index")
         else:
             try:
                 w_ret = self.implementation.descr_getitem(space, self, w_idx)
@@ -896,7 +900,7 @@
     # --------------------- operations ----------------------------
     # TODO: support all kwargs like numpy ufunc_object.c
     sig = None
-    cast = None
+    cast = 'unsafe'
     extobj = None
 
 
@@ -1013,6 +1017,7 @@
         return space.newtuple([w_quotient, w_remainder])
 
     def descr_dot(self, space, w_other, w_out=None):
+        from .casting import find_result_type
         if space.is_none(w_out):
             out = None
         elif not isinstance(w_out, W_NDimArray):
@@ -1027,8 +1032,7 @@
             w_res = self.descr_mul(space, other)
_______________________________________________
pypy-commit mailing list
pypy-commit@python.org
https://mail.python.org/mailman/listinfo/pypy-commit

Reply via email to