Author: mattip <matti.pi...@gmail.com>
Branch: numpy-fixes
Changeset: r77265:bd891c231bf7
Date: 2015-05-09 23:26 +0300
http://bitbucket.org/pypy/pypy/changeset/bd891c231bf7/

Log:    merge default into branch

diff too long, truncating to 2000 out of 2015 lines

diff --git a/lib_pypy/_functools.py b/lib_pypy/_functools.py
--- a/lib_pypy/_functools.py
+++ b/lib_pypy/_functools.py
@@ -8,16 +8,16 @@
     partial(func, *args, **keywords) - new function with partial application
     of the given arguments and keywords.
     """
-
-    def __init__(self, *args, **keywords):
-        if not args:
-            raise TypeError('__init__() takes at least 2 arguments (1 given)')
-        func, args = args[0], args[1:]
+    def __init__(*args, **keywords):
+        if len(args) < 2:
+            raise TypeError('__init__() takes at least 2 arguments (%d given)'
+                            % len(args))
+        self, func, args = args[0], args[1], args[2:]
         if not callable(func):
             raise TypeError("the first argument must be callable")
         self._func = func
         self._args = args
-        self._keywords = keywords or None
+        self._keywords = keywords
 
     def __delattr__(self, key):
         if key == '__dict__':
@@ -37,19 +37,22 @@
         return self._keywords
 
     def __call__(self, *fargs, **fkeywords):
-        if self.keywords is not None:
-            fkeywords = dict(self.keywords, **fkeywords)
-        return self.func(*(self.args + fargs), **fkeywords)
+        if self._keywords:
+            fkeywords = dict(self._keywords, **fkeywords)
+        return self._func(*(self._args + fargs), **fkeywords)
 
     def __reduce__(self):
         d = dict((k, v) for k, v in self.__dict__.iteritems() if k not in
                 ('_func', '_args', '_keywords'))
         if len(d) == 0:
             d = None
-        return (type(self), (self.func,),
-                (self.func, self.args, self.keywords, d))
+        return (type(self), (self._func,),
+                (self._func, self._args, self._keywords, d))
 
     def __setstate__(self, state):
-        self._func, self._args, self._keywords, d = state
+        func, args, keywords, d = state
         if d is not None:
             self.__dict__.update(d)
+        self._func = func
+        self._args = args
+        self._keywords = keywords
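An illustrative sketch (not part of the diff above) of why __init__ now unpacks *args by hand: with the old signature, a caller-supplied keyword literally named "self" collided with the bound parameter, while the new form leaves every keyword free for the wrapped callable.

    from _functools import partial   # the pure-Python module patched above;
                                     # functools.partial behaves the same way

    def report(**kw):
        return sorted(kw.items())

    # With the old signature this raised:
    #   TypeError: __init__() got multiple values for keyword argument 'self'
    p = partial(report, self=1)
    assert p(extra=2) == [('extra', 2), ('self', 1)]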
diff --git a/lib_pypy/gdbm.py b/lib_pypy/gdbm.py
--- a/lib_pypy/gdbm.py
+++ b/lib_pypy/gdbm.py
@@ -1,4 +1,6 @@
 import cffi, os, sys
+import thread
+_lock = thread.allocate_lock()
 
 ffi = cffi.FFI()
 ffi.cdef('''
@@ -40,6 +42,7 @@
 
 try:
     verify_code = '''
+    #include <stdlib.h>
     #include "gdbm.h"
 
     static datum pygdbm_fetch(GDBM_FILE gdbm_file, char *dptr, int dsize) {
@@ -86,101 +89,121 @@
     return {'dptr': ffi.new("char[]", key), 'dsize': len(key)}
 
 class gdbm(object):
-    ll_dbm = None
+    __ll_dbm = None
+
+    # All public methods need to acquire the lock; all private methods
+    # assume the lock is already held.  Thus public methods cannot call
+    # other public methods.
 
     def __init__(self, filename, iflags, mode):
-        res = lib.gdbm_open(filename, 0, iflags, mode, ffi.NULL)
-        self.size = -1
-        if not res:
-            self._raise_from_errno()
-        self.ll_dbm = res
+        with _lock:
+            res = lib.gdbm_open(filename, 0, iflags, mode, ffi.NULL)
+            self.__size = -1
+            if not res:
+                self.__raise_from_errno()
+            self.__ll_dbm = res
 
     def close(self):
-        if self.ll_dbm:
-            lib.gdbm_close(self.ll_dbm)
-            self.ll_dbm = None
+        with _lock:
+            if self.__ll_dbm:
+                lib.gdbm_close(self.__ll_dbm)
+                self.__ll_dbm = None
 
-    def _raise_from_errno(self):
+    def __raise_from_errno(self):
         if ffi.errno:
             raise error(ffi.errno, os.strerror(ffi.errno))
         raise error(lib.gdbm_errno, lib.gdbm_strerror(lib.gdbm_errno))
 
     def __len__(self):
-        if self.size < 0:
-            self.size = len(self.keys())
-        return self.size
+        with _lock:
+            if self.__size < 0:
+                self.__size = len(self.__keys())
+            return self.__size
 
     def __setitem__(self, key, value):
-        self._check_closed()
-        self._size = -1
-        r = lib.gdbm_store(self.ll_dbm, _fromstr(key), _fromstr(value),
-                           lib.GDBM_REPLACE)
-        if r < 0:
-            self._raise_from_errno()
+        with _lock:
+            self.__check_closed()
+            self.__size = -1
+            r = lib.gdbm_store(self.__ll_dbm, _fromstr(key), _fromstr(value),
+                               lib.GDBM_REPLACE)
+            if r < 0:
+                self.__raise_from_errno()
 
     def __delitem__(self, key):
-        self._check_closed()
-        res = lib.gdbm_delete(self.ll_dbm, _fromstr(key))
-        if res < 0:
-            raise KeyError(key)
+        with _lock:
+            self.__check_closed()
+            self.__size = -1
+            res = lib.gdbm_delete(self.__ll_dbm, _fromstr(key))
+            if res < 0:
+                raise KeyError(key)
 
     def __contains__(self, key):
-        self._check_closed()
-        key = _checkstr(key)
-        return lib.pygdbm_exists(self.ll_dbm, key, len(key))
+        with _lock:
+            self.__check_closed()
+            key = _checkstr(key)
+            return lib.pygdbm_exists(self.__ll_dbm, key, len(key))
     has_key = __contains__
 
     def __getitem__(self, key):
-        self._check_closed()
-        key = _checkstr(key)        
-        drec = lib.pygdbm_fetch(self.ll_dbm, key, len(key))
-        if not drec.dptr:
-            raise KeyError(key)
-        res = str(ffi.buffer(drec.dptr, drec.dsize))
-        lib.free(drec.dptr)
-        return res
+        with _lock:
+            self.__check_closed()
+            key = _checkstr(key)
+            drec = lib.pygdbm_fetch(self.__ll_dbm, key, len(key))
+            if not drec.dptr:
+                raise KeyError(key)
+            res = str(ffi.buffer(drec.dptr, drec.dsize))
+            lib.free(drec.dptr)
+            return res
 
-    def keys(self):
-        self._check_closed()
+    def __keys(self):
+        self.__check_closed()
         l = []
-        key = lib.gdbm_firstkey(self.ll_dbm)
+        key = lib.gdbm_firstkey(self.__ll_dbm)
         while key.dptr:
             l.append(str(ffi.buffer(key.dptr, key.dsize)))
-            nextkey = lib.gdbm_nextkey(self.ll_dbm, key)
+            nextkey = lib.gdbm_nextkey(self.__ll_dbm, key)
             lib.free(key.dptr)
             key = nextkey
         return l
 
+    def keys(self):
+        with _lock:
+            return self.__keys()
+
     def firstkey(self):
-        self._check_closed()
-        key = lib.gdbm_firstkey(self.ll_dbm)
-        if key.dptr:
-            res = str(ffi.buffer(key.dptr, key.dsize))
-            lib.free(key.dptr)
-            return res
+        with _lock:
+            self.__check_closed()
+            key = lib.gdbm_firstkey(self.__ll_dbm)
+            if key.dptr:
+                res = str(ffi.buffer(key.dptr, key.dsize))
+                lib.free(key.dptr)
+                return res
 
     def nextkey(self, key):
-        self._check_closed()
-        key = lib.gdbm_nextkey(self.ll_dbm, _fromstr(key))
-        if key.dptr:
-            res = str(ffi.buffer(key.dptr, key.dsize))
-            lib.free(key.dptr)
-            return res
+        with _lock:
+            self.__check_closed()
+            key = lib.gdbm_nextkey(self.__ll_dbm, _fromstr(key))
+            if key.dptr:
+                res = str(ffi.buffer(key.dptr, key.dsize))
+                lib.free(key.dptr)
+                return res
 
     def reorganize(self):
-        self._check_closed()
-        if lib.gdbm_reorganize(self.ll_dbm) < 0:
-            self._raise_from_errno()
+        with _lock:
+            self.__check_closed()
+            if lib.gdbm_reorganize(self.__ll_dbm) < 0:
+                self.__raise_from_errno()
 
-    def _check_closed(self):
-        if not self.ll_dbm:
+    def __check_closed(self):
+        if not self.__ll_dbm:
             raise error(0, "GDBM object has already been closed")
 
     __del__ = close
 
     def sync(self):
-        self._check_closed()
-        lib.gdbm_sync(self.ll_dbm)
+        with _lock:
+            self.__check_closed()
+            lib.gdbm_sync(self.__ll_dbm)
 
 def open(filename, flags='r', mode=0666):
     if flags[0] == 'r':
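The comment added near the top of the class states the locking discipline the whole file now follows: public methods take the module-level lock, double-underscore helpers assume it is already held, so public methods never call other public methods. A minimal sketch of that convention with generic names (not the gdbm wrapper itself):

    import thread                      # Python 2 module, as used by the diff
    _lock = thread.allocate_lock()     # module-level, non-reentrant

    class Store(object):
        def __init__(self):
            self.__data = {}

        def __keys(self):              # private: assumes _lock is already held
            return list(self.__data)

        def keys(self):                # public: takes the lock, then delegates
            with _lock:
                return self.__keys()

        def __len__(self):             # public: must call __keys(), never keys(),
            with _lock:                # or the non-reentrant lock would deadlock
                return len(self.__keys())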
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -74,3 +74,10 @@
 
 .. branch: jit_hint_docs
 Add more detail to @jit.elidable and @jit.promote in rpython/rlib/jit.py
+
+.. branch: remove-frame-debug-attrs
+Remove the debug attributes from frames only used for tracing and replace
+them with a debug object that is created on-demand
+
+.. branch: can_cast
+Implement np.can_cast, np.min_scalar_type and missing dtype comparison operations.
diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py
--- a/pypy/interpreter/executioncontext.py
+++ b/pypy/interpreter/executioncontext.py
@@ -288,7 +288,6 @@
         # field of all frames, during the loop below.)
         frame = self.gettopframe_nohidden()
         while frame:
-            frame.getorcreatedebug().f_lineno = frame.get_last_lineno()
             if is_being_profiled:
                 frame.getorcreatedebug().is_being_profiled = True
             frame = self.getnextframe_nohidden(frame)
diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py
--- a/pypy/interpreter/pyframe.py
+++ b/pypy/interpreter/pyframe.py
@@ -34,6 +34,9 @@
     is_being_profiled        = False
     w_locals                 = None
 
+    def __init__(self, pycode):
+        self.f_lineno = pycode.co_firstlineno
+
 class PyFrame(W_Root):
     """Represents a frame for a regular Python function
     that needs to be interpreted.
@@ -106,7 +109,7 @@
 
     def getorcreatedebug(self):
         if self.debugdata is None:
-            self.debugdata = FrameDebugData()
+            self.debugdata = FrameDebugData(self.pycode)
         return self.debugdata
 
     def get_w_f_trace(self):
@@ -822,7 +825,7 @@
         else:
             d = self.getorcreatedebug()
             d.w_f_trace = w_trace
-            d = self.get_last_lineno()
+            d.f_lineno = self.get_last_lineno()
 
     def fdel_f_trace(self, space):
         self.getorcreatedebug().w_f_trace = None
diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py
--- a/pypy/interpreter/pyopcode.py
+++ b/pypy/interpreter/pyopcode.py
@@ -1015,7 +1015,11 @@
         if w_import is None:
             raise OperationError(space.w_ImportError,
                                  space.wrap("__import__ not found"))
-        w_locals = self.getorcreatedebug().w_locals
+        d = self.getdebug()
+        if d is None:
+            w_locals = None
+        else:
+            w_locals = d.w_locals
         if w_locals is None:            # CPython does this
             w_locals = space.w_None
         w_modulename = space.wrap(modulename)
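Taken together, the executioncontext/pyframe/pyopcode hunks implement the remove-frame-debug-attrs branch noted in whatsnew: debug-only attributes live on a FrameDebugData object created lazily, and read-only paths use an accessor that may return None so they never force the object into existence. A condensed, illustrative sketch of the pattern (not the real PyFrame):

    class FrameDebugData(object):
        w_locals = None
        w_f_trace = None
        is_being_profiled = False

        def __init__(self, pycode):
            self.f_lineno = pycode.co_firstlineno

    class Frame(object):
        debugdata = None

        def __init__(self, pycode):
            self.pycode = pycode

        def getdebug(self):               # read path: never allocates
            return self.debugdata

        def getorcreatedebug(self):       # write path: allocates on demand
            if self.debugdata is None:
                self.debugdata = FrameDebugData(self.pycode)
            return self.debugdata

The IMPORT_NAME hunk above follows the read path: it consults getdebug() and falls back to None rather than creating debug data just to look up w_locals.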
diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py
--- a/pypy/interpreter/typedef.py
+++ b/pypy/interpreter/typedef.py
@@ -536,7 +536,7 @@
     __objclass__ = GetSetProperty(GetSetProperty.descr_get_objclass),
     __doc__ = interp_attrproperty('doc', cls=GetSetProperty),
     )
-GetSetProperty.typedef.acceptable_as_base_class = False
+assert not GetSetProperty.typedef.acceptable_as_base_class  # no __new__
 
 
 class Member(W_Root):
@@ -590,7 +590,7 @@
     __name__ = interp_attrproperty('name', cls=Member),
     __objclass__ = interp_attrproperty_w('w_cls', cls=Member),
     )
-Member.typedef.acceptable_as_base_class = False
+assert not Member.typedef.acceptable_as_base_class  # no __new__
 
 # ____________________________________________________________
 
@@ -706,7 +706,7 @@
     co_flags = GetSetProperty(fget_co_flags, cls=Code),
     co_consts = GetSetProperty(fget_co_consts, cls=Code),
     )
-Code.typedef.acceptable_as_base_class = False
+assert not Code.typedef.acceptable_as_base_class  # no __new__
 
 BuiltinCode.typedef = TypeDef('builtin-code',
     __reduce__   = interp2app(BuiltinCode.descr__reduce__),
@@ -716,7 +716,7 @@
     co_flags = GetSetProperty(fget_co_flags, cls=BuiltinCode),
     co_consts = GetSetProperty(fget_co_consts, cls=BuiltinCode),
     )
-BuiltinCode.typedef.acceptable_as_base_class = False
+assert not BuiltinCode.typedef.acceptable_as_base_class  # no __new__
 
 
 PyCode.typedef = TypeDef('code',
@@ -761,7 +761,7 @@
     f_locals = GetSetProperty(PyFrame.fget_getdictscope),
     f_globals = interp_attrproperty_w('w_globals', cls=PyFrame),
 )
-PyFrame.typedef.acceptable_as_base_class = False
+assert not PyFrame.typedef.acceptable_as_base_class  # no __new__
 
 Module.typedef = TypeDef("module",
     __new__ = interp2app(Module.descr_module__new__.im_func),
@@ -907,7 +907,7 @@
     tb_lineno = GetSetProperty(PyTraceback.descr_tb_lineno),
     tb_next = interp_attrproperty('next', cls=PyTraceback),
     )
-PyTraceback.typedef.acceptable_as_base_class = False
+assert not PyTraceback.typedef.acceptable_as_base_class  # no __new__
 
 GeneratorIterator.typedef = TypeDef("generator",
     __repr__   = interp2app(GeneratorIterator.descr__repr__),
@@ -929,7 +929,7 @@
     __name__   = GetSetProperty(GeneratorIterator.descr__name__),
     __weakref__ = make_weakref_descr(GeneratorIterator),
 )
-GeneratorIterator.typedef.acceptable_as_base_class = False
+assert not GeneratorIterator.typedef.acceptable_as_base_class  # no __new__
 
 Cell.typedef = TypeDef("cell",
     __cmp__      = interp2app(Cell.descr__cmp__),
@@ -939,17 +939,17 @@
     __setstate__ = interp2app(Cell.descr__setstate__),
     cell_contents= GetSetProperty(Cell.descr__cell_contents, cls=Cell),
 )
-Cell.typedef.acceptable_as_base_class = False
+assert not Cell.typedef.acceptable_as_base_class  # no __new__
 
 Ellipsis.typedef = TypeDef("Ellipsis",
     __repr__ = interp2app(Ellipsis.descr__repr__),
 )
-Ellipsis.typedef.acceptable_as_base_class = False
+assert not Ellipsis.typedef.acceptable_as_base_class  # no __new__
 
 NotImplemented.typedef = TypeDef("NotImplemented",
     __repr__ = interp2app(NotImplemented.descr__repr__),
 )
-NotImplemented.typedef.acceptable_as_base_class = False
+assert not NotImplemented.typedef.acceptable_as_base_class  # no __new__
 
 SuspendedUnroller.typedef = TypeDef("SuspendedUnroller")
-SuspendedUnroller.typedef.acceptable_as_base_class = False
+assert not SuspendedUnroller.typedef.acceptable_as_base_class  # no __new__
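A plausible reading of why these assignments could become assertions (an assumption, not stated in the diff): TypeDef appears to derive acceptable_as_base_class from whether a __new__ entry is supplied, so types without one are already rejected as base classes and the explicit assignments were redundant. Sketch of that idea with a simplified stand-in:

    class TypeDef(object):                 # simplified stand-in, not PyPy's class
        def __init__(self, name, **rawdict):
            self.name = name
            self.rawdict = rawdict
            # assumed rule: only types that define __new__ may be subclassed
            self.acceptable_as_base_class = '__new__' in rawdict

    td = TypeDef("cell")                   # no __new__ supplied
    assert not td.acceptable_as_base_class # mirrors the assertions above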
diff --git a/pypy/module/_cffi_backend/libraryobj.py b/pypy/module/_cffi_backend/libraryobj.py
--- a/pypy/module/_cffi_backend/libraryobj.py
+++ b/pypy/module/_cffi_backend/libraryobj.py
@@ -91,7 +91,7 @@
     read_variable = interp2app(W_Library.read_variable),
     write_variable = interp2app(W_Library.write_variable),
     )
-W_Library.acceptable_as_base_class = False
+W_Library.typedef.acceptable_as_base_class = False
 
 
 @unwrap_spec(filename="str_or_None", flags=int)
diff --git a/pypy/module/_hashlib/interp_hashlib.py b/pypy/module/_hashlib/interp_hashlib.py
--- a/pypy/module/_hashlib/interp_hashlib.py
+++ b/pypy/module/_hashlib/interp_hashlib.py
@@ -156,7 +156,7 @@
     block_size=GetSetProperty(W_Hash.get_block_size),
     name=GetSetProperty(W_Hash.get_name),
 )
-W_Hash.acceptable_as_base_class = False
+W_Hash.typedef.acceptable_as_base_class = False
 
 @unwrap_spec(name=str, string='bufferstr')
 def new(space, name, string=''):
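Both one-line hunks above fix the same slip: the flag was being set on the wrapper class itself rather than on its TypeDef, which presumably left W_Library and W_Hash subclassable from app level. A minimal sketch of the difference, using stand-in classes:

    class TypeDef(object):
        acceptable_as_base_class = True          # default in this sketch

    class W_Library(object):
        typedef = TypeDef()

    W_Library.acceptable_as_base_class = False   # old code: sets an unrelated
    assert W_Library.typedef.acceptable_as_base_class   # class attribute only
    W_Library.typedef.acceptable_as_base_class = False  # new code: reaches the typedef
    assert not W_Library.typedef.acceptable_as_base_class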
diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py
--- a/pypy/module/micronumpy/__init__.py
+++ b/pypy/module/micronumpy/__init__.py
@@ -20,8 +20,10 @@
         'concatenate': 'arrayops.concatenate',
         'count_nonzero': 'arrayops.count_nonzero',
         'dot': 'arrayops.dot',
-        'result_type': 'arrayops.result_type',
         'where': 'arrayops.where',
+        'result_type': 'casting.result_type',
+        'can_cast': 'casting.can_cast',
+        'min_scalar_type': 'casting.min_scalar_type',
 
         'set_string_function': 'appbridge.set_string_function',
         'typeinfo': 'descriptor.get_dtype_cache(space).w_typeinfo',
diff --git a/pypy/module/micronumpy/arrayops.py b/pypy/module/micronumpy/arrayops.py
--- a/pypy/module/micronumpy/arrayops.py
+++ b/pypy/module/micronumpy/arrayops.py
@@ -1,13 +1,11 @@
-from rpython.rlib import jit
 from pypy.interpreter.error import OperationError, oefmt
 from pypy.interpreter.gateway import unwrap_spec
 from pypy.module.micronumpy import loop, descriptor, ufuncs, support, \
     constants as NPY
 from pypy.module.micronumpy.base import convert_to_array, W_NDimArray
 from pypy.module.micronumpy.converters import clipmode_converter
-from pypy.module.micronumpy.strides import Chunk, Chunks, shape_agreement, \
-    shape_agreement_multiple
-from .boxes import W_GenericBox
+from pypy.module.micronumpy.strides import (
+    Chunk, Chunks, shape_agreement, shape_agreement_multiple)
 
 
 def where(space, w_arr, w_x=None, w_y=None):
@@ -285,28 +283,3 @@
     else:
         loop.diagonal_array(space, arr, out, offset, axis1, axis2, shape)
     return out
-
-
-@jit.unroll_safe
-def result_type(space, __args__):
-    args_w, kw_w = __args__.unpack()
-    if kw_w:
-        raise oefmt(space.w_TypeError, "result_type() takes no keyword 
arguments")
-    if not args_w:
-        raise oefmt(space.w_ValueError, "at least one array or dtype is required")
-    result = None
-    for w_arg in args_w:
-        if isinstance(w_arg, W_NDimArray):
-            dtype = w_arg.get_dtype()
-        elif isinstance(w_arg, W_GenericBox) or (
-                space.isinstance_w(w_arg, space.w_int) or
-                space.isinstance_w(w_arg, space.w_float) or
-                space.isinstance_w(w_arg, space.w_complex) or
-                space.isinstance_w(w_arg, space.w_long) or
-                space.isinstance_w(w_arg, space.w_bool)):
-            dtype = ufuncs.find_dtype_for_scalar(space, w_arg)
-        else:
-            dtype = space.interp_w(descriptor.W_Dtype,
-                space.call_function(space.gettypefor(descriptor.W_Dtype), w_arg))
-        result = ufuncs.find_binop_result_dtype(space, result, dtype)
-    return result
diff --git a/pypy/module/micronumpy/boxes.py b/pypy/module/micronumpy/boxes.py
--- a/pypy/module/micronumpy/boxes.py
+++ b/pypy/module/micronumpy/boxes.py
@@ -879,4 +879,3 @@
     __new__ = interp2app(W_ObjectBox.descr__new__.im_func),
     __getattr__ = interp2app(W_ObjectBox.descr__getattr__),
 )
-
diff --git a/pypy/module/micronumpy/casting.py b/pypy/module/micronumpy/casting.py
new file mode 100644
--- /dev/null
+++ b/pypy/module/micronumpy/casting.py
@@ -0,0 +1,108 @@
+"""Functions and helpers for converting between dtypes"""
+
+from rpython.rlib import jit
+from pypy.interpreter.gateway import unwrap_spec
+from pypy.interpreter.error import oefmt
+
+from pypy.module.micronumpy.base import W_NDimArray, convert_to_array
+from pypy.module.micronumpy import constants as NPY
+from pypy.module.micronumpy.ufuncs import (
+    find_binop_result_dtype, find_dtype_for_scalar)
+from .types import (
+    Bool, ULong, Long, Float64, Complex64, UnicodeType, VoidType, ObjectType)
+from .descriptor import get_dtype_cache, as_dtype, is_scalar_w
+
+@jit.unroll_safe
+def result_type(space, __args__):
+    args_w, kw_w = __args__.unpack()
+    if kw_w:
+        raise oefmt(space.w_TypeError,
+            "result_type() takes no keyword arguments")
+    if not args_w:
+        raise oefmt(space.w_ValueError,
+            "at least one array or dtype is required")
+    result = None
+    for w_arg in args_w:
+        dtype = as_dtype(space, w_arg)
+        result = find_binop_result_dtype(space, result, dtype)
+    return result
+
+@unwrap_spec(casting=str)
+def can_cast(space, w_from, w_totype, casting='safe'):
+    try:
+        target = as_dtype(space, w_totype, allow_None=False)
+    except TypeError:
+        raise oefmt(space.w_TypeError,
+            "did not understand one of the types; 'None' not accepted")
+    if isinstance(w_from, W_NDimArray):
+        return space.wrap(can_cast_array(space, w_from, target, casting))
+    elif is_scalar_w(space, w_from):
+        w_scalar = as_scalar(space, w_from)
+        w_arr = W_NDimArray.from_scalar(space, w_scalar)
+        return space.wrap(can_cast_array(space, w_arr, target, casting))
+
+    try:
+        origin = as_dtype(space, w_from, allow_None=False)
+    except TypeError:
+        raise oefmt(space.w_TypeError,
+            "did not understand one of the types; 'None' not accepted")
+    return space.wrap(can_cast_type(space, origin, target, casting))
+
+kind_ordering = {
+    Bool.kind: 0, ULong.kind: 1, Long.kind: 2,
+    Float64.kind: 4, Complex64.kind: 5,
+    NPY.STRINGLTR: 6, NPY.STRINGLTR2: 6,
+    UnicodeType.kind: 7, VoidType.kind: 8, ObjectType.kind: 9}
+
+def can_cast_type(space, origin, target, casting):
+    # equivalent to PyArray_CanCastTypeTo
+    if casting == 'no':
+        return origin.eq(space, target)
+    elif casting == 'equiv':
+        return origin.num == target.num and origin.elsize == target.elsize
+    elif casting == 'unsafe':
+        return True
+    elif casting == 'same_kind':
+        if origin.can_cast_to(target):
+            return True
+        if origin.kind in kind_ordering and target.kind in kind_ordering:
+            return kind_ordering[origin.kind] <= kind_ordering[target.kind]
+        return False
+    else:
+        return origin.can_cast_to(target)
+
+def can_cast_array(space, w_from, target, casting):
+    # equivalent to PyArray_CanCastArrayTo
+    origin = w_from.get_dtype()
+    if w_from.is_scalar():
+        return can_cast_scalar(
+            space, origin, w_from.get_scalar_value(), target, casting)
+    else:
+        return can_cast_type(space, origin, target, casting)
+
+def can_cast_scalar(space, from_type, value, target, casting):
+    # equivalent to CNumPy's can_cast_scalar_to
+    if from_type == target or casting == 'unsafe':
+        return True
+    if not from_type.is_number() or casting in ('no', 'equiv'):
+        return can_cast_type(space, from_type, target, casting)
+    if not from_type.is_native():
+        value = value.descr_byteswap(space)
+    dtypenum, altnum = value.min_dtype()
+    if target.is_unsigned():
+        dtypenum = altnum
+    dtype = get_dtype_cache(space).dtypes_by_num[dtypenum]
+    return can_cast_type(space, dtype, target, casting)
+
+def as_scalar(space, w_obj):
+    dtype = find_dtype_for_scalar(space, w_obj)
+    return dtype.coerce(space, w_obj)
+
+def min_scalar_type(space, w_a):
+    w_array = convert_to_array(space, w_a)
+    dtype = w_array.get_dtype()
+    if w_array.is_scalar() and dtype.is_number():
+        num, alt_num = w_array.get_scalar_value().min_dtype()
+        return get_dtype_cache(space).dtypes_by_num[num]
+    else:
+        return dtype
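A short usage sketch of the three functions this new file provides, using only behaviour exercised by the tests further down (values follow NumPy; illustrative, not exhaustive):

    import numpy as np

    np.result_type(np.array([1, 2]), 1.)      # -> dtype('float64')
    np.can_cast('<i4', '>i8', 'safe')         # True: widening, byte order ignored
    np.can_cast('<i8', '>i4', 'same_kind')    # True: narrowing within the 'i' kind
    np.can_cast('<i8', '>u4', 'same_kind')    # False: signed -> unsigned
    np.can_cast(127, np.int8)                 # True  (value-based for scalars)
    np.can_cast(128, np.int8)                 # False (value does not fit)
    np.min_scalar_type(2**8 - 1)              # -> dtype('uint8')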
diff --git a/pypy/module/micronumpy/descriptor.py b/pypy/module/micronumpy/descriptor.py
--- a/pypy/module/micronumpy/descriptor.py
+++ b/pypy/module/micronumpy/descriptor.py
@@ -8,7 +8,9 @@
 from rpython.rlib import jit
 from rpython.rlib.objectmodel import specialize, compute_hash, we_are_translated
 from rpython.rlib.rarithmetic import r_longlong, r_ulonglong
-from pypy.module.micronumpy import types, boxes, base, support, constants as NPY
+from rpython.rlib.signature import finishsigs, signature, types as ann
+from pypy.module.micronumpy import types, boxes, support, constants as NPY
+from .base import W_NDimArray
 from pypy.module.micronumpy.appbridge import get_appbridge_cache
 from pypy.module.micronumpy.converters import byteorder_converter
 
@@ -36,24 +38,21 @@
         if not space.is_none(w_arr):
             dtype = find_binop_result_dtype(space, dtype, w_arr.get_dtype())
     assert dtype is not None
-    out = base.W_NDimArray.from_shape(space, shape, dtype)
+    out = W_NDimArray.from_shape(space, shape, dtype)
     return out
 
 
+_REQ_STRLEN = [0, 3, 5, 10, 10, 20, 20, 20, 20]  # data for can_cast_to()
+
+@finishsigs
 class W_Dtype(W_Root):
     _immutable_fields_ = [
-        "itemtype?", "num", "kind", "char", "w_box_type",
-        "byteorder?", "names?", "fields?", "elsize?", "alignment?",
-        "shape?", "subdtype?", "base?",
-    ]
+        "itemtype?", "w_box_type", "byteorder?", "names?", "fields?",
+        "elsize?", "alignment?", "shape?", "subdtype?", "base?"]
 
-    def __init__(self, itemtype, num, kind, char, w_box_type,
-                 byteorder=None, names=[], fields={},
-                 elsize=None, shape=[], subdtype=None):
+    def __init__(self, itemtype, w_box_type, byteorder=None, names=[],
+                 fields={}, elsize=None, shape=[], subdtype=None):
         self.itemtype = itemtype
-        self.num = num
-        self.kind = kind
-        self.char = char
         self.w_box_type = w_box_type
         if byteorder is None:
             if itemtype.get_element_size() == 1 or isinstance(itemtype, types.ObjectType):
@@ -74,6 +73,18 @@
         else:
             self.base = subdtype.base
 
+    @property
+    def num(self):
+        return self.itemtype.num
+
+    @property
+    def kind(self):
+        return self.itemtype.kind
+
+    @property
+    def char(self):
+        return self.itemtype.char
+
     def __repr__(self):
         if self.fields:
             return '<DType %r>' % self.fields
@@ -87,6 +98,41 @@
     def box_complex(self, real, imag):
         return self.itemtype.box_complex(real, imag)
 
+    @signature(ann.self(), ann.self(), returns=ann.bool())
+    def can_cast_to(self, other):
+        # equivalent to PyArray_CanCastTo
+        result = self.itemtype.can_cast_to(other.itemtype)
+        if result:
+            if self.num == NPY.STRING:
+                if other.num == NPY.STRING:
+                    return self.elsize <= other.elsize
+                elif other.num == NPY.UNICODE:
+                    return self.elsize * 4 <= other.elsize
+            elif self.num == NPY.UNICODE and other.num == NPY.UNICODE:
+                return self.elsize <= other.elsize
+            elif other.num in (NPY.STRING, NPY.UNICODE):
+                if other.num == NPY.STRING:
+                    char_size = 1
+                else:  # NPY.UNICODE
+                    char_size = 4
+                if other.elsize == 0:
+                    return True
+                if self.is_bool():
+                    return other.elsize >= 5 * char_size
+                elif self.is_unsigned():
+                    if self.elsize > 8 or self.elsize < 0:
+                        return False
+                    else:
+                        return (other.elsize >=
+                                _REQ_STRLEN[self.elsize] * char_size)
+                elif self.is_signed():
+                    if self.elsize > 8 or self.elsize < 0:
+                        return False
+                    else:
+                        return (other.elsize >=
+                                (_REQ_STRLEN[self.elsize] + 1) * char_size)
+        return result
+
     def coerce(self, space, w_item):
         return self.itemtype.coerce(space, self, w_item)
 
@@ -109,6 +155,9 @@
     def is_complex(self):
         return self.kind == NPY.COMPLEXLTR
 
+    def is_number(self):
+        return self.is_int() or self.is_float() or self.is_complex()
+
     def is_str(self):
         return self.num == NPY.STRING
 
@@ -259,6 +308,22 @@
     def descr_ne(self, space, w_other):
         return space.wrap(not self.eq(space, w_other))
 
+    def descr_le(self, space, w_other):
+        w_other = as_dtype(space, w_other)
+        return space.wrap(self.can_cast_to(w_other))
+
+    def descr_ge(self, space, w_other):
+        w_other = as_dtype(space, w_other)
+        return space.wrap(w_other.can_cast_to(self))
+
+    def descr_lt(self, space, w_other):
+        w_other = as_dtype(space, w_other)
+        return space.wrap(self.can_cast_to(w_other) and not self.eq(space, w_other))
+
+    def descr_gt(self, space, w_other):
+        w_other = as_dtype(space, w_other)
+        return space.wrap(w_other.can_cast_to(self) and not self.eq(space, w_other))
+
     def _compute_hash(self, space, x):
         from rpython.rlib.rarithmetic import intmask
         if not self.fields and self.subdtype is None:
@@ -450,7 +515,7 @@
         fields = self.fields
         if fields is None:
             fields = {}
-        return W_Dtype(itemtype, self.num, self.kind, self.char,
+        return W_Dtype(itemtype,
                        self.w_box_type, byteorder=endian, elsize=self.elsize,
                        names=self.names, fields=fields,
                        shape=self.shape, subdtype=self.subdtype)
@@ -485,8 +550,7 @@
         fields[fldname] = (offset, subdtype)
         offset += subdtype.elsize
         names.append(fldname)
-    return W_Dtype(types.RecordType(space), NPY.VOID, NPY.VOIDLTR, NPY.VOIDLTR,
-                   space.gettypefor(boxes.W_VoidBox),
+    return W_Dtype(types.RecordType(space), space.gettypefor(boxes.W_VoidBox),
                    names=names, fields=fields, elsize=offset)
 
 
@@ -553,7 +617,7 @@
         if size == 1:
             return subdtype
         size *= subdtype.elsize
-        return W_Dtype(types.VoidType(space), NPY.VOID, NPY.VOIDLTR, NPY.VOIDLTR,
+        return W_Dtype(types.VoidType(space),
                        space.gettypefor(boxes.W_VoidBox),
                        shape=shape, subdtype=subdtype, elsize=size)
 
@@ -630,6 +694,10 @@
 
     __eq__ = interp2app(W_Dtype.descr_eq),
     __ne__ = interp2app(W_Dtype.descr_ne),
+    __lt__ = interp2app(W_Dtype.descr_lt),
+    __le__ = interp2app(W_Dtype.descr_le),
+    __gt__ = interp2app(W_Dtype.descr_gt),
+    __ge__ = interp2app(W_Dtype.descr_ge),
     __hash__ = interp2app(W_Dtype.descr_hash),
     __str__= interp2app(W_Dtype.descr_str),
     __repr__ = interp2app(W_Dtype.descr_repr),
@@ -654,7 +722,10 @@
         except ValueError:
             raise oefmt(space.w_TypeError, "data type not understood")
     if char == NPY.CHARLTR:
-        return new_string_dtype(space, 1, NPY.CHARLTR)
+        return W_Dtype(
+            types.CharType(space),
+            elsize=1,
+            w_box_type=space.gettypefor(boxes.W_StringBox))
     elif char == NPY.STRINGLTR or char == NPY.STRINGLTR2:
         return new_string_dtype(space, size)
     elif char == NPY.UNICODELTR:
@@ -664,13 +735,10 @@
     assert False
 
 
-def new_string_dtype(space, size, char=NPY.STRINGLTR):
+def new_string_dtype(space, size):
     return W_Dtype(
         types.StringType(space),
         elsize=size,
-        num=NPY.STRING,
-        kind=NPY.STRINGLTR,
-        char=char,
         w_box_type=space.gettypefor(boxes.W_StringBox),
     )
 
@@ -680,9 +748,6 @@
     return W_Dtype(
         itemtype,
         elsize=size * itemtype.get_element_size(),
-        num=NPY.UNICODE,
-        kind=NPY.UNICODELTR,
-        char=NPY.UNICODELTR,
         w_box_type=space.gettypefor(boxes.W_UnicodeBox),
     )
 
@@ -691,9 +756,6 @@
     return W_Dtype(
         types.VoidType(space),
         elsize=size,
-        num=NPY.VOID,
-        kind=NPY.VOIDLTR,
-        char=NPY.VOIDLTR,
         w_box_type=space.gettypefor(boxes.W_VoidBox),
     )
 
@@ -702,173 +764,93 @@
     def __init__(self, space):
         self.w_booldtype = W_Dtype(
             types.Bool(space),
-            num=NPY.BOOL,
-            kind=NPY.GENBOOLLTR,
-            char=NPY.BOOLLTR,
             w_box_type=space.gettypefor(boxes.W_BoolBox),
         )
         self.w_int8dtype = W_Dtype(
             types.Int8(space),
-            num=NPY.BYTE,
-            kind=NPY.SIGNEDLTR,
-            char=NPY.BYTELTR,
             w_box_type=space.gettypefor(boxes.W_Int8Box),
         )
         self.w_uint8dtype = W_Dtype(
             types.UInt8(space),
-            num=NPY.UBYTE,
-            kind=NPY.UNSIGNEDLTR,
-            char=NPY.UBYTELTR,
             w_box_type=space.gettypefor(boxes.W_UInt8Box),
         )
         self.w_int16dtype = W_Dtype(
             types.Int16(space),
-            num=NPY.SHORT,
-            kind=NPY.SIGNEDLTR,
-            char=NPY.SHORTLTR,
             w_box_type=space.gettypefor(boxes.W_Int16Box),
         )
         self.w_uint16dtype = W_Dtype(
             types.UInt16(space),
-            num=NPY.USHORT,
-            kind=NPY.UNSIGNEDLTR,
-            char=NPY.USHORTLTR,
             w_box_type=space.gettypefor(boxes.W_UInt16Box),
         )
         self.w_int32dtype = W_Dtype(
             types.Int32(space),
-            num=NPY.INT,
-            kind=NPY.SIGNEDLTR,
-            char=NPY.INTLTR,
             w_box_type=space.gettypefor(boxes.W_Int32Box),
         )
         self.w_uint32dtype = W_Dtype(
             types.UInt32(space),
-            num=NPY.UINT,
-            kind=NPY.UNSIGNEDLTR,
-            char=NPY.UINTLTR,
             w_box_type=space.gettypefor(boxes.W_UInt32Box),
         )
         self.w_longdtype = W_Dtype(
             types.Long(space),
-            num=NPY.LONG,
-            kind=NPY.SIGNEDLTR,
-            char=NPY.LONGLTR,
             w_box_type=space.gettypefor(boxes.W_LongBox),
         )
         self.w_ulongdtype = W_Dtype(
             types.ULong(space),
-            num=NPY.ULONG,
-            kind=NPY.UNSIGNEDLTR,
-            char=NPY.ULONGLTR,
             w_box_type=space.gettypefor(boxes.W_ULongBox),
         )
         self.w_int64dtype = W_Dtype(
             types.Int64(space),
-            num=NPY.LONGLONG,
-            kind=NPY.SIGNEDLTR,
-            char=NPY.LONGLONGLTR,
             w_box_type=space.gettypefor(boxes.W_Int64Box),
         )
         self.w_uint64dtype = W_Dtype(
             types.UInt64(space),
-            num=NPY.ULONGLONG,
-            kind=NPY.UNSIGNEDLTR,
-            char=NPY.ULONGLONGLTR,
             w_box_type=space.gettypefor(boxes.W_UInt64Box),
         )
         self.w_float32dtype = W_Dtype(
             types.Float32(space),
-            num=NPY.FLOAT,
-            kind=NPY.FLOATINGLTR,
-            char=NPY.FLOATLTR,
             w_box_type=space.gettypefor(boxes.W_Float32Box),
         )
         self.w_float64dtype = W_Dtype(
             types.Float64(space),
-            num=NPY.DOUBLE,
-            kind=NPY.FLOATINGLTR,
-            char=NPY.DOUBLELTR,
             w_box_type=space.gettypefor(boxes.W_Float64Box),
         )
         self.w_floatlongdtype = W_Dtype(
             types.FloatLong(space),
-            num=NPY.LONGDOUBLE,
-            kind=NPY.FLOATINGLTR,
-            char=NPY.LONGDOUBLELTR,
             w_box_type=space.gettypefor(boxes.W_FloatLongBox),
         )
         self.w_complex64dtype = W_Dtype(
             types.Complex64(space),
-            num=NPY.CFLOAT,
-            kind=NPY.COMPLEXLTR,
-            char=NPY.CFLOATLTR,
             w_box_type=space.gettypefor(boxes.W_Complex64Box),
         )
         self.w_complex128dtype = W_Dtype(
             types.Complex128(space),
-            num=NPY.CDOUBLE,
-            kind=NPY.COMPLEXLTR,
-            char=NPY.CDOUBLELTR,
             w_box_type=space.gettypefor(boxes.W_Complex128Box),
         )
         self.w_complexlongdtype = W_Dtype(
             types.ComplexLong(space),
-            num=NPY.CLONGDOUBLE,
-            kind=NPY.COMPLEXLTR,
-            char=NPY.CLONGDOUBLELTR,
             w_box_type=space.gettypefor(boxes.W_ComplexLongBox),
         )
         self.w_stringdtype = W_Dtype(
             types.StringType(space),
             elsize=0,
-            num=NPY.STRING,
-            kind=NPY.STRINGLTR,
-            char=NPY.STRINGLTR,
             w_box_type=space.gettypefor(boxes.W_StringBox),
         )
         self.w_unicodedtype = W_Dtype(
             types.UnicodeType(space),
             elsize=0,
-            num=NPY.UNICODE,
-            kind=NPY.UNICODELTR,
-            char=NPY.UNICODELTR,
             w_box_type=space.gettypefor(boxes.W_UnicodeBox),
         )
         self.w_voiddtype = W_Dtype(
             types.VoidType(space),
             elsize=0,
-            num=NPY.VOID,
-            kind=NPY.VOIDLTR,
-            char=NPY.VOIDLTR,
             w_box_type=space.gettypefor(boxes.W_VoidBox),
         )
         self.w_float16dtype = W_Dtype(
             types.Float16(space),
-            num=NPY.HALF,
-            kind=NPY.FLOATINGLTR,
-            char=NPY.HALFLTR,
             w_box_type=space.gettypefor(boxes.W_Float16Box),
         )
-        self.w_intpdtype = W_Dtype(
-            types.Long(space),
-            num=NPY.LONG,
-            kind=NPY.SIGNEDLTR,
-            char=NPY.INTPLTR,
-            w_box_type=space.gettypefor(boxes.W_LongBox),
-        )
-        self.w_uintpdtype = W_Dtype(
-            types.ULong(space),
-            num=NPY.ULONG,
-            kind=NPY.UNSIGNEDLTR,
-            char=NPY.UINTPLTR,
-            w_box_type=space.gettypefor(boxes.W_ULongBox),
-        )
         self.w_objectdtype = W_Dtype(
             types.ObjectType(space),
-            num=NPY.OBJECT,
-            kind=NPY.OBJECTLTR,
-            char=NPY.OBJECTLTR,
             w_box_type=space.gettypefor(boxes.W_ObjectBox),
         )
         aliases = {
@@ -929,7 +911,7 @@
             self.w_int64dtype, self.w_uint64dtype,
             ] + float_dtypes + complex_dtypes + [
             self.w_stringdtype, self.w_unicodedtype, self.w_voiddtype,
-            self.w_intpdtype, self.w_uintpdtype, self.w_objectdtype,
+            self.w_objectdtype,
         ]
         self.float_dtypes_by_num_bytes = sorted(
             (dtype.elsize, dtype)
@@ -970,8 +952,7 @@
             'CLONGDOUBLE': self.w_complexlongdtype,
             #'DATETIME',
             'UINT': self.w_uint32dtype,
-            'INTP': self.w_intpdtype,
-            'UINTP': self.w_uintpdtype,
+            'INTP': self.w_longdtype,
             'HALF': self.w_float16dtype,
             'BYTE': self.w_int8dtype,
             #'TIMEDELTA',
@@ -1001,7 +982,11 @@
             space.setitem(w_typeinfo, space.wrap(k), space.gettypefor(v))
         for k, dtype in typeinfo_full.iteritems():
             itembits = dtype.elsize * 8
-            items_w = [space.wrap(dtype.char),
+            if k in ('INTP', 'UINTP'):
+                char = getattr(NPY, k + 'LTR')
+            else:
+                char = dtype.char
+            items_w = [space.wrap(char),
                        space.wrap(dtype.num),
                        space.wrap(itembits),
                        space.wrap(dtype.itemtype.get_element_size())]
@@ -1024,3 +1009,26 @@
 
 def get_dtype_cache(space):
     return space.fromcache(DtypeCache)
+
+def as_dtype(space, w_arg, allow_None=True):
+    from pypy.module.micronumpy.ufuncs import find_dtype_for_scalar
+    # roughly equivalent to CNumPy's PyArray_DescrConverter2
+    if not allow_None and space.is_none(w_arg):
+        raise TypeError("Cannot create dtype from None here")
+    if isinstance(w_arg, W_NDimArray):
+        return w_arg.get_dtype()
+    elif is_scalar_w(space, w_arg):
+        result = find_dtype_for_scalar(space, w_arg)
+        assert result is not None  # XXX: not guaranteed
+        return result
+    else:
+        return space.interp_w(W_Dtype,
+            space.call_function(space.gettypefor(W_Dtype), w_arg))
+
+def is_scalar_w(space, w_arg):
+    return (isinstance(w_arg, boxes.W_GenericBox) or
+            space.isinstance_w(w_arg, space.w_int) or
+            space.isinstance_w(w_arg, space.w_float) or
+            space.isinstance_w(w_arg, space.w_complex) or
+            space.isinstance_w(w_arg, space.w_long) or
+            space.isinstance_w(w_arg, space.w_bool))
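The new rich comparisons on W_Dtype above are defined directly in terms of can_cast_to: d1 <= d2 holds when d1 can be cast safely to d2, and the strict forms additionally require inequality. An app-level illustration, consistent with the test_dtype_cmp test added below:

    import numpy as np

    assert np.dtype('int8') <= np.dtype('int8')        # reflexive
    assert not (np.dtype('int8') < np.dtype('int8'))   # strict form also needs !=
    assert np.dtype('int32') <= np.dtype('int64')      # safe widening
    assert not (np.dtype('int64') <= np.dtype('int32'))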
diff --git a/pypy/module/micronumpy/test/test_arrayops.py b/pypy/module/micronumpy/test/test_arrayops.py
--- a/pypy/module/micronumpy/test/test_arrayops.py
+++ b/pypy/module/micronumpy/test/test_arrayops.py
@@ -199,19 +199,3 @@
         a.put(23, -1, mode=1)  # wrap
         assert (a == array([0, 1, -10, -1, -15])).all()
         raises(TypeError, "arange(5).put(22, -5, mode='zzzz')")  # 
unrecognized mode
-
-    def test_result_type(self):
-        import numpy as np
-        exc = raises(ValueError, np.result_type)
-        assert str(exc.value) == "at least one array or dtype is required"
-        exc = raises(TypeError, np.result_type, a=2)
-        assert str(exc.value) == "result_type() takes no keyword arguments"
-        assert np.result_type(True) is np.dtype('bool')
-        assert np.result_type(1) is np.dtype('int')
-        assert np.result_type(1.) is np.dtype('float64')
-        assert np.result_type(1+2j) is np.dtype('complex128')
-        assert np.result_type(1, 1.) is np.dtype('float64')
-        assert np.result_type(np.array([1, 2])) is np.dtype('int')
-        assert np.result_type(np.array([1, 2]), 1, 1+2j) is np.dtype('complex128')
-        assert np.result_type(np.array([1, 2]), 1, 'float64') is np.dtype('float64')
-        assert np.result_type(np.array([1, 2]), 1, None) is np.dtype('float64')
diff --git a/pypy/module/micronumpy/test/test_casting.py b/pypy/module/micronumpy/test/test_casting.py
new file mode 100644
--- /dev/null
+++ b/pypy/module/micronumpy/test/test_casting.py
@@ -0,0 +1,121 @@
+from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest
+
+
+class AppTestNumSupport(BaseNumpyAppTest):
+    def test_result_type(self):
+        import numpy as np
+        exc = raises(ValueError, np.result_type)
+        assert str(exc.value) == "at least one array or dtype is required"
+        exc = raises(TypeError, np.result_type, a=2)
+        assert str(exc.value) == "result_type() takes no keyword arguments"
+        assert np.result_type(True) is np.dtype('bool')
+        assert np.result_type(1) is np.dtype('int')
+        assert np.result_type(1.) is np.dtype('float64')
+        assert np.result_type(1+2j) is np.dtype('complex128')
+        assert np.result_type(1, 1.) is np.dtype('float64')
+        assert np.result_type(np.array([1, 2])) is np.dtype('int')
+        assert np.result_type(np.array([1, 2]), 1, 1+2j) is np.dtype('complex128')
+        assert np.result_type(np.array([1, 2]), 1, 'float64') is np.dtype('float64')
+        assert np.result_type(np.array([1, 2]), 1, None) is np.dtype('float64')
+
+    def test_can_cast(self):
+        import numpy as np
+
+        assert np.can_cast(np.int32, np.int64)
+        assert np.can_cast(np.float64, complex)
+        assert not np.can_cast(np.complex64, float)
+
+        assert np.can_cast('i8', 'f8')
+        assert not np.can_cast('i8', 'f4')
+        assert np.can_cast('i4', 'S11')
+
+        assert np.can_cast('i8', 'i8', 'no')
+        assert not np.can_cast('<i8', '>i8', 'no')
+
+        assert np.can_cast('<i8', '>i8', 'equiv')
+        assert not np.can_cast('<i4', '>i8', 'equiv')
+
+        assert np.can_cast('<i4', '>i8', 'safe')
+        assert not np.can_cast('<i8', '>i4', 'safe')
+
+        assert np.can_cast('<i8', '>i4', 'same_kind')
+        assert not np.can_cast('<i8', '>u4', 'same_kind')
+
+        assert np.can_cast('<i8', '>u4', 'unsafe')
+
+        assert np.can_cast('bool', 'S5')
+        assert not np.can_cast('bool', 'S4')
+
+        assert np.can_cast('b', 'S4')
+        assert not np.can_cast('b', 'S3')
+
+        assert np.can_cast('u1', 'S3')
+        assert not np.can_cast('u1', 'S2')
+        assert np.can_cast('u2', 'S5')
+        assert not np.can_cast('u2', 'S4')
+        assert np.can_cast('u4', 'S10')
+        assert not np.can_cast('u4', 'S9')
+        assert np.can_cast('u8', 'S20')
+        assert not np.can_cast('u8', 'S19')
+
+        assert np.can_cast('i1', 'S4')
+        assert not np.can_cast('i1', 'S3')
+        assert np.can_cast('i2', 'S6')
+        assert not np.can_cast('i2', 'S5')
+        assert np.can_cast('i4', 'S11')
+        assert not np.can_cast('i4', 'S10')
+        assert np.can_cast('i8', 'S21')
+        assert not np.can_cast('i8', 'S20')
+
+        assert np.can_cast('bool', 'S5')
+        assert not np.can_cast('bool', 'S4')
+
+        assert np.can_cast('b', 'U4')
+        assert not np.can_cast('b', 'U3')
+
+        assert np.can_cast('u1', 'U3')
+        assert not np.can_cast('u1', 'U2')
+        assert np.can_cast('u2', 'U5')
+        assert not np.can_cast('u2', 'U4')
+        assert np.can_cast('u4', 'U10')
+        assert not np.can_cast('u4', 'U9')
+        assert np.can_cast('u8', 'U20')
+        assert not np.can_cast('u8', 'U19')
+
+        assert np.can_cast('i1', 'U4')
+        assert not np.can_cast('i1', 'U3')
+        assert np.can_cast('i2', 'U6')
+        assert not np.can_cast('i2', 'U5')
+        assert np.can_cast('i4', 'U11')
+        assert not np.can_cast('i4', 'U10')
+        assert np.can_cast('i8', 'U21')
+        assert not np.can_cast('i8', 'U20')
+
+        raises(TypeError, np.can_cast, 'i4', None)
+        raises(TypeError, np.can_cast, None, 'i4')
+
+    def test_can_cast_scalar(self):
+        import numpy as np
+        assert np.can_cast(True, np.bool_)
+        assert np.can_cast(True, np.int8)
+        assert not np.can_cast(0, np.bool_)
+        assert np.can_cast(127, np.int8)
+        assert not np.can_cast(128, np.int8)
+        assert np.can_cast(128, np.int16)
+
+        assert np.can_cast(np.float32('inf'), np.float32)
+        assert np.can_cast(float('inf'), np.float32)  # XXX: False in CNumPy?!
+        assert np.can_cast(3.3e38, np.float32)
+        assert not np.can_cast(3.4e38, np.float32)
+
+        assert np.can_cast(1 + 2j, np.complex64)
+        assert not np.can_cast(1 + 1e50j, np.complex64)
+        assert np.can_cast(1., np.complex64)
+        assert not np.can_cast(1e50, np.complex64)
+
+    def test_min_scalar_type(self):
+        import numpy as np
+        assert np.min_scalar_type(2**8 - 1) == np.dtype('uint8')
+        assert np.min_scalar_type(2**64 - 1) == np.dtype('uint64')
+        # XXX: np.asarray(2**64) fails with OverflowError
+        # assert np.min_scalar_type(2**64) == np.dtype('O')
diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py
--- a/pypy/module/micronumpy/test/test_dtypes.py
+++ b/pypy/module/micronumpy/test/test_dtypes.py
@@ -112,6 +112,11 @@
         raises(TypeError, lambda: dtype("int8") == 3)
         assert dtype(bool) == bool
 
+    def test_dtype_cmp(self):
+        from numpy import dtype
+        assert dtype('int8') <= dtype('int8')
+        assert not (dtype('int8') < dtype('int8'))
+
     def test_dtype_aliases(self):
         from numpy import dtype
         assert dtype('bool8') is dtype('bool')
@@ -1287,7 +1292,7 @@
         from cPickle import loads, dumps
 
         d = dtype([("x", "int32"), ("y", "int32"), ("z", "int32"), ("value", 
float)])
-        assert d.__reduce__() == (dtype, ('V20', 0, 1), (3, '|', None, 
+        assert d.__reduce__() == (dtype, ('V20', 0, 1), (3, '|', None,
                      ('x', 'y', 'z', 'value'),
                      {'y': (dtype('int32'), 4), 'x': (dtype('int32'), 0),
                       'z': (dtype('int32'), 8), 'value': (dtype('float64'), 12),
diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py
--- a/pypy/module/micronumpy/test/test_ndarray.py
+++ b/pypy/module/micronumpy/test/test_ndarray.py
@@ -1830,7 +1830,7 @@
         s = y.swapaxes(0, 1)
         v = s.view(y.__class__)
         assert v.strides == (4, 24)
-    
+
     def test_tolist_scalar(self):
         from numpy import dtype
         int32 = dtype('int32').type
diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py
--- a/pypy/module/micronumpy/types.py
+++ b/pypy/module/micronumpy/types.py
@@ -1,5 +1,6 @@
 import functools
 import math
+from rpython.rlib.unroll import unrolling_iterable
 from pypy.interpreter.error import OperationError, oefmt
 from pypy.objspace.std.floatobject import float2string
 from pypy.objspace.std.complexobject import str_format
@@ -22,6 +23,7 @@
 from pypy.module.micronumpy import boxes
 from pypy.module.micronumpy.concrete import SliceArray, VoidBoxStorage, V_OBJECTSTORE
 from pypy.module.micronumpy.strides import calc_strides
+from . import constants as NPY
 
 degToRad = math.pi / 180.0
 log2 = math.log(2)
@@ -147,6 +149,14 @@
         else:
             return alloc_raw_storage(size, track_allocation=False, zero=False)
 
+    @classmethod
+    def basesize(cls):
+        return rffi.sizeof(cls.T)
+
+    def can_cast_to(self, other):
+        # equivalent to PyArray_CanCastSafely
+        return casting_table[self.num][other.num]
+
 class Primitive(object):
     _mixin_ = True
 
@@ -339,6 +349,9 @@
 
 class Bool(BaseType, Primitive):
     T = lltype.Bool
+    num = NPY.BOOL
+    kind = NPY.GENBOOLLTR
+    char = NPY.BOOLLTR
     BoxType = boxes.W_BoolBox
     format_code = "?"
 
@@ -431,6 +444,7 @@
 
 class Integer(Primitive):
     _mixin_ = True
+    signed = True
 
     def _base_coerce(self, space, w_item):
         if w_item is None:
@@ -574,33 +588,54 @@
 
 class Int8(BaseType, Integer):
     T = rffi.SIGNEDCHAR
+    num = NPY.BYTE
+    kind = NPY.SIGNEDLTR
+    char = NPY.BYTELTR
     BoxType = boxes.W_Int8Box
     format_code = "b"
 
 class UInt8(BaseType, Integer):
     T = rffi.UCHAR
+    num = NPY.UBYTE
+    kind = NPY.UNSIGNEDLTR
+    char = NPY.UBYTELTR
     BoxType = boxes.W_UInt8Box
     format_code = "B"
+    signed = False
 
 class Int16(BaseType, Integer):
     T = rffi.SHORT
+    num = NPY.SHORT
+    kind = NPY.SIGNEDLTR
+    char = NPY.SHORTLTR
     BoxType = boxes.W_Int16Box
     format_code = "h"
 
 class UInt16(BaseType, Integer):
     T = rffi.USHORT
+    num = NPY.USHORT
+    kind = NPY.UNSIGNEDLTR
+    char = NPY.USHORTLTR
     BoxType = boxes.W_UInt16Box
     format_code = "H"
+    signed = False
 
 class Int32(BaseType, Integer):
     T = rffi.INT
+    num = NPY.INT
+    kind = NPY.SIGNEDLTR
+    char = NPY.INTLTR
     BoxType = boxes.W_Int32Box
     format_code = "i"
 
 class UInt32(BaseType, Integer):
     T = rffi.UINT
+    num = NPY.UINT
+    kind = NPY.UNSIGNEDLTR
+    char = NPY.UINTLTR
     BoxType = boxes.W_UInt32Box
     format_code = "I"
+    signed = False
 
 def _int64_coerce(self, space, w_item):
     try:
@@ -617,6 +652,9 @@
 
 class Int64(BaseType, Integer):
     T = rffi.LONGLONG
+    num = NPY.LONGLONG
+    kind = NPY.SIGNEDLTR
+    char = NPY.LONGLONGLTR
     BoxType = boxes.W_Int64Box
     format_code = "q"
 
@@ -638,13 +676,20 @@
 
 class UInt64(BaseType, Integer):
     T = rffi.ULONGLONG
+    num = NPY.ULONGLONG
+    kind = NPY.UNSIGNEDLTR
+    char = NPY.ULONGLONGLTR
     BoxType = boxes.W_UInt64Box
     format_code = "Q"
+    signed = False
 
     _coerce = func_with_new_name(_uint64_coerce, '_coerce')
 
 class Long(BaseType, Integer):
     T = rffi.LONG
+    num = NPY.LONG
+    kind = NPY.SIGNEDLTR
+    char = NPY.LONGLTR
     BoxType = boxes.W_LongBox
     format_code = "l"
 
@@ -663,8 +708,12 @@
 
 class ULong(BaseType, Integer):
     T = rffi.ULONG
+    num = NPY.ULONG
+    kind = NPY.UNSIGNEDLTR
+    char = NPY.ULONGLTR
     BoxType = boxes.W_ULongBox
     format_code = "L"
+    signed = False
 
     _coerce = func_with_new_name(_ulong_coerce, '_coerce')
 
@@ -999,7 +1048,11 @@
 class Float16(BaseType, Float):
     _STORAGE_T = rffi.USHORT
     T = rffi.SHORT
+    num = NPY.HALF
+    kind = NPY.FLOATINGLTR
+    char = NPY.HALFLTR
     BoxType = boxes.W_Float16Box
+    max_value = 65000.
 
     @specialize.argtype(1)
     def box(self, value):
@@ -1039,13 +1092,21 @@
 
 class Float32(BaseType, Float):
     T = rffi.FLOAT
+    num = NPY.FLOAT
+    kind = NPY.FLOATINGLTR
+    char = NPY.FLOATLTR
     BoxType = boxes.W_Float32Box
     format_code = "f"
+    max_value = 3.4e38
 
 class Float64(BaseType, Float):
     T = rffi.DOUBLE
+    num = NPY.DOUBLE
+    kind = NPY.FLOATINGLTR
+    char = NPY.DOUBLELTR
     BoxType = boxes.W_Float64Box
     format_code = "d"
+    max_value = 1.7e308
 
 class ComplexFloating(object):
     _mixin_ = True
@@ -1641,28 +1702,46 @@
 
 class Complex64(ComplexFloating, BaseType):
     T = rffi.FLOAT
+    num = NPY.CFLOAT
+    kind = NPY.COMPLEXLTR
+    char = NPY.CFLOATLTR
     BoxType = boxes.W_Complex64Box
     ComponentBoxType = boxes.W_Float32Box
+    ComponentType = Float32
 
 class Complex128(ComplexFloating, BaseType):
     T = rffi.DOUBLE
+    num = NPY.CDOUBLE
+    kind = NPY.COMPLEXLTR
+    char = NPY.CDOUBLELTR
     BoxType = boxes.W_Complex128Box
     ComponentBoxType = boxes.W_Float64Box
+    ComponentType = Float64
 
 if boxes.long_double_size == 8:
     class FloatLong(BaseType, Float):
         T = rffi.DOUBLE
+        num = NPY.LONGDOUBLE
+        kind = NPY.FLOATINGLTR
+        char = NPY.LONGDOUBLELTR
         BoxType = boxes.W_FloatLongBox
         format_code = "d"
 
     class ComplexLong(ComplexFloating, BaseType):
         T = rffi.DOUBLE
+        num = NPY.CLONGDOUBLE
+        kind = NPY.COMPLEXLTR
+        char = NPY.CLONGDOUBLELTR
         BoxType = boxes.W_ComplexLongBox
         ComponentBoxType = boxes.W_FloatLongBox
+        ComponentType = FloatLong
 
 elif boxes.long_double_size in (12, 16):
     class FloatLong(BaseType, Float):
         T = rffi.LONGDOUBLE
+        num = NPY.LONGDOUBLE
+        kind = NPY.FLOATINGLTR
+        char = NPY.LONGDOUBLELTR
         BoxType = boxes.W_FloatLongBox
 
         def runpack_str(self, space, s):
@@ -1680,13 +1759,20 @@
 
     class ComplexLong(ComplexFloating, BaseType):
         T = rffi.LONGDOUBLE
+        num = NPY.CLONGDOUBLE
+        kind = NPY.COMPLEXLTR
+        char = NPY.CLONGDOUBLELTR
         BoxType = boxes.W_ComplexLongBox
         ComponentBoxType = boxes.W_FloatLongBox
+        ComponentType = FloatLong
 
 _all_objs_for_tests = [] # for tests
 
 class ObjectType(Primitive, BaseType):
     T = lltype.Signed
+    num = NPY.OBJECT
+    kind = NPY.OBJECTLTR
+    char = NPY.OBJECTLTR
     BoxType = boxes.W_ObjectBox
 
     def get_element_size(self):
@@ -1747,7 +1833,7 @@
         else:
             raise oefmt(self.space.w_NotImplementedError,
                 "object dtype cannot unbox %s", str(box))
-            
+
     @specialize.argtype(1)
     def box(self, w_obj):
         if isinstance(w_obj, W_Root):
@@ -1998,6 +2084,9 @@
 
 class StringType(FlexibleType):
     T = lltype.Char
+    num = NPY.STRING
+    kind = NPY.STRINGLTR
+    char = NPY.STRINGLTR
 
     @jit.unroll_safe
     def coerce(self, space, dtype, w_item):
@@ -2099,6 +2188,9 @@
 
 class UnicodeType(FlexibleType):
     T = lltype.Char
+    num = NPY.UNICODE
+    kind = NPY.UNICODELTR
+    char = NPY.UNICODELTR
 
     def get_element_size(self):
         return 4  # always UTF-32
@@ -2163,6 +2255,9 @@
 
 class VoidType(FlexibleType):
     T = lltype.Char
+    num = NPY.VOID
+    kind = NPY.VOIDLTR
+    char = NPY.VOIDLTR
 
     def _coerce(self, space, arr, ofs, dtype, w_items, shape):
         # TODO: Make sure the shape and the array match
@@ -2247,8 +2342,14 @@
                     "item() for Void aray with no fields not implemented"))
         return space.newtuple(ret_unwrapped)
 
+class CharType(StringType):
+    char = NPY.CHARLTR
+
 class RecordType(FlexibleType):
     T = lltype.Char
+    num = NPY.VOID
+    kind = NPY.VOIDLTR
+    char = NPY.VOIDLTR
 
     def read(self, arr, i, offset, dtype=None):
         if dtype is None:
@@ -2366,8 +2467,11 @@
 del tp
 
 all_float_types = []
+float_types = []
 all_int_types = []
+int_types = []
 all_complex_types = []
+complex_types = []
 
 def _setup():
     # compute alignment
@@ -2376,9 +2480,168 @@
             tp.alignment = widen(clibffi.cast_type_to_ffitype(tp.T).c_alignment)
             if issubclass(tp, Float):
                 all_float_types.append((tp, 'float'))
+                float_types.append(tp)
             if issubclass(tp, Integer):
                 all_int_types.append((tp, 'int'))
+                int_types.append(tp)
             if issubclass(tp, ComplexFloating):
                 all_complex_types.append((tp, 'complex'))
+                complex_types.append(tp)
 _setup()
 del _setup
+
+casting_table = [[False] * NPY.NTYPES for _ in range(NPY.NTYPES)]
+number_types = int_types + float_types + complex_types
+all_types = number_types + [ObjectType, StringType, UnicodeType, VoidType]
+
+def enable_cast(type1, type2):
+    casting_table[type1.num][type2.num] = True
+
+for tp in all_types:
+    enable_cast(tp, tp)
+    if tp.num != NPY.DATETIME:
+        enable_cast(Bool, tp)
+    enable_cast(tp, ObjectType)
+    enable_cast(tp, VoidType)
+enable_cast(StringType, UnicodeType)
+#enable_cast(Bool, TimeDelta)
+
+for tp in number_types:
+    enable_cast(tp, StringType)
+    enable_cast(tp, UnicodeType)
+
+for tp1 in int_types:
+    for tp2 in int_types:
+        if tp1.signed:
+            if tp2.signed and tp1.basesize() <= tp2.basesize():
+                enable_cast(tp1, tp2)
+        else:
+            if tp2.signed and tp1.basesize() < tp2.basesize():
+                enable_cast(tp1, tp2)
+            elif not tp2.signed and tp1.basesize() <= tp2.basesize():
+                enable_cast(tp1, tp2)
+for tp1 in int_types:
+    for tp2 in float_types + complex_types:
+        size1 = tp1.basesize()
+        size2 = tp2.basesize()
+        if (size1 < 8 and size2 > size1) or (size1 >= 8 and size2 >= size1):
+            enable_cast(tp1, tp2)
+for tp1 in float_types:
+    for tp2 in float_types + complex_types:
+        if tp1.basesize() <= tp2.basesize():
+            enable_cast(tp1, tp2)
+for tp1 in complex_types:
+    for tp2 in complex_types:
+        if tp1.basesize() <= tp2.basesize():
+            enable_cast(tp1, tp2)
+
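(For reference, a minimal sketch, not part of the patch, of how the table built above can be consulted; can_cast_safely is a hypothetical helper name:)

    def can_cast_safely(type_from, type_to):
        # casting_table is indexed by the NPY type numbers assigned above
        return casting_table[type_from.num][type_to.num]

    # With the rules above, e.g. Int8 -> Int16 is enabled (same signedness,
    # larger size), while Int64 -> Float32 is not (a 4-byte float cannot
    # represent every 8-byte integer exactly).
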
+_int_types = [(Int8, UInt8), (Int16, UInt16), (Int32, UInt32),
+        (Int64, UInt64), (Long, ULong)]
+for Int_t, UInt_t in _int_types:
+    Int_t.Unsigned = UInt_t
+    UInt_t.Signed = Int_t
+    size = rffi.sizeof(Int_t.T)
+    Int_t.min_value = rffi.cast(Int_t.T, -1) << (8*size - 1)
+    Int_t.max_value = ~Int_t.min_value
+    UInt_t.max_value = ~rffi.cast(UInt_t.T, 0)
+
+
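As a quick sanity check (not part of the patch), the bit manipulations above reproduce the usual two's-complement bounds; for the 1-byte pair:

    # size = 1, so:
    #   Int8.min_value  = (-1) << 7       ->  -128
    #   Int8.max_value  = ~(-128)         ->   127
    #   UInt8.max_value = ~0 (as uint8)   ->   255
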
+signed_types = [Int8, Int16, Int32, Int64, Long]
+
+def make_integer_min_dtype(Int_t, UInt_t):
+    smaller_types = [tp for tp in signed_types
+            if rffi.sizeof(tp.T) < rffi.sizeof(Int_t.T)]
+    smaller_types = unrolling_iterable(
+            [(tp, tp.Unsigned) for tp in smaller_types])
+    def min_dtype(self):
+        value = rffi.cast(UInt64.T, self.value)
+        for Small, USmall in smaller_types:
+            signed_max = rffi.cast(UInt64.T, Small.max_value)
+            unsigned_max = rffi.cast(UInt64.T, USmall.max_value)
+            if value <= unsigned_max:
+                if value <= signed_max:
+                    return Small.num, USmall.num
+                else:
+                    return USmall.num, USmall.num
+        if value <= rffi.cast(UInt64.T, Int_t.max_value):
+            return Int_t.num, UInt_t.num
+        else:
+            return UInt_t.num, UInt_t.num
+    UInt_t.BoxType.min_dtype = min_dtype
+
+    def min_dtype(self):
+        value = rffi.cast(Int64.T, self.value)
+        if value >= 0:
+            for Small, USmall in smaller_types:
+                signed_max = rffi.cast(Int64.T, Small.max_value)
+                unsigned_max = rffi.cast(Int64.T, USmall.max_value)
+                if value <= unsigned_max:
+                    if value <= signed_max:
+                        return Small.num, USmall.num
+                    else:
+                        return USmall.num, USmall.num
+            return Int_t.num, UInt_t.num
+        else:
+            for Small, USmall in smaller_types:
+                signed_min = rffi.cast(Int64.T, Small.min_value)
+                if value >= signed_min:
+                        return Small.num, Small.num
+            return Int_t.num, Int_t.num
+    Int_t.BoxType.min_dtype = min_dtype
+
+for Int_t in signed_types:
+    UInt_t = Int_t.Unsigned
+    make_integer_min_dtype(Int_t, UInt_t)
+
+
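A few concrete inputs (not part of the patch) show what these min_dtype() helpers return; the pairs follow directly from the code above:

    # unsigned box holding 100  -> (Int8.num,  UInt8.num)   (fits a signed byte)
    # unsigned box holding 200  -> (UInt8.num, UInt8.num)   (fits only an unsigned byte)
    # signed box holding -1     -> (Int8.num,  Int8.num)    (negative, so signed only)
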
+smaller_float_types = {
+    Float16: [], Float32: [Float16], Float64: [Float16, Float32],
+    FloatLong: [Float16, Float32, Float64]}
+
+def make_float_min_dtype(Float_t):
+    smaller_types = unrolling_iterable(smaller_float_types[Float_t])
+    smallest_type = Float16
+
+    def min_dtype(self):
+        value = float(self.value)
+        if not rfloat.isfinite(value):
+            tp = smallest_type
+        else:
+            for SmallFloat in smaller_types:
+                if -SmallFloat.max_value < value < SmallFloat.max_value:
+                    tp = SmallFloat
+                    break
+            else:
+                tp = Float_t
+        return tp.num, tp.num
+    Float_t.BoxType.min_dtype = min_dtype
+
+for Float_t in float_types:
+    make_float_min_dtype(Float_t)
+
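Similarly for floats (not part of the patch; the thresholds are the usual IEEE half/single limits):

    # Float64 box holding 1.5          -> (Float16.num, Float16.num)
    # Float64 box holding 1e10         -> (Float32.num, Float32.num)
    # Float64 box holding float('inf') -> (Float16.num, Float16.num)  # non-finite case
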
+smaller_complex_types = {
+    Complex64: [], Complex128: [Complex64],
+    ComplexLong: [Complex64, Complex128]}
+
+def make_complex_min_dtype(Complex_t):
+    smaller_types = unrolling_iterable(smaller_complex_types[Complex_t])
+
+    def min_dtype(self):
+        real, imag = float(self.real), float(self.imag)
+        for CSmall in smaller_types:
+            max_value = CSmall.ComponentType.max_value
+
+            if -max_value < real < max_value and -max_value < imag < max_value:
+                tp = CSmall
+                break
+        else:
+            tp = Complex_t
+        return tp.num, tp.num
+    Complex_t.BoxType.min_dtype = min_dtype
+
+for Complex_t in complex_types:
+    make_complex_min_dtype(Complex_t)
+
+def min_dtype(self):
+    return Bool.num, Bool.num
+Bool.BoxType.min_dtype = min_dtype
diff --git a/pypy/module/pypyjit/interp_resop.py b/pypy/module/pypyjit/interp_resop.py
--- a/pypy/module/pypyjit/interp_resop.py
+++ b/pypy/module/pypyjit/interp_resop.py
@@ -245,7 +245,7 @@
                             WrappedOp.descr_setresult),
     offset = interp_attrproperty("offset", cls=WrappedOp),
 )
-WrappedOp.acceptable_as_base_class = False
+WrappedOp.typedef.acceptable_as_base_class = False
 
 DebugMergePoint.typedef = TypeDef(
     'DebugMergePoint', WrappedOp.typedef,
@@ -266,7 +266,7 @@
                      doc="Name of the jitdriver 'pypyjit' in the case "
                                     "of the main interpreter loop"),
 )
-DebugMergePoint.acceptable_as_base_class = False
+DebugMergePoint.typedef.acceptable_as_base_class = False
 
 
 class W_JitLoopInfo(W_Root):
@@ -359,7 +359,7 @@
                                   doc="Length of machine code"),
     __repr__ = interp2app(W_JitLoopInfo.descr_repr),
 )
-W_JitLoopInfo.acceptable_as_base_class = False
+W_JitLoopInfo.typedef.acceptable_as_base_class = False
 
 
 class W_JitInfoSnapshot(W_Root):
@@ -379,7 +379,7 @@
                                             cls=W_JitInfoSnapshot,
                                             doc="various JIT timers")
 )
-W_JitInfoSnapshot.acceptable_as_base_class = False
+W_JitInfoSnapshot.typedef.acceptable_as_base_class = False
 
 def get_stats_snapshot(space):
     """ Get the jit status in the specific moment in time. Note that this
diff --git a/pypy/module/test_lib_pypy/test_functools.py b/pypy/module/test_lib_pypy/test_functools.py
--- a/pypy/module/test_lib_pypy/test_functools.py
+++ b/pypy/module/test_lib_pypy/test_functools.py
@@ -6,8 +6,10 @@
 def test_partial_reduce():
     partial = _functools.partial(test_partial_reduce)
     state = partial.__reduce__()
+    d = state[2][2]
     assert state == (type(partial), (test_partial_reduce,),
-                     (test_partial_reduce, (), None, None))
+                     (test_partial_reduce, (), d, None))
+    assert d is None or d == {}      # both are acceptable
 
 def test_partial_setstate():
     partial = _functools.partial(object)
@@ -30,3 +32,15 @@
     assert str(exc.value) == "a partial object's dictionary may not be deleted"
     with pytest.raises(AttributeError):
         del partial.zzz
+
+def test_self_keyword():
+    partial = _functools.partial(dict, self=42)
+    assert partial(other=43) == {'self': 42, 'other': 43}
+
+def test_no_keywords():
+    kw1 = _functools.partial(dict).keywords
+    kw2 = _functools.partial(dict, **{}).keywords
+    # CPython gives different results for these two cases, which is not
+    # possible to emulate in pure Python; see issue #2043
+    assert kw1 == {} or kw1 is None
+    assert kw2 == {}
diff --git a/pypy/module/test_lib_pypy/test_gdbm_extra.py b/pypy/module/test_lib_pypy/test_gdbm_extra.py
new file mode 100644
--- /dev/null
+++ b/pypy/module/test_lib_pypy/test_gdbm_extra.py
@@ -0,0 +1,17 @@
+from __future__ import absolute_import
+import py
+from rpython.tool.udir import udir
+try:
+    from lib_pypy import gdbm
+except ImportError, e:
+    py.test.skip(e)
+
+def test_len():
+    path = str(udir.join('test_gdbm_extra'))
+    g = gdbm.open(path, 'c')
+    g['abc'] = 'def'
+    assert len(g) == 1
+    g['bcd'] = 'efg'
+    assert len(g) == 2
+    del g['abc']
+    assert len(g) == 1
diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py
--- a/rpython/jit/metainterp/blackhole.py
+++ b/rpython/jit/metainterp/blackhole.py
@@ -1225,32 +1225,39 @@
 
     @arguments("cpu", "r", "i", "d", "d", returns="i")
     def bhimpl_getarrayitem_vable_i(cpu, vable, index, fielddescr, arraydescr):
+        fielddescr.get_vinfo().clear_vable_token(vable)
         array = cpu.bh_getfield_gc_r(vable, fielddescr)
         return cpu.bh_getarrayitem_gc_i(array, index, arraydescr)
     @arguments("cpu", "r", "i", "d", "d", returns="r")
     def bhimpl_getarrayitem_vable_r(cpu, vable, index, fielddescr, arraydescr):
+        fielddescr.get_vinfo().clear_vable_token(vable)
         array = cpu.bh_getfield_gc_r(vable, fielddescr)
         return cpu.bh_getarrayitem_gc_r(array, index, arraydescr)
     @arguments("cpu", "r", "i", "d", "d", returns="f")
     def bhimpl_getarrayitem_vable_f(cpu, vable, index, fielddescr, arraydescr):
+        fielddescr.get_vinfo().clear_vable_token(vable)
         array = cpu.bh_getfield_gc_r(vable, fielddescr)
         return cpu.bh_getarrayitem_gc_f(array, index, arraydescr)
 
     @arguments("cpu", "r", "i", "i", "d", "d")
     def bhimpl_setarrayitem_vable_i(cpu, vable, index, newval, fdescr, adescr):
+        fdescr.get_vinfo().clear_vable_token(vable)
         array = cpu.bh_getfield_gc_r(vable, fdescr)
         cpu.bh_setarrayitem_gc_i(array, index, newval, adescr)
     @arguments("cpu", "r", "i", "r", "d", "d")
     def bhimpl_setarrayitem_vable_r(cpu, vable, index, newval, fdescr, adescr):
+        fdescr.get_vinfo().clear_vable_token(vable)
         array = cpu.bh_getfield_gc_r(vable, fdescr)
         cpu.bh_setarrayitem_gc_r(array, index, newval, adescr)
     @arguments("cpu", "r", "i", "f", "d", "d")
     def bhimpl_setarrayitem_vable_f(cpu, vable, index, newval, fdescr, adescr):
+        fdescr.get_vinfo().clear_vable_token(vable)
         array = cpu.bh_getfield_gc_r(vable, fdescr)
         cpu.bh_setarrayitem_gc_f(array, index, newval, adescr)
 
     @arguments("cpu", "r", "d", "d", returns="i")
     def bhimpl_arraylen_vable(cpu, vable, fdescr, adescr):
+        fdescr.get_vinfo().clear_vable_token(vable)
         array = cpu.bh_getfield_gc_r(vable, fdescr)
         return cpu.bh_arraylen_gc(array, adescr)
 
@@ -1288,9 +1295,20 @@
     bhimpl_getfield_gc_r_pure = bhimpl_getfield_gc_r
     bhimpl_getfield_gc_f_pure = bhimpl_getfield_gc_f
 
-    bhimpl_getfield_vable_i = bhimpl_getfield_gc_i
-    bhimpl_getfield_vable_r = bhimpl_getfield_gc_r
-    bhimpl_getfield_vable_f = bhimpl_getfield_gc_f
+    @arguments("cpu", "r", "d", returns="i")
+    def bhimpl_getfield_vable_i(cpu, struct, fielddescr):
+        fielddescr.get_vinfo().clear_vable_token(struct)
+        return cpu.bh_getfield_gc_i(struct, fielddescr)
+
+    @arguments("cpu", "r", "d", returns="r")
+    def bhimpl_getfield_vable_r(cpu, struct, fielddescr):
+        fielddescr.get_vinfo().clear_vable_token(struct)
+        return cpu.bh_getfield_gc_r(struct, fielddescr)
+
+    @arguments("cpu", "r", "d", returns="f")
+    def bhimpl_getfield_vable_f(cpu, struct, fielddescr):
+        fielddescr.get_vinfo().clear_vable_token(struct)
+        return cpu.bh_getfield_gc_f(struct, fielddescr)
 
     bhimpl_getfield_gc_i_greenfield = bhimpl_getfield_gc_i
     bhimpl_getfield_gc_r_greenfield = bhimpl_getfield_gc_r
@@ -1321,9 +1339,18 @@
     def bhimpl_setfield_gc_f(cpu, struct, newvalue, fielddescr):
         cpu.bh_setfield_gc_f(struct, newvalue, fielddescr)
 
-    bhimpl_setfield_vable_i = bhimpl_setfield_gc_i
-    bhimpl_setfield_vable_r = bhimpl_setfield_gc_r
-    bhimpl_setfield_vable_f = bhimpl_setfield_gc_f
+    @arguments("cpu", "r", "i", "d")
+    def bhimpl_setfield_vable_i(cpu, struct, newvalue, fielddescr):
+        fielddescr.get_vinfo().clear_vable_token(struct)
+        cpu.bh_setfield_gc_i(struct, newvalue, fielddescr)
+    @arguments("cpu", "r", "r", "d")
+    def bhimpl_setfield_vable_r(cpu, struct, newvalue, fielddescr):
+        fielddescr.get_vinfo().clear_vable_token(struct)
+        cpu.bh_setfield_gc_r(struct, newvalue, fielddescr)
+    @arguments("cpu", "r", "f", "d")
+    def bhimpl_setfield_vable_f(cpu, struct, newvalue, fielddescr):
+        fielddescr.get_vinfo().clear_vable_token(struct)
+        cpu.bh_setfield_gc_f(struct, newvalue, fielddescr)
 
     @arguments("cpu", "i", "i", "d")
     def bhimpl_setfield_raw_i(cpu, struct, newvalue, fielddescr):
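All of the helpers touched above gain the same first line: before the blackhole interpreter reads or writes a field (or array item) of a virtualizable, the virtualizable is forced via clear_vable_token(), instead of accessing the heap copy directly; this is what test_blackhole_should_also_force_virtualizables in the next file exercises.  A minimal sketch of the pattern, assuming a descr with an attached vinfo:

    def bh_read_vable_field(cpu, vable, fielddescr):
        fielddescr.get_vinfo().clear_vable_token(vable)   # force the virtualizable first
        return cpu.bh_getfield_gc_i(vable, fielddescr)
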
diff --git a/rpython/jit/metainterp/test/test_virtualizable.py b/rpython/jit/metainterp/test/test_virtualizable.py
--- a/rpython/jit/metainterp/test/test_virtualizable.py
+++ b/rpython/jit/metainterp/test/test_virtualizable.py
@@ -1701,6 +1701,78 @@
         res = self.meta_interp(f, [], listops=True)
         assert res == 0
 
+    def test_tracing_sees_nonstandard_vable_twice(self):
+        # This test might fail if we try to remove heapcache.clear_caches()'s
+        # call to reset_keep_likely_virtuals() for CALL_MAY_FORCE and, in
+        # doing so, forget to clean up the "nonstandard_virtualizable" fields.
+
+        class A:
+            _virtualizable_ = ['x']
+            @dont_look_inside
+            def __init__(self, x):
+                self.x = x
+            def check(self, expected_x):
+                if self.x != expected_x:
+                    raise ValueError
+
+        driver1 = JitDriver(greens=[], reds=['a'], virtualizables=['a'])
+        driver2 = JitDriver(greens=[], reds=['i'])
+
+        def f(a):
+            while a.x > 0:
+                driver1.jit_merge_point(a=a)
+                a.x -= 1
+
+        def main():
+            i = 10
+            while i > 0:
+                driver2.jit_merge_point(i=i)
+                a = A(10)
+                a.check(10)    # first time, 'a' has got no vable_token
+                f(a)
+                a.check(0)     # second time, the same 'a' has got one!
+                i -= 1
+            return 42
+
+        res = self.meta_interp(main, [], listops=True)
+        assert res == 42
+
+    def test_blackhole_should_also_force_virtualizables(self):
+        class A:
+            _virtualizable_ = ['x']
+            def __init__(self, x):
+                self.x = x
+
+        driver1 = JitDriver(greens=[], reds=['a'], virtualizables=['a'])
+        driver2 = JitDriver(greens=[], reds=['i'])
+
+        def f(a):
+            while a.x > 0:
+                driver1.jit_merge_point(a=a)
+                a.x -= 1
+
+        def main():
+            i = 10
+            while i > 0:
+                driver2.jit_merge_point(i=i)
+                a = A(10)
+                f(a)
+                # The interesting case is i==2.  We're running the rest of
+                # this function in the blackhole interp, because of this:
+                if i == 2:
+                    pass