Author: Armin Rigo <ar...@tunes.org>
Branch: cffi-1.0
Changeset: r77272:59750d4ad33f
Date: 2015-05-10 10:54 +0200
http://bitbucket.org/pypy/pypy/changeset/59750d4ad33f/

Log:    hg merge default

diff too long, truncating to 2000 out of 3596 lines

diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py
--- a/lib_pypy/_ctypes/function.py
+++ b/lib_pypy/_ctypes/function.py
@@ -276,7 +276,11 @@
             if argtypes:
                 args = [argtype._CData_retval(argtype.from_address(arg)._buffer)
                         for argtype, arg in zip(argtypes, args)]
-            return to_call(*args)
+            try:
+                return to_call(*args)
+            except SystemExit, e:
+                handle_system_exit(e)
+                raise
         return f
 
     def __call__(self, *args, **kwargs):
@@ -305,7 +309,11 @@
             except (UnicodeError, TypeError, ValueError), e:
                 raise ArgumentError(str(e))
             try:
-                res = self.callable(*newargs)
+                try:
+                    res = self.callable(*newargs)
+                except SystemExit, e:
+                    handle_system_exit(e)
+                    raise
             except:
                 exc_info = sys.exc_info()
                 traceback.print_tb(exc_info[2], file=sys.stderr)
@@ -715,3 +723,22 @@
     make_fastpath_subclass.memo[CFuncPtr] = CFuncPtrFast
     return CFuncPtrFast
 make_fastpath_subclass.memo = {}
+
+
+def handle_system_exit(e):
+    # issue #1194: if we get SystemExit here, then exit the interpreter.
+    # Highly obscure imho but some people seem to depend on it.
+    if sys.flags.inspect:
+        return        # Don't exit if -i flag was given.
+    else:
+        code = e.code
+        if isinstance(code, int):
+            exitcode = code
+        else:
+            f = getattr(sys, 'stderr', None)
+            if f is None:
+                f = sys.__stderr__
+            print >> f, code
+            exitcode = 1
+
+        _rawffi.exit(exitcode)
diff --git a/lib_pypy/_functools.py b/lib_pypy/_functools.py
--- a/lib_pypy/_functools.py
+++ b/lib_pypy/_functools.py
@@ -8,16 +8,16 @@
     partial(func, *args, **keywords) - new function with partial application
     of the given arguments and keywords.
     """
-
-    def __init__(self, *args, **keywords):
-        if not args:
-            raise TypeError('__init__() takes at least 2 arguments (1 given)')
-        func, args = args[0], args[1:]
+    def __init__(*args, **keywords):
+        if len(args) < 2:
+            raise TypeError('__init__() takes at least 2 arguments (%d given)'
+                            % len(args))
+        self, func, args = args[0], args[1], args[2:]
         if not callable(func):
             raise TypeError("the first argument must be callable")
         self._func = func
         self._args = args
-        self._keywords = keywords or None
+        self._keywords = keywords
 
     def __delattr__(self, key):
         if key == '__dict__':
@@ -37,19 +37,22 @@
         return self._keywords
 
     def __call__(self, *fargs, **fkeywords):
-        if self.keywords is not None:
-            fkeywords = dict(self.keywords, **fkeywords)
-        return self.func(*(self.args + fargs), **fkeywords)
+        if self._keywords:
+            fkeywords = dict(self._keywords, **fkeywords)
+        return self._func(*(self._args + fargs), **fkeywords)
 
     def __reduce__(self):
         d = dict((k, v) for k, v in self.__dict__.iteritems() if k not in
                  ('_func', '_args', '_keywords'))
         if len(d) == 0:
             d = None
-        return (type(self), (self.func,),
-                (self.func, self.args, self.keywords, d))
+        return (type(self), (self._func,),
+                (self._func, self._args, self._keywords, d))
 
     def __setstate__(self, state):
-        self._func, self._args, self._keywords, d = state
+        func, args, keywords, d = state
         if d is not None:
             self.__dict__.update(d)
+        self._func = func
+        self._args = args
+        self._keywords = keywords
diff --git a/lib_pypy/gdbm.py b/lib_pypy/gdbm.py
--- a/lib_pypy/gdbm.py
+++ b/lib_pypy/gdbm.py
@@ -1,4 +1,6 @@
 import cffi, os, sys
+import thread
+_lock = thread.allocate_lock()
 
 ffi = cffi.FFI()
 ffi.cdef('''
@@ -40,6 +42,7 @@
 
 try:
     verify_code = '''
+    #include <stdlib.h>
     #include "gdbm.h"
 
     static datum pygdbm_fetch(GDBM_FILE gdbm_file, char *dptr, int dsize) {
@@ -86,101 +89,121 @@
     return {'dptr': ffi.new("char[]", key), 'dsize': len(key)}
 
 class
gdbm(object): - ll_dbm = None + __ll_dbm = None + + # All public methods need to acquire the lock; all private methods + # assume the lock is already held. Thus public methods cannot call + # other public methods. def __init__(self, filename, iflags, mode): - res = lib.gdbm_open(filename, 0, iflags, mode, ffi.NULL) - self.size = -1 - if not res: - self._raise_from_errno() - self.ll_dbm = res + with _lock: + res = lib.gdbm_open(filename, 0, iflags, mode, ffi.NULL) + self.__size = -1 + if not res: + self.__raise_from_errno() + self.__ll_dbm = res def close(self): - if self.ll_dbm: - lib.gdbm_close(self.ll_dbm) - self.ll_dbm = None + with _lock: + if self.__ll_dbm: + lib.gdbm_close(self.__ll_dbm) + self.__ll_dbm = None - def _raise_from_errno(self): + def __raise_from_errno(self): if ffi.errno: raise error(ffi.errno, os.strerror(ffi.errno)) raise error(lib.gdbm_errno, lib.gdbm_strerror(lib.gdbm_errno)) def __len__(self): - if self.size < 0: - self.size = len(self.keys()) - return self.size + with _lock: + if self.__size < 0: + self.__size = len(self.__keys()) + return self.__size def __setitem__(self, key, value): - self._check_closed() - self._size = -1 - r = lib.gdbm_store(self.ll_dbm, _fromstr(key), _fromstr(value), - lib.GDBM_REPLACE) - if r < 0: - self._raise_from_errno() + with _lock: + self.__check_closed() + self.__size = -1 + r = lib.gdbm_store(self.__ll_dbm, _fromstr(key), _fromstr(value), + lib.GDBM_REPLACE) + if r < 0: + self.__raise_from_errno() def __delitem__(self, key): - self._check_closed() - res = lib.gdbm_delete(self.ll_dbm, _fromstr(key)) - if res < 0: - raise KeyError(key) + with _lock: + self.__check_closed() + self.__size = -1 + res = lib.gdbm_delete(self.__ll_dbm, _fromstr(key)) + if res < 0: + raise KeyError(key) def __contains__(self, key): - self._check_closed() - key = _checkstr(key) - return lib.pygdbm_exists(self.ll_dbm, key, len(key)) + with _lock: + self.__check_closed() + key = _checkstr(key) + return lib.pygdbm_exists(self.__ll_dbm, key, len(key)) has_key = __contains__ def __getitem__(self, key): - self._check_closed() - key = _checkstr(key) - drec = lib.pygdbm_fetch(self.ll_dbm, key, len(key)) - if not drec.dptr: - raise KeyError(key) - res = str(ffi.buffer(drec.dptr, drec.dsize)) - lib.free(drec.dptr) - return res + with _lock: + self.__check_closed() + key = _checkstr(key) + drec = lib.pygdbm_fetch(self.__ll_dbm, key, len(key)) + if not drec.dptr: + raise KeyError(key) + res = str(ffi.buffer(drec.dptr, drec.dsize)) + lib.free(drec.dptr) + return res - def keys(self): - self._check_closed() + def __keys(self): + self.__check_closed() l = [] - key = lib.gdbm_firstkey(self.ll_dbm) + key = lib.gdbm_firstkey(self.__ll_dbm) while key.dptr: l.append(str(ffi.buffer(key.dptr, key.dsize))) - nextkey = lib.gdbm_nextkey(self.ll_dbm, key) + nextkey = lib.gdbm_nextkey(self.__ll_dbm, key) lib.free(key.dptr) key = nextkey return l + def keys(self): + with _lock: + return self.__keys() + def firstkey(self): - self._check_closed() - key = lib.gdbm_firstkey(self.ll_dbm) - if key.dptr: - res = str(ffi.buffer(key.dptr, key.dsize)) - lib.free(key.dptr) - return res + with _lock: + self.__check_closed() + key = lib.gdbm_firstkey(self.__ll_dbm) + if key.dptr: + res = str(ffi.buffer(key.dptr, key.dsize)) + lib.free(key.dptr) + return res def nextkey(self, key): - self._check_closed() - key = lib.gdbm_nextkey(self.ll_dbm, _fromstr(key)) - if key.dptr: - res = str(ffi.buffer(key.dptr, key.dsize)) - lib.free(key.dptr) - return res + with _lock: + self.__check_closed() + key = 
lib.gdbm_nextkey(self.__ll_dbm, _fromstr(key)) + if key.dptr: + res = str(ffi.buffer(key.dptr, key.dsize)) + lib.free(key.dptr) + return res def reorganize(self): - self._check_closed() - if lib.gdbm_reorganize(self.ll_dbm) < 0: - self._raise_from_errno() + with _lock: + self.__check_closed() + if lib.gdbm_reorganize(self.__ll_dbm) < 0: + self.__raise_from_errno() - def _check_closed(self): - if not self.ll_dbm: + def __check_closed(self): + if not self.__ll_dbm: raise error(0, "GDBM object has already been closed") __del__ = close def sync(self): - self._check_closed() - lib.gdbm_sync(self.ll_dbm) + with _lock: + self.__check_closed() + lib.gdbm_sync(self.__ll_dbm) def open(filename, flags='r', mode=0666): if flags[0] == 'r': diff --git a/lib_pypy/greenlet.egg-info b/lib_pypy/greenlet.egg-info --- a/lib_pypy/greenlet.egg-info +++ b/lib_pypy/greenlet.egg-info @@ -1,6 +1,6 @@ Metadata-Version: 1.0 Name: greenlet -Version: 0.4.5 +Version: 0.4.6 Summary: Lightweight in-process concurrent programming Home-page: https://github.com/python-greenlet/greenlet Author: Ralf Schmitt (for CPython), PyPy team diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py --- a/lib_pypy/greenlet.py +++ b/lib_pypy/greenlet.py @@ -1,7 +1,7 @@ import sys import _continuation -__version__ = "0.4.5" +__version__ = "0.4.6" # ____________________________________________________________ # Exceptions diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -71,3 +71,13 @@ .. branch: vmprof2 Add backend support for vmprof - a lightweight statistical profiler - to linux64, see client at https://vmprof.readthedocs.org + +.. branch: jit_hint_docs +Add more detail to @jit.elidable and @jit.promote in rpython/rlib/jit.py + +.. branch: remove-frame-debug-attrs +Remove the debug attributes from frames only used for tracing and replace +them with a debug object that is created on-demand + +.. branch: can_cast +Implement np.can_cast, np.min_scalar_type and missing dtype comparison operations. diff --git a/pypy/goal/pypy.ico b/pypy/goal/pypy.ico new file mode 100644 index 0000000000000000000000000000000000000000..09d07dcc5a783200f440c68c0987926a80d6b667 GIT binary patch [cut] diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -238,6 +238,7 @@ config.translation.suggest(check_str_without_nul=True) config.translation.suggest(shared=True) + config.translation.suggest(icon=os.path.join(this_dir, 'pypy.ico')) if config.translation.shared: if config.translation.output is not None: raise Exception("Cannot use the --output option with PyPy " diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1091,7 +1091,7 @@ def call_valuestack(self, w_func, nargs, frame): from pypy.interpreter.function import Function, Method, is_builtin_code - if frame.is_being_profiled and is_builtin_code(w_func): + if frame.get_is_being_profiled() and is_builtin_code(w_func): # XXX: this code is copied&pasted :-( from the slow path below # call_valuestack(). 
args = frame.make_arguments(nargs) diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -96,7 +96,7 @@ def _c_call_return_trace(self, frame, w_func, args, event): if self.profilefunc is None: - frame.is_being_profiled = False + frame.getorcreatedebug().is_being_profiled = False else: # undo the effect of the CALL_METHOD bytecode, which would be # that even on a built-in method call like '[].append()', @@ -114,7 +114,7 @@ def c_exception_trace(self, frame, w_exc): "Profile function called upon OperationError." if self.profilefunc is None: - frame.is_being_profiled = False + frame.getorcreatedebug().is_being_profiled = False else: self._trace(frame, 'c_exception', w_exc) @@ -123,7 +123,7 @@ if self.gettrace() is not None or self.profilefunc is not None: self._trace(frame, 'call', self.space.w_None) if self.profilefunc: - frame.is_being_profiled = True + frame.getorcreatedebug().is_being_profiled = True def return_trace(self, frame, w_retval): "Trace the return from a function" @@ -145,7 +145,7 @@ Like bytecode_trace() but doesn't invoke any other events besides the trace function. """ - if (frame.w_f_trace is None or self.is_tracing or + if (frame.get_w_f_trace() is None or self.is_tracing or self.gettrace() is None): return self.run_trace_func(frame) @@ -154,8 +154,9 @@ @jit.unroll_safe def run_trace_func(self, frame): code = frame.pycode - if frame.instr_lb <= frame.last_instr < frame.instr_ub: - if frame.last_instr < frame.instr_prev_plus_one: + d = frame.getorcreatedebug() + if d.instr_lb <= frame.last_instr < d.instr_ub: + if frame.last_instr < d.instr_prev_plus_one: # We jumped backwards in the same line. self._trace(frame, 'line', self.space.w_None) else: @@ -170,7 +171,7 @@ break addr += c if c: - frame.instr_lb = addr + d.instr_lb = addr line += ord(lineno[p + 1]) p += 2 @@ -185,15 +186,15 @@ if ord(lineno[p + 1]): break p += 2 - frame.instr_ub = addr + d.instr_ub = addr else: - frame.instr_ub = sys.maxint + d.instr_ub = sys.maxint - if frame.instr_lb == frame.last_instr: # At start of line! - frame.f_lineno = line + if d.instr_lb == frame.last_instr: # At start of line! + d.f_lineno = line self._trace(frame, 'line', self.space.w_None) - frame.instr_prev_plus_one = frame.last_instr + 1 + d.instr_prev_plus_one = frame.last_instr + 1 def bytecode_trace_after_exception(self, frame): "Like bytecode_trace(), but without increasing the ticker." 
@@ -288,7 +289,7 @@ frame = self.gettopframe_nohidden() while frame: if is_being_profiled: - frame.is_being_profiled = True + frame.getorcreatedebug().is_being_profiled = True frame = self.getnextframe_nohidden(frame) def call_tracing(self, w_func, w_args): @@ -309,7 +310,7 @@ if event == 'call': w_callback = self.gettrace() else: - w_callback = frame.w_f_trace + w_callback = frame.get_w_f_trace() if w_callback is not None and event != "leaveframe": if operr is not None: @@ -320,15 +321,16 @@ frame.fast2locals() self.is_tracing += 1 try: + d = frame.getorcreatedebug() try: w_result = space.call_function(w_callback, space.wrap(frame), space.wrap(event), w_arg) if space.is_w(w_result, space.w_None): - frame.w_f_trace = None + d.w_f_trace = None else: - frame.w_f_trace = w_result + d.w_f_trace = w_result except: self.settrace(space.w_None) - frame.w_f_trace = None + d.w_f_trace = None raise finally: self.is_tracing -= 1 diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -23,6 +23,19 @@ globals()[op] = stdlib_opcode.opmap[op] HAVE_ARGUMENT = stdlib_opcode.HAVE_ARGUMENT +class FrameDebugData(object): + """ A small object that holds debug data for tracing + """ + w_f_trace = None + instr_lb = 0 + instr_ub = 0 + instr_prev_plus_one = 0 + f_lineno = 0 # current lineno for tracing + is_being_profiled = False + w_locals = None + + def __init__(self, pycode): + self.f_lineno = pycode.co_firstlineno class PyFrame(W_Root): """Represents a frame for a regular Python function @@ -31,7 +44,8 @@ Public fields: * 'space' is the object space this frame is running in * 'code' is the PyCode object this frame runs - * 'w_locals' is the locals dictionary to use + * 'w_locals' is the locals dictionary to use, if needed, stored on a + debug object * 'w_globals' is the attached globals dictionary * 'builtin' is the attached built-in module * 'valuestack_w', 'blockstack', control the interpretation @@ -49,13 +63,26 @@ last_instr = -1 last_exception = None f_backref = jit.vref_None - w_f_trace = None - # For tracing - instr_lb = 0 - instr_ub = 0 - instr_prev_plus_one = 0 - is_being_profiled = False + escaped = False # see mark_as_escaped() + debugdata = None + + w_globals = None + pycode = None # code object executed by that frame + locals_stack_w = None # the list of all locals and valuestack + valuestackdepth = 0 # number of items on valuestack + lastblock = None + cells = None # cells + + # other fields: + + # builtin - builtin cache, only if honor__builtins__ is True + # defaults to False + + # there is also self.space which is removed by the annotator + + # additionally JIT uses vable_token field that is representing + # frame current virtualizable state as seen by the JIT def __init__(self, space, code, w_globals, outer_func): if not we_are_translated(): @@ -65,11 +92,9 @@ assert isinstance(code, pycode.PyCode) self.space = space self.w_globals = w_globals - self.w_locals = None self.pycode = code self.locals_stack_w = [None] * (code.co_nlocals + code.co_stacksize) self.valuestackdepth = code.co_nlocals - self.lastblock = None make_sure_not_resized(self.locals_stack_w) check_nonneg(self.valuestackdepth) # @@ -78,7 +103,32 @@ # regular functions always have CO_OPTIMIZED and CO_NEWLOCALS. # class bodies only have CO_NEWLOCALS. 
self.initialize_frame_scopes(outer_func, code) - self.f_lineno = code.co_firstlineno + + def getdebug(self): + return self.debugdata + + def getorcreatedebug(self): + if self.debugdata is None: + self.debugdata = FrameDebugData(self.pycode) + return self.debugdata + + def get_w_f_trace(self): + d = self.getdebug() + if d is None: + return None + return d.w_f_trace + + def get_is_being_profiled(self): + d = self.getdebug() + if d is None: + return False + return d.is_being_profiled + + def get_w_locals(self): + d = self.getdebug() + if d is None: + return None + return d.w_locals def __repr__(self): # NOT_RPYTHON: useful in tracebacks @@ -142,10 +192,10 @@ flags = code.co_flags if not (flags & pycode.CO_OPTIMIZED): if flags & pycode.CO_NEWLOCALS: - self.w_locals = self.space.newdict(module=True) + self.getorcreatedebug().w_locals = self.space.newdict(module=True) else: assert self.w_globals is not None - self.w_locals = self.w_globals + self.getorcreatedebug().w_locals = self.w_globals ncellvars = len(code.co_cellvars) nfreevars = len(code.co_freevars) @@ -367,10 +417,10 @@ else: w_cells = space.newlist([space.wrap(cell) for cell in cells]) - if self.w_f_trace is None: + if self.get_w_f_trace() is None: f_lineno = self.get_last_lineno() else: - f_lineno = self.f_lineno + f_lineno = self.getorcreatedebug().f_lineno nlocals = self.pycode.co_nlocals values_w = self.locals_stack_w[nlocals:self.valuestackdepth] @@ -386,6 +436,7 @@ w_exc_value = self.last_exception.get_w_value(space) w_tb = w(self.last_exception.get_traceback()) + d = self.getorcreatedebug() tup_state = [ w(self.f_backref()), w(self.get_builtin()), @@ -402,11 +453,11 @@ space.w_None, #XXX placeholder for f_locals #f_restricted requires no additional data! - space.w_None, ## self.w_f_trace, ignore for now + space.w_None, - w(self.instr_lb), #do we need these three (that are for tracing) - w(self.instr_ub), - w(self.instr_prev_plus_one), + w(d.instr_lb), + w(d.instr_ub), + w(d.instr_prev_plus_one), w_cells, ] return nt(tup_state) @@ -464,18 +515,19 @@ ) new_frame.last_instr = space.int_w(w_last_instr) new_frame.frame_finished_execution = space.is_true(w_finished) - new_frame.f_lineno = space.int_w(w_f_lineno) + d = new_frame.getorcreatedebug() + d.f_lineno = space.int_w(w_f_lineno) fastlocals_w = maker.slp_from_tuple_with_nulls(space, w_fastlocals) new_frame.locals_stack_w[:len(fastlocals_w)] = fastlocals_w if space.is_w(w_f_trace, space.w_None): - new_frame.w_f_trace = None + d.w_f_trace = None else: - new_frame.w_f_trace = w_f_trace + d.w_f_trace = w_f_trace - new_frame.instr_lb = space.int_w(w_instr_lb) #the three for tracing - new_frame.instr_ub = space.int_w(w_instr_ub) - new_frame.instr_prev_plus_one = space.int_w(w_instr_prev_plus_one) + d.instr_lb = space.int_w(w_instr_lb) #the three for tracing + d.instr_ub = space.int_w(w_instr_ub) + d.instr_prev_plus_one = space.int_w(w_instr_prev_plus_one) self._setcellvars(cellvars) @@ -503,30 +555,31 @@ Get the locals as a dictionary """ self.fast2locals() - return self.w_locals + return self.debugdata.w_locals def setdictscope(self, w_locals): """ Initialize the locals from a dictionary. 
""" - self.w_locals = w_locals + self.getorcreatedebug().w_locals = w_locals self.locals2fast() @jit.unroll_safe def fast2locals(self): # Copy values from the fastlocals to self.w_locals - if self.w_locals is None: - self.w_locals = self.space.newdict() + d = self.getorcreatedebug() + if d.w_locals is None: + d.w_locals = self.space.newdict() varnames = self.getcode().getvarnames() for i in range(min(len(varnames), self.getcode().co_nlocals)): name = varnames[i] w_value = self.locals_stack_w[i] if w_value is not None: - self.space.setitem_str(self.w_locals, name, w_value) + self.space.setitem_str(d.w_locals, name, w_value) else: w_name = self.space.wrap(name) try: - self.space.delitem(self.w_locals, w_name) + self.space.delitem(d.w_locals, w_name) except OperationError as e: if not e.match(self.space, self.space.w_KeyError): raise @@ -545,13 +598,14 @@ except ValueError: pass else: - self.space.setitem_str(self.w_locals, name, w_value) + self.space.setitem_str(d.w_locals, name, w_value) @jit.unroll_safe def locals2fast(self): # Copy values from self.w_locals to the fastlocals - assert self.w_locals is not None + w_locals = self.getorcreatedebug().w_locals + assert w_locals is not None varnames = self.getcode().getvarnames() numlocals = self.getcode().co_nlocals @@ -559,7 +613,7 @@ for i in range(min(len(varnames), numlocals)): name = varnames[i] - w_value = self.space.finditem_str(self.w_locals, name) + w_value = self.space.finditem_str(w_locals, name) if w_value is not None: new_fastlocals_w[i] = w_value @@ -578,7 +632,7 @@ for i in range(len(freevarnames)): name = freevarnames[i] cell = self.cells[i] - w_value = self.space.finditem_str(self.w_locals, name) + w_value = self.space.finditem_str(w_locals, name) if w_value is not None: cell.set(w_value) @@ -613,10 +667,10 @@ def fget_f_lineno(self, space): "Returns the line number of the instruction currently being executed." - if self.w_f_trace is None: + if self.get_w_f_trace() is None: return space.wrap(self.get_last_lineno()) else: - return space.wrap(self.f_lineno) + return space.wrap(self.getorcreatedebug().f_lineno) def fset_f_lineno(self, space, w_new_lineno): "Returns the line number of the instruction currently being executed." 
@@ -626,7 +680,7 @@ raise OperationError(space.w_ValueError, space.wrap("lineno must be an integer")) - if self.w_f_trace is None: + if self.get_w_f_trace() is None: raise OperationError(space.w_ValueError, space.wrap("f_lineno can only be set by a trace function.")) @@ -745,7 +799,7 @@ block.cleanup(self) f_iblock -= 1 - self.f_lineno = new_lineno + self.getorcreatedebug().f_lineno = new_lineno self.last_instr = new_lasti def get_last_lineno(self): @@ -763,17 +817,18 @@ return self.space.wrap(self.last_instr) def fget_f_trace(self, space): - return self.w_f_trace + return self.get_w_f_trace() def fset_f_trace(self, space, w_trace): if space.is_w(w_trace, space.w_None): - self.w_f_trace = None + self.getorcreatedebug().w_f_trace = None else: - self.w_f_trace = w_trace - self.f_lineno = self.get_last_lineno() + d = self.getorcreatedebug() + d.w_f_trace = w_trace + d.f_lineno = self.get_last_lineno() def fdel_f_trace(self, space): - self.w_f_trace = None + self.getorcreatedebug().w_f_trace = None def fget_f_exc_type(self, space): if self.last_exception is not None: diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -109,14 +109,14 @@ # dispatch_bytecode(), causing the real exception to be # raised after the exception handler block was popped. try: - trace = self.w_f_trace + trace = self.get_w_f_trace() if trace is not None: - self.w_f_trace = None + self.getorcreatedebug().w_f_trace = None try: ec.bytecode_trace_after_exception(self) finally: if trace is not None: - self.w_f_trace = trace + self.getorcreatedebug().w_f_trace = trace except OperationError, e: operr = e pytraceback.record_application_traceback( @@ -773,7 +773,7 @@ raise RaiseWithExplicitTraceback(operror) def LOAD_LOCALS(self, oparg, next_instr): - self.pushvalue(self.w_locals) + self.pushvalue(self.getorcreatedebug().w_locals) def EXEC_STMT(self, oparg, next_instr): w_locals = self.popvalue() @@ -789,8 +789,8 @@ self.space.gettypeobject(PyCode.typedef)) w_prog, w_globals, w_locals = self.space.fixedview(w_resulttuple, 3) - plain = (self.w_locals is not None and - self.space.is_w(w_locals, self.w_locals)) + plain = (self.get_w_locals() is not None and + self.space.is_w(w_locals, self.get_w_locals())) if plain: w_locals = self.getdictscope() co = self.space.interp_w(eval.Code, w_prog) @@ -840,12 +840,13 @@ def STORE_NAME(self, varindex, next_instr): varname = self.getname_u(varindex) w_newvalue = self.popvalue() - self.space.setitem_str(self.w_locals, varname, w_newvalue) + self.space.setitem_str(self.getorcreatedebug().w_locals, varname, + w_newvalue) def DELETE_NAME(self, varindex, next_instr): w_varname = self.getname_w(varindex) try: - self.space.delitem(self.w_locals, w_varname) + self.space.delitem(self.getorcreatedebug().w_locals, w_varname) except OperationError, e: # catch KeyErrors and turn them into NameErrors if not e.match(self.space, self.space.w_KeyError): @@ -881,9 +882,10 @@ self.space.delitem(self.w_globals, w_varname) def LOAD_NAME(self, nameindex, next_instr): - if self.w_locals is not self.w_globals: + if self.getorcreatedebug().w_locals is not self.w_globals: varname = self.getname_u(nameindex) - w_value = self.space.finditem_str(self.w_locals, varname) + w_value = self.space.finditem_str(self.getorcreatedebug().w_locals, + varname) if w_value is not None: self.pushvalue(w_value) return @@ -1013,7 +1015,11 @@ if w_import is None: raise OperationError(space.w_ImportError, space.wrap("__import__ not found")) - 
w_locals = self.w_locals + d = self.getdebug() + if d is None: + w_locals = None + else: + w_locals = d.w_locals if w_locals is None: # CPython does this w_locals = space.w_None w_modulename = space.wrap(modulename) @@ -1185,7 +1191,7 @@ args = self.argument_factory(arguments, keywords, keywords_w, w_star, w_starstar) w_function = self.popvalue() - if self.is_being_profiled and function.is_builtin_code(w_function): + if self.get_is_being_profiled() and function.is_builtin_code(w_function): w_result = self.space.call_args_and_c_profile(self, w_function, args) else: diff --git a/pypy/interpreter/test/test_pyframe.py b/pypy/interpreter/test/test_pyframe.py --- a/pypy/interpreter/test/test_pyframe.py +++ b/pypy/interpreter/test/test_pyframe.py @@ -64,6 +64,8 @@ f.f_lineno += 1 return x + open # force fetching of this name now + def function(): xyz with open(self.tempfile1, 'w') as f: diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -536,7 +536,7 @@ __objclass__ = GetSetProperty(GetSetProperty.descr_get_objclass), __doc__ = interp_attrproperty('doc', cls=GetSetProperty), ) -GetSetProperty.typedef.acceptable_as_base_class = False +assert not GetSetProperty.typedef.acceptable_as_base_class # no __new__ class Member(W_Root): @@ -590,7 +590,7 @@ __name__ = interp_attrproperty('name', cls=Member), __objclass__ = interp_attrproperty_w('w_cls', cls=Member), ) -Member.typedef.acceptable_as_base_class = False +assert not Member.typedef.acceptable_as_base_class # no __new__ # ____________________________________________________________ @@ -719,7 +719,7 @@ co_flags = GetSetProperty(fget_co_flags, cls=Code), co_consts = GetSetProperty(fget_co_consts, cls=Code), ) -Code.typedef.acceptable_as_base_class = False +assert not Code.typedef.acceptable_as_base_class # no __new__ BuiltinCode.typedef = TypeDef('builtin-code', __reduce__ = interp2app(BuiltinCode.descr__reduce__), @@ -729,7 +729,7 @@ co_flags = GetSetProperty(fget_co_flags, cls=BuiltinCode), co_consts = GetSetProperty(fget_co_consts, cls=BuiltinCode), ) -BuiltinCode.typedef.acceptable_as_base_class = False +assert not BuiltinCode.typedef.acceptable_as_base_class # no __new__ PyCode.typedef = TypeDef('code', @@ -774,7 +774,7 @@ f_locals = GetSetProperty(PyFrame.fget_getdictscope), f_globals = interp_attrproperty_w('w_globals', cls=PyFrame), ) -PyFrame.typedef.acceptable_as_base_class = False +assert not PyFrame.typedef.acceptable_as_base_class # no __new__ Module.typedef = TypeDef("module", __new__ = interp2app(Module.descr_module__new__.im_func), @@ -920,7 +920,7 @@ tb_lineno = GetSetProperty(PyTraceback.descr_tb_lineno), tb_next = interp_attrproperty('next', cls=PyTraceback), ) -PyTraceback.typedef.acceptable_as_base_class = False +assert not PyTraceback.typedef.acceptable_as_base_class # no __new__ GeneratorIterator.typedef = TypeDef("generator", __repr__ = interp2app(GeneratorIterator.descr__repr__), @@ -942,7 +942,7 @@ __name__ = GetSetProperty(GeneratorIterator.descr__name__), __weakref__ = make_weakref_descr(GeneratorIterator), ) -GeneratorIterator.typedef.acceptable_as_base_class = False +assert not GeneratorIterator.typedef.acceptable_as_base_class # no __new__ Cell.typedef = TypeDef("cell", __cmp__ = interp2app(Cell.descr__cmp__), @@ -952,17 +952,17 @@ __setstate__ = interp2app(Cell.descr__setstate__), cell_contents= GetSetProperty(Cell.descr__cell_contents, cls=Cell), ) -Cell.typedef.acceptable_as_base_class = False +assert not 
Cell.typedef.acceptable_as_base_class # no __new__ Ellipsis.typedef = TypeDef("Ellipsis", __repr__ = interp2app(Ellipsis.descr__repr__), ) -Ellipsis.typedef.acceptable_as_base_class = False +assert not Ellipsis.typedef.acceptable_as_base_class # no __new__ NotImplemented.typedef = TypeDef("NotImplemented", __repr__ = interp2app(NotImplemented.descr__repr__), ) -NotImplemented.typedef.acceptable_as_base_class = False +assert not NotImplemented.typedef.acceptable_as_base_class # no __new__ SuspendedUnroller.typedef = TypeDef("SuspendedUnroller") -SuspendedUnroller.typedef.acceptable_as_base_class = False +assert not SuspendedUnroller.typedef.acceptable_as_base_class # no __new__ diff --git a/pypy/module/_hashlib/interp_hashlib.py b/pypy/module/_hashlib/interp_hashlib.py --- a/pypy/module/_hashlib/interp_hashlib.py +++ b/pypy/module/_hashlib/interp_hashlib.py @@ -156,7 +156,7 @@ block_size=GetSetProperty(W_Hash.get_block_size), name=GetSetProperty(W_Hash.get_name), ) -W_Hash.acceptable_as_base_class = False +W_Hash.typedef.acceptable_as_base_class = False @unwrap_spec(name=str, string='bufferstr') def new(space, name, string=''): diff --git a/pypy/module/_rawffi/__init__.py b/pypy/module/_rawffi/__init__.py --- a/pypy/module/_rawffi/__init__.py +++ b/pypy/module/_rawffi/__init__.py @@ -29,6 +29,7 @@ 'get_last_error' : 'interp_rawffi.get_last_error', 'set_last_error' : 'interp_rawffi.set_last_error', 'SegfaultException' : 'space.new_exception_class("_rawffi.SegfaultException")', + 'exit' : 'interp_exit.exit', } appleveldefs = { diff --git a/pypy/module/_rawffi/interp_exit.py b/pypy/module/_rawffi/interp_exit.py new file mode 100644 --- /dev/null +++ b/pypy/module/_rawffi/interp_exit.py @@ -0,0 +1,9 @@ +from pypy.interpreter.gateway import unwrap_spec +from rpython.rtyper.lltypesystem import lltype, rffi + + +ll_exit = rffi.llexternal('exit', [rffi.INT], lltype.Void, _nowrapper=True) + +@unwrap_spec(status="c_int") +def exit(space, status): + ll_exit(rffi.cast(rffi.INT, status)) diff --git a/pypy/module/_rawffi/test/test_exit.py b/pypy/module/_rawffi/test/test_exit.py new file mode 100644 --- /dev/null +++ b/pypy/module/_rawffi/test/test_exit.py @@ -0,0 +1,16 @@ + +class AppTestFfi: + spaceconfig = dict(usemodules=['_rawffi', 'posix']) + + def test_exit(self): + try: + import posix, _rawffi + except ImportError: + skip("requires posix.fork() to test") + # + pid = posix.fork() + if pid == 0: + _rawffi.exit(5) # in the child + pid, status = posix.waitpid(pid, 0) + assert posix.WIFEXITED(status) + assert posix.WEXITSTATUS(status) == 5 diff --git a/pypy/module/_vmprof/interp_vmprof.py b/pypy/module/_vmprof/interp_vmprof.py --- a/pypy/module/_vmprof/interp_vmprof.py +++ b/pypy/module/_vmprof/interp_vmprof.py @@ -27,7 +27,7 @@ include_dirs = [SRC], includes = ['vmprof.h', 'trampoline.h'], separate_module_files = [SRC.join('trampoline.asmgcc.s')], - link_files = ['-Wl,-Bstatic', '-lunwind', '-Wl,-Bdynamic'], + libraries = ['unwind'], post_include_bits=[""" void pypy_vmprof_init(void); diff --git a/rpython/jit/backend/x86/test/conftest.py b/pypy/module/_vmprof/test/conftest.py copy from rpython/jit/backend/x86/test/conftest.py copy to pypy/module/_vmprof/test/conftest.py --- a/rpython/jit/backend/x86/test/conftest.py +++ b/pypy/module/_vmprof/test/conftest.py @@ -1,12 +1,7 @@ -import py, os +import py from rpython.jit.backend import detect_cpu cpu = detect_cpu.autodetect() def pytest_runtest_setup(item): - if not cpu.startswith('x86'): - py.test.skip("x86/x86_64 tests skipped: cpu is %r" % (cpu,)) - if 
cpu == 'x86_64': - if os.name == "nt": - py.test.skip("Windows cannot allocate non-reserved memory") - from rpython.rtyper.lltypesystem import ll2ctypes - ll2ctypes.do_allocation_in_far_regions() + if cpu != detect_cpu.MODEL_X86_64: + py.test.skip("x86_64 tests only") diff --git a/pypy/module/_vmprof/test/test_direct.py b/pypy/module/_vmprof/test/test_direct.py --- a/pypy/module/_vmprof/test/test_direct.py +++ b/pypy/module/_vmprof/test/test_direct.py @@ -1,5 +1,9 @@ -import cffi, py +import py +try: + import cffi +except ImportError: + py.test.skip('cffi required') srcdir = py.path.local(__file__).join("..", "..", "src") diff --git a/pypy/module/cpyext/frameobject.py b/pypy/module/cpyext/frameobject.py --- a/pypy/module/cpyext/frameobject.py +++ b/pypy/module/cpyext/frameobject.py @@ -35,7 +35,7 @@ py_frame = rffi.cast(PyFrameObject, py_obj) py_frame.c_f_code = rffi.cast(PyCodeObject, make_ref(space, frame.pycode)) py_frame.c_f_globals = make_ref(space, frame.w_globals) - rffi.setintfield(py_frame, 'c_f_lineno', frame.f_lineno) + rffi.setintfield(py_frame, 'c_f_lineno', frame.getorcreatedebug().f_lineno) @cpython_api([PyObject], lltype.Void, external=False) def frame_dealloc(space, py_obj): @@ -58,7 +58,8 @@ w_globals = from_ref(space, py_frame.c_f_globals) frame = space.FrameClass(space, code, w_globals, outer_func=None) - frame.f_lineno = rffi.getintfield(py_frame, 'c_f_lineno') + d = frame.getorcreatedebug() + d.f_lineno = rffi.getintfield(py_frame, 'c_f_lineno') w_obj = space.wrap(frame) track_reference(space, py_obj, w_obj) return w_obj diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -20,8 +20,10 @@ 'concatenate': 'arrayops.concatenate', 'count_nonzero': 'arrayops.count_nonzero', 'dot': 'arrayops.dot', - 'result_type': 'arrayops.result_type', 'where': 'arrayops.where', + 'result_type': 'casting.result_type', + 'can_cast': 'casting.can_cast', + 'min_scalar_type': 'casting.min_scalar_type', 'set_string_function': 'appbridge.set_string_function', 'typeinfo': 'descriptor.get_dtype_cache(space).w_typeinfo', diff --git a/pypy/module/micronumpy/arrayops.py b/pypy/module/micronumpy/arrayops.py --- a/pypy/module/micronumpy/arrayops.py +++ b/pypy/module/micronumpy/arrayops.py @@ -1,13 +1,11 @@ -from rpython.rlib import jit from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import unwrap_spec from pypy.module.micronumpy import loop, descriptor, ufuncs, support, \ constants as NPY from pypy.module.micronumpy.base import convert_to_array, W_NDimArray from pypy.module.micronumpy.converters import clipmode_converter -from pypy.module.micronumpy.strides import Chunk, Chunks, shape_agreement, \ - shape_agreement_multiple -from .boxes import W_GenericBox +from pypy.module.micronumpy.strides import ( + Chunk, Chunks, shape_agreement, shape_agreement_multiple) def where(space, w_arr, w_x=None, w_y=None): @@ -285,28 +283,3 @@ else: loop.diagonal_array(space, arr, out, offset, axis1, axis2, shape) return out - - -@jit.unroll_safe -def result_type(space, __args__): - args_w, kw_w = __args__.unpack() - if kw_w: - raise oefmt(space.w_TypeError, "result_type() takes no keyword arguments") - if not args_w: - raise oefmt(space.w_ValueError, "at least one array or dtype is required") - result = None - for w_arg in args_w: - if isinstance(w_arg, W_NDimArray): - dtype = w_arg.get_dtype() - elif isinstance(w_arg, W_GenericBox) or ( - space.isinstance_w(w_arg, 
space.w_int) or - space.isinstance_w(w_arg, space.w_float) or - space.isinstance_w(w_arg, space.w_complex) or - space.isinstance_w(w_arg, space.w_long) or - space.isinstance_w(w_arg, space.w_bool)): - dtype = ufuncs.find_dtype_for_scalar(space, w_arg) - else: - dtype = space.interp_w(descriptor.W_Dtype, - space.call_function(space.gettypefor(descriptor.W_Dtype), w_arg)) - result = ufuncs.find_binop_result_dtype(space, result, dtype) - return result diff --git a/pypy/module/micronumpy/boxes.py b/pypy/module/micronumpy/boxes.py --- a/pypy/module/micronumpy/boxes.py +++ b/pypy/module/micronumpy/boxes.py @@ -874,4 +874,3 @@ __new__ = interp2app(W_ObjectBox.descr__new__.im_func), __getattr__ = interp2app(W_ObjectBox.descr__getattr__), ) - diff --git a/pypy/module/micronumpy/casting.py b/pypy/module/micronumpy/casting.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/casting.py @@ -0,0 +1,108 @@ +"""Functions and helpers for converting between dtypes""" + +from rpython.rlib import jit +from pypy.interpreter.gateway import unwrap_spec +from pypy.interpreter.error import oefmt + +from pypy.module.micronumpy.base import W_NDimArray, convert_to_array +from pypy.module.micronumpy import constants as NPY +from pypy.module.micronumpy.ufuncs import ( + find_binop_result_dtype, find_dtype_for_scalar) +from .types import ( + Bool, ULong, Long, Float64, Complex64, UnicodeType, VoidType, ObjectType) +from .descriptor import get_dtype_cache, as_dtype, is_scalar_w + +@jit.unroll_safe +def result_type(space, __args__): + args_w, kw_w = __args__.unpack() + if kw_w: + raise oefmt(space.w_TypeError, + "result_type() takes no keyword arguments") + if not args_w: + raise oefmt(space.w_ValueError, + "at least one array or dtype is required") + result = None + for w_arg in args_w: + dtype = as_dtype(space, w_arg) + result = find_binop_result_dtype(space, result, dtype) + return result + +@unwrap_spec(casting=str) +def can_cast(space, w_from, w_totype, casting='safe'): + try: + target = as_dtype(space, w_totype, allow_None=False) + except TypeError: + raise oefmt(space.w_TypeError, + "did not understand one of the types; 'None' not accepted") + if isinstance(w_from, W_NDimArray): + return space.wrap(can_cast_array(space, w_from, target, casting)) + elif is_scalar_w(space, w_from): + w_scalar = as_scalar(space, w_from) + w_arr = W_NDimArray.from_scalar(space, w_scalar) + return space.wrap(can_cast_array(space, w_arr, target, casting)) + + try: + origin = as_dtype(space, w_from, allow_None=False) + except TypeError: + raise oefmt(space.w_TypeError, + "did not understand one of the types; 'None' not accepted") + return space.wrap(can_cast_type(space, origin, target, casting)) + +kind_ordering = { + Bool.kind: 0, ULong.kind: 1, Long.kind: 2, + Float64.kind: 4, Complex64.kind: 5, + NPY.STRINGLTR: 6, NPY.STRINGLTR2: 6, + UnicodeType.kind: 7, VoidType.kind: 8, ObjectType.kind: 9} + +def can_cast_type(space, origin, target, casting): + # equivalent to PyArray_CanCastTypeTo + if casting == 'no': + return origin.eq(space, target) + elif casting == 'equiv': + return origin.num == target.num and origin.elsize == target.elsize + elif casting == 'unsafe': + return True + elif casting == 'same_kind': + if origin.can_cast_to(target): + return True + if origin.kind in kind_ordering and target.kind in kind_ordering: + return kind_ordering[origin.kind] <= kind_ordering[target.kind] + return False + else: + return origin.can_cast_to(target) + +def can_cast_array(space, w_from, target, casting): + # equivalent to 
PyArray_CanCastArrayTo + origin = w_from.get_dtype() + if w_from.is_scalar(): + return can_cast_scalar( + space, origin, w_from.get_scalar_value(), target, casting) + else: + return can_cast_type(space, origin, target, casting) + +def can_cast_scalar(space, from_type, value, target, casting): + # equivalent to CNumPy's can_cast_scalar_to + if from_type == target or casting == 'unsafe': + return True + if not from_type.is_number() or casting in ('no', 'equiv'): + return can_cast_type(space, from_type, target, casting) + if not from_type.is_native(): + value = value.descr_byteswap(space) + dtypenum, altnum = value.min_dtype() + if target.is_unsigned(): + dtypenum = altnum + dtype = get_dtype_cache(space).dtypes_by_num[dtypenum] + return can_cast_type(space, dtype, target, casting) + +def as_scalar(space, w_obj): + dtype = find_dtype_for_scalar(space, w_obj) + return dtype.coerce(space, w_obj) + +def min_scalar_type(space, w_a): + w_array = convert_to_array(space, w_a) + dtype = w_array.get_dtype() + if w_array.is_scalar() and dtype.is_number(): + num, alt_num = w_array.get_scalar_value().min_dtype() + return get_dtype_cache(space).dtypes_by_num[num] + else: + return dtype diff --git a/pypy/module/micronumpy/descriptor.py b/pypy/module/micronumpy/descriptor.py --- a/pypy/module/micronumpy/descriptor.py +++ b/pypy/module/micronumpy/descriptor.py @@ -8,7 +8,9 @@ from rpython.rlib import jit from rpython.rlib.objectmodel import specialize, compute_hash, we_are_translated from rpython.rlib.rarithmetic import r_longlong, r_ulonglong -from pypy.module.micronumpy import types, boxes, base, support, constants as NPY +from rpython.rlib.signature import finishsigs, signature, types as ann +from pypy.module.micronumpy import types, boxes, support, constants as NPY +from .base import W_NDimArray from pypy.module.micronumpy.appbridge import get_appbridge_cache from pypy.module.micronumpy.converters import byteorder_converter @@ -36,24 +38,21 @@ if not space.is_none(w_arr): dtype = find_binop_result_dtype(space, dtype, w_arr.get_dtype()) assert dtype is not None - out = base.W_NDimArray.from_shape(space, shape, dtype) + out = W_NDimArray.from_shape(space, shape, dtype) return out +_REQ_STRLEN = [0, 3, 5, 10, 10, 20, 20, 20, 20] # data for can_cast_to() + +@finishsigs class W_Dtype(W_Root): _immutable_fields_ = [ - "itemtype?", "num", "kind", "char", "w_box_type", - "byteorder?", "names?", "fields?", "elsize?", "alignment?", - "shape?", "subdtype?", "base?", - ] + "itemtype?", "w_box_type", "byteorder?", "names?", "fields?", + "elsize?", "alignment?", "shape?", "subdtype?", "base?"] - def __init__(self, itemtype, num, kind, char, w_box_type, - byteorder=None, names=[], fields={}, - elsize=None, shape=[], subdtype=None): + def __init__(self, itemtype, w_box_type, byteorder=None, names=[], + fields={}, elsize=None, shape=[], subdtype=None): self.itemtype = itemtype - self.num = num - self.kind = kind - self.char = char self.w_box_type = w_box_type if byteorder is None: if itemtype.get_element_size() == 1 or isinstance(itemtype, types.ObjectType): @@ -74,6 +73,18 @@ else: self.base = subdtype.base + @property + def num(self): + return self.itemtype.num + + @property + def kind(self): + return self.itemtype.kind + + @property + def char(self): + return self.itemtype.char + def __repr__(self): if self.fields: return '<DType %r>' % self.fields @@ -87,6 +98,41 @@ def box_complex(self, real, imag): return self.itemtype.box_complex(real, imag) + @signature(ann.self(), ann.self(), returns=ann.bool()) + def 
can_cast_to(self, other): + # equivalent to PyArray_CanCastTo + result = self.itemtype.can_cast_to(other.itemtype) + if result: + if self.num == NPY.STRING: + if other.num == NPY.STRING: + return self.elsize <= other.elsize + elif other.num == NPY.UNICODE: + return self.elsize * 4 <= other.elsize + elif self.num == NPY.UNICODE and other.num == NPY.UNICODE: + return self.elsize <= other.elsize + elif other.num in (NPY.STRING, NPY.UNICODE): + if other.num == NPY.STRING: + char_size = 1 + else: # NPY.UNICODE + char_size = 4 + if other.elsize == 0: + return True + if self.is_bool(): + return other.elsize >= 5 * char_size + elif self.is_unsigned(): + if self.elsize > 8 or self.elsize < 0: + return False + else: + return (other.elsize >= + _REQ_STRLEN[self.elsize] * char_size) + elif self.is_signed(): + if self.elsize > 8 or self.elsize < 0: + return False + else: + return (other.elsize >= + (_REQ_STRLEN[self.elsize] + 1) * char_size) + return result + def coerce(self, space, w_item): return self.itemtype.coerce(space, self, w_item) @@ -109,6 +155,9 @@ def is_complex(self): return self.kind == NPY.COMPLEXLTR + def is_number(self): + return self.is_int() or self.is_float() or self.is_complex() + def is_str(self): return self.num == NPY.STRING @@ -259,6 +308,22 @@ def descr_ne(self, space, w_other): return space.wrap(not self.eq(space, w_other)) + def descr_le(self, space, w_other): + w_other = as_dtype(space, w_other) + return space.wrap(self.can_cast_to(w_other)) + + def descr_ge(self, space, w_other): + w_other = as_dtype(space, w_other) + return space.wrap(w_other.can_cast_to(self)) + + def descr_lt(self, space, w_other): + w_other = as_dtype(space, w_other) + return space.wrap(self.can_cast_to(w_other) and not self.eq(space, w_other)) + + def descr_gt(self, space, w_other): + w_other = as_dtype(space, w_other) + return space.wrap(w_other.can_cast_to(self) and not self.eq(space, w_other)) + def _compute_hash(self, space, x): from rpython.rlib.rarithmetic import intmask if not self.fields and self.subdtype is None: @@ -450,7 +515,7 @@ fields = self.fields if fields is None: fields = {} - return W_Dtype(itemtype, self.num, self.kind, self.char, + return W_Dtype(itemtype, self.w_box_type, byteorder=endian, elsize=self.elsize, names=self.names, fields=fields, shape=self.shape, subdtype=self.subdtype) @@ -485,8 +550,7 @@ fields[fldname] = (offset, subdtype) offset += subdtype.elsize names.append(fldname) - return W_Dtype(types.RecordType(space), NPY.VOID, NPY.VOIDLTR, NPY.VOIDLTR, - space.gettypefor(boxes.W_VoidBox), + return W_Dtype(types.RecordType(space), space.gettypefor(boxes.W_VoidBox), names=names, fields=fields, elsize=offset) @@ -553,7 +617,7 @@ if size == 1: return subdtype size *= subdtype.elsize - return W_Dtype(types.VoidType(space), NPY.VOID, NPY.VOIDLTR, NPY.VOIDLTR, + return W_Dtype(types.VoidType(space), space.gettypefor(boxes.W_VoidBox), shape=shape, subdtype=subdtype, elsize=size) @@ -630,6 +694,10 @@ __eq__ = interp2app(W_Dtype.descr_eq), __ne__ = interp2app(W_Dtype.descr_ne), + __lt__ = interp2app(W_Dtype.descr_lt), + __le__ = interp2app(W_Dtype.descr_le), + __gt__ = interp2app(W_Dtype.descr_gt), + __ge__ = interp2app(W_Dtype.descr_ge), __hash__ = interp2app(W_Dtype.descr_hash), __str__= interp2app(W_Dtype.descr_str), __repr__ = interp2app(W_Dtype.descr_repr), @@ -654,7 +722,10 @@ except ValueError: raise oefmt(space.w_TypeError, "data type not understood") if char == NPY.CHARLTR: - return new_string_dtype(space, 1, NPY.CHARLTR) + return W_Dtype( + types.CharType(space), + 
elsize=1, + w_box_type=space.gettypefor(boxes.W_StringBox)) elif char == NPY.STRINGLTR or char == NPY.STRINGLTR2: return new_string_dtype(space, size) elif char == NPY.UNICODELTR: @@ -664,13 +735,10 @@ assert False -def new_string_dtype(space, size, char=NPY.STRINGLTR): +def new_string_dtype(space, size): return W_Dtype( types.StringType(space), elsize=size, - num=NPY.STRING, - kind=NPY.STRINGLTR, - char=char, w_box_type=space.gettypefor(boxes.W_StringBox), ) @@ -680,9 +748,6 @@ return W_Dtype( itemtype, elsize=size * itemtype.get_element_size(), - num=NPY.UNICODE, - kind=NPY.UNICODELTR, - char=NPY.UNICODELTR, w_box_type=space.gettypefor(boxes.W_UnicodeBox), ) @@ -691,9 +756,6 @@ return W_Dtype( types.VoidType(space), elsize=size, - num=NPY.VOID, - kind=NPY.VOIDLTR, - char=NPY.VOIDLTR, w_box_type=space.gettypefor(boxes.W_VoidBox), ) @@ -702,173 +764,93 @@ def __init__(self, space): self.w_booldtype = W_Dtype( types.Bool(space), - num=NPY.BOOL, - kind=NPY.GENBOOLLTR, - char=NPY.BOOLLTR, w_box_type=space.gettypefor(boxes.W_BoolBox), ) self.w_int8dtype = W_Dtype( types.Int8(space), - num=NPY.BYTE, - kind=NPY.SIGNEDLTR, - char=NPY.BYTELTR, w_box_type=space.gettypefor(boxes.W_Int8Box), ) self.w_uint8dtype = W_Dtype( types.UInt8(space), - num=NPY.UBYTE, - kind=NPY.UNSIGNEDLTR, - char=NPY.UBYTELTR, w_box_type=space.gettypefor(boxes.W_UInt8Box), ) self.w_int16dtype = W_Dtype( types.Int16(space), - num=NPY.SHORT, - kind=NPY.SIGNEDLTR, - char=NPY.SHORTLTR, w_box_type=space.gettypefor(boxes.W_Int16Box), ) self.w_uint16dtype = W_Dtype( types.UInt16(space), - num=NPY.USHORT, - kind=NPY.UNSIGNEDLTR, - char=NPY.USHORTLTR, w_box_type=space.gettypefor(boxes.W_UInt16Box), ) self.w_int32dtype = W_Dtype( types.Int32(space), - num=NPY.INT, - kind=NPY.SIGNEDLTR, - char=NPY.INTLTR, w_box_type=space.gettypefor(boxes.W_Int32Box), ) self.w_uint32dtype = W_Dtype( types.UInt32(space), - num=NPY.UINT, - kind=NPY.UNSIGNEDLTR, - char=NPY.UINTLTR, w_box_type=space.gettypefor(boxes.W_UInt32Box), ) self.w_longdtype = W_Dtype( types.Long(space), - num=NPY.LONG, - kind=NPY.SIGNEDLTR, - char=NPY.LONGLTR, w_box_type=space.gettypefor(boxes.W_LongBox), ) self.w_ulongdtype = W_Dtype( types.ULong(space), - num=NPY.ULONG, - kind=NPY.UNSIGNEDLTR, - char=NPY.ULONGLTR, w_box_type=space.gettypefor(boxes.W_ULongBox), ) self.w_int64dtype = W_Dtype( types.Int64(space), - num=NPY.LONGLONG, - kind=NPY.SIGNEDLTR, - char=NPY.LONGLONGLTR, w_box_type=space.gettypefor(boxes.W_Int64Box), ) self.w_uint64dtype = W_Dtype( types.UInt64(space), - num=NPY.ULONGLONG, - kind=NPY.UNSIGNEDLTR, - char=NPY.ULONGLONGLTR, w_box_type=space.gettypefor(boxes.W_UInt64Box), ) self.w_float32dtype = W_Dtype( types.Float32(space), - num=NPY.FLOAT, - kind=NPY.FLOATINGLTR, - char=NPY.FLOATLTR, w_box_type=space.gettypefor(boxes.W_Float32Box), ) self.w_float64dtype = W_Dtype( types.Float64(space), - num=NPY.DOUBLE, - kind=NPY.FLOATINGLTR, - char=NPY.DOUBLELTR, w_box_type=space.gettypefor(boxes.W_Float64Box), ) self.w_floatlongdtype = W_Dtype( types.FloatLong(space), - num=NPY.LONGDOUBLE, - kind=NPY.FLOATINGLTR, - char=NPY.LONGDOUBLELTR, w_box_type=space.gettypefor(boxes.W_FloatLongBox), ) self.w_complex64dtype = W_Dtype( types.Complex64(space), - num=NPY.CFLOAT, - kind=NPY.COMPLEXLTR, - char=NPY.CFLOATLTR, w_box_type=space.gettypefor(boxes.W_Complex64Box), ) self.w_complex128dtype = W_Dtype( types.Complex128(space), - num=NPY.CDOUBLE, - kind=NPY.COMPLEXLTR, - char=NPY.CDOUBLELTR, w_box_type=space.gettypefor(boxes.W_Complex128Box), ) self.w_complexlongdtype = W_Dtype( 
types.ComplexLong(space), - num=NPY.CLONGDOUBLE, - kind=NPY.COMPLEXLTR, - char=NPY.CLONGDOUBLELTR, w_box_type=space.gettypefor(boxes.W_ComplexLongBox), ) self.w_stringdtype = W_Dtype( types.StringType(space), elsize=0, - num=NPY.STRING, - kind=NPY.STRINGLTR, - char=NPY.STRINGLTR, w_box_type=space.gettypefor(boxes.W_StringBox), ) self.w_unicodedtype = W_Dtype( types.UnicodeType(space), elsize=0, - num=NPY.UNICODE, - kind=NPY.UNICODELTR, - char=NPY.UNICODELTR, w_box_type=space.gettypefor(boxes.W_UnicodeBox), ) self.w_voiddtype = W_Dtype( types.VoidType(space), elsize=0, - num=NPY.VOID, - kind=NPY.VOIDLTR, - char=NPY.VOIDLTR, w_box_type=space.gettypefor(boxes.W_VoidBox), ) self.w_float16dtype = W_Dtype( types.Float16(space), - num=NPY.HALF, - kind=NPY.FLOATINGLTR, - char=NPY.HALFLTR, w_box_type=space.gettypefor(boxes.W_Float16Box), ) - self.w_intpdtype = W_Dtype( - types.Long(space), - num=NPY.LONG, - kind=NPY.SIGNEDLTR, - char=NPY.INTPLTR, - w_box_type=space.gettypefor(boxes.W_LongBox), - ) - self.w_uintpdtype = W_Dtype( - types.ULong(space), - num=NPY.ULONG, - kind=NPY.UNSIGNEDLTR, - char=NPY.UINTPLTR, - w_box_type=space.gettypefor(boxes.W_ULongBox), - ) self.w_objectdtype = W_Dtype( types.ObjectType(space), - num=NPY.OBJECT, - kind=NPY.OBJECTLTR, - char=NPY.OBJECTLTR, w_box_type=space.gettypefor(boxes.W_ObjectBox), ) aliases = { @@ -929,7 +911,7 @@ self.w_int64dtype, self.w_uint64dtype, ] + float_dtypes + complex_dtypes + [ self.w_stringdtype, self.w_unicodedtype, self.w_voiddtype, - self.w_intpdtype, self.w_uintpdtype, self.w_objectdtype, + self.w_objectdtype, ] self.float_dtypes_by_num_bytes = sorted( (dtype.elsize, dtype) @@ -970,8 +952,7 @@ 'CLONGDOUBLE': self.w_complexlongdtype, #'DATETIME', 'UINT': self.w_uint32dtype, - 'INTP': self.w_intpdtype, - 'UINTP': self.w_uintpdtype, + 'INTP': self.w_longdtype, 'HALF': self.w_float16dtype, 'BYTE': self.w_int8dtype, #'TIMEDELTA', @@ -1001,7 +982,11 @@ space.setitem(w_typeinfo, space.wrap(k), space.gettypefor(v)) for k, dtype in typeinfo_full.iteritems(): itembits = dtype.elsize * 8 - items_w = [space.wrap(dtype.char), + if k in ('INTP', 'UINTP'): + char = getattr(NPY, k + 'LTR') + else: + char = dtype.char + items_w = [space.wrap(char), space.wrap(dtype.num), space.wrap(itembits), space.wrap(dtype.itemtype.get_element_size())] @@ -1024,3 +1009,26 @@ def get_dtype_cache(space): return space.fromcache(DtypeCache) + +def as_dtype(space, w_arg, allow_None=True): + from pypy.module.micronumpy.ufuncs import find_dtype_for_scalar + # roughly equivalent to CNumPy's PyArray_DescrConverter2 + if not allow_None and space.is_none(w_arg): + raise TypeError("Cannot create dtype from None here") + if isinstance(w_arg, W_NDimArray): + return w_arg.get_dtype() + elif is_scalar_w(space, w_arg): + result = find_dtype_for_scalar(space, w_arg) + assert result is not None # XXX: not guaranteed + return result + else: + return space.interp_w(W_Dtype, + space.call_function(space.gettypefor(W_Dtype), w_arg)) + +def is_scalar_w(space, w_arg): + return (isinstance(w_arg, boxes.W_GenericBox) or + space.isinstance_w(w_arg, space.w_int) or + space.isinstance_w(w_arg, space.w_float) or + space.isinstance_w(w_arg, space.w_complex) or + space.isinstance_w(w_arg, space.w_long) or + space.isinstance_w(w_arg, space.w_bool)) diff --git a/pypy/module/micronumpy/test/test_arrayops.py b/pypy/module/micronumpy/test/test_arrayops.py --- a/pypy/module/micronumpy/test/test_arrayops.py +++ b/pypy/module/micronumpy/test/test_arrayops.py @@ -199,19 +199,3 @@ a.put(23, -1, mode=1) # wrap 
assert (a == array([0, 1, -10, -1, -15])).all() raises(TypeError, "arange(5).put(22, -5, mode='zzzz')") # unrecognized mode - - def test_result_type(self): - import numpy as np - exc = raises(ValueError, np.result_type) - assert str(exc.value) == "at least one array or dtype is required" - exc = raises(TypeError, np.result_type, a=2) - assert str(exc.value) == "result_type() takes no keyword arguments" - assert np.result_type(True) is np.dtype('bool') - assert np.result_type(1) is np.dtype('int') - assert np.result_type(1.) is np.dtype('float64') - assert np.result_type(1+2j) is np.dtype('complex128') - assert np.result_type(1, 1.) is np.dtype('float64') - assert np.result_type(np.array([1, 2])) is np.dtype('int') - assert np.result_type(np.array([1, 2]), 1, 1+2j) is np.dtype('complex128') - assert np.result_type(np.array([1, 2]), 1, 'float64') is np.dtype('float64') - assert np.result_type(np.array([1, 2]), 1, None) is np.dtype('float64') diff --git a/pypy/module/micronumpy/test/test_casting.py b/pypy/module/micronumpy/test/test_casting.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/test/test_casting.py @@ -0,0 +1,121 @@ +from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest + + +class AppTestNumSupport(BaseNumpyAppTest): + def test_result_type(self): + import numpy as np + exc = raises(ValueError, np.result_type) + assert str(exc.value) == "at least one array or dtype is required" + exc = raises(TypeError, np.result_type, a=2) + assert str(exc.value) == "result_type() takes no keyword arguments" + assert np.result_type(True) is np.dtype('bool') + assert np.result_type(1) is np.dtype('int') + assert np.result_type(1.) is np.dtype('float64') + assert np.result_type(1+2j) is np.dtype('complex128') + assert np.result_type(1, 1.) 
is np.dtype('float64') + assert np.result_type(np.array([1, 2])) is np.dtype('int') + assert np.result_type(np.array([1, 2]), 1, 1+2j) is np.dtype('complex128') + assert np.result_type(np.array([1, 2]), 1, 'float64') is np.dtype('float64') + assert np.result_type(np.array([1, 2]), 1, None) is np.dtype('float64') + + def test_can_cast(self): + import numpy as np + + assert np.can_cast(np.int32, np.int64) + assert np.can_cast(np.float64, complex) + assert not np.can_cast(np.complex64, float) + + assert np.can_cast('i8', 'f8') + assert not np.can_cast('i8', 'f4') + assert np.can_cast('i4', 'S11') + + assert np.can_cast('i8', 'i8', 'no') + assert not np.can_cast('<i8', '>i8', 'no') + + assert np.can_cast('<i8', '>i8', 'equiv') + assert not np.can_cast('<i4', '>i8', 'equiv') + + assert np.can_cast('<i4', '>i8', 'safe') + assert not np.can_cast('<i8', '>i4', 'safe') + + assert np.can_cast('<i8', '>i4', 'same_kind') + assert not np.can_cast('<i8', '>u4', 'same_kind') + + assert np.can_cast('<i8', '>u4', 'unsafe') + + assert np.can_cast('bool', 'S5') + assert not np.can_cast('bool', 'S4') + + assert np.can_cast('b', 'S4') + assert not np.can_cast('b', 'S3') + + assert np.can_cast('u1', 'S3') + assert not np.can_cast('u1', 'S2') + assert np.can_cast('u2', 'S5') + assert not np.can_cast('u2', 'S4') + assert np.can_cast('u4', 'S10') + assert not np.can_cast('u4', 'S9') + assert np.can_cast('u8', 'S20') + assert not np.can_cast('u8', 'S19') + + assert np.can_cast('i1', 'S4') + assert not np.can_cast('i1', 'S3') + assert np.can_cast('i2', 'S6') + assert not np.can_cast('i2', 'S5') + assert np.can_cast('i4', 'S11') + assert not np.can_cast('i4', 'S10') + assert np.can_cast('i8', 'S21') + assert not np.can_cast('i8', 'S20') + + assert np.can_cast('bool', 'S5') + assert not np.can_cast('bool', 'S4') + + assert np.can_cast('b', 'U4') + assert not np.can_cast('b', 'U3') + + assert np.can_cast('u1', 'U3') + assert not np.can_cast('u1', 'U2') + assert np.can_cast('u2', 'U5') + assert not np.can_cast('u2', 'U4') + assert np.can_cast('u4', 'U10') + assert not np.can_cast('u4', 'U9') + assert np.can_cast('u8', 'U20') + assert not np.can_cast('u8', 'U19') + + assert np.can_cast('i1', 'U4') + assert not np.can_cast('i1', 'U3') + assert np.can_cast('i2', 'U6') + assert not np.can_cast('i2', 'U5') + assert np.can_cast('i4', 'U11') + assert not np.can_cast('i4', 'U10') + assert np.can_cast('i8', 'U21') + assert not np.can_cast('i8', 'U20') + + raises(TypeError, np.can_cast, 'i4', None) + raises(TypeError, np.can_cast, None, 'i4') + + def test_can_cast_scalar(self): + import numpy as np + assert np.can_cast(True, np.bool_) + assert np.can_cast(True, np.int8) + assert not np.can_cast(0, np.bool_) + assert np.can_cast(127, np.int8) + assert not np.can_cast(128, np.int8) + assert np.can_cast(128, np.int16) + + assert np.can_cast(np.float32('inf'), np.float32) + assert np.can_cast(float('inf'), np.float32) # XXX: False in CNumPy?! 
+        assert np.can_cast(3.3e38, np.float32)
+        assert not np.can_cast(3.4e38, np.float32)
+
+        assert np.can_cast(1 + 2j, np.complex64)
+        assert not np.can_cast(1 + 1e50j, np.complex64)
+        assert np.can_cast(1., np.complex64)
+        assert not np.can_cast(1e50, np.complex64)
+
+    def test_min_scalar_type(self):
+        import numpy as np
+        assert np.min_scalar_type(2**8 - 1) == np.dtype('uint8')
+        assert np.min_scalar_type(2**64 - 1) == np.dtype('uint64')
+        # XXX: np.asarray(2**64) fails with OverflowError
+        # assert np.min_scalar_type(2**64) == np.dtype('O')
diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py
--- a/pypy/module/micronumpy/test/test_dtypes.py
+++ b/pypy/module/micronumpy/test/test_dtypes.py
@@ -112,6 +112,11 @@
         raises(TypeError, lambda: dtype("int8") == 3)
         assert dtype(bool) == bool
 
+    def test_dtype_cmp(self):
+        from numpy import dtype
+        assert dtype('int8') <= dtype('int8')
+        assert not (dtype('int8') < dtype('int8'))
+
     def test_dtype_aliases(self):
         from numpy import dtype
         assert dtype('bool8') is dtype('bool')
@@ -1287,7 +1292,7 @@
         from cPickle import loads, dumps
         d = dtype([("x", "int32"), ("y", "int32"), ("z", "int32"),
                    ("value", float)])
-        assert d.__reduce__() == (dtype, ('V20', 0, 1), (3, '|', None, 
+        assert d.__reduce__() == (dtype, ('V20', 0, 1), (3, '|', None,
                    ('x', 'y', 'z', 'value'),
                    {'y': (dtype('int32'), 4), 'x': (dtype('int32'), 0),
                     'z': (dtype('int32'), 8), 'value': (dtype('float64'), 12),
diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py
--- a/pypy/module/micronumpy/test/test_ndarray.py
+++ b/pypy/module/micronumpy/test/test_ndarray.py
@@ -1818,7 +1818,7 @@
         s[...] = 2
         v = s.view(x.__class__)
         assert (v == 2).all()
-    
+
     def test_tolist_scalar(self):
         from numpy import dtype
         int32 = dtype('int32').type
diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py
--- a/pypy/module/micronumpy/types.py
+++ b/pypy/module/micronumpy/types.py
@@ -1,5 +1,6 @@
 import functools
 import math
+from rpython.rlib.unroll import unrolling_iterable
 from pypy.interpreter.error import OperationError, oefmt
 from pypy.objspace.std.floatobject import float2string
 from pypy.objspace.std.complexobject import str_format
@@ -22,6 +23,7 @@
 from pypy.module.micronumpy import boxes
 from pypy.module.micronumpy.concrete import SliceArray, VoidBoxStorage, V_OBJECTSTORE
 from pypy.module.micronumpy.strides import calc_strides
+from . import constants as NPY
 
 degToRad = math.pi / 180.0
 log2 = math.log(2)
@@ -128,6 +130,14 @@
         else:
             return alloc_raw_storage(size, track_allocation=False, zero=False)
 
+    @classmethod
+    def basesize(cls):
+        return rffi.sizeof(cls.T)
+
+    def can_cast_to(self, other):
+        # equivalent to PyArray_CanCastSafely
+        return casting_table[self.num][other.num]
+
 class Primitive(object):
     _mixin_ = True
 
@@ -316,6 +326,9 @@
 
 class Bool(BaseType, Primitive):
     T = lltype.Bool
+    num = NPY.BOOL
+    kind = NPY.GENBOOLLTR
+    char = NPY.BOOLLTR
     BoxType = boxes.W_BoolBox
     format_code = "?"
 
@@ -408,6 +421,7 @@