Author: Armin Rigo <ar...@tunes.org>
Branch: py3k
Changeset: r87265:5d068bf3a13d
Date: 2016-09-21 14:20 +0200
http://bitbucket.org/pypy/pypy/changeset/5d068bf3a13d/
Log: hg merge default diff too long, truncating to 2000 out of 2019 lines diff --git a/lib-python/2.7/distutils/sysconfig_pypy.py b/lib-python/2.7/distutils/sysconfig_pypy.py --- a/lib-python/2.7/distutils/sysconfig_pypy.py +++ b/lib-python/2.7/distutils/sysconfig_pypy.py @@ -13,6 +13,7 @@ import sys import os import shlex +import imp from distutils.errors import DistutilsPlatformError @@ -62,8 +63,7 @@ """Initialize the module as appropriate for POSIX systems.""" g = {} g['EXE'] = "" - g['SO'] = ".so" - g['SOABI'] = g['SO'].rsplit('.')[0] + g['SO'] = [s[0] for s in imp.get_suffixes() if s[2] == imp.C_EXTENSION][0] g['LIBDIR'] = os.path.join(sys.prefix, 'lib') g['CC'] = "gcc -pthread" # -pthread might not be valid on OS/X, check @@ -75,8 +75,7 @@ """Initialize the module as appropriate for NT""" g = {} g['EXE'] = ".exe" - g['SO'] = ".pyd" - g['SOABI'] = g['SO'].rsplit('.')[0] + g['SO'] = [s[0] for s in imp.get_suffixes() if s[2] == imp.C_EXTENSION][0] global _config_vars _config_vars = g diff --git a/lib-python/2.7/sysconfig.py b/lib-python/2.7/sysconfig.py --- a/lib-python/2.7/sysconfig.py +++ b/lib-python/2.7/sysconfig.py @@ -529,7 +529,7 @@ for suffix, mode, type_ in imp.get_suffixes(): if type_ == imp.C_EXTENSION: _CONFIG_VARS['SOABI'] = suffix.split('.')[1] - break + break if args: vals = [] diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO --- a/lib_pypy/cffi.egg-info/PKG-INFO +++ b/lib_pypy/cffi.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: cffi -Version: 1.8.2 +Version: 1.8.4 Summary: Foreign Function Interface for Python calling C code. Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,8 +4,8 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "1.8.2" -__version_info__ = (1, 8, 2) +__version__ = "1.8.4" +__version_info__ = (1, 8, 4) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. It may be older than __version__ diff --git a/lib_pypy/cffi/_embedding.h b/lib_pypy/cffi/_embedding.h --- a/lib_pypy/cffi/_embedding.h +++ b/lib_pypy/cffi/_embedding.h @@ -233,7 +233,7 @@ f = PySys_GetObject((char *)"stderr"); if (f != NULL && f != Py_None) { PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME - "\ncompiled with cffi version: 1.8.2" + "\ncompiled with cffi version: 1.8.4" "\n_cffi_backend module: ", f); modules = PyImport_GetModuleDict(); mod = PyDict_GetItemString(modules, "_cffi_backend"); diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -332,7 +332,7 @@ realtype = model.unknown_ptr_type(decl.name) else: realtype, quals = self._get_type_and_quals( - decl.type, name=decl.name) + decl.type, name=decl.name, partial_length_ok=True) self._declare('typedef ' + decl.name, realtype, quals=quals) else: raise api.CDefError("unrecognized construct", decl) @@ -781,11 +781,14 @@ exprnode.name in self._int_constants): return self._int_constants[exprnode.name] # - if partial_length_ok: - if (isinstance(exprnode, pycparser.c_ast.ID) and + if (isinstance(exprnode, pycparser.c_ast.ID) and exprnode.name == '__dotdotdotarray__'): + if partial_length_ok: self._partial_length = True return '...' 
+ raise api.FFIError(":%d: unsupported '[...]' here, cannot derive " + "the actual array length in this context" + % exprnode.coord.line) # raise api.FFIError(":%d: unsupported expression: expected a " "simple numeric constant" % exprnode.coord.line) diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py --- a/lib_pypy/cffi/recompiler.py +++ b/lib_pypy/cffi/recompiler.py @@ -587,8 +587,11 @@ # ---------- # typedefs + def _typedef_type(self, tp, name): + return self._global_type(tp, "(*(%s *)0)" % (name,)) + def _generate_cpy_typedef_collecttype(self, tp, name): - self._do_collect_type(tp) + self._do_collect_type(self._typedef_type(tp, name)) def _generate_cpy_typedef_decl(self, tp, name): pass @@ -598,6 +601,7 @@ self._lsts["typename"].append(TypenameExpr(name, type_index)) def _generate_cpy_typedef_ctx(self, tp, name): + tp = self._typedef_type(tp, name) self._typedef_ctx(tp, name) if getattr(tp, "origin", None) == "unknown_type": self._struct_ctx(tp, tp.name, approxname=None) diff --git a/pypy/doc/config/translation.profopt.txt b/pypy/doc/config/translation.profopt.txt --- a/pypy/doc/config/translation.profopt.txt +++ b/pypy/doc/config/translation.profopt.txt @@ -3,3 +3,14 @@ RPython program) to gather profile data. Example for pypy-c: "-c 'from richards import main;main(); from test import pystone; pystone.main()'" + +NOTE: be aware of what this does in JIT-enabled executables. What it +does is instrument and later optimize the C code that happens to run in +the example you specify, ignoring any execution of the JIT-generated +assembler. That means that you have to choose the example wisely. If +it is something that will just generate assembler and stay there, there +is little value. If it is something that exercises heavily library +routines that are anyway written in C, then it will optimize that. Most +interesting would be something that causes a lot of JIT-compilation, +like running a medium-sized test suite several times in a row, in order +to optimize the warm-up in general. diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -449,6 +449,27 @@ support (see ``multiline_input()``). On the other hand, ``parse_and_bind()`` calls are ignored (issue `#2072`_). +* ``sys.getsizeof()`` always raises ``TypeError``. This is because a + memory profiler using this function is most likely to give results + inconsistent with reality on PyPy. It would be possible to have + ``sys.getsizeof()`` return a number (with enough work), but that may + or may not represent how much memory the object uses. It doesn't even + make really sense to ask how much *one* object uses, in isolation with + the rest of the system. For example, instances have maps, which are + often shared across many instances; in this case the maps would + probably be ignored by an implementation of ``sys.getsizeof()``, but + their overhead is important in some cases if they are many instances + with unique maps. Conversely, equal strings may share their internal + string data even if they are different objects---or empty containers + may share parts of their internals as long as they are empty. Even + stranger, some lists create objects as you read them; if you try to + estimate the size in memory of ``range(10**6)`` as the sum of all + items' size, that operation will by itself create one million integer + objects that never existed in the first place. 
Note that some of + these concerns also exist on CPython, just less so. For this reason + we explicitly don't implement ``sys.getsizeof()``. + + .. _`is ignored in PyPy`: http://bugs.python.org/issue14621 .. _`little point`: http://events.ccc.de/congress/2012/Fahrplan/events/5152.en.html .. _`#2072`: https://bitbucket.org/pypy/pypy/issue/2072/ diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -16,3 +16,8 @@ Improve merging of virtual states in the JIT in order to avoid jumping to the preamble. Accomplished by allocating virtual objects where non-virtuals are expected. + +.. branch: conditional_call_value_3 +JIT residual calls: if the called function starts with a fast-path +like "if x.foo != 0: return x.foo", then inline the check before +doing the CALL. For now, string hashing is about the only case. diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -246,6 +246,10 @@ raise Exception("Cannot use the --output option with PyPy " "when --shared is on (it is by default). " "See issue #1971.") + if config.translation.profopt is not None: + raise Exception("Cannot use the --profopt option " + "when --shared is on (it is by default). " + "See issue #2398.") if sys.platform == 'win32': libdir = thisdir.join('..', '..', 'libs') libdir.ensure(dir=1) diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1952,11 +1952,9 @@ 'NotImplementedError', 'OSError', 'OverflowError', - 'PendingDeprecationWarning', 'ReferenceError', 'ResourceWarning', 'RuntimeError', - 'RuntimeWarning', 'StopIteration', 'SyntaxError', 'SyntaxWarning', @@ -1970,10 +1968,12 @@ 'UnicodeError', 'UnicodeTranslateError', 'UnicodeWarning', - 'UserWarning', 'ValueError', 'Warning', 'ZeroDivisionError' + 'RuntimeWarning', + 'PendingDeprecationWarning', + 'UserWarning', ] if sys.platform.startswith("win"): diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py --- a/pypy/interpreter/generator.py +++ b/pypy/interpreter/generator.py @@ -63,7 +63,7 @@ """x.__iter__() <==> iter(x)""" return self.space.wrap(self) - def descr_send(self, w_arg=None): + def descr_send(self, w_arg): """send(arg) -> send 'arg' into generator, return next yielded value or raise StopIteration.""" return self.send_ex(w_arg) diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -264,25 +264,22 @@ try: executioncontext.call_trace(self) # - if operr is not None: - ec = self.space.getexecutioncontext() - next_instr = self.handle_operation_error(ec, operr) - self.last_instr = intmask(next_instr - 1) - else: - # Execution starts just after the last_instr. Initially, - # last_instr is -1. After a generator suspends it points to - # the YIELD_VALUE instruction. - next_instr = r_uint(self.last_instr + 1) - if next_instr != 0: - self.pushvalue(w_inputvalue) - # try: + if operr is not None: + ec = self.space.getexecutioncontext() + next_instr = self.handle_operation_error(ec, operr) + self.last_instr = intmask(next_instr - 1) + else: + # Execution starts just after the last_instr. Initially, + # last_instr is -1. After a generator suspends it points to + # the YIELD_VALUE instruction. 
+ next_instr = r_uint(self.last_instr + 1) + if next_instr != 0: + self.pushvalue(w_inputvalue) w_exitvalue = self.dispatch(self.pycode, next_instr, executioncontext) - except Exception: - executioncontext.return_trace(self, self.space.w_None) - raise - executioncontext.return_trace(self, w_exitvalue) + finally: + executioncontext.return_trace(self, w_exitvalue) # it used to say self.last_exception = None # this is now done by the code in pypyjit module # since we don't want to invalidate the virtualizable diff --git a/pypy/interpreter/test/test_generator.py b/pypy/interpreter/test/test_generator.py --- a/pypy/interpreter/test/test_generator.py +++ b/pypy/interpreter/test/test_generator.py @@ -57,12 +57,14 @@ def f(): yield 2 g = f() + # two arguments version raises(NameError, g.throw, NameError, "Error") def test_throw2(self): def f(): yield 2 g = f() + # single argument version raises(NameError, g.throw, NameError("Error")) def test_throw3(self): @@ -241,7 +243,8 @@ def f(): yield 1 g = f() - raises(TypeError, g.send, 1) + raises(TypeError, g.send) # one argument required + raises(TypeError, g.send, 1) # not started, must send None def test_generator_explicit_stopiteration(self): def f(): diff --git a/pypy/interpreter/test/test_pyframe.py b/pypy/interpreter/test/test_pyframe.py --- a/pypy/interpreter/test/test_pyframe.py +++ b/pypy/interpreter/test/test_pyframe.py @@ -545,3 +545,21 @@ it = yield_raise() assert next(it) is KeyError assert next(it) is KeyError + + def test_throw_trace_bug(self): + import sys + def f(): + yield 5 + gen = f() + assert next(gen) == 5 + seen = [] + def trace_func(frame, event, *args): + seen.append(event) + return trace_func + sys.settrace(trace_func) + try: + gen.throw(ValueError) + except ValueError: + pass + sys.settrace(None) + assert seen == ['call', 'exception', 'return'] diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -3,7 +3,7 @@ from rpython.rlib import rdynload, clibffi, entrypoint from rpython.rtyper.lltypesystem import rffi -VERSION = "1.8.2" +VERSION = "1.8.4" FFI_DEFAULT_ABI = clibffi.FFI_DEFAULT_ABI try: diff --git a/pypy/module/_cffi_backend/ctypearray.py b/pypy/module/_cffi_backend/ctypearray.py --- a/pypy/module/_cffi_backend/ctypearray.py +++ b/pypy/module/_cffi_backend/ctypearray.py @@ -11,7 +11,7 @@ from rpython.rlib.rarithmetic import ovfcheck from pypy.module._cffi_backend import cdataobj -from pypy.module._cffi_backend.ctypeptr import W_CTypePtrOrArray +from pypy.module._cffi_backend.ctypeptr import W_CTypePtrOrArray, W_CTypePointer from pypy.module._cffi_backend import ctypeprim @@ -22,6 +22,7 @@ is_nonfunc_pointer_or_array = True def __init__(self, space, ctptr, length, arraysize, extra): + assert isinstance(ctptr, W_CTypePointer) W_CTypePtrOrArray.__init__(self, space, arraysize, extra, 0, ctptr.ctitem) self.length = length diff --git a/pypy/module/_cffi_backend/ctypefunc.py b/pypy/module/_cffi_backend/ctypefunc.py --- a/pypy/module/_cffi_backend/ctypefunc.py +++ b/pypy/module/_cffi_backend/ctypefunc.py @@ -35,8 +35,7 @@ assert isinstance(ellipsis, bool) extra, xpos = self._compute_extra_text(fargs, fresult, ellipsis, abi) size = rffi.sizeof(rffi.VOIDP) - W_CTypePtrBase.__init__(self, space, size, extra, xpos, fresult, - could_cast_anything=False) + W_CTypePtrBase.__init__(self, space, size, extra, xpos, fresult) self.fargs = fargs self.ellipsis = ellipsis self.abi = abi @@ -59,6 +58,16 @@ 
lltype.free(self.cif_descr, flavor='raw') self.cif_descr = lltype.nullptr(CIF_DESCRIPTION) + def is_unichar_ptr_or_array(self): + return False + + def is_char_or_unichar_ptr_or_array(self): + return False + + def string(self, cdataobj, maxlen): + # Can't use ffi.string() on a function pointer + return W_CType.string(self, cdataobj, maxlen) + def new_ctypefunc_completing_argtypes(self, args_w): space = self.space nargs_declared = len(self.fargs) diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py --- a/pypy/module/_cffi_backend/ctypeobj.py +++ b/pypy/module/_cffi_backend/ctypeobj.py @@ -19,7 +19,6 @@ # XXX this could be improved with an elidable method get_size() # that raises in case it's still -1... - cast_anything = False is_primitive_integer = False is_nonfunc_pointer_or_array = False is_indirect_arg_for_call_python = False diff --git a/pypy/module/_cffi_backend/ctypeprim.py b/pypy/module/_cffi_backend/ctypeprim.py --- a/pypy/module/_cffi_backend/ctypeprim.py +++ b/pypy/module/_cffi_backend/ctypeprim.py @@ -120,7 +120,6 @@ class W_CTypePrimitiveChar(W_CTypePrimitiveCharOrUniChar): _attrs_ = [] - cast_anything = True def cast_to_int(self, cdata): return self.space.wrap(ord(cdata[0])) diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py --- a/pypy/module/_cffi_backend/ctypeptr.py +++ b/pypy/module/_cffi_backend/ctypeptr.py @@ -16,12 +16,11 @@ class W_CTypePtrOrArray(W_CType): - _attrs_ = ['ctitem', 'can_cast_anything', 'accept_str', 'length'] - _immutable_fields_ = ['ctitem', 'can_cast_anything', 'accept_str', 'length'] + _attrs_ = ['ctitem', 'accept_str', 'length'] + _immutable_fields_ = ['ctitem', 'accept_str', 'length'] length = -1 - def __init__(self, space, size, extra, extra_position, ctitem, - could_cast_anything=True): + def __init__(self, space, size, extra, extra_position, ctitem): name, name_position = ctitem.insert_name(extra, extra_position) W_CType.__init__(self, space, size, name, name_position) # this is the "underlying type": @@ -29,10 +28,11 @@ # - for arrays, it is the array item type # - for functions, it is the return type self.ctitem = ctitem - self.can_cast_anything = could_cast_anything and ctitem.cast_anything - self.accept_str = (self.can_cast_anything or - (ctitem.is_primitive_integer and - ctitem.size == rffi.sizeof(lltype.Char))) + self.accept_str = (self.is_nonfunc_pointer_or_array and + (isinstance(ctitem, ctypevoid.W_CTypeVoid) or + isinstance(ctitem, ctypeprim.W_CTypePrimitiveChar) or + (ctitem.is_primitive_integer and + ctitem.size == rffi.sizeof(lltype.Char)))) def is_unichar_ptr_or_array(self): return isinstance(self.ctitem, ctypeprim.W_CTypePrimitiveUniChar) @@ -139,7 +139,10 @@ class W_CTypePtrBase(W_CTypePtrOrArray): # base class for both pointers and pointers-to-functions - _attrs_ = [] + _attrs_ = ['is_void_ptr', 'is_voidchar_ptr'] + _immutable_fields_ = ['is_void_ptr', 'is_voidchar_ptr'] + is_void_ptr = False + is_voidchar_ptr = False def convert_to_object(self, cdata): ptrdata = rffi.cast(rffi.CCHARPP, cdata)[0] @@ -156,7 +159,16 @@ else: raise self._convert_error("compatible pointer", w_ob) if self is not other: - if not (self.can_cast_anything or other.can_cast_anything): + if self.is_void_ptr or other.is_void_ptr: + pass # cast from or to 'void *' + elif self.is_voidchar_ptr or other.is_voidchar_ptr: + space = self.space + msg = ("implicit cast from '%s' to '%s' " + "will be forbidden in the future (check that the types " + "are as you expect; use an explicit 
ffi.cast() if they " + "are correct)" % (other.name, self.name)) + space.warn(space.wrap(msg), space.w_UserWarning, stacklevel=1) + else: raise self._convert_error("compatible pointer", w_ob) rffi.cast(rffi.CCHARPP, cdata)[0] = w_ob.unsafe_escaping_ptr() @@ -167,8 +179,8 @@ class W_CTypePointer(W_CTypePtrBase): - _attrs_ = ['is_file', 'cache_array_type', 'is_void_ptr', '_array_types'] - _immutable_fields_ = ['is_file', 'cache_array_type?', 'is_void_ptr'] + _attrs_ = ['is_file', 'cache_array_type', '_array_types'] + _immutable_fields_ = ['is_file', 'cache_array_type?'] kind = "pointer" cache_array_type = None is_nonfunc_pointer_or_array = True @@ -183,6 +195,8 @@ self.is_file = (ctitem.name == "struct _IO_FILE" or ctitem.name == "FILE") self.is_void_ptr = isinstance(ctitem, ctypevoid.W_CTypeVoid) + self.is_voidchar_ptr = (self.is_void_ptr or + isinstance(ctitem, ctypeprim.W_CTypePrimitiveChar)) W_CTypePtrBase.__init__(self, space, size, extra, 2, ctitem) def newp(self, w_init, allocator): diff --git a/pypy/module/_cffi_backend/ctypevoid.py b/pypy/module/_cffi_backend/ctypevoid.py --- a/pypy/module/_cffi_backend/ctypevoid.py +++ b/pypy/module/_cffi_backend/ctypevoid.py @@ -7,7 +7,6 @@ class W_CTypeVoid(W_CType): _attrs_ = [] - cast_anything = True kind = "void" def __init__(self, space): diff --git a/pypy/module/_cffi_backend/handle.py b/pypy/module/_cffi_backend/handle.py --- a/pypy/module/_cffi_backend/handle.py +++ b/pypy/module/_cffi_backend/handle.py @@ -32,8 +32,8 @@ @unwrap_spec(w_cdata=cdataobj.W_CData) def from_handle(space, w_cdata): ctype = w_cdata.ctype - if (not isinstance(ctype, ctypeptr.W_CTypePtrOrArray) or - not ctype.can_cast_anything): + if (not isinstance(ctype, ctypeptr.W_CTypePointer) or + not ctype.is_voidchar_ptr): raise oefmt(space.w_TypeError, "expected a 'cdata' object with a 'void *' out of " "new_handle(), got '%s'", ctype.name) diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1,7 +1,7 @@ # ____________________________________________________________ import sys -assert __version__ == "1.8.2", ("This test_c.py file is for testing a version" +assert __version__ == "1.8.4", ("This test_c.py file is for testing a version" " of cffi that differs from the one that we" " get from 'import _cffi_backend'") if sys.version_info < (3,): @@ -3665,3 +3665,27 @@ check_dir(pp, []) check_dir(pp[0], ['a1', 'a2']) check_dir(pp[0][0], ['a1', 'a2']) + +def test_char_pointer_conversion(): + import warnings + assert __version__.startswith(("1.8", "1.9")), ( + "consider turning the warning into an error") + BCharP = new_pointer_type(new_primitive_type("char")) + BIntP = new_pointer_type(new_primitive_type("int")) + BVoidP = new_pointer_type(new_void_type()) + z1 = cast(BCharP, 0) + z2 = cast(BIntP, 0) + z3 = cast(BVoidP, 0) + with warnings.catch_warnings(record=True) as w: + newp(new_pointer_type(BIntP), z1) # warn + assert len(w) == 1 + newp(new_pointer_type(BVoidP), z1) # fine + assert len(w) == 1 + newp(new_pointer_type(BCharP), z2) # warn + assert len(w) == 2 + newp(new_pointer_type(BVoidP), z2) # fine + assert len(w) == 2 + newp(new_pointer_type(BCharP), z3) # fine + assert len(w) == 2 + newp(new_pointer_type(BIntP), z3) # fine + assert len(w) == 2 diff --git a/pypy/module/_cffi_backend/test/test_ffi_obj.py b/pypy/module/_cffi_backend/test/test_ffi_obj.py --- 
a/pypy/module/_cffi_backend/test/test_ffi_obj.py +++ b/pypy/module/_cffi_backend/test/test_ffi_obj.py @@ -503,3 +503,15 @@ assert ffi.unpack(p+1, 7) == b"bc\x00def\x00" p = ffi.new("int[]", [-123456789]) assert ffi.unpack(p, 1) == [-123456789] + + def test_bug_1(self): + import _cffi_backend as _cffi1_backend + ffi = _cffi1_backend.FFI() + q = ffi.new("char[]", "abcd") + p = ffi.cast("char(*)(void)", q) + raises(TypeError, ffi.string, p) + + def test_negative_array_size(self): + import _cffi_backend as _cffi1_backend + ffi = _cffi1_backend.FFI() + raises(ffi.error, ffi.cast, "int[-5]", 0) diff --git a/pypy/module/_cffi_backend/test/test_recompiler.py b/pypy/module/_cffi_backend/test/test_recompiler.py --- a/pypy/module/_cffi_backend/test/test_recompiler.py +++ b/pypy/module/_cffi_backend/test/test_recompiler.py @@ -8,7 +8,7 @@ @unwrap_spec(cdef=str, module_name=str, source=str) def prepare(space, cdef, module_name, source, w_includes=None, - w_extra_source=None): + w_extra_source=None, w_min_version=None): try: import cffi from cffi import FFI # <== the system one, which @@ -16,8 +16,13 @@ from cffi import ffiplatform except ImportError: py.test.skip("system cffi module not found or older than 1.0.0") - if cffi.__version_info__ < (1, 4, 0): - py.test.skip("system cffi module needs to be at least 1.4.0") + if w_min_version is None: + min_version = (1, 4, 0) + else: + min_version = tuple(space.unwrap(w_min_version)) + if cffi.__version_info__ < min_version: + py.test.skip("system cffi module needs to be at least %s, got %s" % ( + min_version, cffi.__version_info__)) space.appexec([], """(): import _cffi_backend # force it to be initialized """) @@ -1790,3 +1795,28 @@ "void f(void) { }") assert lib.f.__get__(42) is lib.f assert lib.f.__get__(42, int) is lib.f + + def test_typedef_array_dotdotdot(self): + ffi, lib = self.prepare(""" + typedef int foo_t[...], bar_t[...]; + int gv[...]; + typedef int mat_t[...][...]; + typedef int vmat_t[][...]; + """, + "test_typedef_array_dotdotdot", """ + typedef int foo_t[50], bar_t[50]; + int gv[23]; + typedef int mat_t[6][7]; + typedef int vmat_t[][8]; + """, min_version=(1, 8, 4)) + assert ffi.sizeof("foo_t") == 50 * ffi.sizeof("int") + assert ffi.sizeof("bar_t") == 50 * ffi.sizeof("int") + assert len(ffi.new("foo_t")) == 50 + assert len(ffi.new("bar_t")) == 50 + assert ffi.sizeof(lib.gv) == 23 * ffi.sizeof("int") + assert ffi.sizeof("mat_t") == 6 * 7 * ffi.sizeof("int") + assert len(ffi.new("mat_t")) == 6 + assert len(ffi.new("mat_t")[3]) == 7 + raises(ffi.error, ffi.sizeof, "vmat_t") + p = ffi.new("vmat_t", 4) + assert ffi.sizeof(p[3]) == 8 * ffi.sizeof("int") diff --git a/pypy/module/cpyext/longobject.py b/pypy/module/cpyext/longobject.py --- a/pypy/module/cpyext/longobject.py +++ b/pypy/module/cpyext/longobject.py @@ -5,7 +5,6 @@ from pypy.objspace.std.longobject import W_LongObject from pypy.interpreter.error import OperationError from rpython.rlib.rbigint import rbigint -from rpython.rlib.rarithmetic import intmask PyLong_Check, PyLong_CheckExact = build_type_checkers("Long", "w_int") @@ -27,25 +26,25 @@ """Return a new PyLongObject object from a C size_t, or NULL on failure. 
""" - return space.wrap(val) + return space.newlong_from_rarith_int(val) @cpython_api([rffi.LONGLONG], PyObject) def PyLong_FromLongLong(space, val): """Return a new PyLongObject object from a C long long, or NULL on failure.""" - return space.wrap(val) + return space.newlong_from_rarith_int(val) @cpython_api([rffi.ULONG], PyObject) def PyLong_FromUnsignedLong(space, val): """Return a new PyLongObject object from a C unsigned long, or NULL on failure.""" - return space.wrap(val) + return space.newlong_from_rarith_int(val) @cpython_api([rffi.ULONGLONG], PyObject) def PyLong_FromUnsignedLongLong(space, val): """Return a new PyLongObject object from a C unsigned long long, or NULL on failure.""" - return space.wrap(val) + return space.newlong_from_rarith_int(val) @cpython_api([PyObject], rffi.ULONG, error=-1) def PyLong_AsUnsignedLong(space, w_long): @@ -212,7 +211,10 @@ can be retrieved from the resulting value using PyLong_AsVoidPtr(). If the integer is larger than LONG_MAX, a positive long integer is returned.""" - return space.wrap(rffi.cast(ADDR, p)) + value = rffi.cast(ADDR, p) # signed integer + if value < 0: + return space.newlong_from_rarith_int(rffi.cast(lltype.Unsigned, p)) + return space.wrap(value) @cpython_api([PyObject], rffi.VOIDP, error=lltype.nullptr(rffi.VOIDP.TO)) def PyLong_AsVoidPtr(space, w_long): diff --git a/pypy/module/cpyext/pytraceback.py b/pypy/module/cpyext/pytraceback.py --- a/pypy/module/cpyext/pytraceback.py +++ b/pypy/module/cpyext/pytraceback.py @@ -5,7 +5,6 @@ from pypy.module.cpyext.pyobject import ( PyObject, make_ref, from_ref, Py_DecRef, make_typedescr) from pypy.module.cpyext.frameobject import PyFrameObject -from rpython.rlib.unroll import unrolling_iterable from pypy.interpreter.error import OperationError from pypy.interpreter.pytraceback import PyTraceback from pypy.interpreter import pycode diff --git a/pypy/module/cpyext/test/test_longobject.py b/pypy/module/cpyext/test/test_longobject.py --- a/pypy/module/cpyext/test/test_longobject.py +++ b/pypy/module/cpyext/test/test_longobject.py @@ -1,5 +1,6 @@ import sys, py from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rlib.rarithmetic import maxint from pypy.objspace.std.longobject import W_LongObject from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase @@ -7,18 +8,20 @@ class TestLongObject(BaseApiTest): def test_FromLong(self, space, api): - value = api.PyLong_FromLong(3) - assert isinstance(value, W_LongObject) - assert space.unwrap(value) == 3 + w_value = api.PyLong_FromLong(3) + assert isinstance(w_value, W_LongObject) + assert space.unwrap(w_value) == 3 - value = api.PyLong_FromLong(sys.maxint) - assert isinstance(value, W_LongObject) - assert space.unwrap(value) == sys.maxint + w_value = api.PyLong_FromLong(sys.maxint) + assert isinstance(w_value, W_LongObject) + assert space.unwrap(w_value) == sys.maxint def test_aslong(self, space, api): w_value = api.PyLong_FromLong((sys.maxint - 1) / 2) + assert isinstance(w_value, W_LongObject) w_value = space.mul(w_value, space.wrap(2)) + assert isinstance(w_value, W_LongObject) value = api.PyLong_AsLong(w_value) assert value == (sys.maxint - 1) @@ -34,12 +37,16 @@ def test_as_ssize_t(self, space, api): w_value = space.newlong(2) + assert isinstance(w_value, W_LongObject) value = api.PyLong_AsSsize_t(w_value) assert value == 2 - assert space.eq_w(w_value, api.PyLong_FromSsize_t(2)) + w_val2 = api.PyLong_FromSsize_t(2) + assert isinstance(w_val2, W_LongObject) 
+ assert space.eq_w(w_value, w_val2) def test_fromdouble(self, space, api): w_value = api.PyLong_FromDouble(-12.74) + assert isinstance(w_value, W_LongObject) assert space.unwrap(w_value) == -12 assert api.PyLong_AsDouble(w_value) == -12 @@ -101,9 +108,26 @@ lltype.free(overflow, flavor='raw') def test_as_voidptr(self, space, api): + # CPython returns an int (not a long) depending on the value + # passed to PyLong_FromVoidPtr(). In all cases, NULL becomes + # the int 0. w_l = api.PyLong_FromVoidPtr(lltype.nullptr(rffi.VOIDP.TO)) - assert space.unwrap(w_l) == 0L + assert space.is_w(space.type(w_l), space.w_int) + assert space.unwrap(w_l) == 0 assert api.PyLong_AsVoidPtr(w_l) == lltype.nullptr(rffi.VOIDP.TO) + # Positive values also return an int (assuming, like always in + # PyPy, that an int is big enough to store any pointer). + p = rffi.cast(rffi.VOIDP, maxint) + w_l = api.PyLong_FromVoidPtr(p) + assert space.is_w(space.type(w_l), space.w_int) + assert space.unwrap(w_l) == maxint + assert api.PyLong_AsVoidPtr(w_l) == p + # Negative values always return a long. + p = rffi.cast(rffi.VOIDP, -maxint-1) + w_l = api.PyLong_FromVoidPtr(p) + assert space.is_w(space.type(w_l), space.w_long) + assert space.unwrap(w_l) == maxint+1 + assert api.PyLong_AsVoidPtr(w_l) == p def test_sign_and_bits(self, space, api): if space.is_true(space.lt(space.sys.get('version_info'), @@ -133,23 +157,58 @@ module = self.import_extension('foo', [ ("from_unsignedlong", "METH_NOARGS", """ - return PyLong_FromUnsignedLong((unsigned long)-1); + PyObject * obj; + obj = PyLong_FromUnsignedLong((unsigned long)-1); + if (obj->ob_type != &PyLong_Type) + { + Py_DECREF(obj); + PyErr_SetString(PyExc_ValueError, + "PyLong_FromLongLong did not return PyLongObject"); + return NULL; + } + return obj; """)]) import sys assert module.from_unsignedlong() == 2 * sys.maxsize + 1 def test_fromlonglong(self): module = self.import_extension('foo', [ - ("from_longlong", "METH_NOARGS", + ("from_longlong", "METH_VARARGS", """ - return PyLong_FromLongLong((long long)-1); + int val; + PyObject * obj; + if (!PyArg_ParseTuple(args, "i", &val)) + return NULL; + obj = PyLong_FromLongLong((long long)val); + if (obj->ob_type != &PyLong_Type) + { + Py_DECREF(obj); + PyErr_SetString(PyExc_ValueError, + "PyLong_FromLongLong did not return PyLongObject"); + return NULL; + } + return obj; """), - ("from_unsignedlonglong", "METH_NOARGS", + ("from_unsignedlonglong", "METH_VARARGS", """ - return PyLong_FromUnsignedLongLong((unsigned long long)-1); + int val; + PyObject * obj; + if (!PyArg_ParseTuple(args, "i", &val)) + return NULL; + obj = PyLong_FromUnsignedLongLong((long long)val); + if (obj->ob_type != &PyLong_Type) + { + Py_DECREF(obj); + PyErr_SetString(PyExc_ValueError, + "PyLong_FromLongLong did not return PyLongObject"); + return NULL; + } + return obj; """)]) - assert module.from_longlong() == -1 - assert module.from_unsignedlonglong() == (1<<64) - 1 + assert module.from_longlong(-1) == -1 + assert module.from_longlong(0) == 0 + assert module.from_unsignedlonglong(0) == 0 + assert module.from_unsignedlonglong(-1) == (1<<64) - 1 def test_from_size_t(self): module = self.import_extension('foo', [ @@ -237,10 +296,15 @@ ("has_sub", "METH_NOARGS", """ PyObject *ret, *obj = PyLong_FromLong(42); - if (obj->ob_type->tp_as_number->nb_subtract) - ret = obj->ob_type->tp_as_number->nb_subtract(obj, obj); + if (obj->ob_type != &PyLong_Type) + ret = PyLong_FromLong(-2); else - ret = PyLong_FromLong(-1); + { + if (obj->ob_type->tp_as_number->nb_subtract) + ret = 
obj->ob_type->tp_as_number->nb_subtract(obj, obj); + else + ret = PyLong_FromLong(-1); + } Py_DECREF(obj); return ret; """), diff --git a/pypy/module/posix/app_posix.py b/pypy/module/posix/app_posix.py --- a/pypy/module/posix/app_posix.py +++ b/pypy/module/posix/app_posix.py @@ -64,6 +64,18 @@ if self.st_ctime is None: self.__dict__['st_ctime'] = self[9] + @property + def st_atime_ns(self): + return int(self[7]) * 1000000000 + self.nsec_atime + + @property + def st_mtime_ns(self): + return int(self[8]) * 1000000000 + self.nsec_mtime + + @property + def st_ctime_ns(self): + return int(self[9]) * 1000000000 + self.nsec_ctime + class statvfs_result(metaclass=structseqtype): diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -359,56 +359,43 @@ STAT_FIELDS = unrolling_iterable(enumerate(rposix_stat.STAT_FIELDS)) -N_INDEXABLE_FIELDS = 10 - -def _time_ns_from_float(ftime): - "Convert a floating-point time (in seconds) into a (s, ns) pair of ints" - fracpart, intpart = modf(ftime) - if fracpart < 0: - fracpart += 1. - intpart -= 1. - return int(intpart), int(fracpart * 1e9) - -@specialize.arg(4) -def _fill_time(space, lst, index, w_keywords, attrname, ftime): - stat_float_times = space.fromcache(StatState).stat_float_times - seconds, fractional_ns = _time_ns_from_float(ftime) - lst[index] = space.wrap(seconds) - if stat_float_times: - space.setitem(w_keywords, space.wrap(attrname), space.wrap(ftime)) - else: - space.setitem(w_keywords, space.wrap(attrname), space.wrap(seconds)) - w_billion = space.wrap(1000000000) - w_total_ns = space.add(space.mul(space.wrap(seconds), w_billion), - space.wrap(fractional_ns)) - space.setitem(w_keywords, space.wrap(attrname + '_ns'), w_total_ns) - -STANDARD_FIELDS = unrolling_iterable(enumerate(rposix_stat.STAT_FIELDS[:7])) -EXTRA_FIELDS = unrolling_iterable(rposix_stat.STAT_FIELDS[10:]) +STATVFS_FIELDS = unrolling_iterable(enumerate(rposix_stat.STATVFS_FIELDS)) def build_stat_result(space, st): - lst = [None] * N_INDEXABLE_FIELDS + FIELDS = STAT_FIELDS # also when not translating at all + lst = [None] * rposix_stat.N_INDEXABLE_FIELDS w_keywords = space.newdict() - for (i, (name, TYPE)) in STANDARD_FIELDS: - value = getattr(st, name) - w_value = space.wrap(value) - lst[i] = w_value + stat_float_times = space.fromcache(StatState).stat_float_times + for i, (name, TYPE) in FIELDS: + if i < rposix_stat.N_INDEXABLE_FIELDS: + # get the first 10 items by indexing; this gives us + # 'st_Xtime' as an integer, too + w_value = space.wrap(st[i]) + lst[i] = w_value + else: + w_value = space.wrap(getattr(st, name)) + space.setitem(w_keywords, space.wrap(name), w_value) - _fill_time(space, lst, 7, w_keywords, 'st_atime', st.st_atime) - _fill_time(space, lst, 8, w_keywords, 'st_mtime', st.st_mtime) - _fill_time(space, lst, 9, w_keywords, 'st_ctime', st.st_ctime) + # Note: 'w_keywords' contains the three attributes 'nsec_Xtime'. + # We have an app-level property in app_posix.stat_result to + # compute the full 'st_Xtime_ns' value. 
- for name, TYPE in EXTRA_FIELDS: - value = getattr(st, name) - w_value = space.wrap(value) - space.setitem(w_keywords, space.wrap(name), w_value) + # non-rounded values for name-based access + if stat_float_times: + space.setitem(w_keywords, + space.wrap('st_atime'), space.wrap(st.st_atime)) + space.setitem(w_keywords, + space.wrap('st_mtime'), space.wrap(st.st_mtime)) + space.setitem(w_keywords, + space.wrap('st_ctime'), space.wrap(st.st_ctime)) + #else: + # filled by the __init__ method w_tuple = space.newtuple(lst) w_stat_result = space.getattr(space.getbuiltinmodule(os.name), space.wrap('stat_result')) return space.call_function(w_stat_result, w_tuple, w_keywords) -STATVFS_FIELDS = unrolling_iterable(enumerate(rposix_stat.STATVFS_FIELDS)) def build_statvfs_result(space, st): vals_w = [None] * len(rposix_stat.STATVFS_FIELDS) diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -125,9 +125,9 @@ assert st[4] == st.st_uid assert st[5] == st.st_gid assert st[6] == st.st_size - assert st[7] == int(st.st_atime) - assert st[8] == int(st.st_mtime) - assert st[9] == int(st.st_ctime) + assert st[7] == int(st.st_atime) # in complete corner cases, rounding + assert st[8] == int(st.st_mtime) # here could maybe get the wrong + assert st[9] == int(st.st_ctime) # integer... assert stat.S_IMODE(st.st_mode) & stat.S_IRUSR assert stat.S_IMODE(st.st_mode) & stat.S_IWUSR @@ -137,13 +137,12 @@ assert st.st_size == 14 assert st.st_nlink == 1 - #if sys.platform.startswith('linux'): - # # expects non-integer timestamps - it's unlikely that they are - # # all three integers - # assert ((st.st_atime, st.st_mtime, st.st_ctime) != - # (st[7], st[8], st[9])) - # assert st.st_blksize * st.st_blocks >= st.st_size + assert not hasattr(st, 'nsec_atime') + if sys.platform.startswith('linux'): + assert isinstance(st.st_atime, float) + assert isinstance(st.st_mtime, float) + assert isinstance(st.st_ctime, float) assert hasattr(st, 'st_rdev') def test_stat_float_times(self): diff --git a/pypy/module/sys/vm.py b/pypy/module/sys/vm.py --- a/pypy/module/sys/vm.py +++ b/pypy/module/sys/vm.py @@ -242,13 +242,33 @@ from rpython.rtyper.lltypesystem import lltype, rffi return space.wrap(rffi.cast(lltype.Signed, handle)) +getsizeof_missing = """sys.getsizeof() is not implemented on PyPy. + +A memory profiler using this function is most likely to give results +inconsistent with reality on PyPy. It would be possible to have +sys.getsizeof() return a number (with enough work), but that may or +may not represent how much memory the object uses. It doesn't even +make really sense to ask how much *one* object uses, in isolation +with the rest of the system. For example, instances have maps, +which are often shared across many instances; in this case the maps +would probably be ignored by an implementation of sys.getsizeof(), +but their overhead is important in some cases if they are many +instances with unique maps. Conversely, equal strings may share +their internal string data even if they are different objects---or +empty containers may share parts of their internals as long as they +are empty. Even stranger, some lists create objects as you read +them; if you try to estimate the size in memory of range(10**6) as +the sum of all items' size, that operation will by itself create one +million integer objects that never existed in the first place. 
+""" + def getsizeof(space, w_object, w_default=None): - """Not implemented on PyPy.""" if w_default is None: - raise oefmt(space.w_TypeError, - "sys.getsizeof() not implemented on PyPy") + raise oefmt(space.w_TypeError, getsizeof_missing) return w_default +getsizeof.__doc__ = getsizeof_missing + def intern(space, w_str): """``Intern'' the given string. This enters the string in the (global) table of interned strings whose purpose is to speed up dictionary lookups. @@ -257,4 +277,3 @@ if space.is_w(space.type(w_str), space.w_unicode): return space.new_interned_w_str(w_str) raise oefmt(space.w_TypeError, "intern() argument must be string.") - diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ffi_backend.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ffi_backend.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ffi_backend.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ffi_backend.py @@ -480,3 +480,7 @@ assert ffi.unpack(p+1, 7) == b"bc\x00def\x00" p = ffi.new("int[]", [-123456789]) assert ffi.unpack(p, 1) == [-123456789] + + def test_negative_array_size(self): + ffi = FFI() + py.test.raises(ValueError, ffi.cast, "int[-5]", 0) diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py @@ -503,3 +503,7 @@ assert ffi.unpack(p+1, 7) == b"bc\x00def\x00" p = ffi.new("int[]", [-123456789]) assert ffi.unpack(p, 1) == [-123456789] + +def test_negative_array_size(): + ffi = _cffi1_backend.FFI() + py.test.raises(ffi.error, ffi.cast, "int[-5]", 0) diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py @@ -1981,3 +1981,29 @@ static struct aaa f1(int x) { struct aaa s = {0}; s.a = x; return s; } """) assert lib.f1(52).a == 52 + +def test_typedef_array_dotdotdot(): + ffi = FFI() + ffi.cdef(""" + typedef int foo_t[...], bar_t[...]; + int gv[...]; + typedef int mat_t[...][...]; + typedef int vmat_t[][...]; + """) + lib = verify(ffi, "test_typedef_array_dotdotdot", """ + typedef int foo_t[50], bar_t[50]; + int gv[23]; + typedef int mat_t[6][7]; + typedef int vmat_t[][8]; + """) + assert ffi.sizeof("foo_t") == 50 * ffi.sizeof("int") + assert ffi.sizeof("bar_t") == 50 * ffi.sizeof("int") + assert len(ffi.new("foo_t")) == 50 + assert len(ffi.new("bar_t")) == 50 + assert ffi.sizeof(lib.gv) == 23 * ffi.sizeof("int") + assert ffi.sizeof("mat_t") == 6 * 7 * ffi.sizeof("int") + assert len(ffi.new("mat_t")) == 6 + assert len(ffi.new("mat_t")[3]) == 7 + py.test.raises(ffi.error, ffi.sizeof, "vmat_t") + p = ffi.new("vmat_t", 4) + assert ffi.sizeof(p[3]) == 8 * ffi.sizeof("int") diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -288,6 +288,10 @@ return W_SmallLongObject.fromint(val) return W_LongObject.fromint(self, val) + @specialize.argtype(1) + def newlong_from_rarith_int(self, val): # val is an rarithmetic type + return W_LongObject.fromrarith_int(val) + def newlong_from_rbigint(self, val): return newlong(self, val) diff --git a/pypy/objspace/std/test/test_longobject.py b/pypy/objspace/std/test/test_longobject.py --- a/pypy/objspace/std/test/test_longobject.py +++ 
b/pypy/objspace/std/test/test_longobject.py @@ -26,7 +26,6 @@ space.raises_w(space.w_OverflowError, space.float_w, w_big) def test_rint_variants(self): - py.test.skip("XXX broken!") from rpython.rtyper.tool.rfficache import platform space = self.space for r in platform.numbertype_to_rclass.values(): @@ -37,8 +36,8 @@ for x in values: if not r.SIGNED: x &= r.MASK - w_obj = space.wrap(r(x)) - assert space.bigint_w(w_obj).eq(rbigint.fromint(x)) + w_obj = space.newlong_from_rarith_int(r(x)) + assert space.bigint_w(w_obj).eq(rbigint.fromlong(x)) class AppTestLong: diff --git a/pypy/objspace/std/test/test_typeobject.py b/pypy/objspace/std/test/test_typeobject.py --- a/pypy/objspace/std/test/test_typeobject.py +++ b/pypy/objspace/std/test/test_typeobject.py @@ -805,9 +805,7 @@ class AA(object): __slots__ = ('a',) aa = AA() - # the following line works on CPython >= 2.6 but not on PyPy. - # but see below for more - raises(TypeError, "aa.__class__ = A") + aa.__class__ = A raises(TypeError, "aa.__class__ = object") class Z1(A): pass @@ -869,9 +867,13 @@ __slots__ = ['a', 'b'] class Order2(object): __slots__ = ['b', 'a'] - # the following line works on CPython >= 2.6 but not on PyPy. - # but see below for more - raises(TypeError, "Order1().__class__ = Order2") + Order1().__class__ = Order2 + + # like CPython, the order of slot names doesn't matter + x = Order1() + x.a, x.b = 1, 2 + x.__class__ = Order2 + assert (x.a, x.b) == (1, 2) class U1(object): __slots__ = ['a', 'b'] @@ -881,10 +883,11 @@ __slots__ = ['a', 'b'] class V2(V1): __slots__ = ['c', 'd', 'e'] - # the following line does not work on CPython >= 2.6 either. - # that's just obscure. Really really. So we just ignore - # the whole issue until someone comes complaining. Then we'll - # just kill slots altogether apart from maybe doing a few checks. + # the following line does not work on CPython either: we can't + # change a class if the old and new class have different layouts + # that look compatible but aren't, because they don't have the + # same base-layout class (even if these base classes are + # themselves compatible)... obscure. raises(TypeError, "U2().__class__ = V2") def test_name(self): diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -103,9 +103,10 @@ """ _immutable_ = True - def __init__(self, typedef, nslots, base_layout=None): + def __init__(self, typedef, nslots, newslotnames=[], base_layout=None): self.typedef = typedef self.nslots = nslots + self.newslotnames = newslotnames[:] # make a fixed-size list self.base_layout = base_layout def issublayout(self, parent): @@ -115,6 +116,12 @@ return False return True + def expand(self, hasdict, weakrefable): + """Turn this Layout into a tuple. 
If two classes get equal + tuples, it means their instances have a fully compatible layout.""" + return (self.typedef, self.newslotnames, self.base_layout, + hasdict, weakrefable) + # possible values of compares_by_identity_status UNKNOWN = 0 @@ -287,8 +294,7 @@ # compute a tuple that fully describes the instance layout def get_full_instance_layout(self): - layout = self.layout - return (layout, self.hasdict, self.weakrefable) + return self.layout.expand(self.hasdict, self.weakrefable) def compute_default_mro(self): return compute_C3_mro(self.space, self) @@ -1022,11 +1028,15 @@ w_self.weakrefable = w_self.weakrefable or w_base.weakrefable return hasoldstylebase + def create_all_slots(w_self, hasoldstylebase, w_bestbase, force_new_layout): + from pypy.objspace.std.listobject import StringSort + base_layout = w_bestbase.layout index_next_extra_slot = base_layout.nslots space = w_self.space dict_w = w_self.dict_w + newslotnames = [] if '__slots__' not in dict_w: wantdict = True wantweakref = True @@ -1052,9 +1062,22 @@ "__weakref__ slot disallowed: we already got one") wantweakref = True else: - index_next_extra_slot = create_slot(w_self, w_slot_name, - slot_name, - index_next_extra_slot) + newslotnames.append(slot_name) + # Sort the list of names collected so far + sorter = StringSort(newslotnames, len(newslotnames)) + sorter.sort() + # Try to create all slots in order. The creation of some of + # them might silently fail; then we delete the name from the + # list. At the end, 'index_next_extra_slot' has been advanced + # by the final length of 'newslotnames'. + i = 0 + while i < len(newslotnames): + if create_slot(w_self, newslotnames[i], index_next_extra_slot): + index_next_extra_slot += 1 + i += 1 + else: + del newslotnames[i] + # wantdict = wantdict or hasoldstylebase if wantdict: create_dict_slot(w_self) @@ -1063,13 +1086,14 @@ if '__del__' in dict_w: w_self.hasuserdel = True # + assert index_next_extra_slot == base_layout.nslots + len(newslotnames) if index_next_extra_slot == base_layout.nslots and not force_new_layout: return base_layout else: return Layout(base_layout.typedef, index_next_extra_slot, - base_layout=base_layout) + newslotnames, base_layout=base_layout) -def create_slot(w_self, w_slot_name, slot_name, index_next_extra_slot): +def create_slot(w_self, slot_name, index_next_extra_slot): space = w_self.space if not valid_slot_name(slot_name): raise oefmt(space.w_TypeError, "__slots__ must be identifiers") @@ -1077,16 +1101,17 @@ slot_name = mangle(slot_name, w_self.name) if slot_name in w_self.dict_w: raise oefmt(space.w_ValueError, - "%R in __slots__ conflicts with class variable", - w_slot_name) + "'%8' in __slots__ conflicts with class variable", + slot_name) else: # Force interning of slot names. 
slot_name = space.str_w(space.new_interned_str(slot_name)) # in cpython it is ignored less, but we probably don't care member = Member(index_next_extra_slot, slot_name, w_self) - index_next_extra_slot += 1 w_self.dict_w[slot_name] = space.wrap(member) - return index_next_extra_slot + return True + else: + return False def create_dict_slot(w_self): if not w_self.hasdict: diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -200,8 +200,12 @@ or v.concretetype != lltype.Bool): return False for op in block.operations[::-1]: - if v in op.args: - return False # variable is also used in cur block + # check if variable is used in block + for arg in op.args: + if arg == v: + return False + if isinstance(arg, ListOfKind) and v in arg.content: + return False if v is op.result: if op.opname not in ('int_lt', 'int_le', 'int_eq', 'int_ne', 'int_gt', 'int_ge', diff --git a/rpython/jit/codewriter/test/test_jtransform.py b/rpython/jit/codewriter/test/test_jtransform.py --- a/rpython/jit/codewriter/test/test_jtransform.py +++ b/rpython/jit/codewriter/test/test_jtransform.py @@ -243,6 +243,20 @@ assert block.exitswitch == (opname, v1, '-live-before') assert block.exits == exits +def test_optimize_goto_if_not__argument_to_call(): + for opname in ['ptr_iszero', 'ptr_nonzero']: + v1 = Variable() + v3 = Variable(); v3.concretetype = lltype.Bool + v4 = Variable() + block = Block([v1]) + callop = SpaceOperation('residual_call_r_i', + ["fake", ListOfKind('int', [v3])], v4) + block.operations = [SpaceOperation(opname, [v1], v3), callop] + block.exitswitch = v3 + block.exits = exits = [FakeLink(False), FakeLink(True)] + res = Transformer().optimize_goto_if_not(block) + assert not res + def test_symmetric(): ops = {'int_add': 'int_add', 'int_or': 'int_or', diff --git a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py @@ -90,18 +90,23 @@ assert vs.make_inputargs(args, optimizer) == [] def test_make_inputargs_2(self): - # Ensure that make_inputargs properly errors with VirtualStatesCantMatch - # when the type information for a virtual field conflicts. In practice the - # expected and given field always share a common subclass. - # This check is needed as not all paths to make_inputargs in unroll.py - # are guarded by a call to generate_guards. + # Ensure that make_inputargs does not error when the lengths of the fields + # for the runtime box does not match what the virtual state expected. + # This can occur in unroll.py, as not all paths to make_inputargs are + # guareded with a generalization_of check. The property is validated + # subsequently in all cases, so we just need to ensure that this case does + # not cause segfaults. 
optimizer = FakeOptimizer(self.cpu) classbox1 = self.cpu.ts.cls_of_box(InputArgRef(self.nodeaddr)) - innervalue1 = info.InstancePtrInfo(known_class=classbox1, is_virtual=True, descr=self.valuedescr.get_parent_descr()) + innervalue1 = info.InstancePtrInfo( + known_class=classbox1, is_virtual=True, + descr=self.valuedescr.get_parent_descr()) for field in self.valuedescr.get_parent_descr().get_all_fielddescrs(): innervalue1.setfield(field, None, ConstInt(42)) classbox2 = self.cpu.ts.cls_of_box(InputArgRef(self.myptr3)) - innervalue2 = info.InstancePtrInfo(known_class=classbox2, is_virtual=True, descr=self.valuedescr3.get_parent_descr()) + innervalue2 = info.InstancePtrInfo( + known_class=classbox2, is_virtual=True, + descr=self.valuedescr3.get_parent_descr()) for field in self.valuedescr3.get_parent_descr().get_all_fielddescrs(): innervalue2.setfield(field, None, ConstInt(42)) @@ -111,10 +116,14 @@ nodebox2.set_forwarded(innervalue2) constr = VirtualStateConstructor(optimizer) - vs = constr.get_virtual_state([nodebox1]) + vs1 = constr.get_virtual_state([nodebox1]) + constr = VirtualStateConstructor(optimizer) + vs2 = constr.get_virtual_state([nodebox2]) - with py.test.raises(VirtualStatesCantMatch): - args = vs.make_inputargs([nodebox2], optimizer, force_boxes=True) + # This should succeed with no exceptions + vs1.make_inputargs([nodebox2], optimizer, force_boxes=False) + assert not vs1.generalization_of(vs2, optimizer) + assert not vs2.generalization_of(vs1, optimizer) def test_position_generalization(self): def postest(info1, info2): diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -167,7 +167,8 @@ [self.get_box_replacement(x) for x in end_jump.getarglist()], self.optimizer, force_boxes=True) for arg in args: - self.optimizer.force_box(arg) + if arg is not None: + self.optimizer.force_box(arg) except VirtualStatesCantMatch: raise InvalidLoop("Virtual states did not match " "after picking the virtual state, when forcing" diff --git a/rpython/jit/metainterp/optimizeopt/virtualstate.py b/rpython/jit/metainterp/optimizeopt/virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/virtualstate.py @@ -177,14 +177,6 @@ def _generalization_of_structpart(self, other): raise NotImplementedError - @staticmethod - def descr_issubclass(descr1, descr2, optimizer): - if not descr1.is_object() or not descr2.is_object(): - return True - vtable1 = descr1.get_vtable() - vtable2 = descr2.get_vtable() - return optimizer._check_subclass(vtable1, vtable2) - def enum_forced_boxes(self, boxes, box, optimizer, force_boxes=False): box = optimizer.get_box_replacement(box) info = optimizer.getptrinfo(box) @@ -193,13 +185,12 @@ else: assert isinstance(info, AbstractStructPtrInfo) - for i in range(len(self.fielddescrs)): + # The min operation ensures we don't wander off either array, as not all + # to make_inputargs have validated their inputs with generate_guards. 
+ for i in range(min(len(self.fielddescrs), len(info._fields))): state = self.fieldstate[i] - descr = self.fielddescrs[i].get_parent_descr() if not state: continue - if not self.descr_issubclass(info.descr, descr, optimizer.optimizer): - raise VirtualStatesCantMatch() if state.position > self.position: fieldbox = info._fields[i] state.enum_forced_boxes(boxes, fieldbox, optimizer, force_boxes) diff --git a/rpython/jit/metainterp/test/test_ajit.py b/rpython/jit/metainterp/test/test_ajit.py --- a/rpython/jit/metainterp/test/test_ajit.py +++ b/rpython/jit/metainterp/test/test_ajit.py @@ -4558,3 +4558,20 @@ self.meta_interp(f, []) self.check_resops(guard_nonnull=0) + def test_loop_before_main_loop(self): + fdriver = JitDriver(greens=[], reds='auto') + gdriver = JitDriver(greens=[], reds='auto') + def f(i, j): + while j > 0: # this loop unrolls because it is in the same + j -= 1 # function as a jit_merge_point() + while i > 0: + fdriver.jit_merge_point() + i -= 1 + def g(i, j, k): + while k > 0: + gdriver.jit_merge_point() + f(i, j) + k -= 1 + + self.meta_interp(g, [5, 5, 5]) + self.check_resops(guard_true=10) # 5 unrolled, plus 5 unrelated diff --git a/rpython/rlib/rposix_stat.py b/rpython/rlib/rposix_stat.py --- a/rpython/rlib/rposix_stat.py +++ b/rpython/rlib/rposix_stat.py @@ -17,7 +17,7 @@ from rpython.rtyper.error import TyperError from rpython.rlib._os_support import _preferred_traits, string_traits -from rpython.rlib.objectmodel import specialize +from rpython.rlib.objectmodel import specialize, we_are_translated from rpython.rtyper.lltypesystem import lltype, rffi from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.rlib.rarithmetic import intmask @@ -51,15 +51,18 @@ ("st_uid", lltype.Signed), ("st_gid", lltype.Signed), ("st_size", lltype.SignedLongLong), - ("st_atime", lltype.Float), - ("st_mtime", lltype.Float), - ("st_ctime", lltype.Float), + ("st_atime", lltype.SignedLongLong), # integral number of seconds + ("st_mtime", lltype.SignedLongLong), # + ("st_ctime", lltype.SignedLongLong), # ("st_blksize", lltype.Signed), ("st_blocks", lltype.Signed), ("st_rdev", lltype.Signed), ("st_flags", lltype.Signed), #("st_gen", lltype.Signed), -- new in CPy 2.5, not implemented #("st_birthtime", lltype.Float), -- new in CPy 2.5, not implemented + ("nsec_atime", lltype.Signed), # number of nanoseconds + ("nsec_mtime", lltype.Signed), # + ("nsec_ctime", lltype.Signed), # ] N_INDEXABLE_FIELDS = 10 @@ -79,6 +82,37 @@ ("f_namemax", lltype.Signed), ] +@specialize.arg(1) +def get_stat_ns_as_bigint(st, name): + """'name' is one of the strings "atime", "mtime" or "ctime". + Returns a bigint that represents the number of nanoseconds + stored inside the RPython-level os.stat_result 'st'. + + Note that when running untranslated, the os.stat_result type + is from Python 2.7, which doesn't store more precision than + a float anyway. You will only get more after translation. 
+ """ + from rpython.rlib.rbigint import rbigint + + if not we_are_translated(): + as_float = getattr(st, "st_" + name) + return rbigint.fromfloat(as_float * 1e9) + + if name == "atime": + i, j = 7, -3 + elif name == "mtime": + i, j = 8, -2 + elif name == "ctime": + i, j = 9, -1 + else: + raise AssertionError(name) + + sec = st[i] + nsec = st[j] + result = rbigint.fromrarith_int(sec).int_mul(1000000000) + result = result.int_add(nsec) + return result + # ____________________________________________________________ # @@ -97,7 +131,15 @@ if not s_attr.is_constant(): raise annmodel.AnnotatorError("non-constant attr name in getattr()") attrname = s_attr.const - TYPE = STAT_FIELD_TYPES[attrname] + if attrname in ('st_atime', 'st_mtime', 'st_ctime'): + # like CPython, in RPython we can read the st_Xtime + # attribute and get a floating-point result. We can also + # get a full-precision bigint with get_stat_ns_as_bigint(). + # The floating-point result is computed like a property + # by _ll_get_st_Xtime(). + TYPE = lltype.Float + else: + TYPE = STAT_FIELD_TYPES[attrname] return lltype_to_annotation(TYPE) def _get_rmarshall_support_(self): # for rlib.rmarshal @@ -105,13 +147,14 @@ # (we ignore the extra values here for simplicity and portability) def stat_result_reduce(st): return (st[0], st[1], st[2], st[3], st[4], - st[5], st[6], st[7], st[8], st[9]) + st[5], st[6], st[7], st[8], st[9], + st[-3], st[-2], st[-1]) def stat_result_recreate(tup): - return make_stat_result(tup + extra_zeroes) + return make_stat_result(tup[:10] + extra_zeroes + tup[-3:]) s_reduced = annmodel.SomeTuple([lltype_to_annotation(TYPE) for name, TYPE in PORTABLE_STAT_FIELDS]) - extra_zeroes = (0,) * (len(STAT_FIELDS) - len(PORTABLE_STAT_FIELDS)) + extra_zeroes = (0,) * (len(STAT_FIELDS) - len(PORTABLE_STAT_FIELDS) - 3) return s_reduced, stat_result_reduce, stat_result_recreate @@ -119,7 +162,7 @@ def getitem((s_sta, s_int)): assert s_int.is_constant(), "os.stat()[index]: index must be constant" index = s_int.const - assert 0 <= index < N_INDEXABLE_FIELDS, "os.stat()[index] out of range" + assert -3 <= index < N_INDEXABLE_FIELDS, "os.stat()[index] out of range" name, TYPE = STAT_FIELDS[index] return lltype_to_annotation(TYPE) @@ -152,28 +195,61 @@ def rtype_getattr(self, hop): s_attr = hop.args_s[1] attr = s_attr.const + if attr in ('st_atime', 'st_mtime', 'st_ctime'): + ll_func = globals()['_ll_get_' + attr] + v_tuple = hop.inputarg(self, arg=0) + return hop.gendirectcall(ll_func, v_tuple) try: index = self.stat_field_indexes[attr] except KeyError: raise TyperError("os.stat().%s: field not available" % (attr,)) return self.redispatch_getfield(hop, index) +@specialize.memo() +def _stfld(name): + index = STAT_FIELD_NAMES.index(name) + return 'item%d' % index + +def _ll_get_st_atime(tup): + return (float(getattr(tup, _stfld("st_atime"))) + + 1E-9 * getattr(tup, _stfld("nsec_atime"))) + +def _ll_get_st_mtime(tup): + return (float(getattr(tup, _stfld("st_mtime"))) + + 1E-9 * getattr(tup, _stfld("nsec_mtime"))) + +def _ll_get_st_ctime(tup): + return (float(getattr(tup, _stfld("st_ctime"))) + + 1E-9 * getattr(tup, _stfld("nsec_ctime"))) + class __extend__(pairtype(StatResultRepr, IntegerRepr)): def rtype_getitem((r_sta, r_int), hop): s_int = hop.args_s[1] index = s_int.const + if index < 0: + index += len(STAT_FIELDS) return r_sta.redispatch_getfield(hop, index) s_StatResult = SomeStatResult() + def make_stat_result(tup): - """Turn a tuple into an os.stat_result object.""" - positional = tuple( - lltype.cast_primitive(TYPE, value) 
diff --git a/rpython/rlib/rwin32file.py b/rpython/rlib/rwin32file.py
--- a/rpython/rlib/rwin32file.py
+++ b/rpython/rlib/rwin32file.py
@@ -6,6 +6,7 @@
 from rpython.translator.tool.cbuild import ExternalCompilationInfo
 from rpython.rtyper.tool import rffi_platform as platform
 from rpython.rlib.objectmodel import specialize
+from rpython.rlib.rarithmetic import intmask

 @specialize.memo()
 def make_win32_traits(traits):
@@ -213,13 +214,25 @@
     return (rffi.r_longlong(high) << 32) + rffi.r_longlong(low)

 # Seconds between 1.1.1601 and 1.1.1970
-secs_between_epochs = rffi.r_longlong(11644473600)
+secs_between_epochs = 11644473600.0
+hns_between_epochs = rffi.r_longlong(116444736000000000)   # units of 100 nsec

 def FILE_TIME_to_time_t_float(filetime):
     ft = make_longlong(filetime.c_dwHighDateTime, filetime.c_dwLowDateTime)
     # FILETIME is in units of 100 nsec
     return float(ft) * (1.0 / 10000000.0) - secs_between_epochs

+def FILE_TIME_to_time_t_nsec(filetime):
+    """Like the previous function, but returns a pair: (integer part
+    'time_t' as a r_longlong, fractional part as an int measured in
+    nanoseconds).
+    """
+    ft = make_longlong(filetime.c_dwHighDateTime, filetime.c_dwLowDateTime)
+    ft -= hns_between_epochs
+    int_part = ft / 10000000
+    frac_part = ft - (int_part * 10000000)
+    return (int_part, intmask(frac_part) * 100)
+
 def time_t_to_FILE_TIME(time, filetime):
     ft = rffi.r_longlong((time + secs_between_epochs) * 10000000)
     filetime.c_dwHighDateTime = rffi.r_uint(ft >> 32)
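[Editor's sketch, not part of the changeset: the Windows-side conversion above follows from FILETIME being a count of 100-nanosecond ticks since 1601-01-01; rebasing by the tick count between the 1601 and 1970 epochs and splitting by 10**7 yields the (time_t, nanoseconds) pair. A plain-Python check of the same arithmetic, valid for post-1970 tick counts (divmod stands in for the r_longlong division used in RPython):]

    HNS_BETWEEN_EPOCHS = 11644473600 * 10**7      # 100ns ticks from 1601 to 1970
    assert HNS_BETWEEN_EPOCHS == 116444736000000000   # the constant used above

    def filetime_to_time_t_nsec(ft_100ns):
        # rebase from the 1601 epoch to the Unix epoch, then split into
        # whole seconds and a nanosecond remainder
        ft_100ns -= HNS_BETWEEN_EPOCHS
        seconds, frac_100ns = divmod(ft_100ns, 10**7)
        return seconds, frac_100ns * 100

    # a tick count corresponding to roughly 2016-09-21 UTC, plus 1234567 ticks
    ticks = HNS_BETWEEN_EPOCHS + 1474416000 * 10**7 + 1234567
    assert filetime_to_time_t_nsec(ticks) == (1474416000, 123456700)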
diff --git a/rpython/rlib/test/test_rposix_stat.py b/rpython/rlib/test/test_rposix_stat.py
--- a/rpython/rlib/test/test_rposix_stat.py
+++ b/rpython/rlib/test/test_rposix_stat.py
@@ -2,12 +2,20 @@
 import py
 from rpython.rlib import rposix_stat
 from rpython.tool.udir import udir
+from rpython.translator.c.test.test_genc import compile
+from rpython.rtyper.lltypesystem import lltype
+

 class TestPosixStatFunctions:
     @py.test.mark.skipif("sys.platform == 'win32'",
                          reason="win32 only has the portable fields")
     def test_has_all_fields(self):
-        assert rposix_stat.STAT_FIELDS == rposix_stat.ALL_STAT_FIELDS[:13]
+        # XXX this test is obscure!  it will fail if the exact set of
+        # XXX stat fields found differs from the one we expect on Linux.
+        # XXX Why?
+        assert rposix_stat.STAT_FIELDS == (
+            rposix_stat.ALL_STAT_FIELDS[:13] +
+            rposix_stat.ALL_STAT_FIELDS[-3:])

     def test_stat(self):
         def check(f):
@@ -66,3 +74,27 @@
         finally:
             os.close(dirfd)
         assert result.st_atime == tmpdir.join('file').atime()
+
+def test_high_precision_stat_time():
+    def f():
+        st = os.stat('.')
+        # should be supported on all platforms, but give a result whose
+        # precision might be lower than full nanosecond
+        highprec = rposix_stat.get_stat_ns_as_bigint(st, "ctime")
+        return '%s;%s' % (st.st_ctime, highprec.str())
+    fc = compile(f, [])
+    as_string = fc()
+    asfloat, highprec = as_string.split(';')
+    asfloat = float(asfloat)
+    highprec = int(highprec)
+    st = os.stat('.')
+    assert abs(asfloat - st.st_ctime) < 500e-9
+    assert abs(highprec - int(st.st_ctime * 1e9)) < 500
+    assert abs(rposix_stat.get_stat_ns_as_bigint(st, "ctime").tolong()
+               - st.st_ctime * 1e9) < 3
+    if rposix_stat.TIMESPEC is not None:
+        with lltype.scoped_alloc(rposix_stat.STAT_STRUCT.TO) as stresult:
+            rposix_stat.c_stat(".", stresult)
+            assert 0 <= stresult.c_st_ctim.c_tv_nsec <= 999999999
+            assert highprec == (int(stresult.c_st_ctim.c_tv_sec) * 1000000000
+                                + int(stresult.c_st_ctim.c_tv_nsec))
diff --git a/rpython/rtyper/rpbc.py b/rpython/rtyper/rpbc.py
--- a/rpython/rtyper/rpbc.py
+++ b/rpython/rtyper/rpbc.py
@@ -646,6 +646,9 @@
     def ll_str(self, x):
         return self.getstr()

+    def get_ll_eq_function(self):
+        return None
+

 class MultipleFrozenPBCReprBase(CanBeNull, Repr):
     def convert_const(self, pbc):
@@ -654,6 +657,9 @@
         frozendesc = self.rtyper.annotator.bookkeeper.getdesc(pbc)
         return self.convert_desc(frozendesc)

+    def get_ll_eq_function(self):
+        return None
+
 class MultipleUnrelatedFrozenPBCRepr(MultipleFrozenPBCReprBase):
     """For a SomePBC of frozen PBCs that have no common access set.
     The only possible operation on such a thing is comparison with 'is'."""
diff --git a/rpython/rtyper/test/test_rpbc.py b/rpython/rtyper/test/test_rpbc.py
--- a/rpython/rtyper/test/test_rpbc.py
+++ b/rpython/rtyper/test/test_rpbc.py
@@ -1729,6 +1729,23 @@
         res = self.interpret(f, [])
         assert res == 42

+    def test_equality_of_frozen_pbcs_inside_data_structures(self):
+        class A:
+            def _freeze_(self):
+                return True
+        a1 = A()
+        a2 = A()
+        def f():
+            return [a1] == [a1]
+        def g(i):
+            x1 = [a1, a2][i]
+            x2 = [a1, a2][i]
+            return (x1,) == (x2,)
+        res = self.interpret(f, [])
+        assert res == True
+        res = self.interpret(g, [1])
+        assert res == True
+
 # ____________________________________________________________

 def test_hlinvoke_simple():
diff --git a/rpython/translator/backendopt/merge_if_blocks.py b/rpython/translator/backendopt/merge_if_blocks.py
--- a/rpython/translator/backendopt/merge_if_blocks.py
+++ b/rpython/translator/backendopt/merge_if_blocks.py
@@ -20,6 +20,14 @@
         return False
     if isinstance(op.args[0], Constant) and isinstance(op.args[1], Constant):
         return False
+    # check that the constant is hashable (ie not a symbolic)
+    try:
+        if isinstance(op.args[0], Constant):
+            hash(op.args[0].value)
+        else:
+            hash(op.args[1].value)
+    except TypeError:
+        return False
     return True

 def merge_chain(chain, checkvar, varmap, graph):
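[Editor's sketch, not part of the changeset: the guard added to merge_if_blocks relies on hash() raising TypeError for constants that cannot be hashed; the comment points at symbolic constants. The same try/except shape in plain Python, using a stand-in unhashable class rather than the real Symbolic type:]

    class _UnhashableConstant(object):     # stand-in only, not RPython's Symbolic
        __hash__ = None                    # makes hash() raise TypeError

    def is_hashable(value):
        # same try/except shape as the new check above
        try:
            hash(value)
        except TypeError:
            return False
        return True

    assert is_hashable(42)
    assert not is_hashable(_UnhashableConstant())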
diff --git a/rpython/translator/backendopt/test/test_merge_if_blocks.py b/rpython/translator/backendopt/test/test_merge_if_blocks.py
--- a/rpython/translator/backendopt/test/test_merge_if_blocks.py
+++ b/rpython/translator/backendopt/test/test_merge_if_blocks.py
@@ -2,11 +2,12 @@
 from rpython.translator.backendopt.merge_if_blocks import merge_if_blocks
 from rpython.translator.backendopt.all import backend_optimizations
 from rpython.translator.translator import TranslationContext, graphof as tgraphof
-from rpython.flowspace.model import Block
+from rpython.flowspace.model import Block, checkgraph
 from rpython.translator.backendopt.removenoops import remove_same_as
 from rpython.rtyper.llinterp import LLInterpreter
 from rpython.rlib.rarithmetic import r_uint, r_ulonglong, r_longlong, r_int
 from rpython.annotator.model import SomeChar, SomeUnicodeCodePoint
+from rpython.rlib.objectmodel import CDefinedIntSymbolic

 def do_test_merge(fn, testvalues):
     t = TranslationContext()
@@ -225,3 +226,29 @@
     malloc.remove_mallocs(t, t.graphs)
     from rpython.translator import simplify
     simplify.join_blocks(graph)
+
+def test_switch_on_symbolic():
+    symb1 = CDefinedIntSymbolic("1", 1)
+    symb2 = CDefinedIntSymbolic("2", 2)
+    symb3 = CDefinedIntSymbolic("3", 3)
+    def fn(x):
+        res = 0
+        if x == symb1:
+            res += x + 1
+        elif x == symb2:
+            res += x + 2
+        elif x == symb3:
+            res += x + 3
+        res += 1
+        return res
+    t = TranslationContext()
+    a = t.buildannotator()
+    a.build_types(fn, [int])
_______________________________________________
pypy-commit mailing list
pypy-commit@python.org
https://mail.python.org/mailman/listinfo/pypy-commit