Author: fijal
Branch: share-guard-info
Changeset: r79943:c95aabc0cb96
Date: 2015-10-03 10:55 +0200
http://bitbucket.org/pypy/pypy/changeset/c95aabc0cb96/
Log: merge default diff too long, truncating to 2000 out of 2171 lines diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -609,7 +609,7 @@ def make_accessor_locked(name): key = 'function ' + name if key in ffi._parser._declarations: - tp = ffi._parser._declarations[key] + tp, _ = ffi._parser._declarations[key] BType = ffi._get_cached_btype(tp) try: value = backendlib.load_function(BType, name) @@ -620,7 +620,7 @@ # key = 'variable ' + name if key in ffi._parser._declarations: - tp = ffi._parser._declarations[key] + tp, _ = ffi._parser._declarations[key] BType = ffi._get_cached_btype(tp) read_variable = backendlib.read_variable write_variable = backendlib.write_variable @@ -631,12 +631,23 @@ # if not copied_enums: from . import model - for key, tp in ffi._parser._declarations.items(): + error = None + for key, (tp, _) in ffi._parser._declarations.items(): if not isinstance(tp, model.EnumType): continue + try: + tp.check_not_partial() + except Exception as e: + error = e + continue for enumname, enumval in zip(tp.enumerators, tp.enumvalues): if enumname not in library.__dict__: library.__dict__[enumname] = enumval + if error is not None: + if name in library.__dict__: + return # ignore error, about a different enum + raise error + for key, val in ffi._parser._int_constants.items(): if key not in library.__dict__: library.__dict__[key] = val diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -192,6 +192,7 @@ if not decl.name: raise api.CDefError("typedef does not declare any name", decl) + quals = 0 if (isinstance(decl.type.type, pycparser.c_ast.IdentifierType) and decl.type.type.names[-1] == '__dotdotdot__'): realtype = self._get_unknown_type(decl) @@ -202,8 +203,9 @@ decl.type.type.type.names == ['__dotdotdot__']): realtype = model.unknown_ptr_type(decl.name) else: - realtype = self._get_type(decl.type, name=decl.name) - self._declare('typedef ' + decl.name, realtype) + realtype, quals = self._get_type_and_quals( + decl.type, name=decl.name) + self._declare('typedef ' + decl.name, realtype, quals=quals) else: raise api.CDefError("unrecognized construct", decl) except api.FFIError as e: @@ -255,9 +257,9 @@ def _parse_decl(self, decl): node = decl.type if isinstance(node, pycparser.c_ast.FuncDecl): - tp = self._get_type(node, name=decl.name) + tp, quals = self._get_type_and_quals(node, name=decl.name) assert isinstance(tp, model.RawFunctionType) - tp = self._get_type_pointer(tp) + tp = self._get_type_pointer(tp, quals) self._declare('function ' + decl.name, tp) else: if isinstance(node, pycparser.c_ast.Struct): @@ -271,9 +273,10 @@ decl) # if decl.name: - tp = self._get_type(node, partial_length_ok=True) + tp, quals = self._get_type_and_quals(node, + partial_length_ok=True) if tp.is_raw_function: - tp = self._get_type_pointer(tp) + tp = self._get_type_pointer(tp, quals) self._declare('function ' + decl.name, tp) elif (tp.is_integer_type() and hasattr(decl, 'init') and @@ -287,10 +290,10 @@ _r_int_literal.match(decl.init.expr.value)): self._add_integer_constant(decl.name, '-' + decl.init.expr.value) - elif self._is_constant_globalvar(node): - self._declare('constant ' + decl.name, tp) + elif (quals & model.Q_CONST) and not tp.is_array_type: + self._declare('constant ' + decl.name, tp, quals=quals) else: - self._declare('variable ' + decl.name, tp) + self._declare('variable ' + decl.name, tp, quals=quals) def parse_type(self, cdecl): ast, macros = 
self._parse('void __dummy(\n%s\n);' % cdecl)[:2] @@ -298,40 +301,51 @@ exprnode = ast.ext[-1].type.args.params[0] if isinstance(exprnode, pycparser.c_ast.ID): raise api.CDefError("unknown identifier '%s'" % (exprnode.name,)) - return self._get_type(exprnode.type) + tp, quals = self._get_type_and_quals(exprnode.type) + return tp - def _declare(self, name, obj, included=False): + def _declare(self, name, obj, included=False, quals=0): if name in self._declarations: - if self._declarations[name] is obj: + prevobj, prevquals = self._declarations[name] + if prevobj is obj and prevquals == quals: return if not self._override: raise api.FFIError( "multiple declarations of %s (for interactive usage, " "try cdef(xx, override=True))" % (name,)) assert '__dotdotdot__' not in name.split() - self._declarations[name] = obj + self._declarations[name] = (obj, quals) if included: self._included_declarations.add(obj) - def _get_type_pointer(self, type, const=False, declname=None): + def _extract_quals(self, type): + quals = 0 + if isinstance(type, (pycparser.c_ast.TypeDecl, + pycparser.c_ast.PtrDecl)): + if 'const' in type.quals: + quals |= model.Q_CONST + if 'restrict' in type.quals: + quals |= model.Q_RESTRICT + return quals + + def _get_type_pointer(self, type, quals, declname=None): if isinstance(type, model.RawFunctionType): return type.as_function_pointer() if (isinstance(type, model.StructOrUnionOrEnum) and type.name.startswith('$') and type.name[1:].isdigit() and type.forcename is None and declname is not None): - return model.NamedPointerType(type, declname) - if const: - return model.ConstPointerType(type) - return model.PointerType(type) + return model.NamedPointerType(type, declname, quals) + return model.PointerType(type, quals) - def _get_type(self, typenode, name=None, partial_length_ok=False): + def _get_type_and_quals(self, typenode, name=None, partial_length_ok=False): # first, dereference typedefs, if we have it already parsed, we're good if (isinstance(typenode, pycparser.c_ast.TypeDecl) and isinstance(typenode.type, pycparser.c_ast.IdentifierType) and len(typenode.type.names) == 1 and ('typedef ' + typenode.type.names[0]) in self._declarations): - type = self._declarations['typedef ' + typenode.type.names[0]] - return type + tp, quals = self._declarations['typedef ' + typenode.type.names[0]] + quals |= self._extract_quals(typenode) + return tp, quals # if isinstance(typenode, pycparser.c_ast.ArrayDecl): # array type @@ -340,18 +354,19 @@ else: length = self._parse_constant( typenode.dim, partial_length_ok=partial_length_ok) - tp = self._get_type(typenode.type, + tp, quals = self._get_type_and_quals(typenode.type, partial_length_ok=partial_length_ok) - return model.ArrayType(tp, length) + return model.ArrayType(tp, length), quals # if isinstance(typenode, pycparser.c_ast.PtrDecl): # pointer type - const = (isinstance(typenode.type, pycparser.c_ast.TypeDecl) - and 'const' in typenode.type.quals) - return self._get_type_pointer(self._get_type(typenode.type), const, - declname=name) + itemtype, itemquals = self._get_type_and_quals(typenode.type) + tp = self._get_type_pointer(itemtype, itemquals, declname=name) + quals = self._extract_quals(typenode) + return tp, quals # if isinstance(typenode, pycparser.c_ast.TypeDecl): + quals = self._extract_quals(typenode) type = typenode.type if isinstance(type, pycparser.c_ast.IdentifierType): # assume a primitive type. 
get it from .names, but reduce @@ -379,35 +394,38 @@ names = newnames + names ident = ' '.join(names) if ident == 'void': - return model.void_type + return model.void_type, quals if ident == '__dotdotdot__': raise api.FFIError(':%d: bad usage of "..."' % typenode.coord.line) - return resolve_common_type(ident) + return resolve_common_type(ident), quals # if isinstance(type, pycparser.c_ast.Struct): # 'struct foobar' - return self._get_struct_union_enum_type('struct', type, name) + tp = self._get_struct_union_enum_type('struct', type, name) + return tp, quals # if isinstance(type, pycparser.c_ast.Union): # 'union foobar' - return self._get_struct_union_enum_type('union', type, name) + tp = self._get_struct_union_enum_type('union', type, name) + return tp, quals # if isinstance(type, pycparser.c_ast.Enum): # 'enum foobar' - return self._get_struct_union_enum_type('enum', type, name) + tp = self._get_struct_union_enum_type('enum', type, name) + return tp, quals # if isinstance(typenode, pycparser.c_ast.FuncDecl): # a function type - return self._parse_function_type(typenode, name) + return self._parse_function_type(typenode, name), 0 # # nested anonymous structs or unions end up here if isinstance(typenode, pycparser.c_ast.Struct): return self._get_struct_union_enum_type('struct', typenode, name, - nested=True) + nested=True), 0 if isinstance(typenode, pycparser.c_ast.Union): return self._get_struct_union_enum_type('union', typenode, name, - nested=True) + nested=True), 0 # raise api.FFIError(":%d: bad or unsupported type declaration" % typenode.coord.line) @@ -426,28 +444,21 @@ raise api.CDefError( "%s: a function with only '(...)' as argument" " is not correct C" % (funcname or 'in expression')) - args = [self._as_func_arg(self._get_type(argdeclnode.type)) + args = [self._as_func_arg(*self._get_type_and_quals(argdeclnode.type)) for argdeclnode in params] if not ellipsis and args == [model.void_type]: args = [] - result = self._get_type(typenode.type) + result, quals = self._get_type_and_quals(typenode.type) return model.RawFunctionType(tuple(args), result, ellipsis) - def _as_func_arg(self, type): + def _as_func_arg(self, type, quals): if isinstance(type, model.ArrayType): - return model.PointerType(type.item) + return model.PointerType(type.item, quals) elif isinstance(type, model.RawFunctionType): return type.as_function_pointer() else: return type - def _is_constant_globalvar(self, typenode): - if isinstance(typenode, pycparser.c_ast.PtrDecl): - return 'const' in typenode.quals - if isinstance(typenode, pycparser.c_ast.TypeDecl): - return 'const' in typenode.quals - return False - def _get_struct_union_enum_type(self, kind, type, name=None, nested=False): # First, a level of caching on the exact 'type' node of the AST. 
# This is obscure, but needed because pycparser "unrolls" declarations @@ -486,7 +497,7 @@ else: explicit_name = name key = '%s %s' % (kind, name) - tp = self._declarations.get(key, None) + tp, _ = self._declarations.get(key, (None, None)) # if tp is None: if kind == 'struct': @@ -528,6 +539,7 @@ fldnames = [] fldtypes = [] fldbitsize = [] + fldquals = [] for decl in type.decls: if (isinstance(decl.type, pycparser.c_ast.IdentifierType) and ''.join(decl.type.names) == '__dotdotdot__'): @@ -541,7 +553,8 @@ else: bitsize = self._parse_constant(decl.bitsize) self._partial_length = False - type = self._get_type(decl.type, partial_length_ok=True) + type, fqual = self._get_type_and_quals(decl.type, + partial_length_ok=True) if self._partial_length: self._make_partial(tp, nested) if isinstance(type, model.StructType) and type.partial: @@ -549,9 +562,11 @@ fldnames.append(decl.name or '') fldtypes.append(type) fldbitsize.append(bitsize) + fldquals.append(fqual) tp.fldnames = tuple(fldnames) tp.fldtypes = tuple(fldtypes) tp.fldbitsize = tuple(fldbitsize) + tp.fldquals = tuple(fldquals) if fldbitsize != [-1] * len(fldbitsize): if isinstance(tp, model.StructType) and tp.partial: raise NotImplementedError("%s: using both bitfields and '...;'" @@ -632,14 +647,12 @@ return tp def include(self, other): - for name, tp in other._declarations.items(): + for name, (tp, quals) in other._declarations.items(): if name.startswith('anonymous $enum_$'): continue # fix for test_anonymous_enum_include kind = name.split(' ', 1)[0] - if kind in ('struct', 'union', 'enum', 'anonymous'): - self._declare(name, tp, included=True) - elif kind == 'typedef': - self._declare(name, tp, included=True) + if kind in ('struct', 'union', 'enum', 'anonymous', 'typedef'): + self._declare(name, tp, included=True, quals=quals) for k, v in other._int_constants.items(): self._add_constants(k, v) diff --git a/lib_pypy/cffi/model.py b/lib_pypy/cffi/model.py --- a/lib_pypy/cffi/model.py +++ b/lib_pypy/cffi/model.py @@ -4,11 +4,26 @@ from .lock import allocate_lock +# type qualifiers +Q_CONST = 0x01 +Q_RESTRICT = 0x02 + +def qualify(quals, replace_with): + if quals & Q_CONST: + replace_with = ' const ' + replace_with.lstrip() + if quals & Q_RESTRICT: + # It seems that __restrict is supported by gcc and msvc. + # If you hit some different compiler, add a #define in + # _cffi_include.h for it (and in its copies, documented there) + replace_with = ' __restrict ' + replace_with.lstrip() + return replace_with + + class BaseTypeByIdentity(object): is_array_type = False is_raw_function = False - def get_c_name(self, replace_with='', context='a C file'): + def get_c_name(self, replace_with='', context='a C file', quals=0): result = self.c_name_with_marker assert result.count('&') == 1 # some logic duplication with ffi.getctype()... 
:-( @@ -18,6 +33,7 @@ replace_with = '(%s)' % replace_with elif not replace_with[0] in '[(': replace_with = ' ' + replace_with + replace_with = qualify(quals, replace_with) result = result.replace('&', replace_with) if '$' in result: from .ffiplatform import VerificationError @@ -225,16 +241,14 @@ class PointerType(BaseType): - _attrs_ = ('totype',) - _base_pattern = " *&" - _base_pattern_array = "(*&)" + _attrs_ = ('totype', 'quals') - def __init__(self, totype): + def __init__(self, totype, quals=0): self.totype = totype + self.quals = quals + extra = qualify(quals, " *&") if totype.is_array_type: - extra = self._base_pattern_array - else: - extra = self._base_pattern + extra = "(%s)" % (extra.lstrip(),) self.c_name_with_marker = totype.c_name_with_marker.replace('&', extra) def build_backend_type(self, ffi, finishlist): @@ -243,10 +257,8 @@ voidp_type = PointerType(void_type) - -class ConstPointerType(PointerType): - _base_pattern = " const *&" - _base_pattern_array = "(const *&)" +def ConstPointerType(totype): + return PointerType(totype, Q_CONST) const_voidp_type = ConstPointerType(void_type) @@ -254,8 +266,8 @@ class NamedPointerType(PointerType): _attrs_ = ('totype', 'name') - def __init__(self, totype, name): - PointerType.__init__(self, totype) + def __init__(self, totype, name, quals=0): + PointerType.__init__(self, totype, quals) self.name = name self.c_name_with_marker = name + '&' @@ -315,11 +327,12 @@ partial = False packed = False - def __init__(self, name, fldnames, fldtypes, fldbitsize): + def __init__(self, name, fldnames, fldtypes, fldbitsize, fldquals=None): self.name = name self.fldnames = fldnames self.fldtypes = fldtypes self.fldbitsize = fldbitsize + self.fldquals = fldquals self.build_c_name_with_marker() def has_anonymous_struct_fields(self): @@ -331,14 +344,17 @@ return False def enumfields(self): - for name, type, bitsize in zip(self.fldnames, self.fldtypes, - self.fldbitsize): + fldquals = self.fldquals + if fldquals is None: + fldquals = (0,) * len(self.fldnames) + for name, type, bitsize, quals in zip(self.fldnames, self.fldtypes, + self.fldbitsize, fldquals): if name == '' and isinstance(type, StructOrUnion): # nested anonymous struct/union for result in type.enumfields(): yield result else: - yield (name, type, bitsize) + yield (name, type, bitsize, quals) def force_flatten(self): # force the struct or union to have a declaration that lists @@ -347,13 +363,16 @@ names = [] types = [] bitsizes = [] - for name, type, bitsize in self.enumfields(): + fldquals = [] + for name, type, bitsize, quals in self.enumfields(): names.append(name) types.append(type) bitsizes.append(bitsize) + fldquals.append(quals) self.fldnames = tuple(names) self.fldtypes = tuple(types) self.fldbitsize = tuple(bitsizes) + self.fldquals = tuple(fldquals) def get_cached_btype(self, ffi, finishlist, can_delay=False): BType = StructOrUnionOrEnum.get_cached_btype(self, ffi, finishlist, diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py --- a/lib_pypy/cffi/recompiler.py +++ b/lib_pypy/cffi/recompiler.py @@ -195,17 +195,15 @@ elif isinstance(tp, model.StructOrUnion): if tp.fldtypes is not None and ( tp not in self.ffi._parser._included_declarations): - for name1, tp1, _ in tp.enumfields(): + for name1, tp1, _, _ in tp.enumfields(): self._do_collect_type(self._field_type(tp, name1, tp1)) else: for _, x in tp._get_items(): self._do_collect_type(x) - def _get_declarations(self): - return sorted(self.ffi._parser._declarations.items()) - def _generate(self, step_name): - for 
name, tp in self._get_declarations(): + lst = self.ffi._parser._declarations.items() + for name, (tp, quals) in sorted(lst): kind, realname = name.split(' ', 1) try: method = getattr(self, '_generate_cpy_%s_%s' % (kind, @@ -214,6 +212,7 @@ raise ffiplatform.VerificationError( "not implemented in recompile(): %r" % name) try: + self._current_quals = quals method(tp, realname) except Exception as e: model.attach_exception_info(e, name) @@ -774,7 +773,7 @@ prnt('{') prnt(' /* only to generate compile-time warnings or errors */') prnt(' (void)p;') - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): try: if ftype.is_integer_type() or fbitsize >= 0: # accept all integers, but complain on float or double @@ -789,7 +788,8 @@ ftype = ftype.item fname = fname + '[0]' prnt(' { %s = &p->%s; (void)tmp; }' % ( - ftype.get_c_name('*tmp', 'field %r'%fname), fname)) + ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual), + fname)) except ffiplatform.VerificationError as e: prnt(' /* %s */' % str(e)) # cannot verify it, ignore prnt('}') @@ -823,7 +823,7 @@ c_fields = [] if reason_for_not_expanding is None: enumfields = list(tp.enumfields()) - for fldname, fldtype, fbitsize in enumfields: + for fldname, fldtype, fbitsize, fqual in enumfields: fldtype = self._field_type(tp, fldname, fldtype) # cname is None for _add_missing_struct_unions() only op = OP_NOOP @@ -879,7 +879,9 @@ # because they don't have any known C name. Check that they are # not partial (we can't complete or verify them!) and emit them # anonymously. - for tp in list(self._struct_unions): + lst = list(self._struct_unions.items()) + lst.sort(key=lambda tp_order: tp_order[1]) + for tp, order in lst: if tp not in self._seen_struct_unions: if tp.partial: raise NotImplementedError("internal inconsistency: %r is " @@ -1004,6 +1006,8 @@ def _enum_ctx(self, tp, cname): type_index = self._typesdict[tp] type_op = CffiOp(OP_ENUM, -1) + if self.target_is_python: + tp.check_not_partial() for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): self._lsts["global"].append( GlobalExpr(enumerator, '_cffi_const_%s' % enumerator, type_op, @@ -1081,7 +1085,8 @@ # if 'tp' were a function type, but that is not possible here. # (If 'tp' is a function _pointer_ type, then casts from "fn_t # **" to "void *" are again no-ops, as far as I can tell.) - prnt('static ' + tp.get_c_name('*_cffi_var_%s(void)' % (name,))) + decl = '*_cffi_var_%s(void)' % (name,) + prnt('static ' + tp.get_c_name(decl, quals=self._current_quals)) prnt('{') prnt(' return %s(%s);' % (ampersand, name)) prnt('}') diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py --- a/lib_pypy/cffi/vengine_cpy.py +++ b/lib_pypy/cffi/vengine_cpy.py @@ -197,7 +197,10 @@ return library def _get_declarations(self): - return sorted(self.ffi._parser._declarations.items()) + lst = [(key, tp) for (key, (tp, qual)) in + self.ffi._parser._declarations.items()] + lst.sort() + return lst def _generate(self, step_name): for name, tp in self._get_declarations(): @@ -468,7 +471,7 @@ prnt('{') prnt(' /* only to generate compile-time warnings or errors */') prnt(' (void)p;') - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): if (isinstance(ftype, model.PrimitiveType) and ftype.is_integer_type()) or fbitsize >= 0: # accept all integers, but complain on float or double @@ -477,7 +480,8 @@ # only accept exactly the type declared. 
try: prnt(' { %s = &p->%s; (void)tmp; }' % ( - ftype.get_c_name('*tmp', 'field %r'%fname), fname)) + ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual), + fname)) except ffiplatform.VerificationError as e: prnt(' /* %s */' % str(e)) # cannot verify it, ignore prnt('}') @@ -488,7 +492,7 @@ prnt(' static Py_ssize_t nums[] = {') prnt(' sizeof(%s),' % cname) prnt(' offsetof(struct _cffi_aligncheck, y),') - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now prnt(' offsetof(%s, %s),' % (cname, fname)) @@ -552,7 +556,7 @@ check(layout[0], ffi.sizeof(BStruct), "wrong total size") check(layout[1], ffi.alignof(BStruct), "wrong total alignment") i = 2 - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now check(layout[i], ffi.offsetof(BStruct, fname), diff --git a/lib_pypy/cffi/vengine_gen.py b/lib_pypy/cffi/vengine_gen.py --- a/lib_pypy/cffi/vengine_gen.py +++ b/lib_pypy/cffi/vengine_gen.py @@ -87,7 +87,10 @@ return library def _get_declarations(self): - return sorted(self.ffi._parser._declarations.items()) + lst = [(key, tp) for (key, (tp, qual)) in + self.ffi._parser._declarations.items()] + lst.sort() + return lst def _generate(self, step_name): for name, tp in self._get_declarations(): @@ -260,7 +263,7 @@ prnt('{') prnt(' /* only to generate compile-time warnings or errors */') prnt(' (void)p;') - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): if (isinstance(ftype, model.PrimitiveType) and ftype.is_integer_type()) or fbitsize >= 0: # accept all integers, but complain on float or double @@ -269,7 +272,8 @@ # only accept exactly the type declared. 
try: prnt(' { %s = &p->%s; (void)tmp; }' % ( - ftype.get_c_name('*tmp', 'field %r'%fname), fname)) + ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual), + fname)) except ffiplatform.VerificationError as e: prnt(' /* %s */' % str(e)) # cannot verify it, ignore prnt('}') @@ -280,7 +284,7 @@ prnt(' static intptr_t nums[] = {') prnt(' sizeof(%s),' % cname) prnt(' offsetof(struct _cffi_aligncheck, y),') - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now prnt(' offsetof(%s, %s),' % (cname, fname)) @@ -342,7 +346,7 @@ check(layout[0], ffi.sizeof(BStruct), "wrong total size") check(layout[1], ffi.alignof(BStruct), "wrong total alignment") i = 2 - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now check(layout[i], ffi.offsetof(BStruct, fname), diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -39,8 +39,9 @@ "_csv", "cppyy", "_pypyjson" ]) -if (sys.platform.startswith('linux') and os.uname()[4] == 'x86_64' - and sys.maxint > 2**32): # it's not enough that we get x86_64 +if ((sys.platform.startswith('linux') or sys.platform == 'darwin') + and os.uname()[4] == 'x86_64' and sys.maxint > 2**32): + # it's not enough that we get x86_64 working_modules.add('_vmprof') translation_modules = default_modules.copy() diff --git a/pypy/module/__builtin__/app_functional.py b/pypy/module/__builtin__/app_functional.py --- a/pypy/module/__builtin__/app_functional.py +++ b/pypy/module/__builtin__/app_functional.py @@ -5,6 +5,7 @@ from __future__ import with_statement import operator from __pypy__ import resizelist_hint, newlist_hint +from __pypy__ import specialized_zip_2_lists # ____________________________________________________________ @@ -217,11 +218,16 @@ in length to the length of the shortest argument sequence.""" l = len(sequences) if l == 2: + # A very fast path if the two sequences are lists + seq0 = sequences[0] + seq1 = sequences[1] + try: + return specialized_zip_2_lists(seq0, seq1) + except TypeError: + pass # This is functionally the same as the code below, but more # efficient because it unrolls the loops over 'sequences'. # Only for two arguments, which is the most common case. 
- seq0 = sequences[0] - seq1 = sequences[1] iter0 = iter(seq0) iter1 = iter(seq1) hint = min(100000000, # max 100M diff --git a/pypy/module/__builtin__/interp_classobj.py b/pypy/module/__builtin__/interp_classobj.py --- a/pypy/module/__builtin__/interp_classobj.py +++ b/pypy/module/__builtin__/interp_classobj.py @@ -253,26 +253,27 @@ def binaryop(self, space, w_other): w_a, w_b = _coerce_helper(space, self, w_other) - if w_a is None: - w_a = self - w_b = w_other - if w_a is self: - w_meth = self.getattr(space, specialname, False) + if isinstance(w_a, W_InstanceObject): + w_meth = w_a.getattr(space, specialname, False) if w_meth is None: return space.w_NotImplemented return space.call_function(w_meth, w_b) else: + # fall back to space.xxx() if coerce returns a non-W_Instance + # object as first argument return getattr(space, objspacename)(w_a, w_b) binaryop.func_name = name def rbinaryop(self, space, w_other): w_a, w_b = _coerce_helper(space, self, w_other) - if w_a is None or w_a is self: - w_meth = self.getattr(space, rspecialname, False) + if isinstance(w_a, W_InstanceObject): + w_meth = w_a.getattr(space, rspecialname, False) if w_meth is None: return space.w_NotImplemented - return space.call_function(w_meth, w_other) + return space.call_function(w_meth, w_b) else: + # fall back to space.xxx() if coerce returns a non-W_Instance + # object as first argument return getattr(space, objspacename)(w_b, w_a) rbinaryop.func_name = "r" + name return binaryop, rbinaryop @@ -283,7 +284,7 @@ except OperationError, e: if not e.match(space, space.w_TypeError): raise - return [None, None] + return [w_self, w_other] return space.fixedview(w_tup, 2) def descr_instance_new(space, w_type, w_class, w_dict=None): @@ -523,13 +524,9 @@ def descr_cmp(self, space, w_other): # do all the work here like CPython w_a, w_b = _coerce_helper(space, self, w_other) - if w_a is None: - w_a = self - w_b = w_other - else: - if (not isinstance(w_a, W_InstanceObject) and - not isinstance(w_b, W_InstanceObject)): - return space.cmp(w_a, w_b) + if (not isinstance(w_a, W_InstanceObject) and + not isinstance(w_b, W_InstanceObject)): + return space.cmp(w_a, w_b) if isinstance(w_a, W_InstanceObject): w_func = w_a.getattr(space, '__cmp__', False) if w_func is not None: @@ -636,42 +633,36 @@ def descr_pow(self, space, w_other, w_modulo=None): if space.is_none(w_modulo): w_a, w_b = _coerce_helper(space, self, w_other) - if w_a is None: - w_a = self - w_b = w_other - if w_a is self: - w_func = self.getattr(space, '__pow__', False) - if w_func is not None: - return space.call_function(w_func, w_other) - return space.w_NotImplemented + if isinstance(w_a, W_InstanceObject): + w_func = w_a.getattr(space, '__pow__', False) + if w_func is None: + return space.w_NotImplemented + return space.call_function(w_func, w_other) else: return space.pow(w_a, w_b, space.w_None) else: # CPython also doesn't try coercion in this case w_func = self.getattr(space, '__pow__', False) - if w_func is not None: - return space.call_function(w_func, w_other, w_modulo) - return space.w_NotImplemented + if w_func is None: + return space.w_NotImplemented + return space.call_function(w_func, w_other, w_modulo) def descr_rpow(self, space, w_other, w_modulo=None): if space.is_none(w_modulo): w_a, w_b = _coerce_helper(space, self, w_other) - if w_a is None: - w_a = self - w_b = w_other - if w_a is self: - w_func = self.getattr(space, '__rpow__', False) - if w_func is not None: - return space.call_function(w_func, w_other) - return space.w_NotImplemented + if 
isinstance(w_a, W_InstanceObject): + w_func = w_a.getattr(space, '__rpow__', False) + if w_func is None: + return space.w_NotImplemented + return space.call_function(w_func, w_other) else: return space.pow(w_b, w_a, space.w_None) else: # CPython also doesn't try coercion in this case w_func = self.getattr(space, '__rpow__', False) - if w_func is not None: - return space.call_function(w_func, w_other, w_modulo) - return space.w_NotImplemented + if w_func is None: + return space.w_NotImplemented + return space.call_function(w_func, w_other, w_modulo) def descr_next(self, space): w_func = self.getattr(space, 'next', False) diff --git a/pypy/module/__builtin__/test/test_classobj.py b/pypy/module/__builtin__/test/test_classobj.py --- a/pypy/module/__builtin__/test/test_classobj.py +++ b/pypy/module/__builtin__/test/test_classobj.py @@ -417,6 +417,22 @@ pass raises(TypeError, coerce, B(), []) + def test_coerce_inf(self): + class B: + def __coerce__(self, other): + return B(), B() + def __add__(self, other): + return 42 + assert B() + B() == 42 + + def test_coerce_reverse(self): + class CoerceNumber: + def __coerce__(self, other): + assert isinstance(other, int) + return (6, other) + assert 5 + CoerceNumber() == 11 + assert 2 ** CoerceNumber() == 64 + def test_binaryop(self): class A: def __add__(self, other): diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -83,6 +83,7 @@ 'newdict' : 'interp_dict.newdict', 'reversed_dict' : 'interp_dict.reversed_dict', 'strategy' : 'interp_magic.strategy', # dict,set,list + 'specialized_zip_2_lists' : 'interp_magic.specialized_zip_2_lists', 'set_debug' : 'interp_magic.set_debug', 'locals_to_fast' : 'interp_magic.locals_to_fast', 'save_module_content_for_future_reload': diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -147,3 +147,7 @@ @unwrap_spec(w_module=MixedModule) def save_module_content_for_future_reload(space, w_module): w_module.save_module_content_for_future_reload() + +def specialized_zip_2_lists(space, w_list1, w_list2): + from pypy.objspace.std.specialisedtupleobject import specialized_zip_2_lists + return specialized_zip_2_lists(space, w_list1, w_list2) diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py --- a/pypy/module/_cffi_backend/ctypeobj.py +++ b/pypy/module/_cffi_backend/ctypeobj.py @@ -11,7 +11,8 @@ class W_CType(W_Root): - _attrs_ = ['space', 'size', 'name', 'name_position', '_lifeline_'] + _attrs_ = ['space', 'size', 'name', 'name_position', '_lifeline_', + '_pointer_type'] _immutable_fields_ = ['size?', 'name', 'name_position'] # note that 'size' is not strictly immutable, because it can change # from -1 to the real value in the W_CTypeStruct subclass. diff --git a/pypy/module/_cffi_backend/ctypeprim.py b/pypy/module/_cffi_backend/ctypeprim.py --- a/pypy/module/_cffi_backend/ctypeprim.py +++ b/pypy/module/_cffi_backend/ctypeprim.py @@ -7,6 +7,7 @@ from rpython.rlib.rarithmetic import r_uint, r_ulonglong, intmask from rpython.rlib import jit from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rtyper.tool import rfficache from pypy.interpreter.error import oefmt from pypy.module._cffi_backend import cdataobj, misc @@ -130,7 +131,8 @@ # though it may be signed when 'wchar_t' is written to C). 
WCHAR_INT = {(2, False): rffi.USHORT, (4, False): rffi.UINT, - (4, True): rffi.INT}[rffi.sizeof(lltype.UniChar), rffi.r_wchar_t.SIGN] + (4, True): rffi.INT}[rffi.sizeof(lltype.UniChar), + rfficache.signof_c_type('wchar_t')] WCHAR_INTP = rffi.CArrayPtr(WCHAR_INT) class W_CTypePrimitiveUniChar(W_CTypePrimitiveCharOrUniChar): diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py --- a/pypy/module/_cffi_backend/ctypeptr.py +++ b/pypy/module/_cffi_backend/ctypeptr.py @@ -168,7 +168,7 @@ class W_CTypePointer(W_CTypePtrBase): - _attrs_ = ['is_file', 'cache_array_type', 'is_void_ptr'] + _attrs_ = ['is_file', 'cache_array_type', 'is_void_ptr', '_array_types'] _immutable_fields_ = ['is_file', 'cache_array_type?', 'is_void_ptr'] kind = "pointer" cache_array_type = None diff --git a/pypy/module/_cffi_backend/newtype.py b/pypy/module/_cffi_backend/newtype.py --- a/pypy/module/_cffi_backend/newtype.py +++ b/pypy/module/_cffi_backend/newtype.py @@ -4,7 +4,7 @@ from rpython.rlib.objectmodel import specialize, r_dict, compute_identity_hash from rpython.rlib.rarithmetic import ovfcheck, intmask -from rpython.rlib import jit +from rpython.rlib import jit, rweakref from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.tool import rffi_platform @@ -23,27 +23,12 @@ class UniqueCache: def __init__(self, space): - self.ctvoid = None # There can be only one - self.ctvoidp = None # Cache for self.pointers[self.ctvoid] - self.ctchara = None # Cache for self.arrays[charp, -1] - self.primitives = {} # Keys: name - self.pointers = {} # Keys: base_ctype - self.arrays = {} # Keys: (ptr_ctype, length_or_-1) - self.functions = r_dict(# Keys: (fargs, w_fresult, ellipsis) - _func_key_eq, _func_key_hash) - -def _func_key_eq((fargs1, w_fresult1, ellipsis1), - (fargs2, w_fresult2, ellipsis2)): - return (fargs1 == fargs2 and # list equality here - w_fresult1 is w_fresult2 and - ellipsis1 == ellipsis2) - -def _func_key_hash((fargs, w_fresult, ellipsis)): - x = compute_identity_hash(w_fresult) ^ ellipsis - for w_arg in fargs: - y = compute_identity_hash(w_arg) - x = intmask((1000003 * x) ^ y) - return x + self.ctvoid = None # Cache for the 'void' type + self.ctvoidp = None # Cache for the 'void *' type + self.ctchara = None # Cache for the 'char[]' type + self.primitives = {} # Cache for {name: primitive_type} + self.functions = [] # see _new_function_type() + self.for_testing = False def _clean_cache(space): "NOT_RPYTHON" @@ -165,20 +150,24 @@ # ____________________________________________________________ +@specialize.memo() +def _setup_wref(has_weakref_support): + assert has_weakref_support, "_cffi_backend requires weakrefs" + ctypeobj.W_CType._pointer_type = rweakref.dead_ref + ctypeptr.W_CTypePointer._array_types = None + @unwrap_spec(w_ctype=ctypeobj.W_CType) def new_pointer_type(space, w_ctype): return _new_pointer_type(space, w_ctype) @jit.elidable def _new_pointer_type(space, w_ctype): - unique_cache = space.fromcache(UniqueCache) - try: - return unique_cache.pointers[w_ctype] - except KeyError: - pass - ctypepointer = ctypeptr.W_CTypePointer(space, w_ctype) - unique_cache.pointers[w_ctype] = ctypepointer - return ctypepointer + _setup_wref(rweakref.has_weakref_support()) + ctptr = w_ctype._pointer_type() + if ctptr is None: + ctptr = ctypeptr.W_CTypePointer(space, w_ctype) + w_ctype._pointer_type = rweakref.ref(ctptr) + return ctptr # ____________________________________________________________ @@ -195,16 +184,19 @@ @jit.elidable def _new_array_type(space, w_ctptr, 
length): - unique_cache = space.fromcache(UniqueCache) - unique_key = (w_ctptr, length) - try: - return unique_cache.arrays[unique_key] - except KeyError: - pass - # + _setup_wref(rweakref.has_weakref_support()) if not isinstance(w_ctptr, ctypeptr.W_CTypePointer): raise OperationError(space.w_TypeError, space.wrap("first arg must be a pointer ctype")) + arrays = w_ctptr._array_types + if arrays is None: + arrays = rweakref.RWeakValueDictionary(int, ctypearray.W_CTypeArray) + w_ctptr._array_types = arrays + else: + ctype = arrays.get(length) + if ctype is not None: + return ctype + # ctitem = w_ctptr.ctitem if ctitem.size < 0: raise oefmt(space.w_ValueError, "array item of unknown size: '%s'", @@ -222,7 +214,7 @@ extra = '[%d]' % length # ctype = ctypearray.W_CTypeArray(space, w_ctptr, length, arraysize, extra) - unique_cache.arrays[unique_key] = ctype + arrays.set(length, ctype) return ctype # ____________________________________________________________ @@ -612,29 +604,69 @@ fargs.append(w_farg) return _new_function_type(space, fargs, w_fresult, bool(ellipsis)) +def _func_key_hash(unique_cache, fargs, fresult, ellipsis): + x = compute_identity_hash(fresult) + for w_arg in fargs: + y = compute_identity_hash(w_arg) + x = intmask((1000003 * x) ^ y) + x ^= ellipsis + if unique_cache.for_testing: # constant-folded to False in translation; + x &= 3 # but for test, keep only 2 bits of hash + return x + # can't use @jit.elidable here, because it might call back to random # space functions via force_lazy_struct() -def _new_function_type(space, fargs, w_fresult, ellipsis=False): +def _new_function_type(space, fargs, fresult, ellipsis=False): + try: + return _get_function_type(space, fargs, fresult, ellipsis) + except KeyError: + return _build_function_type(space, fargs, fresult, ellipsis) + +@jit.elidable +def _get_function_type(space, fargs, fresult, ellipsis): + # This function is elidable because if called again with exactly the + # same arguments (and if it didn't raise KeyError), it would give + # the same result, at least as long as this result is still live. + # + # 'unique_cache.functions' is a list of weak dicts, each mapping + # the func_hash number to a W_CTypeFunc. There is normally only + # one such dict, but in case of hash collision, there might be + # more. 
+ unique_cache = space.fromcache(UniqueCache) + func_hash = _func_key_hash(unique_cache, fargs, fresult, ellipsis) + for weakdict in unique_cache.functions: + ctype = weakdict.get(func_hash) + if (ctype is not None and + ctype.ctitem is fresult and + ctype.fargs == fargs and + ctype.ellipsis == ellipsis): + return ctype + raise KeyError + +@jit.dont_look_inside +def _build_function_type(space, fargs, fresult, ellipsis): from pypy.module._cffi_backend import ctypefunc # - unique_cache = space.fromcache(UniqueCache) - unique_key = (fargs, w_fresult, ellipsis) - try: - return unique_cache.functions[unique_key] - except KeyError: - pass - # - if ((w_fresult.size < 0 and - not isinstance(w_fresult, ctypevoid.W_CTypeVoid)) - or isinstance(w_fresult, ctypearray.W_CTypeArray)): - if (isinstance(w_fresult, ctypestruct.W_CTypeStructOrUnion) and - w_fresult.size < 0): + if ((fresult.size < 0 and + not isinstance(fresult, ctypevoid.W_CTypeVoid)) + or isinstance(fresult, ctypearray.W_CTypeArray)): + if (isinstance(fresult, ctypestruct.W_CTypeStructOrUnion) and + fresult.size < 0): raise oefmt(space.w_TypeError, - "result type '%s' is opaque", w_fresult.name) + "result type '%s' is opaque", fresult.name) else: raise oefmt(space.w_TypeError, - "invalid result type: '%s'", w_fresult.name) + "invalid result type: '%s'", fresult.name) # - fct = ctypefunc.W_CTypeFunc(space, fargs, w_fresult, ellipsis) - unique_cache.functions[unique_key] = fct + fct = ctypefunc.W_CTypeFunc(space, fargs, fresult, ellipsis) + unique_cache = space.fromcache(UniqueCache) + func_hash = _func_key_hash(unique_cache, fargs, fresult, ellipsis) + for weakdict in unique_cache.functions: + if weakdict.get(func_hash) is None: + weakdict.set(func_hash, fct) + break + else: + weakdict = rweakref.RWeakValueDictionary(int, ctypefunc.W_CTypeFunc) + unique_cache.functions.append(weakdict) + weakdict.set(func_hash, fct) return fct diff --git a/pypy/module/_cffi_backend/test/test_c.py b/pypy/module/_cffi_backend/test/test_c.py --- a/pypy/module/_cffi_backend/test/test_c.py +++ b/pypy/module/_cffi_backend/test/test_c.py @@ -22,7 +22,7 @@ from rpython.tool.udir import udir from pypy.interpreter import gateway from pypy.module._cffi_backend import Module -from pypy.module._cffi_backend.newtype import _clean_cache +from pypy.module._cffi_backend.newtype import _clean_cache, UniqueCache from rpython.translator import cdir from rpython.translator.platform import host from rpython.translator.tool.cbuild import ExternalCompilationInfo @@ -86,8 +86,10 @@ _all_test_c.find_and_load_library = func _all_test_c._testfunc = testfunc """) + UniqueCache.for_testing = True def teardown_method(self, method): + UniqueCache.for_testing = False _clean_cache(self.space) diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -844,6 +844,18 @@ b.byteswap() assert a != b + def test_unicode_ord_positive(self): + import sys + if sys.maxunicode == 0xffff: + skip("test for 32-bit unicodes") + a = self.array('u', '\xff\xff\xff\xff') + assert len(a) == 1 + assert repr(a[0]) == "u'\Uffffffff'" + if sys.maxint == 2147483647: + assert ord(a[0]) == -1 + else: + assert ord(a[0]) == 4294967295 + def test_weakref(self): import weakref a = self.array('c', 'Hi!') diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -117,12 +117,14 @@ return W_NDimArray(impl) 
@staticmethod - def new_slice(space, offset, strides, backstrides, shape, parent, orig_arr, dtype=None): + def new_slice(space, offset, strides, backstrides, shape, parent, w_arr, dtype=None): from pypy.module.micronumpy import concrete - + w_base = w_arr + if w_arr.implementation.base() is not None: + w_base = w_arr.implementation.base() impl = concrete.SliceArray(offset, strides, backstrides, shape, parent, - orig_arr, dtype) - return wrap_impl(space, space.type(orig_arr), orig_arr, impl) + w_base, dtype) + return wrap_impl(space, space.type(w_arr), w_arr, impl) @staticmethod def new_scalar(space, dtype, w_val=None): diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -568,11 +568,6 @@ self.size = ovfcheck(support.product_check(shape) * self.dtype.elsize) except OverflowError: raise oefmt(dtype.itemtype.space.w_ValueError, "array is too big.") - while orig_arr is not None: - assert isinstance(orig_arr, W_NDimArray) - if orig_arr.implementation.base() is None: - break - orig_arr = orig_arr.implementation.base() self.start = start self.orig_arr = orig_arr flags = parent.flags & NPY.ARRAY_ALIGNED diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -133,7 +133,9 @@ return w_arr else: imp = w_object.implementation - w_base = imp.base() or w_object + w_base = w_object + if imp.base() is not None: + w_base = imp.base() with imp as storage: sz = support.product(w_object.get_shape()) * dtype.elsize return W_NDimArray.from_shape_and_storage(space, diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -1308,6 +1308,9 @@ [space.wrap(0)]), space.wrap("b")]) builder = StringBuilder() + if self.get_dtype().is_object(): + raise oefmt(space.w_NotImplementedError, + "reduce for 'object' dtype not supported yet") if isinstance(self.implementation, SliceArray): iter, state = self.implementation.create_iter() while not iter.done(state): diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -2519,10 +2519,10 @@ assert b.shape == b[...].shape assert (b == b[...]).all() - a = np.arange(6).reshape(2, 3) + a = np.arange(6) if '__pypy__' in sys.builtin_module_names: raises(ValueError, "a[..., ...]") - b = a [..., 0] + b = a.reshape(2, 3)[..., 0] assert (b == [0, 3]).all() assert b.base is a diff --git a/pypy/module/micronumpy/test/test_object_arrays.py b/pypy/module/micronumpy/test/test_object_arrays.py --- a/pypy/module/micronumpy/test/test_object_arrays.py +++ b/pypy/module/micronumpy/test/test_object_arrays.py @@ -3,6 +3,8 @@ class AppTestObjectDtypes(BaseNumpyAppTest): + spaceconfig = dict(usemodules=["micronumpy", "struct", "binascii"]) + def setup_class(cls): BaseNumpyAppTest.setup_class.im_func(cls) cls.w_runappdirect = cls.space.wrap(option.runappdirect) @@ -187,3 +189,21 @@ assert b.shape == (1,) assert b.dtype == np.float_ assert (b == 1.0).all() + + + def test__reduce__(self): + from numpy import arange, dtype + from cPickle import loads, dumps + import sys + + a = arange(15).astype(object) + if '__pypy__' in sys.builtin_module_names: + raises(NotImplementedError, dumps, a) + skip('not implemented yet') + b = loads(dumps(a)) 
+ assert (a == b).all() + + a = arange(15).astype(object).reshape((3, 5)) + b = loads(dumps(a)) + assert (a == b).all() + diff --git a/pypy/module/pypyjit/interp_resop.py b/pypy/module/pypyjit/interp_resop.py --- a/pypy/module/pypyjit/interp_resop.py +++ b/pypy/module/pypyjit/interp_resop.py @@ -315,11 +315,12 @@ """ ll_times = jit_hooks.stats_get_loop_run_times(None) w_times = space.newdict() - for i in range(len(ll_times)): - w_key = space.newtuple([space.wrap(ll_times[i].type), - space.wrap(ll_times[i].number)]) - space.setitem(w_times, w_key, - space.wrap(ll_times[i].counter)) + if ll_times: + for i in range(len(ll_times)): + w_key = space.newtuple([space.wrap(ll_times[i].type), + space.wrap(ll_times[i].number)]) + space.setitem(w_times, w_key, + space.wrap(ll_times[i].counter)) w_counters = space.newdict() for i, counter_name in enumerate(Counters.counter_names): v = jit_hooks.stats_get_counter_value(None, i) diff --git a/pypy/module/pypyjit/test/test_jit_hook.py b/pypy/module/pypyjit/test/test_jit_hook.py --- a/pypy/module/pypyjit/test/test_jit_hook.py +++ b/pypy/module/pypyjit/test/test_jit_hook.py @@ -213,22 +213,6 @@ self.on_abort() assert l == [('pypyjit', 'ABORT_TOO_LONG', [])] - def test_on_optimize(self): - import pypyjit - l = [] - - def hook(info): - l.append(info.jitdriver_name) - - def optimize_hook(info): - return [] - - pypyjit.set_compile_hook(hook) - pypyjit.set_optimize_hook(optimize_hook) - self.on_optimize() - self.on_compile() - assert l == ['pypyjit'] - def test_creation(self): from pypyjit import ResOperation diff --git a/pypy/module/pypyjit/test_pypy_c/test_buffers.py b/pypy/module/pypyjit/test_pypy_c/test_buffers.py --- a/pypy/module/pypyjit/test_pypy_c/test_buffers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_buffers.py @@ -28,7 +28,7 @@ def test_struct_unpack(self): def main(n): - import struct + import _struct as struct import array a = array.array('c', struct.pack('i', 42)) i = 0 diff --git a/pypy/module/pypyjit/test_pypy_c/test_jitlogparser.py b/pypy/module/pypyjit/test_pypy_c/test_jitlogparser.py --- a/pypy/module/pypyjit/test_pypy_c/test_jitlogparser.py +++ b/pypy/module/pypyjit/test_pypy_c/test_jitlogparser.py @@ -76,6 +76,6 @@ assert len(mod_bridges) in (1, 2, 3) # check that counts are reasonable (precise # may change in the future) - assert N - 2000 < sum(l.count for l in fn_with_bridges_loops) < N + 1000 + assert N - 2000 < sum(l.count for l in fn_with_bridges_loops) < N + 1500 diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/backend_tests.py @@ -2,7 +2,7 @@ import py import platform import sys, ctypes -from cffi import FFI, CDefError, FFIError +from cffi import FFI, CDefError, FFIError, VerificationMissing from pypy.module.test_lib_pypy.cffi_tests.support import * SIZE_OF_INT = ctypes.sizeof(ctypes.c_int) @@ -927,6 +927,14 @@ assert ffi.string(ffi.cast("enum foo", -16)) == "E" assert ffi.string(ffi.cast("enum foo", -8)) == "F" + def test_enum_partial(self): + ffi = FFI(backend=self.Backend()) + ffi.cdef(r"enum foo {A, ...}; enum bar { B, C };") + lib = ffi.dlopen(None) + assert lib.B == 0 + py.test.raises(VerificationMissing, getattr, lib, "A") + assert lib.C == 1 + def test_array_of_struct(self): ffi = FFI(backend=self.Backend()) ffi.cdef("struct foo { int a, b; };") diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_model.py 
b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_model.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_model.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_model.py @@ -58,6 +58,11 @@ assert ptr_type.get_c_name("") == "int(const *)[5]" assert ptr_type.get_c_name("*x") == "int(const * *x)[5]" +def test_qual_pointer_type(): + ptr_type = PointerType(PrimitiveType("long long"), Q_RESTRICT) + assert ptr_type.get_c_name("") == "long long __restrict *" + assert const_voidp_type.get_c_name("") == "void const *" + def test_unknown_pointer_type(): ptr_type = unknown_ptr_type("foo_p") assert ptr_type.get_c_name("") == "foo_p" diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_parsing.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_parsing.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_parsing.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_parsing.py @@ -308,7 +308,6 @@ ffi.cdef("void f(WPARAM);") def test__is_constant_globalvar(): - from cffi.cparser import Parser, _get_parser for input, expected_output in [ ("int a;", False), ("const int a;", True), @@ -325,11 +324,36 @@ ("int a[5][6];", False), ("const int a[5][6];", False), ]: - p = Parser() - ast = _get_parser().parse(input) - decl = ast.children()[0][1] - node = decl.type - assert p._is_constant_globalvar(node) == expected_output + ffi = FFI() + ffi.cdef(input) + declarations = ffi._parser._declarations + assert ('constant a' in declarations) == expected_output + assert ('variable a' in declarations) == (not expected_output) + +def test_restrict(): + from cffi import model + for input, expected_output in [ + ("int a;", False), + ("restrict int a;", True), + ("int *a;", False), + ]: + ffi = FFI() + ffi.cdef(input) + tp, quals = ffi._parser._declarations['variable a'] + assert bool(quals & model.Q_RESTRICT) == expected_output + +def test_different_const_funcptr_types(): + lst = [] + for input in [ + "int(*)(int *a)", + "int(*)(int const *a)", + "int(*)(int * const a)", + "int(*)(int const a[])"]: + ffi = FFI(backend=FakeBackend()) + lst.append(ffi._parser.parse_type(input)) + assert lst[0] != lst[1] + assert lst[0] == lst[2] + assert lst[1] == lst[3] def test_enum(): ffi = FFI() diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_verify.py @@ -1636,11 +1636,11 @@ def test_FILE_stored_explicitly(): ffi = FFI() - ffi.cdef("int myprintf(const char *, int); FILE *myfile;") + ffi.cdef("int myprintf11(const char *, int); FILE *myfile;") lib = ffi.verify(""" #include <stdio.h> FILE *myfile; - int myprintf(const char *out, int value) { + int myprintf11(const char *out, int value) { return fprintf(myfile, out, value); } """) @@ -1650,7 +1650,7 @@ lib.myfile = ffi.cast("FILE *", fw1) # fw1.write(b"X") - r = lib.myprintf(b"hello, %d!\n", ffi.cast("int", 42)) + r = lib.myprintf11(b"hello, %d!\n", ffi.cast("int", 42)) fw1.close() assert r == len("hello, 42!\n") # @@ -2248,3 +2248,13 @@ e = py.test.raises(VerificationError, ffi.verify, "") assert str(e.value) == ("feature not supported with ffi.verify(), but only " "with ffi.set_source(): 'typedef unsigned long... 
t1'") + +def test_const_fields(): + ffi = FFI() + ffi.cdef("""struct foo_s { const int a; void *const b; };""") + ffi.verify("""struct foo_s { const int a; void *const b; };""") + foo_s = ffi.typeof("struct foo_s") + assert foo_s.fields[0][0] == 'a' + assert foo_s.fields[0][1].type is ffi.typeof("int") + assert foo_s.fields[1][0] == 'b' + assert foo_s.fields[1][1].type is ffi.typeof("void *") diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py @@ -30,6 +30,32 @@ assert ffi.typeof("int[][10]") is ffi.typeof("int[][10]") assert ffi.typeof("int(*)()") is ffi.typeof("int(*)()") +def test_ffi_type_not_immortal(): + import weakref, gc + ffi = _cffi1_backend.FFI() + t1 = ffi.typeof("int **") + t2 = ffi.typeof("int *") + w1 = weakref.ref(t1) + w2 = weakref.ref(t2) + del t1, ffi + gc.collect() + assert w1() is None + assert w2() is t2 + ffi = _cffi1_backend.FFI() + assert ffi.typeof(ffi.new("int **")[0]) is t2 + # + ffi = _cffi1_backend.FFI() + t1 = ffi.typeof("int ***") + t2 = ffi.typeof("int **") + w1 = weakref.ref(t1) + w2 = weakref.ref(t2) + del t2, ffi + gc.collect() + assert w1() is t1 + assert w2() is not None # kept alive by t1 + ffi = _cffi1_backend.FFI() + assert ffi.typeof("int * *") is t1.item + def test_ffi_cache_type_globally(): ffi1 = _cffi1_backend.FFI() ffi2 = _cffi1_backend.FFI() diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_re_python.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_re_python.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_re_python.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_re_python.py @@ -2,7 +2,7 @@ import sys import py from cffi import FFI -from cffi import recompiler, ffiplatform +from cffi import recompiler, ffiplatform, VerificationMissing from pypy.module.test_lib_pypy.cffi_tests.udir import udir @@ -204,3 +204,10 @@ "foobar", _version=0x2594) assert str(e.value).startswith( "cffi out-of-line Python module 'foobar' has unknown version") + +def test_partial_enum(): + ffi = FFI() + ffi.cdef("enum foo { A, B, ... 
};") + ffi.set_source('test_partial_enum', None) + py.test.raises(VerificationMissing, ffi.emit_python_code, + str(tmpdir.join('test_partial_enum.py'))) diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py @@ -1192,3 +1192,92 @@ py.test.raises(ffi.error, getattr, lib, 'my_value') e = py.test.raises(ffi.error, setattr, lib, 'my_value', 50) assert str(e.value) == "global variable 'my_value' is at address NULL" + +def test_const_fields(): + ffi = FFI() + ffi.cdef("""struct foo_s { const int a; void *const b; };""") + lib = verify(ffi, 'test_const_fields', """ + struct foo_s { const int a; void *const b; };""") + foo_s = ffi.typeof("struct foo_s") + assert foo_s.fields[0][0] == 'a' + assert foo_s.fields[0][1].type is ffi.typeof("int") + assert foo_s.fields[1][0] == 'b' + assert foo_s.fields[1][1].type is ffi.typeof("void *") + +def test_restrict_fields(): + if sys.platform == 'win32': + py.test.skip("'__restrict__' probably not recognized") + ffi = FFI() + ffi.cdef("""struct foo_s { void * restrict b; };""") + lib = verify(ffi, 'test_restrict_fields', """ + struct foo_s { void * __restrict__ b; };""") + foo_s = ffi.typeof("struct foo_s") + assert foo_s.fields[0][0] == 'b' + assert foo_s.fields[0][1].type is ffi.typeof("void *") + +def test_const_array_fields(): + ffi = FFI() + ffi.cdef("""struct foo_s { const int a[4]; };""") + lib = verify(ffi, 'test_const_array_fields', """ + struct foo_s { const int a[4]; };""") + foo_s = ffi.typeof("struct foo_s") + assert foo_s.fields[0][0] == 'a' + assert foo_s.fields[0][1].type is ffi.typeof("int[4]") + +def test_const_array_fields_varlength(): + ffi = FFI() + ffi.cdef("""struct foo_s { const int a[]; ...; };""") + lib = verify(ffi, 'test_const_array_fields_varlength', """ + struct foo_s { const int a[4]; };""") + foo_s = ffi.typeof("struct foo_s") + assert foo_s.fields[0][0] == 'a' + assert foo_s.fields[0][1].type is ffi.typeof("int[]") + +def test_const_array_fields_unknownlength(): + ffi = FFI() + ffi.cdef("""struct foo_s { const int a[...]; ...; };""") + lib = verify(ffi, 'test_const_array_fields_unknownlength', """ + struct foo_s { const int a[4]; };""") + foo_s = ffi.typeof("struct foo_s") + assert foo_s.fields[0][0] == 'a' + assert foo_s.fields[0][1].type is ffi.typeof("int[4]") + +def test_const_function_args(): + ffi = FFI() + ffi.cdef("""int foobar(const int a, const int *b, const int c[]);""") + lib = verify(ffi, 'test_const_function_args', """ + int foobar(const int a, const int *b, const int c[]) { + return a + *b + *c; + } + """) + assert lib.foobar(100, ffi.new("int *", 40), ffi.new("int *", 2)) == 142 + +def test_const_function_type_args(): + ffi = FFI() + ffi.cdef("""int (*foobar)(const int a, const int *b, const int c[]);""") + lib = verify(ffi, 'test_const_function_type_args', """ + int (*foobar)(const int a, const int *b, const int c[]); + """) + t = ffi.typeof(lib.foobar) + assert t.args[0] is ffi.typeof("int") + assert t.args[1] is ffi.typeof("int *") + assert t.args[2] is ffi.typeof("int *") + +def test_const_constant(): + ffi = FFI() + ffi.cdef("""struct foo_s { int x,y; }; const struct foo_s myfoo;""") + lib = verify(ffi, 'test_const_constant', """ + struct foo_s { int x,y; }; const struct foo_s myfoo = { 40, 2 }; + """) + assert lib.myfoo.x == 40 + assert lib.myfoo.y == 2 + +def test_const_via_typedef(): + ffi 
= FFI() + ffi.cdef("""typedef const int const_t; const_t aaa;""") + lib = verify(ffi, 'test_const_via_typedef', """ + typedef const int const_t; + #define aaa 42 + """) + assert lib.aaa == 42 + py.test.raises(AttributeError, "lib.aaa = 43") diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_verify1.py @@ -1623,11 +1623,11 @@ def test_FILE_stored_explicitly(): ffi = FFI() - ffi.cdef("int myprintf(const char *, int); FILE *myfile;") + ffi.cdef("int myprintf11(const char *, int); FILE *myfile;") lib = ffi.verify(""" #include <stdio.h> FILE *myfile; - int myprintf(const char *out, int value) { + int myprintf11(const char *out, int value) { return fprintf(myfile, out, value); } """) @@ -1637,7 +1637,7 @@ lib.myfile = ffi.cast("FILE *", fw1) # fw1.write(b"X") - r = lib.myprintf(b"hello, %d!\n", ffi.cast("int", 42)) + r = lib.myprintf11(b"hello, %d!\n", ffi.cast("int", 42)) fw1.close() assert r == len("hello, 42!\n") # @@ -1923,7 +1923,7 @@ assert repr(ffi.typeof(lib.a)) == "<ctype 'char *[5]'>" def test_bug_const_char_ptr_array_2(): - ffi = FFI_warnings_not_error() # ignore warnings + ffi = FFI() ffi.cdef("""const int a[];""") lib = ffi.verify("""const int a[5];""") assert repr(ffi.typeof(lib.a)) == "<ctype 'int *'>" diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -123,7 +123,7 @@ f1 = self.floatval i2 = space.int_w(w_other) # (double-)floats have always at least 48 bits of precision - if LONG_BIT > 32 and not int_between((-1)<<48, i2, 1<<48): + if LONG_BIT > 32 and not int_between(-1, i2 >> 48, 1): res = do_compare_bigint(f1, rbigint.fromint(i2)) else: f2 = float(i2) diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -1396,16 +1396,19 @@ else: subitems_w = [self._none_value] * length l = self.unerase(w_list.lstorage) - for i in range(length): - try: - subitems_w[i] = l[start] - start += step - except IndexError: - raise + self._fill_in_with_sliced_items(subitems_w, l, start, step, length) storage = self.erase(subitems_w) return W_ListObject.from_storage_and_strategy( self.space, storage, self) + def _fill_in_with_sliced_items(self, subitems_w, l, start, step, length): + for i in range(length): + try: + subitems_w[i] = l[start] + start += step + except IndexError: + raise + def switch_to_next_strategy(self, w_list, w_sample_item): w_list.switch_to_object_strategy() diff --git a/pypy/objspace/std/sliceobject.py b/pypy/objspace/std/sliceobject.py --- a/pypy/objspace/std/sliceobject.py +++ b/pypy/objspace/std/sliceobject.py @@ -1,10 +1,12 @@ """Slice object""" +import sys from pypy.interpreter import gateway from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError from pypy.interpreter.typedef import GetSetProperty, TypeDef from rpython.rlib.objectmodel import specialize +from rpython.rlib import jit class W_SliceObject(W_Root): @@ -234,10 +236,19 @@ assert length >= 0 if start < 0: start = 0 - if stop < start: - stop = start - if stop > length: - stop = length - if start > length: - start = length + # hack for the JIT, for slices with no end specified: + # this avoids the two comparisons that follow + if jit.isconstant(stop) and stop == 
diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py
--- a/pypy/objspace/std/listobject.py
+++ b/pypy/objspace/std/listobject.py
@@ -1396,16 +1396,19 @@
         else:
             subitems_w = [self._none_value] * length
         l = self.unerase(w_list.lstorage)
-        for i in range(length):
-            try:
-                subitems_w[i] = l[start]
-                start += step
-            except IndexError:
-                raise
+        self._fill_in_with_sliced_items(subitems_w, l, start, step, length)
         storage = self.erase(subitems_w)
         return W_ListObject.from_storage_and_strategy(
             self.space, storage, self)
 
+    def _fill_in_with_sliced_items(self, subitems_w, l, start, step, length):
+        for i in range(length):
+            try:
+                subitems_w[i] = l[start]
+                start += step
+            except IndexError:
+                raise
+
     def switch_to_next_strategy(self, w_list, w_sample_item):
         w_list.switch_to_object_strategy()
diff --git a/pypy/objspace/std/sliceobject.py b/pypy/objspace/std/sliceobject.py
--- a/pypy/objspace/std/sliceobject.py
+++ b/pypy/objspace/std/sliceobject.py
@@ -1,10 +1,12 @@
 """Slice object"""
+import sys
 
 from pypy.interpreter import gateway
 from pypy.interpreter.baseobjspace import W_Root
 from pypy.interpreter.error import OperationError
 from pypy.interpreter.typedef import GetSetProperty, TypeDef
 from rpython.rlib.objectmodel import specialize
+from rpython.rlib import jit
 
 
 class W_SliceObject(W_Root):
@@ -234,10 +236,19 @@
     assert length >= 0
     if start < 0:
         start = 0
-    if stop < start:
-        stop = start
-    if stop > length:
-        stop = length
-    if start > length:
-        start = length
+    # hack for the JIT, for slices with no end specified:
+    # this avoids the two comparisons that follow
+    if jit.isconstant(stop) and stop == sys.maxint:
+        pass
+    else:
+        if stop < start:
+            stop = start
+        if stop <= length:
+            return start, stop
+        # here is the case where 'stop' is larger than the list
+        stop = length
+    if jit.isconstant(start) and start == 0:
+        pass    # no need to do the following check here
+    elif start > stop:
+        start = stop
     return start, stop
diff --git a/pypy/objspace/std/specialisedtupleobject.py b/pypy/objspace/std/specialisedtupleobject.py
--- a/pypy/objspace/std/specialisedtupleobject.py
+++ b/pypy/objspace/std/specialisedtupleobject.py
@@ -1,7 +1,7 @@
 from pypy.interpreter.error import OperationError
 from pypy.objspace.std.tupleobject import W_AbstractTupleObject
 from pypy.objspace.std.util import negate
-from rpython.rlib.objectmodel import compute_hash
+from rpython.rlib.objectmodel import compute_hash, specialize
 from rpython.rlib.rarithmetic import intmask
 from rpython.rlib.unroll import unrolling_iterable
 from rpython.tool.sourcetools import func_with_new_name
@@ -146,3 +146,64 @@
         return Cls_oo(space, w_arg1, w_arg2)
     else:
         raise NotSpecialised
+
+# --------------------------------------------------
+# Special code based on list strategies to implement zip(),
+# here with two list arguments only. This builds a zipped
+# list that differs from what the app-level code would build:
+# if the source lists contain sometimes ints/floats and
+# sometimes not, here we will use uniformly 'Cls_oo' instead
+# of using 'Cls_ii' or 'Cls_ff' for the elements that match.
+# This is a trade-off, but it looks like a good idea to keep
+# the list uniform for the JIT---not to mention, it is much
+# faster to move the decision out of the loop.
+
+@specialize.arg(1)
+def _build_zipped_spec(space, Cls, lst1, lst2):
+    length = min(len(lst1), len(lst2))
+    return [Cls(space, space.wrap(lst1[i]),
+                       space.wrap(lst2[i])) for i in range(length)]
+
+def _build_zipped_spec_oo(space, w_list1, w_list2):
+    strat1 = w_list1.strategy
+    strat2 = w_list2.strategy
+    length = min(strat1.length(w_list1), strat2.length(w_list2))
+    return [Cls_oo(space, strat1.getitem(w_list1, i),
+                          strat2.getitem(w_list2, i)) for i in range(length)]
+
+def _build_zipped_unspec(space, w_list1, w_list2):
+    strat1 = w_list1.strategy
+    strat2 = w_list2.strategy
+    length = min(strat1.length(w_list1), strat2.length(w_list2))
+    return [space.newtuple([strat1.getitem(w_list1, i),
+                            strat2.getitem(w_list2, i)]) for i in range(length)]
+
+def specialized_zip_2_lists(space, w_list1, w_list2):
+    from pypy.objspace.std.listobject import W_ListObject
+    if (not isinstance(w_list1, W_ListObject) or
+        not isinstance(w_list2, W_ListObject)):
+        raise OperationError(space.w_TypeError,
+                             space.wrap("expected two lists"))
+
+    if space.config.objspace.std.withspecialisedtuple:
+        intlist1 = w_list1.getitems_int()
+        if intlist1 is not None:
+            intlist2 = w_list2.getitems_int()
+            if intlist2 is not None:
+                lst_w = _build_zipped_spec(space, Cls_ii, intlist1, intlist2)
+                return space.newlist(lst_w)
+        else:
+            floatlist1 = w_list1.getitems_float()
+            if floatlist1 is not None:
+                floatlist2 = w_list2.getitems_float()
+                if floatlist2 is not None:
+                    lst_w = _build_zipped_spec(space, Cls_ff, floatlist1,
+                                                             floatlist2)
+                    return space.newlist(lst_w)
+
+        lst_w = _build_zipped_spec_oo(space, w_list1, w_list2)
+        return space.newlist(lst_w)
+
+    else:
+        lst_w = _build_zipped_unspec(space, w_list1, w_list2)
+        return space.newlist(lst_w)
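The new specialized_zip_2_lists() is meant to be reached from application level: the test added below imports it from the __pypy__ module and falls back to plain zip() elsewhere. A short app-level usage sketch along the same lines (not part of the changeset):

    # Prefer the PyPy-only fast path when present.  Note that
    # specialized_zip_2_lists() accepts exactly two lists and, like zip()
    # on Python 2, returns a list truncated to the shorter input.
    try:
        from __pypy__ import specialized_zip_2_lists as zip_2_lists
    except ImportError:
        zip_2_lists = zip

    assert zip_2_lists([2, 3], [4, 5, 6]) == [(2, 4), (3, 5)]

As the comment block above explains, deciding on one tuple class per call rather than per element keeps the result list uniform, which is the cheaper and more JIT-friendly choice.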
diff --git a/pypy/objspace/std/test/test_tupleobject.py b/pypy/objspace/std/test/test_tupleobject.py
--- a/pypy/objspace/std/test/test_tupleobject.py
+++ b/pypy/objspace/std/test/test_tupleobject.py
@@ -407,3 +407,21 @@
         assert (() != object()) is True
         assert ((1,) != object()) is True
         assert ((1, 2) != object()) is True
+
+    def test_zip_two_lists(self):
+        try:
+            from __pypy__ import specialized_zip_2_lists
+        except ImportError:
+            specialized_zip_2_lists = zip
+        raises(TypeError, specialized_zip_2_lists, [], ())
+        raises(TypeError, specialized_zip_2_lists, (), [])
+        assert specialized_zip_2_lists([], []) == [
+            ]
+        assert specialized_zip_2_lists([2, 3], []) == [
+            ]
+        assert specialized_zip_2_lists([2, 3], [4, 5, 6]) == [
+            (2, 4), (3, 5)]
+        assert specialized_zip_2_lists([4.1, 3.6, 7.2], [2.3, 4.8]) == [
+            (4.1, 2.3), (3.6, 4.8)]
+        assert specialized_zip_2_lists(["foo", "bar"], [6, 2]) == [
+            ("foo", 6), ("bar", 2)]
diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py
--- a/rpython/annotator/unaryop.py
+++ b/rpython/annotator/unaryop.py
@@ -652,11 +652,11 @@
     def len(self):
         return immutablevalue(1)
 
+class __extend__(SomeChar):
+
     def ord(self):
         return SomeInteger(nonneg=True)
-
-class __extend__(SomeChar):
-
     def method_isspace(self):
         return s_Bool
@@ -675,6 +675,13 @@
     def method_upper(self):
         return self
 
+class __extend__(SomeUnicodeCodePoint):
+
+    def ord(self):
+        # warning, on 32-bit with 32-bit unichars, this might return
+        # negative numbers
+        return SomeInteger()
+
 class __extend__(SomeIterator):
 
     def iter(self):
diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py
--- a/rpython/jit/backend/x86/assembler.py
+++ b/rpython/jit/backend/x86/assembler.py
@@ -1073,7 +1073,6 @@
     genop_nursery_ptr_increment = _binaryop_or_lea('ADD', is_add=True)
     genop_int_sub = _binaryop_or_lea("SUB", is_add=False)
     genop_int_mul = _binaryop("IMUL")
-    genop_int_and = _binaryop("AND")
    genop_int_or = _binaryop("OR")
    genop_int_xor = _binaryop("XOR")
    genop_int_lshift = _binaryop("SHL")
@@ -1084,6 +1083,15 @@
     genop_float_mul = _binaryop('MULSD')
     genop_float_truediv = _binaryop('DIVSD')
 
+    def genop_int_and(self, op, arglocs, result_loc):
+        arg1 = arglocs[1]
+        if IS_X86_64 and (isinstance(arg1, ImmedLoc) and
+                          arg1.value == (1 << 32) - 1):
+            # special case
+            self.mc.MOV32(arglocs[0], arglocs[0])
+        else:
+            self.mc.AND(arglocs[0], arg1)
+
     genop_int_lt = _cmpop("L", "G")
     genop_int_le = _cmpop("LE", "GE")
     genop_int_eq = _cmpop("E", "E")
diff --git a/rpython/jit/backend/x86/test/test_runner.py b/rpython/jit/backend/x86/test/test_runner.py
--- a/rpython/jit/backend/x86/test/test_runner.py
+++ b/rpython/jit/backend/x86/test/test_runner.py
@@ -272,6 +272,17 @@
                                      'void', ofsi)
         assert p.i == 3**33
 
+    def test_and_mask_common_patterns(self):
+        cases = [8, 16, 24]
+        if WORD == 8:
+            cases.append(32)
+        for i in cases:
+            box = InputArgInt(0xAAAAAAAAAAAA)
+            res = self.execute_operation(rop.INT_AND,
+                                         [box, ConstInt(2 ** i - 1)],
+                                         'int')
+            assert res == 0xAAAAAAAAAAAA & (2 ** i - 1)
+
     def test_nullity_with_guard(self):
         allops = [rop.INT_IS_TRUE]
         guards = [rop.GUARD_TRUE, rop.GUARD_FALSE]
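The genop_int_and special case above relies on an x86-64 detail: writing a 32-bit register with a register-to-register MOV clears the upper 32 bits of the 64-bit destination, so 'MOV32 reg, reg' implements masking with 0xFFFFFFFF, while an AND cannot encode that mask in its usual sign-extended 32-bit immediate form. The arithmetic the new test checks, restated in plain Python (standalone sketch, not JIT code):

    def and_with_mask(value, bits):
        # reference semantics of INT_AND with a constant (2**bits - 1)
        return value & ((1 << bits) - 1)

    def mov32(value):
        # effect of a 32-bit reg-to-reg MOV on x86-64: the upper 32 bits
        # of the 64-bit destination register become zero
        return value & 0xFFFFFFFF

    value = 0xAAAAAAAAAAAA            # same bit pattern as in the test above
    assert and_with_mask(value, 32) == mov32(value) == 0xAAAAAAAA
    assert and_with_mask(value, 16) == 0xAAAA
    assert and_with_mask(value, 8) == 0xAA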
diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py
--- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py
+++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py
@@ -8912,6 +8912,8 @@
         guard_value(i2, 12345) []
         jump()
         """
+        # getting InvalidLoop would be a good idea, too.
+        # (this test was written to show it would previously crash)
         self.optimize_loop(ops, ops)
diff --git a/rpython/jit/metainterp/test/test_ajit.py b/rpython/jit/metainterp/test/test_ajit.py
--- a/rpython/jit/metainterp/test/test_ajit.py
+++ b/rpython/jit/metainterp/test/test_ajit.py
@@ -4343,14 +4343,14 @@
         self.meta_interp(allfuncs, [9, 2000])
 
-    def test_unichar_might_be_signed(self):
-        py.test.skip("wchar_t is sometimes a signed 32-bit integer type, "
-                     "but RPython inteprets it as unsigned (but still "
-                     "translates to wchar_t, so can create confusion)")
+    def test_unichar_ord_is_never_signed_on_64bit(self):
+        import sys
+        if sys.maxunicode == 0xffff:
+            py.test.skip("test for 32-bit unicodes")
         def f(x):
-            return rffi.cast(lltype.Signed, rffi.cast(lltype.UniChar, x))
+            return ord(rffi.cast(lltype.UniChar, x))
         res = self.interp_operations(f, [-1])
-        if rffi.r_wchar_t.SIGN:
+        if sys.maxint == 2147483647:
             assert res == -1
         else:
-            assert res == 2 ** 16 - 1 or res == 2 ** 32 - 1
+            assert res == 4294967295
diff --git a/rpython/rlib/_rweakvaldict.py b/rpython/rlib/_rweakvaldict.py
--- a/rpython/rlib/_rweakvaldict.py
+++ b/rpython/rlib/_rweakvaldict.py
@@ -2,6 +2,7 @@
 from rpython.rtyper.lltypesystem import lltype, llmemory, rdict
 from rpython.rtyper.lltypesystem.llmemory import weakref_create, weakref_deref
 from rpython.rtyper import rclass
+from rpython.rtyper.error import TyperError
 from rpython.rtyper.rclass import getinstancerepr
 from rpython.rtyper.rmodel import Repr
 from rpython.rlib.rweakref import RWeakValueDictionary
@@ -60,6 +61,8 @@
         self.dict_cache = {}
_______________________________________________
pypy-commit mailing list
pypy-commit@python.org
https://mail.python.org/mailman/listinfo/pypy-commit