Author: Philip Jenvey <pjen...@underboss.org>
Branch: py3k
Changeset: r70597:fe69dee13c0f
Date: 2014-04-11 17:10 -0700
http://bitbucket.org/pypy/pypy/changeset/fe69dee13c0f/
Log: merge default diff --git a/pypy/doc/_ref.txt b/pypy/doc/_ref.txt --- a/pypy/doc/_ref.txt +++ b/pypy/doc/_ref.txt @@ -1,3 +1,6 @@ +.. This file is generated automatically by makeref.py script, + which in turn is run manually. + .. _`ctypes_configure/doc/sample.py`: https://bitbucket.org/pypy/pypy/src/default/ctypes_configure/doc/sample.py .. _`dotviewer/`: https://bitbucket.org/pypy/pypy/src/default/dotviewer/ .. _`lib-python/`: https://bitbucket.org/pypy/pypy/src/default/lib-python/ diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -106,23 +106,43 @@ Differences related to garbage collection strategies ---------------------------------------------------- -Most of the garbage collectors used or implemented by PyPy are not based on +The garbage collectors used or implemented by PyPy are not based on reference counting, so the objects are not freed instantly when they are no longer reachable. The most obvious effect of this is that files are not promptly closed when they go out of scope. For files that are opened for writing, data can be left sitting in their output buffers for a while, making -the on-disk file appear empty or truncated. +the on-disk file appear empty or truncated. Moreover, you might reach your +OS's limit on the number of concurrently opened files. -Fixing this is essentially not possible without forcing a +Fixing this is essentially impossible without forcing a reference-counting approach to garbage collection. The effect that you get in CPython has clearly been described as a side-effect of the implementation and not a language design decision: programs relying on this are basically bogus. It would anyway be insane to try to enforce CPython's behavior in a language spec, given that it has no chance to be adopted by Jython or IronPython (or any other port of Python to Java or -.NET, like PyPy itself). +.NET). -This affects the precise time at which ``__del__`` methods are called, which +Even the naive idea of forcing a full GC when we're getting dangerously +close to the OS's limit can be very bad in some cases. If your program +leaks open files heavily, then it would work, but force a complete GC +cycle every n'th leaked file. The value of n is a constant, but the +program can take an arbitrary amount of memory, which makes a complete +GC cycle arbitrarily long. The end result is that PyPy would spend an +arbitrarily large fraction of its run time in the GC --- slowing down +the actual execution, not by 10% nor 100% nor 1000% but by essentially +any factor. + +To the best of our knowledge this problem has no better solution than +fixing the programs. If it occurs in 3rd-party code, this means going +to the authors and explaining the problem to them: they need to close +their open files in order to run on any non-CPython-based implementation +of Python. + +--------------------------------- + +Here are some more technical details. This issue affects the precise +time at which ``__del__`` methods are called, which is not reliable in PyPy (nor Jython nor IronPython). It also means that weak references may stay alive for a bit longer than expected. This makes "weak proxies" (as returned by ``weakref.proxy()``) somewhat less diff --git a/pypy/doc/jit/_ref.txt b/pypy/doc/jit/_ref.txt --- a/pypy/doc/jit/_ref.txt +++ b/pypy/doc/jit/_ref.txt @@ -0,0 +1,4 @@ +.. This file is generated automatically by makeref.py script, + which in turn is run manually. 
+ + diff --git a/pypy/doc/jit/pyjitpl5.rst b/pypy/doc/jit/pyjitpl5.rst --- a/pypy/doc/jit/pyjitpl5.rst +++ b/pypy/doc/jit/pyjitpl5.rst @@ -177,6 +177,12 @@ .. __: https://bitbucket.org/pypy/extradoc/src/tip/talk/icooolps2009/bolz-tracing-jit-final.pdf -as well as the `blog posts with the JIT tag.`__ +Chapters 5 and 6 of `Antonio Cuni's PhD thesis`__ contain an overview of how +Tracing JITs work in general and more informations about the concrete case of +PyPy's JIT. + +.. __: http://antocuni.eu/download/antocuni-phd-thesis.pdf + +The `blog posts with the JIT tag`__ might also contain additional information. .. __: http://morepypy.blogspot.com/search/label/jit diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst --- a/pypy/doc/stm.rst +++ b/pypy/doc/stm.rst @@ -26,8 +26,8 @@ ============ ``pypy-stm`` is a variant of the regular PyPy interpreter. With caveats -listed below, it should be in theory within 25%-50% of the speed of a -regular PyPy, comparing the JITting version in both cases. It is called +listed below, it should be in theory within 25%-50% slower than a +regular PyPy, comparing the JIT version in both cases. It is called STM for Software Transactional Memory, which is the internal technique used (see `Reference to implementation details`_). @@ -55,9 +55,9 @@ interested in trying it out, you can download a Ubuntu 12.04 binary here__ (``pypy-2.2.x-stm*.tar.bz2``; this version is a release mode, but not stripped of debug symbols). The current version supports four -"segments", which means that it will run up to four threads in parallel -(in other words, you get a GIL effect again, but only if trying to -execute more than 4 threads). +"segments", which means that it will run up to four threads in parallel, +in other words it is running a thread pool up to 4 threads emulating normal +threads. To build a version from sources, you first need to compile a custom version of clang; we recommend downloading `llvm and clang like diff --git a/pypy/doc/tool/makeref.py b/pypy/doc/tool/makeref.py --- a/pypy/doc/tool/makeref.py +++ b/pypy/doc/tool/makeref.py @@ -1,3 +1,12 @@ + +# patch sys.path so that this script can be executed standalone +import sys +from os.path import abspath, dirname +# this script is assumed to be at pypy/doc/tool/makeref.py +path = dirname(abspath(__file__)) +path = dirname(dirname(dirname(path))) +sys.path.insert(0, path) + import py import pypy @@ -51,8 +60,11 @@ lines.append(".. _`%s`:" % linkname) lines.append(".. _`%s`: %s" %(linknamelist[-1], linktarget)) - lines.append('') - reffile.write("\n".join(lines)) + content = ".. 
This file is generated automatically by makeref.py script,\n" + content += " which in turn is run manually.\n\n" + content += "\n".join(lines) + "\n" + reffile.write(content, mode="wb") + print "wrote %d references to %r" %(len(lines), reffile) #print "last ten lines" #for x in lines[-10:]: print x diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1091,7 +1091,7 @@ def test_read_variable(): ## FIXME: this test assumes glibc specific behavior, it's not compliant with C standard ## https://bugs.pypy.org/issue1643 - if sys.platform == 'win32' or sys.platform == 'darwin' or sys.platform.startswith('freebsd'): + if not sys.platform.startswith("linux"): py.test.skip("untested") BVoidP = new_pointer_type(new_void_type()) ll = find_and_load_library('c') @@ -1101,7 +1101,7 @@ def test_read_variable_as_unknown_length_array(): ## FIXME: this test assumes glibc specific behavior, it's not compliant with C standard ## https://bugs.pypy.org/issue1643 - if sys.platform == 'win32' or sys.platform == 'darwin' or sys.platform.startswith('freebsd'): + if not sys.platform.startswith("linux"): py.test.skip("untested") BCharP = new_pointer_type(new_primitive_type("char")) BArray = new_array_type(BCharP, None) @@ -1113,7 +1113,7 @@ def test_write_variable(): ## FIXME: this test assumes glibc specific behavior, it's not compliant with C standard ## https://bugs.pypy.org/issue1643 - if sys.platform == 'win32' or sys.platform == 'darwin' or sys.platform.startswith('freebsd'): + if not sys.platform.startswith("linux"): py.test.skip("untested") BVoidP = new_pointer_type(new_void_type()) ll = find_and_load_library('c') diff --git a/pypy/module/_cffi_backend/test/test_fastpath.py b/pypy/module/_cffi_backend/test/test_fastpath.py --- a/pypy/module/_cffi_backend/test/test_fastpath.py +++ b/pypy/module/_cffi_backend/test/test_fastpath.py @@ -51,11 +51,9 @@ def test_fast_init_longlong_from_list(self): py3k_skip('XXX: strategies are currently broken') - if type(2 ** 50) is long: - large_int = 2 ** 30 - else: - large_int = 2 ** 50 import _cffi_backend + import sys + large_int = 2 ** (50 if sys.maxsize > 2**31 - 1 else 30) LONGLONG = _cffi_backend.new_primitive_type('long long') P_LONGLONG = _cffi_backend.new_pointer_type(LONGLONG) LONGLONG_ARRAY = _cffi_backend.new_array_type(P_LONGLONG, None) diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -1297,7 +1297,6 @@ class TestBytesDictImplementation(BaseTestRDictImplementation): StrategyClass = BytesDictStrategy - #ImplementionClass = BytesDictImplementation def test_str_shortcut(self): self.fill_impl() @@ -1310,9 +1309,6 @@ self.fill_impl() assert self.fakespace.view_as_kwargs(self.impl) == (["fish", "fish2"], [1000, 2000]) -## class TestMeasuringDictImplementation(BaseTestRDictImplementation): -## ImplementionClass = MeasuringDictImplementation -## DevolvedClass = MeasuringDictImplementation class BaseTestDevolvedDictImplementation(BaseTestRDictImplementation): def fill_impl(self): diff --git a/pypy/tool/readdictinfo.py b/pypy/tool/readdictinfo.py deleted file mode 100644 --- a/pypy/tool/readdictinfo.py +++ /dev/null @@ -1,115 +0,0 @@ -# this is for use with a pypy-c build with multidicts and using the -# MeasuringDictImplementation -- 
it will create a file called -# 'dictinfo.txt' in the local directory and this file will turn the -# contents back into DictInfo objects. - -# run with python -i ! - -import sys - -if __name__ == '__main__': - infile = open(sys.argv[1]) - - curr = None - slots = [] - for line in infile: - if line == '------------------\n': - if curr: - break - curr = 1 - else: - attr, val = [s.strip() for s in line.split(':')] - slots.append(attr) - - class DictInfo(object): - __slots__ = slots - - infile = open(sys.argv[1]) - - infos = [] - - for line in infile: - if line == '------------------\n': - curr = object.__new__(DictInfo) - infos.append(curr) - else: - attr, val = [s.strip() for s in line.split(':')] - if '.' in val: - val = float(val) - else: - val = int(val) - setattr(curr, attr, val) - -def histogram(infos, keyattr, *attrs): - r = {} - for info in infos: - v = getattr(info, keyattr) - l = r.setdefault(v, [0, {}]) - l[0] += 1 - for a in attrs: - d2 = l[1].setdefault(a, {}) - v2 = getattr(info, a) - d2[v2] = d2.get(v2, 0) + 1 - return sorted(r.items()) - -def reportDictInfos(): - d = {} - stillAlive = 0 - totLifetime = 0.0 - for info in infos: - for attr in slots: - if attr == 'maxcontents': - continue - v = getattr(info, attr) - if not isinstance(v, int): - continue - d[attr] = d.get(attr, 0) + v - if info.lifetime != -1.0: - totLifetime += info.lifetime - else: - stillAlive += 1 - print 'read info on', len(infos), 'dictionaries' - if stillAlive != len(infos): - print 'average lifetime', totLifetime/(len(infos) - stillAlive), - print '('+str(stillAlive), 'still alive at exit)' - print d - -def Rify(fname, *attributes): - output = open(fname, 'w') - for attr in attributes: - print >>output, attr, - print >>output - for info in infos: - for attr in attributes: - print >>output, getattr(info, attr), - print >>output - -if __name__ == '__main__': -# reportDictInfos() - - # interactive stuff: - - import __builtin__ - - def displayhook(v): - if v is not None: - __builtin__._ = v - pprint.pprint(v) - sys.displayhook = displayhook - - import pprint - try: - import readline - except ImportError: - pass - else: - import rlcompleter - readline.parse_and_bind('tab: complete') - - if len(sys.argv) > 2: - attrs = sys.argv[2].split(',') - if attrs == ['all']: - attrs = slots - Rify("R.txt", *attrs) - - diff --git a/pypy/tool/rundictbenchmarks.py b/pypy/tool/rundictbenchmarks.py deleted file mode 100644 --- a/pypy/tool/rundictbenchmarks.py +++ /dev/null @@ -1,27 +0,0 @@ -import sys, os - -# this file runs some benchmarks with a pypy-c that is assumed to be -# built using the MeasuringDictImplementation. - -# it should be run with pypy/goal as the cwd, and you'll -# need to hack a copy of rst2html for yourself (svn docutils -# required). 
- -if __name__ == '__main__': - try: - os.unlink("dictinfo.txt") - except os.error: - pass - - progs = [('pystone', ['-c', 'from test import pystone; pystone.main()']), - ('richards', ['richards.py']), - ('docutils', ['rst2html.py', '../../doc/coding-guide.txt', 'foo.html']), - ('translate', ['translate.py', '--backendopt', '--no-compile', '--batch', - 'targetrpystonedalone.py']) - ] - - EXE = sys.argv[1] - - for suffix, args in progs: - os.spawnv(os.P_WAIT, EXE, [EXE] + args) - os.rename('dictinfo.txt', 'dictinfo-%s.txt'%suffix) diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -387,24 +387,17 @@ @arguments("descr") def opimpl_new(self, sizedescr): - resbox = self.execute_with_descr(rop.NEW, sizedescr) - self.metainterp.heapcache.new(resbox) - return resbox + return self.metainterp.execute_new(sizedescr) @arguments("descr") def opimpl_new_with_vtable(self, sizedescr): cpu = self.metainterp.cpu cls = heaptracker.descr2vtable(cpu, sizedescr) - resbox = self.execute(rop.NEW_WITH_VTABLE, ConstInt(cls)) - self.metainterp.heapcache.new(resbox) - self.metainterp.heapcache.class_now_known(resbox) - return resbox + return self.metainterp.execute_new_with_vtable(ConstInt(cls)) @arguments("box", "descr") def opimpl_new_array(self, lengthbox, itemsizedescr): - resbox = self.execute_with_descr(rop.NEW_ARRAY, itemsizedescr, lengthbox) - self.metainterp.heapcache.new_array(resbox, lengthbox) - return resbox + return self.metainterp.execute_new_array(itemsizedescr, lengthbox) @specialize.arg(1) def _do_getarrayitem_gc_any(self, op, arraybox, indexbox, arraydescr): @@ -467,10 +460,8 @@ @arguments("box", "box", "box", "descr") def _opimpl_setarrayitem_gc_any(self, arraybox, indexbox, itembox, arraydescr): - self.execute_with_descr(rop.SETARRAYITEM_GC, arraydescr, arraybox, - indexbox, itembox) - self.metainterp.heapcache.setarrayitem( - arraybox, indexbox, itembox, arraydescr) + self.metainterp.execute_setarrayitem_gc(arraydescr, arraybox, + indexbox, itembox) opimpl_setarrayitem_gc_i = _opimpl_setarrayitem_gc_any opimpl_setarrayitem_gc_r = _opimpl_setarrayitem_gc_any @@ -623,21 +614,22 @@ tobox = self.metainterp.heapcache.getfield(box, fielddescr) if tobox is valuebox: return - # The following test is disabled because buggy. It is supposed + self.metainterp.execute_setfield_gc(fielddescr, box, valuebox) + # The following logic is disabled because buggy. 
It is supposed # to be: not(we're writing null into a freshly allocated object) # but the bug is that is_unescaped() can be True even after the # field cache is cleared --- see test_ajit:test_unescaped_write_zero - if 1: # tobox is not None or not self.metainterp.heapcache.is_unescaped(box) or not isinstance(valuebox, Const) or valuebox.nonnull(): - self.execute_with_descr(rop.SETFIELD_GC, fielddescr, box, valuebox) - self.metainterp.heapcache.setfield(box, valuebox, fielddescr) + # + # if tobox is not None or not self.metainterp.heapcache.is_unescaped(box) or not isinstance(valuebox, Const) or valuebox.nonnull(): + # self.execute_with_descr(rop.SETFIELD_GC, fielddescr, box, valuebox) + # self.metainterp.heapcache.setfield(box, valuebox, fielddescr) opimpl_setfield_gc_i = _opimpl_setfield_gc_any opimpl_setfield_gc_r = _opimpl_setfield_gc_any opimpl_setfield_gc_f = _opimpl_setfield_gc_any @arguments("box", "box", "box", "descr") def _opimpl_setinteriorfield_gc_any(self, array, index, value, descr): - self.execute_with_descr(rop.SETINTERIORFIELD_GC, descr, - array, index, value) + self.metainterp.execute_setinteriorfield_gc(descr, array, index, value) opimpl_setinteriorfield_gc_i = _opimpl_setinteriorfield_gc_any opimpl_setinteriorfield_gc_f = _opimpl_setinteriorfield_gc_any opimpl_setinteriorfield_gc_r = _opimpl_setinteriorfield_gc_any @@ -664,8 +656,8 @@ @arguments("box", "box", "box", "descr") def _opimpl_raw_store(self, addrbox, offsetbox, valuebox, arraydescr): - self.execute_with_descr(rop.RAW_STORE, arraydescr, - addrbox, offsetbox, valuebox) + self.metainterp.execute_raw_store(arraydescr, + addrbox, offsetbox, valuebox) opimpl_raw_store_i = _opimpl_raw_store opimpl_raw_store_f = _opimpl_raw_store @@ -1891,6 +1883,41 @@ self.attach_debug_info(op) return resbox + def execute_new_with_vtable(self, known_class): + resbox = self.execute_and_record(rop.NEW_WITH_VTABLE, None, + known_class) + self.heapcache.new(resbox) + self.heapcache.class_now_known(resbox) + return resbox + + def execute_new(self, typedescr): + resbox = self.execute_and_record(rop.NEW, typedescr) + self.heapcache.new(resbox) + return resbox + + def execute_new_array(self, itemsizedescr, lengthbox): + resbox = self.execute_and_record(rop.NEW_ARRAY, itemsizedescr, + lengthbox) + self.heapcache.new_array(resbox, lengthbox) + return resbox + + def execute_setfield_gc(self, fielddescr, box, valuebox): + self.execute_and_record(rop.SETFIELD_GC, fielddescr, box, valuebox) + self.heapcache.setfield(box, valuebox, fielddescr) + + def execute_setarrayitem_gc(self, arraydescr, arraybox, indexbox, itembox): + self.execute_and_record(rop.SETARRAYITEM_GC, arraydescr, + arraybox, indexbox, itembox) + self.heapcache.setarrayitem(arraybox, indexbox, itembox, arraydescr) + + def execute_setinteriorfield_gc(self, descr, array, index, value): + self.execute_and_record(rop.SETINTERIORFIELD_GC, descr, + array, index, value) + + def execute_raw_store(self, arraydescr, addrbox, offsetbox, valuebox): + self.execute_and_record(rop.RAW_STORE, arraydescr, + addrbox, offsetbox, valuebox) + def attach_debug_info(self, op): if (not we_are_translated() and op is not None diff --git a/rpython/jit/metainterp/resume.py b/rpython/jit/metainterp/resume.py --- a/rpython/jit/metainterp/resume.py +++ b/rpython/jit/metainterp/resume.py @@ -954,15 +954,14 @@ return virtualizable_boxes, virtualref_boxes def allocate_with_vtable(self, known_class): - return self.metainterp.execute_and_record(rop.NEW_WITH_VTABLE, - None, known_class) + return 
self.metainterp.execute_new_with_vtable(known_class) def allocate_struct(self, typedescr): - return self.metainterp.execute_and_record(rop.NEW, typedescr) + return self.metainterp.execute_new(typedescr) def allocate_array(self, length, arraydescr): - return self.metainterp.execute_and_record(rop.NEW_ARRAY, - arraydescr, ConstInt(length)) + lengthbox = ConstInt(length) + return self.metainterp.execute_new_array(arraydescr, lengthbox) def allocate_raw_buffer(self, size): cic = self.metainterp.staticdata.callinfocollection @@ -1034,8 +1033,7 @@ else: kind = INT fieldbox = self.decode_box(fieldnum, kind) - self.metainterp.execute_and_record(rop.SETFIELD_GC, descr, - structbox, fieldbox) + self.metainterp.execute_setfield_gc(descr, structbox, fieldbox) def setinteriorfield(self, index, array, fieldnum, descr): if descr.is_pointer_field(): @@ -1045,8 +1043,8 @@ else: kind = INT fieldbox = self.decode_box(fieldnum, kind) - self.metainterp.execute_and_record(rop.SETINTERIORFIELD_GC, descr, - array, ConstInt(index), fieldbox) + self.metainterp.execute_setinteriorfield_gc(descr, array, + ConstInt(index), fieldbox) def setarrayitem_int(self, arraybox, index, fieldnum, arraydescr): self._setarrayitem(arraybox, index, fieldnum, arraydescr, INT) @@ -1059,9 +1057,8 @@ def _setarrayitem(self, arraybox, index, fieldnum, arraydescr, kind): itembox = self.decode_box(fieldnum, kind) - self.metainterp.execute_and_record(rop.SETARRAYITEM_GC, - arraydescr, arraybox, - ConstInt(index), itembox) + self.metainterp.execute_setarrayitem_gc(arraydescr, arraybox, + ConstInt(index), itembox) def setrawbuffer_item(self, bufferbox, fieldnum, offset, arraydescr): if arraydescr.is_array_of_pointers(): @@ -1071,8 +1068,8 @@ else: kind = INT itembox = self.decode_box(fieldnum, kind) - return self.metainterp.execute_and_record(rop.RAW_STORE, arraydescr, bufferbox, - ConstInt(offset), itembox) + self.metainterp.execute_raw_store(arraydescr, bufferbox, + ConstInt(offset), itembox) def decode_int(self, tagged): return self.decode_box(tagged, INT) diff --git a/rpython/jit/metainterp/test/test_ajit.py b/rpython/jit/metainterp/test/test_ajit.py --- a/rpython/jit/metainterp/test/test_ajit.py +++ b/rpython/jit/metainterp/test/test_ajit.py @@ -14,7 +14,6 @@ from rpython.rlib.longlong2float import float2longlong, longlong2float from rpython.rlib.rarithmetic import ovfcheck, is_valid_int from rpython.rtyper.lltypesystem import lltype, rffi -from rpython.translator.tool.cbuild import ExternalCompilationInfo class BasicTests: @@ -3229,12 +3228,9 @@ self.check_resops(arraylen_gc=2) def test_release_gil_flush_heap_cache(self): - eci = ExternalCompilationInfo() - if sys.platform == "win32": - eci = ExternalCompilationInfo(libraries=["msvcrt"]) T = rffi.CArrayPtr(rffi.TIME_T) - external = rffi.llexternal("time", [T], rffi.TIME_T, releasegil=True, compilation_info=eci) + external = rffi.llexternal("time", [T], rffi.TIME_T, releasegil=True) # Not a real lock, has all the same properties with respect to GIL # release though, so good for this test. 
class Lock(object): @@ -3583,6 +3579,24 @@ 'guard_true': 2, 'int_sub': 2, 'jump': 1, 'guard_false': 1}) + def test_virtual_after_bridge(self): + myjitdriver = JitDriver(greens = [], reds = ["n"]) + @look_inside_iff(lambda x: isvirtual(x)) + def g(x): + return x[0] + def f(n): + while n > 0: + myjitdriver.jit_merge_point(n=n) + x = [1] + if n & 1: # bridge + n -= g(x) + else: + n -= g(x) + return n + res = self.meta_interp(f, [10]) + assert res == 0 + self.check_resops(call=0, call_may_force=0, new_array=0) + def test_convert_from_SmallFunctionSetPBCRepr_to_FunctionsPBCRepr(self): f1 = lambda n: n+1 @@ -3922,13 +3936,10 @@ self.interp_operations(f, []) def test_external_call(self): - eci = ExternalCompilationInfo() - if sys.platform == "win32": - eci = ExternalCompilationInfo(libraries=["msvcrt"]) from rpython.rlib.objectmodel import invoke_around_extcall T = rffi.CArrayPtr(rffi.TIME_T) - external = rffi.llexternal("time", [T], rffi.TIME_T, compilation_info=eci) + external = rffi.llexternal("time", [T], rffi.TIME_T) class Oups(Exception): pass @@ -3952,9 +3963,9 @@ external(lltype.nullptr(T.TO)) return len(state.l) - res = self.interp_operations(f, []) + res = self.interp_operations(f, [], supports_longlong=True) assert res == 2 - res = self.interp_operations(f, []) + res = self.interp_operations(f, [], supports_longlong=True) assert res == 2 self.check_operations_history(call_release_gil=1, call_may_force=0) diff --git a/rpython/jit/metainterp/test/test_resume.py b/rpython/jit/metainterp/test/test_resume.py --- a/rpython/jit/metainterp/test/test_resume.py +++ b/rpython/jit/metainterp/test/test_resume.py @@ -93,6 +93,32 @@ self.resboxes.append(resbox) return resbox + def execute_new_with_vtable(self, known_class): + return self.execute_and_record(rop.NEW_WITH_VTABLE, None, + known_class) + + def execute_new(self, typedescr): + return self.execute_and_record(rop.NEW, typedescr) + + def execute_new_array(self, itemsizedescr, lengthbox): + return self.execute_and_record(rop.NEW_ARRAY, itemsizedescr, + lengthbox) + + def execute_setfield_gc(self, fielddescr, box, valuebox): + self.execute_and_record(rop.SETFIELD_GC, fielddescr, box, valuebox) + + def execute_setarrayitem_gc(self, arraydescr, arraybox, indexbox, itembox): + self.execute_and_record(rop.SETARRAYITEM_GC, arraydescr, + arraybox, indexbox, itembox) + + def execute_setinteriorfield_gc(self, descr, array, index, value): + self.execute_and_record(rop.SETINTERIORFIELD_GC, descr, + array, index, value) + + def execute_raw_store(self, arraydescr, addrbox, offsetbox, valuebox): + self.execute_and_record(rop.RAW_STORE, arraydescr, + addrbox, offsetbox, valuebox) + S = lltype.GcStruct('S') gcref1 = lltype.cast_opaque_ptr(llmemory.GCREF, lltype.malloc(S)) gcref2 = lltype.cast_opaque_ptr(llmemory.GCREF, lltype.malloc(S)) diff --git a/rpython/rlib/parsing/lexer.py b/rpython/rlib/parsing/lexer.py --- a/rpython/rlib/parsing/lexer.py +++ b/rpython/rlib/parsing/lexer.py @@ -8,7 +8,7 @@ self.source_pos = source_pos def copy(self): - return Token(self.name, self.source, self.source_pos) + return self.__class__(self.name, self.source, self.source_pos) def __eq__(self, other): # for testing only @@ -57,9 +57,9 @@ self.ignore = dict.fromkeys(ignore) self.matcher = self.automaton.make_lexing_code() - def get_runner(self, text, eof=False): + def get_runner(self, text, eof=False, token_class=None): return LexingDFARunner(self.matcher, self.automaton, text, - self.ignore, eof) + self.ignore, eof, token_class=token_class) def tokenize(self, text, eof=False): 
"""Return a list of Token's from text.""" @@ -184,7 +184,12 @@ return self class LexingDFARunner(AbstractLexingDFARunner): - def __init__(self, matcher, automaton, text, ignore, eof=False): + def __init__(self, matcher, automaton, text, ignore, eof=False, + token_class=None): + if token_class is None: + self.token_class = Token + else: + self.token_class = token_class AbstractLexingDFARunner.__init__(self, matcher, automaton, text, eof) self.ignore = ignore @@ -195,6 +200,6 @@ assert (eof and state == -1) or 0 <= state < len(self.automaton.names) source_pos = SourcePos(index, self.lineno, self.columnno) if eof: - return Token("EOF", "EOF", source_pos) - return Token(self.automaton.names[self.last_matched_state], - text, source_pos) + return self.token_class("EOF", "EOF", source_pos) + return self.token_class(self.automaton.names[self.last_matched_state], + text, source_pos) _______________________________________________ pypy-commit mailing list pypy-commit@python.org https://mail.python.org/mailman/listinfo/pypy-commit