Author: Richard Plangger <planri...@gmail.com>
Branch: new-jit-log
Changeset: r84348:cfecd970a924
Date: 2016-05-09 13:29 +0200
http://bitbucket.org/pypy/pypy/changeset/cfecd970a924/
Log: merged default diff too long, truncating to 2000 out of 2152 lines diff --git a/pypy/doc/discussion/finalizer-order.rst b/pypy/doc/discussion/finalizer-order.rst --- a/pypy/doc/discussion/finalizer-order.rst +++ b/pypy/doc/discussion/finalizer-order.rst @@ -33,26 +33,25 @@ it from a finalizer. A finalizer runs earlier, and in topological order; care must be taken that the object might still be reachable at this point if we're clever enough. A destructor on the other hand runs -last; nothing can be done with the object any more. +last; nothing can be done with the object any more, and the GC frees it +immediately. Destructors ----------- A destructor is an RPython ``__del__()`` method that is called directly -by the GC when there is no more reference to an object. Intended for -objects that just need to free a block of raw memory or close a file. +by the GC when it is about to free the memory. Intended for objects +that just need to free an extra block of raw memory. There are restrictions on the kind of code you can put in ``__del__()``, including all other functions called by it. These restrictions are -checked. In particular you cannot access fields containing GC objects; -and if you call an external C function, it must be a "safe" function -(e.g. not releasing the GIL; use ``releasegil=False`` in -``rffi.llexternal()``). +checked. In particular you cannot access fields containing GC objects. +Right now you can't call any external C function either. -If there are several objects with destructors that die during the same -GC cycle, they are called in a completely random order --- but that -should not matter because destructors cannot do much anyway. +Destructors are called precisely when the GC frees the memory of the +object. As long as the object exists (even in some finalizer queue or +anywhere), its destructor is not called. Register_finalizer @@ -95,10 +94,15 @@ To find the queued items, call ``fin.next_dead()`` repeatedly. It returns the next queued item, or ``None`` when the queue is empty. -It is allowed in theory to cumulate several different +In theory, it would kind of work if you cumulate several different ``FinalizerQueue`` instances for objects of the same class, and (always in theory) the same ``obj`` could be registered several times in the same queue, or in several queues. This is not tested though. +For now the untranslated emulation does not support registering the +same object several times. + +Note that the Boehm garbage collector, used in ``rpython -O0``, +completely ignores ``register_finalizer()``. Ordering of finalizers diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -84,3 +84,8 @@ .. branch: cpyext-more-slots +.. branch: use-gc-del-3 + +Use the new rgc.FinalizerQueue mechanism to clean up the handling of +``__del__`` methods. Fixes notably issue #2287. (All RPython +subclasses of W_Root need to use FinalizerQueue now.) 
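(Note on the rgc.FinalizerQueue API that the finalizer-order.rst hunk above documents: the sketch below is illustrative only and untested; W_Stream and close_fd are made-up names. The real queue for this changeset is the WRootFinalizerQueue created in executioncontext.py further down, whose finalizer_trigger() merely fires UserDelAction, with next_dead() drained later between bytecodes.)

    from rpython.rlib import rgc

    class W_Stream(object):              # hypothetical class owning a raw resource
        def __init__(self, fd):
            self.fd = fd
            fin.register_finalizer(self) # ask to be queued in 'fin' once unreachable

    class StreamFinalizerQueue(rgc.FinalizerQueue):
        Class = W_Stream                 # only W_Stream instances go through this queue

        def finalizer_trigger(self):
            # Called by the GC at a safe point after some registered objects died.
            # Drained inline here for brevity; PyPy instead only fires an AsyncAction.
            while True:
                w_stream = self.next_dead()
                if w_stream is None:     # queue exhausted
                    break
                close_fd(w_stream.fd)    # hypothetical cleanup helper

    fin = StreamFinalizerQueue()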
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -11,7 +11,7 @@ INT_MIN, INT_MAX, UINT_MAX, USHRT_MAX from pypy.interpreter.executioncontext import (ExecutionContext, ActionFlag, - UserDelAction) + make_finalizer_queue) from pypy.interpreter.error import OperationError, new_exception_class, oefmt from pypy.interpreter.argument import Arguments from pypy.interpreter.miscutils import ThreadLocals, make_weak_value_dictionary @@ -28,6 +28,7 @@ """This is the abstract root class of all wrapped objects that live in a 'normal' object space like StdObjSpace.""" __slots__ = ('__weakref__',) + _must_be_light_finalizer_ = True user_overridden_class = False def getdict(self, space): @@ -136,9 +137,8 @@ pass def clear_all_weakrefs(self): - """Call this at the beginning of interp-level __del__() methods - in subclasses. It ensures that weakrefs (if any) are cleared - before the object is further destroyed. + """Ensures that weakrefs (if any) are cleared now. This is + called by UserDelAction before the object is finalized further. """ lifeline = self.getweakref() if lifeline is not None: @@ -151,25 +151,37 @@ self.delweakref() lifeline.clear_all_weakrefs() - __already_enqueued_for_destruction = () + def _finalize_(self): + """The RPython-level finalizer. - def enqueue_for_destruction(self, space, callback, descrname): - """Put the object in the destructor queue of the space. - At a later, safe point in time, UserDelAction will call - callback(self). If that raises OperationError, prints it - to stderr with the descrname string. + By default, it is *not called*. See self.register_finalizer(). + Be ready to handle the case where the object is only half + initialized. Also, in some cases the object might still be + visible to app-level after _finalize_() is called (e.g. if + there is a __del__ that resurrects). + """ - Note that 'callback' will usually need to start with: - assert isinstance(self, W_SpecificClass) + def register_finalizer(self, space): + """Register a finalizer for this object, so that + self._finalize_() will be called. You must call this method at + most once. Be ready to handle in _finalize_() the case where + the object is half-initialized, even if you only call + self.register_finalizer() at the end of the initialization. + This is because there are cases where the finalizer is already + registered before: if the user makes an app-level subclass with + a __del__. (In that case only, self.register_finalizer() does + nothing, because the finalizer is already registered in + allocate_instance().) 
""" - # this function always resurect the object, so when - # running on top of CPython we must manually ensure that - # we enqueue it only once - if not we_are_translated(): - if callback in self.__already_enqueued_for_destruction: - return - self.__already_enqueued_for_destruction += (callback,) - space.user_del_action.register_callback(self, callback, descrname) + if self.user_overridden_class and self.getclass(space).hasuserdel: + # already registered by space.allocate_instance() + if not we_are_translated(): + assert space.finalizer_queue._already_registered(self) + else: + if not we_are_translated(): + # does not make sense if _finalize_ is not overridden + assert self._finalize_.im_func is not W_Root._finalize_.im_func + space.finalizer_queue.register_finalizer(self) # hooks that the mapdict implementations needs: def _get_mapdict_map(self): @@ -389,9 +401,9 @@ self.interned_strings = make_weak_value_dictionary(self, str, W_Root) self.actionflag = ActionFlag() # changed by the signal module self.check_signal_action = None # changed by the signal module - self.user_del_action = UserDelAction(self) + make_finalizer_queue(W_Root, self) self._code_of_sys_exc_info = None - + # can be overridden to a subclass self.initialize() @@ -1844,7 +1856,6 @@ ('get', 'get', 3, ['__get__']), ('set', 'set', 3, ['__set__']), ('delete', 'delete', 2, ['__delete__']), - ('userdel', 'del', 1, ['__del__']), ] ObjSpace.BuiltinModuleTable = [ diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -2,7 +2,7 @@ from pypy.interpreter.error import OperationError, get_cleared_operation_error from rpython.rlib.unroll import unrolling_iterable from rpython.rlib.objectmodel import specialize -from rpython.rlib import jit +from rpython.rlib import jit, rgc TICK_COUNTER_STEP = 100 @@ -141,6 +141,12 @@ actionflag.action_dispatcher(self, frame) # slow path bytecode_trace._always_inline_ = True + def _run_finalizers_now(self): + # Tests only: run the actions now, to ensure that the + # finalizable objects are really finalized. Used notably by + # pypy.tool.pytest.apptest. + self.space.actionflag.action_dispatcher(self, None) + def bytecode_only_trace(self, frame): """ Like bytecode_trace() but doesn't invoke any other events besides the @@ -515,75 +521,98 @@ """ -class UserDelCallback(object): - def __init__(self, w_obj, callback, descrname): - self.w_obj = w_obj - self.callback = callback - self.descrname = descrname - self.next = None - class UserDelAction(AsyncAction): """An action that invokes all pending app-level __del__() method. This is done as an action instead of immediately when the - interp-level __del__() is invoked, because the latter can occur more + WRootFinalizerQueue is triggered, because the latter can occur more or less anywhere in the middle of code that might not be happy with random app-level code mutating data structures under its feet. 
""" def __init__(self, space): AsyncAction.__init__(self, space) - self.dying_objects = None - self.dying_objects_last = None - self.finalizers_lock_count = 0 - self.enabled_at_app_level = True - - def register_callback(self, w_obj, callback, descrname): - cb = UserDelCallback(w_obj, callback, descrname) - if self.dying_objects_last is None: - self.dying_objects = cb - else: - self.dying_objects_last.next = cb - self.dying_objects_last = cb - self.fire() + self.finalizers_lock_count = 0 # see pypy/module/gc + self.enabled_at_app_level = True # see pypy/module/gc + self.pending_with_disabled_del = None def perform(self, executioncontext, frame): - if self.finalizers_lock_count > 0: - return self._run_finalizers() + @jit.dont_look_inside def _run_finalizers(self): - # Each call to perform() first grabs the self.dying_objects - # and replaces it with an empty list. We do this to try to - # avoid too deep recursions of the kind of __del__ being called - # while in the middle of another __del__ call. - pending = self.dying_objects - self.dying_objects = None - self.dying_objects_last = None + while True: + w_obj = self.space.finalizer_queue.next_dead() + if w_obj is None: + break + self._call_finalizer(w_obj) + + def gc_disabled(self, w_obj): + # If we're running in 'gc.disable()' mode, record w_obj in the + # "call me later" list and return True. In normal mode, return + # False. Use this function from some _finalize_() methods: + # if a _finalize_() method would call some user-defined + # app-level function, like a weakref callback, then first do + # 'if gc.disabled(self): return'. Another attempt at + # calling _finalize_() will be made after 'gc.enable()'. + # (The exact rule for when to use gc_disabled() or not is a bit + # vague, but most importantly this includes all user-level + # __del__().) + pdd = self.pending_with_disabled_del + if pdd is None: + return False + else: + pdd.append(w_obj) + return True + + def _call_finalizer(self, w_obj): + # Before calling the finalizers, clear the weakrefs, if any. + w_obj.clear_all_weakrefs() + + # Look up and call the app-level __del__, if any. space = self.space - while pending is not None: + if w_obj.typedef is None: + w_del = None # obscure case: for WeakrefLifeline + else: + w_del = space.lookup(w_obj, '__del__') + if w_del is not None: + if self.gc_disabled(w_obj): + return try: - pending.callback(pending.w_obj) - except OperationError as e: - e.write_unraisable(space, pending.descrname, pending.w_obj) - e.clear(space) # break up reference cycles - pending = pending.next - # - # Note: 'dying_objects' used to be just a regular list instead - # of a chained list. This was the cause of "leaks" if we have a - # program that constantly creates new objects with finalizers. - # Here is why: say 'dying_objects' is a long list, and there - # are n instances in it. Then we spend some time in this - # function, possibly triggering more GCs, but keeping the list - # of length n alive. Then the list is suddenly freed at the - # end, and we return to the user program. At this point the - # GC limit is still very high, because just before, there was - # a list of length n alive. Assume that the program continues - # to allocate a lot of instances with finalizers. The high GC - # limit means that it could allocate a lot of instances before - # reaching it --- possibly more than n. So the whole procedure - # repeats with higher and higher values of n. 
- # - # This does not occur in the current implementation because - # there is no list of length n: if n is large, then the GC - # will run several times while walking the list, but it will - # see lower and lower memory usage, with no lower bound of n. + space.get_and_call_function(w_del, w_obj) + except Exception as e: + report_error(space, e, "method __del__ of ", w_obj) + + # Call the RPython-level _finalize_() method. + try: + w_obj._finalize_() + except Exception as e: + report_error(space, e, "finalizer of ", w_obj) + + +def report_error(space, e, where, w_obj): + if isinstance(e, OperationError): + e.write_unraisable(space, where, w_obj) + e.clear(space) # break up reference cycles + else: + addrstring = w_obj.getaddrstring(space) + msg = ("RPython exception %s in %s<%s at 0x%s> ignored\n" % ( + str(e), where, space.type(w_obj).name, addrstring)) + space.call_method(space.sys.get('stderr'), 'write', + space.wrap(msg)) + + +def make_finalizer_queue(W_Root, space): + """Make a FinalizerQueue subclass which responds to GC finalizer + events by 'firing' the UserDelAction class above. It does not + directly fetches the objects to finalize at all; they stay in the + GC-managed queue, and will only be fetched by UserDelAction + (between bytecodes).""" + + class WRootFinalizerQueue(rgc.FinalizerQueue): + Class = W_Root + + def finalizer_trigger(self): + space.user_del_action.fire() + + space.user_del_action = UserDelAction(space) + space.finalizer_queue = WRootFinalizerQueue() diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py --- a/pypy/interpreter/generator.py +++ b/pypy/interpreter/generator.py @@ -1,6 +1,7 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.pyopcode import LoopBlock +from pypy.interpreter.pycode import CO_YIELD_INSIDE_TRY from rpython.rlib import jit @@ -13,6 +14,8 @@ self.frame = frame # turned into None when frame_finished_execution self.pycode = frame.pycode self.running = False + if self.pycode.co_flags & CO_YIELD_INSIDE_TRY: + self.register_finalizer(self.space) def descr__repr__(self, space): if self.pycode is None: @@ -139,7 +142,6 @@ def descr_close(self): """x.close(arg) -> raise GeneratorExit inside generator.""" - assert isinstance(self, GeneratorIterator) space = self.space try: w_retval = self.throw(space.w_GeneratorExit, space.w_None, @@ -212,25 +214,21 @@ unpack_into = _create_unpack_into() unpack_into_w = _create_unpack_into() - -class GeneratorIteratorWithDel(GeneratorIterator): - - def __del__(self): - # Only bother enqueuing self to raise an exception if the frame is - # still not finished and finally or except blocks are present. - self.clear_all_weakrefs() + def _finalize_(self): + # This is only called if the CO_YIELD_INSIDE_TRY flag is set + # on the code object. If the frame is still not finished and + # finally or except blocks are present at the current + # position, then raise a GeneratorExit. Otherwise, there is + # no point. 
if self.frame is not None: block = self.frame.lastblock while block is not None: if not isinstance(block, LoopBlock): - self.enqueue_for_destruction(self.space, - GeneratorIterator.descr_close, - "interrupting generator of ") + self.descr_close() break block = block.previous - def get_printable_location_genentry(bytecode): return '%s <generator>' % (bytecode.get_repr(),) generatorentry_driver = jit.JitDriver(greens=['pycode'], diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -241,12 +241,8 @@ def run(self): """Start this frame's execution.""" if self.getcode().co_flags & pycode.CO_GENERATOR: - if self.getcode().co_flags & pycode.CO_YIELD_INSIDE_TRY: - from pypy.interpreter.generator import GeneratorIteratorWithDel - return self.space.wrap(GeneratorIteratorWithDel(self)) - else: - from pypy.interpreter.generator import GeneratorIterator - return self.space.wrap(GeneratorIterator(self)) + from pypy.interpreter.generator import GeneratorIterator + return self.space.wrap(GeneratorIterator(self)) else: return self.execute_frame() diff --git a/pypy/interpreter/test/test_typedef.py b/pypy/interpreter/test/test_typedef.py --- a/pypy/interpreter/test/test_typedef.py +++ b/pypy/interpreter/test/test_typedef.py @@ -127,10 +127,7 @@ """ % (slots, methodname, checks[0], checks[1], checks[2], checks[3])) subclasses = {} - for key, subcls in typedef._subclass_cache.items(): - if key[0] is not space.config: - continue - cls = key[1] + for cls, subcls in typedef._unique_subclass_cache.items(): subclasses.setdefault(cls, {}) prevsubcls = subclasses[cls].setdefault(subcls.__name__, subcls) assert subcls is prevsubcls @@ -186,35 +183,20 @@ class W_Level1(W_Root): def __init__(self, space1): assert space1 is space - def __del__(self): + self.register_finalizer(space) + def _finalize_(self): space.call_method(w_seen, 'append', space.wrap(1)) - class W_Level2(W_Root): - def __init__(self, space1): - assert space1 is space - def __del__(self): - self.enqueue_for_destruction(space, W_Level2.destructormeth, - 'FOO ') - def destructormeth(self): - space.call_method(w_seen, 'append', space.wrap(2)) W_Level1.typedef = typedef.TypeDef( 'level1', __new__ = typedef.generic_new_descr(W_Level1)) - W_Level2.typedef = typedef.TypeDef( - 'level2', - __new__ = typedef.generic_new_descr(W_Level2)) # w_seen = space.newlist([]) W_Level1(space) gc.collect(); gc.collect() - assert space.unwrap(w_seen) == [1] - # - w_seen = space.newlist([]) - W_Level2(space) - gc.collect(); gc.collect() assert space.str_w(space.repr(w_seen)) == "[]" # not called yet ec = space.getexecutioncontext() self.space.user_del_action.perform(ec, None) - assert space.unwrap(w_seen) == [2] + assert space.unwrap(w_seen) == [1] # called by user_del_action # w_seen = space.newlist([]) self.space.appexec([self.space.gettypeobject(W_Level1.typedef)], @@ -236,29 +218,17 @@ A4() """) gc.collect(); gc.collect() - assert space.unwrap(w_seen) == [4, 1] + assert space.unwrap(w_seen) == [4, 1] # user __del__, and _finalize_ # w_seen = space.newlist([]) - self.space.appexec([self.space.gettypeobject(W_Level2.typedef)], + self.space.appexec([self.space.gettypeobject(W_Level1.typedef)], """(level2): class A5(level2): pass A5() """) gc.collect(); gc.collect() - assert space.unwrap(w_seen) == [2] - # - w_seen = space.newlist([]) - self.space.appexec([self.space.gettypeobject(W_Level2.typedef), - w_seen], - """(level2, seen): - class A6(level2): - def __del__(self): - seen.append(6) - 
A6() - """) - gc.collect(); gc.collect() - assert space.unwrap(w_seen) == [6, 2] + assert space.unwrap(w_seen) == [1] # _finalize_ only def test_multiple_inheritance(self): class W_A(W_Root): diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -24,6 +24,8 @@ self.bases = bases self.heaptype = False self.hasdict = '__dict__' in rawdict + # no __del__: use an RPython _finalize_() method and register_finalizer + assert '__del__' not in rawdict self.weakrefable = '__weakref__' in rawdict self.doc = rawdict.pop('__doc__', None) for base in bases: @@ -103,26 +105,20 @@ # we need two subclasses of the app-level type, one to add mapdict, and then one # to add del to not slow down the GC. -def get_unique_interplevel_subclass(space, cls, needsdel=False): +def get_unique_interplevel_subclass(space, cls): "NOT_RPYTHON: initialization-time only" - if hasattr(cls, '__del__') and getattr(cls, "handle_del_manually", False): - needsdel = False assert cls.typedef.acceptable_as_base_class - key = space, cls, needsdel try: - return _subclass_cache[key] + return _unique_subclass_cache[cls] except KeyError: - # XXX can save a class if cls already has a __del__ - if needsdel: - cls = get_unique_interplevel_subclass(space, cls, False) - subcls = _getusercls(space, cls, needsdel) - assert key not in _subclass_cache - _subclass_cache[key] = subcls + subcls = _getusercls(cls) + assert cls not in _unique_subclass_cache + _unique_subclass_cache[cls] = subcls return subcls get_unique_interplevel_subclass._annspecialcase_ = "specialize:memo" -_subclass_cache = {} +_unique_subclass_cache = {} -def _getusercls(space, cls, wants_del, reallywantdict=False): +def _getusercls(cls, reallywantdict=False): from rpython.rlib import objectmodel from pypy.objspace.std.objectobject import W_ObjectObject from pypy.module.__builtin__.interp_classobj import W_InstanceObject @@ -132,11 +128,10 @@ typedef = cls.typedef name = cls.__name__ + "User" - mixins_needed = [] if cls is W_ObjectObject or cls is W_InstanceObject: - mixins_needed.append(_make_storage_mixin_size_n()) + base_mixin = _make_storage_mixin_size_n() else: - mixins_needed.append(MapdictStorageMixin) + base_mixin = MapdictStorageMixin copy_methods = [BaseUserClassMapdict] if reallywantdict or not typedef.hasdict: # the type has no dict, mapdict to provide the dict @@ -147,44 +142,12 @@ # support copy_methods.append(MapdictWeakrefSupport) name += "Weakrefable" - if wants_del: - # This subclass comes with an app-level __del__. To handle - # it, we make an RPython-level __del__ method. This - # RPython-level method is called directly by the GC and it - # cannot do random things (calling the app-level __del__ would - # be "random things"). So instead, we just call here - # enqueue_for_destruction(), and the app-level __del__ will be - # called later at a safe point (typically between bytecodes). - # If there is also an inherited RPython-level __del__, it is - # called afterwards---not immediately! This base - # RPython-level __del__ is supposed to run only when the - # object is not reachable any more. NOTE: it doesn't fully - # work: see issue #2287. 
- name += "Del" - parent_destructor = getattr(cls, '__del__', None) - def call_parent_del(self): - assert isinstance(self, subcls) - parent_destructor(self) - def call_applevel_del(self): - assert isinstance(self, subcls) - space.userdel(self) - class Proto(object): - def __del__(self): - self.clear_all_weakrefs() - self.enqueue_for_destruction(space, call_applevel_del, - 'method __del__ of ') - if parent_destructor is not None: - self.enqueue_for_destruction(space, call_parent_del, - 'internal destructor of ') - mixins_needed.append(Proto) class subcls(cls): user_overridden_class = True - for base in mixins_needed: - objectmodel.import_from_mixin(base) + objectmodel.import_from_mixin(base_mixin) for copycls in copy_methods: _copy_methods(copycls, subcls) - del subcls.base subcls.__name__ = name return subcls diff --git a/pypy/module/__builtin__/interp_classobj.py b/pypy/module/__builtin__/interp_classobj.py --- a/pypy/module/__builtin__/interp_classobj.py +++ b/pypy/module/__builtin__/interp_classobj.py @@ -44,13 +44,12 @@ self.bases_w = bases self.w_dict = w_dict + def has_user_del(self, space): + return self.lookup(space, '__del__') is not None + def instantiate(self, space): cache = space.fromcache(Cache) - if self.lookup(space, '__del__') is not None: - w_inst = cache.cls_with_del(space, self) - else: - w_inst = cache.cls_without_del(space, self) - return w_inst + return cache.InstanceObjectCls(space, self) def getdict(self, space): return self.w_dict @@ -132,9 +131,9 @@ self.setbases(space, w_value) return elif name == "__del__": - if self.lookup(space, name) is None: + if not self.has_user_del(space): msg = ("a __del__ method added to an existing class will " - "not be called") + "only be called on instances made from now on") space.warn(space.wrap(msg), space.w_RuntimeWarning) space.setitem(self.w_dict, w_attr, w_value) @@ -184,14 +183,11 @@ if hasattr(space, 'is_fake_objspace'): # hack: with the fake objspace, we don't want to see typedef's # _getusercls() at all - self.cls_without_del = W_InstanceObject - self.cls_with_del = W_InstanceObject + self.InstanceObjectCls = W_InstanceObject return - self.cls_without_del = _getusercls( - space, W_InstanceObject, False, reallywantdict=True) - self.cls_with_del = _getusercls( - space, W_InstanceObject, True, reallywantdict=True) + self.InstanceObjectCls = _getusercls( + W_InstanceObject, reallywantdict=True) def class_descr_call(space, w_self, __args__): @@ -297,12 +293,15 @@ class W_InstanceObject(W_Root): def __init__(self, space, w_class): # note that user_setup is overridden by the typedef.py machinery + self.space = space self.user_setup(space, space.gettypeobject(self.typedef)) assert isinstance(w_class, W_ClassObject) self.w_class = w_class + if w_class.has_user_del(space): + space.finalizer_queue.register_finalizer(self) def user_setup(self, space, w_subtype): - self.space = space + pass def set_oldstyle_class(self, space, w_class): if w_class is None or not isinstance(w_class, W_ClassObject): @@ -368,8 +367,7 @@ self.set_oldstyle_class(space, w_value) return if name == '__del__' and w_meth is None: - cache = space.fromcache(Cache) - if (not isinstance(self, cache.cls_with_del) + if (not self.w_class.has_user_del(space) and self.getdictvalue(space, '__del__') is None): msg = ("a __del__ method added to an instance with no " "__del__ in the class will not be called") @@ -646,13 +644,14 @@ raise oefmt(space.w_TypeError, "instance has no next() method") return space.call_function(w_func) - def descr_del(self, space): - # Note that 
this is called from executioncontext.UserDelAction - # via the space.userdel() method. + def _finalize_(self): + space = self.space w_func = self.getdictvalue(space, '__del__') if w_func is None: w_func = self.getattr_from_class(space, '__del__') if w_func is not None: + if self.space.user_del_action.gc_disabled(self): + return space.call_function(w_func) def descr_exit(self, space, w_type, w_value, w_tb): @@ -729,7 +728,6 @@ __pow__ = interp2app(W_InstanceObject.descr_pow), __rpow__ = interp2app(W_InstanceObject.descr_rpow), next = interp2app(W_InstanceObject.descr_next), - __del__ = interp2app(W_InstanceObject.descr_del), __exit__ = interp2app(W_InstanceObject.descr_exit), __dict__ = dict_descr, **rawdict diff --git a/pypy/module/_cffi_backend/allocator.py b/pypy/module/_cffi_backend/allocator.py --- a/pypy/module/_cffi_backend/allocator.py +++ b/pypy/module/_cffi_backend/allocator.py @@ -45,14 +45,11 @@ rffi.c_memset(rffi.cast(rffi.VOIDP, ptr), 0, rffi.cast(rffi.SIZE_T, datasize)) # - if self.w_free is None: - # use this class which does not have a __del__, but still - # keeps alive w_raw_cdata - res = cdataobj.W_CDataNewNonStdNoFree(space, ptr, ctype, length) - else: - res = cdataobj.W_CDataNewNonStdFree(space, ptr, ctype, length) + res = cdataobj.W_CDataNewNonStd(space, ptr, ctype, length) + res.w_raw_cdata = w_raw_cdata + if self.w_free is not None: res.w_free = self.w_free - res.w_raw_cdata = w_raw_cdata + res.register_finalizer(space) return res @unwrap_spec(w_init=WrappedDefault(None)) diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py --- a/pypy/module/_cffi_backend/cdataobj.py +++ b/pypy/module/_cffi_backend/cdataobj.py @@ -449,22 +449,11 @@ lltype.free(self._ptr, flavor='raw') -class W_CDataNewNonStdNoFree(W_CDataNewOwning): - """Subclass using a non-standard allocator, no free()""" - _attrs_ = ['w_raw_cdata'] +class W_CDataNewNonStd(W_CDataNewOwning): + """Subclass using a non-standard allocator""" + _attrs_ = ['w_raw_cdata', 'w_free'] -class W_CDataNewNonStdFree(W_CDataNewNonStdNoFree): - """Subclass using a non-standard allocator, with a free()""" - _attrs_ = ['w_free'] - - def __del__(self): - self.clear_all_weakrefs() - self.enqueue_for_destruction(self.space, - W_CDataNewNonStdFree.call_destructor, - 'destructor of ') - - def call_destructor(self): - assert isinstance(self, W_CDataNewNonStdFree) + def _finalize_(self): self.space.call_function(self.w_free, self.w_raw_cdata) @@ -552,14 +541,9 @@ W_CData.__init__(self, space, cdata, ctype) self.w_original_cdata = w_original_cdata self.w_destructor = w_destructor + self.register_finalizer(space) - def __del__(self): - self.clear_all_weakrefs() - self.enqueue_for_destruction(self.space, W_CDataGCP.call_destructor, - 'destructor of ') - - def call_destructor(self): - assert isinstance(self, W_CDataGCP) + def _finalize_(self): w_destructor = self.w_destructor if w_destructor is not None: self.w_destructor = None diff --git a/pypy/module/_cffi_backend/cdlopen.py b/pypy/module/_cffi_backend/cdlopen.py --- a/pypy/module/_cffi_backend/cdlopen.py +++ b/pypy/module/_cffi_backend/cdlopen.py @@ -25,10 +25,13 @@ raise wrap_dlopenerror(ffi.space, e, filename) W_LibObject.__init__(self, ffi, filename) self.libhandle = handle + self.register_finalizer(ffi.space) - def __del__(self): - if self.libhandle: - dlclose(self.libhandle) + def _finalize_(self): + h = self.libhandle + if h != rffi.cast(DLLHANDLE, 0): + self.libhandle = rffi.cast(DLLHANDLE, 0) + dlclose(h) def cdlopen_fetch(self, name): if not 
self.libhandle: diff --git a/pypy/module/_cffi_backend/libraryobj.py b/pypy/module/_cffi_backend/libraryobj.py --- a/pypy/module/_cffi_backend/libraryobj.py +++ b/pypy/module/_cffi_backend/libraryobj.py @@ -15,7 +15,6 @@ class W_Library(W_Root): _immutable_ = True - handle = rffi.cast(DLLHANDLE, 0) def __init__(self, space, filename, flags): self.space = space @@ -27,8 +26,9 @@ except DLOpenError as e: raise wrap_dlopenerror(space, e, filename) self.name = filename + self.register_finalizer(space) - def __del__(self): + def _finalize_(self): h = self.handle if h != rffi.cast(DLLHANDLE, 0): self.handle = rffi.cast(DLLHANDLE, 0) diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -43,22 +43,18 @@ def __init__(self, space): self.space = space + self.register_finalizer(space) - def __del__(self): + def _finalize_(self): # assume that the file and stream objects are only visible in the - # thread that runs __del__, so no race condition should be possible - self.clear_all_weakrefs() + # thread that runs _finalize_, so no race condition should be + # possible and no locking is done here. if self.stream is not None: - self.enqueue_for_destruction(self.space, W_File.destructor, - 'close() method of ') - - def destructor(self): - assert isinstance(self, W_File) - try: - self.direct_close() - except StreamErrors as e: - operr = wrap_streamerror(self.space, e, self.w_name) - raise operr + try: + self.direct_close() + except StreamErrors as e: + operr = wrap_streamerror(self.space, e, self.w_name) + raise operr def fdopenstream(self, stream, fd, mode, w_name=None): self.fd = fd diff --git a/pypy/module/_hashlib/interp_hashlib.py b/pypy/module/_hashlib/interp_hashlib.py --- a/pypy/module/_hashlib/interp_hashlib.py +++ b/pypy/module/_hashlib/interp_hashlib.py @@ -76,11 +76,14 @@ except: lltype.free(ctx, flavor='raw') raise + self.register_finalizer(space) - def __del__(self): - if self.ctx: - ropenssl.EVP_MD_CTX_cleanup(self.ctx) - lltype.free(self.ctx, flavor='raw') + def _finalize_(self): + ctx = self.ctx + if ctx: + self.ctx = lltype.nullptr(ropenssl.EVP_MD_CTX.TO) + ropenssl.EVP_MD_CTX_cleanup(ctx) + lltype.free(ctx, flavor='raw') def digest_type_by_name(self, space): digest_type = ropenssl.EVP_get_digestbyname(self.name) diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -952,9 +952,15 @@ self.w_writer = None raise - def __del__(self): - self.clear_all_weakrefs() + def _finalize_(self): # Don't call the base __del__: do not close the files! + # Usually the _finalize_() method is not called at all because + # we set 'needs_to_finalize = False' in this class, so + # W_IOBase.__init__() won't call register_finalizer(). + # However, this method might still be called: if the user + # makes an app-level subclass and adds a custom __del__. 
+ pass + needs_to_finalize = False # forward to reader for method in ['read', 'peek', 'read1', 'readinto', 'readable']: diff --git a/pypy/module/_io/interp_iobase.py b/pypy/module/_io/interp_iobase.py --- a/pypy/module/_io/interp_iobase.py +++ b/pypy/module/_io/interp_iobase.py @@ -59,6 +59,8 @@ self.__IOBase_closed = False if add_to_autoflusher: get_autoflusher(space).add(self) + if self.needs_to_finalize: + self.register_finalizer(space) def getdict(self, space): return self.w_dict @@ -71,13 +73,7 @@ return True return False - def __del__(self): - self.clear_all_weakrefs() - self.enqueue_for_destruction(self.space, W_IOBase.destructor, - 'internal __del__ of ') - - def destructor(self): - assert isinstance(self, W_IOBase) + def _finalize_(self): space = self.space w_closed = space.findattr(self, space.wrap('closed')) try: @@ -90,6 +86,7 @@ # equally as bad, and potentially more frequent (because of # shutdown issues). pass + needs_to_finalize = True def _CLOSED(self): # Use this macro whenever you want to check the internal `closed` diff --git a/pypy/module/_multibytecodec/interp_incremental.py b/pypy/module/_multibytecodec/interp_incremental.py --- a/pypy/module/_multibytecodec/interp_incremental.py +++ b/pypy/module/_multibytecodec/interp_incremental.py @@ -20,8 +20,9 @@ self.codec = codec.codec self.name = codec.name self._initialize() + self.register_finalizer(space) - def __del__(self): + def _finalize_(self): self._free() def reset_w(self): diff --git a/pypy/module/_multiprocessing/interp_connection.py b/pypy/module/_multiprocessing/interp_connection.py --- a/pypy/module/_multiprocessing/interp_connection.py +++ b/pypy/module/_multiprocessing/interp_connection.py @@ -40,14 +40,17 @@ BUFFER_SIZE = 1024 buffer = lltype.nullptr(rffi.CCHARP.TO) - def __init__(self, flags): + def __init__(self, space, flags): self.flags = flags self.buffer = lltype.malloc(rffi.CCHARP.TO, self.BUFFER_SIZE, flavor='raw') + self.register_finalizer(space) - def __del__(self): - if self.buffer: - lltype.free(self.buffer, flavor='raw') + def _finalize_(self): + buf = self.buffer + if buf: + self.buffer = lltype.nullptr(rffi.CCHARP.TO) + lltype.free(buf, flavor='raw') try: self.do_close() except OSError: @@ -242,7 +245,7 @@ def __init__(self, space, fd, flags): if fd == self.INVALID_HANDLE_VALUE or fd < 0: raise oefmt(space.w_IOError, "invalid handle %d", fd) - W_BaseConnection.__init__(self, flags) + W_BaseConnection.__init__(self, space, flags) self.fd = fd @unwrap_spec(fd=int, readable=bool, writable=bool) @@ -363,8 +366,8 @@ if sys.platform == 'win32': from rpython.rlib.rwin32 import INVALID_HANDLE_VALUE - def __init__(self, handle, flags): - W_BaseConnection.__init__(self, flags) + def __init__(self, space, handle, flags): + W_BaseConnection.__init__(self, space, flags) self.handle = handle @unwrap_spec(readable=bool, writable=bool) @@ -375,7 +378,7 @@ flags = (readable and READABLE) | (writable and WRITABLE) self = space.allocate_instance(W_PipeConnection, w_subtype) - W_PipeConnection.__init__(self, handle, flags) + W_PipeConnection.__init__(self, space, handle, flags) return space.wrap(self) def descr_repr(self, space): diff --git a/pypy/module/_multiprocessing/interp_semaphore.py b/pypy/module/_multiprocessing/interp_semaphore.py --- a/pypy/module/_multiprocessing/interp_semaphore.py +++ b/pypy/module/_multiprocessing/interp_semaphore.py @@ -430,11 +430,12 @@ class W_SemLock(W_Root): - def __init__(self, handle, kind, maxvalue): + def __init__(self, space, handle, kind, maxvalue): self.handle = handle 
self.kind = kind self.count = 0 self.maxvalue = maxvalue + self.register_finalizer(space) def kind_get(self, space): return space.newint(self.kind) @@ -508,7 +509,7 @@ @unwrap_spec(kind=int, maxvalue=int) def rebuild(space, w_cls, w_handle, kind, maxvalue): self = space.allocate_instance(W_SemLock, w_cls) - self.__init__(handle_w(space, w_handle), kind, maxvalue) + self.__init__(space, handle_w(space, w_handle), kind, maxvalue) return space.wrap(self) def enter(self, space): @@ -517,7 +518,7 @@ def exit(self, space, __args__): self.release(space) - def __del__(self): + def _finalize_(self): delete_semaphore(self.handle) @unwrap_spec(kind=int, value=int, maxvalue=int) @@ -534,7 +535,7 @@ raise wrap_oserror(space, e) self = space.allocate_instance(W_SemLock, w_subtype) - self.__init__(handle, kind, maxvalue) + self.__init__(space, handle, kind, maxvalue) return space.wrap(self) diff --git a/pypy/module/_pickle_support/maker.py b/pypy/module/_pickle_support/maker.py --- a/pypy/module/_pickle_support/maker.py +++ b/pypy/module/_pickle_support/maker.py @@ -4,7 +4,7 @@ from pypy.interpreter.function import Function, Method from pypy.interpreter.module import Module from pypy.interpreter.pytraceback import PyTraceback -from pypy.interpreter.generator import GeneratorIteratorWithDel +from pypy.interpreter.generator import GeneratorIterator from rpython.rlib.objectmodel import instantiate from pypy.interpreter.gateway import unwrap_spec from pypy.objspace.std.iterobject import W_SeqIterObject, W_ReverseSeqIterObject @@ -59,7 +59,7 @@ return space.wrap(tb) def generator_new(space): - new_generator = instantiate(GeneratorIteratorWithDel) + new_generator = instantiate(GeneratorIterator) return space.wrap(new_generator) @unwrap_spec(current=int, remaining=int, step=int) diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -278,6 +278,8 @@ sock_fd = space.int_w(space.call_method(w_sock, "fileno")) self.ssl = libssl_SSL_new(w_ctx.ctx) # new ssl struct + self.register_finalizer(space) + index = compute_unique_id(self) libssl_SSL_set_app_data(self.ssl, rffi.cast(rffi.VOIDP, index)) SOCKET_STORAGE.set(index, self) @@ -317,16 +319,15 @@ self.ssl_sock_weakref_w = None return self - def __del__(self): - self.enqueue_for_destruction(self.space, _SSLSocket.destructor, - '__del__() method of ') - - def destructor(self): - assert isinstance(self, _SSLSocket) - if self.peer_cert: - libssl_X509_free(self.peer_cert) - if self.ssl: - libssl_SSL_free(self.ssl) + def _finalize_(self): + peer_cert = self.peer_cert + if peer_cert: + self.peer_cert = lltype.nullptr(X509.TO) + libssl_X509_free(peer_cert) + ssl = self.ssl + if ssl: + self.ssl = lltype.nullptr(SSL.TO) + libssl_SSL_free(ssl) @unwrap_spec(data='bufferstr') def write(self, space, data): @@ -1285,6 +1286,7 @@ self = space.allocate_instance(_SSLContext, w_subtype) self.ctx = ctx self.check_hostname = False + self.register_finalizer(space) options = SSL_OP_ALL & ~SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS if protocol != PY_SSL_VERSION_SSL2: options |= SSL_OP_NO_SSLv2 @@ -1308,8 +1310,11 @@ return self - def __del__(self): - libssl_SSL_CTX_free(self.ctx) + def _finalize_(self): + ctx = self.ctx + if ctx: + self.ctx = lltype.nullptr(SSL_CTX.TO) + libssl_SSL_CTX_free(ctx) @unwrap_spec(server_side=int) def descr_wrap_socket(self, space, w_sock, server_side, w_server_hostname=None, w_ssl_sock=None): diff --git a/pypy/module/_weakref/interp__weakref.py 
b/pypy/module/_weakref/interp__weakref.py --- a/pypy/module/_weakref/interp__weakref.py +++ b/pypy/module/_weakref/interp__weakref.py @@ -3,7 +3,8 @@ from pypy.interpreter.error import oefmt from pypy.interpreter.gateway import interp2app, ObjSpace from pypy.interpreter.typedef import TypeDef -from rpython.rlib import jit +from pypy.interpreter.executioncontext import AsyncAction, report_error +from rpython.rlib import jit, rgc from rpython.rlib.rshrinklist import AbstractShrinkList from rpython.rlib.objectmodel import specialize from rpython.rlib.rweakref import dead_ref @@ -16,9 +17,12 @@ class WeakrefLifeline(W_Root): + typedef = None + cached_weakref = None cached_proxy = None other_refs_weak = None + has_callbacks = False def __init__(self, space): self.space = space @@ -99,31 +103,10 @@ return w_ref return space.w_None - -class WeakrefLifelineWithCallbacks(WeakrefLifeline): - - def __init__(self, space, oldlifeline=None): - self.space = space - if oldlifeline is not None: - self.cached_weakref = oldlifeline.cached_weakref - self.cached_proxy = oldlifeline.cached_proxy - self.other_refs_weak = oldlifeline.other_refs_weak - - def __del__(self): - """This runs when the interp-level object goes away, and allows - its lifeline to go away. The purpose of this is to activate the - callbacks even if there is no __del__ method on the interp-level - W_Root subclass implementing the object. - """ - if self.other_refs_weak is None: - return - items = self.other_refs_weak.items() - for i in range(len(items)-1, -1, -1): - w_ref = items[i]() - if w_ref is not None and w_ref.w_callable is not None: - w_ref.enqueue_for_destruction(self.space, - W_WeakrefBase.activate_callback, - 'weakref callback of ') + def enable_callbacks(self): + if not self.has_callbacks: + self.space.finalizer_queue.register_finalizer(self) + self.has_callbacks = True @jit.dont_look_inside def make_weakref_with_callback(self, w_subtype, w_obj, w_callable): @@ -131,6 +114,7 @@ w_ref = space.allocate_instance(W_Weakref, w_subtype) W_Weakref.__init__(w_ref, space, w_obj, w_callable) self.append_wref_to(w_ref) + self.enable_callbacks() return w_ref @jit.dont_look_inside @@ -141,8 +125,33 @@ else: w_proxy = W_Proxy(space, w_obj, w_callable) self.append_wref_to(w_proxy) + self.enable_callbacks() return w_proxy + def _finalize_(self): + """This is called at the end, if enable_callbacks() was invoked. + It activates the callbacks. + """ + if self.other_refs_weak is None: + return + # + # If this is set, then we're in the 'gc.disable()' mode. In that + # case, don't invoke the callbacks now. 
+ if self.space.user_del_action.gc_disabled(self): + return + # + items = self.other_refs_weak.items() + self.other_refs_weak = None + for i in range(len(items)-1, -1, -1): + w_ref = items[i]() + if w_ref is not None and w_ref.w_callable is not None: + try: + w_ref.activate_callback() + except Exception as e: + report_error(self.space, e, + "weakref callback ", w_ref.w_callable) + + # ____________________________________________________________ @@ -163,7 +172,6 @@ self.w_obj_weak = dead_ref def activate_callback(w_self): - assert isinstance(w_self, W_WeakrefBase) w_self.space.call_function(w_self.w_callable, w_self) def descr__repr__(self, space): @@ -227,32 +235,16 @@ w_obj.setweakref(space, lifeline) return lifeline -def getlifelinewithcallbacks(space, w_obj): - lifeline = w_obj.getweakref() - if not isinstance(lifeline, WeakrefLifelineWithCallbacks): # or None - oldlifeline = lifeline - lifeline = WeakrefLifelineWithCallbacks(space, oldlifeline) - w_obj.setweakref(space, lifeline) - return lifeline - - -def get_or_make_weakref(space, w_subtype, w_obj): - return getlifeline(space, w_obj).get_or_make_weakref(w_subtype, w_obj) - - -def make_weakref_with_callback(space, w_subtype, w_obj, w_callable): - lifeline = getlifelinewithcallbacks(space, w_obj) - return lifeline.make_weakref_with_callback(w_subtype, w_obj, w_callable) - def descr__new__weakref(space, w_subtype, w_obj, w_callable=None, __args__=None): if __args__.arguments_w: raise oefmt(space.w_TypeError, "__new__ expected at most 2 arguments") + lifeline = getlifeline(space, w_obj) if space.is_none(w_callable): - return get_or_make_weakref(space, w_subtype, w_obj) + return lifeline.get_or_make_weakref(w_subtype, w_obj) else: - return make_weakref_with_callback(space, w_subtype, w_obj, w_callable) + return lifeline.make_weakref_with_callback(w_subtype, w_obj, w_callable) W_Weakref.typedef = TypeDef("weakref", __doc__ = """A weak reference to an object 'obj'. A 'callback' can be given, @@ -308,23 +300,15 @@ return space.call_args(w_obj, __args__) -def get_or_make_proxy(space, w_obj): - return getlifeline(space, w_obj).get_or_make_proxy(w_obj) - - -def make_proxy_with_callback(space, w_obj, w_callable): - lifeline = getlifelinewithcallbacks(space, w_obj) - return lifeline.make_proxy_with_callback(w_obj, w_callable) - - def proxy(space, w_obj, w_callable=None): """Create a proxy object that weakly references 'obj'. 
'callback', if given, is called with the proxy as an argument when 'obj' is about to be finalized.""" + lifeline = getlifeline(space, w_obj) if space.is_none(w_callable): - return get_or_make_proxy(space, w_obj) + return lifeline.get_or_make_proxy(w_obj) else: - return make_proxy_with_callback(space, w_obj, w_callable) + return lifeline.make_proxy_with_callback(w_obj, w_callable) def descr__new__proxy(space, w_subtype, w_obj, w_callable=None): raise oefmt(space.w_TypeError, "cannot create 'weakproxy' instances") @@ -345,7 +329,7 @@ proxy_typedef_dict = {} callable_proxy_typedef_dict = {} -special_ops = {'repr': True, 'userdel': True, 'hash': True} +special_ops = {'repr': True, 'hash': True} for opname, _, arity, special_methods in ObjSpace.MethodTable: if opname in special_ops or not special_methods: diff --git a/pypy/module/_weakref/test/test_weakref.py b/pypy/module/_weakref/test/test_weakref.py --- a/pypy/module/_weakref/test/test_weakref.py +++ b/pypy/module/_weakref/test/test_weakref.py @@ -1,6 +1,9 @@ class AppTestWeakref(object): spaceconfig = dict(usemodules=('_weakref',)) - + + def setup_class(cls): + cls.w_runappdirect = cls.space.wrap(cls.runappdirect) + def test_simple(self): import _weakref, gc class A(object): @@ -287,6 +290,9 @@ assert a1 is None def test_del_and_callback_and_id(self): + if not self.runappdirect: + skip("the id() doesn't work correctly in __del__ and " + "callbacks before translation") import gc, weakref seen_del = [] class A(object): diff --git a/pypy/module/bz2/interp_bz2.py b/pypy/module/bz2/interp_bz2.py --- a/pypy/module/bz2/interp_bz2.py +++ b/pypy/module/bz2/interp_bz2.py @@ -518,8 +518,14 @@ def __init__(self, space, compresslevel): self.space = space self.bzs = lltype.malloc(bz_stream.TO, flavor='raw', zero=True) - self.running = False - self._init_bz2comp(compresslevel) + try: + self.running = False + self._init_bz2comp(compresslevel) + except: + lltype.free(self.bzs, flavor='raw') + self.bzs = lltype.nullptr(bz_stream.TO) + raise + self.register_finalizer(space) def _init_bz2comp(self, compresslevel): if compresslevel < 1 or compresslevel > 9: @@ -532,9 +538,12 @@ self.running = True - def __del__(self): - BZ2_bzCompressEnd(self.bzs) - lltype.free(self.bzs, flavor='raw') + def _finalize_(self): + bzs = self.bzs + if bzs: + self.bzs = lltype.nullptr(bz_stream.TO) + BZ2_bzCompressEnd(bzs) + lltype.free(bzs, flavor='raw') @unwrap_spec(data='bufferstr') def compress(self, data): @@ -621,10 +630,16 @@ self.space = space self.bzs = lltype.malloc(bz_stream.TO, flavor='raw', zero=True) - self.running = False - self.unused_data = "" + try: + self.running = False + self.unused_data = "" - self._init_bz2decomp() + self._init_bz2decomp() + except: + lltype.free(self.bzs, flavor='raw') + self.bzs = lltype.nullptr(bz_stream.TO) + raise + self.register_finalizer(space) def _init_bz2decomp(self): bzerror = BZ2_bzDecompressInit(self.bzs, 0, 0) @@ -633,9 +648,12 @@ self.running = True - def __del__(self): - BZ2_bzDecompressEnd(self.bzs) - lltype.free(self.bzs, flavor='raw') + def _finalize_(self): + bzs = self.bzs + if bzs: + self.bzs = lltype.nullptr(bz_stream.TO) + BZ2_bzDecompressEnd(bzs) + lltype.free(bzs, flavor='raw') @unwrap_spec(data='bufferstr') def decompress(self, data): diff --git a/pypy/module/bz2/test/support.py b/pypy/module/bz2/test/support.py --- a/pypy/module/bz2/test/support.py +++ b/pypy/module/bz2/test/support.py @@ -10,5 +10,6 @@ # while tries and ll2ctypes.ALLOCATED: gc.collect() # to make sure we disallocate buffers + 
self.space.getexecutioncontext()._run_finalizers_now() tries -= 1 assert not ll2ctypes.ALLOCATED diff --git a/pypy/module/cppyy/interp_cppyy.py b/pypy/module/cppyy/interp_cppyy.py --- a/pypy/module/cppyy/interp_cppyy.py +++ b/pypy/module/cppyy/interp_cppyy.py @@ -1020,9 +1020,12 @@ class W_CPPInstance(W_Root): - _attrs_ = ['space', 'cppclass', '_rawobject', 'isref', 'python_owns'] + _attrs_ = ['space', 'cppclass', '_rawobject', 'isref', 'python_owns', + 'finalizer_registered'] _immutable_fields_ = ["cppclass", "isref"] + finalizer_registered = False + def __init__(self, space, cppclass, rawobject, isref, python_owns): self.space = space self.cppclass = cppclass @@ -1032,6 +1035,12 @@ assert not isref or not python_owns self.isref = isref self.python_owns = python_owns + self._opt_register_finalizer() + + def _opt_register_finalizer(self): + if self.python_owns and not self.finalizer_registered: + self.register_finalizer(self.space) + self.finalizer_registered = True def _nullcheck(self): if not self._rawobject or (self.isref and not self.get_rawobject()): @@ -1045,6 +1054,7 @@ @unwrap_spec(value=bool) def fset_python_owns(self, space, value): self.python_owns = space.is_true(value) + self._opt_register_finalizer() def get_cppthis(self, calling_scope): return self.cppclass.get_cppthis(self, calling_scope) @@ -1143,16 +1153,14 @@ (self.cppclass.name, rffi.cast(rffi.ULONG, self.get_rawobject()))) def destruct(self): - assert isinstance(self, W_CPPInstance) if self._rawobject and not self.isref: memory_regulator.unregister(self) capi.c_destruct(self.space, self.cppclass, self._rawobject) self._rawobject = capi.C_NULL_OBJECT - def __del__(self): + def _finalize_(self): if self.python_owns: - self.enqueue_for_destruction(self.space, W_CPPInstance.destruct, - '__del__() method of ') + self.destruct() W_CPPInstance.typedef = TypeDef( 'CPPInstance', diff --git a/pypy/module/cpyext/src/abstract.c b/pypy/module/cpyext/src/abstract.c --- a/pypy/module/cpyext/src/abstract.c +++ b/pypy/module/cpyext/src/abstract.c @@ -326,3 +326,9 @@ return tmp; } +/* for binary compatibility with 5.1 */ +PyAPI_FUNC(void) PyPyObject_Del(PyObject *); +void PyPyObject_Del(PyObject *op) +{ + PyObject_FREE(op); +} diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -927,31 +927,62 @@ ("fetchFooType", "METH_VARARGS", """ PyObject *o; + Foo_Type.tp_basicsize = sizeof(FooObject); Foo_Type.tp_dealloc = &dealloc_foo; - Foo_Type.tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE; + Foo_Type.tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_CHECKTYPES + | Py_TPFLAGS_BASETYPE; Foo_Type.tp_new = &new_foo; Foo_Type.tp_free = &PyObject_Del; if (PyType_Ready(&Foo_Type) < 0) return NULL; o = PyObject_New(PyObject, &Foo_Type); + init_foo(o); Py_DECREF(o); /* calls dealloc_foo immediately */ Py_INCREF(&Foo_Type); return (PyObject *)&Foo_Type; """), + ("newInstance", "METH_O", + """ + PyTypeObject *tp = (PyTypeObject *)args; + PyObject *e = PyTuple_New(0); + PyObject *o = tp->tp_new(tp, e, NULL); + Py_DECREF(e); + return o; + """), ("getCounter", "METH_VARARGS", """ return PyInt_FromLong(foo_counter); """)], prologue= """ + typedef struct { + PyObject_HEAD + int someval[99]; + } FooObject; static int foo_counter = 1000; static void dealloc_foo(PyObject *foo) { + int i; foo_counter += 10; + for (i = 0; i < 99; i++) + if (((FooObject *)foo)->someval[i] != 1000 + i) + foo_counter += 100000; /* error! 
*/ + Py_TYPE(foo)->tp_free(foo); + } + static void init_foo(PyObject *o) + { + int i; + if (o->ob_type->tp_basicsize < sizeof(FooObject)) + abort(); + for (i = 0; i < 99; i++) + ((FooObject *)o)->someval[i] = 1000 + i; } static PyObject *new_foo(PyTypeObject *t, PyObject *a, PyObject *k) { + PyObject *o; foo_counter += 1000; - return t->tp_alloc(t, 0); + o = t->tp_alloc(t, 0); + init_foo(o); + return o; } static PyTypeObject Foo_Type = { PyVarObject_HEAD_INIT(NULL, 0) @@ -971,9 +1002,24 @@ # class Bar(Foo): pass + assert Foo.__new__ is Bar.__new__ Bar(); Bar() for i in range(10): if module.getCounter() >= 5050: break self.debug_collect() assert module.getCounter() == 5050 + # + module.newInstance(Foo) + for i in range(10): + if module.getCounter() >= 6060: + break + self.debug_collect() + assert module.getCounter() == 6060 + # + module.newInstance(Bar) + for i in range(10): + if module.getCounter() >= 7070: + break + self.debug_collect() + assert module.getCounter() == 7070 diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -196,6 +196,10 @@ def update_all_slots(space, w_type, pto): # XXX fill slots in pto + # Not very sure about it, but according to + # test_call_tp_dealloc_when_created_from_python, we should not + # overwrite slots that are already set: these ones are probably + # coming from a parent C type. typedef = w_type.layout.typedef for method_name, slot_name, slot_names, slot_func in slotdefs_for_tp_slots: @@ -223,7 +227,8 @@ # XXX special case wrapper-functions and use a "specific" slot func if len(slot_names) == 1: - setattr(pto, slot_names[0], slot_func_helper) + if not getattr(pto, slot_names[0]): + setattr(pto, slot_names[0], slot_func_helper) else: assert len(slot_names) == 2 struct = getattr(pto, slot_names[0]) @@ -240,7 +245,8 @@ struct = lltype.malloc(STRUCT_TYPE, flavor='raw', zero=True) setattr(pto, slot_names[0], struct) - setattr(struct, slot_names[1], slot_func_helper) + if not getattr(struct, slot_names[1]): + setattr(struct, slot_names[1], slot_func_helper) def add_operators(space, dict_w, pto): # XXX support PyObject_HashNotImplemented diff --git a/pypy/module/gc/interp_gc.py b/pypy/module/gc/interp_gc.py --- a/pypy/module/gc/interp_gc.py +++ b/pypy/module/gc/interp_gc.py @@ -38,13 +38,23 @@ return space.newbool(space.user_del_action.enabled_at_app_level) def enable_finalizers(space): - if space.user_del_action.finalizers_lock_count == 0: + uda = space.user_del_action + if uda.finalizers_lock_count == 0: raise oefmt(space.w_ValueError, "finalizers are already enabled") - space.user_del_action.finalizers_lock_count -= 1 - space.user_del_action.fire() + uda.finalizers_lock_count -= 1 + if uda.finalizers_lock_count == 0: + pending = uda.pending_with_disabled_del + uda.pending_with_disabled_del = None + if pending is not None: + for i in range(len(pending)): + uda._call_finalizer(pending[i]) + pending[i] = None # clear the list as we progress def disable_finalizers(space): - space.user_del_action.finalizers_lock_count += 1 + uda = space.user_del_action + uda.finalizers_lock_count += 1 + if uda.pending_with_disabled_del is None: + uda.pending_with_disabled_del = [] # ____________________________________________________________ diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py --- a/pypy/module/micronumpy/ufuncs.py +++ b/pypy/module/micronumpy/ufuncs.py @@ -3,7 +3,7 @@ from pypy.interpreter.gateway import interp2app, 
unwrap_spec, WrappedDefault from pypy.interpreter.typedef import TypeDef, GetSetProperty, interp_attrproperty from pypy.interpreter.argument import Arguments -from rpython.rlib import jit +from rpython.rlib import jit, rgc from rpython.rlib.rarithmetic import LONG_BIT, maxint, _get_bitsize from rpython.tool.sourcetools import func_with_new_name from rpython.rlib.rawstorage import ( @@ -1534,6 +1534,7 @@ self.steps = alloc_raw_storage(0, track_allocation=False) self.dims_steps_set = False + @rgc.must_be_light_finalizer def __del__(self): free_raw_storage(self.dims, track_allocation=False) free_raw_storage(self.steps, track_allocation=False) diff --git a/pypy/module/pyexpat/interp_pyexpat.py b/pypy/module/pyexpat/interp_pyexpat.py --- a/pypy/module/pyexpat/interp_pyexpat.py +++ b/pypy/module/pyexpat/interp_pyexpat.py @@ -421,8 +421,11 @@ class W_XMLParserType(W_Root): + id = -1 + def __init__(self, space, parser, w_intern): self.itself = parser + self.register_finalizer(space) self.w_intern = w_intern @@ -444,14 +447,17 @@ CallbackData(space, self)) XML_SetUserData(self.itself, rffi.cast(rffi.VOIDP, self.id)) - def __del__(self): + def _finalize_(self): if XML_ParserFree: # careful with CPython interpreter shutdown - XML_ParserFree(self.itself) - if global_storage: + if self.itself: + XML_ParserFree(self.itself) + self.itself = lltype.nullptr(XML_Parser.TO) + if global_storage and self.id >= 0: try: global_storage.free_nonmoving_id(self.id) except KeyError: pass # maybe global_storage.clear() was already called + self.id = -1 @unwrap_spec(flag=int) def SetParamEntityParsing(self, space, flag): diff --git a/pypy/module/pypyjit/test_pypy_c/test_weakref.py b/pypy/module/pypyjit/test_pypy_c/test_weakref.py --- a/pypy/module/pypyjit/test_pypy_c/test_weakref.py +++ b/pypy/module/pypyjit/test_pypy_c/test_weakref.py @@ -28,10 +28,10 @@ p65 = getfield_gc_r(p14, descr=<FieldP .+inst_map \d+>) guard_value(p65, ConstPtr(ptr45), descr=...) p66 = getfield_gc_r(p14, descr=<FieldP .+inst__value0 \d+>) - guard_nonnull_class(p66, ..., descr=...) + guard_nonnull(p66, descr=...) p67 = force_token() setfield_gc(p0, p67, descr=<FieldP pypy.interpreter.pyframe.PyFrame.vable_token \d+>) - p68 = call_may_force_r(ConstClass(WeakrefLifelineWithCallbacks.make_weakref_with_callback), p66, ConstPtr(ptr50), p14, ConstPtr(ptr51), descr=<Callr \d rrrr EF=7>) + p68 = call_may_force_r(ConstClass(WeakrefLifeline.make_weakref_with_callback), p66, ConstPtr(ptr50), p14, ConstPtr(ptr51), descr=<Callr \d rrrr EF=7>) guard_not_forced(descr=...) guard_no_exception(descr=...) guard_nonnull_class(p68, ..., descr=...) 
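(The per-module conversions in this diff, e.g. _hashlib, bz2, _ssl, _cffi_backend and _multiprocessing, all follow one shape: call self.register_finalizer(space) in __init__ and move the old __del__ body into _finalize_(), nulling the raw pointer before freeing it. A condensed, hypothetical restatement of that pattern, with W_RawBuffer and its 'buf' field invented for illustration:)

    from rpython.rtyper.lltypesystem import lltype, rffi
    from pypy.interpreter.baseobjspace import W_Root

    class W_RawBuffer(W_Root):
        def __init__(self, space, size):
            self.space = space
            self.buf = lltype.malloc(rffi.CCHARP.TO, size, flavor='raw')
            self.register_finalizer(space)   # replaces the former __del__

        def _finalize_(self):
            # Null the field before freeing so that an explicit close() or a
            # second call cannot free the same raw block twice.
            buf = self.buf
            if buf:
                self.buf = lltype.nullptr(rffi.CCHARP.TO)
                lltype.free(buf, flavor='raw')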
diff --git a/pypy/module/select/interp_epoll.py b/pypy/module/select/interp_epoll.py
--- a/pypy/module/select/interp_epoll.py
+++ b/pypy/module/select/interp_epoll.py
@@ -80,6 +80,7 @@
 class W_Epoll(W_Root):
     def __init__(self, space, epfd):
         self.epfd = epfd
+        self.register_finalizer(space)
 
     @unwrap_spec(sizehint=int)
     def descr__new__(space, w_subtype, sizehint=-1):
@@ -98,7 +99,7 @@
     def descr_fromfd(space, w_cls, fd):
         return space.wrap(W_Epoll(space, fd))
 
-    def __del__(self):
+    def _finalize_(self):
         self.close()
 
     def check_closed(self, space):
diff --git a/pypy/module/select/interp_kqueue.py b/pypy/module/select/interp_kqueue.py
--- a/pypy/module/select/interp_kqueue.py
+++ b/pypy/module/select/interp_kqueue.py
@@ -109,6 +109,7 @@
 class W_Kqueue(W_Root):
     def __init__(self, space, kqfd):
         self.kqfd = kqfd
+        self.register_finalizer(space)
 
     def descr__new__(space, w_subtype):
         kqfd = syscall_kqueue()
@@ -120,7 +121,7 @@
     def descr_fromfd(space, w_cls, fd):
         return space.wrap(W_Kqueue(space, fd))
 
-    def __del__(self):
+    def _finalize_(self):
         self.close()
 
     def get_closed(self):
diff --git a/pypy/module/zlib/interp_zlib.py b/pypy/module/zlib/interp_zlib.py
--- a/pypy/module/zlib/interp_zlib.py
+++ b/pypy/module/zlib/interp_zlib.py
@@ -148,8 +148,9 @@
             raise zlib_error(space, e.msg)
         except ValueError:
             raise oefmt(space.w_ValueError, "Invalid initialization option")
+        self.register_finalizer(space)
 
-    def __del__(self):
+    def _finalize_(self):
         """Automatically free the resources used by the stream."""
         if self.stream:
             rzlib.deflateEnd(self.stream)
@@ -258,8 +259,9 @@
             raise zlib_error(space, e.msg)
         except ValueError:
             raise oefmt(space.w_ValueError, "Invalid initialization option")
+        self.register_finalizer(space)
 
-    def __del__(self):
+    def _finalize_(self):
         """Automatically free the resources used by the stream."""
         if self.stream:
             rzlib.inflateEnd(self.stream)
diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py
--- a/pypy/objspace/descroperation.py
+++ b/pypy/objspace/descroperation.py
@@ -440,11 +440,6 @@
             raise oefmt(space.w_TypeError,
                         "__hash__() should return an int or long")
 
-    def userdel(space, w_obj):
-        w_del = space.lookup(w_obj, '__del__')
-        if w_del is not None:
-            space.get_and_call_function(w_del, w_obj)
-
     def cmp(space, w_v, w_w):
         if space.is_w(w_v, w_w):
diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py
--- a/pypy/objspace/std/objspace.py
+++ b/pypy/objspace/std/objspace.py
@@ -357,11 +357,12 @@
             if cls.typedef.applevel_subclasses_base is not None:
                 cls = cls.typedef.applevel_subclasses_base
             #
-            subcls = get_unique_interplevel_subclass(
-                self, cls, w_subtype.needsdel)
+            subcls = get_unique_interplevel_subclass(self, cls)
            instance = instantiate(subcls)
             assert isinstance(instance, cls)
             instance.user_setup(self, w_subtype)
+            if w_subtype.hasuserdel:
+                self.finalizer_queue.register_finalizer(instance)
         else:
             raise oefmt(self.w_TypeError,
                         "%N.__new__(%N): only for the type %N",
diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py
--- a/pypy/objspace/std/typeobject.py
+++ b/pypy/objspace/std/typeobject.py
@@ -132,7 +132,7 @@
                  "flag_sequence_bug_compat",
                  "flag_map_or_seq",    # '?' or 'M' or 'S'
                  "compares_by_identity_status?",
-                 'needsdel',
+                 'hasuserdel',
                  'weakrefable',
                  'hasdict',
                  'layout',
@@ -160,7 +160,7 @@
         w_self.bases_w = bases_w
         w_self.dict_w = dict_w
         w_self.hasdict = False
-        w_self.needsdel = False
+        w_self.hasuserdel = False
         w_self.weakrefable = False
         w_self.w_doc = space.w_None
         w_self.weak_subclasses = []
@@ -289,7 +289,7 @@
     # compute a tuple that fully describes the instance layout
     def get_full_instance_layout(w_self):
         layout = w_self.layout
-        return (layout, w_self.hasdict, w_self.needsdel, w_self.weakrefable)
+        return (layout, w_self.hasdict, w_self.weakrefable)
 
     def compute_default_mro(w_self):
         return compute_C3_mro(w_self.space, w_self)
@@ -986,7 +986,7 @@
             hasoldstylebase = True
             continue
         w_self.hasdict = w_self.hasdict or w_base.hasdict
-        w_self.needsdel = w_self.needsdel or w_base.needsdel
+        w_self.hasuserdel = w_self.hasuserdel or w_base.hasuserdel
         w_self.weakrefable = w_self.weakrefable or w_base.weakrefable
     return hasoldstylebase
 
@@ -1028,7 +1028,7 @@
     if wantweakref:
         create_weakref_slot(w_self)
     if '__del__' in dict_w:
-        w_self.needsdel = True
+        w_self.hasuserdel = True
     #
     if index_next_extra_slot == base_layout.nslots and not force_new_layout:
         return base_layout
diff --git a/pypy/tool/pytest/apptest.py b/pypy/tool/pytest/apptest.py
--- a/pypy/tool/pytest/apptest.py
+++ b/pypy/tool/pytest/apptest.py
@@ -7,7 +7,7 @@
 # ...unless the -A option ('runappdirect') is passed.
 
 import py
-import sys, textwrap, types
+import sys, textwrap, types, gc
 from pypy.interpreter.gateway import app2interp_temp
 from pypy.interpreter.error import OperationError
 from pypy.interpreter.function import Method
@@ -32,6 +32,7 @@
         return traceback
 
     def execute_appex(self, space, target, *args):
+        self.space = space
         try:
             target(*args)
         except OperationError as e:
@@ -64,6 +65,13 @@
         code = getattr(func, 'im_func', func).func_code
         return "[%s:%s]" % (code.co_filename, code.co_firstlineno)
 
+    def track_allocations_collect(self):
+        gc.collect()
+        # must also invoke finalizers now; UserDelAction
+        # would not run at all unless invoked explicitly
+        if hasattr(self, 'space'):
+            self.space.getexecutioncontext()._run_finalizers_now()
+
 
 class AppTestMethod(AppTestFunction):
     def setup(self):
diff --git a/rpython/annotator/classdesc.py b/rpython/annotator/classdesc.py
--- a/rpython/annotator/classdesc.py
+++ b/rpython/annotator/classdesc.py
@@ -579,6 +579,14 @@
             if cls not in FORCE_ATTRIBUTES_INTO_CLASSES:
                 self.all_enforced_attrs = []    # no attribute allowed
 
+        if (getattr(cls, '_must_be_light_finalizer_', False) and
+                hasattr(cls, '__del__') and
+                not getattr(cls.__del__, '_must_be_light_finalizer_', False)):
+            raise AnnotatorError(
+                "Class %r is in a class hierarchy with "
+                "_must_be_light_finalizer_ = True: it cannot have a "
+                "finalizer without @rgc.must_be_light_finalizer" % (cls,))
+
     def add_source_attribute(self, name, value, mixin=False):
         if isinstance(value, property):
             # special case for property object
diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py
--- a/rpython/annotator/test/test_annrpython.py
+++ b/rpython/annotator/test/test_annrpython.py
@@ -4584,6 +4584,32 @@
         e = py.test.raises(Exception, a.build_types, f, [])
         assert str(e.value) == "Don't know how to represent Ellipsis"
 
+    def test_must_be_light_finalizer(self):
+        from rpython.rlib import rgc
+        @rgc.must_be_light_finalizer
+        class A(object):
+            pass
+        class B(A):
+            def __del__(self):
+                pass
+        class C(A):
+            @rgc.must_be_light_finalizer
+            def __del__(self):
+                pass
+        class D(object):
+            def __del__(self):
+                pass
+        def fb():
+            B()
+        def fc():
+            C()
+        def fd():
+            D()
+        a = self.RPythonAnnotator()
+        a.build_types(fc, [])
+        a.build_types(fd, [])
+        py.test.raises(AnnotatorError, a.build_types, fb, [])
+
     def g(n):
         return [0, 1, 2, n]
 
diff --git a/rpython/conftest.py b/rpython/conftest.py
--- a/rpython/conftest.py
+++ b/rpython/conftest.py
@@ -82,7 +82,13 @@
         return
     if (not getattr(item.obj, 'dont_track_allocations', False)
             and leakfinder.TRACK_ALLOCATIONS):
-        item._pypytest_leaks = leakfinder.stop_tracking_allocations(False)
+        kwds = {}
+        try:
+            kwds['do_collection'] = item.track_allocations_collect
+        except AttributeError:
+            pass
+        item._pypytest_leaks = leakfinder.stop_tracking_allocations(False,
+                                                                    **kwds)
     else:            # stop_tracking_allocations() already called
         item._pypytest_leaks = None
diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py
--- a/rpython/rlib/rgc.py
+++ b/rpython/rlib/rgc.py
@@ -362,6 +362,16 @@
     return func
 
 def must_be_light_finalizer(func):
+    """Mark a __del__ method as being a destructor, calling only a limited
+    set of operations.  See pypy/doc/discussion/finalizer-order.rst.
+
+    If you use the same decorator on a class, this class and all its
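
Note: the decorator documented in the (truncated) docstring above can be applied
either to a single __del__ or to a whole class, which is exactly what the new
annotator test exercises. A small sketch with illustrative class names:

    from rpython.rlib import rgc

    @rgc.must_be_light_finalizer
    class Resource(object):        # every __del__ below this class is constrained
        pass

    class GoodResource(Resource):
        @rgc.must_be_light_finalizer
        def __del__(self):         # accepted: explicitly a light finalizer
            pass

    class BadResource(Resource):
        def __del__(self):         # rejected at annotation time
            pass

A subclass of a class marked this way that defines a plain __del__ is rejected
with the AnnotatorError added in rpython/annotator/classdesc.py above.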