Author: Armin Rigo <[email protected]>
Branch: concurrent-marksweep
Changeset: r48357:ca2fc56c81dd
Date: 2011-10-23 12:44 +0200
http://bitbucket.org/pypy/pypy/changeset/ca2fc56c81dd/
Log: Bug fixes.
diff --git a/pypy/rpython/memory/gc/base.py b/pypy/rpython/memory/gc/base.py
--- a/pypy/rpython/memory/gc/base.py
+++ b/pypy/rpython/memory/gc/base.py
@@ -100,9 +100,6 @@
def set_root_walker(self, root_walker):
self.root_walker = root_walker
- def write_barrier(self, newvalue, addr_struct):
- pass
-
def statistics(self, index):
return -1
diff --git a/pypy/rpython/memory/gc/concurrentgen.py b/pypy/rpython/memory/gc/concurrentgen.py
--- a/pypy/rpython/memory/gc/concurrentgen.py
+++ b/pypy/rpython/memory/gc/concurrentgen.py
@@ -98,15 +98,17 @@
self.flagged_objects = self.AddressStack()
self.prebuilt_root_objects = self.AddressStack()
#
- # the linked list of new young objects, and the linked list of
- # all old objects. note that the aging objects are not here
- # but on 'collector.aging_objects'.
- self.young_objects = self.NULL
+ # The linked list of new young objects, and the linked list of
+ # all old objects. Note that the aging objects are not here
+ # but on 'collector.aging_objects'. Note also that 'old_objects'
+ # contains the objects that the write barrier re-marked as young
+ # (so they are "old young objects").
+ self.new_young_objects = self.NULL
self.old_objects = self.NULL
#
# See concurrentgen.txt for more information about these fields.
self.current_young_marker = MARK_BYTE_1
- self.current_aging_marker = MARK_BYTE_2
+ self.collector.current_aging_marker = MARK_BYTE_2
#
#self.ready_to_start_lock = ...built in setup()
#self.finished_lock = ...built in setup()
@@ -181,8 +183,8 @@
obj = adr + size_gc_header
hdr = self.header(obj)
hdr.tid = self.combine(typeid, self.current_young_marker, 0)
- hdr.next = self.young_objects
- self.young_objects = hdr
+ hdr.next = self.new_young_objects
+ self.new_young_objects = hdr
return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF)
def malloc_varsize_clear(self, typeid, length, size, itemsize,
@@ -211,8 +213,8 @@
(obj + offset_to_length).signed[0] = length
hdr = self.header(obj)
hdr.tid = self.combine(typeid, self.current_young_marker, 0)
- hdr.next = self.young_objects
- self.young_objects = hdr
+ hdr.next = self.new_young_objects
+ self.new_young_objects = hdr
return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF)
# ----------
@@ -270,8 +272,6 @@
mark = self.header(addr_struct).tid & 0xFF
if mark != self.current_young_marker:
self.force_scan(addr_struct)
- #else:
- # debug_print("deletion_barrier (off)", addr_struct)
def assume_young_pointers(self, addr_struct):
pass # XXX
@@ -279,9 +279,9 @@
def _init_writebarrier_logic(self):
#
def force_scan(obj):
- #debug_print("deletion_barrier ON ", obj)
cym = self.current_young_marker
mark = self.get_mark(obj)
+ #debug_print("deletion_barrier:", mark, obj)
#
if mark == MARK_BYTE_OLD:
#
@@ -303,7 +303,7 @@
mark = self.get_mark(obj)
self.set_mark(obj, cym)
#
- if mark == self.current_aging_marker:
+ if mark == self.collector.current_aging_marker:
#
# it is only possible to reach this point if there is
# a collection running in collector_mark(), before it
@@ -422,11 +422,14 @@
The most useful default.
gen>=4: Do a full synchronous major collection.
"""
+ debug_start("gc-forced-collect")
+ debug_print("collect, gen =", gen)
if gen >= 1 or self.collector.running <= 0:
self.trigger_next_collection(gen >= 3)
if gen >= 2:
self.wait_for_the_end_of_collection()
self.execute_finalizers_ll()
+ debug_stop("gc-forced-collect")
def trigger_next_collection(self, force_major_collection=False):
"""In the mutator thread: triggers the next minor collection."""
@@ -462,13 +465,13 @@
#
# Exchange the meanings of 'cym' and 'cam'
other = self.current_young_marker
- self.current_young_marker = self.current_aging_marker
- self.current_aging_marker = other
+ self.current_young_marker = self.collector.current_aging_marker
+ self.collector.current_aging_marker = other
#
# Copy a few 'mutator' fields to 'collector' fields
collector = self.collector
- collector.aging_objects = self.young_objects
- self.young_objects = self.NULL
+ collector.aging_objects = self.new_young_objects
+ self.new_young_objects = self.NULL
#self.collect_weakref_pages = self.weakref_pages
#self.collect_finalizer_pages = self.finalizer_pages
#
@@ -496,7 +499,7 @@
def debug_check_lists(self):
# just check that they are correct, non-infinite linked lists
- self.debug_check_list(self.young_objects)
+ self.debug_check_list(self.new_young_objects)
self.debug_check_list(self.old_objects)
def debug_check_list(self, list):
@@ -601,6 +604,22 @@
def __init__(self, gc):
self.gc = gc
#
+ # a different AddressStack class, which uses a different pool
+ # of free pages than the regular one, so can run concurrently
+ self.CollectorAddressStack = get_address_stack(lock="collector")
+ #
+ # The start function for the thread, as a function and not a method
+ def collector_start():
+ if we_are_translated():
+ self.collector_run()
+ else:
+ self.collector_run_nontranslated()
+ collector_start._should_never_raise_ = True
+ self.collector_start = collector_start
+
+ def _initialize(self):
+ self.gray_objects = self.CollectorAddressStack()
+ #
# When the mutator thread wants to trigger the next collection,
# it scans its own stack roots and prepares everything, then
# sets 'collector.running' to 1, and releases
@@ -619,25 +638,9 @@
# The mutex_lock is acquired to go from 1 to 2, and from 2 to 3.
self.running = 0
#
- # a different AddressStack class, which uses a different pool
- # of free pages than the regular one, so can run concurrently
- self.CollectorAddressStack = get_address_stack(lock="collector")
- #
# when the collection starts, we make all young objects aging and
- # move 'young_objects' into 'aging_objects'
+ # move 'new_young_objects' into 'aging_objects'
self.aging_objects = self.NULL
- #
- # The start function for the thread, as a function and not a method
- def collector_start():
- if we_are_translated():
- self.collector_run()
- else:
- self.collector_run_nontranslated()
- collector_start._should_never_raise_ = True
- self.collector_start = collector_start
-
- def _initialize(self):
- self.gray_objects = self.CollectorAddressStack()
def setup(self):
self.ready_to_start_lock = self.gc.ready_to_start_lock
@@ -746,7 +749,7 @@
def _collect_mark(self):
extra_objects_to_mark = self.gc.extra_objects_to_mark
- cam = self.gc.current_aging_marker
+ cam = self.current_aging_marker
while self.gray_objects.non_empty():
obj = self.gray_objects.pop()
if self.get_mark(obj) != cam:
@@ -793,7 +796,7 @@
self.gray_objects.append(obj)
def collector_sweep(self):
- cam = self.gc.current_aging_marker
+ cam = self.current_aging_marker
hdr = self.aging_objects
linked_list = self.gc.old_objects
while hdr != self.NULL:
diff --git a/pypy/rpython/memory/gcwrapper.py b/pypy/rpython/memory/gcwrapper.py
--- a/pypy/rpython/memory/gcwrapper.py
+++ b/pypy/rpython/memory/gcwrapper.py
@@ -107,9 +107,13 @@
break
#
if wb:
- self.gc.write_barrier(
- llmemory.cast_ptr_to_adr(newvalue),
- llmemory.cast_ptr_to_adr(toplevelcontainer))
+ if self.gc.needs_write_barrier:
+ self.gc.write_barrier(
+ llmemory.cast_ptr_to_adr(newvalue),
+ llmemory.cast_ptr_to_adr(toplevelcontainer))
+ elif self.gc.needs_deletion_barrier:
+ self.gc.deletion_barrier(
+ llmemory.cast_ptr_to_adr(toplevelcontainer))
llheap.setinterior(toplevelcontainer, inneraddr, INNERTYPE, newvalue)
def collect(self, *gen):
diff --git a/pypy/rpython/memory/support.py b/pypy/rpython/memory/support.py
--- a/pypy/rpython/memory/support.py
+++ b/pypy/rpython/memory/support.py
@@ -46,6 +46,8 @@
else:
zero = 2
size = llmemory.raw_malloc_usage(llmemory.sizeof(CHUNK))
+ # use arena_malloc to directly call the system 'malloc',
+ # so no locking issue in case of concurrent usage
addr = llarena.arena_malloc(size, zero)
if not addr:
fatalerror("out of memory in GC support code")
diff --git a/pypy/rpython/memory/test/test_gc.py b/pypy/rpython/memory/test/test_gc.py
--- a/pypy/rpython/memory/test/test_gc.py
+++ b/pypy/rpython/memory/test/test_gc.py
@@ -937,3 +937,6 @@
def test_weakref_to_object_with_finalizer_ordering(self):
py.test.skip("frees weakrefs before calling finalizers")
+
+class TestConcurrentGenGC(GCTest):
+ from pypy.rpython.memory.gc.concurrentgen import ConcurrentGenGC as GCClass
diff --git a/pypy/rpython/memory/test/test_transformed_gc.py b/pypy/rpython/memory/test/test_transformed_gc.py
--- a/pypy/rpython/memory/test/test_transformed_gc.py
+++ b/pypy/rpython/memory/test/test_transformed_gc.py
@@ -1437,3 +1437,12 @@
GC_PARAMS = {'page_size': 128*WORD,
'translated_to_c': False}
root_stack_depth = 200
+
+class TestConcurrentGenGC(GCTest):
+ gcname = "concurrentgen"
+ class gcpolicy(gc.FrameworkGcPolicy):
+ class transformerclass(framework.FrameworkGCTransformer):
+ from pypy.rpython.memory.gc.concurrentgen \
+ import ConcurrentGenGC as GCClass
+ GC_PARAMS = {'translated_to_c': False}
+ root_stack_depth = 200
_______________________________________________
pypy-commit mailing list
[email protected]
http://mail.python.org/mailman/listinfo/pypy-commit