Author: Remi Meier <remi.me...@inf.ethz.ch>
Branch: stmgc-c8
Changeset: r75483:df00e75f0207
Date: 2015-01-22 16:28 +0100
http://bitbucket.org/pypy/pypy/changeset/df00e75f0207/

Log:	import stmgc-c8 509da83c0d5d

diff --git a/rpython/translator/stm/src_stm/revision b/rpython/translator/stm/src_stm/revision
--- a/rpython/translator/stm/src_stm/revision
+++ b/rpython/translator/stm/src_stm/revision
@@ -1,1 +1,1 @@
-5cfce5d61c50
+509da83c0d5d
diff --git a/rpython/translator/stm/src_stm/stm/core.c b/rpython/translator/stm/src_stm/stm/core.c
--- a/rpython/translator/stm/src_stm/stm/core.c
+++ b/rpython/translator/stm/src_stm/stm/core.c
@@ -3,6 +3,32 @@
 # error "must be compiled via stmgc.c"
 #endif
 
+/* *** MISC *** */
+static void free_bk(struct stm_undo_s *undo)
+{
+    free(undo->backup);
+    assert(undo->backup = (char*)-88);
+    increment_total_allocated(-SLICE_SIZE(undo->slice));
+}
+
+static struct stm_commit_log_entry_s *malloc_cle(long entries)
+{
+    size_t byte_len = sizeof(struct stm_commit_log_entry_s) +
+        entries * sizeof(struct stm_undo_s);
+    struct stm_commit_log_entry_s *result = malloc(byte_len);
+    increment_total_allocated(byte_len);
+    return result;
+}
+
+static void free_cle(struct stm_commit_log_entry_s *e)
+{
+    size_t byte_len = sizeof(struct stm_commit_log_entry_s) +
+        e->written_count * sizeof(struct stm_undo_s);
+    increment_total_allocated(-byte_len);
+    free(e);
+}
+/* *** MISC *** */
+
 
 /* General helper: copies objects into our own segment, from some
    source described by a range of 'struct stm_undo_s'.  Maybe later
@@ -150,6 +176,9 @@
         /* make our page write-ready */
         page_mark_accessible(my_segnum, pagenum);
 
+        /* account for this page now: XXX */
+        /* increment_total_allocated(4096); */
+
         if (copy_from_segnum == -1) {
             /* this page is only accessible in the sharing segment so far (new
                allocation). We can thus simply mark it accessible here. */
@@ -390,6 +419,7 @@
     return !needs_abort;
 }
 
+
 static struct stm_commit_log_entry_s *_create_commit_log_entry(void)
 {
     /* puts all modified_old_objects in a new commit log entry */
@@ -399,9 +429,7 @@
     struct list_s *list = STM_PSEGMENT->modified_old_objects;
     OPT_ASSERT((list_count(list) % 3) == 0);
     size_t count = list_count(list) / 3;
-    size_t byte_len = sizeof(struct stm_commit_log_entry_s) +
-        count * sizeof(struct stm_undo_s);
-    struct stm_commit_log_entry_s *result = malloc(byte_len);
+    struct stm_commit_log_entry_s *result = malloc_cle(count);
 
     result->next = NULL;
     result->segment_num = STM_SEGMENT->segment_num;
@@ -427,7 +455,7 @@
     while (1) {
         if (!_stm_validate()) {
             if (new != INEV_RUNNING)
-                free(new);
+                free_cle((struct stm_commit_log_entry_s*)new);
             stm_abort_transaction();
         }
 
@@ -601,8 +629,10 @@
 
         /* make backup slice: */
         char *bk_slice = malloc(slice_sz);
+        increment_total_allocated(slice_sz);
         memcpy(bk_slice, realobj + slice_off, slice_sz);
 
+        /* !! follows layout of "struct stm_undo_s" !! */
         STM_PSEGMENT->modified_old_objects = list_append3(
             STM_PSEGMENT->modified_old_objects,
             (uintptr_t)obj,     /* obj */
@@ -860,7 +890,7 @@
         dprintf(("reset_modified_from_backup_copies(%d): obj=%p off=%lu bk=%p\n",
                  segment_num, obj, SLICE_OFFSET(undo->slice), undo->backup));
 
-        free(undo->backup);
+        free_bk(undo);
     }
 
     /* check that all objects have the GCFLAG_WRITE_BARRIER afterwards */
@@ -1089,7 +1119,7 @@
         if (get_page_status_in(i, page) != PAGE_NO_ACCESS) {
             /* shared or private, but never segfault */
             char *dst = REAL_ADDRESS(get_segment_base(i), frag);
-            dprintf(("-> flush %p to seg %lu, sz=%lu\n", frag, i, frag_size));
+            //dprintf(("-> flush %p to seg %lu, sz=%lu\n", frag, i, frag_size));
             memcpy(dst, src, frag_size);
         }
     }
diff --git a/rpython/translator/stm/src_stm/stm/core.h b/rpython/translator/stm/src_stm/stm/core.h
--- a/rpython/translator/stm/src_stm/stm/core.h
+++ b/rpython/translator/stm/src_stm/stm/core.h
@@ -149,6 +149,8 @@
 #define SLICE_SIZE(slice)     ((int)((slice) & 0xFFFF))
 #define NEW_SLICE(offset, size) (((uint64_t)(offset)) << 16 | (size))
 
+
+
 /* The model is: we have a global chained list, from 'commit_log_root',
    of 'struct stm_commit_log_entry_s' entries.  Every one is fully read-only
    apart from the 'next' field.  Every one stands for one
@@ -168,6 +170,11 @@
 
 static struct stm_commit_log_entry_s commit_log_root;
 
+static void free_bk(struct stm_undo_s *undo);
+static struct stm_commit_log_entry_s *malloc_cle(long entries);
+static void free_cle(struct stm_commit_log_entry_s *e);
+
+
 #ifndef STM_TESTS
 static char *stm_object_pages;
 #else
diff --git a/rpython/translator/stm/src_stm/stm/gcpage.c b/rpython/translator/stm/src_stm/stm/gcpage.c
--- a/rpython/translator/stm/src_stm/stm/gcpage.c
+++ b/rpython/translator/stm/src_stm/stm/gcpage.c
@@ -308,29 +308,53 @@
        some of the pages) */
 
     long i;
+    struct list_s *uniques = list_create();
+
     for (i = 1; i < NB_SEGMENTS; i++) {
         char *base = get_segment_base(i);
 
+        OPT_ASSERT(list_is_empty(uniques));
+
+        /* the mod_old_objects list may contain many slices for
+           the same *huge* object. it seems worth to first construct
+           a list of unique objects. we use the VISITED flag for this
+           purpose as it is never set outside of seg0: */
         struct list_s *lst = get_priv_segment(i)->modified_old_objects;
+
         struct stm_undo_s *modified = (struct stm_undo_s *)lst->items;
         struct stm_undo_s *end = (struct stm_undo_s *)(lst->items + lst->count);
-
         for (; modified < end; modified++) {
             object_t *obj = modified->object;
-            /* All modified objs have all pages accessible for now.
-               This is because we create a backup of the whole obj
-               and thus make all pages accessible. */
-            assert_obj_accessible_in(i, obj);
+            struct object_s *dst = (struct object_s*)REAL_ADDRESS(base, obj);
 
-            assert(!is_new_object(obj)); /* should never be in that list */
+            if (!(dst->stm_flags & GCFLAG_VISITED)) {
+                LIST_APPEND(uniques, obj);
+                dst->stm_flags |= GCFLAG_VISITED;
+            }
+        }
 
-            if (!mark_visited_test_and_set(obj)) {
-                /* trace shared, committed version */
-                mark_and_trace(obj, stm_object_pages);
-            }
-            mark_and_trace(obj, base); /* private, modified version */
-        }
+        LIST_FOREACH_R(uniques, object_t*,
+           ({
+               /* clear the VISITED flags again and actually visit them */
+               struct object_s *dst = (struct object_s*)REAL_ADDRESS(base, item);
+               dst->stm_flags &= ~GCFLAG_VISITED;
+
+               /* All modified objs have all pages accessible for now.
+                  This is because we create a backup of the whole obj
+                  and thus make all pages accessible. */
+               assert_obj_accessible_in(i, item);
+
+               assert(!is_new_object(item)); /* should never be in that list */
+
+               if (!mark_visited_test_and_set(item)) {
+                   /* trace shared, committed version */
+                   mark_and_trace(item, stm_object_pages);
+               }
+               mark_and_trace(item, base); /* private, modified version */
+           }));
+
+        list_clear(uniques);
     }
+    LIST_FREE(uniques);
 }
 
 static void mark_visit_from_roots(void)
@@ -485,7 +509,7 @@
 {
     /* this is called by _stm_largemalloc_sweep() */
     object_t *obj = (object_t *)(data - stm_object_pages);
-    dprintf(("keep obj %p ? -> %d\n", obj, mark_visited_test(obj)));
+    //dprintf(("keep obj %p ? -> %d\n", obj, mark_visited_test(obj)));
     if (!mark_visited_test_and_clear(obj)) {
         /* This is actually needed in order to avoid random write-read
            conflicts with objects read and freed long in the past.
@@ -511,7 +535,7 @@
     /* XXX: identical to largemalloc_keep_object_at()? */
     /* this is called by _stm_smallmalloc_sweep() */
     object_t *obj = (object_t *)(data - stm_object_pages);
-    dprintf(("keep small obj %p ? -> %d\n", obj, mark_visited_test(obj)));
+    //dprintf(("keep small obj %p ? -> %d\n", obj, mark_visited_test(obj)));
     if (!mark_visited_test_and_clear(obj)) {
         /* This is actually needed in order to avoid random write-read
            conflicts with objects read and freed long in the past.
@@ -558,8 +582,14 @@
         cl = next;
         rev_num = cl->rev_num;
 
+        /* free bk copies of entries: */
+        long count = cl->written_count;
+        while (count-->0) {
+            free_bk(&cl->written[count]);
+        }
+
         next = cl->next;
-        free(cl);
+        free_cle(cl);
 
         if (next == INEV_RUNNING) {
             was_inev = true;
             break;
diff --git a/rpython/translator/stm/src_stm/stm/nursery.c b/rpython/translator/stm/src_stm/stm/nursery.c
--- a/rpython/translator/stm/src_stm/stm/nursery.c
+++ b/rpython/translator/stm/src_stm/stm/nursery.c
@@ -104,7 +104,7 @@
         nobj = (object_t *)allocate_outside_nursery_small(size);
     }
 
-    dprintf(("move %p -> %p\n", obj, nobj));
+    //dprintf(("move %p -> %p\n", obj, nobj));
 
     /* copy the object */
  copy_large_object:;
@@ -175,7 +175,7 @@
 {
     assert(!_is_young(obj));
 
-    dprintf(("_collect_now: %p\n", obj));
+    //dprintf(("_collect_now: %p\n", obj));
 
     assert(!(obj->stm_flags & GCFLAG_WRITE_BARRIER));
 
diff --git a/rpython/translator/stm/src_stm/stm/pages.c b/rpython/translator/stm/src_stm/stm/pages.c
--- a/rpython/translator/stm/src_stm/stm/pages.c
+++ b/rpython/translator/stm/src_stm/stm/pages.c
@@ -75,9 +75,6 @@
 
     /* set this flag *after* we un-protected it, because XXX later */
     set_page_status_in(segnum, pagenum, PAGE_ACCESSIBLE);
-
-    // XXX: maybe?
-    //increment_total_allocated(4096);
 }
 
 __attribute__((unused))
@@ -95,7 +92,4 @@
         perror("mprotect");
         stm_fatalerror("mprotect failed! Consider running 'sysctl vm.max_map_count=16777216'");
     }
-
-    // XXX: maybe?
-    //increment_total_allocated(-4096);
 }
diff --git a/rpython/translator/stm/src_stm/stm/smallmalloc.c b/rpython/translator/stm/src_stm/stm/smallmalloc.c
--- a/rpython/translator/stm/src_stm/stm/smallmalloc.c
+++ b/rpython/translator/stm/src_stm/stm/smallmalloc.c
@@ -181,8 +181,8 @@
             (_allocate_small_slowpath(size) - stm_object_pages);
 
     *fl = result->next;
-    dprintf(("allocate_outside_nursery_small(%lu): %p\n",
-             size, (char*)((char *)result - stm_object_pages)));
+    /* dprintf(("allocate_outside_nursery_small(%lu): %p\n", */
+    /*          size, (char*)((char *)result - stm_object_pages))); */
 
     return (stm_char*)
         ((char *)result - stm_object_pages);
 }
_______________________________________________
pypy-commit mailing list
pypy-commit@python.org
https://mail.python.org/mailman/listinfo/pypy-commit
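
A note on the bookkeeping pattern this import introduces: free_bk(), malloc_cle() and
free_cle() always pair the allocation or release of a backup slice / commit-log entry with a
matching increment_total_allocated() call for the same byte count, and the hunk around
line 601/629 of core.c does the same for the backup slice (slice_sz up front,
-SLICE_SIZE(undo->slice) in free_bk()), so the total stays balanced once entries are swept.
The sketch below only illustrates that pattern in isolation; it is not code from this
changeset. The standalone counter, the entry_s struct and the entry_malloc()/entry_free()
names are invented for the example, which compiles on its own with a plain C compiler:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* stand-in for stmgc's increment_total_allocated(); here just a counter */
    static int64_t total_allocated = 0;
    static void increment_total_allocated(int64_t delta) { total_allocated += delta; }

    /* hypothetical variable-sized entry: a header plus 'count' payload slots */
    struct entry_s { size_t count; uint64_t payload[]; };

    /* allocate and account for the exact number of bytes requested */
    static struct entry_s *entry_malloc(size_t count)
    {
        size_t byte_len = sizeof(struct entry_s) + count * sizeof(uint64_t);
        struct entry_s *e = malloc(byte_len);
        if (e == NULL)
            abort();
        e->count = count;
        increment_total_allocated((int64_t)byte_len);
        return e;
    }

    /* recompute the same byte count from the header and subtract it again */
    static void entry_free(struct entry_s *e)
    {
        size_t byte_len = sizeof(struct entry_s) + e->count * sizeof(uint64_t);
        increment_total_allocated(-(int64_t)byte_len);
        free(e);
    }

    int main(void)
    {
        struct entry_s *e = entry_malloc(16);
        entry_free(e);
        /* prints 0: every accounted byte was released again */
        printf("leftover accounted bytes: %lld\n", (long long)total_allocated);
        return 0;
    }

The key point, as in malloc_cle()/free_cle() above, is that the byte count is recomputed from
the entry itself at free time, so allocation and release always adjust the counter by the
same amount.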