Author: Remi Meier <remi.me...@inf.ethz.ch>
Branch: 
Changeset: r1344:69b4538466fc
Date: 2014-09-04 10:35 +0200
http://bitbucket.org/pypy/stmgc/changeset/69b4538466fc/

Log:    add minimal privatization in signal handler

diff --git a/c8/stm/core.c b/c8/stm/core.c
--- a/c8/stm/core.c
+++ b/c8/stm/core.c
@@ -9,14 +9,31 @@
 
 void _signal_handler(int sig, siginfo_t *siginfo, void *context)
 {
-    /* make PROT_READWRITE again and validate */
-
-    if (siginfo->si_addr == NULL) { /* actual segfault */
+    char *addr = siginfo->si_addr;
+    dprintf(("si_addr: %p\n", addr));
+    if (addr == NULL) { /* actual segfault */
         /* send to GDB */
         kill(getpid(), SIGINT);
     }
-    /* didn't know what to do with it: send to GDB */
-    kill(getpid(), SIGINT);
+    /* make PROT_READWRITE again and validate */
+    int segnum = get_segment_of_linear_address(addr);
+    OPT_ASSERT(segnum == STM_SEGMENT->segment_num);
+    dprintf(("-> segment: %d\n", segnum));
+    char *seg_base = STM_SEGMENT->segment_base;
+    uintptr_t pagenum = ((char*)addr - seg_base) / 4096UL;
+
+    /* XXX: missing synchronisation: we may change protection, then
+       another thread changes it back, then we try to privatize that
+       calls page_copy() and traps */
+    mprotect(seg_base + pagenum * 4096UL, 4096, PROT_READ|PROT_WRITE);
+    page_privatize(pagenum);
+
+    /* XXX: ... what can go wrong when we abort from inside
+       the signal handler? */
+
+    /* make sure we are up to date in this (and all other) pages */
+    stm_validate(NULL);
+    return;
 }
 
 /* ############# commit log ############# */
@@ -125,11 +142,11 @@
     for (i = 0; i < NB_SEGMENTS; i++) {
         if (i == STM_SEGMENT->segment_num)
             continue;
-
+        /* XXX: only do it if not already PROT_NONE */
         char *segment_base = get_segment_base(i);
         mprotect(segment_base + first_page * 4096,
                  (end_page - first_page + 1) * 4096, PROT_NONE);
-        dprintf(("prot %lu, len=%lu in seg %d\n", first_page, (end_page - first_page + 1), i));
+        dprintf(("prot %lu, len=%lu in seg %lu\n", first_page, (end_page - first_page + 1), i));
     }
 
     LIST_APPEND(STM_PSEGMENT->modified_old_objects, obj);
diff --git a/c8/stm/core.h b/c8/stm/core.h
--- a/c8/stm/core.h
+++ b/c8/stm/core.h
@@ -95,6 +95,12 @@
         get_segment_base(segment_num), STM_PSEGMENT);
 }
 
+static inline int get_segment_of_linear_address(char *addr) {
+    assert(addr > stm_object_pages && addr < stm_object_pages + TOTAL_MEMORY);
+    return (addr - stm_object_pages) / (NB_PAGES * 4096UL);
+}
+
+
 static bool _is_tl_registered(stm_thread_local_t *tl);
 static bool _seems_to_be_running_transaction(void);
 
diff --git a/c8/stm/gcpage.c b/c8/stm/gcpage.c
--- a/c8/stm/gcpage.c
+++ b/c8/stm/gcpage.c
@@ -51,5 +51,7 @@
     object_t *o = (object_t *)(p - stm_object_pages);
     o->stm_flags = GCFLAG_WRITE_BARRIER;
 
+    dprintf(("allocate_old(%lu): %p, seg=%d\n", size_rounded_up, p,
+             get_segment_of_linear_address(p)));
     return o;
 }
diff --git a/c8/stm/pages.c b/c8/stm/pages.c
--- a/c8/stm/pages.c
+++ b/c8/stm/pages.c
@@ -68,3 +68,38 @@
                            count * 4096UL, pagenum);
     }
 }
+
+static void page_privatize(uintptr_t pagenum)
+{
+    /* check this thread's 'pages_privatized' bit */
+    uint64_t bitmask = 1UL << (STM_SEGMENT->segment_num - 1);
+    volatile struct page_shared_s *ps = (volatile struct page_shared_s *)
+        &pages_privatized[pagenum - PAGE_FLAG_START];
+    if (ps->by_segment & bitmask) {
+        /* the page is already privatized; nothing to do */
+        return;
+    }
+
+    long i;
+    for (i = 0; i < NB_SEGMENTS; i++) {
+        spinlock_acquire(get_priv_segment(i)->privatization_lock);
+    }
+
+    /* add this thread's 'pages_privatized' bit */
+    ps->by_segment |= bitmask;
+
+    /* "unmaps" the page to make the address space location correspond
+       again to its underlying file offset (XXX later we should again
+       attempt to group together many calls to d_remap_file_pages() in
+       succession) */
+    uintptr_t pagenum_in_file = NB_PAGES * STM_SEGMENT->segment_num + pagenum;
+    char *new_page = stm_object_pages + pagenum_in_file * 4096UL;
+    d_remap_file_pages(new_page, 4096, pagenum_in_file);
+
+    /* copy the content from the shared (segment 0) source */
+    pagecopy(new_page, stm_object_pages + pagenum * 4096UL);
+
+    for (i = NB_SEGMENTS-1; i >= 0; i--) {
+        spinlock_release(get_priv_segment(i)->privatization_lock);
+    }
+}
diff --git a/c8/stm/pages.h b/c8/stm/pages.h
--- a/c8/stm/pages.h
+++ b/c8/stm/pages.h
@@ -21,6 +21,8 @@
 static struct page_shared_s pages_privatized[PAGE_FLAG_END - PAGE_FLAG_START];
 
 static void pages_initialize_shared(uintptr_t pagenum, uintptr_t count);
+static void page_privatize(uintptr_t pagenum);
+
 
 static inline bool is_private_page(long segnum, uintptr_t pagenum)
 {
diff --git a/c8/stm/setup.c b/c8/stm/setup.c
--- a/c8/stm/setup.c
+++ b/c8/stm/setup.c
@@ -70,11 +70,11 @@
                      PROT_NONE);
 
         if (i != 0) {
-            /* let's give all pages to segment 0 at first and make them
-               write-inaccessible everywhere else */
+            /* let's give all pages to segment 0 at first, all others
+               need to trap and look for the backup copy */
             mprotect(segment_base + END_NURSERY_PAGE * 4096,
                      (NB_PAGES - END_NURSERY_PAGE) * 4096,
-                     PROT_READ);
+                     PROT_NONE);
         }
     }
 }
@@ -232,13 +232,12 @@
         tl->prev = stm_all_thread_locals->prev;
         stm_all_thread_locals->prev->next = tl;
         stm_all_thread_locals->prev = tl;
-        num = tl->prev->associated_segment_num;
+        num = (tl->prev->associated_segment_num + 1) % NB_SEGMENTS;
     }
 
     /* assign numbers consecutively, but that's for tests; we could also
        assign the same number to all of them and they would get their own
        numbers automatically. */
-    num = (num + 1) % NB_SEGMENTS;
     tl->associated_segment_num = num;
     *_get_cpth(tl) = pthread_self();
     _init_shadow_stack(tl);
diff --git a/c8/stm/sync.c b/c8/stm/sync.c
--- a/c8/stm/sync.c
+++ b/c8/stm/sync.c
@@ -142,3 +142,8 @@
     set_gs_register(get_segment_base(tl->associated_segment_num));
     assert(STM_SEGMENT->running_thread == tl);
 }
+
+void _stm_test_switch_segment(int segnum)
+{
+    set_gs_register(get_segment_base(segnum));
+}
diff --git a/c8/stmgc.h b/c8/stmgc.h
--- a/c8/stmgc.h
+++ b/c8/stmgc.h
@@ -80,6 +80,7 @@
 
 long stm_can_move(object_t *obj);
 void _stm_test_switch(stm_thread_local_t *tl);
+void _stm_test_switch_segment(int segnum);
 void _push_obj_to_other_segments(object_t *obj);
 
 char *_stm_get_segment_base(long index);
diff --git a/c8/test/support.py b/c8/test/support.py
--- a/c8/test/support.py
+++ b/c8/test/support.py
@@ -51,6 +51,7 @@
 long stm_can_move(object_t *obj);
 char *_stm_real_address(object_t *o);
 void _stm_test_switch(stm_thread_local_t *tl);
+void _stm_test_switch_segment(int segnum);
 
 void clear_jmpbuf(stm_thread_local_t *tl);
 long stm_start_transaction(stm_thread_local_t *tl);
@@ -395,6 +396,9 @@
         lib.stm_setup()
         self.tls = [_allocate_thread_local() for i in range(self.NB_THREADS)]
         self.current_thread = 0
+        # force-switch back to segment 0 so that when we do something
+        # outside of transactions before the test, it happens in seg0
+        self.switch_to_segment(0)
 
     def teardown_method(self, meth):
         lib.stmcb_expand_marker = ffi.NULL
@@ -452,6 +456,9 @@
             lib._stm_test_switch(tl2)
             stm_validate() # can raise
 
+    def switch_to_segment(self, seg_num):
+        lib._stm_test_switch_segment(seg_num)
+
     def push_root(self, o):
         assert ffi.typeof(o) == ffi.typeof("object_t *")
         tl = self.tls[self.current_thread]
diff --git a/c8/test/test_basic.py b/c8/test/test_basic.py
--- a/c8/test/test_basic.py
+++ b/c8/test/test_basic.py
@@ -56,7 +56,7 @@
 
     def test_allocate_old(self):
         lp1 = stm_allocate_old(16)
-        self.switch(1)
+        self.switch(1) # actually has not much of an effect...
         lp2 = stm_allocate_old(16)
         assert lp1 != lp2
 
_______________________________________________
pypy-commit mailing list
pypy-commit@python.org
https://mail.python.org/mailman/listinfo/pypy-commit

Reply via email to