Author: Remi Meier <remi.me...@gmail.com>
Branch: c7
Changeset: r637:cae45c13aee6
Date: 2014-01-18 11:39 +0100
http://bitbucket.org/pypy/stmgc/changeset/cae45c13aee6/

Log:    add spinlock implementation of reader-writer lock

diff --git a/c7/Makefile b/c7/Makefile
--- a/c7/Makefile
+++ b/c7/Makefile
@@ -14,9 +14,9 @@
        rm -f $(BUILD_EXE) $(DEBUG_EXE) $(RELEASE_EXE)
 
 
-H_FILES = core.h list.h pagecopy.h
+H_FILES = core.h list.h pagecopy.h reader_writer_lock.h
 
-C_FILES = core.c list.c pagecopy.c
+C_FILES = core.c list.c pagecopy.c reader_writer_lock.c
 
 DEBUG = -g 
 
diff --git a/c7/core.c b/c7/core.c
--- a/c7/core.c
+++ b/c7/core.c
@@ -13,6 +13,7 @@
 #include "core.h"
 #include "list.h"
 #include "pagecopy.h"
+#include "reader_writer_lock.h"
 
 
 #define NB_PAGES            (256*256)    // 256MB
@@ -142,41 +143,40 @@
 
 /************************************************************/
 
+rwticket rw_shared_lock;
+
 /* a multi-reader, single-writer lock: transactions normally take a reader
    lock, so don't conflict with each other; when we need to do a global GC,
    we take a writer lock to "stop the world".  Note the initializer here,
    which should give the correct priority for stm_possible_safe_point(). */
-static pthread_rwlock_t rwlock_shared;
+
 
 struct tx_descriptor *in_single_thread = NULL;
 
 void stm_start_shared_lock(void)
 {
-    int err = pthread_rwlock_rdlock(&rwlock_shared);
-    if (err != 0)
-        abort();
+    rwticket_rdlock(&rw_shared_lock);
 }
 
-void stm_stop_lock(void)
+void stm_stop_shared_lock(void)
 {
-    int err = pthread_rwlock_unlock(&rwlock_shared);
-    if (err != 0)
-        abort();
+    rwticket_rdunlock(&rw_shared_lock);
+}
+
+void stm_stop_exclusive_lock(void)
+{
+    rwticket_wrunlock(&rw_shared_lock);
 }
 
 void stm_start_exclusive_lock(void)
 {
-    int err = pthread_rwlock_wrlock(&rwlock_shared);
-    if (err != 0)
-        abort();
-    if (_STM_TL2->need_abort)
-        stm_abort_transaction();
+    rwticket_wrlock(&rw_shared_lock);
 }
 
 void _stm_start_safe_point(void)
 {
     assert(!_STM_TL2->need_abort);
-    stm_stop_lock();
+    stm_stop_shared_lock();
 }
 
 void _stm_stop_safe_point(void)
@@ -376,9 +376,12 @@
     uintptr_t lock_idx = (((uintptr_t)obj) >> 4) - READMARKER_START;
     uint8_t previous;
     while ((previous = __sync_lock_test_and_set(&write_locks[lock_idx], 1))) {
-        usleep(1);              /* XXXXXX */
-        if (!(previous = __sync_lock_test_and_set(&write_locks[lock_idx], 1))) 
-            break;
+        /* XXXXXX */
+        //_stm_start_semi_safe_point();
+        usleep(1);
+        //_stm_stop_semi_safe_point();
+        //if (!(previous = __sync_lock_test_and_set(&write_locks[lock_idx], 1)))
+        //    break;
         stm_abort_transaction();
         /* XXX: only abort if we are younger */
         spin_loop();
@@ -583,13 +586,8 @@
 
 
 void stm_setup(void)
-{    
-    pthread_rwlockattr_t attr;
-    pthread_rwlockattr_init(&attr);
-    pthread_rwlockattr_setkind_np(&attr,
-                                  PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
-    pthread_rwlock_init(&rwlock_shared, &attr);
-    pthread_rwlockattr_destroy(&attr);
+{
+    memset(&rw_shared_lock, 0, sizeof(rwticket));
 
     /* Check that some values are acceptable */
     assert(4096 <= ((uintptr_t)_STM_TL1));
@@ -685,8 +683,8 @@
 
 void _stm_teardown_thread(void)
 {
-    assert(!pthread_rwlock_trywrlock(&rwlock_shared));
-    assert(!pthread_rwlock_unlock(&rwlock_shared));
+    assert(!rwticket_wrtrylock(&rw_shared_lock));
+    assert(!rwticket_wrunlock(&rw_shared_lock));
     
     stm_list_free(_STM_TL2->modified_objects);
     _STM_TL2->modified_objects = NULL;
@@ -708,7 +706,6 @@
     munmap(object_pages, TOTAL_MEMORY);
     memset(flag_page_private, 0, sizeof(flag_page_private));
     memset(write_locks, 0, sizeof(write_locks));
-    pthread_rwlock_destroy(&rwlock_shared);
     object_pages = NULL;
 }
 
@@ -794,7 +791,7 @@
 void stm_stop_transaction(void)
 {
     assert(_STM_TL2->running_transaction);
-    stm_stop_lock();
+    stm_stop_shared_lock();
     stm_start_exclusive_lock();
 
     _STM_TL1->jmpbufptr = NULL;          /* cannot abort any more */
@@ -899,7 +896,7 @@
     /* } */
 
     _STM_TL2->running_transaction = 0;
-    stm_stop_lock();
+    stm_stop_exclusive_lock();
     fprintf(stderr, "%c", 'C'+_STM_TL2->thread_num*32);
 }
 
@@ -978,7 +975,7 @@
     assert(_STM_TL1->jmpbufptr != NULL);
     assert(_STM_TL1->jmpbufptr != (jmpbufptr_t *)-1);   /* for tests only */
     _STM_TL2->running_transaction = 0;
-    stm_stop_lock();
+    stm_stop_shared_lock();
     fprintf(stderr, "%c", 'A'+_STM_TL2->thread_num*32);
 
     /* reset all the modified objects (incl. re-adding GCFLAG_WRITE_BARRIER) */
diff --git a/c7/demo2.c b/c7/demo2.c
--- a/c7/demo2.c
+++ b/c7/demo2.c
@@ -57,7 +57,9 @@
             r_n = r_n->next;
             stm_read((objptr_t)r_n);
             sum += r_n->value;
-        
+
+            _stm_start_safe_point();
+            _stm_stop_safe_point();
             if (prev >= r_n->value) {
                 stm_stop_transaction();
                 return -1;
@@ -184,7 +186,6 @@
         sem_post(&initialized);
         status = sem_wait(&go);
         assert(status == 0);
-
     }
     
     while (check_sorted() == -1) {
diff --git a/c7/test/support.py b/c7/test/support.py
--- a/c7/test/support.py
+++ b/c7/test/support.py
@@ -7,9 +7,9 @@
 parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
 
 header_files = [os.path.join(parent_dir, _n) for _n in
-                "core.h pagecopy.h list.h".split()]
+                "core.h pagecopy.h list.h reader_writer_lock.h".split()]
 source_files = [os.path.join(parent_dir, _n) for _n in
-                "core.c pagecopy.c list.c".split()]
+                "core.c pagecopy.c list.c reader_writer_lock.c".split()]
 
 _pycache_ = os.path.join(parent_dir, 'test', '__pycache__')
 if os.path.exists(_pycache_):
_______________________________________________
pypy-commit mailing list
pypy-commit@python.org
https://mail.python.org/mailman/listinfo/pypy-commit

Reply via email to