Author: cognet
Date: Thu Aug  9 12:07:37 2018
New Revision: 337531
URL: https://svnweb.freebsd.org/changeset/base/337531

Log:
  Import CK as of commit 08813496570879fbcc2adcdd9ddc0a054361bfde, mostly
  to avoid using lwsync on ppc32.

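With this import, the ppc32 header emits lwsync for the lighter fences only when
CK_MD_PPC32_LWSYNC is defined; otherwise every one of those fences falls back to the
full sync instruction. How that macro gets set is a build-configuration detail outside
this diff; a purely hypothetical opt-in for a ppc32 target known to handle lwsync could
look like:

    /*
     * Hypothetical opt-in only -- CK's build machinery normally decides the
     * CK_MD_* settings.  Defining this selects "lwsync" for the acquire/
     * release-style fences in the ppc32 header; leaving it undefined selects
     * the heavier "sync".
     */
    #define CK_MD_PPC32_LWSYNC
    #include <ck_pr.h>
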
Modified:
  vendor-sys/ck/dist/include/gcc/ppc/ck_pr.h
  vendor-sys/ck/dist/include/spinlock/hclh.h
  vendor-sys/ck/dist/src/ck_barrier_combining.c

Modified: vendor-sys/ck/dist/include/gcc/ppc/ck_pr.h
==============================================================================
--- vendor-sys/ck/dist/include/gcc/ppc/ck_pr.h  Thu Aug  9 11:46:12 2018        (r337530)
+++ vendor-sys/ck/dist/include/gcc/ppc/ck_pr.h  Thu Aug  9 12:07:37 2018        (r337531)
@@ -67,21 +67,29 @@ ck_pr_stall(void)
                __asm__ __volatile__(I ::: "memory");   \
        }
 
-CK_PR_FENCE(atomic, "lwsync")
-CK_PR_FENCE(atomic_store, "lwsync")
+#ifdef CK_MD_PPC32_LWSYNC
+#define CK_PR_LWSYNCOP "lwsync"
+#else /* CK_MD_PPC32_LWSYNC_DISABLE */
+#define CK_PR_LWSYNCOP "sync"
+#endif
+
+CK_PR_FENCE(atomic, CK_PR_LWSYNCOP)
+CK_PR_FENCE(atomic_store, CK_PR_LWSYNCOP)
 CK_PR_FENCE(atomic_load, "sync")
-CK_PR_FENCE(store_atomic, "lwsync")
-CK_PR_FENCE(load_atomic, "lwsync")
-CK_PR_FENCE(store, "lwsync")
+CK_PR_FENCE(store_atomic, CK_PR_LWSYNCOP)
+CK_PR_FENCE(load_atomic, CK_PR_LWSYNCOP)
+CK_PR_FENCE(store, CK_PR_LWSYNCOP)
 CK_PR_FENCE(store_load, "sync")
-CK_PR_FENCE(load, "lwsync")
-CK_PR_FENCE(load_store, "lwsync")
+CK_PR_FENCE(load, CK_PR_LWSYNCOP)
+CK_PR_FENCE(load_store, CK_PR_LWSYNCOP)
 CK_PR_FENCE(memory, "sync")
-CK_PR_FENCE(acquire, "lwsync")
-CK_PR_FENCE(release, "lwsync")
-CK_PR_FENCE(acqrel, "lwsync")
-CK_PR_FENCE(lock, "lwsync")
-CK_PR_FENCE(unlock, "lwsync")
+CK_PR_FENCE(acquire, CK_PR_LWSYNCOP)
+CK_PR_FENCE(release, CK_PR_LWSYNCOP)
+CK_PR_FENCE(acqrel, CK_PR_LWSYNCOP)
+CK_PR_FENCE(lock, CK_PR_LWSYNCOP)
+CK_PR_FENCE(unlock, CK_PR_LWSYNCOP)
+
+#undef CK_PR_LWSYNCOP
 
 #undef CK_PR_FENCE
 

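The CK_PR_FENCE macro whose tail is visible in the hunk context above stamps out one
small inline fence function per barrier type, with the opcode passed in as a string.
A rough, simplified sketch of what an invocation such as CK_PR_FENCE(atomic,
CK_PR_LWSYNCOP) boils down to after preprocessing (the function naming follows the
macro's apparent convention and is not quoted verbatim from the header):

    /*
     * Sketch of a generated fence: with CK_MD_PPC32_LWSYNC defined the opcode
     * string is "lwsync", otherwise the heavier "sync".  CK_CC_INLINE is CK's
     * inline-attribute macro; the "memory" clobber also makes each fence act
     * as a compiler barrier.
     */
    CK_CC_INLINE static void
    ck_pr_fence_strict_atomic(void)
    {
        __asm__ __volatile__("lwsync" ::: "memory");    /* or "sync" */
    }
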
Modified: vendor-sys/ck/dist/include/spinlock/hclh.h
==============================================================================
--- vendor-sys/ck/dist/include/spinlock/hclh.h  Thu Aug  9 11:46:12 2018        (r337530)
+++ vendor-sys/ck/dist/include/spinlock/hclh.h  Thu Aug  9 12:07:37 2018        (r337531)
@@ -81,6 +81,8 @@ ck_spinlock_hclh_lock(struct ck_spinlock_hclh **glob_q
        thread->wait = true;
        thread->splice = false;
        thread->cluster_id = (*local_queue)->cluster_id;
+       /* Make sure previous->previous doesn't appear to be NULL */
+       thread->previous = *local_queue;
 
        /* Serialize with respect to update of local queue. */
        ck_pr_fence_store_atomic();
@@ -91,13 +93,15 @@ ck_spinlock_hclh_lock(struct ck_spinlock_hclh **glob_q
 
        /* Wait until previous thread from the local queue is done with lock. */
        ck_pr_fence_load();
-       if (previous->previous != NULL &&
-           previous->cluster_id == thread->cluster_id) {
-               while (ck_pr_load_uint(&previous->wait) == true)
+       if (previous->previous != NULL) {
+               while (ck_pr_load_uint(&previous->wait) == true &&
+                       ck_pr_load_int(&previous->cluster_id) == thread->cluster_id &&
+                       ck_pr_load_uint(&previous->splice) == false)
                        ck_pr_stall();
 
                /* We're head of the global queue, we're done */
-               if (ck_pr_load_uint(&previous->splice) == false)
+               if (ck_pr_load_int(&previous->cluster_id) == thread->cluster_id &&
+                               ck_pr_load_uint(&previous->splice) == false)
                        return;
        }
 

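The effect of the hclh hunks above: a freshly enqueued node now records its local
predecessor up front, so previous->previous can never spuriously read as NULL, and the
waiter re-loads the predecessor's cluster_id and splice flag on every spin instead of
sampling them once before entering the loop. Condensed from the patched function
(declarations and the global-queue splice path elided, not a standalone example):

    /* New: link to the current local tail before publishing this node. */
    thread->previous = *local_queue;

    if (previous->previous != NULL) {
        /* Keep spinning only while the predecessor still holds the lock,
         * is in our cluster, and has not been spliced onto the global queue. */
        while (ck_pr_load_uint(&previous->wait) == true &&
            ck_pr_load_int(&previous->cluster_id) == thread->cluster_id &&
            ck_pr_load_uint(&previous->splice) == false)
                ck_pr_stall();

        /* Same-cluster predecessor finished without splicing: we're head
         * of the global queue, we're done. */
        if (ck_pr_load_int(&previous->cluster_id) == thread->cluster_id &&
            ck_pr_load_uint(&previous->splice) == false)
                return;
    }
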
Modified: vendor-sys/ck/dist/src/ck_barrier_combining.c
==============================================================================
--- vendor-sys/ck/dist/src/ck_barrier_combining.c       Thu Aug  9 11:46:12 2018        (r337530)
+++ vendor-sys/ck/dist/src/ck_barrier_combining.c       Thu Aug  9 12:07:37 2018        (r337531)
@@ -35,7 +35,7 @@ struct ck_barrier_combining_queue {
        struct ck_barrier_combining_group *tail;
 };
 
-CK_CC_INLINE static struct ck_barrier_combining_group *
+static struct ck_barrier_combining_group *
 ck_barrier_combining_queue_dequeue(struct ck_barrier_combining_queue *queue)
 {
        struct ck_barrier_combining_group *front = NULL;
@@ -48,7 +48,7 @@ ck_barrier_combining_queue_dequeue(struct ck_barrier_c
        return front;
 }
 
-CK_CC_INLINE static void
+static void
 ck_barrier_combining_insert(struct ck_barrier_combining_group *parent,
     struct ck_barrier_combining_group *tnode,
     struct ck_barrier_combining_group **child)
@@ -72,7 +72,7 @@ ck_barrier_combining_insert(struct ck_barrier_combinin
  * into the barrier's tree. We use a queue to implement this
  * traversal.
  */
-CK_CC_INLINE static void
+static void
 ck_barrier_combining_queue_enqueue(struct ck_barrier_combining_queue *queue,
     struct ck_barrier_combining_group *node_value)
 {
@@ -185,10 +185,10 @@ ck_barrier_combining_aux(struct ck_barrier_combining *
                ck_pr_fence_store();
                ck_pr_store_uint(&tnode->sense, ~tnode->sense);
        } else {
-               ck_pr_fence_memory();
                while (sense != ck_pr_load_uint(&tnode->sense))
                        ck_pr_stall();
        }
+       ck_pr_fence_memory();
 
        return;
 }
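The last hunk moves the full memory fence in ck_barrier_combining_aux() out of the
waiter-only branch, so both the thread that flips tnode->sense and the threads that
spin on it issue ck_pr_fence_memory() before leaving the barrier. A condensed view of
the resulting tail of the function (surrounding logic elided, and last_arriver is a
hypothetical shorthand for the real arrival-count check, not standalone code):

    if (last_arriver) {
        ck_pr_fence_store();
        ck_pr_store_uint(&tnode->sense, ~tnode->sense);
    } else {
        while (sense != ck_pr_load_uint(&tnode->sense))
            ck_pr_stall();
    }
    /* Previously only the waiting branch fenced, and it did so before the
     * spin; now every thread leaves the barrier behind a full fence. */
    ck_pr_fence_memory();

    return;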