On 04/14/2016 10:34 AM, Pan Xinhui wrote:
Hello, Waiman,
I tried your patch, thanks!

I also made some improvements.
The code diff below has been tested; it works for me. :)

diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index ce2f75e..99f31e4 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -248,7 +248,8 @@ static __always_inline void set_locked(struct qspinlock *lock)
   */

  static __always_inline void __pv_init_node(struct mcs_spinlock *node) { }
-static __always_inline void __pv_wait_node(struct mcs_spinlock *node,
+static __always_inline void __pv_wait_node(struct qspinlock *lock,
+                                          struct mcs_spinlock *node,
                                            struct mcs_spinlock *prev) { }
  static __always_inline void __pv_kick_node(struct qspinlock *lock,
                                            struct mcs_spinlock *node) { }
@@ -407,7 +408,7 @@ queue:
                 prev = decode_tail(old);
                 WRITE_ONCE(prev->next, node);

-               pv_wait_node(node, prev);
+               pv_wait_node(lock, node, prev);
                 arch_mcs_spin_lock_contended(&node->locked);

                 /*
diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
index 01a6d16..75ccfd3 100644
--- a/kernel/locking/qspinlock_paravirt.h
+++ b/kernel/locking/qspinlock_paravirt.h
@@ -255,6 +257,19 @@ static struct pv_node *pv_unhash(struct qspinlock *lock)
         BUG();
  }

+static struct pv_node *pv_hash_lookup(struct qspinlock *lock)
+{
+       unsigned long offset, hash = hash_ptr(lock, pv_lock_hash_bits);
+       struct pv_hash_entry *he;
+
+       for_each_hash_entry(he, offset, hash) {
+               if (READ_ONCE(he->lock) == lock) {
+                       return he->node;
+               }
+       }
+       return NULL;
+}
+
  /*
   * Return true if when it is time to check the previous node which is not
   * in a running state.
@@ -287,14 +303,17 @@ static void pv_init_node(struct mcs_spinlock *node)
   * pv_kick_node() is used to set _Q_SLOW_VAL and fill in hash table on its
   * behalf.
   */
-static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev)
+static void pv_wait_node(struct qspinlock *lock, struct mcs_spinlock *node,
+                       struct mcs_spinlock *prev)
  {
         struct pv_node *pn = (struct pv_node *)node;
         struct pv_node *pp = (struct pv_node *)prev;
+       struct pv_node *ph;
         int waitcnt = 0;
         int loop;
         bool wait_early;

+       pn->prev_cpu = pp->cpu;
         /* waitcnt processing will be compiled out if !QUEUED_LOCK_STAT */
         for (;; waitcnt++) {
                 for (wait_early = false, loop = SPIN_THRESHOLD; loop; loop--) {
@@ -322,7 +341,10 @@ static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev)
                         qstat_inc(qstat_pv_wait_node, true);
                         qstat_inc(qstat_pv_wait_again, waitcnt);
                         qstat_inc(qstat_pv_wait_early, wait_early);
-                       pv_wait(&pn->state, vcpu_halted);
+                       ph = pv_hash_lookup(lock);
+                       if (!ph)
+                               ph = pp;
+                       pv_wait(&pn->state, vcpu_halted, ph->prev_cpu);
                 }

                 /*

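To spell out the idea: pv_wait_node() now records the predecessor's CPU and, before halting, looks the lock up in the pv hash table. If the queue head has already hashed itself there, its prev_cpu is passed to pv_wait() as a yield hint; otherwise we fall back to the immediate predecessor. (The extra CPU argument to pv_wait() comes from elsewhere in this series; mainline pv_wait() takes only two arguments.) For illustration only, here is a rough, self-contained sketch of what pv_hash_lookup() does, against a simplified stand-in for the kernel's pv_lock_hash (the real table uses hash_ptr(), per-cacheline buckets and for_each_hash_entry()):

/*
 * Sketch only: a small open-addressed array keyed by the lock
 * pointer, standing in for the kernel's pv_lock_hash.
 */
#include <stddef.h>
#include <stdint.h>

#define PV_HASH_BITS	4
#define PV_HASH_SIZE	(1U << PV_HASH_BITS)

struct qspinlock;			/* opaque in this sketch */

struct pv_node {
	int cpu;			/* this vCPU */
	int prev_cpu;			/* CPU of the node queued before us */
};

struct pv_hash_entry {
	struct qspinlock *lock;
	struct pv_node	 *node;
};

static struct pv_hash_entry pv_lock_hash[PV_HASH_SIZE];

static unsigned int hash_lock(struct qspinlock *lock)
{
	/* crude pointer hash; the kernel uses hash_ptr() */
	return ((uintptr_t)lock >> 4) & (PV_HASH_SIZE - 1);
}

/* Return the node that hashed this lock, or NULL if none did (yet). */
static struct pv_node *pv_hash_lookup(struct qspinlock *lock)
{
	unsigned int i, hash = hash_lock(lock);

	for (i = 0; i < PV_HASH_SIZE; i++) {
		struct pv_hash_entry *he =
			&pv_lock_hash[(hash + i) & (PV_HASH_SIZE - 1)];

		if (he->lock == lock)
			return he->node;
		if (!he->lock)		/* empty slot: lock not hashed */
			return NULL;
	}
	return NULL;
}

A NULL return simply means the queue head has not reached the point of hashing the lock yet, so the predecessor pp is the best hint available. When the lookup does succeed, the node found is the current queue head, so its prev_cpu approximates the vCPU of the lock holder, which is the target we really want for a directed yield.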
Any comments are welcome. I put my patch here just for simplicity; or should I send it out in a new thread?

thanks
xinhui



I have sent out a v2 patch that incorporates your change with some minor twists.

Cheers,
Longman
