On Wed, Oct 31, 2018 at 09:54:17AM +0800, Yi Sun wrote: > On 18-10-23 17:33:28, Yi Sun wrote: > > On 18-10-23 10:51:27, Peter Zijlstra wrote:
> > > Can you try and explain why vcpu_is_preempted() doesn't work for you? > > > > I thought HvSpinWaitInfo is used to notify hypervisor the spin number > > which is different with vcpu_is_preempted. So I did not consider > > vcpu_is_preempted. > > > > But HvSpinWaitInfo is a quite simple function and could be combined > > with vcpu_is_preempted together. So I think it is OK to use > > vcpu_is_preempted to make codes clean. I will have a try. > > After checking codes, there is one issue to call vcpu_is_preempted. > There are two spin loops in qspinlock_paravirt.h. One loop in > 'pv_wait_node' calls vcpu_is_preempted. But another loop in > 'pv_wait_head_or_lock' does not call vcpu_is_preempted. It also does > not call any other ops of 'pv_lock_ops' in the loop. So I am afraid > we have to add one more ops in 'pv_lock_ops' to do this. Why? Would not something like the below cure that? Waiman, can you have a look at this; I always forget how that paravirt crud works. --- kernel/locking/qspinlock.c | 5 +++-- kernel/locking/qspinlock_paravirt.h | 5 +++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c index 8a8c3c208c5e..a4ab80f95176 100644 --- a/kernel/locking/qspinlock.c +++ b/kernel/locking/qspinlock.c @@ -286,7 +286,8 @@ static __always_inline void __pv_wait_node(struct mcs_spinlock *node, static __always_inline void __pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node) { } static __always_inline u32 __pv_wait_head_or_lock(struct qspinlock *lock, - struct mcs_spinlock *node) + struct mcs_spinlock *node, + struct mcs_spinlock *prev) { return 0; } #define pv_enabled() false @@ -500,7 +501,7 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val) * If PV isn't active, 0 will be returned instead. 
 * */ - if ((val = pv_wait_head_or_lock(lock, node))) + if ((val = pv_wait_head_or_lock(lock, node, prev))) goto locked; val = atomic_cond_read_acquire(&lock->val, !(VAL & _Q_LOCKED_PENDING_MASK)); diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h index 0130e488ebfe..531dadc955fb 100644 --- a/kernel/locking/qspinlock_paravirt.h +++ b/kernel/locking/qspinlock_paravirt.h @@ -399,9 +399,10 @@ static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node) * The current value of the lock will be returned for additional processing. */ static u32 -pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node) +pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node, struct mcs_spinlock *prev) { struct pv_node *pn = (struct pv_node *)node; + struct pv_node *pp = (struct pv_node *)prev; struct qspinlock **lp = NULL; int waitcnt = 0; int loop; @@ -430,7 +431,7 @@ pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node) * disable lock stealing before attempting to acquire the lock. */ set_pending(lock); - for (loop = SPIN_THRESHOLD; loop && !vcpu_is_preempted(pp->cpu); loop--) { if (trylock_clear_pending(lock)) goto gotlock; cpu_relax();