Commit-ID:  446f3dc8cc0af59259c6c8b898726fae7ed2c055
Gitweb:     http://git.kernel.org/tip/446f3dc8cc0af59259c6c8b898726fae7ed2c055
Author:     Pan Xinhui <xinhui....@linux.vnet.ibm.com>
AuthorDate: Wed, 2 Nov 2016 05:08:33 -0400
Committer:  Ingo Molnar <mi...@kernel.org>
CommitDate: Tue, 22 Nov 2016 12:48:07 +0100

locking/core, x86/paravirt: Implement vcpu_is_preempted(cpu) for KVM and Xen guests

Optimize spinlock and mutex busy-loops by providing a vcpu_is_preempted(cpu)
function on KVM and Xen platforms.

Extend the pv_lock_ops interface accordingly and implement the callbacks
on KVM and Xen.

Signed-off-by: Pan Xinhui <xinhui....@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
[ Translated to English. ]
Acked-by: Paolo Bonzini <pbonz...@redhat.com>
Cc: david.lai...@aculab.com
Cc: Linus Torvalds <torva...@linux-foundation.org>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Thomas Gleixner <t...@linutronix.de>
Cc: b...@kernel.crashing.org
Cc: boqun.f...@gmail.com
Cc: borntrae...@de.ibm.com
Cc: bsinghar...@gmail.com
Cc: d...@stgolabs.net
Cc: jgr...@suse.com
Cc: kernel...@gmail.com
Cc: konrad.w...@oracle.com
Cc: linuxppc-...@lists.ozlabs.org
Cc: m...@ellerman.id.au
Cc: paul...@linux.vnet.ibm.com
Cc: pau...@samba.org
Cc: rkrc...@redhat.com
Cc: virtualizat...@lists.linux-foundation.org
Cc: will.dea...@arm.com
Cc: xen-devel-requ...@lists.xenproject.org
Cc: xen-de...@lists.xenproject.org
Link: http://lkml.kernel.org/r/1478077718-37424-7-git-send-email-xinhui....@linux.vnet.ibm.com
Signed-off-by: Ingo Molnar <mi...@kernel.org>
---
 arch/x86/include/asm/paravirt_types.h | 2 ++
 arch/x86/include/asm/spinlock.h       | 8 ++++++++
 arch/x86/kernel/paravirt-spinlocks.c  | 6 ++++++
 3 files changed, 16 insertions(+)

diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 0f400c0..38c3bb7 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -310,6 +310,8 @@ struct pv_lock_ops {
 
        void (*wait)(u8 *ptr, u8 val);
        void (*kick)(int cpu);
+
+       bool (*vcpu_is_preempted)(int cpu);
 };
 
 /* This contains all the paravirt structures: we get a convenient
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 921bea7..0526f59 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -26,6 +26,14 @@
 extern struct static_key paravirt_ticketlocks_enabled;
 static __always_inline bool static_key_false(struct static_key *key);
 
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+#define vcpu_is_preempted vcpu_is_preempted
+static inline bool vcpu_is_preempted(int cpu)
+{
+       return pv_lock_ops.vcpu_is_preempted(cpu);
+}
+#endif
+
 #include <asm/qspinlock.h>
 
 /*
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
index 2c55a00..2f204dd 100644
--- a/arch/x86/kernel/paravirt-spinlocks.c
+++ b/arch/x86/kernel/paravirt-spinlocks.c
@@ -21,12 +21,18 @@ bool pv_is_native_spin_unlock(void)
                __raw_callee_save___native_queued_spin_unlock;
 }
 
+static bool native_vcpu_is_preempted(int cpu)
+{
+       return 0;
+}
+
 struct pv_lock_ops pv_lock_ops = {
 #ifdef CONFIG_SMP
        .queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
        .queued_spin_unlock = PV_CALLEE_SAVE(__native_queued_spin_unlock),
        .wait = paravirt_nop,
        .kick = paravirt_nop,
+       .vcpu_is_preempted = native_vcpu_is_preempted,
 #endif /* SMP */
 };
 EXPORT_SYMBOL(pv_lock_ops);

Reply via email to