Fix the stale description of "xen_nopvspin": the Xen PV slowpath is
based on qspinlock now, not ticketlocks. While at it, mark
"xen_nopvspin" as deprecated in favor of the generic "nopvspin"
parameter; the old option keeps working, but now only takes effect
when actually running on Xen.
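
For context, a minimal sketch (an assumption, not part of this diff) of
the generic side the hunks below rely on: a shared "pvspin" flag and a
"nopvspin" early parameter living in common x86 code. The exact file,
symbol placement and linkage are assumed here, roughly:

  #include <linux/init.h>
  #include <linux/types.h>

  /* Shared flag, visible to arch code; declaration site is assumed. */
  bool pvspin = true;

  /* Generic "nopvspin" parameter clears the shared flag for any guest. */
  static __init int parse_nopvspin(char *arg)
  {
          pvspin = false;
          return 0;
  }
  early_param("nopvspin", parse_nopvspin);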

Signed-off-by: Zhenzhong Duan <zhenzhong.d...@oracle.com>
Cc: Jonathan Corbet <cor...@lwn.net>
Cc: Boris Ostrovsky <boris.ostrov...@oracle.com>
Cc: Juergen Gross <jgr...@suse.com>
Cc: Stefano Stabellini <sstabell...@kernel.org>
Cc: Thomas Gleixner <t...@linutronix.de>
Cc: Ingo Molnar <mi...@redhat.com>
Cc: Borislav Petkov <b...@alien8.de>
Cc: "H. Peter Anvin" <h...@zytor.com>
---
 Documentation/admin-guide/kernel-parameters.txt |  7 ++++---
 arch/x86/xen/spinlock.c                         | 13 +++++++------
 2 files changed, 11 insertions(+), 9 deletions(-)

diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 4b956d8..1f0a62f 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -5303,8 +5303,9 @@
                        never -- do not unplug even if version check succeeds
 
        xen_nopvspin    [X86,XEN]
-                       Disables the ticketlock slowpath using Xen PV
-                       optimizations.
+                       Disables the qspinlock slowpath using Xen PV optimizations.
+                       This parameter is deprecated in favor of the "nopvspin"
+                       parameter, which has an equivalent effect on the Xen platform.
 
        xen_nopv        [X86]
                        Disables the PV optimizations forcing the HVM guest to
@@ -5330,7 +5331,7 @@
                        as generic guest with no PV drivers. Currently support
                        XEN HVM, KVM, HYPER_V and VMWARE guest.
 
-       nopvspin        [X86,KVM] Disables the qspinlock slow path
+       nopvspin        [X86,XEN,KVM] Disables the qspinlock slow path
                        using PV optimizations which allow the hypervisor to
                        'idle' the guest on lock contention.
 
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 6deb490..092a53f 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -18,7 +18,6 @@
 static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
 static DEFINE_PER_CPU(char *, irq_name);
 static DEFINE_PER_CPU(atomic_t, xen_qlock_wait_nest);
-static bool xen_pvspin = true;
 
 static void xen_qlock_kick(int cpu)
 {
@@ -68,7 +67,7 @@ void xen_init_lock_cpu(int cpu)
        int irq;
        char *name;
 
-       if (!xen_pvspin)
+       if (!pvspin)
                return;
 
        WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n",
@@ -93,7 +92,7 @@ void xen_init_lock_cpu(int cpu)
 
 void xen_uninit_lock_cpu(int cpu)
 {
-       if (!xen_pvspin)
+       if (!pvspin)
                return;
 
        unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL);
@@ -117,9 +116,9 @@ void __init xen_init_spinlocks(void)
 
        /*  Don't need to use pvqspinlock code if there is only 1 vCPU. */
        if (num_possible_cpus() == 1)
-               xen_pvspin = false;
+               pvspin = false;
 
-       if (!xen_pvspin) {
+       if (!pvspin) {
                printk(KERN_DEBUG "xen: PV spinlocks disabled\n");
                static_branch_disable(&virt_spin_lock_key);
                return;
@@ -137,7 +136,9 @@ void __init xen_init_spinlocks(void)
 
 static __init int xen_parse_nopvspin(char *arg)
 {
-       xen_pvspin = false;
+       pr_notice("\"xen_nopvspin\" is deprecated, please use \"nopvspin\" instead\n");
+       if (xen_domain())
+               pvspin = false;
        return 0;
 }
 early_param("xen_nopvspin", xen_parse_nopvspin);
-- 
1.8.3.1
