This patch moves the UV NMI support out of the x2apic file and into
a separate new uv_nmi.c file, in preparation for the next sequence
of patches.  This minimizes bloat in the x2apic file and has the
added benefit of placing the upcoming /sys/module parameters under
the name 'uv_nmi' instead of the more obscure 'x2apic_uv_x'.

Cc: Alex Shi <alex....@intel.com>
Cc: Cliff Wickman <c...@sgi.com>
Cc: Alexander Gordeev <agord...@redhat.com>
Cc: Suresh Siddha <suresh.b.sid...@intel.com>
Cc: "Michael S. Tsirkin" <m...@redhat.com>
Cc: Steffen Persvold <s...@numascale.com>
Reviewed-by: Dimitri Sivanich <sivan...@sgi.com>
Signed-off-by: Mike Travis <tra...@sgi.com>
---
 arch/x86/include/asm/uv/uv.h       |    2 
 arch/x86/kernel/apic/x2apic_uv_x.c |   69 -------------------------
 arch/x86/platform/uv/Makefile      |    2 
 arch/x86/platform/uv/uv_nmi.c      |  101 +++++++++++++++++++++++++++++++++++++
 4 files changed, 104 insertions(+), 70 deletions(-)

--- linux.orig/arch/x86/include/asm/uv/uv.h
+++ linux/arch/x86/include/asm/uv/uv.h
@@ -12,6 +12,7 @@ extern enum uv_system_type get_uv_system
 extern int is_uv_system(void);
 extern void uv_cpu_init(void);
 extern void uv_nmi_init(void);
+extern void uv_register_nmi_notifier(void);
 extern void uv_system_init(void);
 extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
                                                 struct mm_struct *mm,
@@ -25,6 +26,7 @@ static inline enum uv_system_type get_uv
 static inline int is_uv_system(void)   { return 0; }
 static inline void uv_cpu_init(void)   { }
 static inline void uv_system_init(void)        { }
+static inline void uv_register_nmi_notifier(void) { }
 static inline const struct cpumask *
 uv_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm,
                    unsigned long start, unsigned long end, unsigned int cpu)
--- linux.orig/arch/x86/kernel/apic/x2apic_uv_x.c
+++ linux/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -39,12 +39,6 @@
 #include <asm/emergency-restart.h>
 #include <asm/nmi.h>
 
-/* BMC sets a bit this MMR non-zero before sending an NMI */
-#define UVH_NMI_MMR                            UVH_SCRATCH5
-#define UVH_NMI_MMR_CLEAR                      (UVH_NMI_MMR + 8)
-#define UV_NMI_PENDING_MASK                    (1UL << 63)
-DEFINE_PER_CPU(unsigned long, cpu_last_nmi_count);
-
 DEFINE_PER_CPU(int, x2apic_extra_bits);
 
 #define PR_DEVEL(fmt, args...) pr_devel("%s: " fmt, __func__, args)
@@ -56,7 +50,6 @@ int uv_min_hub_revision_id;
 EXPORT_SYMBOL_GPL(uv_min_hub_revision_id);
 unsigned int uv_apicid_hibits;
 EXPORT_SYMBOL_GPL(uv_apicid_hibits);
-static DEFINE_SPINLOCK(uv_nmi_lock);
 
 static struct apic apic_x2apic_uv_x;
 
@@ -795,68 +788,6 @@ void __cpuinit uv_cpu_init(void)
                set_x2apic_extra_bits(uv_hub_info->pnode);
 }
 
-/*
- * When NMI is received, print a stack trace.
- */
-int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
-{
-       unsigned long real_uv_nmi;
-       int bid;
-
-       /*
-        * Each blade has an MMR that indicates when an NMI has been sent
-        * to cpus on the blade. If an NMI is detected, atomically
-        * clear the MMR and update a per-blade NMI count used to
-        * cause each cpu on the blade to notice a new NMI.
-        */
-       bid = uv_numa_blade_id();
-       real_uv_nmi = (uv_read_local_mmr(UVH_NMI_MMR) & UV_NMI_PENDING_MASK);
-
-       if (unlikely(real_uv_nmi)) {
-               spin_lock(&uv_blade_info[bid].nmi_lock);
-               real_uv_nmi = (uv_read_local_mmr(UVH_NMI_MMR) & UV_NMI_PENDING_MASK);
-               if (real_uv_nmi) {
-                       uv_blade_info[bid].nmi_count++;
-                       uv_write_local_mmr(UVH_NMI_MMR_CLEAR, UV_NMI_PENDING_MASK);
-               }
-               spin_unlock(&uv_blade_info[bid].nmi_lock);
-       }
-
-       if (likely(__get_cpu_var(cpu_last_nmi_count) == uv_blade_info[bid].nmi_count))
-               return NMI_DONE;
-
-       __get_cpu_var(cpu_last_nmi_count) = uv_blade_info[bid].nmi_count;
-
-       /*
-        * Use a lock so only one cpu prints at a time.
-        * This prevents intermixed output.
-        */
-       spin_lock(&uv_nmi_lock);
-       pr_info("UV NMI stack dump cpu %u:\n", smp_processor_id());
-       dump_stack();
-       spin_unlock(&uv_nmi_lock);
-
-       return NMI_HANDLED;
-}
-
-void uv_register_nmi_notifier(void)
-{
-       if (register_nmi_handler(NMI_UNKNOWN, uv_handle_nmi, 0, "uv"))
-               printk(KERN_WARNING "UV NMI handler failed to register\n");
-}
-
-void uv_nmi_init(void)
-{
-       unsigned int value;
-
-       /*
-        * Unmask NMI on all cpus
-        */
-       value = apic_read(APIC_LVT1) | APIC_DM_NMI;
-       value &= ~APIC_LVT_MASKED;
-       apic_write(APIC_LVT1, value);
-}
-
 void __init uv_system_init(void)
 {
        union uvh_rh_gam_config_mmr_u  m_n_config;
--- linux.orig/arch/x86/platform/uv/Makefile
+++ linux/arch/x86/platform/uv/Makefile
@@ -1 +1 @@
-obj-$(CONFIG_X86_UV)           += tlb_uv.o bios_uv.o uv_irq.o uv_sysfs.o uv_time.o
+obj-$(CONFIG_X86_UV)           += tlb_uv.o bios_uv.o uv_irq.o uv_sysfs.o uv_time.o uv_nmi.o
--- /dev/null
+++ linux/arch/x86/platform/uv/uv_nmi.c
@@ -0,0 +1,101 @@
+/*
+ * SGI NMI support routines
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ *
+ *  Copyright (c) 2009-2013 Silicon Graphics, Inc.  All Rights Reserved.
+ *  Copyright (c) Mike Travis
+ */
+
+#include <linux/cpu.h>
+#include <linux/nmi.h>
+
+#include <asm/apic.h>
+#include <asm/uv/uv.h>
+#include <asm/uv/uv_hub.h>
+#include <asm/uv/uv_mmrs.h>
+
+/* BMC sets a bit in this MMR before sending an NMI */
+#define UVH_NMI_MMR                            UVH_SCRATCH5
+#define UVH_NMI_MMR_CLEAR                      (UVH_NMI_MMR + 8)
+#define UV_NMI_PENDING_MASK                    (1UL << 63)
+DEFINE_PER_CPU(unsigned long, cpu_last_nmi_count);
+static DEFINE_SPINLOCK(uv_nmi_lock);
+
+/*
+ * When NMI is received, print a stack trace.
+ */
+int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
+{
+       unsigned long real_uv_nmi;
+       int bid;
+
+       /*
+        * Each blade has an MMR that indicates when an NMI has been sent
+        * to cpus on the blade. If an NMI is detected, atomically
+        * clear the MMR and update a per-blade NMI count used to
+        * cause each cpu on the blade to notice a new NMI.
+        */
+       bid = uv_numa_blade_id();
+       real_uv_nmi = (uv_read_local_mmr(UVH_NMI_MMR) & UV_NMI_PENDING_MASK);
+
+       if (unlikely(real_uv_nmi)) {
+               spin_lock(&uv_blade_info[bid].nmi_lock);
+               real_uv_nmi = (uv_read_local_mmr(UVH_NMI_MMR) &
+                               UV_NMI_PENDING_MASK);
+               if (real_uv_nmi) {
+                       uv_blade_info[bid].nmi_count++;
+                       uv_write_local_mmr(UVH_NMI_MMR_CLEAR,
+                                               UV_NMI_PENDING_MASK);
+               }
+               spin_unlock(&uv_blade_info[bid].nmi_lock);
+       }
+
+       if (likely(__get_cpu_var(cpu_last_nmi_count) ==
+                       uv_blade_info[bid].nmi_count))
+               return NMI_DONE;
+
+       __get_cpu_var(cpu_last_nmi_count) = uv_blade_info[bid].nmi_count;
+
+       /*
+        * Use a lock so only one cpu prints at a time.
+        * This prevents intermixed output.
+        */
+       spin_lock(&uv_nmi_lock);
+       pr_info("UV NMI stack dump cpu %u:\n", smp_processor_id());
+       dump_stack();
+       spin_unlock(&uv_nmi_lock);
+
+       return NMI_HANDLED;
+}
+
+void uv_register_nmi_notifier(void)
+{
+       if (register_nmi_handler(NMI_UNKNOWN, uv_handle_nmi, 0, "uv"))
+               pr_warn("UV NMI handler failed to register\n");
+}
+
+void uv_nmi_init(void)
+{
+       unsigned int value;
+
+       /*
+        * Unmask NMI on all cpus
+        */
+       value = apic_read(APIC_LVT1) | APIC_DM_NMI;
+       value &= ~APIC_LVT_MASKED;
+       apic_write(APIC_LVT1, value);
+}
+
