This is a preparation for making PAT (Page Attribute Table) independent
from MTRR (Memory Type Range Register).

Rename the functions in mtrr/generic.c that are used by both MTRR and
PAT and move them out of mtrr/generic.c into rendezvous.c. Only renames
and code movement, no logic change.
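
The renames in detail:

  prepare_set()            -> mtrr_pat_prepare_set()
  post_set()               -> mtrr_pat_post_set()
  mtrr_bp_pat_init()       -> pat_bp_init()
  deftype_lo, deftype_hi   -> mtrr_deftype_lo, mtrr_deftype_hi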

Signed-off-by: Isaku Yamahata <isaku.yamah...@gmail.com>
---
 arch/x86/include/asm/mtrr.h           |   4 +
 arch/x86/kernel/cpu/mtrr/generic.c    | 111 ++------------------------
 arch/x86/kernel/cpu/mtrr/mtrr.c       |   2 +-
 arch/x86/kernel/cpu/mtrr/mtrr.h       |   3 +-
 arch/x86/kernel/cpu/mtrr/rendezvous.c |  91 +++++++++++++++++++++
 5 files changed, 106 insertions(+), 105 deletions(-)
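
Reviewer note (below the ---, so discarded by git am): the moved helpers
keep their calling convention. Callers disable local interrupts and
bracket the MTRR/PAT programming with the prepare/post pair, exactly as
generic_set_all() does in the diff below. A minimal sketch of that
pattern; the caller name example_mtrr_pat_update() is hypothetical and
used purely for illustration:

	static void example_mtrr_pat_update(void)
	{
		unsigned long flags;

		/* Interrupts must stay off across the prepare/post pair. */
		local_irq_save(flags);
		mtrr_pat_prepare_set();	/* CD=1, flush caches/TLB, MTRRs off */

		/* ... program MTRRs and/or call pat_init() here ... */

		mtrr_pat_post_set();	/* restore MTRRdefType, re-enable caches */
		local_irq_restore(flags);
	}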

diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h
index d90e87c55302..5b056374f5a6 100644
--- a/arch/x86/include/asm/mtrr.h
+++ b/arch/x86/include/asm/mtrr.h
@@ -33,6 +33,8 @@
  */
 # ifdef CONFIG_MTRR
 extern bool mtrr_enabled(void);
+extern void mtrr_pat_prepare_set(void) __acquires(set_atomicity_lock);
+extern void mtrr_pat_post_set(void) __releases(set_atomicity_lock);
 extern u8 mtrr_type_lookup(u64 addr, u64 end, u8 *uniform);
 extern void mtrr_save_fixed_ranges(void *);
 extern void mtrr_save_state(void);
@@ -55,6 +57,8 @@ static inline bool mtrr_enabled(void)
 {
        return false;
 }
+static inline void mtrr_pat_prepare_set(void) { }
+static inline void mtrr_pat_post_set(void) { }
 static inline u8 mtrr_type_lookup(u64 addr, u64 end, u8 *uniform)
 {
        /*
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index aa5c064a6a22..a44f05f64846 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -397,9 +397,6 @@ print_fixed(unsigned base, unsigned step, const mtrr_type *types)
        }
 }
 
-static void prepare_set(void);
-static void post_set(void);
-
 static void __init print_mtrr_state(void)
 {
        unsigned int i;
@@ -445,20 +442,6 @@ static void __init print_mtrr_state(void)
                pr_debug("TOM2: %016llx aka %lldM\n", mtrr_tom2, mtrr_tom2>>20);
 }
 
-/* PAT setup for BP. We need to go through sync steps here */
-void __init mtrr_bp_pat_init(void)
-{
-       unsigned long flags;
-
-       local_irq_save(flags);
-       prepare_set();
-
-       pat_init();
-
-       post_set();
-       local_irq_restore(flags);
-}
-
 /* Grab all of the MTRR state for this CPU into *state */
 bool __init get_mtrr_state(void)
 {
@@ -680,8 +663,6 @@ static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
        return changed;
 }
 
-static u32 deftype_lo, deftype_hi;
-
 /**
  * set_mtrr_state - Set the MTRR state for this CPU.
  *
@@ -705,100 +686,24 @@ static unsigned long set_mtrr_state(void)
         * Set_mtrr_restore restores the old value of MTRRdefType,
         * so to set it we fiddle with the saved value:
         */
-       if ((deftype_lo & 0xff) != mtrr_state.def_type
-           || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
+       if ((mtrr_deftype_lo & 0xff) != mtrr_state.def_type
+           || ((mtrr_deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
 
-               deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type |
-                            (mtrr_state.enabled << 10);
+               mtrr_deftype_lo = (mtrr_deftype_lo & ~0xcff) |
+                       mtrr_state.def_type | (mtrr_state.enabled << 10);
                change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
        }
 
        return change_mask;
 }
 
-
-static unsigned long cr4;
-static DEFINE_RAW_SPINLOCK(set_atomicity_lock);
-
-/*
- * Since we are disabling the cache don't allow any interrupts,
- * they would run extremely slow and would only increase the pain.
- *
- * The caller must ensure that local interrupts are disabled and
- * are reenabled after post_set() has been called.
- */
-static void prepare_set(void) __acquires(set_atomicity_lock)
-{
-       unsigned long cr0;
-
-       /*
-        * Note that this is not ideal
-        * since the cache is only flushed/disabled for this CPU while the
-        * MTRRs are changed, but changing this requires more invasive
-        * changes to the way the kernel boots
-        */
-
-       raw_spin_lock(&set_atomicity_lock);
-
-       /* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
-       cr0 = read_cr0() | X86_CR0_CD;
-       write_cr0(cr0);
-
-       /*
-        * Cache flushing is the most time-consuming step when programming
-        * the MTRRs. Fortunately, as per the Intel Software Development
-        * Manual, we can skip it if the processor supports cache self-
-        * snooping.
-        */
-       if (!static_cpu_has(X86_FEATURE_SELFSNOOP))
-               wbinvd();
-
-       /* Save value of CR4 and clear Page Global Enable (bit 7) */
-       if (boot_cpu_has(X86_FEATURE_PGE)) {
-               cr4 = __read_cr4();
-               __write_cr4(cr4 & ~X86_CR4_PGE);
-       }
-
-       /* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
-       count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
-       __flush_tlb();
-
-       /* Save MTRR state */
-       rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
-
-       /* Disable MTRRs, and set the default type to uncached */
-       mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi);
-
-       /* Again, only flush caches if we have to. */
-       if (!static_cpu_has(X86_FEATURE_SELFSNOOP))
-               wbinvd();
-}
-
-static void post_set(void) __releases(set_atomicity_lock)
-{
-       /* Flush TLBs (no need to flush caches - they are disabled) */
-       count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
-       __flush_tlb();
-
-       /* Intel (P6) standard MTRRs */
-       mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
-
-       /* Enable caches */
-       write_cr0(read_cr0() & ~X86_CR0_CD);
-
-       /* Restore value of CR4 */
-       if (boot_cpu_has(X86_FEATURE_PGE))
-               __write_cr4(cr4);
-       raw_spin_unlock(&set_atomicity_lock);
-}
-
 static void generic_set_all(void)
 {
        unsigned long mask, count;
        unsigned long flags;
 
        local_irq_save(flags);
-       prepare_set();
+       mtrr_pat_prepare_set();
 
        /* Actually set the state */
        mask = set_mtrr_state();
@@ -806,7 +711,7 @@ static void generic_set_all(void)
        /* also set PAT */
        pat_init();
 
-       post_set();
+       mtrr_pat_post_set();
        local_irq_restore(flags);
 
        /* Use the atomic bitops to update the global mask */
@@ -837,7 +742,7 @@ static void generic_set_mtrr(unsigned int reg, unsigned long base,
        vr = &mtrr_state.var_ranges[reg];
 
        local_irq_save(flags);
-       prepare_set();
+       mtrr_pat_prepare_set();
 
        if (size == 0) {
                /*
@@ -856,7 +761,7 @@ static void generic_set_mtrr(unsigned int reg, unsigned long base,
                mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi);
        }
 
-       post_set();
+       mtrr_pat_post_set();
        local_irq_restore(flags);
 }
 
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.c b/arch/x86/kernel/cpu/mtrr/mtrr.c
index 3d35edb1aa42..475627ca2c1b 100644
--- a/arch/x86/kernel/cpu/mtrr/mtrr.c
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.c
@@ -646,7 +646,7 @@ void __init mtrr_pat_bp_init(void)
                        __mtrr_enabled = get_mtrr_state();
 
                        if (mtrr_enabled())
-                               mtrr_bp_pat_init();
+                               pat_bp_init();
 
                        if (mtrr_cleanup(phys_addr)) {
                                changed_by_mtrr_cleanup = 1;
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
index e9aeeeac9a3e..57948b651b8e 100644
--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
@@ -53,7 +53,7 @@ void set_mtrr_prepare_save(struct set_mtrr_context *ctxt);
 void fill_mtrr_var_range(unsigned int index,
                u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
 bool get_mtrr_state(void);
-void mtrr_bp_pat_init(void);
+void pat_bp_init(void);
 
 extern void __init set_mtrr_ops(const struct mtrr_ops *ops);
 
@@ -76,6 +76,7 @@ void set_mtrr_pat(unsigned int reg, unsigned long base,
                  unsigned long size, mtrr_type type);
 void set_mtrr_pat_cpuslocked(unsigned int reg, unsigned long base,
                             unsigned long size, mtrr_type type);
+extern u32 mtrr_deftype_lo, mtrr_deftype_hi;
 
 /* CPU specific mtrr init functions */
 int amd_init_mtrr(void);
diff --git a/arch/x86/kernel/cpu/mtrr/rendezvous.c b/arch/x86/kernel/cpu/mtrr/rendezvous.c
index 5448eea573df..d902b9e5cc17 100644
--- a/arch/x86/kernel/cpu/mtrr/rendezvous.c
+++ b/arch/x86/kernel/cpu/mtrr/rendezvous.c
@@ -33,14 +33,105 @@
 #define DEBUG
 
 #include <linux/stop_machine.h>
+#include <linux/vmstat.h>
 
 #include <asm/mtrr.h>
 #include <asm/msr.h>
 #include <asm/pat.h>
+#include <asm/tlbflush.h>
 
 #include "mtrr.h"
 
 static bool mtrr_pat_aps_delayed_init;
+u32 mtrr_deftype_lo, mtrr_deftype_hi;
+static unsigned long cr4;
+static DEFINE_RAW_SPINLOCK(set_atomicity_lock);
+
+/* PAT setup for BP. We need to go through sync steps here */
+void __init pat_bp_init(void)
+{
+       unsigned long flags;
+
+       local_irq_save(flags);
+       mtrr_pat_prepare_set();
+
+       pat_init();
+
+       mtrr_pat_post_set();
+       local_irq_restore(flags);
+}
+
+/*
+ * Since we are disabling the cache don't allow any interrupts,
+ * they would run extremely slow and would only increase the pain.
+ *
+ * The caller must ensure that local interrupts are disabled and
+ * are reenabled after mtrr_pat_post_set() has been called.
+ */
+void mtrr_pat_prepare_set(void) __acquires(set_atomicity_lock)
+{
+       unsigned long cr0;
+
+       /*
+        * Note that this is not ideal
+        * since the cache is only flushed/disabled for this CPU while the
+        * MTRRs are changed, but changing this requires more invasive
+        * changes to the way the kernel boots
+        */
+
+       raw_spin_lock(&set_atomicity_lock);
+
+       /* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
+       cr0 = read_cr0() | X86_CR0_CD;
+       write_cr0(cr0);
+
+       /*
+        * Cache flushing is the most time-consuming step when programming
+        * the MTRRs. Fortunately, as per the Intel Software Development
+        * Manual, we can skip it if the processor supports cache self-
+        * snooping.
+        */
+       if (!static_cpu_has(X86_FEATURE_SELFSNOOP))
+               wbinvd();
+
+       /* Save value of CR4 and clear Page Global Enable (bit 7) */
+       if (boot_cpu_has(X86_FEATURE_PGE)) {
+               cr4 = __read_cr4();
+               __write_cr4(cr4 & ~X86_CR4_PGE);
+       }
+
+       /* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
+       count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
+       __flush_tlb();
+
+       /* Save MTRR state */
+       rdmsr(MSR_MTRRdefType, mtrr_deftype_lo, mtrr_deftype_hi);
+
+       /* Disable MTRRs, and set the default type to uncached */
+       mtrr_wrmsr(MSR_MTRRdefType, mtrr_deftype_lo & ~0xcff, mtrr_deftype_hi);
+
+       /* Again, only flush caches if we have to. */
+       if (!static_cpu_has(X86_FEATURE_SELFSNOOP))
+               wbinvd();
+}
+
+void mtrr_pat_post_set(void) __releases(set_atomicity_lock)
+{
+       /* Flush TLBs (no need to flush caches - they are disabled) */
+       count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
+       __flush_tlb();
+
+       /* Intel (P6) standard MTRRs */
+       mtrr_wrmsr(MSR_MTRRdefType, mtrr_deftype_lo, mtrr_deftype_hi);
+
+       /* Enable caches */
+       write_cr0(read_cr0() & ~X86_CR0_CD);
+
+       /* Restore value of CR4 */
+       if (boot_cpu_has(X86_FEATURE_PGE))
+               __write_cr4(cr4);
+       raw_spin_unlock(&set_atomicity_lock);
+}
 
 struct set_mtrr_data {
        unsigned long   smp_base;
-- 
2.17.1
