Commit-ID:  99c13b8c8896d7bcb92753bf0c63a8de4326e78d
Gitweb:     http://git.kernel.org/tip/99c13b8c8896d7bcb92753bf0c63a8de4326e78d
Author:     Mikulas Patocka <mpato...@redhat.com>
AuthorDate: Tue, 4 Jul 2017 19:04:23 -0400
Committer:  Thomas Gleixner <t...@linutronix.de>
CommitDate: Wed, 5 Jul 2017 09:01:24 +0200

x86/mm/pat: Don't report PAT on CPUs that don't support it

The pat_enabled() logic is broken on CPUs which do not support PAT and
where the initialization code fails to call pat_init(). Due to that, the
enabled flag stays true and pat_enabled() wrongly returns true.

As a consequence the mappings, e.g. for Xorg, are set up with the wrong
caching mode and the required MTRR setups are omitted.
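
For illustration, the old logic condensed from the diff below: the flag
defaults to "enabled" whenever CONFIG_X86_PAT is set, so if pat_init()
is never reached nothing ever clears it:

    /* Old logic (condensed sketch): enabled by default, never verified */
    static int __read_mostly __pat_enabled = IS_ENABLED(CONFIG_X86_PAT);

    bool pat_enabled(void)
    {
            /* True even on CPUs where PAT was never initialized */
            return !!__pat_enabled;
    }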

To cure this the following changes are required:

  1) Make pat_enabled() return true only if PAT initialization was
     invoked and successful.

  2) Invoke init_cache_modes() unconditionally in setup_arch() and
     remove the extra callsites in pat_disable() and the pat disabled
     code path in pat_init().

Also rename __pat_enabled to pat_disabled to reflect the real purpose of
this variable.
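
A condensed sketch of the new logic, again taken from the diff below:
pat_enabled() now keys off a flag which is only set once the PAT MSR has
actually been written on the boot CPU:

    static bool __read_mostly pat_disabled = !IS_ENABLED(CONFIG_X86_PAT);
    static bool __read_mostly pat_initialized;

    static void pat_bsp_init(u64 pat)
    {
            ...
            wrmsrl(MSR_IA32_CR_PAT, pat);
            pat_initialized = true;    /* PAT setup succeeded */
            ...
    }

    bool pat_enabled(void)
    {
            /* True only if PAT initialization was invoked and successful */
            return pat_initialized;
    }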

Fixes: 9cd25aac1f44 ("x86/mm/pat: Emulate PAT when it is disabled")
Signed-off-by: Mikulas Patocka <mpato...@redhat.com>
Signed-off-by: Thomas Gleixner <t...@linutronix.de>
Cc: Bernhard Held <berny...@gmx.de>
Cc: Denys Vlasenko <dvlas...@redhat.com>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Brian Gerst <brge...@gmail.com>
Cc: "Luis R. Rodriguez" <mcg...@suse.com>
Cc: Borislav Petkov <b...@alien8.de>
Cc: Andy Lutomirski <l...@kernel.org>
Cc: Josh Poimboeuf <jpoim...@redhat.com>
Cc: Andrew Morton <a...@linux-foundation.org>
Cc: Linus Torvalds <torva...@linux-foundation.org>
Cc: sta...@vger.kernel.org
Link: http://lkml.kernel.org/r/alpine.lrh.2.02.1707041749300.3...@file01.intranet.prod.int.rdu2.redhat.com

---
 arch/x86/include/asm/pat.h |  1 +
 arch/x86/kernel/setup.c    |  7 +++++++
 arch/x86/mm/pat.c          | 28 ++++++++++++----------------
 3 files changed, 20 insertions(+), 16 deletions(-)

diff --git a/arch/x86/include/asm/pat.h b/arch/x86/include/asm/pat.h
index 0b1ff4c..fffb279 100644
--- a/arch/x86/include/asm/pat.h
+++ b/arch/x86/include/asm/pat.h
@@ -7,6 +7,7 @@
 bool pat_enabled(void);
 void pat_disable(const char *reason);
 extern void pat_init(void);
+extern void init_cache_modes(void);
 
 extern int reserve_memtype(u64 start, u64 end,
                enum page_cache_mode req_pcm, enum page_cache_mode *ret_pcm);
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 65622f0..3486d04 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1076,6 +1076,13 @@ void __init setup_arch(char **cmdline_p)
        max_possible_pfn = max_pfn;
 
        /*
+        * This call is required when the CPU does not support PAT. If
+        * mtrr_bp_init() invoked it already via pat_init() the call has no
+        * effect.
+        */
+       init_cache_modes();
+
+       /*
         * Define random base addresses for memory sections after max_pfn is
         * defined and before each memory section base is used.
         */
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 9b78685..4597950 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -37,14 +37,14 @@
 #undef pr_fmt
 #define pr_fmt(fmt) "" fmt
 
-static bool boot_cpu_done;
-
-static int __read_mostly __pat_enabled = IS_ENABLED(CONFIG_X86_PAT);
-static void init_cache_modes(void);
+static bool __read_mostly boot_cpu_done;
+static bool __read_mostly pat_disabled = !IS_ENABLED(CONFIG_X86_PAT);
+static bool __read_mostly pat_initialized;
+static bool __read_mostly init_cm_done;
 
 void pat_disable(const char *reason)
 {
-       if (!__pat_enabled)
+       if (pat_disabled)
                return;
 
        if (boot_cpu_done) {
@@ -52,10 +52,8 @@ void pat_disable(const char *reason)
                return;
        }
 
-       __pat_enabled = 0;
+       pat_disabled = true;
        pr_info("x86/PAT: %s\n", reason);
-
-       init_cache_modes();
 }
 
 static int __init nopat(char *str)
@@ -67,7 +65,7 @@ early_param("nopat", nopat);
 
 bool pat_enabled(void)
 {
-       return !!__pat_enabled;
+       return pat_initialized;
 }
 EXPORT_SYMBOL_GPL(pat_enabled);
 
@@ -205,6 +203,8 @@ static void __init_cache_modes(u64 pat)
                update_cache_mode_entry(i, cache);
        }
        pr_info("x86/PAT: Configuration [0-7]: %s\n", pat_msg);
+
+       init_cm_done = true;
 }
 
 #define PAT(x, y)      ((u64)PAT_ ## y << ((x)*8))
@@ -225,6 +225,7 @@ static void pat_bsp_init(u64 pat)
        }
 
        wrmsrl(MSR_IA32_CR_PAT, pat);
+       pat_initialized = true;
 
        __init_cache_modes(pat);
 }
@@ -242,10 +243,9 @@ static void pat_ap_init(u64 pat)
        wrmsrl(MSR_IA32_CR_PAT, pat);
 }
 
-static void init_cache_modes(void)
+void init_cache_modes(void)
 {
        u64 pat = 0;
-       static int init_cm_done;
 
        if (init_cm_done)
                return;
@@ -287,8 +287,6 @@ static void init_cache_modes(void)
        }
 
        __init_cache_modes(pat);
-
-       init_cm_done = 1;
 }
 
 /**
@@ -306,10 +304,8 @@ void pat_init(void)
        u64 pat;
        struct cpuinfo_x86 *c = &boot_cpu_data;
 
-       if (!pat_enabled()) {
-               init_cache_modes();
+       if (pat_disabled)
                return;
-       }
 
        if ((c->x86_vendor == X86_VENDOR_INTEL) &&
            (((c->x86 == 0x6) && (c->x86_model <= 0xd)) ||
