Commit-ID: 45fc8757d1d2128e342b4e7ef39adedf7752faac
Gitweb: http://git.kernel.org/tip/45fc8757d1d2128e342b4e7ef39adedf7752faac
Author: Thomas Garnier
AuthorDate: Tue, 14 Mar 2017 10:05:08 -0700
Committer: Ingo Molnar
CommitDate: Thu, 16 Mar 2017 09:06:35 +0100
x86: Make the GDT remapping read-only on 64-bit
This patch makes the GDT remapped pages read-only, to prevent accidental
(or intentional) corruption of this key data structure.
This change is done only on 64-bit, because 32-bit needs it to be writable
for TSS switches.
The native_load_tr_desc function was adapted to correctly handle a
read-only GDT. The LTR instruction always writes to the GDT TSS entry.
This generates a page fault if the GDT is read-only. This change checks
if the current GDT is a remap and swap GDTs as needed. This function was
tested by booting multiple machines and checking hibernation works
properly.
KVM SVM and VMX were adapted to use the writeable GDT. On VMX, the
per-cpu variable was removed for functions to fetch the original GDT.
Instead of reloading the previous GDT, VMX will reload the fixmap GDT as
expected. For testing, VMs were started and restored on multiple
configurations.
Signed-off-by: Thomas Garnier
Cc: Alexander Potapenko
Cc: Andrew Morton
Cc: Andrey Ryabinin
Cc: Andy Lutomirski
Cc: Ard Biesheuvel
Cc: Boris Ostrovsky
Cc: Borislav Petkov
Cc: Chris Wilson
Cc: Christian Borntraeger
Cc: Dmitry Vyukov
Cc: Frederic Weisbecker
Cc: Jiri Kosina
Cc: Joerg Roedel
Cc: Jonathan Corbet
Cc: Josh Poimboeuf
Cc: Juergen Gross
Cc: Kees Cook
Cc: Len Brown
Cc: Linus Torvalds
Cc: Lorenzo Stoakes
Cc: Luis R . Rodriguez
Cc: Matt Fleming
Cc: Michal Hocko
Cc: Paolo Bonzini
Cc: Paul Gortmaker
Cc: Pavel Machek
Cc: Peter Zijlstra
Cc: Radim Krčmář
Cc: Rafael J . Wysocki
Cc: Rusty Russell
Cc: Stanislaw Gruszka
Cc: Thomas Gleixner
Cc: Tim Chen
Cc: Vitaly Kuznetsov
Cc: kasan-...@googlegroups.com
Cc: kernel-harden...@lists.openwall.com
Cc: k...@vger.kernel.org
Cc: lgu...@lists.ozlabs.org
Cc: linux-...@vger.kernel.org
Cc: linux-...@vger.kernel.org
Cc: linux...@kvack.org
Cc: linux...@vger.kernel.org
Cc: xen-de...@lists.xenproject.org
Cc: zijun_hu
Link: http://lkml.kernel.org/r/20170314170508.100882-3-thgar...@google.com
Signed-off-by: Ingo Molnar
---
arch/x86/include/asm/desc.h | 106 +--
arch/x86/include/asm/processor.h | 1 +
arch/x86/kernel/cpu/common.c | 28 ---
arch/x86/kvm/svm.c | 4 +-
arch/x86/kvm/vmx.c | 12 ++---
5 files changed, 96 insertions(+), 55 deletions(-)
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index 4b5ef0c..ec05f9c 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -248,9 +248,77 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
}
}
+static inline void native_load_gdt(const struct desc_ptr *dtr)
+{
+ asm volatile("lgdt %0"::"m" (*dtr));
+}
+
+static inline void native_load_idt(const struct desc_ptr *dtr)
+{
+ asm volatile("lidt %0"::"m" (*dtr));
+}
+
+static inline void native_store_gdt(struct desc_ptr *dtr)
+{
+ asm volatile("sgdt %0":"=m" (*dtr));
+}
+
+static inline void native_store_idt(struct desc_ptr *dtr)
+{
+ asm volatile("sidt %0":"=m" (*dtr));
+}
+
+/*
+ * The LTR instruction marks the TSS GDT entry as busy. On 64-bit, the GDT is
+ * a read-only remapping. To prevent a page fault, the GDT is switched to the
+ * original writeable version when needed.
+ */
+#ifdef CONFIG_X86_64
static inline void native_load_tr_desc(void)
{
+ struct desc_ptr gdt;
+ int cpu = raw_smp_processor_id();
+ bool restore = 0;
+ struct desc_struct *fixmap_gdt;
+
+ native_store_gdt(&gdt);
+ fixmap_gdt = get_cpu_gdt_ro(cpu);
+
+ /*
+ * If the current GDT is the read-only fixmap, swap to the original
+ * writeable version. Swap back at the end.
+ */
+ if (gdt.address == (unsigned long)fixmap_gdt) {
+ load_direct_gdt(cpu);
+ restore = 1;
+ }
asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
+ if (restore)
+ load_fixmap_gdt(cpu);
+}
+#else
+static inline void native_load_tr_desc(void)
+{
+ asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
+}
+#endif
+
+static inline unsigned long native_store_tr(void)
+{
+ unsigned long tr;
+
+ asm volatile("str %0":"=r" (tr));
+
+ return tr;
+}
+
+static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
+{
+ struct desc_struct *gdt = get_cpu_gdt_rw(cpu);
+ unsigned int i;
+
+ for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
+ gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
}
DECLARE_PER_CPU(bool, __tss_limit_invalid);
@@ -305,44 +373,6 @@ static inline void invalidate_tss_limit(void)
this_cpu_write(__tss_limit_invalid, true);
}
-static