Linus,

Please pull the latest x86-urgent-for-linus git tree from:

   git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86-urgent-for-linus

   # HEAD: 88edb57d1e0b262e669c5cad36646dcf5a7f37f5 x86/vdso: Change time() prototype to match __vdso_time()

Misc fixes:

 - Make CR4 handling irq-safe, fixing a bug that VMware guests ran into
 - Don't crash on early IRQs in Xen guests
 - Don't crash during secondary CPU bringup if #UD-assisted WARN()ings are triggered
 - Don't set X86_BUG_FXSAVE_LEAK on newer AMD CPUs that advertise the fix (XSAVEERPTR)
 - Fix AMD Fam17h microcode loading
 - Fix broadcom_postcore_init() if ACPI is disabled
 - Fix resume regression in __restore_processor_context()
 - Fix Sparse warnings
 - Fix a GCC-8 warning

 Thanks,

        Ingo

------------------>
Andy Lutomirski (1):
      x86/power: Fix some ordering bugs in __restore_processor_context()

Arnd Bergmann (1):
      x86/vdso: Change time() prototype to match __vdso_time()

Chunyu Hu (1):
      x86/idt: Load idt early in start_secondary

Colin Ian King (1):
      x86: Fix Sparse warnings about non-static functions

Juergen Gross (1):
      x86/xen: Support early interrupts in xen pv guests

Nadav Amit (2):
      x86/tlb: Refactor CR4 setting and shadow write
      x86/tlb: Disable interrupts when changing CR4

Rafael J. Wysocki (1):
      x86/PCI: Make broadcom_postcore_init() check acpi_disabled

Rudolf Marek (1):
      x86/cpufeatures: Make X86_BUG_FXSAVE_LEAK detectable in CPUID on AMD

Tom Lendacky (1):
      x86/microcode/AMD: Add support for fam17h microcode loading


 arch/x86/entry/vdso/vclock_gettime.c |  2 +-
 arch/x86/include/asm/cpufeatures.h   |  1 +
 arch/x86/include/asm/segment.h       | 12 ++++++++++++
 arch/x86/include/asm/tlbflush.h      | 35 ++++++++++++++++++----------------
 arch/x86/kernel/apic/vector.c        |  4 ++--
 arch/x86/kernel/cpu/amd.c            |  7 +++++--
 arch/x86/kernel/cpu/microcode/amd.c  |  4 ++++
 arch/x86/kernel/process.c            |  2 +-
 arch/x86/kernel/smpboot.c            |  2 +-
 arch/x86/mm/extable.c                |  4 +++-
 arch/x86/pci/broadcom_bus.c          |  2 +-
 arch/x86/platform/uv/uv_nmi.c        |  4 ++--
 arch/x86/power/cpu.c                 | 21 ++++++++++++++++----
 arch/x86/xen/enlighten_pv.c          | 37 +++++++++++++++++++++++-------------
 arch/x86/xen/xen-asm_64.S            | 14 ++++++++++++++
 15 files changed, 107 insertions(+), 44 deletions(-)

diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c
index 11b13c4b43d5..f19856d95c60 100644
--- a/arch/x86/entry/vdso/vclock_gettime.c
+++ b/arch/x86/entry/vdso/vclock_gettime.c
@@ -324,5 +324,5 @@ notrace time_t __vdso_time(time_t *t)
                *t = result;
        return result;
 }
-int time(time_t *t)
+time_t time(time_t *t)
        __attribute__((weak, alias("__vdso_time")));
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index c0b0e9e8aa66..800104c8a3ed 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -266,6 +266,7 @@
 /* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */
 #define X86_FEATURE_CLZERO             (13*32+ 0) /* CLZERO instruction */
 #define X86_FEATURE_IRPERF             (13*32+ 1) /* Instructions Retired Count */
+#define X86_FEATURE_XSAVEERPTR         (13*32+ 2) /* Always save/restore FP error pointers */
 
 /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
 #define X86_FEATURE_DTHERM             (14*32+ 0) /* Digital Thermal Sensor */
diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
index b20f9d623f9c..8f09012b92e7 100644
--- a/arch/x86/include/asm/segment.h
+++ b/arch/x86/include/asm/segment.h
@@ -236,11 +236,23 @@
  */
 #define EARLY_IDT_HANDLER_SIZE 9
 
+/*
+ * xen_early_idt_handler_array is for Xen pv guests: for each entry in
+ * early_idt_handler_array it contains a prequel in the form of
+ * pop %rcx; pop %r11; jmp early_idt_handler_array[i]; summing up to
+ * max 8 bytes.
+ */
+#define XEN_EARLY_IDT_HANDLER_SIZE 8
+
 #ifndef __ASSEMBLY__
 
 extern const char early_idt_handler_array[NUM_EXCEPTION_VECTORS][EARLY_IDT_HANDLER_SIZE];
 extern void early_ignore_irq(void);
 
+#if defined(CONFIG_X86_64) && defined(CONFIG_XEN_PV)
+extern const char xen_early_idt_handler_array[NUM_EXCEPTION_VECTORS][XEN_EARLY_IDT_HANDLER_SIZE];
+#endif
+
 /*
  * Load a segment. Fall back on loading the zero segment if something goes
  * wrong.  This variant assumes that loading zero fully clears the segment.
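
The 8-byte figure in the xen_early_idt_handler_array comment above follows from
the x86-64 encoding of the three instructions in each stub: pop %rcx is 1 byte,
pop %r11 is 2 bytes (REX.B prefix plus pop), and a rel32 jmp is 5 bytes. A
trivial standalone check of that arithmetic (sizes hardcoded from the encoding,
for illustration only):

#include <assert.h>

#define XEN_EARLY_IDT_HANDLER_SIZE 8

int main(void)
{
	int pop_rcx   = 1;	/* 0x59 */
	int pop_r11   = 2;	/* 0x41 0x5b: REX.B prefix + pop */
	int jmp_rel32 = 5;	/* 0xe9 + 32-bit displacement */

	/* The worst-case stub must fit the reserved per-vector slot. */
	assert(pop_rcx + pop_r11 + jmp_rel32 <= XEN_EARLY_IDT_HANDLER_SIZE);
	return 0;
}
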
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 509046cfa5ce..877b5c1a1b12 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -173,40 +173,43 @@ static inline void cr4_init_shadow(void)
        this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
 }
 
+static inline void __cr4_set(unsigned long cr4)
+{
+       lockdep_assert_irqs_disabled();
+       this_cpu_write(cpu_tlbstate.cr4, cr4);
+       __write_cr4(cr4);
+}
+
 /* Set in this cpu's CR4. */
 static inline void cr4_set_bits(unsigned long mask)
 {
-       unsigned long cr4;
+       unsigned long cr4, flags;
 
+       local_irq_save(flags);
        cr4 = this_cpu_read(cpu_tlbstate.cr4);
-       if ((cr4 | mask) != cr4) {
-               cr4 |= mask;
-               this_cpu_write(cpu_tlbstate.cr4, cr4);
-               __write_cr4(cr4);
-       }
+       if ((cr4 | mask) != cr4)
+               __cr4_set(cr4 | mask);
+       local_irq_restore(flags);
 }
 
 /* Clear in this cpu's CR4. */
 static inline void cr4_clear_bits(unsigned long mask)
 {
-       unsigned long cr4;
+       unsigned long cr4, flags;
 
+       local_irq_save(flags);
        cr4 = this_cpu_read(cpu_tlbstate.cr4);
-       if ((cr4 & ~mask) != cr4) {
-               cr4 &= ~mask;
-               this_cpu_write(cpu_tlbstate.cr4, cr4);
-               __write_cr4(cr4);
-       }
+       if ((cr4 & ~mask) != cr4)
+               __cr4_set(cr4 & ~mask);
+       local_irq_restore(flags);
 }
 
-static inline void cr4_toggle_bits(unsigned long mask)
+static inline void cr4_toggle_bits_irqsoff(unsigned long mask)
 {
        unsigned long cr4;
 
        cr4 = this_cpu_read(cpu_tlbstate.cr4);
-       cr4 ^= mask;
-       this_cpu_write(cpu_tlbstate.cr4, cr4);
-       __write_cr4(cr4);
+       __cr4_set(cr4 ^ mask);
 }
 
 /* Read the CR4 shadow. */
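
The CR4 helpers above are read-modify-write sequences on shared state: they
read the per-CPU shadow, compute a new value and write both the shadow and the
register. An interrupt arriving between the read and the write can itself
change CR4, and the interrupted update then writes back a stale value;
disabling interrupts around the sequence closes that window. A minimal
user-space sketch of the fixed pattern (the local_irq_* stubs and the two
variables are stand-ins for the kernel primitives, illustration only):

#include <stdio.h>

static unsigned long shadow_cr4;	/* models this CPU's cpu_tlbstate.cr4 */
static unsigned long hw_cr4;		/* models the real CR4 register */
static int irqs_on = 1;

static void local_irq_save(unsigned long *flags)
{
	*flags = irqs_on;
	irqs_on = 0;			/* no "interrupt" can run in here */
}

static void local_irq_restore(unsigned long flags)
{
	irqs_on = flags;
}

/* Mirrors the new __cr4_set(): shadow and register are always updated as
 * a pair, and only ever with interrupts disabled. */
static void __cr4_set(unsigned long cr4)
{
	shadow_cr4 = cr4;
	hw_cr4 = cr4;
}

static void cr4_set_bits(unsigned long mask)
{
	unsigned long cr4, flags;

	local_irq_save(&flags);
	cr4 = shadow_cr4;
	if ((cr4 | mask) != cr4)	/* skip the expensive write if unchanged */
		__cr4_set(cr4 | mask);
	local_irq_restore(flags);
}

int main(void)
{
	cr4_set_bits(1UL << 2);		/* bit 2 is X86_CR4_TSD */
	printf("cr4 = %#lx\n", hw_cr4);
	return 0;
}
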
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 6a823a25eaff..750449152b04 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -542,8 +542,8 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
 }
 
 #ifdef CONFIG_GENERIC_IRQ_DEBUGFS
-void x86_vector_debug_show(struct seq_file *m, struct irq_domain *d,
-                          struct irq_data *irqd, int ind)
+static void x86_vector_debug_show(struct seq_file *m, struct irq_domain *d,
+                                 struct irq_data *irqd, int ind)
 {
        unsigned int cpu, vector, prev_cpu, prev_vector;
        struct apic_chip_data *apicd;
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index d58184b7cd44..bcb75dc97d44 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -804,8 +804,11 @@ static void init_amd(struct cpuinfo_x86 *c)
        case 0x17: init_amd_zn(c); break;
        }
 
-       /* Enable workaround for FXSAVE leak */
-       if (c->x86 >= 6)
+       /*
+        * Enable workaround for FXSAVE leak on CPUs
+        * without a XSaveErPtr feature
+        */
+       if ((c->x86 >= 6) && (!cpu_has(c, X86_FEATURE_XSAVEERPTR)))
                set_cpu_bug(c, X86_BUG_FXSAVE_LEAK);
 
        cpu_detect_cache_sizes(c);
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index c6daec4bdba5..330b8462d426 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -470,6 +470,7 @@ static unsigned int verify_patch_size(u8 family, u32 patch_size,
 #define F14H_MPB_MAX_SIZE 1824
 #define F15H_MPB_MAX_SIZE 4096
 #define F16H_MPB_MAX_SIZE 3458
+#define F17H_MPB_MAX_SIZE 3200
 
        switch (family) {
        case 0x14:
@@ -481,6 +482,9 @@ static unsigned int verify_patch_size(u8 family, u32 patch_size,
        case 0x16:
                max_size = F16H_MPB_MAX_SIZE;
                break;
+       case 0x17:
+               max_size = F17H_MPB_MAX_SIZE;
+               break;
        default:
                max_size = F1XH_MPB_MAX_SIZE;
                break;
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 97fb3e5737f5..bb988a24db92 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -299,7 +299,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
        }
 
        if ((tifp ^ tifn) & _TIF_NOTSC)
-               cr4_toggle_bits(X86_CR4_TSD);
+               cr4_toggle_bits_irqsoff(X86_CR4_TSD);
 
        if ((tifp ^ tifn) & _TIF_NOCPUID)
                set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 3d01df7d7cf6..05a97d5fe298 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -237,7 +237,7 @@ static void notrace start_secondary(void *unused)
        load_cr3(swapper_pg_dir);
        __flush_tlb_all();
 #endif
-
+       load_current_idt();
        cpu_init();
        x86_cpuinit.early_percpu_clock_init();
        preempt_disable();
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index 3321b446b66c..88754bfd425f 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -1,6 +1,7 @@
 #include <linux/extable.h>
 #include <linux/uaccess.h>
 #include <linux/sched/debug.h>
+#include <xen/xen.h>
 
 #include <asm/fpu/internal.h>
 #include <asm/traps.h>
@@ -212,8 +213,9 @@ void __init early_fixup_exception(struct pt_regs *regs, int trapnr)
         * Old CPUs leave the high bits of CS on the stack
         * undefined.  I'm not sure which CPUs do this, but at least
         * the 486 DX works this way.
+        * Xen pv domains are not using the default __KERNEL_CS.
         */
-       if (regs->cs != __KERNEL_CS)
+       if (!xen_pv_domain() && regs->cs != __KERNEL_CS)
                goto fail;
 
        /*
diff --git a/arch/x86/pci/broadcom_bus.c b/arch/x86/pci/broadcom_bus.c
index bb461cfd01ab..526536c81ddc 100644
--- a/arch/x86/pci/broadcom_bus.c
+++ b/arch/x86/pci/broadcom_bus.c
@@ -97,7 +97,7 @@ static int __init broadcom_postcore_init(void)
         * We should get host bridge information from ACPI unless the BIOS
         * doesn't support it.
         */
-       if (acpi_os_get_root_pointer())
+       if (!acpi_disabled && acpi_os_get_root_pointer())
                return 0;
 #endif
 
diff --git a/arch/x86/platform/uv/uv_nmi.c b/arch/x86/platform/uv/uv_nmi.c
index c34bd8233f7c..5f64f30873e2 100644
--- a/arch/x86/platform/uv/uv_nmi.c
+++ b/arch/x86/platform/uv/uv_nmi.c
@@ -905,7 +905,7 @@ static inline void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master)
 /*
  * UV NMI handler
  */
-int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
+static int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
 {
        struct uv_hub_nmi_s *hub_nmi = uv_hub_nmi;
        int cpu = smp_processor_id();
@@ -1013,7 +1013,7 @@ void uv_nmi_init(void)
 }
 
 /* Setup HUB NMI info */
-void __init uv_nmi_setup_common(bool hubbed)
+static void __init uv_nmi_setup_common(bool hubbed)
 {
        int size = sizeof(void *) * (1 << NODES_SHIFT);
        int cpu;
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 84fcfde53f8f..5191de14f4df 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -226,8 +226,20 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
        load_idt((const struct desc_ptr *)&ctxt->idt_limit);
 #endif
 
+#ifdef CONFIG_X86_64
        /*
-        * segment registers
+        * We need GSBASE restored before percpu access can work.
+        * percpu access can happen in exception handlers or in complicated
+        * helpers like load_gs_index().
+        */
+       wrmsrl(MSR_GS_BASE, ctxt->gs_base);
+#endif
+
+       fix_processor_context();
+
+       /*
+        * Restore segment registers.  This happens after restoring the GDT
+        * and LDT, which happen in fix_processor_context().
         */
 #ifdef CONFIG_X86_32
        loadsegment(es, ctxt->es);
@@ -248,13 +260,14 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
        load_gs_index(ctxt->gs);
        asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss));
 
+       /*
+        * Restore FSBASE and user GSBASE after reloading the respective
+        * segment selectors.
+        */
        wrmsrl(MSR_FS_BASE, ctxt->fs_base);
-       wrmsrl(MSR_GS_BASE, ctxt->gs_base);
        wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
 #endif
 
-       fix_processor_context();
-
        do_fpu_end();
        tsc_verify_tsc_adjust(true);
        x86_platform.restore_sched_clock_state();
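
The reordering in __restore_processor_state() encodes two hardware rules:
GSBASE must be restored before anything can touch per-CPU data (so before
exception handlers or helpers like load_gs_index() run), and FSBASE plus the
user GSBASE must be written after the segment selectors are reloaded, because
loading a selector replaces the cached base with the one from the descriptor
(typically 0). A toy model of that second rule, with a made-up struct and
selector/base values standing in for the hardware state (illustration only):

#include <assert.h>

/* Toy model of an x86-64 segment register. */
struct seg {
	unsigned short sel;
	unsigned long base;
};

/* Loading a selector reloads the base from the descriptor; for the usual
 * user data segments that descriptor base is 0. */
static void load_selector(struct seg *s, unsigned short sel)
{
	s->sel = sel;
	s->base = 0;
}

/* Models a wrmsrl() to the segment's base MSR. */
static void wrmsr_base(struct seg *s, unsigned long base)
{
	s->base = base;
}

int main(void)
{
	struct seg fs = { 0, 0 };

	/* Wrong order: the selector load wipes the just-restored base. */
	wrmsr_base(&fs, 0x7f0000000000UL);
	load_selector(&fs, 0x2b);	/* made-up selector value */
	assert(fs.base == 0);

	/* Right order (what the patch does): selector first, base second. */
	load_selector(&fs, 0x2b);
	wrmsr_base(&fs, 0x7f0000000000UL);
	assert(fs.base == 0x7f0000000000UL);
	return 0;
}
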
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 5b2b3f3f6531..f2414c6c5e7c 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -622,7 +622,7 @@ static struct trap_array_entry trap_array[] = {
        { simd_coprocessor_error,      xen_simd_coprocessor_error,      false },
 };
 
-static bool get_trap_addr(void **addr, unsigned int ist)
+static bool __ref get_trap_addr(void **addr, unsigned int ist)
 {
        unsigned int nr;
        bool ist_okay = false;
@@ -644,6 +644,14 @@ static bool get_trap_addr(void **addr, unsigned int ist)
                }
        }
 
+       if (nr == ARRAY_SIZE(trap_array) &&
+           *addr >= (void *)early_idt_handler_array[0] &&
+           *addr < (void *)early_idt_handler_array[NUM_EXCEPTION_VECTORS]) {
+               nr = (*addr - (void *)early_idt_handler_array[0]) /
+                    EARLY_IDT_HANDLER_SIZE;
+               *addr = (void *)xen_early_idt_handler_array[nr];
+       }
+
        if (WARN_ON(ist != 0 && !ist_okay))
                return false;
 
@@ -1262,6 +1270,21 @@ asmlinkage __visible void __init xen_start_kernel(void)
        xen_setup_gdt(0);
 
        xen_init_irq_ops();
+
+       /* Let's presume PV guests always boot on vCPU with id 0. */
+       per_cpu(xen_vcpu_id, 0) = 0;
+
+       /*
+        * Setup xen_vcpu early because idt_setup_early_handler needs it for
+        * local_irq_disable(), irqs_disabled().
+        *
+        * Don't do the full vcpu_info placement stuff until we have
+        * the cpu_possible_mask and a non-dummy shared_info.
+        */
+       xen_vcpu_info_reset(0);
+
+       idt_setup_early_handler();
+
        xen_init_capabilities();
 
 #ifdef CONFIG_X86_LOCAL_APIC
@@ -1295,18 +1318,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
         */
        acpi_numa = -1;
 #endif
-       /* Let's presume PV guests always boot on vCPU with id 0. */
-       per_cpu(xen_vcpu_id, 0) = 0;
-
-       /*
-        * Setup xen_vcpu early because start_kernel needs it for
-        * local_irq_disable(), irqs_disabled().
-        *
-        * Don't do the full vcpu_info placement stuff until we have
-        * the cpu_possible_mask and a non-dummy shared_info.
-        */
-       xen_vcpu_info_reset(0);
-
        WARN_ON(xen_cpuhp_setup(xen_cpu_up_prepare_pv, xen_cpu_dead_pv));
 
        local_irq_disable();
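
The get_trap_addr() hunk above redirects any IDT entry that points into
early_idt_handler_array to the matching Xen stub by pure pointer arithmetic:
the vector number is the offset into the array divided by the per-vector
handler size. A standalone sketch of that mapping (the two arrays here are
dummy stand-ins for the real handler tables, illustration only):

#include <stdio.h>
#include <stdint.h>

#define NUM_EXCEPTION_VECTORS      32
#define EARLY_IDT_HANDLER_SIZE      9
#define XEN_EARLY_IDT_HANDLER_SIZE  8

static char early_idt_handler_array[NUM_EXCEPTION_VECTORS][EARLY_IDT_HANDLER_SIZE];
static char xen_early_idt_handler_array[NUM_EXCEPTION_VECTORS][XEN_EARLY_IDT_HANDLER_SIZE];

/* Map an address inside early_idt_handler_array to the Xen stub for the
 * same vector; leave any other address untouched. */
static void *redirect_to_xen_stub(void *addr)
{
	uintptr_t p    = (uintptr_t)addr;
	uintptr_t base = (uintptr_t)early_idt_handler_array;

	if (p < base || p >= base + sizeof(early_idt_handler_array))
		return addr;

	return xen_early_idt_handler_array[(p - base) / EARLY_IDT_HANDLER_SIZE];
}

int main(void)
{
	void *gp = early_idt_handler_array[13];		/* vector 13: #GP */

	printf("vector 13 stub: %p (expected %p)\n",
	       redirect_to_xen_stub(gp),
	       (void *)xen_early_idt_handler_array[13]);
	return 0;
}
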
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index 8a10c9a9e2b5..417b339e5c8e 100644
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -15,6 +15,7 @@
 
 #include <xen/interface/xen.h>
 
+#include <linux/init.h>
 #include <linux/linkage.h>
 
 .macro xen_pv_trap name
@@ -54,6 +55,19 @@ xen_pv_trap entry_INT80_compat
 #endif
 xen_pv_trap hypervisor_callback
 
+       __INIT
+ENTRY(xen_early_idt_handler_array)
+       i = 0
+       .rept NUM_EXCEPTION_VECTORS
+       pop %rcx
+       pop %r11
+       jmp early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE
+       i = i + 1
+       .fill xen_early_idt_handler_array + i*XEN_EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
+       .endr
+END(xen_early_idt_handler_array)
+       __FINIT
+
 hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
 /*
  * Xen64 iret frame:
