diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index 258902db14bf..8355e79350b7 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -378,6 +378,7 @@ What:               /sys/devices/system/cpu/vulnerabilities
                /sys/devices/system/cpu/vulnerabilities/meltdown
                /sys/devices/system/cpu/vulnerabilities/spectre_v1
                /sys/devices/system/cpu/vulnerabilities/spectre_v2
+               /sys/devices/system/cpu/vulnerabilities/spec_store_bypass
 Date:          January 2018
 Contact:       Linux kernel mailing list <linux-kernel@vger.kernel.org>
 Description:   Information about CPU vulnerabilities
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 8cfb44ffe853..0380a45ecf4b 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -2600,6 +2600,9 @@
                        allow data leaks with this option, which is equivalent
                        to spectre_v2=off.
 
+       nospec_store_bypass_disable
+                       [HW] Disable all mitigations for the Speculative Store Bypass vulnerability
+
        noxsave         [BUGS=X86] Disables x86 extended register state save
                        and restore using xsave. The kernel will fallback to
                        enabling legacy floating-point and sse state.
@@ -3930,6 +3933,48 @@
                        Not specifying this option is equivalent to
                        spectre_v2=auto.
 
+       spec_store_bypass_disable=
+                       [HW] Control Speculative Store Bypass (SSB) Disable mitigation
+                       (Speculative Store Bypass vulnerability)
+
+                       Certain CPUs are vulnerable to an exploit against a
+                       common industry wide performance optimization known
+                       as "Speculative Store Bypass" in which recent stores
+                       to the same memory location may not be observed by
+                       later loads during speculative execution. The idea
+                       is that such stores are unlikely and that they can
+                       be detected prior to instruction retirement at the
+                       end of a particular speculation execution window.
+
+                       In vulnerable processors, the speculatively forwarded
+                       store can be used in a cache side channel attack, for
+                       example to read memory to which the attacker does not
+                       directly have access (e.g. inside sandboxed code).
+
+                       This parameter controls whether the Speculative Store
+                       Bypass optimization is used.
+
+                       on      - Unconditionally disable Speculative Store Bypass
+                       off     - Unconditionally enable Speculative Store Bypass
+                       auto    - Kernel detects whether the CPU model contains an
+                                 implementation of Speculative Store Bypass and
+                                 picks the most appropriate mitigation. If the
+                                 CPU is not vulnerable, "off" is selected. If the
+                                 CPU is vulnerable the default mitigation is
+                                 architecture and Kconfig dependent. See below.
+                       prctl   - Control Speculative Store Bypass per thread
+                                 via prctl. Speculative Store Bypass is enabled
+                                 for a process by default. The state of the control
+                                 is inherited on fork.
+                       seccomp - Same as "prctl" above, but all seccomp threads
+                                 will disable SSB unless they explicitly opt out.
+
+                       Not specifying this option is equivalent to
+                       spec_store_bypass_disable=auto.
+
+                       Default mitigations:
+                       X86:    If CONFIG_SECCOMP=y "seccomp", otherwise "prctl"
+
        spia_io_base=   [HW,MTD]
        spia_fio_base=
        spia_pedr=
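
For illustration, assuming a GRUB-style bootloader (the kernel image name and
root device below are placeholders, not part of the patch), forcing the prctl
mode is an ordinary command-line addition:

    linux /boot/vmlinuz-4.14.43 root=/dev/sda1 ro spec_store_bypass_disable=prctl

Omitting the parameter, or passing "auto", keeps the default selection
described in the text above.
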
diff --git a/Documentation/userspace-api/index.rst b/Documentation/userspace-api/index.rst
index 7b2eb1b7d4ca..a3233da7fa88 100644
--- a/Documentation/userspace-api/index.rst
+++ b/Documentation/userspace-api/index.rst
@@ -19,6 +19,7 @@ place where this information is gathered.
    no_new_privs
    seccomp_filter
    unshare
+   spec_ctrl
 
 .. only::  subproject and html
 
diff --git a/Documentation/userspace-api/spec_ctrl.rst b/Documentation/userspace-api/spec_ctrl.rst
new file mode 100644
index 000000000000..32f3d55c54b7
--- /dev/null
+++ b/Documentation/userspace-api/spec_ctrl.rst
@@ -0,0 +1,94 @@
+===================
+Speculation Control
+===================
+
+Quite a few CPUs have speculation-related misfeatures which are in
+fact vulnerabilities causing data leaks in various forms even across
+privilege domains.
+
+The kernel provides mitigation for such vulnerabilities in various
+forms. Some of these mitigations are compile-time configurable and some
+can be supplied on the kernel command line.
+
+There is also a class of mitigations which are very expensive, but they can
+be restricted to a certain set of processes or tasks in controlled
+environments. The mechanism to control these mitigations is via
+:manpage:`prctl(2)`.
+
+There are two prctl options which are related to this:
+
+ * PR_GET_SPECULATION_CTRL
+
+ * PR_SET_SPECULATION_CTRL
+
+PR_GET_SPECULATION_CTRL
+-----------------------
+
+PR_GET_SPECULATION_CTRL returns the state of the speculation misfeature
+which is selected with arg2 of prctl(2). The return value uses bits 0-3 with
+the following meaning:
+
+==== ===================== ===================================================
+Bit  Define                Description
+==== ===================== ===================================================
+0    PR_SPEC_PRCTL         Mitigation can be controlled per task by
+                           PR_SET_SPECULATION_CTRL.
+1    PR_SPEC_ENABLE        The speculation feature is enabled, mitigation is
+                           disabled.
+2    PR_SPEC_DISABLE       The speculation feature is disabled, mitigation is
+                           enabled.
+3    PR_SPEC_FORCE_DISABLE Same as PR_SPEC_DISABLE, but cannot be undone. A
+                           subsequent prctl(..., PR_SPEC_ENABLE) will fail.
+==== ===================== ===================================================
+
+If all bits are 0 the CPU is not affected by the speculation misfeature.
+
+If PR_SPEC_PRCTL is set, then the per-task control of the mitigation is
+available. If not set, prctl(PR_SET_SPECULATION_CTRL) for the speculation
+misfeature will fail.
+
+PR_SET_SPECULATION_CTRL
+-----------------------
+
+PR_SET_SPECULATION_CTRL allows controlling the speculation misfeature, which
+is selected by arg2 of :manpage:`prctl(2)` per task. arg3 is used to hand
+in the control value, i.e. either PR_SPEC_ENABLE or PR_SPEC_DISABLE or
+PR_SPEC_FORCE_DISABLE.
+
+Common error codes
+------------------
+======= =================================================================
+Value   Meaning
+======= =================================================================
+EINVAL  The prctl is not implemented by the architecture or unused
+        prctl(2) arguments are not 0.
+
+ENODEV  arg2 selects a speculation misfeature which is not supported.
+======= =================================================================
+
+PR_SET_SPECULATION_CTRL error codes
+-----------------------------------
+======= =================================================================
+Value   Meaning
+======= =================================================================
+0       Success
+
+ERANGE  arg3 is incorrect, i.e. it's neither PR_SPEC_ENABLE nor
+        PR_SPEC_DISABLE nor PR_SPEC_FORCE_DISABLE.
+
+ENXIO   Control of the selected speculation misfeature is not possible.
+        See PR_GET_SPECULATION_CTRL.
+
+EPERM   Speculation was disabled with PR_SPEC_FORCE_DISABLE and caller
+        tried to enable it again.
+======= =================================================================
+
+Speculation misfeature controls
+-------------------------------
+- PR_SPEC_STORE_BYPASS: Speculative Store Bypass
+
+  Invocations:
+   * prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);
+   * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_ENABLE, 0, 0);
+   * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_DISABLE, 0, 0);
+   * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_FORCE_DISABLE, 0, 0);
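
A minimal userspace sketch of the interface documented above (illustrative
only, not part of the patch), assuming the PR_SPEC_* constants are exported by
<linux/prctl.h> on a kernel carrying this series:

    #include <stdio.h>
    #include <errno.h>
    #include <string.h>
    #include <sys/prctl.h>
    #include <linux/prctl.h>

    int main(void)
    {
            int state;

            /* Query the Speculative Store Bypass state of this task. */
            state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);
            if (state < 0) {
                    fprintf(stderr, "PR_GET_SPECULATION_CTRL: %s\n", strerror(errno));
                    return 1;
            }
            printf("ssb state: %#x\n", state);

            /* Disable the misfeature for this task if per-task control is
             * available, i.e. PR_SPEC_PRCTL was set in the query result. */
            if ((state & PR_SPEC_PRCTL) &&
                prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
                      PR_SPEC_DISABLE, 0, 0))
                    fprintf(stderr, "PR_SET_SPECULATION_CTRL: %s\n", strerror(errno));

            return 0;
    }

A PR_SPEC_FORCE_DISABLE request works the same way, but per the error table
above any later PR_SPEC_ENABLE attempt then fails with EPERM.
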
diff --git a/Makefile b/Makefile
index 777f5685a36b..9be88c9d9fc9 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 14
-SUBLEVEL = 42
+SUBLEVEL = 43
 EXTRAVERSION =
 NAME = Petit Gorille
 
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index bc8d4bbd82e2..9342904cccca 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -536,4 +536,14 @@ THUMB(     orr     \reg , \reg , #PSR_T_BIT        )
 #endif
        .endm
 
+#ifdef CONFIG_KPROBES
+#define _ASM_NOKPROBE(entry)                           \
+       .pushsection "_kprobe_blacklist", "aw" ;        \
+       .balign 4 ;                                     \
+       .long entry;                                    \
+       .popsection
+#else
+#define _ASM_NOKPROBE(entry)
+#endif
+
 #endif /* __ASM_ASSEMBLER_H__ */
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index eb46fc81a440..08cd720eae01 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -221,6 +221,22 @@ static inline unsigned int kvm_get_vmid_bits(void)
        return 8;
 }
 
+/*
+ * We are not in the kvm->srcu critical section most of the time, so we take
+ * the SRCU read lock here. Since we copy the data from the user page, we
+ * can immediately drop the lock again.
+ */
+static inline int kvm_read_guest_lock(struct kvm *kvm,
+                                     gpa_t gpa, void *data, unsigned long len)
+{
+       int srcu_idx = srcu_read_lock(&kvm->srcu);
+       int ret = kvm_read_guest(kvm, gpa, data, len);
+
+       srcu_read_unlock(&kvm->srcu, srcu_idx);
+
+       return ret;
+}
+
 static inline void *kvm_get_hyp_vector(void)
 {
        return kvm_ksym_ref(__kvm_hyp_vector);
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index b8dc3b516f93..f702f2b37052 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -19,6 +19,7 @@
 #include <linux/uaccess.h>
 #include <linux/hardirq.h>
 #include <linux/kdebug.h>
+#include <linux/kprobes.h>
 #include <linux/module.h>
 #include <linux/kexec.h>
 #include <linux/bug.h>
@@ -417,7 +418,8 @@ void unregister_undef_hook(struct undef_hook *hook)
        raw_spin_unlock_irqrestore(&undef_lock, flags);
 }
 
-static int call_undef_hook(struct pt_regs *regs, unsigned int instr)
+static nokprobe_inline
+int call_undef_hook(struct pt_regs *regs, unsigned int instr)
 {
        struct undef_hook *hook;
        unsigned long flags;
@@ -490,6 +492,7 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
 
        arm_notify_die("Oops - undefined instruction", regs, &info, 0, 6);
 }
+NOKPROBE_SYMBOL(do_undefinstr)
 
 /*
  * Handle FIQ similarly to NMI on x86 systems.
diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S
index df73914e81c8..746e7801dcdf 100644
--- a/arch/arm/lib/getuser.S
+++ b/arch/arm/lib/getuser.S
@@ -38,6 +38,7 @@ ENTRY(__get_user_1)
        mov     r0, #0
        ret     lr
 ENDPROC(__get_user_1)
+_ASM_NOKPROBE(__get_user_1)
 
 ENTRY(__get_user_2)
        check_uaccess r0, 2, r1, r2, __get_user_bad
@@ -58,6 +59,7 @@ rb    .req    r0
        mov     r0, #0
        ret     lr
 ENDPROC(__get_user_2)
+_ASM_NOKPROBE(__get_user_2)
 
 ENTRY(__get_user_4)
        check_uaccess r0, 4, r1, r2, __get_user_bad
@@ -65,6 +67,7 @@ ENTRY(__get_user_4)
        mov     r0, #0
        ret     lr
 ENDPROC(__get_user_4)
+_ASM_NOKPROBE(__get_user_4)
 
 ENTRY(__get_user_8)
        check_uaccess r0, 8, r1, r2, __get_user_bad8
@@ -78,6 +81,7 @@ ENTRY(__get_user_8)
        mov     r0, #0
        ret     lr
 ENDPROC(__get_user_8)
+_ASM_NOKPROBE(__get_user_8)
 
 #ifdef __ARMEB__
 ENTRY(__get_user_32t_8)
@@ -91,6 +95,7 @@ ENTRY(__get_user_32t_8)
        mov     r0, #0
        ret     lr
 ENDPROC(__get_user_32t_8)
+_ASM_NOKPROBE(__get_user_32t_8)
 
 ENTRY(__get_user_64t_1)
        check_uaccess r0, 1, r1, r2, __get_user_bad8
@@ -98,6 +103,7 @@ ENTRY(__get_user_64t_1)
        mov     r0, #0
        ret     lr
 ENDPROC(__get_user_64t_1)
+_ASM_NOKPROBE(__get_user_64t_1)
 
 ENTRY(__get_user_64t_2)
        check_uaccess r0, 2, r1, r2, __get_user_bad8
@@ -114,6 +120,7 @@ rb  .req    r0
        mov     r0, #0
        ret     lr
 ENDPROC(__get_user_64t_2)
+_ASM_NOKPROBE(__get_user_64t_2)
 
 ENTRY(__get_user_64t_4)
        check_uaccess r0, 4, r1, r2, __get_user_bad8
@@ -121,6 +128,7 @@ ENTRY(__get_user_64t_4)
        mov     r0, #0
        ret     lr
 ENDPROC(__get_user_64t_4)
+_ASM_NOKPROBE(__get_user_64t_4)
 #endif
 
 __get_user_bad8:
@@ -131,6 +139,8 @@ __get_user_bad:
        ret     lr
 ENDPROC(__get_user_bad)
 ENDPROC(__get_user_bad8)
+_ASM_NOKPROBE(__get_user_bad)
+_ASM_NOKPROBE(__get_user_bad8)
 
 .pushsection __ex_table, "a"
        .long   1b, __get_user_bad
diff --git a/arch/arm/probes/kprobes/opt-arm.c b/arch/arm/probes/kprobes/opt-arm.c
index bcdecc25461b..b2aa9b32bff2 100644
--- a/arch/arm/probes/kprobes/opt-arm.c
+++ b/arch/arm/probes/kprobes/opt-arm.c
@@ -165,13 +165,14 @@ optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
 {
        unsigned long flags;
        struct kprobe *p = &op->kp;
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+       struct kprobe_ctlblk *kcb;
 
        /* Save skipped registers */
        regs->ARM_pc = (unsigned long)op->kp.addr;
        regs->ARM_ORIG_r0 = ~0UL;
 
        local_irq_save(flags);
+       kcb = get_kprobe_ctlblk();
 
        if (kprobe_running()) {
                kprobes_inc_nmissed_count(&op->kp);
@@ -191,6 +192,7 @@ optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
 
        local_irq_restore(flags);
 }
+NOKPROBE_SYMBOL(optimized_callback)
 
 int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *orig)
 {
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 2d6d4bd9de52..fe55b516f018 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -309,6 +309,22 @@ static inline unsigned int kvm_get_vmid_bits(void)
        return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
 }
 
+/*
+ * We are not in the kvm->srcu critical section most of the time, so we take
+ * the SRCU read lock here. Since we copy the data from the user page, we
+ * can immediately drop the lock again.
+ */
+static inline int kvm_read_guest_lock(struct kvm *kvm,
+                                     gpa_t gpa, void *data, unsigned long len)
+{
+       int srcu_idx = srcu_read_lock(&kvm->srcu);
+       int ret = kvm_read_guest(kvm, gpa, data, len);
+
+       srcu_read_unlock(&kvm->srcu, srcu_idx);
+
+       return ret;
+}
+
 #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
 #include <asm/mmu.h>
 
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 90bc20efb4c7..b4fcb54b9686 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -242,14 +242,6 @@ static int show_cpuinfo(struct seq_file *m, void *v)
        unsigned short maj;
        unsigned short min;
 
-       /* We only show online cpus: disable preempt (overzealous, I
-        * knew) to prevent cpu going down. */
-       preempt_disable();
-       if (!cpu_online(cpu_id)) {
-               preempt_enable();
-               return 0;
-       }
-
 #ifdef CONFIG_SMP
        pvr = per_cpu(cpu_pvr, cpu_id);
 #else
@@ -358,9 +350,6 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 #ifdef CONFIG_SMP
        seq_printf(m, "\n");
 #endif
-
-       preempt_enable();
-
        /* If this is the last cpu, print the summary */
        if (cpumask_next(cpu_id, cpu_online_mask) >= nr_cpu_ids)
                show_cpuinfo_summary(m);
diff --git a/arch/powerpc/platforms/powernv/opal-nvram.c b/arch/powerpc/platforms/powernv/opal-nvram.c
index 1bceb95f422d..5584247f5029 100644
--- a/arch/powerpc/platforms/powernv/opal-nvram.c
+++ b/arch/powerpc/platforms/powernv/opal-nvram.c
@@ -44,6 +44,10 @@ static ssize_t opal_nvram_read(char *buf, size_t count, loff_t *index)
        return count;
 }
 
+/*
+ * This can be called in the panic path with interrupts off, so use
+ * mdelay in that case.
+ */
 static ssize_t opal_nvram_write(char *buf, size_t count, loff_t *index)
 {
        s64 rc = OPAL_BUSY;
@@ -58,10 +62,16 @@ static ssize_t opal_nvram_write(char *buf, size_t count, loff_t *index)
        while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
                rc = opal_write_nvram(__pa(buf), count, off);
                if (rc == OPAL_BUSY_EVENT) {
-                       msleep(OPAL_BUSY_DELAY_MS);
+                       if (in_interrupt() || irqs_disabled())
+                               mdelay(OPAL_BUSY_DELAY_MS);
+                       else
+                               msleep(OPAL_BUSY_DELAY_MS);
                        opal_poll_events(NULL);
                } else if (rc == OPAL_BUSY) {
-                       msleep(OPAL_BUSY_DELAY_MS);
+                       if (in_interrupt() || irqs_disabled())
+                               mdelay(OPAL_BUSY_DELAY_MS);
+                       else
+                               msleep(OPAL_BUSY_DELAY_MS);
                }
        }
 
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 94f2099bceb0..3d17c41074ca 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -176,10 +176,9 @@ void do_softirq_own_stack(void)
                new -= STACK_FRAME_OVERHEAD;
                ((struct stack_frame *) new)->back_chain = old;
                asm volatile("   la    15,0(%0)\n"
-                            "   basr  14,%2\n"
+                            "   brasl 14,__do_softirq\n"
                             "   la    15,0(%1)\n"
-                            : : "a" (new), "a" (old),
-                                "a" (__do_softirq)
+                            : : "a" (new), "a" (old)
                             : "0", "1", "2", "3", "4", "5", "14",
                               "cc", "memory" );
        } else {
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index 7e1e40323b78..d99155793c26 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -739,6 +739,10 @@ static int __hw_perf_event_init(struct perf_event *event)
         */
        rate = 0;
        if (attr->freq) {
+               if (!attr->sample_freq) {
+                       err = -EINVAL;
+                       goto out;
+               }
                rate = freq_to_sample_rate(&si, attr->sample_freq);
                rate = hw_limit_rate(&si, rate);
                attr->freq = 0;
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index e56dbc67e837..97c57b5f8d57 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -163,7 +163,8 @@ __setup_efi_pci32(efi_pci_io_protocol_32 *pci, struct pci_setup_rom **__rom)
        if (status != EFI_SUCCESS)
                goto free_struct;
 
-       memcpy(rom->romdata, pci->romimage, pci->romsize);
+       memcpy(rom->romdata, (void *)(unsigned long)pci->romimage,
+              pci->romsize);
        return status;
 
 free_struct:
@@ -269,7 +270,8 @@ __setup_efi_pci64(efi_pci_io_protocol_64 *pci, struct pci_setup_rom **__rom)
        if (status != EFI_SUCCESS)
                goto free_struct;
 
-       memcpy(rom->romdata, pci->romimage, pci->romsize);
+       memcpy(rom->romdata, (void *)(unsigned long)pci->romimage,
+              pci->romsize);
        return status;
 
 free_struct:
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 23a65439c37c..403e97d5e243 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -198,7 +198,6 @@
 #define X86_FEATURE_CAT_L2             ( 7*32+ 5) /* Cache Allocation Technology L2 */
 #define X86_FEATURE_CDP_L3             ( 7*32+ 6) /* Code and Data Prioritization L3 */
 #define X86_FEATURE_INVPCID_SINGLE     ( 7*32+ 7) /* Effectively INVPCID && CR4.PCIDE=1 */
-
 #define X86_FEATURE_HW_PSTATE          ( 7*32+ 8) /* AMD HW-PState */
 #define X86_FEATURE_PROC_FEEDBACK      ( 7*32+ 9) /* AMD ProcFeedbackInterface */
 #define X86_FEATURE_SME                        ( 7*32+10) /* AMD Secure Memory Encryption */
@@ -207,11 +206,19 @@
 #define X86_FEATURE_RETPOLINE_AMD      ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */
 #define X86_FEATURE_INTEL_PPIN         ( 7*32+14) /* Intel Processor Inventory Number */
 
+#define X86_FEATURE_MSR_SPEC_CTRL      ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
+#define X86_FEATURE_SSBD               ( 7*32+17) /* Speculative Store Bypass Disable */
 #define X86_FEATURE_MBA                        ( 7*32+18) /* Memory Bandwidth Allocation */
 #define X86_FEATURE_RSB_CTXSW          ( 7*32+19) /* "" Fill RSB on context switches */
 
 #define X86_FEATURE_USE_IBPB           ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
 #define X86_FEATURE_USE_IBRS_FW                ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
+#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE  ( 7*32+23) /* "" Disable Speculative Store Bypass. */
+#define X86_FEATURE_LS_CFG_SSBD                ( 7*32+24)  /* "" AMD SSBD implementation via LS_CFG MSR */
+#define X86_FEATURE_IBRS               ( 7*32+25) /* Indirect Branch Restricted Speculation */
+#define X86_FEATURE_IBPB               ( 7*32+26) /* Indirect Branch Prediction Barrier */
+#define X86_FEATURE_STIBP              ( 7*32+27) /* Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_ZEN                        ( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */
 
 /* Virtualization flags: Linux defined, word 8 */
 #define X86_FEATURE_TPR_SHADOW         ( 8*32+ 0) /* Intel TPR Shadow */
@@ -272,9 +279,10 @@
 #define X86_FEATURE_CLZERO             (13*32+ 0) /* CLZERO instruction */
 #define X86_FEATURE_IRPERF             (13*32+ 1) /* Instructions Retired Count */
 #define X86_FEATURE_XSAVEERPTR         (13*32+ 2) /* Always save/restore FP error pointers */
-#define X86_FEATURE_IBPB               (13*32+12) /* Indirect Branch Prediction Barrier */
-#define X86_FEATURE_IBRS               (13*32+14) /* Indirect Branch Restricted Speculation */
-#define X86_FEATURE_STIBP              (13*32+15) /* Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_AMD_IBPB           (13*32+12) /* "" Indirect Branch Prediction Barrier */
+#define X86_FEATURE_AMD_IBRS           (13*32+14) /* "" Indirect Branch Restricted Speculation */
+#define X86_FEATURE_AMD_STIBP          (13*32+15) /* "" Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_VIRT_SSBD          (13*32+25) /* Virtualized Speculative Store Bypass Disable */
 
 /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
 #define X86_FEATURE_DTHERM             (14*32+ 0) /* Digital Thermal Sensor */
@@ -331,6 +339,7 @@
 #define X86_FEATURE_SPEC_CTRL          (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
 #define X86_FEATURE_INTEL_STIBP                (18*32+27) /* "" Single Thread Indirect Branch Predictors */
 #define X86_FEATURE_ARCH_CAPABILITIES  (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
+#define X86_FEATURE_SPEC_CTRL_SSBD     (18*32+31) /* "" Speculative Store Bypass Disable */
 
 /*
  * BUG word(s)
@@ -360,5 +369,6 @@
 #define X86_BUG_CPU_MELTDOWN           X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */
 #define X86_BUG_SPECTRE_V1             X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */
 #define X86_BUG_SPECTRE_V2             X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
+#define X86_BUG_SPEC_STORE_BYPASS      X86_BUG(17) /* CPU is affected by speculative store bypass attack */
 
 #endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 4f8b80199672..174b9c41efce 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -908,7 +908,7 @@ struct kvm_x86_ops {
        int (*hardware_setup)(void);               /* __init */
        void (*hardware_unsetup)(void);            /* __exit */
        bool (*cpu_has_accelerated_tpr)(void);
-       bool (*cpu_has_high_real_mode_segbase)(void);
+       bool (*has_emulated_msr)(int index);
        void (*cpuid_update)(struct kvm_vcpu *vcpu);
 
        int (*vm_init)(struct kvm *kvm);
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 1de72ce514cd..ed97ef3b48a7 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -192,7 +192,7 @@ static inline int init_new_context(struct task_struct *tsk,
 
 #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
        if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
-               /* pkey 0 is the default and always allocated */
+               /* pkey 0 is the default and allocated implicitly */
                mm->context.pkey_allocation_map = 0x1;
                /* -1 means unallocated or invalid */
                mm->context.execute_only_pkey = -1;
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index eb83ff1bae8f..504b21692d32 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -42,6 +42,8 @@
 #define MSR_IA32_SPEC_CTRL             0x00000048 /* Speculation Control */
 #define SPEC_CTRL_IBRS                 (1 << 0)   /* Indirect Branch Restricted Speculation */
 #define SPEC_CTRL_STIBP                        (1 << 1)   /* Single Thread Indirect Branch Predictors */
+#define SPEC_CTRL_SSBD_SHIFT           2          /* Speculative Store Bypass Disable bit */
+#define SPEC_CTRL_SSBD                 (1 << SPEC_CTRL_SSBD_SHIFT)   /* Speculative Store Bypass Disable */
 
 #define MSR_IA32_PRED_CMD              0x00000049 /* Prediction Command */
 #define PRED_CMD_IBPB                  (1 << 0)   /* Indirect Branch Prediction Barrier */
@@ -68,6 +70,11 @@
 #define MSR_IA32_ARCH_CAPABILITIES     0x0000010a
 #define ARCH_CAP_RDCL_NO               (1 << 0)   /* Not susceptible to Meltdown */
 #define ARCH_CAP_IBRS_ALL              (1 << 1)   /* Enhanced IBRS support */
+#define ARCH_CAP_SSB_NO                        (1 << 4)   /*
+                                                   * Not susceptible to Speculative Store Bypass
+                                                   * attack, so no Speculative Store Bypass
+                                                   * control required.
+                                                   */
 
 #define MSR_IA32_BBL_CR_CTL            0x00000119
 #define MSR_IA32_BBL_CR_CTL3           0x0000011e
@@ -337,6 +344,8 @@
 #define MSR_AMD64_IBSOPDATA4           0xc001103d
 #define MSR_AMD64_IBS_REG_COUNT_MAX    8 /* includes MSR_AMD64_IBSBRTARGET */
 
+#define MSR_AMD64_VIRT_SPEC_CTRL       0xc001011f
+
 /* Fam 17h MSRs */
 #define MSR_F17H_IRPERF                        0xc00000e9
 
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index f928ad9b143f..8b38df98548e 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -217,6 +217,14 @@ enum spectre_v2_mitigation {
        SPECTRE_V2_IBRS,
 };
 
+/* The Speculative Store Bypass disable variants */
+enum ssb_mitigation {
+       SPEC_STORE_BYPASS_NONE,
+       SPEC_STORE_BYPASS_DISABLE,
+       SPEC_STORE_BYPASS_PRCTL,
+       SPEC_STORE_BYPASS_SECCOMP,
+};
+
 extern char __indirect_thunk_start[];
 extern char __indirect_thunk_end[];
 
@@ -241,22 +249,27 @@ static inline void vmexit_fill_RSB(void)
 #endif
 }
 
-#define alternative_msr_write(_msr, _val, _feature)            \
-       asm volatile(ALTERNATIVE("",                            \
-                                "movl %[msr], %%ecx\n\t"       \
-                                "movl %[val], %%eax\n\t"       \
-                                "movl $0, %%edx\n\t"           \
-                                "wrmsr",                       \
-                                _feature)                      \
-                    : : [msr] "i" (_msr), [val] "i" (_val)     \
-                    : "eax", "ecx", "edx", "memory")
+static __always_inline
+void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
+{
+       asm volatile(ALTERNATIVE("", "wrmsr", %c[feature])
+               : : "c" (msr),
+                   "a" ((u32)val),
+                   "d" ((u32)(val >> 32)),
+                   [feature] "i" (feature)
+               : "memory");
+}
 
 static inline void indirect_branch_prediction_barrier(void)
 {
-       alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB,
-                             X86_FEATURE_USE_IBPB);
+       u64 val = PRED_CMD_IBPB;
+
+       alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB);
 }
 
+/* The Intel SPEC CTRL MSR base value cache */
+extern u64 x86_spec_ctrl_base;
+
 /*
  * With retpoline, we must use IBRS to restrict branch prediction
  * before calling into firmware.
@@ -265,14 +278,18 @@ static inline void indirect_branch_prediction_barrier(void)
  */
 #define firmware_restrict_branch_speculation_start()                   \
 do {                                                                   \
+       u64 val = x86_spec_ctrl_base | SPEC_CTRL_IBRS;                  \
+                                                                       \
        preempt_disable();                                              \
-       alternative_msr_write(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS,       \
+       alternative_msr_write(MSR_IA32_SPEC_CTRL, val,                  \
                              X86_FEATURE_USE_IBRS_FW);                 \
 } while (0)
 
 #define firmware_restrict_branch_speculation_end()                     \
 do {                                                                   \
-       alternative_msr_write(MSR_IA32_SPEC_CTRL, 0,                    \
+       u64 val = x86_spec_ctrl_base;                                   \
+                                                                       \
+       alternative_msr_write(MSR_IA32_SPEC_CTRL, val,                  \
                              X86_FEATURE_USE_IBRS_FW);                 \
        preempt_enable();                                               \
 } while (0)
diff --git a/arch/x86/include/asm/pkeys.h b/arch/x86/include/asm/pkeys.h
index a0ba1ffda0df..851c04b7a092 100644
--- a/arch/x86/include/asm/pkeys.h
+++ b/arch/x86/include/asm/pkeys.h
@@ -2,6 +2,8 @@
 #ifndef _ASM_X86_PKEYS_H
 #define _ASM_X86_PKEYS_H
 
+#define ARCH_DEFAULT_PKEY      0
+
 #define arch_max_pkey() (boot_cpu_has(X86_FEATURE_OSPKE) ? 16 : 1)
 
 extern int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
@@ -15,7 +17,7 @@ extern int __execute_only_pkey(struct mm_struct *mm);
 static inline int execute_only_pkey(struct mm_struct *mm)
 {
        if (!boot_cpu_has(X86_FEATURE_OSPKE))
-               return 0;
+               return ARCH_DEFAULT_PKEY;
 
        return __execute_only_pkey(mm);
 }
@@ -49,13 +51,21 @@ bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey)
 {
        /*
         * "Allocated" pkeys are those that have been returned
-        * from pkey_alloc().  pkey 0 is special, and never
-        * returned from pkey_alloc().
+        * from pkey_alloc() or pkey 0 which is allocated
+        * implicitly when the mm is created.
         */
-       if (pkey <= 0)
+       if (pkey < 0)
                return false;
        if (pkey >= arch_max_pkey())
                return false;
+       /*
+        * The exec-only pkey is set in the allocation map, but
+        * is not available to any of the user interfaces like
+        * mprotect_pkey().
+        */
+       if (pkey == mm->context.execute_only_pkey)
+               return false;
+
        return mm_pkey_allocation_map(mm) & (1U << pkey);
 }
 
diff --git a/arch/x86/include/asm/spec-ctrl.h b/arch/x86/include/asm/spec-ctrl.h
new file mode 100644
index 000000000000..ae7c2c5cd7f0
--- /dev/null
+++ b/arch/x86/include/asm/spec-ctrl.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_SPECCTRL_H_
+#define _ASM_X86_SPECCTRL_H_
+
+#include <linux/thread_info.h>
+#include <asm/nospec-branch.h>
+
+/*
+ * On VMENTER we must preserve whatever view of the SPEC_CTRL MSR
+ * the guest has, while on VMEXIT we restore the host view. This
+ * would be easier if SPEC_CTRL were architecturally maskable or
+ * shadowable for guests but this is not (currently) the case.
+ * Takes the guest view of SPEC_CTRL MSR as a parameter and also
+ * the guest's version of VIRT_SPEC_CTRL, if emulated.
+ */
+extern void x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool guest);
+
+/**
+ * x86_spec_ctrl_set_guest - Set speculation control registers for the guest
+ * @guest_spec_ctrl:           The guest content of MSR_SPEC_CTRL
+ * @guest_virt_spec_ctrl:      The guest controlled bits of MSR_VIRT_SPEC_CTRL
+ *                             (may get translated to MSR_AMD64_LS_CFG bits)
+ *
+ * Avoids writing to the MSR if the content/bits are the same
+ */
+static inline
+void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
+{
+       x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, true);
+}
+
+/**
+ * x86_spec_ctrl_restore_host - Restore host speculation control registers
+ * @guest_spec_ctrl:           The guest content of MSR_SPEC_CTRL
+ * @guest_virt_spec_ctrl:      The guest controlled bits of MSR_VIRT_SPEC_CTRL
+ *                             (may get translated to MSR_AMD64_LS_CFG bits)
+ *
+ * Avoids writing to the MSR if the content/bits are the same
+ */
+static inline
+void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
+{
+       x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, false);
+}
+
+/* AMD specific Speculative Store Bypass MSR data */
+extern u64 x86_amd_ls_cfg_base;
+extern u64 x86_amd_ls_cfg_ssbd_mask;
+
+static inline u64 ssbd_tif_to_spec_ctrl(u64 tifn)
+{
+       BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
+       return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
+}
+
+static inline unsigned long ssbd_spec_ctrl_to_tif(u64 spec_ctrl)
+{
+       BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
+       return (spec_ctrl & SPEC_CTRL_SSBD) << (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
+}
+
+static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn)
+{
+       return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL;
+}
+
+#ifdef CONFIG_SMP
+extern void speculative_store_bypass_ht_init(void);
+#else
+static inline void speculative_store_bypass_ht_init(void) { }
+#endif
+
+extern void speculative_store_bypass_update(unsigned long tif);
+
+static inline void speculative_store_bypass_update_current(void)
+{
+       speculative_store_bypass_update(current_thread_info()->flags);
+}
+
+#endif
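
The ssbd_tif_to_spec_ctrl()/ssbd_spec_ctrl_to_tif() helpers above are plain
bit shifts between the thread-flag bit and the MSR bit. A standalone sketch
(illustrative only, not part of the patch, assuming TIF_SSBD == 5 and
SPEC_CTRL_SSBD_SHIFT == 2 as in this series) shows the round trip:

    #include <stdio.h>
    #include <stdint.h>

    #define SPEC_CTRL_SSBD_SHIFT    2                               /* MSR_IA32_SPEC_CTRL bit 2 */
    #define SPEC_CTRL_SSBD          (1ULL << SPEC_CTRL_SSBD_SHIFT)
    #define TIF_SSBD                5                               /* thread_info flag bit 5 */
    #define _TIF_SSBD               (1UL << TIF_SSBD)

    /* Shift the thread flag (bit 5) down to the SPEC_CTRL bit (bit 2). */
    static uint64_t ssbd_tif_to_spec_ctrl(uint64_t tifn)
    {
            return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
    }

    /* And back: shift the SPEC_CTRL bit up to the thread flag position. */
    static uint64_t ssbd_spec_ctrl_to_tif(uint64_t spec_ctrl)
    {
            return (spec_ctrl & SPEC_CTRL_SSBD) << (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
    }

    int main(void)
    {
            /* 0x20 (_TIF_SSBD) <-> 0x4 (SPEC_CTRL_SSBD) */
            printf("%#llx -> %#llx -> %#llx\n",
                   (unsigned long long)_TIF_SSBD,
                   (unsigned long long)ssbd_tif_to_spec_ctrl(_TIF_SSBD),
                   (unsigned long long)ssbd_spec_ctrl_to_tif(ssbd_tif_to_spec_ctrl(_TIF_SSBD)));
            return 0;
    }
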
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index eda3b6823ca4..95ff2d7f553f 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -81,6 +81,7 @@ struct thread_info {
 #define TIF_SIGPENDING         2       /* signal pending */
 #define TIF_NEED_RESCHED       3       /* rescheduling necessary */
 #define TIF_SINGLESTEP         4       /* reenable singlestep on user return*/
+#define TIF_SSBD                       5       /* Reduced data speculation */
 #define TIF_SYSCALL_EMU                6       /* syscall emulation active */
 #define TIF_SYSCALL_AUDIT      7       /* syscall auditing active */
 #define TIF_SECCOMP            8       /* secure computing */
@@ -107,6 +108,7 @@ struct thread_info {
 #define _TIF_SIGPENDING                (1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED      (1 << TIF_NEED_RESCHED)
 #define _TIF_SINGLESTEP                (1 << TIF_SINGLESTEP)
+#define _TIF_SSBD              (1 << TIF_SSBD)
 #define _TIF_SYSCALL_EMU       (1 << TIF_SYSCALL_EMU)
 #define _TIF_SYSCALL_AUDIT     (1 << TIF_SYSCALL_AUDIT)
 #define _TIF_SECCOMP           (1 << TIF_SECCOMP)
@@ -146,7 +148,7 @@ struct thread_info {
 
 /* flags to check in __switch_to() */
 #define _TIF_WORK_CTXSW                                                        \
-       (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP)
+       (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_SSBD)
 
 #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index e7d5a7883632..90574f731c05 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -10,6 +10,7 @@
 #include <asm/processor.h>
 #include <asm/apic.h>
 #include <asm/cpu.h>
+#include <asm/spec-ctrl.h>
 #include <asm/smp.h>
 #include <asm/pci-direct.h>
 #include <asm/delay.h>
@@ -554,6 +555,26 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
                rdmsrl(MSR_FAM10H_NODE_ID, value);
                nodes_per_socket = ((value >> 3) & 7) + 1;
        }
+
+       if (c->x86 >= 0x15 && c->x86 <= 0x17) {
+               unsigned int bit;
+
+               switch (c->x86) {
+               case 0x15: bit = 54; break;
+               case 0x16: bit = 33; break;
+               case 0x17: bit = 10; break;
+               default: return;
+               }
+               /*
+                * Try to cache the base value so further operations can
+                * avoid RMW. If that faults, do not enable SSBD.
+                */
+               if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
+                       setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
+                       setup_force_cpu_cap(X86_FEATURE_SSBD);
+                       x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
+               }
+       }
 }
 
 static void early_init_amd(struct cpuinfo_x86 *c)
@@ -765,6 +786,7 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
 
 static void init_amd_zn(struct cpuinfo_x86 *c)
 {
+       set_cpu_cap(c, X86_FEATURE_ZEN);
        /*
         * Fix erratum 1076: CPB feature bit not being set in CPUID. It affects
         * all up to and including B1.
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index bfca937bdcc3..7416fc206b4a 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -12,8 +12,10 @@
 #include <linux/utsname.h>
 #include <linux/cpu.h>
 #include <linux/module.h>
+#include <linux/nospec.h>
+#include <linux/prctl.h>
 
-#include <asm/nospec-branch.h>
+#include <asm/spec-ctrl.h>
 #include <asm/cmdline.h>
 #include <asm/bugs.h>
 #include <asm/processor.h>
@@ -27,6 +29,27 @@
 #include <asm/intel-family.h>
 
 static void __init spectre_v2_select_mitigation(void);
+static void __init ssb_select_mitigation(void);
+
+/*
+ * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
+ * writes to SPEC_CTRL contain whatever reserved bits have been set.
+ */
+u64 __ro_after_init x86_spec_ctrl_base;
+EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
+
+/*
+ * The vendor and possibly platform specific bits which can be modified in
+ * x86_spec_ctrl_base.
+ */
+static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;
+
+/*
+ * AMD specific MSR info for Speculative Store Bypass control.
+ * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
+ */
+u64 __ro_after_init x86_amd_ls_cfg_base;
+u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;
 
 void __init check_bugs(void)
 {
@@ -37,9 +60,27 @@ void __init check_bugs(void)
                print_cpu_info(&boot_cpu_data);
        }
 
+       /*
+        * Read the SPEC_CTRL MSR to account for reserved bits which may
+        * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
+        * init code as it is not enumerated and depends on the family.
+        */
+       if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
+               rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+
+       /* Allow STIBP in MSR_SPEC_CTRL if supported */
+       if (boot_cpu_has(X86_FEATURE_STIBP))
+               x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;
+
        /* Select the proper spectre mitigation before patching alternatives */
        spectre_v2_select_mitigation();
 
+       /*
+        * Select proper mitigation for any exposure to the Speculative Store
+        * Bypass vulnerability.
+        */
+       ssb_select_mitigation();
+
 #ifdef CONFIG_X86_32
        /*
         * Check whether we are able to run this kernel safely on SMP.
@@ -93,7 +134,76 @@ static const char *spectre_v2_strings[] = {
 #undef pr_fmt
 #define pr_fmt(fmt)     "Spectre V2 : " fmt
 
-static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;
+static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
+       SPECTRE_V2_NONE;
+
+void
+x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
+{
+       u64 msrval, guestval, hostval = x86_spec_ctrl_base;
+       struct thread_info *ti = current_thread_info();
+
+       /* Is MSR_SPEC_CTRL implemented ? */
+       if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
+               /*
+                * Restrict guest_spec_ctrl to supported values. Clear the
+                * modifiable bits in the host base value and or the
+                * modifiable bits from the guest value.
+                */
+               guestval = hostval & ~x86_spec_ctrl_mask;
+               guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;
+
+               /* SSBD controlled in MSR_SPEC_CTRL */
+               if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
+                       hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
+
+               if (hostval != guestval) {
+                       msrval = setguest ? guestval : hostval;
+                       wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
+               }
+       }
+
+       /*
+        * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
+        * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
+        */
+       if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
+           !static_cpu_has(X86_FEATURE_VIRT_SSBD))
+               return;
+
+       /*
+        * If the host has SSBD mitigation enabled, force it in the host's
+        * virtual MSR value. If it's not permanently enabled, evaluate
+        * current's TIF_SSBD thread flag.
+        */
+       if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
+               hostval = SPEC_CTRL_SSBD;
+       else
+               hostval = ssbd_tif_to_spec_ctrl(ti->flags);
+
+       /* Sanitize the guest value */
+       guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;
+
+       if (hostval != guestval) {
+               unsigned long tif;
+
+               tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
+                                ssbd_spec_ctrl_to_tif(hostval);
+
+               speculative_store_bypass_update(tif);
+       }
+}
+EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
+
+static void x86_amd_ssb_disable(void)
+{
+       u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
+
+       if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
+               wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
+       else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
+               wrmsrl(MSR_AMD64_LS_CFG, msrval);
+}
 
 #ifdef RETPOLINE
 static bool spectre_v2_bad_module;
@@ -312,32 +422,289 @@ static void __init spectre_v2_select_mitigation(void)
 }
 
 #undef pr_fmt
+#define pr_fmt(fmt)    "Speculative Store Bypass: " fmt
+
+static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;
+
+/* The kernel command line selection */
+enum ssb_mitigation_cmd {
+       SPEC_STORE_BYPASS_CMD_NONE,
+       SPEC_STORE_BYPASS_CMD_AUTO,
+       SPEC_STORE_BYPASS_CMD_ON,
+       SPEC_STORE_BYPASS_CMD_PRCTL,
+       SPEC_STORE_BYPASS_CMD_SECCOMP,
+};
+
+static const char *ssb_strings[] = {
+       [SPEC_STORE_BYPASS_NONE]        = "Vulnerable",
+       [SPEC_STORE_BYPASS_DISABLE]     = "Mitigation: Speculative Store Bypass disabled",
+       [SPEC_STORE_BYPASS_PRCTL]       = "Mitigation: Speculative Store Bypass disabled via prctl",
+       [SPEC_STORE_BYPASS_SECCOMP]     = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
+};
+
+static const struct {
+       const char *option;
+       enum ssb_mitigation_cmd cmd;
+} ssb_mitigation_options[] = {
+       { "auto",       SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */
+       { "on",         SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */
+       { "off",        SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */
+       { "prctl",      SPEC_STORE_BYPASS_CMD_PRCTL },   /* Disable Speculative Store Bypass via prctl */
+       { "seccomp",    SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
+};
+
+static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
+{
+       enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
+       char arg[20];
+       int ret, i;
+
+       if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable")) {
+               return SPEC_STORE_BYPASS_CMD_NONE;
+       } else {
+               ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
+                                         arg, sizeof(arg));
+               if (ret < 0)
+                       return SPEC_STORE_BYPASS_CMD_AUTO;
+
+               for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
+                       if (!match_option(arg, ret, ssb_mitigation_options[i].option))
+                               continue;
+
+                       cmd = ssb_mitigation_options[i].cmd;
+                       break;
+               }
+
+               if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
+                       pr_err("unknown option (%s). Switching to AUTO select\n", arg);
+                       return SPEC_STORE_BYPASS_CMD_AUTO;
+               }
+       }
+
+       return cmd;
+}
+
+static enum ssb_mitigation __init __ssb_select_mitigation(void)
+{
+       enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
+       enum ssb_mitigation_cmd cmd;
+
+       if (!boot_cpu_has(X86_FEATURE_SSBD))
+               return mode;
+
+       cmd = ssb_parse_cmdline();
+       if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
+           (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
+            cmd == SPEC_STORE_BYPASS_CMD_AUTO))
+               return mode;
+
+       switch (cmd) {
+       case SPEC_STORE_BYPASS_CMD_AUTO:
+       case SPEC_STORE_BYPASS_CMD_SECCOMP:
+               /*
+                * Choose prctl+seccomp as the default mode if seccomp is
+                * enabled.
+                */
+               if (IS_ENABLED(CONFIG_SECCOMP))
+                       mode = SPEC_STORE_BYPASS_SECCOMP;
+               else
+                       mode = SPEC_STORE_BYPASS_PRCTL;
+               break;
+       case SPEC_STORE_BYPASS_CMD_ON:
+               mode = SPEC_STORE_BYPASS_DISABLE;
+               break;
+       case SPEC_STORE_BYPASS_CMD_PRCTL:
+               mode = SPEC_STORE_BYPASS_PRCTL;
+               break;
+       case SPEC_STORE_BYPASS_CMD_NONE:
+               break;
+       }
+
+       /*
+        * We have three CPU feature flags that are in play here:
+        *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
+        *  - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
+        *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
+        */
+       if (mode == SPEC_STORE_BYPASS_DISABLE) {
+               setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
+               /*
+                * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD uses
+                * a completely different MSR and bit dependent on family.
+                */
+               switch (boot_cpu_data.x86_vendor) {
+               case X86_VENDOR_INTEL:
+                       x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
+                       x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
+                       wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+                       break;
+               case X86_VENDOR_AMD:
+                       x86_amd_ssb_disable();
+                       break;
+               }
+       }
+
+       return mode;
+}
+
+static void ssb_select_mitigation(void)
+{
+       ssb_mode = __ssb_select_mitigation();
+
+       if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
+               pr_info("%s\n", ssb_strings[ssb_mode]);
+}
+
+#undef pr_fmt
+#define pr_fmt(fmt)     "Speculation prctl: " fmt
+
+static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
+{
+       bool update;
+
+       if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
+           ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
+               return -ENXIO;
+
+       switch (ctrl) {
+       case PR_SPEC_ENABLE:
+               /* If speculation is force disabled, enable is not allowed */
+               if (task_spec_ssb_force_disable(task))
+                       return -EPERM;
+               task_clear_spec_ssb_disable(task);
+               update = test_and_clear_tsk_thread_flag(task, TIF_SSBD);
+               break;
+       case PR_SPEC_DISABLE:
+               task_set_spec_ssb_disable(task);
+               update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
+               break;
+       case PR_SPEC_FORCE_DISABLE:
+               task_set_spec_ssb_disable(task);
+               task_set_spec_ssb_force_disable(task);
+               update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
+               break;
+       default:
+               return -ERANGE;
+       }
+
+       /*
+        * If being set on non-current task, delay setting the CPU
+        * mitigation until it is next scheduled.
+        */
+       if (task == current && update)
+               speculative_store_bypass_update_current();
+
+       return 0;
+}
+
+int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
+                            unsigned long ctrl)
+{
+       switch (which) {
+       case PR_SPEC_STORE_BYPASS:
+               return ssb_prctl_set(task, ctrl);
+       default:
+               return -ENODEV;
+       }
+}
+
+#ifdef CONFIG_SECCOMP
+void arch_seccomp_spec_mitigate(struct task_struct *task)
+{
+       if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
+               ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
+}
+#endif
+
+static int ssb_prctl_get(struct task_struct *task)
+{
+       switch (ssb_mode) {
+       case SPEC_STORE_BYPASS_DISABLE:
+               return PR_SPEC_DISABLE;
+       case SPEC_STORE_BYPASS_SECCOMP:
+       case SPEC_STORE_BYPASS_PRCTL:
+               if (task_spec_ssb_force_disable(task))
+                       return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
+               if (task_spec_ssb_disable(task))
+                       return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
+               return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
+       default:
+               if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
+                       return PR_SPEC_ENABLE;
+               return PR_SPEC_NOT_AFFECTED;
+       }
+}
+
+int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
+{
+       switch (which) {
+       case PR_SPEC_STORE_BYPASS:
+               return ssb_prctl_get(task);
+       default:
+               return -ENODEV;
+       }
+}
+
+void x86_spec_ctrl_setup_ap(void)
+{
+       if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
+               wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+
+       if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
+               x86_amd_ssb_disable();
+}
 
 #ifdef CONFIG_SYSFS
-ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
+
+static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
+                              char *buf, unsigned int bug)
 {
-       if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
+       if (!boot_cpu_has_bug(bug))
                return sprintf(buf, "Not affected\n");
-       if (boot_cpu_has(X86_FEATURE_PTI))
-               return sprintf(buf, "Mitigation: PTI\n");
+
+       switch (bug) {
+       case X86_BUG_CPU_MELTDOWN:
+               if (boot_cpu_has(X86_FEATURE_PTI))
+                       return sprintf(buf, "Mitigation: PTI\n");
+
+               break;
+
+       case X86_BUG_SPECTRE_V1:
+               return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+
+       case X86_BUG_SPECTRE_V2:
+               return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
+                              boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
+                              boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
+                              spectre_v2_module_string());
+
+       case X86_BUG_SPEC_STORE_BYPASS:
+               return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
+
+       default:
+               break;
+       }
+
        return sprintf(buf, "Vulnerable\n");
 }
 
+ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
+}
+
 ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
 {
-       if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
-               return sprintf(buf, "Not affected\n");
-       return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+       return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
 }
 
 ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
 {
-       if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
-               return sprintf(buf, "Not affected\n");
+       return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
+}
 
-       return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
-                      boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
-                      boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
-                      spectre_v2_module_string());
+ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
 }
 #endif
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index cf6380200dc2..48e98964ecad 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -733,17 +733,32 @@ static void init_speculation_control(struct cpuinfo_x86 *c)
         * and they also have a different bit for STIBP support. Also,
         * a hypervisor might have set the individual AMD bits even on
         * Intel CPUs, for finer-grained selection of what's available.
-        *
-        * We use the AMD bits in 0x8000_0008 EBX as the generic hardware
-        * features, which are visible in /proc/cpuinfo and used by the
-        * kernel. So set those accordingly from the Intel bits.
         */
        if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
                set_cpu_cap(c, X86_FEATURE_IBRS);
                set_cpu_cap(c, X86_FEATURE_IBPB);
+               set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
        }
+
        if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
                set_cpu_cap(c, X86_FEATURE_STIBP);
+
+       if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD) ||
+           cpu_has(c, X86_FEATURE_VIRT_SSBD))
+               set_cpu_cap(c, X86_FEATURE_SSBD);
+
+       if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
+               set_cpu_cap(c, X86_FEATURE_IBRS);
+               set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
+       }
+
+       if (cpu_has(c, X86_FEATURE_AMD_IBPB))
+               set_cpu_cap(c, X86_FEATURE_IBPB);
+
+       if (cpu_has(c, X86_FEATURE_AMD_STIBP)) {
+               set_cpu_cap(c, X86_FEATURE_STIBP);
+               set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
+       }
 }
 
 void get_cpu_cap(struct cpuinfo_x86 *c)
@@ -894,21 +909,55 @@ static const __initconst struct x86_cpu_id cpu_no_meltdown[] = {
        {}
 };
 
-static bool __init cpu_vulnerable_to_meltdown(struct cpuinfo_x86 *c)
+static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
+       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_PINEVIEW        },
+       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_LINCROFT        },
+       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_PENWELL         },
+       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_CLOVERVIEW      },
+       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_CEDARVIEW       },
+       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_SILVERMONT1     },
+       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_AIRMONT         },
+       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_SILVERMONT2     },
+       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_ATOM_MERRIFIELD      },
+       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_CORE_YONAH           },
+       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_XEON_PHI_KNL         },
+       { X86_VENDOR_INTEL,     6,      INTEL_FAM6_XEON_PHI_KNM         },
+       { X86_VENDOR_CENTAUR,   5,                                      },
+       { X86_VENDOR_INTEL,     5,                                      },
+       { X86_VENDOR_NSC,       5,                                      },
+       { X86_VENDOR_AMD,       0x12,                                   },
+       { X86_VENDOR_AMD,       0x11,                                   },
+       { X86_VENDOR_AMD,       0x10,                                   },
+       { X86_VENDOR_AMD,       0xf,                                    },
+       { X86_VENDOR_ANY,       4,                                      },
+       {}
+};
+
+static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 {
        u64 ia32_cap = 0;
 
-       if (x86_match_cpu(cpu_no_meltdown))
-               return false;
-
        if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
                rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
 
+       if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
+          !(ia32_cap & ARCH_CAP_SSB_NO))
+               setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
+
+       if (x86_match_cpu(cpu_no_speculation))
+               return;
+
+       setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
+       setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
+
+       if (x86_match_cpu(cpu_no_meltdown))
+               return;
+
        /* Rogue Data Cache Load? No! */
        if (ia32_cap & ARCH_CAP_RDCL_NO)
-               return false;
+               return;
 
-       return true;
+       setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
 }
 
 /*
@@ -958,12 +1007,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 
        setup_force_cpu_cap(X86_FEATURE_ALWAYS);
 
-       if (!x86_match_cpu(cpu_no_speculation)) {
-               if (cpu_vulnerable_to_meltdown(c))
-                       setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
-               setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
-               setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
-       }
+       cpu_set_bug_bits(c);
 
        fpu__init_system(c);
 
@@ -1322,6 +1366,7 @@ void identify_secondary_cpu(struct cpuinfo_x86 *c)
 #endif
        mtrr_ap_init();
        validate_apic_and_package_id(c);
+       x86_spec_ctrl_setup_ap();
 }
 
 static __init int setup_noclflush(char *arg)
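
cpu_set_bug_bits() above marks a CPU with X86_BUG_SPEC_STORE_BYPASS unless it appears in the cpu_no_spec_store_bypass whitelist or the firmware reports ARCH_CAP_SSB_NO in IA32_ARCH_CAPABILITIES. A minimal standalone sketch of that decision follows; the helper names and the bit position are illustrative stand-ins, not kernel code:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ARCH_CAP_SSB_NO	(1ULL << 4)	/* illustrative bit position */

/* Stand-ins for x86_match_cpu(cpu_no_spec_store_bypass) and rdmsrl(). */
static bool on_ssb_whitelist(void)            { return false; }
static uint64_t read_arch_capabilities(void)  { return 0; }

int main(void)
{
	bool vulnerable = !on_ssb_whitelist() &&
			  !(read_arch_capabilities() & ARCH_CAP_SSB_NO);

	printf("X86_BUG_SPEC_STORE_BYPASS: %s\n", vulnerable ? "set" : "not set");
	return 0;
}
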
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index e806b11a99af..37672d299e35 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -50,4 +50,6 @@ extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
 
 unsigned int aperfmperf_get_khz(int cpu);
 
+extern void x86_spec_ctrl_setup_ap(void);
+
 #endif /* ARCH_X86_CPU_H */
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index c3af167d0a70..c895f38a7a5e 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -188,7 +188,10 @@ static void early_init_intel(struct cpuinfo_x86 *c)
                setup_clear_cpu_cap(X86_FEATURE_IBPB);
                setup_clear_cpu_cap(X86_FEATURE_STIBP);
                setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL);
+               setup_clear_cpu_cap(X86_FEATURE_MSR_SPEC_CTRL);
                setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
+               setup_clear_cpu_cap(X86_FEATURE_SSBD);
+               setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL_SSBD);
        }
 
        /*
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 8bd1d8292cf7..988a98f34c66 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -39,6 +39,7 @@
 #include <asm/switch_to.h>
 #include <asm/desc.h>
 #include <asm/prctl.h>
+#include <asm/spec-ctrl.h>
 
 /*
  * per-CPU TSS segments. Threads are completely 'soft' on Linux,
@@ -279,6 +280,148 @@ static inline void switch_to_bitmap(struct tss_struct *tss,
        }
 }
 
+#ifdef CONFIG_SMP
+
+struct ssb_state {
+       struct ssb_state        *shared_state;
+       raw_spinlock_t          lock;
+       unsigned int            disable_state;
+       unsigned long           local_state;
+};
+
+#define LSTATE_SSB     0
+
+static DEFINE_PER_CPU(struct ssb_state, ssb_state);
+
+void speculative_store_bypass_ht_init(void)
+{
+       struct ssb_state *st = this_cpu_ptr(&ssb_state);
+       unsigned int this_cpu = smp_processor_id();
+       unsigned int cpu;
+
+       st->local_state = 0;
+
+       /*
+        * Shared state setup happens once on the first bringup
+        * of the CPU. It's not destroyed on CPU hotunplug.
+        */
+       if (st->shared_state)
+               return;
+
+       raw_spin_lock_init(&st->lock);
+
+       /*
+        * Go over HT siblings and check whether one of them has set up the
+        * shared state pointer already.
+        */
+       for_each_cpu(cpu, topology_sibling_cpumask(this_cpu)) {
+               if (cpu == this_cpu)
+                       continue;
+
+               if (!per_cpu(ssb_state, cpu).shared_state)
+                       continue;
+
+               /* Link it to the state of the sibling: */
+               st->shared_state = per_cpu(ssb_state, cpu).shared_state;
+               return;
+       }
+
+       /*
+        * First HT sibling to come up on the core.  Link shared state of
+        * the first HT sibling to itself. The siblings on the same core
+        * which come up later will see the shared state pointer and link
+        * themselves to the state of this CPU.
+        */
+       st->shared_state = st;
+}
+
+/*
+ * Logic is: First HT sibling enables SSBD for both siblings in the core
+ * and last sibling to disable it, disables it for the whole core. This is how
+ * MSR_SPEC_CTRL works in "hardware":
+ *
+ *  CORE_SPEC_CTRL = THREAD0_SPEC_CTRL | THREAD1_SPEC_CTRL
+ */
+static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
+{
+       struct ssb_state *st = this_cpu_ptr(&ssb_state);
+       u64 msr = x86_amd_ls_cfg_base;
+
+       if (!static_cpu_has(X86_FEATURE_ZEN)) {
+               msr |= ssbd_tif_to_amd_ls_cfg(tifn);
+               wrmsrl(MSR_AMD64_LS_CFG, msr);
+               return;
+       }
+
+       if (tifn & _TIF_SSBD) {
+               /*
+                * Since this can race with prctl(), block reentry on the
+                * same CPU.
+                */
+               if (__test_and_set_bit(LSTATE_SSB, &st->local_state))
+                       return;
+
+               msr |= x86_amd_ls_cfg_ssbd_mask;
+
+               raw_spin_lock(&st->shared_state->lock);
+               /* First sibling enables SSBD: */
+               if (!st->shared_state->disable_state)
+                       wrmsrl(MSR_AMD64_LS_CFG, msr);
+               st->shared_state->disable_state++;
+               raw_spin_unlock(&st->shared_state->lock);
+       } else {
+               if (!__test_and_clear_bit(LSTATE_SSB, &st->local_state))
+                       return;
+
+               raw_spin_lock(&st->shared_state->lock);
+               st->shared_state->disable_state--;
+               if (!st->shared_state->disable_state)
+                       wrmsrl(MSR_AMD64_LS_CFG, msr);
+               raw_spin_unlock(&st->shared_state->lock);
+       }
+}
+#else
+static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
+{
+       u64 msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);
+
+       wrmsrl(MSR_AMD64_LS_CFG, msr);
+}
+#endif
+
+static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
+{
+       /*
+        * SSBD has the same definition in SPEC_CTRL and VIRT_SPEC_CTRL,
+        * so ssbd_tif_to_spec_ctrl() just works.
+        */
+       wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
+}
+
+static __always_inline void intel_set_ssb_state(unsigned long tifn)
+{
+       u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
+
+       wrmsrl(MSR_IA32_SPEC_CTRL, msr);
+}
+
+static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
+{
+       if (static_cpu_has(X86_FEATURE_VIRT_SSBD))
+               amd_set_ssb_virt_state(tifn);
+       else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
+               amd_set_core_ssb_state(tifn);
+       else
+               intel_set_ssb_state(tifn);
+}
+
+void speculative_store_bypass_update(unsigned long tif)
+{
+       preempt_disable();
+       __speculative_store_bypass_update(tif);
+       preempt_enable();
+}
+
 void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
                      struct tss_struct *tss)
 {
@@ -310,6 +453,9 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
 
        if ((tifp ^ tifn) & _TIF_NOCPUID)
                set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));
+
+       if ((tifp ^ tifn) & _TIF_SSBD)
+               __speculative_store_bypass_update(tifn);
 }
 
 /*
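
The __switch_to_xtra() hunk above reprograms speculation control only when the SSBD thread flag differs between the outgoing and incoming task. A standalone sketch of the flag-to-MSR-value translation that intel_set_ssb_state() performs is shown below; the constants are illustrative and not the kernel's real TIF_*/SPEC_CTRL_* definitions:

#include <stdint.h>
#include <stdio.h>

#define TIF_SSBD	5		/* illustrative thread-flag bit */
#define _TIF_SSBD	(1UL << TIF_SSBD)
#define SPEC_CTRL_SSBD	(1ULL << 2)	/* SSBD bit of IA32_SPEC_CTRL */

/* Equivalent in spirit to ssbd_tif_to_spec_ctrl(). */
static uint64_t ssbd_flag_to_spec_ctrl(unsigned long tifn)
{
	return (tifn & _TIF_SSBD) ? SPEC_CTRL_SSBD : 0;
}

int main(void)
{
	uint64_t spec_ctrl_base = 0;		/* host value, SSBD clear */
	unsigned long tifn = _TIF_SSBD;		/* incoming task wants SSBD */

	/* The value a wrmsrl(MSR_IA32_SPEC_CTRL, ...) would receive. */
	printf("SPEC_CTRL = 0x%llx\n",
	       (unsigned long long)(spec_ctrl_base | ssbd_flag_to_spec_ctrl(tifn)));
	return 0;
}
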
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 9eb448c7859d..fa093b77689f 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -528,6 +528,7 @@ void set_personality_64bit(void)
        clear_thread_flag(TIF_X32);
        /* Pretend that this comes from a 64bit execve */
        task_pt_regs(current)->orig_ax = __NR_execve;
+       current_thread_info()->status &= ~TS_COMPAT;
 
        /* Ensure the corresponding mm is not marked. */
        if (current->mm)
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 6b841262b790..4a96aa004390 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -77,6 +77,7 @@
 #include <asm/i8259.h>
 #include <asm/realmode.h>
 #include <asm/misc.h>
+#include <asm/spec-ctrl.h>
 
 /* Number of siblings per CPU package */
 int smp_num_siblings = 1;
@@ -245,6 +246,8 @@ static void notrace start_secondary(void *unused)
         */
        check_tsc_sync_target();
 
+       speculative_store_bypass_ht_init();
+
        /*
         * Lock vector_lock and initialize the vectors on this cpu
         * before setting the cpu online. We must set it online with
@@ -1349,6 +1352,8 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
        set_mtrr_aps_delayed_init();
 
        smp_quirk_init_udelay();
+
+       speculative_store_bypass_ht_init();
 }
 
 void arch_enable_nonboot_cpus_begin(void)
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 4f544f2a7b06..d67e3b31f3db 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -367,7 +367,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 
        /* cpuid 0x80000008.ebx */
        const u32 kvm_cpuid_8000_0008_ebx_x86_features =
-               F(IBPB) | F(IBRS);
+               F(AMD_IBPB) | F(AMD_IBRS) | F(VIRT_SSBD);
 
        /* cpuid 0xC0000001.edx */
        const u32 kvm_cpuid_C000_0001_edx_x86_features =
@@ -394,7 +394,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 
        /* cpuid 7.0.edx*/
        const u32 kvm_cpuid_7_0_edx_x86_features =
-               F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
+               F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) | F(SSBD) |
                F(ARCH_CAPABILITIES);
 
        /* all calls to cpuid_count() should be made on the same cpu */
@@ -632,13 +632,20 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
                        g_phys_as = phys_as;
                entry->eax = g_phys_as | (virt_as << 8);
                entry->edx = 0;
-               /* IBRS and IBPB aren't necessarily present in hardware cpuid */
-               if (boot_cpu_has(X86_FEATURE_IBPB))
-                       entry->ebx |= F(IBPB);
-               if (boot_cpu_has(X86_FEATURE_IBRS))
-                       entry->ebx |= F(IBRS);
+               /*
+                * IBRS, IBPB and VIRT_SSBD aren't necessarily present in
+                * hardware cpuid
+                */
+               if (boot_cpu_has(X86_FEATURE_AMD_IBPB))
+                       entry->ebx |= F(AMD_IBPB);
+               if (boot_cpu_has(X86_FEATURE_AMD_IBRS))
+                       entry->ebx |= F(AMD_IBRS);
+               if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
+                       entry->ebx |= F(VIRT_SSBD);
                entry->ebx &= kvm_cpuid_8000_0008_ebx_x86_features;
                cpuid_mask(&entry->ebx, CPUID_8000_0008_EBX);
+               if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
+                       entry->ebx |= F(VIRT_SSBD);
                break;
        }
        case 0x80000019:
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 9fb0daf628cb..029aa1318874 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -46,7 +46,7 @@
 #include <asm/kvm_para.h>
 #include <asm/irq_remapping.h>
 #include <asm/microcode.h>
-#include <asm/nospec-branch.h>
+#include <asm/spec-ctrl.h>
 
 #include <asm/virtext.h>
 #include "trace.h"
@@ -186,6 +186,12 @@ struct vcpu_svm {
        } host;
 
        u64 spec_ctrl;
+       /*
+        * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
+        * translated into the appropriate L2_CFG bits on the host to
+        * perform speculative control.
+        */
+       u64 virt_spec_ctrl;
 
        u32 *msrpm;
 
@@ -1611,6 +1617,7 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
        u32 eax = 1;
 
        svm->spec_ctrl = 0;
+       svm->virt_spec_ctrl = 0;
 
        if (!init_event) {
                svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE |
@@ -3618,11 +3625,18 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                break;
        case MSR_IA32_SPEC_CTRL:
                if (!msr_info->host_initiated &&
-                   !guest_cpuid_has(vcpu, X86_FEATURE_IBRS))
+                   !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS))
                        return 1;
 
                msr_info->data = svm->spec_ctrl;
                break;
+       case MSR_AMD64_VIRT_SPEC_CTRL:
+               if (!msr_info->host_initiated &&
+                   !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
+                       return 1;
+
+               msr_info->data = svm->virt_spec_ctrl;
+               break;
        case MSR_IA32_UCODE_REV:
                msr_info->data = 0x01000065;
                break;
@@ -3716,7 +3730,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
                break;
        case MSR_IA32_SPEC_CTRL:
                if (!msr->host_initiated &&
-                   !guest_cpuid_has(vcpu, X86_FEATURE_IBRS))
+                   !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS))
                        return 1;
 
                /* The STIBP bit doesn't fault even if it's not advertised */
@@ -3743,7 +3757,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
                break;
        case MSR_IA32_PRED_CMD:
                if (!msr->host_initiated &&
-                   !guest_cpuid_has(vcpu, X86_FEATURE_IBPB))
+                   !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB))
                        return 1;
 
                if (data & ~PRED_CMD_IBPB)
@@ -3757,6 +3771,16 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
                        break;
                set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1);
                break;
+       case MSR_AMD64_VIRT_SPEC_CTRL:
+               if (!msr->host_initiated &&
+                   !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
+                       return 1;
+
+               if (data & ~SPEC_CTRL_SSBD)
+                       return 1;
+
+               svm->virt_spec_ctrl = data;
+               break;
        case MSR_STAR:
                svm->vmcb->save.star = data;
                break;
@@ -5015,8 +5039,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
         * is no need to worry about the conditional branch over the wrmsr
         * being speculatively taken.
         */
-       if (svm->spec_ctrl)
-               native_wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+       x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
 
        asm volatile (
                "push %%" _ASM_BP "; \n\t"
@@ -5110,6 +5133,18 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 #endif
                );
 
+       /* Eliminate branch target predictions from guest mode */
+       vmexit_fill_RSB();
+
+#ifdef CONFIG_X86_64
+       wrmsrl(MSR_GS_BASE, svm->host.gs_base);
+#else
+       loadsegment(fs, svm->host.fs);
+#ifndef CONFIG_X86_32_LAZY_GS
+       loadsegment(gs, svm->host.gs);
+#endif
+#endif
+
        /*
         * We do not use IBRS in the kernel. If this vCPU has used the
         * SPEC_CTRL MSR it may have left it on; save the value and
@@ -5128,20 +5163,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
        if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
                svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
 
-       if (svm->spec_ctrl)
-               native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
-
-       /* Eliminate branch target predictions from guest mode */
-       vmexit_fill_RSB();
-
-#ifdef CONFIG_X86_64
-       wrmsrl(MSR_GS_BASE, svm->host.gs_base);
-#else
-       loadsegment(fs, svm->host.fs);
-#ifndef CONFIG_X86_32_LAZY_GS
-       loadsegment(gs, svm->host.gs);
-#endif
-#endif
+       x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
 
        reload_tss(vcpu);
 
@@ -5244,7 +5266,7 @@ static bool svm_cpu_has_accelerated_tpr(void)
        return false;
 }
 
-static bool svm_has_high_real_mode_segbase(void)
+static bool svm_has_emulated_msr(int index)
 {
        return true;
 }
@@ -5551,7 +5573,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
        .hardware_enable = svm_hardware_enable,
        .hardware_disable = svm_hardware_disable,
        .cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
-       .cpu_has_high_real_mode_segbase = svm_has_high_real_mode_segbase,
+       .has_emulated_msr = svm_has_emulated_msr,
 
        .vcpu_create = svm_create_vcpu,
        .vcpu_free = svm_free_vcpu,
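
The MSR_AMD64_VIRT_SPEC_CTRL handler added above accepts only the SSBD bit from the guest and rejects any other bit with "return 1". A compact sketch of that reserved-bit validation pattern, outside of KVM and with an illustrative bit value, might look like this:

#include <stdint.h>
#include <stdio.h>

#define SPEC_CTRL_SSBD	(1ULL << 2)	/* the only bit a guest may set here */

/* Returns 0 if the write is accepted, 1 if it must be rejected. */
static int virt_spec_ctrl_write(uint64_t *shadow, uint64_t data)
{
	if (data & ~SPEC_CTRL_SSBD)
		return 1;	/* reserved bits set: refuse the write */
	*shadow = data;		/* remember the guest's value */
	return 0;
}

int main(void)
{
	uint64_t shadow = 0;

	printf("write 0x4 -> %d\n", virt_spec_ctrl_write(&shadow, 0x4)); /* accepted */
	printf("write 0x3 -> %d\n", virt_spec_ctrl_write(&shadow, 0x3)); /* rejected */
	return 0;
}
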
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index bdd84ce4491e..2e63edf8312c 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -52,7 +52,7 @@
 #include <asm/irq_remapping.h>
 #include <asm/mmu_context.h>
 #include <asm/microcode.h>
-#include <asm/nospec-branch.h>
+#include <asm/spec-ctrl.h>
 
 #include "trace.h"
 #include "pmu.h"
@@ -3293,7 +3293,6 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                break;
        case MSR_IA32_SPEC_CTRL:
                if (!msr_info->host_initiated &&
-                   !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) &&
                    !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
                        return 1;
 
@@ -3414,12 +3413,11 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                break;
        case MSR_IA32_SPEC_CTRL:
                if (!msr_info->host_initiated &&
-                   !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) &&
                    !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
                        return 1;
 
                /* The STIBP bit doesn't fault even if it's not advertised */
-               if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP))
+               if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD))
                        return 1;
 
                vmx->spec_ctrl = data;
@@ -3445,7 +3443,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                break;
        case MSR_IA32_PRED_CMD:
                if (!msr_info->host_initiated &&
-                   !guest_cpuid_has(vcpu, X86_FEATURE_IBPB) &&
                    !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
                        return 1;
 
@@ -9217,9 +9214,21 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
 }
 STACK_FRAME_NON_STANDARD(vmx_handle_external_intr);
 
-static bool vmx_has_high_real_mode_segbase(void)
+static bool vmx_has_emulated_msr(int index)
 {
-       return enable_unrestricted_guest || emulate_invalid_guest_state;
+       switch (index) {
+       case MSR_IA32_SMBASE:
+               /*
+                * We cannot do SMM unless we can run the guest in big
+                * real mode.
+                */
+               return enable_unrestricted_guest || emulate_invalid_guest_state;
+       case MSR_AMD64_VIRT_SPEC_CTRL:
+               /* This is AMD only.  */
+               return false;
+       default:
+               return true;
+       }
 }
 
 static bool vmx_mpx_supported(void)
@@ -9452,10 +9461,10 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
         * is no need to worry about the conditional branch over the wrmsr
         * being speculatively taken.
         */
-       if (vmx->spec_ctrl)
-               native_wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
+       x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
 
        vmx->__launched = vmx->loaded_vmcs->launched;
+
        asm(
                /* Store host registers */
                "push %%" _ASM_DX "; push %%" _ASM_BP ";"
@@ -9591,8 +9600,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
        if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
                vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
 
-       if (vmx->spec_ctrl)
-               native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+       x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0);
 
        /* Eliminate branch target predictions from guest mode */
        vmexit_fill_RSB();
@@ -12182,7 +12190,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
        .hardware_enable = hardware_enable,
        .hardware_disable = hardware_disable,
        .cpu_has_accelerated_tpr = report_flexpriority,
-       .cpu_has_high_real_mode_segbase = vmx_has_high_real_mode_segbase,
+       .has_emulated_msr = vmx_has_emulated_msr,
 
        .vcpu_create = vmx_create_vcpu,
        .vcpu_free = vmx_free_vcpu,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3b2c3aa2cd07..649f476039de 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1036,6 +1036,7 @@ static u32 emulated_msrs[] = {
        MSR_IA32_SMBASE,
        MSR_PLATFORM_INFO,
        MSR_MISC_FEATURES_ENABLES,
+       MSR_AMD64_VIRT_SPEC_CTRL,
 };
 
 static unsigned num_emulated_msrs;
@@ -2721,7 +2722,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
                 * fringe case that is not enabled except via specific settings
                 * of the module parameters.
                 */
-               r = kvm_x86_ops->cpu_has_high_real_mode_segbase();
+               r = kvm_x86_ops->has_emulated_msr(MSR_IA32_SMBASE);
                break;
        case KVM_CAP_VAPIC:
                r = !kvm_x86_ops->cpu_has_accelerated_tpr();
@@ -4324,14 +4325,8 @@ static void kvm_init_msr_list(void)
        num_msrs_to_save = j;
 
        for (i = j = 0; i < ARRAY_SIZE(emulated_msrs); i++) {
-               switch (emulated_msrs[i]) {
-               case MSR_IA32_SMBASE:
-                       if (!kvm_x86_ops->cpu_has_high_real_mode_segbase())
-                               continue;
-                       break;
-               default:
-                       break;
-               }
+               if (!kvm_x86_ops->has_emulated_msr(emulated_msrs[i]))
+                       continue;
 
                if (j < i)
                        emulated_msrs[j] = emulated_msrs[i];
diff --git a/arch/x86/mm/pkeys.c b/arch/x86/mm/pkeys.c
index d7bc0eea20a5..6e98e0a7c923 100644
--- a/arch/x86/mm/pkeys.c
+++ b/arch/x86/mm/pkeys.c
@@ -94,26 +94,27 @@ int __arch_override_mprotect_pkey(struct vm_area_struct *vma, int prot, int pkey
         */
        if (pkey != -1)
                return pkey;
-       /*
-        * Look for a protection-key-drive execute-only mapping
-        * which is now being given permissions that are not
-        * execute-only.  Move it back to the default pkey.
-        */
-       if (vma_is_pkey_exec_only(vma) &&
-           (prot & (PROT_READ|PROT_WRITE))) {
-               return 0;
-       }
+
        /*
         * The mapping is execute-only.  Go try to get the
         * execute-only protection key.  If we fail to do that,
         * fall through as if we do not have execute-only
-        * support.
+        * support in this mm.
         */
        if (prot == PROT_EXEC) {
                pkey = execute_only_pkey(vma->vm_mm);
                if (pkey > 0)
                        return pkey;
+       } else if (vma_is_pkey_exec_only(vma)) {
+               /*
+                * Protections are *not* PROT_EXEC, but the mapping
+                * is using the exec-only pkey.  This mapping was
+                * PROT_EXEC and will no longer be.  Move back to
+                * the default pkey.
+                */
+               return ARCH_DEFAULT_PKEY;
        }
+
        /*
         * This is a vanilla, non-pkey mprotect (or we failed to
         * setup execute-only), inherit the pkey from the VMA we
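
The reordered pkey logic above covers two userspace-visible cases: a plain mprotect(PROT_EXEC) may be backed by the execute-only protection key, and dropping back to readable/writable protections must return the VMA to the default key (ARCH_DEFAULT_PKEY). A rough userspace illustration, not part of the patch and dependent on protection-key support in the CPU and kernel:

#include <sys/mman.h>
#include <stdio.h>

int main(void)
{
	size_t len = 4096;
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* May be given the execute-only protection key. */
	if (mprotect(p, len, PROT_EXEC))
		perror("mprotect(PROT_EXEC)");

	/* No longer exec-only: the kernel falls back to the default pkey. */
	if (mprotect(p, len, PROT_READ | PROT_WRITE))
		perror("mprotect(PROT_READ|PROT_WRITE)");

	munmap(p, len);
	return 0;
}
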
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 3e15345abfe7..de0263348f2d 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -42,13 +42,11 @@ xmaddr_t arbitrary_virt_to_machine(void *vaddr)
 }
 EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);
 
-static void xen_flush_tlb_all(void)
+static noinline void xen_flush_tlb_all(void)
 {
        struct mmuext_op *op;
        struct multicall_space mcs;
 
-       trace_xen_mmu_flush_tlb_all(0);
-
        preempt_disable();
 
        mcs = xen_mc_entry(sizeof(*op));
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 042e9c422b21..b3526a98a5a5 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -1280,13 +1280,11 @@ unsigned long xen_read_cr2_direct(void)
        return this_cpu_read(xen_vcpu_info.arch.cr2);
 }
 
-static void xen_flush_tlb(void)
+static noinline void xen_flush_tlb(void)
 {
        struct mmuext_op *op;
        struct multicall_space mcs;
 
-       trace_xen_mmu_flush_tlb(0);
-
        preempt_disable();
 
        mcs = xen_mc_entry(sizeof(*op));
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 825964efda1d..433f14bcab15 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -521,14 +521,22 @@ ssize_t __weak cpu_show_spectre_v2(struct device *dev,
        return sprintf(buf, "Not affected\n");
 }
 
+ssize_t __weak cpu_show_spec_store_bypass(struct device *dev,
+                                         struct device_attribute *attr, char *buf)
+{
+       return sprintf(buf, "Not affected\n");
+}
+
 static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
 static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
 static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
+static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL);
 
 static struct attribute *cpu_root_vulnerabilities_attrs[] = {
        &dev_attr_meltdown.attr,
        &dev_attr_spectre_v1.attr,
        &dev_attr_spectre_v2.attr,
+       &dev_attr_spec_store_bypass.attr,
        NULL
 };
 
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index be38ac7050d4..a7b6734bc3c3 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -749,7 +749,7 @@ static void remove_compat_control_link(struct drm_device *dev)
        if (!minor)
                return;
 
-       name = kasprintf(GFP_KERNEL, "controlD%d", minor->index);
+       name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64);
        if (!name)
                return;
 
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index cc70e2470272..61a2203b75df 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -7044,6 +7044,9 @@ enum {
 #define SLICE_ECO_CHICKEN0                     _MMIO(0x7308)
 #define   PIXEL_MASK_CAMMING_DISABLE           (1 << 14)
 
+#define GEN9_WM_CHICKEN3                       _MMIO(0x5588)
+#define   GEN9_FACTOR_IN_CLR_VAL_HIZ           (1 << 9)
+
 /* WaCatErrorRejectionIssue */
 #define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG         _MMIO(0x9030)
 #define  GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB      (1<<11)
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index b6a7e492c1a3..c0e3e2ffb87d 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -900,6 +900,10 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
        I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
                                    GEN8_LQSC_FLUSH_COHERENT_LINES));
 
+       /* WaClearHIZ_WM_CHICKEN3:bxt,glk */
+       if (IS_GEN9_LP(dev_priv))
+               WA_SET_BIT_MASKED(GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);
+
        /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
        ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
        if (ret)
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
index 13e849bf9aa0..4915fa303a7e 100644
--- a/drivers/i2c/busses/i2c-designware-master.c
+++ b/drivers/i2c/busses/i2c-designware-master.c
@@ -207,7 +207,10 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
        i2c_dw_disable_int(dev);
 
        /* Enable the adapter */
-       __i2c_dw_enable_and_wait(dev, true);
+       __i2c_dw_enable(dev, true);
+
+       /* Dummy read to avoid the register getting stuck on Bay Trail */
+       dw_readl(dev, DW_IC_ENABLE_STATUS);
 
        /* Clear and enable interrupts */
        dw_readl(dev, DW_IC_CLR_INTR);
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 48b3866a9ded..35286907c636 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -140,7 +140,7 @@ static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
        int i;
 
        for (i = 0; i < nr_queues; i++) {
-               q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL);
+               q = kmem_cache_zalloc(qdio_q_cache, GFP_KERNEL);
                if (!q)
                        return -ENOMEM;
 
@@ -456,7 +456,6 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
 {
        struct ciw *ciw;
        struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data;
-       int rc;
 
        memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib));
        memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag));
@@ -493,16 +492,14 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
        ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE);
        if (!ciw) {
                DBF_ERROR("%4x NO EQ", irq_ptr->schid.sch_no);
-               rc = -EINVAL;
-               goto out_err;
+               return -EINVAL;
        }
        irq_ptr->equeue = *ciw;
 
        ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE);
        if (!ciw) {
                DBF_ERROR("%4x NO AQ", irq_ptr->schid.sch_no);
-               rc = -EINVAL;
-               goto out_err;
+               return -EINVAL;
        }
        irq_ptr->aqueue = *ciw;
 
@@ -510,9 +507,6 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
        irq_ptr->orig_handler = init_data->cdev->handler;
        init_data->cdev->handler = qdio_int_handler;
        return 0;
-out_err:
-       qdio_release_memory(irq_ptr);
-       return rc;
 }
 
 void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index 4a39b54732d0..72ce6ad95767 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -703,6 +703,10 @@ void cp_free(struct channel_program *cp)
  * and stores the result to ccwchain list. @cp must have been
  * initialized by a previous call with cp_init(). Otherwise, undefined
  * behavior occurs.
+ * For each chain composing the channel program:
+ * - On entry ch_len holds the count of CCWs to be translated.
+ * - On exit ch_len is adjusted to the count of successfully translated CCWs.
+ * This allows cp_free to find in ch_len the count of CCWs to free in a chain.
  *
  * The S/390 CCW Translation APIS (prefixed by 'cp_') are introduced
  * as helpers to do ccw chain translation inside the kernel. Basically
@@ -737,11 +741,18 @@ int cp_prefetch(struct channel_program *cp)
                for (idx = 0; idx < len; idx++) {
                        ret = ccwchain_fetch_one(chain, idx, cp);
                        if (ret)
-                               return ret;
+                               goto out_err;
                }
        }
 
        return 0;
+out_err:
+       /* Only cleanup the chain elements that were actually translated. */
+       chain->ch_len = idx;
+       list_for_each_entry_continue(chain, &cp->ccwchain_list, next) {
+               chain->ch_len = 0;
+       }
+       return ret;
 }
 
 /**
diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
index a172ab299e80..ff01f865a173 100644
--- a/drivers/spi/spi-bcm-qspi.c
+++ b/drivers/spi/spi-bcm-qspi.c
@@ -490,7 +490,7 @@ static int bcm_qspi_bspi_set_mode(struct bcm_qspi *qspi,
 
 static void bcm_qspi_enable_bspi(struct bcm_qspi *qspi)
 {
-       if (!has_bspi(qspi) || (qspi->bspi_enabled))
+       if (!has_bspi(qspi))
                return;
 
        qspi->bspi_enabled = 1;
@@ -505,7 +505,7 @@ static void bcm_qspi_enable_bspi(struct bcm_qspi *qspi)
 
 static void bcm_qspi_disable_bspi(struct bcm_qspi *qspi)
 {
-       if (!has_bspi(qspi) || (!qspi->bspi_enabled))
+       if (!has_bspi(qspi))
                return;
 
        qspi->bspi_enabled = 0;
@@ -519,16 +519,19 @@ static void bcm_qspi_disable_bspi(struct bcm_qspi *qspi)
 
 static void bcm_qspi_chip_select(struct bcm_qspi *qspi, int cs)
 {
-       u32 data = 0;
+       u32 rd = 0;
+       u32 wr = 0;
 
-       if (qspi->curr_cs == cs)
-               return;
        if (qspi->base[CHIP_SELECT]) {
-               data = bcm_qspi_read(qspi, CHIP_SELECT, 0);
-               data = (data & ~0xff) | (1 << cs);
-               bcm_qspi_write(qspi, CHIP_SELECT, 0, data);
+               rd = bcm_qspi_read(qspi, CHIP_SELECT, 0);
+               wr = (rd & ~0xff) | (1 << cs);
+               if (rd == wr)
+                       return;
+               bcm_qspi_write(qspi, CHIP_SELECT, 0, wr);
                usleep_range(10, 20);
        }
+
+       dev_dbg(&qspi->pdev->dev, "using cs:%d\n", cs);
        qspi->curr_cs = cs;
 }
 
@@ -755,8 +758,13 @@ static int write_to_hw(struct bcm_qspi *qspi, struct spi_device *spi)
                        dev_dbg(&qspi->pdev->dev, "WR %04x\n", val);
                }
                mspi_cdram = MSPI_CDRAM_CONT_BIT;
-               mspi_cdram |= (~(1 << spi->chip_select) &
-                              MSPI_CDRAM_PCS);
+
+               if (has_bspi(qspi))
+                       mspi_cdram &= ~1;
+               else
+                       mspi_cdram |= (~(1 << spi->chip_select) &
+                                      MSPI_CDRAM_PCS);
+
                mspi_cdram |= ((tp.trans->bits_per_word <= 8) ? 0 :
                                MSPI_CDRAM_BITSE_BIT);
 
diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h
index 94f7b0713281..02a8012a318a 100644
--- a/drivers/spi/spi-pxa2xx.h
+++ b/drivers/spi/spi-pxa2xx.h
@@ -38,7 +38,7 @@ struct driver_data {
 
        /* SSP register addresses */
        void __iomem *ioaddr;
-       u32 ssdr_physical;
+       phys_addr_t ssdr_physical;
 
        /* SSP masks*/
        u32 dma_cr1;
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
index 4bc7956cefc4..ea3ce4e17b85 100644
--- a/drivers/tee/tee_shm.c
+++ b/drivers/tee/tee_shm.c
@@ -203,9 +203,10 @@ int tee_shm_get_fd(struct tee_shm *shm)
        if ((shm->flags & req_flags) != req_flags)
                return -EINVAL;
 
+       get_dma_buf(shm->dmabuf);
        fd = dma_buf_fd(shm->dmabuf, O_CLOEXEC);
-       if (fd >= 0)
-               get_dma_buf(shm->dmabuf);
+       if (fd < 0)
+               dma_buf_put(shm->dmabuf);
        return fd;
 }
 
diff --git a/drivers/usb/usbip/stub.h b/drivers/usb/usbip/stub.h
index 910f027773aa..84c0599b45b7 100644
--- a/drivers/usb/usbip/stub.h
+++ b/drivers/usb/usbip/stub.h
@@ -87,6 +87,7 @@ struct bus_id_priv {
        struct stub_device *sdev;
        struct usb_device *udev;
        char shutdown_busid;
+       spinlock_t busid_lock;
 };
 
 /* stub_priv is allocated from stub_priv_cache */
@@ -97,6 +98,7 @@ extern struct usb_device_driver stub_driver;
 
 /* stub_main.c */
 struct bus_id_priv *get_busid_priv(const char *busid);
+void put_busid_priv(struct bus_id_priv *bid);
 int del_match_busid(char *busid);
 void stub_device_cleanup_urbs(struct stub_device *sdev);
 
diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c
index b8915513fc84..cc847f2edf38 100644
--- a/drivers/usb/usbip/stub_dev.c
+++ b/drivers/usb/usbip/stub_dev.c
@@ -314,9 +314,9 @@ static int stub_probe(struct usb_device *udev)
        struct stub_device *sdev = NULL;
        const char *udev_busid = dev_name(&udev->dev);
        struct bus_id_priv *busid_priv;
-       int rc;
+       int rc = 0;
 
-       dev_dbg(&udev->dev, "Enter\n");
+       dev_dbg(&udev->dev, "Enter probe\n");
 
        /* check we should claim or not by busid_table */
        busid_priv = get_busid_priv(udev_busid);
@@ -331,13 +331,15 @@ static int stub_probe(struct usb_device *udev)
                 * other matched drivers by the driver core.
                 * See driver_probe_device() in driver/base/dd.c
                 */
-               return -ENODEV;
+               rc = -ENODEV;
+               goto call_put_busid_priv;
        }
 
        if (udev->descriptor.bDeviceClass == USB_CLASS_HUB) {
                dev_dbg(&udev->dev, "%s is a usb hub device... skip!\n",
                         udev_busid);
-               return -ENODEV;
+               rc = -ENODEV;
+               goto call_put_busid_priv;
        }
 
        if (!strcmp(udev->bus->bus_name, "vhci_hcd")) {
@@ -345,13 +347,16 @@ static int stub_probe(struct usb_device *udev)
                        "%s is attached on vhci_hcd... skip!\n",
                        udev_busid);
 
-               return -ENODEV;
+               rc = -ENODEV;
+               goto call_put_busid_priv;
        }
 
        /* ok, this is my device */
        sdev = stub_device_alloc(udev);
-       if (!sdev)
-               return -ENOMEM;
+       if (!sdev) {
+               rc = -ENOMEM;
+               goto call_put_busid_priv;
+       }
 
        dev_info(&udev->dev,
                "usbip-host: register new device (bus %u dev %u)\n",
@@ -383,7 +388,9 @@ static int stub_probe(struct usb_device *udev)
        }
        busid_priv->status = STUB_BUSID_ALLOC;
 
-       return 0;
+       rc = 0;
+       goto call_put_busid_priv;
+
 err_files:
        usb_hub_release_port(udev->parent, udev->portnum,
                             (struct usb_dev_state *) udev);
@@ -393,6 +400,9 @@ static int stub_probe(struct usb_device *udev)
 
        busid_priv->sdev = NULL;
        stub_device_free(sdev);
+
+call_put_busid_priv:
+       put_busid_priv(busid_priv);
        return rc;
 }
 
@@ -418,7 +428,7 @@ static void stub_disconnect(struct usb_device *udev)
        struct bus_id_priv *busid_priv;
        int rc;
 
-       dev_dbg(&udev->dev, "Enter\n");
+       dev_dbg(&udev->dev, "Enter disconnect\n");
 
        busid_priv = get_busid_priv(udev_busid);
        if (!busid_priv) {
@@ -431,7 +441,7 @@ static void stub_disconnect(struct usb_device *udev)
        /* get stub_device */
        if (!sdev) {
                dev_err(&udev->dev, "could not get device");
-               return;
+               goto call_put_busid_priv;
        }
 
        dev_set_drvdata(&udev->dev, NULL);
@@ -446,12 +456,12 @@ static void stub_disconnect(struct usb_device *udev)
                                  (struct usb_dev_state *) udev);
        if (rc) {
                dev_dbg(&udev->dev, "unable to release port\n");
-               return;
+               goto call_put_busid_priv;
        }
 
        /* If usb reset is called from event handler */
        if (usbip_in_eh(current))
-               return;
+               goto call_put_busid_priv;
 
        /* shutdown the current connection */
        shutdown_busid(busid_priv);
@@ -462,12 +472,11 @@ static void stub_disconnect(struct usb_device *udev)
        busid_priv->sdev = NULL;
        stub_device_free(sdev);
 
-       if (busid_priv->status == STUB_BUSID_ALLOC) {
+       if (busid_priv->status == STUB_BUSID_ALLOC)
                busid_priv->status = STUB_BUSID_ADDED;
-       } else {
-               busid_priv->status = STUB_BUSID_OTHER;
-               del_match_busid((char *)udev_busid);
-       }
+
+call_put_busid_priv:
+       put_busid_priv(busid_priv);
 }
 
 #ifdef CONFIG_PM
diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c
index b59a253a8479..108dd65fbfbc 100644
--- a/drivers/usb/usbip/stub_main.c
+++ b/drivers/usb/usbip/stub_main.c
@@ -28,6 +28,7 @@
 #define DRIVER_DESC "USB/IP Host Driver"
 
 struct kmem_cache *stub_priv_cache;
+
 /*
  * busid_tables defines matching busids that usbip can grab. A user can change
  * dynamically what device is locally used and what device is exported to a
@@ -39,6 +40,8 @@ static spinlock_t busid_table_lock;
 
 static void init_busid_table(void)
 {
+       int i;
+
        /*
         * This also sets the bus_table[i].status to
         * STUB_BUSID_OTHER, which is 0.
@@ -46,6 +49,9 @@ static void init_busid_table(void)
        memset(busid_table, 0, sizeof(busid_table));
 
        spin_lock_init(&busid_table_lock);
+
+       for (i = 0; i < MAX_BUSID; i++)
+               spin_lock_init(&busid_table[i].busid_lock);
 }
 
 /*
@@ -57,15 +63,20 @@ static int get_busid_idx(const char *busid)
        int i;
        int idx = -1;
 
-       for (i = 0; i < MAX_BUSID; i++)
+       for (i = 0; i < MAX_BUSID; i++) {
+               spin_lock(&busid_table[i].busid_lock);
                if (busid_table[i].name[0])
                        if (!strncmp(busid_table[i].name, busid, BUSID_SIZE)) {
                                idx = i;
+                               spin_unlock(&busid_table[i].busid_lock);
                                break;
                        }
+               spin_unlock(&busid_table[i].busid_lock);
+       }
        return idx;
 }
 
+/* Returns holding busid_lock. Should call put_busid_priv() to unlock */
 struct bus_id_priv *get_busid_priv(const char *busid)
 {
        int idx;
@@ -73,13 +84,22 @@ struct bus_id_priv *get_busid_priv(const char *busid)
 
        spin_lock(&busid_table_lock);
        idx = get_busid_idx(busid);
-       if (idx >= 0)
+       if (idx >= 0) {
                bid = &(busid_table[idx]);
+               /* get busid_lock before returning */
+               spin_lock(&bid->busid_lock);
+       }
        spin_unlock(&busid_table_lock);
 
        return bid;
 }
 
+void put_busid_priv(struct bus_id_priv *bid)
+{
+       if (bid)
+               spin_unlock(&bid->busid_lock);
+}
+
 static int add_match_busid(char *busid)
 {
        int i;
@@ -92,15 +112,19 @@ static int add_match_busid(char *busid)
                goto out;
        }
 
-       for (i = 0; i < MAX_BUSID; i++)
+       for (i = 0; i < MAX_BUSID; i++) {
+               spin_lock(&busid_table[i].busid_lock);
                if (!busid_table[i].name[0]) {
                        strlcpy(busid_table[i].name, busid, BUSID_SIZE);
                        if ((busid_table[i].status != STUB_BUSID_ALLOC) &&
                            (busid_table[i].status != STUB_BUSID_REMOV))
                                busid_table[i].status = STUB_BUSID_ADDED;
                        ret = 0;
+                       spin_unlock(&busid_table[i].busid_lock);
                        break;
                }
+               spin_unlock(&busid_table[i].busid_lock);
+       }
 
 out:
        spin_unlock(&busid_table_lock);
@@ -121,6 +145,8 @@ int del_match_busid(char *busid)
        /* found */
        ret = 0;
 
+       spin_lock(&busid_table[idx].busid_lock);
+
        if (busid_table[idx].status == STUB_BUSID_OTHER)
                memset(busid_table[idx].name, 0, BUSID_SIZE);
 
@@ -128,6 +154,7 @@ int del_match_busid(char *busid)
            (busid_table[idx].status != STUB_BUSID_ADDED))
                busid_table[idx].status = STUB_BUSID_REMOV;
 
+       spin_unlock(&busid_table[idx].busid_lock);
 out:
        spin_unlock(&busid_table_lock);
 
@@ -140,9 +167,12 @@ static ssize_t match_busid_show(struct device_driver *drv, char *buf)
        char *out = buf;
 
        spin_lock(&busid_table_lock);
-       for (i = 0; i < MAX_BUSID; i++)
+       for (i = 0; i < MAX_BUSID; i++) {
+               spin_lock(&busid_table[i].busid_lock);
                if (busid_table[i].name[0])
                        out += sprintf(out, "%s ", busid_table[i].name);
+               spin_unlock(&busid_table[i].busid_lock);
+       }
        spin_unlock(&busid_table_lock);
        out += sprintf(out, "\n");
 
@@ -183,6 +213,51 @@ static ssize_t match_busid_store(struct device_driver *dev, const char *buf,
 }
 static DRIVER_ATTR_RW(match_busid);
 
+static int do_rebind(char *busid, struct bus_id_priv *busid_priv)
+{
+       int ret;
+
+       /* device_attach() callers should hold parent lock for USB */
+       if (busid_priv->udev->dev.parent)
+               device_lock(busid_priv->udev->dev.parent);
+       ret = device_attach(&busid_priv->udev->dev);
+       if (busid_priv->udev->dev.parent)
+               device_unlock(busid_priv->udev->dev.parent);
+       if (ret < 0) {
+               dev_err(&busid_priv->udev->dev, "rebind failed\n");
+               return ret;
+       }
+       return 0;
+}
+
+static void stub_device_rebind(void)
+{
+#if IS_MODULE(CONFIG_USBIP_HOST)
+       struct bus_id_priv *busid_priv;
+       int i;
+
+       /* update status to STUB_BUSID_OTHER so probe ignores the device */
+       spin_lock(&busid_table_lock);
+       for (i = 0; i < MAX_BUSID; i++) {
+               if (busid_table[i].name[0] &&
+                   busid_table[i].shutdown_busid) {
+                       busid_priv = &(busid_table[i]);
+                       busid_priv->status = STUB_BUSID_OTHER;
+               }
+       }
+       spin_unlock(&busid_table_lock);
+
+       /* now run rebind - no need to hold locks. driver files are removed */
+       for (i = 0; i < MAX_BUSID; i++) {
+               if (busid_table[i].name[0] &&
+                   busid_table[i].shutdown_busid) {
+                       busid_priv = &(busid_table[i]);
+                       do_rebind(busid_table[i].name, busid_priv);
+               }
+       }
+#endif
+}
+
 static ssize_t rebind_store(struct device_driver *dev, const char *buf,
                                 size_t count)
 {
@@ -200,16 +275,17 @@ static ssize_t rebind_store(struct device_driver *dev, const char *buf,
        if (!bid)
                return -ENODEV;
 
-       /* device_attach() callers should hold parent lock for USB */
-       if (bid->udev->dev.parent)
-               device_lock(bid->udev->dev.parent);
-       ret = device_attach(&bid->udev->dev);
-       if (bid->udev->dev.parent)
-               device_unlock(bid->udev->dev.parent);
-       if (ret < 0) {
-               dev_err(&bid->udev->dev, "rebind failed\n");
+       /* mark the device for deletion so probe ignores it during rescan */
+       bid->status = STUB_BUSID_OTHER;
+       /* release the busid lock */
+       put_busid_priv(bid);
+
+       ret = do_rebind((char *) buf, bid);
+       if (ret < 0)
                return ret;
-       }
+
+       /* delete device from busid_table */
+       del_match_busid((char *) buf);
 
        return count;
 }
@@ -331,6 +407,9 @@ static void __exit usbip_host_exit(void)
         */
        usb_deregister_device_driver(&stub_driver);
 
+       /* initiate scan to attach devices */
+       stub_device_rebind();
+
        kmem_cache_destroy(stub_priv_cache);
 }
 
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 21cc27509993..d1b9900ebc9b 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -2497,10 +2497,8 @@ read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
        if (p->reada != READA_NONE)
                reada_for_search(fs_info, p, level, slot, key->objectid);
 
-       btrfs_release_path(p);
-
        ret = -EAGAIN;
-       tmp = read_tree_block(fs_info, blocknr, 0);
+       tmp = read_tree_block(fs_info, blocknr, gen);
        if (!IS_ERR(tmp)) {
                /*
                 * If the read above didn't mark this buffer up to date,
@@ -2514,6 +2512,8 @@ read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
        } else {
                ret = PTR_ERR(tmp);
        }
+
+       btrfs_release_path(p);
        return ret;
 }
 
@@ -5454,12 +5454,24 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
        down_read(&fs_info->commit_root_sem);
        left_level = btrfs_header_level(left_root->commit_root);
        left_root_level = left_level;
-       left_path->nodes[left_level] = left_root->commit_root;
+       left_path->nodes[left_level] =
+                       btrfs_clone_extent_buffer(left_root->commit_root);
+       if (!left_path->nodes[left_level]) {
+               up_read(&fs_info->commit_root_sem);
+               ret = -ENOMEM;
+               goto out;
+       }
        extent_buffer_get(left_path->nodes[left_level]);
 
        right_level = btrfs_header_level(right_root->commit_root);
        right_root_level = right_level;
-       right_path->nodes[right_level] = right_root->commit_root;
+       right_path->nodes[right_level] =
+                       btrfs_clone_extent_buffer(right_root->commit_root);
+       if (!right_path->nodes[right_level]) {
+               up_read(&fs_info->commit_root_sem);
+               ret = -ENOMEM;
+               goto out;
+       }
        extent_buffer_get(right_path->nodes[right_level]);
        up_read(&fs_info->commit_root_sem);
 
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 8fc690384c58..c44703e21396 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -3150,6 +3150,8 @@ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
                              u64 *orig_start, u64 *orig_block_len,
                              u64 *ram_bytes);
 
+void __btrfs_del_delalloc_inode(struct btrfs_root *root,
+                               struct btrfs_inode *inode);
 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry);
 int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index);
 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 79f0f282a0ef..4a630aeabb10 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3905,6 +3905,7 @@ void close_ctree(struct btrfs_fs_info *fs_info)
        set_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags);
 
        btrfs_free_qgroup_config(fs_info);
+       ASSERT(list_empty(&fs_info->delalloc_roots));
 
        if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
                btrfs_info(fs_info, "at unmount delalloc count %lld",
@@ -4203,15 +4204,15 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info)
 
 static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info)
 {
+       /* cleanup FS via transaction */
+       btrfs_cleanup_transaction(fs_info);
+
        mutex_lock(&fs_info->cleaner_mutex);
        btrfs_run_delayed_iputs(fs_info);
        mutex_unlock(&fs_info->cleaner_mutex);
 
        down_write(&fs_info->cleanup_work_sem);
        up_write(&fs_info->cleanup_work_sem);
-
-       /* cleanup FS via transaction */
-       btrfs_cleanup_transaction(fs_info);
 }
 
 static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
@@ -4334,19 +4335,23 @@ static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
        list_splice_init(&root->delalloc_inodes, &splice);
 
        while (!list_empty(&splice)) {
+               struct inode *inode = NULL;
                btrfs_inode = list_first_entry(&splice, struct btrfs_inode,
                                               delalloc_inodes);
-
-               list_del_init(&btrfs_inode->delalloc_inodes);
-               clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
-                         &btrfs_inode->runtime_flags);
+               __btrfs_del_delalloc_inode(root, btrfs_inode);
                spin_unlock(&root->delalloc_lock);
 
-               btrfs_invalidate_inodes(btrfs_inode->root);
-
+               /*
+                * Make sure we get a live inode and that it'll not disappear
+                * meanwhile.
+                */
+               inode = igrab(&btrfs_inode->vfs_inode);
+               if (inode) {
+                       invalidate_inode_pages2(inode->i_mapping);
+                       iput(inode);
+               }
                spin_lock(&root->delalloc_lock);
        }
-
        spin_unlock(&root->delalloc_lock);
 }
 
@@ -4362,7 +4367,6 @@ static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info)
        while (!list_empty(&splice)) {
                root = list_first_entry(&splice, struct btrfs_root,
                                         delalloc_root);
-               list_del_init(&root->delalloc_root);
                root = btrfs_grab_fs_root(root);
                BUG_ON(!root);
                spin_unlock(&fs_info->delalloc_root_lock);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 9f21c29d0259..b0fa3a032143 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1754,12 +1754,12 @@ static void btrfs_add_delalloc_inodes(struct btrfs_root *root,
        spin_unlock(&root->delalloc_lock);
 }
 
-static void btrfs_del_delalloc_inode(struct btrfs_root *root,
-                                    struct btrfs_inode *inode)
+
+void __btrfs_del_delalloc_inode(struct btrfs_root *root,
+                               struct btrfs_inode *inode)
 {
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
 
-       spin_lock(&root->delalloc_lock);
        if (!list_empty(&inode->delalloc_inodes)) {
                list_del_init(&inode->delalloc_inodes);
                clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
@@ -1772,6 +1772,13 @@ static void btrfs_del_delalloc_inode(struct btrfs_root *root,
                        spin_unlock(&fs_info->delalloc_root_lock);
                }
        }
+}
+
+static void btrfs_del_delalloc_inode(struct btrfs_root *root,
+                                    struct btrfs_inode *inode)
+{
+       spin_lock(&root->delalloc_lock);
+       __btrfs_del_delalloc_inode(root, inode);
        spin_unlock(&root->delalloc_lock);
 }
 
diff --git a/fs/btrfs/props.c b/fs/btrfs/props.c
index f6a05f836629..cbabc6f2b322 100644
--- a/fs/btrfs/props.c
+++ b/fs/btrfs/props.c
@@ -400,6 +400,7 @@ static int prop_compression_apply(struct inode *inode,
                                  const char *value,
                                  size_t len)
 {
+       struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        int type;
 
        if (len == 0) {
@@ -410,14 +411,17 @@ static int prop_compression_apply(struct inode *inode,
                return 0;
        }
 
-       if (!strncmp("lzo", value, 3))
+       if (!strncmp("lzo", value, 3)) {
                type = BTRFS_COMPRESS_LZO;
-       else if (!strncmp("zlib", value, 4))
+               btrfs_set_fs_incompat(fs_info, COMPRESS_LZO);
+       } else if (!strncmp("zlib", value, 4)) {
                type = BTRFS_COMPRESS_ZLIB;
-       else if (!strncmp("zstd", value, len))
+       } else if (!strncmp("zstd", value, len)) {
                type = BTRFS_COMPRESS_ZSTD;
-       else
+               btrfs_set_fs_incompat(fs_info, COMPRESS_ZSTD);
+       } else {
                return -EINVAL;
+       }
 
        BTRFS_I(inode)->flags &= ~BTRFS_INODE_NOCOMPRESS;
        BTRFS_I(inode)->flags |= BTRFS_INODE_COMPRESS;
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index b6dfe7af7a1f..2794f3550db6 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -4669,6 +4669,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
        struct extent_map_tree *em_tree = &inode->extent_tree;
        u64 logged_isize = 0;
        bool need_log_inode_item = true;
+       bool xattrs_logged = false;
 
        path = btrfs_alloc_path();
        if (!path)
@@ -4971,6 +4972,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
        err = btrfs_log_all_xattrs(trans, root, inode, path, dst_path);
        if (err)
                goto out_unlock;
+       xattrs_logged = true;
        if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) {
                btrfs_release_path(path);
                btrfs_release_path(dst_path);
@@ -4983,6 +4985,11 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
        btrfs_release_path(dst_path);
        if (need_log_inode_item) {
                err = log_inode_item(trans, log, dst_path, inode);
+               if (!err && !xattrs_logged) {
+                       err = btrfs_log_all_xattrs(trans, root, inode, path,
+                                                  dst_path);
+                       btrfs_release_path(path);
+               }
                if (err)
                        goto out_unlock;
        }
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index b983e7fb200b..08afafb6ecf7 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -3964,6 +3964,15 @@ int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
                return 0;
        }
 
+       /*
+        * A ro->rw remount sequence should continue with the paused balance
+        * regardless of who pauses it, system or the user as of now, so set
+        * the resume flag.
+        */
+       spin_lock(&fs_info->balance_lock);
+       fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME;
+       spin_unlock(&fs_info->balance_lock);
+
        tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
        return PTR_ERR_OR_ZERO(tsk);
 }
diff --git a/fs/proc/array.c b/fs/proc/array.c
index e6094a15ef30..4ac811e1a26c 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -85,6 +85,7 @@
 #include <linux/delayacct.h>
 #include <linux/seq_file.h>
 #include <linux/pid_namespace.h>
+#include <linux/prctl.h>
 #include <linux/ptrace.h>
 #include <linux/tracehook.h>
 #include <linux/string_helpers.h>
@@ -347,6 +348,30 @@ static inline void task_seccomp(struct seq_file *m, struct task_struct *p)
 #ifdef CONFIG_SECCOMP
        seq_put_decimal_ull(m, "\nSeccomp:\t", p->seccomp.mode);
 #endif
+       seq_printf(m, "\nSpeculation_Store_Bypass:\t");
+       switch (arch_prctl_spec_ctrl_get(p, PR_SPEC_STORE_BYPASS)) {
+       case -EINVAL:
+               seq_printf(m, "unknown");
+               break;
+       case PR_SPEC_NOT_AFFECTED:
+               seq_printf(m, "not vulnerable");
+               break;
+       case PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE:
+               seq_printf(m, "thread force mitigated");
+               break;
+       case PR_SPEC_PRCTL | PR_SPEC_DISABLE:
+               seq_printf(m, "thread mitigated");
+               break;
+       case PR_SPEC_PRCTL | PR_SPEC_ENABLE:
+               seq_printf(m, "thread vulnerable");
+               break;
+       case PR_SPEC_DISABLE:
+               seq_printf(m, "globally mitigated");
+               break;
+       default:
+               seq_printf(m, "vulnerable");
+               break;
+       }
        seq_putc(m, '\n');
 }
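
With the hunk above, the new Speculation_Store_Bypass line appears in /proc/<pid>/status next to the Seccomp field. A minimal sketch (illustration only, not part of the patch) that prints it for the current task:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/self/status", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* Print only the speculation status line, e.g. "thread vulnerable". */
	while (fgets(line, sizeof(line), f)) {
		if (!strncmp(line, "Speculation_Store_Bypass:", 25))
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}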
 
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index c816e6f2730c..9546bf2fe310 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -53,6 +53,8 @@ extern ssize_t cpu_show_spectre_v1(struct device *dev,
                                   struct device_attribute *attr, char *buf);
 extern ssize_t cpu_show_spectre_v2(struct device *dev,
                                   struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_spec_store_bypass(struct device *dev,
+                                         struct device_attribute *attr, char *buf);
 
 extern __printf(4, 5)
 struct device *cpu_device_create(struct device *parent, void *drvdata,
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 29fdf8029cf6..b68b7d199fee 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -395,8 +395,8 @@ typedef struct {
        u32 attributes;
        u32 get_bar_attributes;
        u32 set_bar_attributes;
-       uint64_t romsize;
-       void *romimage;
+       u64 romsize;
+       u32 romimage;
 } efi_pci_io_protocol_32;
 
 typedef struct {
@@ -415,8 +415,8 @@ typedef struct {
        u64 attributes;
        u64 get_bar_attributes;
        u64 set_bar_attributes;
-       uint64_t romsize;
-       void *romimage;
+       u64 romsize;
+       u64 romimage;
 } efi_pci_io_protocol_64;
 
 typedef struct {
diff --git a/include/linux/nospec.h b/include/linux/nospec.h
index e791ebc65c9c..0c5ef54fd416 100644
--- a/include/linux/nospec.h
+++ b/include/linux/nospec.h
@@ -7,6 +7,8 @@
 #define _LINUX_NOSPEC_H
 #include <asm/barrier.h>
 
+struct task_struct;
+
 /**
 * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise
  * @index: array element index
@@ -55,4 +57,12 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
                                                                        \
        (typeof(_i)) (_i & _mask);                                      \
 })
+
+/* Speculation control prctl */
+int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which);
+int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
+                            unsigned long ctrl);
+/* Speculation control for seccomp enforced mitigation */
+void arch_seccomp_spec_mitigate(struct task_struct *task);
+
 #endif /* _LINUX_NOSPEC_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 41354690e4e3..2c9790b43881 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1363,7 +1363,8 @@ static inline bool is_percpu_thread(void)
 #define PFA_NO_NEW_PRIVS               0       /* May not gain new privileges. */
 #define PFA_SPREAD_PAGE                        1       /* Spread page cache over cpuset */
 #define PFA_SPREAD_SLAB                        2       /* Spread some slab caches over cpuset */
-
+#define PFA_SPEC_SSB_DISABLE           3       /* Speculative Store Bypass disabled */
+#define PFA_SPEC_SSB_FORCE_DISABLE     4       /* Speculative Store Bypass force disabled*/
 
 #define TASK_PFA_TEST(name, func)                                      \
        static inline bool task_##func(struct task_struct *p)           \
@@ -1388,6 +1389,13 @@ TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
 TASK_PFA_SET(SPREAD_SLAB, spread_slab)
 TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
 
+TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable)
+TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable)
+TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)
+
+TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
+TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
+
 static inline void
 current_restore_flags(unsigned long orig_flags, unsigned long flags)
 {
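
For readers unfamiliar with the TASK_PFA_* helpers: each invocation generates a small inline accessor. Assuming the macros operate on p->atomic_flags as in the surrounding definitions, the new SPEC_SSB_DISABLE instances expand to roughly the following (a sketch, not part of the patch). Note that no clear helper is generated for the FORCE_DISABLE flag, so a forced disable cannot be undone later:

static inline bool task_spec_ssb_disable(struct task_struct *p)
{
	return test_bit(PFA_SPEC_SSB_DISABLE, &p->atomic_flags);
}

static inline void task_set_spec_ssb_disable(struct task_struct *p)
{
	set_bit(PFA_SPEC_SSB_DISABLE, &p->atomic_flags);
}

static inline void task_clear_spec_ssb_disable(struct task_struct *p)
{
	clear_bit(PFA_SPEC_SSB_DISABLE, &p->atomic_flags);
}

/* SPEC_SSB_FORCE_DISABLE deliberately gets only the test and set variants. */
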
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
index 10f25f7e4304..a9d5c52de4ea 100644
--- a/include/linux/seccomp.h
+++ b/include/linux/seccomp.h
@@ -4,8 +4,9 @@
 
 #include <uapi/linux/seccomp.h>
 
-#define SECCOMP_FILTER_FLAG_MASK       (SECCOMP_FILTER_FLAG_TSYNC | \
-                                        SECCOMP_FILTER_FLAG_LOG)
+#define SECCOMP_FILTER_FLAG_MASK       (SECCOMP_FILTER_FLAG_TSYNC      | \
+                                        SECCOMP_FILTER_FLAG_LOG        | \
+                                        SECCOMP_FILTER_FLAG_SPEC_ALLOW)
 
 #ifdef CONFIG_SECCOMP
 
diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h
index d791863b62fc..2ec9064a2bb7 100644
--- a/include/trace/events/xen.h
+++ b/include/trace/events/xen.h
@@ -349,22 +349,6 @@ DECLARE_EVENT_CLASS(xen_mmu_pgd,
 DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_pin);
 DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_unpin);
 
-TRACE_EVENT(xen_mmu_flush_tlb_all,
-           TP_PROTO(int x),
-           TP_ARGS(x),
-           TP_STRUCT__entry(__array(char, x, 0)),
-           TP_fast_assign((void)x),
-           TP_printk("%s", "")
-       );
-
-TRACE_EVENT(xen_mmu_flush_tlb,
-           TP_PROTO(int x),
-           TP_ARGS(x),
-           TP_STRUCT__entry(__array(char, x, 0)),
-           TP_fast_assign((void)x),
-           TP_printk("%s", "")
-       );
-
 TRACE_EVENT(xen_mmu_flush_tlb_one_user,
            TP_PROTO(unsigned long addr),
            TP_ARGS(addr),
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index b640071421f7..3027f943f4b3 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -198,4 +198,16 @@ struct prctl_mm_map {
 # define PR_CAP_AMBIENT_LOWER          3
 # define PR_CAP_AMBIENT_CLEAR_ALL      4
 
+/* Per task speculation control */
+#define PR_GET_SPECULATION_CTRL                52
+#define PR_SET_SPECULATION_CTRL                53
+/* Speculation control variants */
+# define PR_SPEC_STORE_BYPASS          0
+/* Return and control values for PR_SET/GET_SPECULATION_CTRL */
+# define PR_SPEC_NOT_AFFECTED          0
+# define PR_SPEC_PRCTL                 (1UL << 0)
+# define PR_SPEC_ENABLE                        (1UL << 1)
+# define PR_SPEC_DISABLE               (1UL << 2)
+# define PR_SPEC_FORCE_DISABLE         (1UL << 3)
+
 #endif /* _LINUX_PRCTL_H */
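
A minimal userspace sketch of the new interface (illustration only, not part of the patch; the constants are repeated for older headers): the task opts itself out of Speculative Store Bypass and reads the state back. Per the kernel/sys.c hunk later in this patch, the unused trailing arguments must be zero or the calls fail with -EINVAL.

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_SPECULATION_CTRL
#define PR_GET_SPECULATION_CTRL		52
#define PR_SET_SPECULATION_CTRL		53
#define PR_SPEC_STORE_BYPASS		0
#define PR_SPEC_PRCTL			(1UL << 0)
#define PR_SPEC_ENABLE			(1UL << 1)
#define PR_SPEC_DISABLE			(1UL << 2)
#endif

int main(void)
{
	/* Disable the SSB optimization for this task; unused args must be 0. */
	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
		  PR_SPEC_DISABLE, 0, 0))
		perror("PR_SET_SPECULATION_CTRL");

	/* Query the state; PR_SPEC_PRCTL in the result means per-task control works. */
	printf("state: 0x%x\n",
	       prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0));
	return 0;
}
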
diff --git a/include/uapi/linux/seccomp.h b/include/uapi/linux/seccomp.h
index 2a0bd9dd104d..9efc0e73d50b 100644
--- a/include/uapi/linux/seccomp.h
+++ b/include/uapi/linux/seccomp.h
@@ -17,8 +17,9 @@
 #define SECCOMP_GET_ACTION_AVAIL       2
 
 /* Valid flags for SECCOMP_SET_MODE_FILTER */
-#define SECCOMP_FILTER_FLAG_TSYNC      1
-#define SECCOMP_FILTER_FLAG_LOG                2
+#define SECCOMP_FILTER_FLAG_TSYNC      (1UL << 0)
+#define SECCOMP_FILTER_FLAG_LOG                (1UL << 1)
+#define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2)
 
 /*
  * All BPF programs must return a 32-bit value.
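
Existing seccomp users need not change anything to get the default behaviour; passing the new SECCOMP_FILTER_FLAG_SPEC_ALLOW flag opts a filter out of the implicit mitigation applied in seccomp_assign_mode(). A hedged sketch (illustration only, not part of the patch) of installing a trivial allow-all filter with the flag:

#include <stdio.h>
#include <linux/filter.h>
#include <linux/seccomp.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef SECCOMP_FILTER_FLAG_SPEC_ALLOW
#define SECCOMP_FILTER_FLAG_SPEC_ALLOW	(1UL << 2)
#endif

int main(void)
{
	struct sock_filter allow = BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW);
	struct sock_fprog prog = { .len = 1, .filter = &allow };

	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
		perror("no_new_privs");

	/* Without the flag, the kernel would call arch_seccomp_spec_mitigate(). */
	if (syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER,
		    SECCOMP_FILTER_FLAG_SPEC_ALLOW, &prog))
		perror("seccomp");
	return 0;
}
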
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index 5f0dfb2abb8d..075e344a87c3 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -19,6 +19,8 @@
 #include <linux/compat.h>
 #include <linux/coredump.h>
 #include <linux/kmemleak.h>
+#include <linux/nospec.h>
+#include <linux/prctl.h>
 #include <linux/sched.h>
 #include <linux/sched/task_stack.h>
 #include <linux/seccomp.h>
@@ -227,8 +229,11 @@ static inline bool seccomp_may_assign_mode(unsigned long seccomp_mode)
        return true;
 }
 
+void __weak arch_seccomp_spec_mitigate(struct task_struct *task) { }
+
 static inline void seccomp_assign_mode(struct task_struct *task,
-                                      unsigned long seccomp_mode)
+                                      unsigned long seccomp_mode,
+                                      unsigned long flags)
 {
        assert_spin_locked(&task->sighand->siglock);
 
@@ -238,6 +243,9 @@ static inline void seccomp_assign_mode(struct task_struct *task,
         * filter) is set.
         */
        smp_mb__before_atomic();
+       /* Assume default seccomp processes want spec flaw mitigation. */
+       if ((flags & SECCOMP_FILTER_FLAG_SPEC_ALLOW) == 0)
+               arch_seccomp_spec_mitigate(task);
        set_tsk_thread_flag(task, TIF_SECCOMP);
 }
 
@@ -305,7 +313,7 @@ static inline pid_t seccomp_can_sync_threads(void)
  * without dropping the locks.
  *
  */
-static inline void seccomp_sync_threads(void)
+static inline void seccomp_sync_threads(unsigned long flags)
 {
        struct task_struct *thread, *caller;
 
@@ -346,7 +354,8 @@ static inline void seccomp_sync_threads(void)
                 * allow one thread to transition the other.
                 */
                if (thread->seccomp.mode == SECCOMP_MODE_DISABLED)
-                       seccomp_assign_mode(thread, SECCOMP_MODE_FILTER);
+                       seccomp_assign_mode(thread, SECCOMP_MODE_FILTER,
+                                           flags);
        }
 }
 
@@ -469,7 +478,7 @@ static long seccomp_attach_filter(unsigned int flags,
 
        /* Now that the new filter is in place, synchronize to all threads. */
        if (flags & SECCOMP_FILTER_FLAG_TSYNC)
-               seccomp_sync_threads();
+               seccomp_sync_threads(flags);
 
        return 0;
 }
@@ -818,7 +827,7 @@ static long seccomp_set_mode_strict(void)
 #ifdef TIF_NOTSC
        disable_TSC();
 #endif
-       seccomp_assign_mode(current, seccomp_mode);
+       seccomp_assign_mode(current, seccomp_mode, 0);
        ret = 0;
 
 out:
@@ -876,7 +885,7 @@ static long seccomp_set_mode_filter(unsigned int flags,
        /* Do not free the successfully attached filter. */
        prepared = NULL;
 
-       seccomp_assign_mode(current, seccomp_mode);
+       seccomp_assign_mode(current, seccomp_mode, flags);
 out:
        spin_unlock_irq(&current->sighand->siglock);
        if (flags & SECCOMP_FILTER_FLAG_TSYNC)
diff --git a/kernel/sys.c b/kernel/sys.c
index 524a4cb9bbe2..b5c1bc9e3769 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -61,6 +61,8 @@
 #include <linux/uidgid.h>
 #include <linux/cred.h>
 
+#include <linux/nospec.h>
+
 #include <linux/kmsg_dump.h>
 /* Move somewhere else to avoid recompiling? */
 #include <generated/utsrelease.h>
@@ -2184,6 +2186,17 @@ static int propagate_has_child_subreaper(struct task_struct *p, void *data)
        return 1;
 }
 
+int __weak arch_prctl_spec_ctrl_get(struct task_struct *t, unsigned long which)
+{
+       return -EINVAL;
+}
+
+int __weak arch_prctl_spec_ctrl_set(struct task_struct *t, unsigned long which,
+                                   unsigned long ctrl)
+{
+       return -EINVAL;
+}
+
 SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
                unsigned long, arg4, unsigned long, arg5)
 {
@@ -2386,6 +2399,16 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
        case PR_GET_FP_MODE:
                error = GET_FP_MODE(me);
                break;
+       case PR_GET_SPECULATION_CTRL:
+               if (arg3 || arg4 || arg5)
+                       return -EINVAL;
+               error = arch_prctl_spec_ctrl_get(me, arg2);
+               break;
+       case PR_SET_SPECULATION_CTRL:
+               if (arg4 || arg5)
+                       return -EINVAL;
+               error = arch_prctl_spec_ctrl_set(me, arg2, arg3);
+               break;
        default:
                error = -EINVAL;
                break;
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index b398c2ea69b2..aa2094d5dd27 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -612,6 +612,14 @@ static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
        now = ktime_get();
        /* Find all expired events */
        for_each_cpu(cpu, tick_broadcast_oneshot_mask) {
+               /*
+                * Required for !SMP because for_each_cpu() reports
+                * unconditionally CPU0 as set on UP kernels.
+                */
+               if (!IS_ENABLED(CONFIG_SMP) &&
+                   cpumask_empty(tick_broadcast_oneshot_mask))
+                       break;
+
                td = &per_cpu(tick_cpu_device, cpu);
                if (td->evtdev->next_event <= now) {
                        cpumask_set_cpu(cpu, tmpmask);
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 8b1feca1230a..70d677820740 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -1613,11 +1613,9 @@ static void set_iter_tags(struct radix_tree_iter *iter,
 static void __rcu **skip_siblings(struct radix_tree_node **nodep,
                        void __rcu **slot, struct radix_tree_iter *iter)
 {
-       void *sib = node_to_entry(slot - 1);
-
        while (iter->index < iter->next_index) {
                *nodep = rcu_dereference_raw(*slot);
-               if (*nodep && *nodep != sib)
+               if (*nodep && !is_sibling_entry(iter->node, *nodep))
                        return slot;
                slot++;
                iter->index = __radix_tree_iter_add(iter, 1);
@@ -1632,7 +1630,7 @@ void __rcu **__radix_tree_next_slot(void __rcu **slot,
                                struct radix_tree_iter *iter, unsigned flags)
 {
        unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK;
-       struct radix_tree_node *node = rcu_dereference_raw(*slot);
+       struct radix_tree_node *node;
 
        slot = skip_siblings(&node, slot, iter);
 
diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
index 0ddf293cfac3..0a6f492fb9d9 100644
--- a/lib/test_bitmap.c
+++ b/lib/test_bitmap.c
@@ -434,23 +434,32 @@ static void noinline __init test_mem_optimisations(void)
        unsigned int start, nbits;
 
        for (start = 0; start < 1024; start += 8) {
-               memset(bmap1, 0x5a, sizeof(bmap1));
-               memset(bmap2, 0x5a, sizeof(bmap2));
                for (nbits = 0; nbits < 1024 - start; nbits += 8) {
+                       memset(bmap1, 0x5a, sizeof(bmap1));
+                       memset(bmap2, 0x5a, sizeof(bmap2));
+
                        bitmap_set(bmap1, start, nbits);
                        __bitmap_set(bmap2, start, nbits);
-                       if (!bitmap_equal(bmap1, bmap2, 1024))
+                       if (!bitmap_equal(bmap1, bmap2, 1024)) {
                                printk("set not equal %d %d\n", start, nbits);
-                       if (!__bitmap_equal(bmap1, bmap2, 1024))
+                               failed_tests++;
+                       }
+                       if (!__bitmap_equal(bmap1, bmap2, 1024)) {
                                printk("set not __equal %d %d\n", start, nbits);
+                               failed_tests++;
+                       }
 
                        bitmap_clear(bmap1, start, nbits);
                        __bitmap_clear(bmap2, start, nbits);
-                       if (!bitmap_equal(bmap1, bmap2, 1024))
+                       if (!bitmap_equal(bmap1, bmap2, 1024)) {
                                printk("clear not equal %d %d\n", start, nbits);
-                       if (!__bitmap_equal(bmap1, bmap2, 1024))
+                               failed_tests++;
+                       }
+                       if (!__bitmap_equal(bmap1, bmap2, 1024)) {
                                printk("clear not __equal %d %d\n", start,
                                                                        nbits);
+                               failed_tests++;
+                       }
                }
        }
 }
diff --git a/mm/Kconfig b/mm/Kconfig
index 9c4bdddd80c2..59efbd3337e0 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -649,6 +649,7 @@ config DEFERRED_STRUCT_PAGE_INIT
        depends on ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
        depends on NO_BOOTMEM && MEMORY_HOTPLUG
        depends on !FLATMEM
+       depends on !NEED_PER_CPU_KM
        help
          Ordinarily all struct pages are initialised during early boot in a
          single thread. On very large machines this can take a considerable
diff --git a/net/ipv4/netfilter/nf_socket_ipv4.c b/net/ipv4/netfilter/nf_socket_ipv4.c
index e9293bdebba0..4824b1e183a1 100644
--- a/net/ipv4/netfilter/nf_socket_ipv4.c
+++ b/net/ipv4/netfilter/nf_socket_ipv4.c
@@ -108,10 +108,12 @@ struct sock *nf_sk_lookup_slow_v4(struct net *net, const struct sk_buff *skb,
        int doff = 0;
 
        if (iph->protocol == IPPROTO_UDP || iph->protocol == IPPROTO_TCP) {
-               struct udphdr _hdr, *hp;
+               struct tcphdr _hdr;
+               struct udphdr *hp;
 
                hp = skb_header_pointer(skb, ip_hdrlen(skb),
-                                       sizeof(_hdr), &_hdr);
+                                       iph->protocol == IPPROTO_UDP ?
+                                       sizeof(*hp) : sizeof(_hdr), &_hdr);
                if (hp == NULL)
                        return NULL;
 
diff --git a/net/ipv6/netfilter/nf_socket_ipv6.c b/net/ipv6/netfilter/nf_socket_ipv6.c
index ebb2bf84232a..f14de4b6d639 100644
--- a/net/ipv6/netfilter/nf_socket_ipv6.c
+++ b/net/ipv6/netfilter/nf_socket_ipv6.c
@@ -116,9 +116,11 @@ struct sock *nf_sk_lookup_slow_v6(struct net *net, const struct sk_buff *skb,
        }
 
        if (tproto == IPPROTO_UDP || tproto == IPPROTO_TCP) {
-               struct udphdr _hdr, *hp;
+               struct tcphdr _hdr;
+               struct udphdr *hp;
 
-               hp = skb_header_pointer(skb, thoff, sizeof(_hdr), &_hdr);
+               hp = skb_header_pointer(skb, thoff, tproto == IPPROTO_UDP ?
+                                       sizeof(*hp) : sizeof(_hdr), &_hdr);
                if (hp == NULL)
                        return NULL;
 
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 5b504aa653f5..689e9c0570ba 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -2344,41 +2344,46 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
        }
 
        if (nlh->nlmsg_flags & NLM_F_REPLACE) {
-               if (nft_is_active_next(net, old_rule)) {
-                       trans = nft_trans_rule_add(&ctx, NFT_MSG_DELRULE,
-                                                  old_rule);
-                       if (trans == NULL) {
-                               err = -ENOMEM;
-                               goto err2;
-                       }
-                       nft_deactivate_next(net, old_rule);
-                       chain->use--;
-                       list_add_tail_rcu(&rule->list, &old_rule->list);
-               } else {
+               if (!nft_is_active_next(net, old_rule)) {
                        err = -ENOENT;
                        goto err2;
                }
-       } else if (nlh->nlmsg_flags & NLM_F_APPEND)
-               if (old_rule)
-                       list_add_rcu(&rule->list, &old_rule->list);
-               else
-                       list_add_tail_rcu(&rule->list, &chain->rules);
-       else {
-               if (old_rule)
-                       list_add_tail_rcu(&rule->list, &old_rule->list);
-               else
-                       list_add_rcu(&rule->list, &chain->rules);
-       }
+               trans = nft_trans_rule_add(&ctx, NFT_MSG_DELRULE,
+                                          old_rule);
+               if (trans == NULL) {
+                       err = -ENOMEM;
+                       goto err2;
+               }
+               nft_deactivate_next(net, old_rule);
+               chain->use--;
 
-       if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) {
-               err = -ENOMEM;
-               goto err3;
+               if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) {
+                       err = -ENOMEM;
+                       goto err2;
+               }
+
+               list_add_tail_rcu(&rule->list, &old_rule->list);
+       } else {
+               if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) {
+                       err = -ENOMEM;
+                       goto err2;
+               }
+
+               if (nlh->nlmsg_flags & NLM_F_APPEND) {
+                       if (old_rule)
+                               list_add_rcu(&rule->list, &old_rule->list);
+                       else
+                               list_add_tail_rcu(&rule->list, &chain->rules);
+                } else {
+                       if (old_rule)
+                               list_add_tail_rcu(&rule->list, &old_rule->list);
+                       else
+                               list_add_rcu(&rule->list, &chain->rules);
+               }
        }
        chain->use++;
        return 0;
 
-err3:
-       list_del_rcu(&rule->list);
 err2:
        nf_tables_rule_destroy(&ctx, rule);
 err1:
@@ -3196,18 +3201,20 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
 
        err = ops->init(set, &desc, nla);
        if (err < 0)
-               goto err2;
+               goto err3;
 
        err = nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set);
        if (err < 0)
-               goto err3;
+               goto err4;
 
        list_add_tail_rcu(&set->list, &table->sets);
        table->use++;
        return 0;
 
-err3:
+err4:
        ops->destroy(set);
+err3:
+       kfree(set->name);
 err2:
        kvfree(set);
 err1:
diff --git a/sound/core/control_compat.c b/sound/core/control_compat.c
index a848836a5de0..507fd5210c1c 100644
--- a/sound/core/control_compat.c
+++ b/sound/core/control_compat.c
@@ -396,8 +396,7 @@ static int snd_ctl_elem_add_compat(struct snd_ctl_file *file,
        if (copy_from_user(&data->id, &data32->id, sizeof(data->id)) ||
            copy_from_user(&data->type, &data32->type, 3 * sizeof(u32)))
                goto error;
-       if (get_user(data->owner, &data32->owner) ||
-           get_user(data->type, &data32->type))
+       if (get_user(data->owner, &data32->owner))
                goto error;
        switch (data->type) {
        case SNDRV_CTL_ELEM_TYPE_BOOLEAN:
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 738e1fe90312..62fbdbe74b93 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2208,6 +2208,8 @@ static struct snd_pci_quirk power_save_blacklist[] = {
        SND_PCI_QUIRK(0x1849, 0x0c0c, "Asrock B85M-ITX", 0),
        /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
        SND_PCI_QUIRK(0x1043, 0x8733, "Asus Prime X370-Pro", 0),
+       /* https://bugzilla.redhat.com/show_bug.cgi?id=1572975 */
+       SND_PCI_QUIRK(0x17aa, 0x36a7, "Lenovo C50 All in one", 0),
        /* https://bugzilla.kernel.org/show_bug.cgi?id=198611 */
        SND_PCI_QUIRK(0x17aa, 0x2227, "Lenovo X1 Carbon 3rd Gen", 0),
        {}
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 89efec891e68..4d950b7c2f97 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -911,6 +911,14 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
                }
                break;
 
+       case USB_ID(0x0d8c, 0x0103):
+               if (!strcmp(kctl->id.name, "PCM Playback Volume")) {
+                       usb_audio_info(chip,
+                                "set volume quirk for CM102-A+/102S+\n");
+                       cval->min = -256;
+               }
+               break;
+
        case USB_ID(0x0471, 0x0101):
        case USB_ID(0x0471, 0x0104):
        case USB_ID(0x0471, 0x0105):
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index 0b457e8e0f0c..194759ec9e70 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -134,11 +134,15 @@ struct seccomp_data {
 #endif
 
 #ifndef SECCOMP_FILTER_FLAG_TSYNC
-#define SECCOMP_FILTER_FLAG_TSYNC 1
+#define SECCOMP_FILTER_FLAG_TSYNC (1UL << 0)
 #endif
 
 #ifndef SECCOMP_FILTER_FLAG_LOG
-#define SECCOMP_FILTER_FLAG_LOG 2
+#define SECCOMP_FILTER_FLAG_LOG (1UL << 1)
+#endif
+
+#ifndef SECCOMP_FILTER_FLAG_SPEC_ALLOW
+#define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2)
 #endif
 
 #ifndef seccomp
@@ -2063,14 +2067,26 @@ TEST(seccomp_syscall_mode_lock)
 TEST(detect_seccomp_filter_flags)
 {
        unsigned int flags[] = { SECCOMP_FILTER_FLAG_TSYNC,
-                                SECCOMP_FILTER_FLAG_LOG };
+                                SECCOMP_FILTER_FLAG_LOG,
+                                SECCOMP_FILTER_FLAG_SPEC_ALLOW };
        unsigned int flag, all_flags;
        int i;
        long ret;
 
        /* Test detection of known-good filter flags */
        for (i = 0, all_flags = 0; i < ARRAY_SIZE(flags); i++) {
+               int bits = 0;
+
                flag = flags[i];
+               /* Make sure the flag is a single bit! */
+               while (flag) {
+                       if (flag & 0x1)
+                               bits ++;
+                       flag >>= 1;
+               }
+               ASSERT_EQ(1, bits);
+               flag = flags[i];
+
                ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL);
                ASSERT_NE(ENOSYS, errno) {
                        TH_LOG("Kernel does not support seccomp syscall!");
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index 5a11f4d3972c..d72b8481f250 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -279,8 +279,8 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
        u8 prop;
        int ret;
 
-       ret = kvm_read_guest(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
-                            &prop, 1);
+       ret = kvm_read_guest_lock(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
+                                 &prop, 1);
 
        if (ret)
                return ret;
@@ -413,8 +413,9 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
                 * this very same byte in the last iteration. Reuse that.
                 */
                if (byte_offset != last_byte_offset) {
-                       ret = kvm_read_guest(vcpu->kvm, pendbase + byte_offset,
-                                            &pendmask, 1);
+                       ret = kvm_read_guest_lock(vcpu->kvm,
+                                                 pendbase + byte_offset,
+                                                 &pendmask, 1);
                        if (ret) {
                                kfree(intids);
                                return ret;
@@ -740,7 +741,7 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
                return false;
 
        /* Each 1st level entry is represented by a 64-bit value. */
-       if (kvm_read_guest(its->dev->kvm,
+       if (kvm_read_guest_lock(its->dev->kvm,
                           BASER_ADDRESS(baser) + index * sizeof(indirect_ptr),
                           &indirect_ptr, sizeof(indirect_ptr)))
                return false;
@@ -1297,8 +1298,8 @@ static void vgic_its_process_commands(struct kvm *kvm, struct vgic_its *its)
        cbaser = CBASER_ADDRESS(its->cbaser);
 
        while (its->cwriter != its->creadr) {
-               int ret = kvm_read_guest(kvm, cbaser + its->creadr,
-                                        cmd_buf, ITS_CMD_SIZE);
+               int ret = kvm_read_guest_lock(kvm, cbaser + its->creadr,
+                                             cmd_buf, ITS_CMD_SIZE);
                /*
                 * If kvm_read_guest() fails, this could be due to the guest
                 * programming a bogus value in CBASER or something else going
@@ -1830,7 +1831,7 @@ static int scan_its_table(struct vgic_its *its, gpa_t base, int size, int esz,
                int next_offset;
                size_t byte_offset;
 
-               ret = kvm_read_guest(kvm, gpa, entry, esz);
+               ret = kvm_read_guest_lock(kvm, gpa, entry, esz);
                if (ret)
                        return ret;
 
@@ -2191,7 +2192,7 @@ static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
        int ret;
 
        BUG_ON(esz > sizeof(val));
-       ret = kvm_read_guest(kvm, gpa, &val, esz);
+       ret = kvm_read_guest_lock(kvm, gpa, &val, esz);
        if (ret)
                return ret;
        val = le64_to_cpu(val);
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 9dcc31600a8b..6b4fcd52f14c 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -297,7 +297,7 @@ int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq)
        bit_nr = irq->intid % BITS_PER_BYTE;
        ptr = pendbase + byte_offset;
 
-       ret = kvm_read_guest(kvm, ptr, &val, 1);
+       ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
        if (ret)
                return ret;
 
@@ -350,7 +350,7 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
                ptr = pendbase + byte_offset;
 
                if (byte_offset != last_byte_offset) {
-                       ret = kvm_read_guest(kvm, ptr, &val, 1);
+                       ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
                        if (ret)
                                return ret;
                        last_byte_offset = byte_offset;
