diff --git a/Documentation/filesystems/affs.txt b/Documentation/filesystems/affs.txt
index 71b63c2b9841..a8f1a58e3692 100644
--- a/Documentation/filesystems/affs.txt
+++ b/Documentation/filesystems/affs.txt
@@ -93,13 +93,15 @@ The Amiga protection flags RWEDRWEDHSPARWED are handled as follows:
 
   - R maps to r for user, group and others. On directories, R implies x.
 
-  - If both W and D are allowed, w will be set.
+  - W maps to w.
 
   - E maps to x.
 
-  - H and P are always retained and ignored under Linux.
+  - D is ignored.
 
-  - A is always reset when a file is written to.
+  - H, S and P are always retained and ignored under Linux.
+
+  - A is cleared when a file is written to.
 
 User id and group id will be used unless set[gu]id are given as mount
 options. Since most of the Amiga file systems are single user systems
@@ -111,11 +113,13 @@ Linux -> Amiga:
 
 The Linux rwxrwxrwx file mode is handled as follows:
 
-  - r permission will set R for user, group and others.
+  - r permission will allow R for user, group and others.
+
+  - w permission will allow W for user, group and others.
 
-  - w permission will set W and D for user, group and others.
+  - x permission of the user will allow E for plain files.
 
-  - x permission of the user will set E for plain files.
+  - D will be allowed for user, group and others.
 
   - All other flags (suid, sgid, ...) are ignored and will
     not be retained.
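
A quick standalone illustration of the Amiga-to-Linux direction described above. This is not the affs driver code: the AMIGA_* constants and the single rwx set are simplifications for the example, and the real on-disk encoding of the protection mask is not reproduced here.

#include <stdio.h>
#include <sys/stat.h>

/* Hypothetical flag names for this sketch only. */
#define AMIGA_R 0x1
#define AMIGA_W 0x2
#define AMIGA_E 0x4

static mode_t amiga_to_linux(unsigned int prot, int is_dir)
{
        mode_t mode = 0;

        if (prot & AMIGA_R) {                   /* R maps to r for u/g/o */
                mode |= S_IRUSR | S_IRGRP | S_IROTH;
                if (is_dir)                     /* on directories, R implies x */
                        mode |= S_IXUSR | S_IXGRP | S_IXOTH;
        }
        if (prot & AMIGA_W)                     /* W maps to w */
                mode |= S_IWUSR | S_IWGRP | S_IWOTH;
        if (prot & AMIGA_E)                     /* E maps to x */
                mode |= S_IXUSR | S_IXGRP | S_IXOTH;

        /* D is ignored; H, S, P and A do not affect the mode at all. */
        return mode;
}

int main(void)
{
        printf("file RWE -> %04o\n", (unsigned int)amiga_to_linux(AMIGA_R | AMIGA_W | AMIGA_E, 0));
        printf("dir  R   -> %04o\n", (unsigned int)amiga_to_linux(AMIGA_R, 1));
        return 0;
}
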
diff --git a/Makefile b/Makefile
index 6fa3278df77c..ba9d0b4476e1 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 19
-SUBLEVEL = 143
+SUBLEVEL = 144
 EXTRAVERSION =
 NAME = "People's Front"
 
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 8b284cbf8162..a3b6f58d188c 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -83,11 +83,12 @@
  * IMO:                Override CPSR.I and enable signaling with VI
  * FMO:                Override CPSR.F and enable signaling with VF
  * SWIO:       Turn set/way invalidates into set/way clean+invalidate
+ * PTW:                Take a stage2 fault if a stage1 walk steps in device memory
  */
 #define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \
                         HCR_TVM | HCR_BSU_IS | HCR_FB | HCR_TAC | \
                         HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \
-                        HCR_FMO | HCR_IMO)
+                        HCR_FMO | HCR_IMO | HCR_PTW )
 #define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF)
 #define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK)
 #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 102b5a5c47b6..e3c0dba5bdde 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -87,6 +87,34 @@ extern u32 __init_stage2_translation(void);
                *__hyp_this_cpu_ptr(sym);                               \
         })
 
+#define __KVM_EXTABLE(from, to)                                                \
+       "       .pushsection    __kvm_ex_table, \"a\"\n"                \
+       "       .align          3\n"                                    \
+       "       .long           (" #from " - .), (" #to " - .)\n"       \
+       "       .popsection\n"
+
+
+#define __kvm_at(at_op, addr)                                          \
+( {                                                                    \
+       int __kvm_at_err = 0;                                           \
+       u64 spsr, elr;                                                  \
+       asm volatile(                                                   \
+       "       mrs     %1, spsr_el2\n"                                 \
+       "       mrs     %2, elr_el2\n"                                  \
+       "1:     at      "at_op", %3\n"                                  \
+       "       isb\n"                                                  \
+       "       b       9f\n"                                           \
+       "2:     msr     spsr_el2, %1\n"                                 \
+       "       msr     elr_el2, %2\n"                                  \
+       "       mov     %w0, %4\n"                                      \
+       "9:\n"                                                          \
+       __KVM_EXTABLE(1b, 2b)                                           \
+       : "+r" (__kvm_at_err), "=&r" (spsr), "=&r" (elr)                \
+       : "r" (addr), "i" (-EFAULT));                                   \
+       __kvm_at_err;                                                   \
+} )
+
+
 #else /* __ASSEMBLY__ */
 
 .macro hyp_adr_this_cpu reg, sym, tmp
@@ -111,6 +139,21 @@ extern u32 __init_stage2_translation(void);
        kern_hyp_va     \vcpu
 .endm
 
+/*
+ * KVM extable for unexpected exceptions.
+ * In the same format _asm_extable, but output to a different section so that
+ * it can be mapped to EL2. The KVM version is not sorted. The caller must
+ * ensure:
+ * x18 has the hypervisor value to allow any Shadow-Call-Stack instrumented
+ * code to write to it, and that SPSR_EL2 and ELR_EL2 are restored by the fixup.
+ */
+.macro _kvm_extable, from, to
+       .pushsection    __kvm_ex_table, "a"
+       .align          3
+       .long           (\from - .), (\to - .)
+       .popsection
+.endm
+
 #endif
 
 #endif /* __ARM_KVM_ASM_H__ */
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index d6050c6e65bc..69e7c8d4a00f 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -24,6 +24,13 @@ ENTRY(_text)
 
 jiffies = jiffies_64;
 
+
+#define HYPERVISOR_EXTABLE                                     \
+       . = ALIGN(SZ_8);                                        \
+       __start___kvm_ex_table = .;                             \
+       *(__kvm_ex_table)                                       \
+       __stop___kvm_ex_table = .;
+
 #define HYPERVISOR_TEXT                                        \
        /*                                              \
         * Align to 4 KB so that                        \
@@ -39,6 +46,7 @@ jiffies = jiffies_64;
        __hyp_idmap_text_end = .;                       \
        __hyp_text_start = .;                           \
        *(.hyp.text)                                    \
+       HYPERVISOR_EXTABLE                              \
        __hyp_text_end = .;
 
 #define IDMAP_TEXT                                     \
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
index fad1e164fe48..fc83e932afbe 100644
--- a/arch/arm64/kvm/hyp/entry.S
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -17,6 +17,7 @@
 
 #include <linux/linkage.h>
 
+#include <asm/alternative.h>
 #include <asm/asm-offsets.h>
 #include <asm/assembler.h>
 #include <asm/fpsimdmacros.h>
@@ -62,6 +63,20 @@ ENTRY(__guest_enter)
        // Store the host regs
        save_callee_saved_regs x1
 
+       // Now the host state is stored if we have a pending RAS SError it must
+       // affect the host. If any asynchronous exception is pending we defer
+       // the guest entry. The DSB isn't necessary before v8.2 as any SError
+       // would be fatal.
+alternative_if ARM64_HAS_RAS_EXTN
+       dsb     nshst
+       isb
+alternative_else_nop_endif
+       mrs     x1, isr_el1
+       cbz     x1,  1f
+       mov     x0, #ARM_EXCEPTION_IRQ
+       ret
+
+1:
        add     x18, x0, #VCPU_CONTEXT
 
        // Restore guest regs x0-x17
@@ -148,18 +163,22 @@ alternative_endif
        // This is our single instruction exception window. A pending
        // SError is guaranteed to occur at the earliest when we unmask
        // it, and at the latest just after the ISB.
-       .global abort_guest_exit_start
 abort_guest_exit_start:
 
        isb
 
-       .global abort_guest_exit_end
 abort_guest_exit_end:
+       msr     daifset, #4     // Mask aborts
+       ret
+
+       _kvm_extable    abort_guest_exit_start, 9997f
+       _kvm_extable    abort_guest_exit_end, 9997f
+9997:
+       msr     daifset, #4     // Mask aborts
+       mov     x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
 
-       // If the exception took place, restore the EL1 exception
-       // context so that we can report some information.
-       // Merge the exception code with the SError pending bit.
-       tbz     x0, #ARM_EXIT_WITH_SERROR_BIT, 1f
+       // restore the EL1 exception context so that we can report some
+       // information. Merge the exception code with the SError pending bit.
        msr     elr_el2, x2
        msr     esr_el2, x3
        msr     spsr_el2, x4
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 24b4fbafe3e4..ea063312bca1 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -26,6 +26,30 @@
 #include <asm/kvm_mmu.h>
 #include <asm/mmu.h>
 
+.macro save_caller_saved_regs_vect
+       /* x0 and x1 were saved in the vector entry */
+       stp     x2, x3,   [sp, #-16]!
+       stp     x4, x5,   [sp, #-16]!
+       stp     x6, x7,   [sp, #-16]!
+       stp     x8, x9,   [sp, #-16]!
+       stp     x10, x11, [sp, #-16]!
+       stp     x12, x13, [sp, #-16]!
+       stp     x14, x15, [sp, #-16]!
+       stp     x16, x17, [sp, #-16]!
+.endm
+
+.macro restore_caller_saved_regs_vect
+       ldp     x16, x17, [sp], #16
+       ldp     x14, x15, [sp], #16
+       ldp     x12, x13, [sp], #16
+       ldp     x10, x11, [sp], #16
+       ldp     x8, x9,   [sp], #16
+       ldp     x6, x7,   [sp], #16
+       ldp     x4, x5,   [sp], #16
+       ldp     x2, x3,   [sp], #16
+       ldp     x0, x1,   [sp], #16
+.endm
+
        .text
        .pushsection    .hyp.text, "ax"
 
@@ -162,28 +186,24 @@ el1_error:
        mov     x0, #ARM_EXCEPTION_EL1_SERROR
        b       __guest_exit
 
+el2_sync:
+       save_caller_saved_regs_vect
+       stp     x29, x30, [sp, #-16]!
+       bl      kvm_unexpected_el2_exception
+       ldp     x29, x30, [sp], #16
+       restore_caller_saved_regs_vect
+
+       eret
+
 el2_error:
-       ldp     x0, x1, [sp], #16
+       save_caller_saved_regs_vect
+       stp     x29, x30, [sp, #-16]!
+
+       bl      kvm_unexpected_el2_exception
+
+       ldp     x29, x30, [sp], #16
+       restore_caller_saved_regs_vect
 
-       /*
-        * Only two possibilities:
-        * 1) Either we come from the exit path, having just unmasked
-        *    PSTATE.A: change the return code to an EL2 fault, and
-        *    carry on, as we're already in a sane state to handle it.
-        * 2) Or we come from anywhere else, and that's a bug: we panic.
-        *
-        * For (1), x0 contains the original return code and x1 doesn't
-        * contain anything meaningful at that stage. We can reuse them
-        * as temp registers.
-        * For (2), who cares?
-        */
-       mrs     x0, elr_el2
-       adr     x1, abort_guest_exit_start
-       cmp     x0, x1
-       adr     x1, abort_guest_exit_end
-       ccmp    x0, x1, #4, ne
-       b.ne    __hyp_panic
-       mov     x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
        eret
 
 ENTRY(__hyp_do_panic)
@@ -212,7 +232,6 @@ ENDPROC(\label)
        invalid_vector  el2t_irq_invalid
        invalid_vector  el2t_fiq_invalid
        invalid_vector  el2t_error_invalid
-       invalid_vector  el2h_sync_invalid
        invalid_vector  el2h_irq_invalid
        invalid_vector  el2h_fiq_invalid
        invalid_vector  el1_fiq_invalid
@@ -240,7 +259,7 @@ ENTRY(__kvm_hyp_vector)
        invalid_vect    el2t_fiq_invalid        // FIQ EL2t
        invalid_vect    el2t_error_invalid      // Error EL2t
 
-       invalid_vect    el2h_sync_invalid       // Synchronous EL2h
+       valid_vect      el2_sync                // Synchronous EL2h
        invalid_vect    el2h_irq_invalid        // IRQ EL2h
        invalid_vect    el2h_fiq_invalid        // FIQ EL2h
        valid_vect      el2_error               // Error EL2h
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 3cdefd84af54..f146bff53edf 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -23,6 +23,7 @@
 #include <kvm/arm_psci.h>
 
 #include <asm/cpufeature.h>
+#include <asm/extable.h>
 #include <asm/kprobes.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_emulate.h>
@@ -34,6 +35,9 @@
 #include <asm/processor.h>
 #include <asm/thread_info.h>
 
+extern struct exception_table_entry __start___kvm_ex_table;
+extern struct exception_table_entry __stop___kvm_ex_table;
+
 /* Check whether the FP regs were dirtied while in the host-side run loop: */
 static bool __hyp_text update_fp_enabled(struct kvm_vcpu *vcpu)
 {
@@ -264,10 +268,10 @@ static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
         * saved the guest context yet, and we may return early...
         */
        par = read_sysreg(par_el1);
-       asm volatile("at s1e1r, %0" : : "r" (far));
-       isb();
-
-       tmp = read_sysreg(par_el1);
+       if (!__kvm_at("s1e1r", far))
+               tmp = read_sysreg(par_el1);
+       else
+               tmp = 1; /* back to the guest */
        write_sysreg(par, par_el1);
 
        if (unlikely(tmp & 1))
@@ -663,3 +667,30 @@ void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
 
        unreachable();
 }
+
+asmlinkage void __hyp_text kvm_unexpected_el2_exception(void)
+{
+       unsigned long addr, fixup;
+       struct kvm_cpu_context *host_ctxt;
+       struct exception_table_entry *entry, *end;
+       unsigned long elr_el2 = read_sysreg(elr_el2);
+
+       entry = hyp_symbol_addr(__start___kvm_ex_table);
+       end = hyp_symbol_addr(__stop___kvm_ex_table);
+       host_ctxt = __hyp_this_cpu_ptr(kvm_host_cpu_state);
+
+       while (entry < end) {
+               addr = (unsigned long)&entry->insn + entry->insn;
+               fixup = (unsigned long)&entry->fixup + entry->fixup;
+
+               if (addr != elr_el2) {
+                       entry++;
+                       continue;
+               }
+
+               write_sysreg(fixup, elr_el2);
+               return;
+       }
+
+       hyp_panic(host_ctxt);
+}
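
kvm_unexpected_el2_exception() above resolves each __kvm_ex_table entry by adding the stored 32-bit offset to the address of the field that holds it, the same self-relative scheme __KVM_EXTABLE emits with "(from - .)". A minimal userspace sketch of that lookup, with illustrative types and names rather than the kernel's:

#include <stdio.h>

struct extable_entry {
        int insn;       /* offset of faulting instruction, relative to &insn */
        int fixup;      /* offset of fixup code, relative to &fixup */
};

static char fault_stub, fixup_stub;     /* stand-ins for code addresses */
static struct extable_entry table[1];

static unsigned long lookup_fixup(const struct extable_entry *start,
                                  const struct extable_entry *end,
                                  unsigned long fault_addr)
{
        const struct extable_entry *entry;

        for (entry = start; entry < end; entry++) {
                unsigned long addr  = (unsigned long)&entry->insn  + entry->insn;
                unsigned long fixup = (unsigned long)&entry->fixup + entry->fixup;

                if (addr == fault_addr)
                        return fixup;   /* the handler writes this to ELR_EL2 */
        }
        return 0;                       /* no entry: the handler panics */
}

int main(void)
{
        /* Build one entry the way the .pushsection asm would. */
        table[0].insn  = (int)((char *)&fault_stub - (char *)&table[0].insn);
        table[0].fixup = (int)((char *)&fixup_stub - (char *)&table[0].fixup);

        printf("fixup matches: %d\n",
               lookup_fixup(table, table + 1, (unsigned long)&fault_stub) ==
               (unsigned long)&fixup_stub);
        return 0;
}
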
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index 5ec546b5eed1..d16e6654a655 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -240,6 +240,8 @@ static int bmips_boot_secondary(int cpu, struct task_struct *idle)
  */
 static void bmips_init_secondary(void)
 {
+       bmips_cpu_setup();
+
        switch (current_cpu_type()) {
        case CPU_BMIPS4350:
        case CPU_BMIPS4380:
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 05a539d3a597..7650edd5cf7f 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -1789,7 +1789,11 @@ static void setup_scache(void)
                                printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
                                       scache_size >> 10,
                                       way_string[c->scache.ways], c->scache.linesz);
+
+                               if (current_cpu_type() == CPU_BMIPS5000)
+                                       c->options |= MIPS_CPU_INCLUSIVE_CACHES;
                        }
+
 #else
                        if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
                                panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
index 0095ddb58ff6..50f6661ba566 100644
--- a/arch/s390/include/asm/percpu.h
+++ b/arch/s390/include/asm/percpu.h
@@ -29,7 +29,7 @@
        typedef typeof(pcp) pcp_op_T__;                                 \
        pcp_op_T__ old__, new__, prev__;                                \
        pcp_op_T__ *ptr__;                                              \
-       preempt_disable();                                              \
+       preempt_disable_notrace();                                      \
        ptr__ = raw_cpu_ptr(&(pcp));                                    \
        prev__ = *ptr__;                                                \
        do {                                                            \
@@ -37,7 +37,7 @@
                new__ = old__ op (val);                                 \
                prev__ = cmpxchg(ptr__, old__, new__);                  \
        } while (prev__ != old__);                                      \
-       preempt_enable();                                               \
+       preempt_enable_notrace();                                       \
        new__;                                                          \
 })
 
@@ -68,7 +68,7 @@
        typedef typeof(pcp) pcp_op_T__;                                 \
        pcp_op_T__ val__ = (val);                                       \
        pcp_op_T__ old__, *ptr__;                                       \
-       preempt_disable();                                              \
+       preempt_disable_notrace();                                      \
        ptr__ = raw_cpu_ptr(&(pcp));                            \
        if (__builtin_constant_p(val__) &&                              \
            ((szcast)val__ > -129) && ((szcast)val__ < 128)) {          \
@@ -84,7 +84,7 @@
                        : [val__] "d" (val__)                           \
                        : "cc");                                        \
        }                                                               \
-       preempt_enable();                                               \
+       preempt_enable_notrace();                                       \
 }
 
 #define this_cpu_add_4(pcp, val) arch_this_cpu_add(pcp, val, "laa", "asi", int)
@@ -95,14 +95,14 @@
        typedef typeof(pcp) pcp_op_T__;                                 \
        pcp_op_T__ val__ = (val);                                       \
        pcp_op_T__ old__, *ptr__;                                       \
-       preempt_disable();                                              \
+       preempt_disable_notrace();                                      \
        ptr__ = raw_cpu_ptr(&(pcp));                                    \
        asm volatile(                                                   \
                op "    %[old__],%[val__],%[ptr__]\n"                   \
                : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__)           \
                : [val__] "d" (val__)                                   \
                : "cc");                                                \
-       preempt_enable();                                               \
+       preempt_enable_notrace();                                       \
        old__ + val__;                                                  \
 })
 
@@ -114,14 +114,14 @@
        typedef typeof(pcp) pcp_op_T__;                                 \
        pcp_op_T__ val__ = (val);                                       \
        pcp_op_T__ old__, *ptr__;                                       \
-       preempt_disable();                                              \
+       preempt_disable_notrace();                                      \
        ptr__ = raw_cpu_ptr(&(pcp));                                    \
        asm volatile(                                                   \
                op "    %[old__],%[val__],%[ptr__]\n"                   \
                : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__)           \
                : [val__] "d" (val__)                                   \
                : "cc");                                                \
-       preempt_enable();                                               \
+       preempt_enable_notrace();                                       \
 }
 
 #define this_cpu_and_4(pcp, val)       arch_this_cpu_to_op(pcp, val, "lan")
@@ -136,10 +136,10 @@
        typedef typeof(pcp) pcp_op_T__;                                 \
        pcp_op_T__ ret__;                                               \
        pcp_op_T__ *ptr__;                                              \
-       preempt_disable();                                              \
+       preempt_disable_notrace();                                      \
        ptr__ = raw_cpu_ptr(&(pcp));                                    \
        ret__ = cmpxchg(ptr__, oval, nval);                             \
-       preempt_enable();                                               \
+       preempt_enable_notrace();                                       \
        ret__;                                                          \
 })
 
@@ -152,10 +152,10 @@
 ({                                                                     \
        typeof(pcp) *ptr__;                                             \
        typeof(pcp) ret__;                                              \
-       preempt_disable();                                              \
+       preempt_disable_notrace();                                      \
        ptr__ = raw_cpu_ptr(&(pcp));                                    \
        ret__ = xchg(ptr__, nval);                                      \
-       preempt_enable();                                               \
+       preempt_enable_notrace();                                       \
        ret__;                                                          \
 })
 
@@ -171,11 +171,11 @@
        typeof(pcp1) *p1__;                                             \
        typeof(pcp2) *p2__;                                             \
        int ret__;                                                      \
-       preempt_disable();                                              \
+       preempt_disable_notrace();                                      \
        p1__ = raw_cpu_ptr(&(pcp1));                                    \
        p2__ = raw_cpu_ptr(&(pcp2));                                    \
        ret__ = __cmpxchg_double(p1__, p2__, o1__, o2__, n1__, n2__);   \
-       preempt_enable();                                               \
+       preempt_enable_notrace();                                       \
        ret__;                                                          \
 })
 
diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c
index d71d72cf6c66..4686757a74d7 100644
--- a/arch/x86/mm/numa_emulation.c
+++ b/arch/x86/mm/numa_emulation.c
@@ -322,7 +322,7 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
                                              u64 addr, u64 max_addr, u64 size)
 {
        return split_nodes_size_interleave_uniform(ei, pi, addr, max_addr, size,
-                       0, NULL, NUMA_NO_NODE);
+                       0, NULL, 0);
 }
 
 int __init setup_emu2phys_nid(int *dfl_phys_nid)
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 6b372fa58382..fead7243930c 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4492,9 +4492,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
        /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
        { "C300-CTFDDAC128MAG", "0001",         ATA_HORKAGE_NONCQ, },
 
-       /* Some Sandisk SSDs lock up hard with NCQ enabled.  Reported on
-          SD7SN6S256G and SD8SN8U256G */
-       { "SanDisk SD[78]SN*G", NULL,           ATA_HORKAGE_NONCQ, },
+       /* Sandisk SD7/8/9s lock up hard on large trims */
+       { "SanDisk SD[789]*",   NULL,           ATA_HORKAGE_MAX_TRIM_128M, },
 
        /* devices which puke on READ_NATIVE_MAX */
        { "HDS724040KLSA80",    "KFAOA20N",     ATA_HORKAGE_BROKEN_HPA, },
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 6c2c2b07f029..e7af41d95490 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -2391,6 +2391,7 @@ static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf)
 
 static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf)
 {
+       struct ata_device *dev = args->dev;
        u16 min_io_sectors;
 
        rbuf[1] = 0xb0;
@@ -2416,7 +2417,12 @@ static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf)
         * with the unmap bit set.
         */
        if (ata_id_has_trim(args->id)) {
-               put_unaligned_be64(65535 * ATA_MAX_TRIM_RNUM, &rbuf[36]);
+               u64 max_blocks = 65535 * ATA_MAX_TRIM_RNUM;
+
+               if (dev->horkage & ATA_HORKAGE_MAX_TRIM_128M)
+                       max_blocks = 128 << (20 - SECTOR_SHIFT);
+
+               put_unaligned_be64(max_blocks, &rbuf[36]);
                put_unaligned_be32(1, &rbuf[28]);
        }
 
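
For the VPD page B0 change above: 128 << (20 - SECTOR_SHIFT) is 128 MiB expressed in 512-byte sectors, replacing the much larger default of 65535 * ATA_MAX_TRIM_RNUM ranges. A small standalone arithmetic check, assuming SECTOR_SHIFT is 9 and ATA_MAX_TRIM_RNUM is 64 as in the kernel headers:

#include <stdio.h>
#include <stdint.h>

#define SECTOR_SHIFT      9     /* 512-byte sectors */
#define ATA_MAX_TRIM_RNUM 64    /* TRIM ranges per 512-byte payload block */

int main(void)
{
        uint64_t dflt   = 65535ULL * ATA_MAX_TRIM_RNUM;  /* default max LBA count */
        uint64_t capped = 128 << (20 - SECTOR_SHIFT);    /* 128 MiB in sectors */

        printf("default: %llu sectors = %llu MiB\n",
               (unsigned long long)dflt, (unsigned long long)(dflt * 512 >> 20));
        printf("capped : %llu sectors = %llu MiB\n",
               (unsigned long long)capped, (unsigned long long)(capped * 512 >> 20));
        return 0;
}
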
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 6df894d65d9e..2d182dc1b49e 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -148,7 +148,8 @@ static void enter_s2idle_proper(struct cpuidle_driver *drv,
         */
        stop_critical_timings();
        drv->states[index].enter_s2idle(dev, drv, index);
-       WARN_ON(!irqs_disabled());
+       if (WARN_ON_ONCE(!irqs_disabled()))
+               local_irq_disable();
        /*
         * timekeeping_resume() that will be called by tick_unfreeze() for the
         * first CPU executing it calls functions containing RCU read-side
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index dbc51154f122..86427f6ba78c 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -1677,6 +1677,8 @@ static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
                return NULL;
 
        dmac_pdev = of_find_device_by_node(dma_spec->np);
+       if (!dmac_pdev)
+               return NULL;
 
        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);
diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c
index 91fd395c90c4..8344a60c2131 100644
--- a/drivers/dma/of-dma.c
+++ b/drivers/dma/of-dma.c
@@ -72,12 +72,12 @@ static struct dma_chan *of_dma_router_xlate(struct of_phandle_args *dma_spec,
                return NULL;
 
        chan = ofdma_target->of_dma_xlate(&dma_spec_target, ofdma_target);
-       if (chan) {
-               chan->router = ofdma->dma_router;
-               chan->route_data = route_data;
-       } else {
+       if (IS_ERR_OR_NULL(chan)) {
                ofdma->dma_router->route_free(ofdma->dma_router->dev,
                                              route_data);
+       } else {
+               chan->router = ofdma->dma_router;
+               chan->route_data = route_data;
        }
 
        /*
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index bc8050c025b7..c564df713efc 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2769,6 +2769,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
        while (burst != (1 << desc->rqcfg.brst_size))
                desc->rqcfg.brst_size++;
 
+       desc->rqcfg.brst_len = get_burst_len(desc, len);
        /*
         * If burst size is smaller than bus width then make sure we only
         * transfer one at a time to avoid a burst stradling an MFIFO entry.
@@ -2776,7 +2777,6 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
        if (desc->rqcfg.brst_size * 8 < pl330->pcfg.data_bus_width)
                desc->rqcfg.brst_len = 1;
 
-       desc->rqcfg.brst_len = get_burst_len(desc, len);
        desc->bytes_requested = len;
 
        desc->txd.flags = flags;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 9cde79a7335c..739ca9c2081a 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -117,12 +117,22 @@ static int a6xx_gmu_start(struct a6xx_gmu *gmu)
 {
        int ret;
        u32 val;
+       u32 mask, reset_val;
+
+       val = gmu_read(gmu, REG_A6XX_GMU_CM3_DTCM_START + 0xff8);
+       if (val <= 0x20010004) {
+               mask = 0xffffffff;
+               reset_val = 0xbabeface;
+       } else {
+               mask = 0x1ff;
+               reset_val = 0x100;
+       }
 
        gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
        gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0);
 
        ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val,
-               val == 0xbabeface, 100, 10000);
+               (val & mask) == reset_val, 100, 10000);
 
        if (ret)
                dev_err(gmu->dev, "GMU firmware initialization timed out\n");
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 6f81de85fb86..7f45486b6650 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -1358,6 +1358,13 @@ static int msm_pdev_remove(struct platform_device *pdev)
        return 0;
 }
 
+static void msm_pdev_shutdown(struct platform_device *pdev)
+{
+       struct drm_device *drm = platform_get_drvdata(pdev);
+
+       drm_atomic_helper_shutdown(drm);
+}
+
 static const struct of_device_id dt_match[] = {
        { .compatible = "qcom,mdp4", .data = (void *)KMS_MDP4 },
        { .compatible = "qcom,mdss", .data = (void *)KMS_MDP5 },
@@ -1369,6 +1376,7 @@ MODULE_DEVICE_TABLE(of, dt_match);
 static struct platform_driver msm_platform_driver = {
        .probe      = msm_pdev_probe,
        .remove     = msm_pdev_remove,
+       .shutdown   = msm_pdev_shutdown,
        .driver     = {
                .name   = "msm",
                .of_match_table = dt_match,
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 2c85d075daee..05122167d9d8 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1425,6 +1425,17 @@ static void hid_output_field(const struct hid_device *hid,
        }
 }
 
+/*
+ * Compute the size of a report.
+ */
+static size_t hid_compute_report_size(struct hid_report *report)
+{
+       if (report->size)
+               return ((report->size - 1) >> 3) + 1;
+
+       return 0;
+}
+
 /*
  * Create a report. 'data' has to be allocated using
  * hid_alloc_report_buf() so that it has proper size.
@@ -1437,7 +1448,7 @@ void hid_output_report(struct hid_report *report, __u8 *data)
        if (report->id > 0)
                *data++ = report->id;
 
-       memset(data, 0, ((report->size - 1) >> 3) + 1);
+       memset(data, 0, hid_compute_report_size(report));
        for (n = 0; n < report->maxfield; n++)
                hid_output_field(report->device, report->field[n], data);
 }
@@ -1564,7 +1575,7 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
                csize--;
        }
 
-       rsize = ((report->size - 1) >> 3) + 1;
+       rsize = hid_compute_report_size(report);
 
        if (report_enum->numbered && rsize >= HID_MAX_BUFFER_SIZE)
                rsize = HID_MAX_BUFFER_SIZE - 1;
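
The new hid_compute_report_size() above exists because report->size is an unsigned bit count: for an empty report the old expression ((size - 1) >> 3) + 1 wraps around instead of giving 0, and that bogus length was then used for memset() and buffer bounds. A standalone demonstration:

#include <stdio.h>
#include <stdint.h>

static size_t report_bytes_old(uint32_t bits)
{
        return ((bits - 1) >> 3) + 1;           /* wraps around when bits == 0 */
}

static size_t report_bytes_new(uint32_t bits)
{
        if (bits)
                return ((bits - 1) >> 3) + 1;
        return 0;
}

int main(void)
{
        printf("old(0) = %zu\n", report_bytes_old(0));   /* 536870912, not 0 */
        printf("new(0) = %zu\n", report_bytes_new(0));   /* 0 */
        printf("old(9) = %zu, new(9) = %zu\n",
               report_bytes_old(9), report_bytes_new(9)); /* both 2 */
        return 0;
}
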
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 51bfe23d00bc..a9da1526c40a 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -1125,6 +1125,10 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
        }
 
 mapped:
+       /* Mapping failed, bail out */
+       if (!bit)
+               return;
+
        if (device->driver->input_mapped &&
            device->driver->input_mapped(device, hidinput, field, usage,
                                         &bit, &max) < 0) {
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 8baf10beb1d5..ccda72f748ee 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -841,6 +841,8 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
                        code = BTN_0  + ((usage->hid - 1) & HID_USAGE);
 
                hid_map_usage(hi, usage, bit, max, EV_KEY, code);
+               if (!*bit)
+                       return -1;
                input_set_capability(hi->input, EV_KEY, code);
                return 1;
 
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index 5c677ba44014..b201129a9bea 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -760,15 +760,18 @@ static ssize_t applesmc_light_show(struct device *dev,
        }
 
        ret = applesmc_read_key(LIGHT_SENSOR_LEFT_KEY, buffer, data_length);
+       if (ret)
+               goto out;
        /* newer macbooks report a single 10-bit bigendian value */
        if (data_length == 10) {
                left = be16_to_cpu(*(__be16 *)(buffer + 6)) >> 2;
                goto out;
        }
        left = buffer[2];
+
+       ret = applesmc_read_key(LIGHT_SENSOR_RIGHT_KEY, buffer, data_length);
        if (ret)
                goto out;
-       ret = applesmc_read_key(LIGHT_SENSOR_RIGHT_KEY, buffer, data_length);
        right = buffer[2];
 
 out:
@@ -817,12 +820,11 @@ static ssize_t applesmc_show_fan_speed(struct device *dev,
                  to_index(attr));
 
        ret = applesmc_read_key(newkey, buffer, 2);
-       speed = ((buffer[0] << 8 | buffer[1]) >> 2);
-
        if (ret)
                return ret;
-       else
-               return snprintf(sysfsbuf, PAGE_SIZE, "%u\n", speed);
+
+       speed = ((buffer[0] << 8 | buffer[1]) >> 2);
+       return snprintf(sysfsbuf, PAGE_SIZE, "%u\n", speed);
 }
 
 static ssize_t applesmc_store_fan_speed(struct device *dev,
@@ -858,12 +860,11 @@ static ssize_t applesmc_show_fan_manual(struct device *dev,
        u8 buffer[2];
 
        ret = applesmc_read_key(FANS_MANUAL, buffer, 2);
-       manual = ((buffer[0] << 8 | buffer[1]) >> to_index(attr)) & 0x01;
-
        if (ret)
                return ret;
-       else
-               return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", manual);
+
+       manual = ((buffer[0] << 8 | buffer[1]) >> to_index(attr)) & 0x01;
+       return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", manual);
 }
 
 static ssize_t applesmc_store_fan_manual(struct device *dev,
@@ -879,10 +880,11 @@ static ssize_t applesmc_store_fan_manual(struct device *dev,
                return -EINVAL;
 
        ret = applesmc_read_key(FANS_MANUAL, buffer, 2);
-       val = (buffer[0] << 8 | buffer[1]);
        if (ret)
                goto out;
 
+       val = (buffer[0] << 8 | buffer[1]);
+
        if (input)
                val = val | (0x01 << to_index(attr));
        else
@@ -958,13 +960,12 @@ static ssize_t applesmc_key_count_show(struct device *dev,
        u32 count;
 
        ret = applesmc_read_key(KEY_COUNT_KEY, buffer, 4);
-       count = ((u32)buffer[0]<<24) + ((u32)buffer[1]<<16) +
-                                               ((u32)buffer[2]<<8) + buffer[3];
-
        if (ret)
                return ret;
-       else
-               return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", count);
+
+       count = ((u32)buffer[0]<<24) + ((u32)buffer[1]<<16) +
+                                               ((u32)buffer[2]<<8) + buffer[3];
+       return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", count);
 }
 
 static ssize_t applesmc_key_at_index_read_show(struct device *dev,
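
All of the applesmc hunks above enforce one rule: decode the reply buffer only after applesmc_read_key() has been checked, since the buffer contents are meaningless on failure. A condensed sketch of the pattern with the SMC read stubbed out so it runs standalone; read_key_stub() is invented for the example, and the >> 2 mirrors how the driver drops the low bits of the big-endian fan-speed word:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical stand-in for applesmc_read_key(); fills 2 bytes on success. */
static int read_key_stub(uint8_t *buf, size_t len)
{
        const uint8_t sample[2] = { 0x12, 0x48 };

        if (len < sizeof(sample))
                return -1;
        memcpy(buf, sample, sizeof(sample));
        return 0;
}

int main(void)
{
        uint8_t buffer[2];
        int ret;

        ret = read_key_stub(buffer, sizeof(buffer));
        if (ret)                /* check first: buffer is undefined on failure */
                return 1;

        /* big-endian 16-bit value; the driver drops the low two bits */
        unsigned int speed = ((buffer[0] << 8) | buffer[1]) >> 2;
        printf("fan speed: %u rpm\n", speed);
        return 0;
}
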
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index 15a4ad31c510..9d2d03545bb0 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -479,12 +479,18 @@ static void iommu_enable_irq_remapping(struct intel_iommu *iommu)
 
        /* Enable interrupt-remapping */
        iommu->gcmd |= DMA_GCMD_IRE;
-       iommu->gcmd &= ~DMA_GCMD_CFI;  /* Block compatibility-format MSIs */
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
-
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_IRES), sts);
 
+       /* Block compatibility-format MSIs */
+       if (sts & DMA_GSTS_CFIS) {
+               iommu->gcmd &= ~DMA_GCMD_CFI;
+               writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
+               IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
+                             readl, !(sts & DMA_GSTS_CFIS), sts);
+       }
+
        /*
         * With CFI clear in the Global Command register, we should be
         * protected from dangerous (i.e. compatibility) interrupts
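
The hunk above only clears the compatibility-format bit when the status register reports it set, and after each GCMD write it waits until GSTS reflects the change (the IOMMU_WAIT_OP() wrapper). A runnable sketch of that write-then-poll pattern, with the hardware registers faked so the loop terminates; the bit position is chosen for the sketch only:

#include <stdio.h>
#include <stdint.h>

#define STS_CFIS (1u << 23)                     /* illustrative bit position */

static uint32_t fake_gsts = STS_CFIS;           /* pretend CFI starts enabled */

static uint32_t read_gsts(void)      { return fake_gsts; }
static void write_gcmd(uint32_t cmd) { fake_gsts = cmd; }   /* hw mirrors it */

int main(void)
{
        uint32_t sts = read_gsts();

        if (sts & STS_CFIS) {                   /* only act if the bit is set */
                write_gcmd(sts & ~STS_CFIS);
                do {                            /* wait for hardware to confirm */
                        sts = read_gsts();
                } while (sts & STS_CFIS);
        }
        printf("compatibility-format interrupts blocked, status=%#x\n", sts);
        return 0;
}
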
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 151aa95775be..af6d4f898e4c 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -537,12 +537,16 @@ static int __create_persistent_data_objects(struct dm_cache_metadata *cmd,
                                          CACHE_MAX_CONCURRENT_LOCKS);
        if (IS_ERR(cmd->bm)) {
                DMERR("could not create block manager");
-               return PTR_ERR(cmd->bm);
+               r = PTR_ERR(cmd->bm);
+               cmd->bm = NULL;
+               return r;
        }
 
        r = __open_or_format_metadata(cmd, may_format_device);
-       if (r)
+       if (r) {
                dm_block_manager_destroy(cmd->bm);
+               cmd->bm = NULL;
+       }
 
        return r;
 }
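
Both this dm-cache-metadata hunk and the matching dm-thin-metadata hunk below reset the block-manager pointer to NULL whenever it is destroyed or failed to be created, so a later teardown path cannot free it a second time. A plain malloc()/free() illustration of the idiom (the struct and helpers are invented for the example):

#include <stdlib.h>

struct metadata {
        void *bm;               /* block-manager handle (stand-in) */
};

static int open_metadata(struct metadata *md)
{
        md->bm = malloc(64);
        if (!md->bm)
                return -1;
        /* ... a later setup step fails ... */
        free(md->bm);
        md->bm = NULL;          /* without this, close_metadata() double-frees */
        return -1;
}

static void close_metadata(struct metadata *md)
{
        free(md->bm);           /* free(NULL) is a safe no-op */
        md->bm = NULL;
}

int main(void)
{
        struct metadata md = { 0 };

        open_metadata(&md);
        close_metadata(&md);
        return 0;
}
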
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 6a26afcc1fd6..85077f4d257a 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -698,12 +698,16 @@ static int __create_persistent_data_objects(struct dm_pool_metadata *pmd, bool f
                                          THIN_MAX_CONCURRENT_LOCKS);
        if (IS_ERR(pmd->bm)) {
                DMERR("could not create block manager");
-               return PTR_ERR(pmd->bm);
+               r = PTR_ERR(pmd->bm);
+               pmd->bm = NULL;
+               return r;
        }
 
        r = __open_or_format_metadata(pmd, format_device);
-       if (r)
+       if (r) {
                dm_block_manager_destroy(pmd->bm);
+               pmd->bm = NULL;
+       }
 
        return r;
 }
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index cc028353f9d5..776aaf5951e4 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -226,6 +226,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
        pfn_t pfn;
        int id;
        struct page **pages;
+       sector_t offset;
 
        wc->memory_vmapped = false;
 
@@ -244,9 +245,16 @@ static int persistent_memory_claim(struct dm_writecache *wc)
                goto err1;
        }
 
+       offset = get_start_sect(wc->ssd_dev->bdev);
+       if (offset & (PAGE_SIZE / 512 - 1)) {
+               r = -EINVAL;
+               goto err1;
+       }
+       offset >>= PAGE_SHIFT - 9;
+
        id = dax_read_lock();
 
-       da = dax_direct_access(wc->ssd_dev->dax_dev, 0, p, &wc->memory_map, &pfn);
+       da = dax_direct_access(wc->ssd_dev->dax_dev, offset, p, &wc->memory_map, &pfn);
        if (da < 0) {
                wc->memory_map = NULL;
                r = da;
@@ -268,7 +276,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
                i = 0;
                do {
                        long daa;
-                       daa = dax_direct_access(wc->ssd_dev->dax_dev, i, p - i,
+                       daa = dax_direct_access(wc->ssd_dev->dax_dev, offset + i, p - i,
                                                NULL, &pfn);
                        if (daa <= 0) {
                                r = daa ? daa : -EINVAL;
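
The dm-writecache hunk above rejects a cache device whose data area does not start on a page boundary and then converts the start sector into a page offset for dax_direct_access(). The arithmetic as a standalone sketch, assuming 4 KiB pages and 512-byte sectors:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1u << PAGE_SHIFT)

static int sector_to_page_offset(uint64_t start_sector, uint64_t *pgoff)
{
        if (start_sector & (PAGE_SIZE / 512 - 1))       /* not page aligned */
                return -1;
        *pgoff = start_sector >> (PAGE_SHIFT - 9);      /* 8 sectors per page */
        return 0;
}

int main(void)
{
        uint64_t pgoff;

        if (sector_to_page_offset(2048, &pgoff) == 0)   /* typical 1 MiB start */
                printf("start sector 2048 -> page offset %llu\n",
                       (unsigned long long)pgoff);      /* prints 256 */

        if (sector_to_page_offset(63, &pgoff) != 0)
                printf("start sector 63 rejected (not page aligned)\n");
        return 0;
}
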
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index c30affbd43a9..cf3df733d960 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -1245,6 +1245,10 @@ static ssize_t store_protocols(struct device *device,
        }
 
        mutex_lock(&dev->lock);
+       if (!dev->registered) {
+               mutex_unlock(&dev->lock);
+               return -ENODEV;
+       }
 
        old_protocols = *current_protocols;
        new_protocols = old_protocols;
@@ -1383,6 +1387,10 @@ static ssize_t store_filter(struct device *device,
                return -EINVAL;
 
        mutex_lock(&dev->lock);
+       if (!dev->registered) {
+               mutex_unlock(&dev->lock);
+               return -ENODEV;
+       }
 
        new_filter = *filter;
        if (fattr->mask)
@@ -1497,6 +1505,10 @@ static ssize_t store_wakeup_protocols(struct device *device,
        int i;
 
        mutex_lock(&dev->lock);
+       if (!dev->registered) {
+               mutex_unlock(&dev->lock);
+               return -ENODEV;
+       }
 
        allowed = dev->allowed_wakeup_protocols;
 
@@ -1556,25 +1568,25 @@ static void rc_dev_release(struct device *device)
        kfree(dev);
 }
 
-#define ADD_HOTPLUG_VAR(fmt, val...)                                   \
-       do {                                                            \
-               int err = add_uevent_var(env, fmt, val);                \
-               if (err)                                                \
-                       return err;                                     \
-       } while (0)
-
 static int rc_dev_uevent(struct device *device, struct kobj_uevent_env *env)
 {
        struct rc_dev *dev = to_rc_dev(device);
+       int ret = 0;
 
-       if (dev->rc_map.name)
-               ADD_HOTPLUG_VAR("NAME=%s", dev->rc_map.name);
-       if (dev->driver_name)
-               ADD_HOTPLUG_VAR("DRV_NAME=%s", dev->driver_name);
-       if (dev->device_name)
-               ADD_HOTPLUG_VAR("DEV_NAME=%s", dev->device_name);
+       mutex_lock(&dev->lock);
 
-       return 0;
+       if (!dev->registered)
+               ret = -ENODEV;
+       if (ret == 0 && dev->rc_map.name)
+               ret = add_uevent_var(env, "NAME=%s", dev->rc_map.name);
+       if (ret == 0 && dev->driver_name)
+               ret = add_uevent_var(env, "DRV_NAME=%s", dev->driver_name);
+       if (ret == 0 && dev->device_name)
+               ret = add_uevent_var(env, "DEV_NAME=%s", dev->device_name);
+
+       mutex_unlock(&dev->lock);
+
+       return ret;
 }
 
 /*
@@ -1958,14 +1970,14 @@ void rc_unregister_device(struct rc_dev *dev)
        del_timer_sync(&dev->timer_keyup);
        del_timer_sync(&dev->timer_repeat);
 
-       rc_free_rx_device(dev);
-
        mutex_lock(&dev->lock);
        if (dev->users && dev->close)
                dev->close(dev);
        dev->registered = false;
        mutex_unlock(&dev->lock);
 
+       rc_free_rx_device(dev);
+
        /*
         * lirc device should be freed with dev->registered = false, so
         * that userspace polling will get notified.
diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c
index 0187dbf3b87d..54cdafdd067d 100644
--- a/drivers/net/ethernet/arc/emac_mdio.c
+++ b/drivers/net/ethernet/arc/emac_mdio.c
@@ -153,6 +153,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv)
        if (IS_ERR(data->reset_gpio)) {
                error = PTR_ERR(data->reset_gpio);
                dev_err(priv->dev, "Failed to request gpio: %d\n", error);
+               mdiobus_free(bus);
                return error;
        }
 
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 6b761f6b8fd5..9a614c5cdfa2 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -2441,8 +2441,10 @@ static int bcm_sysport_probe(struct platform_device *pdev)
        priv->tx_rings = devm_kcalloc(&pdev->dev, txq,
                                      sizeof(struct bcm_sysport_tx_ring),
                                      GFP_KERNEL);
-       if (!priv->tx_rings)
-               return -ENOMEM;
+       if (!priv->tx_rings) {
+               ret = -ENOMEM;
+               goto err_free_netdev;
+       }
 
        priv->is_lite = params->is_lite;
        priv->num_rx_desc_words = params->num_rx_desc_words;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index ab4d1dacb585..a267380b267d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -6836,16 +6836,19 @@ static ssize_t bnxt_show_temp(struct device *dev,
        struct hwrm_temp_monitor_query_input req = {0};
        struct hwrm_temp_monitor_query_output *resp;
        struct bnxt *bp = dev_get_drvdata(dev);
-       u32 temp = 0;
+       u32 len = 0;
 
        resp = bp->hwrm_cmd_resp_addr;
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
        mutex_lock(&bp->hwrm_cmd_lock);
-       if (!_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
-               temp = resp->temp * 1000; /* display millidegree */
+       if (!_hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
+               len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */
        mutex_unlock(&bp->hwrm_cmd_lock);
 
-       return sprintf(buf, "%u\n", temp);
+       if (len)
+               return len;
+
+       return sprintf(buf, "unknown\n");
 }
 static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
 
@@ -7024,15 +7027,15 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
                }
        }
 
-       bnxt_enable_napi(bp);
-       bnxt_debug_dev_init(bp);
-
        rc = bnxt_init_nic(bp, irq_re_init);
        if (rc) {
                netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
-               goto open_err;
+               goto open_err_irq;
        }
 
+       bnxt_enable_napi(bp);
+       bnxt_debug_dev_init(bp);
+
        if (link_re_init) {
                mutex_lock(&bp->link_lock);
                rc = bnxt_update_phy_setting(bp);
@@ -7063,10 +7066,6 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
                bnxt_vf_reps_open(bp);
        return 0;
 
-open_err:
-       bnxt_debug_dev_exit(bp);
-       bnxt_disable_napi(bp);
-
 open_err_irq:
        bnxt_del_napi(bp);
 
@@ -9128,6 +9127,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                    (long)pci_resource_start(pdev, 0), dev->dev_addr);
        pcie_print_link_status(pdev);
 
+       pci_save_state(pdev);
        return 0;
 
 init_err_cleanup_tc:
@@ -9289,6 +9289,8 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
                        "Cannot re-enable PCI device after reset.\n");
        } else {
                pci_set_master(pdev);
+               pci_restore_state(pdev);
+               pci_save_state(pdev);
 
                err = bnxt_hwrm_func_reset(bp);
                if (!err && netif_running(netdev))
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 63730e449e08..a1cb99110092 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -471,7 +471,7 @@ static void bnxt_get_channels(struct net_device *dev,
        int max_tx_sch_inputs;
 
        /* Get the most up-to-date max_tx_sch_inputs. */
-       if (BNXT_NEW_RM(bp))
+       if (netif_running(dev) && BNXT_NEW_RM(bp))
                bnxt_hwrm_func_resc_qcaps(bp, false);
        max_tx_sch_inputs = hw_resc->max_tx_sch_inputs;
 
@@ -1877,6 +1877,9 @@ static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
        if (rc != 0)
                return rc;
 
+       if (!dir_entries || !entry_length)
+               return -EIO;
+
        /* Insert 2 bytes of directory info (count and size of entries) */
        if (len < 2)
                return -EINVAL;
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index be845df05039..6fcf9646d141 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -7219,8 +7219,8 @@ static inline void tg3_reset_task_schedule(struct tg3 *tp)
 
 static inline void tg3_reset_task_cancel(struct tg3 *tp)
 {
-       cancel_work_sync(&tp->reset_task);
-       tg3_flag_clear(tp, RESET_TASK_PENDING);
+       if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
+               cancel_work_sync(&tp->reset_task);
        tg3_flag_clear(tp, TX_RECOVERY_PENDING);
 }
 
@@ -11213,18 +11213,27 @@ static void tg3_reset_task(struct work_struct *work)
 
        tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
        err = tg3_init_hw(tp, true);
-       if (err)
+       if (err) {
+               tg3_full_unlock(tp);
+               tp->irq_sync = 0;
+               tg3_napi_enable(tp);
+               /* Clear this flag so that tg3_reset_task_cancel() will not
+                * call cancel_work_sync() and wait forever.
+                */
+               tg3_flag_clear(tp, RESET_TASK_PENDING);
+               dev_close(tp->dev);
                goto out;
+       }
 
        tg3_netif_start(tp);
 
-out:
        tg3_full_unlock(tp);
 
        if (!err)
                tg3_phy_start(tp);
 
        tg3_flag_clear(tp, RESET_TASK_PENDING);
+out:
        rtnl_unlock();
 }
 
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 16de0fa92ab7..5242687060b4 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -2451,8 +2451,8 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
        port->reset = devm_reset_control_get_exclusive(dev, NULL);
        if (IS_ERR(port->reset)) {
                dev_err(dev, "no reset\n");
-               clk_disable_unprepare(port->pclk);
-               return PTR_ERR(port->reset);
+               ret = PTR_ERR(port->reset);
+               goto unprepare;
        }
        reset_control_reset(port->reset);
        usleep_range(100, 500);
@@ -2507,25 +2507,25 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
                                        IRQF_SHARED,
                                        port_names[port->id],
                                        port);
-       if (ret) {
-               clk_disable_unprepare(port->pclk);
-               return ret;
-       }
+       if (ret)
+               goto unprepare;
 
        ret = register_netdev(netdev);
-       if (!ret) {
+       if (ret)
+               goto unprepare;
+
+       netdev_info(netdev,
+                   "irq %d, DMA @ 0x%pap, GMAC @ 0x%pap\n",
+                   port->irq, &dmares->start,
+                   &gmacres->start);
+       ret = gmac_setup_phy(netdev);
+       if (ret)
                netdev_info(netdev,
-                           "irq %d, DMA @ 0x%pap, GMAC @ 0x%pap\n",
-                           port->irq, &dmares->start,
-                           &gmacres->start);
-               ret = gmac_setup_phy(netdev);
-               if (ret)
-                       netdev_info(netdev,
-                                   "PHY init failed, deferring to ifup time\n");
-               return 0;
-       }
+                           "PHY init failed, deferring to ifup time\n");
+       return 0;
 
-       port->netdev = NULL;
+unprepare:
+       clk_disable_unprepare(port->pclk);
        return ret;
 }
 
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 024b08fafd3b..4de65a9de0a6 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -2297,8 +2297,10 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
                        priv->enet_ver = AE_VERSION_1;
                else if (acpi_dev_found(hns_enet_acpi_match[1].id))
                        priv->enet_ver = AE_VERSION_2;
-               else
-                       return -ENXIO;
+               else {
+                       ret = -ENXIO;
+                       goto out_read_prop_fail;
+               }
 
                /* try to find port-idx-in-ae first */
                ret = acpi_node_get_property_reference(dev->fwnode,
@@ -2314,7 +2316,8 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
                priv->fwnode = args.fwnode;
        } else {
                dev_err(dev, "cannot read cfg data from OF or acpi\n");
-               return -ENXIO;
+               ret = -ENXIO;
+               goto out_read_prop_fail;
        }
 
        ret = device_property_read_u32(dev, "port-idx-in-ae", &port_id);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index 1a11bc0e1612..cfa0bba3940f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -114,7 +114,7 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
                goto err_out;
 
        for (i = 0; i <= buddy->max_order; ++i) {
-               s = BITS_TO_LONGS(1 << (buddy->max_order - i));
+               s = BITS_TO_LONGS(1UL << (buddy->max_order - i));
                buddy->bits[i] = kvmalloc_array(s, sizeof(long), GFP_KERNEL | 
__GFP_ZERO);
                if (!buddy->bits[i])
                        goto err_out_free;
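
The one-character mr.c fix above matters because the shift of a plain int constant is performed in 32 bits: once the exponent reaches 31 the result overflows before it is widened, and BITS_TO_LONGS() then sizes the bitmap from a garbage value. A small demonstration; the first shift is exactly the overflowing form the patch removes:

#include <stdio.h>

int main(void)
{
        int order = 31;

        unsigned long bad  = 1 << order;        /* 32-bit signed shift: overflows */
        unsigned long good = 1UL << order;      /* performed in unsigned long */

        printf("1   << %d = %lu\n", order, bad);
        printf("1UL << %d = %lu\n", order, good);
        return 0;
}
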
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 569e698b5c80..b5066cf86c85 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1337,6 +1337,51 @@ static inline int ravb_hook_irq(unsigned int irq, irq_handler_t handler,
        return error;
 }
 
+/* MDIO bus init function */
+static int ravb_mdio_init(struct ravb_private *priv)
+{
+       struct platform_device *pdev = priv->pdev;
+       struct device *dev = &pdev->dev;
+       int error;
+
+       /* Bitbang init */
+       priv->mdiobb.ops = &bb_ops;
+
+       /* MII controller setting */
+       priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb);
+       if (!priv->mii_bus)
+               return -ENOMEM;
+
+       /* Hook up MII support for ethtool */
+       priv->mii_bus->name = "ravb_mii";
+       priv->mii_bus->parent = dev;
+       snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+                pdev->name, pdev->id);
+
+       /* Register MDIO bus */
+       error = of_mdiobus_register(priv->mii_bus, dev->of_node);
+       if (error)
+               goto out_free_bus;
+
+       return 0;
+
+out_free_bus:
+       free_mdio_bitbang(priv->mii_bus);
+       return error;
+}
+
+/* MDIO bus release function */
+static int ravb_mdio_release(struct ravb_private *priv)
+{
+       /* Unregister mdio bus */
+       mdiobus_unregister(priv->mii_bus);
+
+       /* Free bitbang info */
+       free_mdio_bitbang(priv->mii_bus);
+
+       return 0;
+}
+
 /* Network device open function for Ethernet AVB */
 static int ravb_open(struct net_device *ndev)
 {
@@ -1345,6 +1390,13 @@ static int ravb_open(struct net_device *ndev)
        struct device *dev = &pdev->dev;
        int error;
 
+       /* MDIO bus init */
+       error = ravb_mdio_init(priv);
+       if (error) {
+               netdev_err(ndev, "failed to initialize MDIO\n");
+               return error;
+       }
+
        napi_enable(&priv->napi[RAVB_BE]);
        napi_enable(&priv->napi[RAVB_NC]);
 
@@ -1422,6 +1474,7 @@ static int ravb_open(struct net_device *ndev)
 out_napi_off:
        napi_disable(&priv->napi[RAVB_NC]);
        napi_disable(&priv->napi[RAVB_BE]);
+       ravb_mdio_release(priv);
        return error;
 }
 
@@ -1721,6 +1774,8 @@ static int ravb_close(struct net_device *ndev)
        ravb_ring_free(ndev, RAVB_BE);
        ravb_ring_free(ndev, RAVB_NC);
 
+       ravb_mdio_release(priv);
+
        return 0;
 }
 
@@ -1867,51 +1922,6 @@ static const struct net_device_ops ravb_netdev_ops = {
        .ndo_set_features       = ravb_set_features,
 };
 
-/* MDIO bus init function */
-static int ravb_mdio_init(struct ravb_private *priv)
-{
-       struct platform_device *pdev = priv->pdev;
-       struct device *dev = &pdev->dev;
-       int error;
-
-       /* Bitbang init */
-       priv->mdiobb.ops = &bb_ops;
-
-       /* MII controller setting */
-       priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb);
-       if (!priv->mii_bus)
-               return -ENOMEM;
-
-       /* Hook up MII support for ethtool */
-       priv->mii_bus->name = "ravb_mii";
-       priv->mii_bus->parent = dev;
-       snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
-                pdev->name, pdev->id);
-
-       /* Register MDIO bus */
-       error = of_mdiobus_register(priv->mii_bus, dev->of_node);
-       if (error)
-               goto out_free_bus;
-
-       return 0;
-
-out_free_bus:
-       free_mdio_bitbang(priv->mii_bus);
-       return error;
-}
-
-/* MDIO bus release function */
-static int ravb_mdio_release(struct ravb_private *priv)
-{
-       /* Unregister mdio bus */
-       mdiobus_unregister(priv->mii_bus);
-
-       /* Free bitbang info */
-       free_mdio_bitbang(priv->mii_bus);
-
-       return 0;
-}
-
 static const struct of_device_id ravb_match_table[] = {
        { .compatible = "renesas,etheravb-r8a7790", .data = (void *)RCAR_GEN2 },
        { .compatible = "renesas,etheravb-r8a7794", .data = (void *)RCAR_GEN2 },
@@ -2138,13 +2148,6 @@ static int ravb_probe(struct platform_device *pdev)
                eth_hw_addr_random(ndev);
        }
 
-       /* MDIO bus init */
-       error = ravb_mdio_init(priv);
-       if (error) {
-               dev_err(&pdev->dev, "failed to initialize MDIO\n");
-               goto out_dma_free;
-       }
-
        netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll, 64);
        netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll, 64);
 
@@ -2166,8 +2169,6 @@ static int ravb_probe(struct platform_device *pdev)
 out_napi_del:
        netif_napi_del(&priv->napi[RAVB_NC]);
        netif_napi_del(&priv->napi[RAVB_BE]);
-       ravb_mdio_release(priv);
-out_dma_free:
        dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
                          priv->desc_bat_dma);
 
@@ -2199,7 +2200,6 @@ static int ravb_remove(struct platform_device *pdev)
        unregister_netdev(ndev);
        netif_napi_del(&priv->napi[RAVB_NC]);
        netif_napi_del(&priv->napi[RAVB_BE]);
-       ravb_mdio_release(priv);
        pm_runtime_disable(&pdev->dev);
        free_netdev(ndev);
        platform_set_drvdata(pdev, NULL);
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index d73850ebb671..f2fecb684220 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -1187,6 +1187,7 @@ static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
                goto nlmsg_failure;
 
        if (nla_put_u32(skb, GTPA_VERSION, pctx->gtp_version) ||
+           nla_put_u32(skb, GTPA_LINK, pctx->dev->ifindex) ||
            nla_put_be32(skb, GTPA_PEER_ADDRESS, pctx->peer_addr_ip4.s_addr) ||
            nla_put_be32(skb, GTPA_MS_ADDRESS, pctx->ms_addr_ip4.s_addr))
                goto nla_put_failure;
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index 023b8d0bf175..8d27786acad9 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -309,7 +309,7 @@ int asix_read_phy_addr(struct usbnet *dev, int internal)
 
        netdev_dbg(dev->net, "asix_get_phy_addr()\n");
 
-       if (ret < 0) {
+       if (ret < 2) {
                netdev_err(dev->net, "Error reading PHYID register: %02x\n", ret);
                goto out;
        }
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index ea3c89118614..af58bf54aa9b 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1227,6 +1227,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x2001, 0x7e16, 3)},    /* D-Link DWM-221 */
        {QMI_FIXED_INTF(0x2001, 0x7e19, 4)},    /* D-Link DWM-221 B1 */
        {QMI_FIXED_INTF(0x2001, 0x7e35, 4)},    /* D-Link DWM-222 */
+       {QMI_FIXED_INTF(0x2001, 0x7e3d, 4)},    /* D-Link DWM-222 A2 */
        {QMI_FIXED_INTF(0x2020, 0x2031, 4)},    /* Olicard 600 */
        {QMI_FIXED_INTF(0x2020, 0x2033, 4)},    /* BroadMobi BM806U */
        {QMI_FIXED_INTF(0x2020, 0x2060, 4)},    /* BroadMobi BM818 */
@@ -1262,6 +1263,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x2357, 0x9000, 4)},    /* TP-LINK MA260 */
        {QMI_QUIRK_SET_DTR(0x1bc7, 0x1031, 3)}, /* Telit LE910C1-EUX */
        {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */
+       {QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */
        {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)},    /* Telit ME910 */
        {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)},    /* Telit ME910 dual modem */
        {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},    /* Telit LE920 */
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 776b7e9e23b9..f28df233dfcd 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -307,6 +307,9 @@ static void nvmet_keep_alive_timer(struct work_struct *work)
 
 static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
 {
+       if (unlikely(ctrl->kato == 0))
+               return;
+
        pr_debug("ctrl %d start keep-alive timer for %d secs\n",
                ctrl->cntlid, ctrl->kato);
 
@@ -316,6 +319,9 @@ static void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
 
 static void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
 {
+       if (unlikely(ctrl->kato == 0))
+               return;
+
        pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);
 
        cancel_delayed_work_sync(&ctrl->ka_work);
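
The two guards above cover controllers that negotiated a Keep Alive Timeout
(KATO) of zero, which in NVMe means keep-alives are disabled entirely.  A
minimal sketch of the failure mode being avoided, assuming the timer is armed
with the usual kato-based delay (the exact expression in core.c may differ):

	/* Hedged sketch, not driver code: with ctrl->kato == 0 the delayed
	 * work would be queued with no delay, fire almost immediately, see
	 * no host traffic, and needlessly tear the controller down. */
	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
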
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 29b4b236afd8..77e4d184bc99 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -1986,9 +1986,9 @@ nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
                        return;
                if (fcpreq->fcp_error ||
                    fcpreq->transferred_length != fcpreq->transfer_length) {
-                       spin_lock(&fod->flock);
+                       spin_lock_irqsave(&fod->flock, flags);
                        fod->abort = true;
-                       spin_unlock(&fod->flock);
+                       spin_unlock_irqrestore(&fod->flock, flags);
 
                        nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
                        return;
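
The irqsave variant saves the current interrupt state, disables local
interrupts, and restores the saved state on unlock, so fod->flock can be
taken safely here regardless of whether this completion path runs with
interrupts enabled or disabled; the same lock is already taken with the
irqsave variant elsewhere in this function.  The general pattern, as a
sketch (the flags variable is assumed to be declared in the function):

	unsigned long flags;

	spin_lock_irqsave(&fod->flock, flags);		/* disable local IRQs, remember prior state */
	fod->abort = true;
	spin_unlock_irqrestore(&fod->flock, flags);	/* restore the saved IRQ state */
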
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 91dbac7446a4..99314e516244 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -687,8 +687,10 @@ static void scatter_data_area(struct tcmu_dev *udev,
                from = kmap_atomic(sg_page(sg)) + sg->offset;
                while (sg_remaining > 0) {
                        if (block_remaining == 0) {
-                               if (to)
+                               if (to) {
+                                       flush_dcache_page(page);
                                        kunmap_atomic(to);
+                               }
 
                                block_remaining = DATA_BLOCK_SIZE;
                                dbi = tcmu_cmd_get_dbi(tcmu_cmd);
@@ -733,7 +735,6 @@ static void scatter_data_area(struct tcmu_dev *udev,
                                memcpy(to + offset,
                                       from + sg->length - sg_remaining,
                                       copy_bytes);
-                               tcmu_flush_dcache_range(to, copy_bytes);
                        }
 
                        sg_remaining -= copy_bytes;
@@ -742,8 +743,10 @@ static void scatter_data_area(struct tcmu_dev *udev,
                kunmap_atomic(from - sg->offset);
        }
 
-       if (to)
+       if (to) {
+               flush_dcache_page(page);
                kunmap_atomic(to);
+       }
 }
 
 static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
@@ -789,13 +792,13 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
                                dbi = tcmu_cmd_get_dbi(cmd);
                                page = tcmu_get_block_page(udev, dbi);
                                from = kmap_atomic(page);
+                               flush_dcache_page(page);
                        }
                        copy_bytes = min_t(size_t, sg_remaining,
                                        block_remaining);
                        if (read_len < copy_bytes)
                                copy_bytes = read_len;
                        offset = DATA_BLOCK_SIZE - block_remaining;
-                       tcmu_flush_dcache_range(from, copy_bytes);
                        memcpy(to + sg->length - sg_remaining, from + offset,
                                        copy_bytes);
 
@@ -1018,7 +1021,7 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
                entry->hdr.cmd_id = 0; /* not used for PAD */
                entry->hdr.kflags = 0;
                entry->hdr.uflags = 0;
-               tcmu_flush_dcache_range(entry, sizeof(*entry));
+               tcmu_flush_dcache_range(entry, sizeof(entry->hdr));
 
                UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size);
                tcmu_flush_dcache_range(mb, sizeof(*mb));
@@ -1083,7 +1086,7 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
        cdb_off = CMDR_OFF + cmd_head + base_command_size;
        memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb));
        entry->req.cdb_off = cdb_off;
-       tcmu_flush_dcache_range(entry, sizeof(*entry));
+       tcmu_flush_dcache_range(entry, command_size);
 
        UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size);
        tcmu_flush_dcache_range(mb, sizeof(*mb));
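
Two distinct adjustments are folded into the hunks above.  First, data-area
pages are flushed with flush_dcache_page() on the struct page itself, which
is the natural granularity here since the data area is handled page by page
through temporary kmap_atomic() mappings.  Second, ring entries are flushed
only for the bytes actually written (the header for a PAD entry, command_size
for a real command) instead of sizeof(*entry).  A short sketch of the
per-page pattern; page, offset, src and copy_bytes are illustrative names:

	void *to = kmap_atomic(page);		/* temporary kernel mapping */

	memcpy(to + offset, src, copy_bytes);
	flush_dcache_page(page);		/* make the data visible to user mappings */
	kunmap_atomic(to);
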
diff --git a/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c b/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c
index c12211eaaac4..0b9f835d931f 100644
--- a/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c
+++ b/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c
@@ -46,20 +46,21 @@ static struct temp_sensor_data omap4430_mpu_temp_sensor_data = {
 
 /*
  * Temperature values in milli degree celsius
- * ADC code values from 530 to 923
+ * ADC code values from 13 to 107, see TRM
+ * "18.4.10.2.3 ADC Codes Versus Temperature".
  */
 static const int
 omap4430_adc_to_temp[OMAP4430_ADC_END_VALUE - OMAP4430_ADC_START_VALUE + 1] = {
-       -38000, -35000, -34000, -32000, -30000, -28000, -26000, -24000, -22000,
-       -20000, -18000, -17000, -15000, -13000, -12000, -10000, -8000, -6000,
-       -5000, -3000, -1000, 0, 2000, 3000, 5000, 6000, 8000, 10000, 12000,
-       13000, 15000, 17000, 19000, 21000, 23000, 25000, 27000, 28000, 30000,
-       32000, 33000, 35000, 37000, 38000, 40000, 42000, 43000, 45000, 47000,
-       48000, 50000, 52000, 53000, 55000, 57000, 58000, 60000, 62000, 64000,
-       66000, 68000, 70000, 71000, 73000, 75000, 77000, 78000, 80000, 82000,
-       83000, 85000, 87000, 88000, 90000, 92000, 93000, 95000, 97000, 98000,
-       100000, 102000, 103000, 105000, 107000, 109000, 111000, 113000, 115000,
-       117000, 118000, 120000, 122000, 123000,
+       -40000, -38000, -35000, -34000, -32000, -30000, -28000, -26000, -24000,
+       -22000, -20000, -18500, -17000, -15000, -13500, -12000, -10000, -8000,
+       -6500, -5000, -3500, -1500, 0, 2000, 3500, 5000, 6500, 8500, 10000,
+       12000, 13500, 15000, 17000, 19000, 21000, 23000, 25000, 27000, 28500,
+       30000, 32000, 33500, 35000, 37000, 38500, 40000, 42000, 43500, 45000,
+       47000, 48500, 50000, 52000, 53500, 55000, 57000, 58500, 60000, 62000,
+       64000, 66000, 68000, 70000, 71500, 73500, 75000, 77000, 78500, 80000,
+       82000, 83500, 85000, 87000, 88500, 90000, 92000, 93500, 95000, 97000,
+       98500, 100000, 102000, 103500, 105000, 107000, 109000, 111000, 113000,
+       115000, 117000, 118500, 120000, 122000, 123500, 125000,
 };
 
 /* OMAP4430 data */
diff --git a/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h b/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h
index b87c8659ec60..8a081abce4b5 100644
--- a/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h
+++ b/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h
@@ -67,9 +67,13 @@
  * and thresholds for OMAP4430.
  */
 
-/* ADC conversion table limits */
-#define OMAP4430_ADC_START_VALUE                       0
-#define OMAP4430_ADC_END_VALUE                         127
+/*
+ * ADC conversion table limits. Ignore values outside the TRM listed
+ * range to avoid bogus thermal shutdowns. See omap4430 TRM chapter
+ * "18.4.10.2.3 ADC Codes Versus Temperature".
+ */
+#define OMAP4430_ADC_START_VALUE                       13
+#define OMAP4430_ADC_END_VALUE                         107
 /* bandgap clock limits (no control on 4430) */
 #define OMAP4430_MAX_FREQ                              32768
 #define OMAP4430_MIN_FREQ                              32768
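
With the table now starting at ADC code 13 and ending at 107, any raw code the
bandgap sensor reports outside that window has no defined translation and is
rejected instead of being mapped onto the extreme table entries.  A minimal
sketch of how a lookup against this table works; the helper name is
illustrative, not a function from the driver:

	/* Sketch: raw bandgap ADC code -> milli-degrees Celsius */
	static int omap4430_code_to_mcelsius(int adc_code)
	{
		if (adc_code < OMAP4430_ADC_START_VALUE ||
		    adc_code > OMAP4430_ADC_END_VALUE)
			return -EINVAL;	/* outside the documented TRM range */

		return omap4430_adc_to_temp[adc_code - OMAP4430_ADC_START_VALUE];
	}
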
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index 0d405cc58e72..cd0768c3e773 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -1050,7 +1050,7 @@ static unsigned int qcom_geni_serial_tx_empty(struct uart_port *uport)
 }
 
 #ifdef CONFIG_SERIAL_QCOM_GENI_CONSOLE
-static int __init qcom_geni_console_setup(struct console *co, char *options)
+static int qcom_geni_console_setup(struct console *co, char *options)
 {
        struct uart_port *uport;
        struct qcom_geni_serial_port *port;
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 66783a37f450..9f72a6ee13b5 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -29,6 +29,7 @@
 #include <linux/vfio.h>
 #include <linux/vgaarb.h>
 #include <linux/nospec.h>
+#include <linux/sched/mm.h>
 
 #include "vfio_pci_private.h"
 
@@ -181,6 +182,7 @@ static void vfio_pci_probe_mmaps(struct vfio_pci_device *vdev)
 
 static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev);
 static void vfio_pci_disable(struct vfio_pci_device *vdev);
+static int vfio_pci_try_zap_and_vma_lock_cb(struct pci_dev *pdev, void *data);
 
 /*
  * INTx masking requires the ability to disable INTx signaling via PCI_COMMAND
@@ -623,6 +625,12 @@ int vfio_pci_register_dev_region(struct vfio_pci_device *vdev,
        return 0;
 }
 
+struct vfio_devices {
+       struct vfio_device **devices;
+       int cur_index;
+       int max_index;
+};
+
 static long vfio_pci_ioctl(void *device_data,
                           unsigned int cmd, unsigned long arg)
 {
@@ -696,7 +704,7 @@ static long vfio_pci_ioctl(void *device_data,
                {
                        void __iomem *io;
                        size_t size;
-                       u16 orig_cmd;
+                       u16 cmd;
 
                        info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
                        info.flags = 0;
@@ -716,10 +724,7 @@ static long vfio_pci_ioctl(void *device_data,
                         * Is it really there?  Enable memory decode for
                         * implicit access in pci_map_rom().
                         */
-                       pci_read_config_word(pdev, PCI_COMMAND, &orig_cmd);
-                       pci_write_config_word(pdev, PCI_COMMAND,
-                                             orig_cmd | PCI_COMMAND_MEMORY);
-
+                       cmd = vfio_pci_memory_lock_and_enable(vdev);
                        io = pci_map_rom(pdev, &size);
                        if (io) {
                                info.flags = VFIO_REGION_INFO_FLAG_READ;
@@ -727,8 +732,8 @@ static long vfio_pci_ioctl(void *device_data,
                        } else {
                                info.size = 0;
                        }
+                       vfio_pci_memory_unlock_and_restore(vdev, cmd);
 
-                       pci_write_config_word(pdev, PCI_COMMAND, orig_cmd);
                        break;
                }
                case VFIO_PCI_VGA_REGION_INDEX:
@@ -865,8 +870,16 @@ static long vfio_pci_ioctl(void *device_data,
                return ret;
 
        } else if (cmd == VFIO_DEVICE_RESET) {
-               return vdev->reset_works ?
-                       pci_try_reset_function(vdev->pdev) : -EINVAL;
+               int ret;
+
+               if (!vdev->reset_works)
+                       return -EINVAL;
+
+               vfio_pci_zap_and_down_write_memory_lock(vdev);
+               ret = pci_try_reset_function(vdev->pdev);
+               up_write(&vdev->memory_lock);
+
+               return ret;
 
        } else if (cmd == VFIO_DEVICE_GET_PCI_HOT_RESET_INFO) {
                struct vfio_pci_hot_reset_info hdr;
@@ -946,8 +959,9 @@ static long vfio_pci_ioctl(void *device_data,
                int32_t *group_fds;
                struct vfio_pci_group_entry *groups;
                struct vfio_pci_group_info info;
+               struct vfio_devices devs = { .cur_index = 0 };
                bool slot = false;
-               int i, count = 0, ret = 0;
+               int i, group_idx, mem_idx = 0, count = 0, ret = 0;
 
                minsz = offsetofend(struct vfio_pci_hot_reset, count);
 
@@ -999,9 +1013,9 @@ static long vfio_pci_ioctl(void *device_data,
                 * user interface and store the group and iommu ID.  This
                 * ensures the group is held across the reset.
                 */
-               for (i = 0; i < hdr.count; i++) {
+               for (group_idx = 0; group_idx < hdr.count; group_idx++) {
                        struct vfio_group *group;
-                       struct fd f = fdget(group_fds[i]);
+                       struct fd f = fdget(group_fds[group_idx]);
                        if (!f.file) {
                                ret = -EBADF;
                                break;
@@ -1014,8 +1028,9 @@ static long vfio_pci_ioctl(void *device_data,
                                break;
                        }
 
-                       groups[i].group = group;
-                       groups[i].id = vfio_external_user_iommu_id(group);
+                       groups[group_idx].group = group;
+                       groups[group_idx].id =
+                                       vfio_external_user_iommu_id(group);
                }
 
                kfree(group_fds);
@@ -1034,13 +1049,63 @@ static long vfio_pci_ioctl(void *device_data,
                ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
                                                    vfio_pci_validate_devs,
                                                    &info, slot);
-               if (!ret)
-                       /* User has access, do the reset */
-                       ret = pci_reset_bus(vdev->pdev);
+               if (ret)
+                       goto hot_reset_release;
+
+               devs.max_index = count;
+               devs.devices = kcalloc(count, sizeof(struct vfio_device *),
+                                      GFP_KERNEL);
+               if (!devs.devices) {
+                       ret = -ENOMEM;
+                       goto hot_reset_release;
+               }
+
+               /*
+                * We need to get memory_lock for each device, but devices
+                * can share mmap_sem, therefore we need to zap and hold
+                * the vma_lock for each device, and only then get each
+                * memory_lock.
+                */
+               ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
+                                           vfio_pci_try_zap_and_vma_lock_cb,
+                                           &devs, slot);
+               if (ret)
+                       goto hot_reset_release;
+
+               for (; mem_idx < devs.cur_index; mem_idx++) {
+                       struct vfio_pci_device *tmp;
+
+                       tmp = vfio_device_data(devs.devices[mem_idx]);
+
+                       ret = down_write_trylock(&tmp->memory_lock);
+                       if (!ret) {
+                               ret = -EBUSY;
+                               goto hot_reset_release;
+                       }
+                       mutex_unlock(&tmp->vma_lock);
+               }
+
+               /* User has access, do the reset */
+               ret = pci_reset_bus(vdev->pdev);
 
 hot_reset_release:
-               for (i--; i >= 0; i--)
-                       vfio_group_put_external_user(groups[i].group);
+               for (i = 0; i < devs.cur_index; i++) {
+                       struct vfio_device *device;
+                       struct vfio_pci_device *tmp;
+
+                       device = devs.devices[i];
+                       tmp = vfio_device_data(device);
+
+                       if (i < mem_idx)
+                               up_write(&tmp->memory_lock);
+                       else
+                               mutex_unlock(&tmp->vma_lock);
+                       vfio_device_put(device);
+               }
+               kfree(devs.devices);
+
+               for (group_idx--; group_idx >= 0; group_idx--)
+                       vfio_group_put_external_user(groups[group_idx].group);
 
                kfree(groups);
                return ret;
@@ -1121,6 +1186,202 @@ static ssize_t vfio_pci_write(void *device_data, const char __user *buf,
        return vfio_pci_rw(device_data, (char __user *)buf, count, ppos, true);
 }
 
+/* Return 1 on zap and vma_lock acquired, 0 on contention (only with @try) */
+static int vfio_pci_zap_and_vma_lock(struct vfio_pci_device *vdev, bool try)
+{
+       struct vfio_pci_mmap_vma *mmap_vma, *tmp;
+
+       /*
+        * Lock ordering:
+        * vma_lock is nested under mmap_sem for vm_ops callback paths.
+        * The memory_lock semaphore is used by both code paths calling
+        * into this function to zap vmas and the vm_ops.fault callback
+        * to protect the memory enable state of the device.
+        *
+        * When zapping vmas we need to maintain the mmap_sem => vma_lock
+        * ordering, which requires using vma_lock to walk vma_list to
+        * acquire an mm, then dropping vma_lock to get the mmap_sem and
+        * reacquiring vma_lock.  This logic is derived from similar
+        * requirements in uverbs_user_mmap_disassociate().
+        *
+        * mmap_sem must always be the top-level lock when it is taken.
+        * Therefore we can only hold the memory_lock write lock when
+        * vma_list is empty, as we'd need to take mmap_sem to clear
+        * entries.  vma_list can only be guaranteed empty when holding
+        * vma_lock, thus memory_lock is nested under vma_lock.
+        *
+        * This enables the vm_ops.fault callback to acquire vma_lock,
+        * followed by memory_lock read lock, while already holding
+        * mmap_sem without risk of deadlock.
+        */
+       while (1) {
+               struct mm_struct *mm = NULL;
+
+               if (try) {
+                       if (!mutex_trylock(&vdev->vma_lock))
+                               return 0;
+               } else {
+                       mutex_lock(&vdev->vma_lock);
+               }
+               while (!list_empty(&vdev->vma_list)) {
+                       mmap_vma = list_first_entry(&vdev->vma_list,
+                                                   struct vfio_pci_mmap_vma,
+                                                   vma_next);
+                       mm = mmap_vma->vma->vm_mm;
+                       if (mmget_not_zero(mm))
+                               break;
+
+                       list_del(&mmap_vma->vma_next);
+                       kfree(mmap_vma);
+                       mm = NULL;
+               }
+               if (!mm)
+                       return 1;
+               mutex_unlock(&vdev->vma_lock);
+
+               if (try) {
+                       if (!down_read_trylock(&mm->mmap_sem)) {
+                               mmput(mm);
+                               return 0;
+                       }
+               } else {
+                       down_read(&mm->mmap_sem);
+               }
+               if (mmget_still_valid(mm)) {
+                       if (try) {
+                               if (!mutex_trylock(&vdev->vma_lock)) {
+                                       up_read(&mm->mmap_sem);
+                                       mmput(mm);
+                                       return 0;
+                               }
+                       } else {
+                               mutex_lock(&vdev->vma_lock);
+                       }
+                       list_for_each_entry_safe(mmap_vma, tmp,
+                                                &vdev->vma_list, vma_next) {
+                               struct vm_area_struct *vma = mmap_vma->vma;
+
+                               if (vma->vm_mm != mm)
+                                       continue;
+
+                               list_del(&mmap_vma->vma_next);
+                               kfree(mmap_vma);
+
+                               zap_vma_ptes(vma, vma->vm_start,
+                                            vma->vm_end - vma->vm_start);
+                       }
+                       mutex_unlock(&vdev->vma_lock);
+               }
+               up_read(&mm->mmap_sem);
+               mmput(mm);
+       }
+}
+
+void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_device *vdev)
+{
+       vfio_pci_zap_and_vma_lock(vdev, false);
+       down_write(&vdev->memory_lock);
+       mutex_unlock(&vdev->vma_lock);
+}
+
+u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_device *vdev)
+{
+       u16 cmd;
+
+       down_write(&vdev->memory_lock);
+       pci_read_config_word(vdev->pdev, PCI_COMMAND, &cmd);
+       if (!(cmd & PCI_COMMAND_MEMORY))
+               pci_write_config_word(vdev->pdev, PCI_COMMAND,
+                                     cmd | PCI_COMMAND_MEMORY);
+
+       return cmd;
+}
+
+void vfio_pci_memory_unlock_and_restore(struct vfio_pci_device *vdev, u16 cmd)
+{
+       pci_write_config_word(vdev->pdev, PCI_COMMAND, cmd);
+       up_write(&vdev->memory_lock);
+}
+
+/* Caller holds vma_lock */
+static int __vfio_pci_add_vma(struct vfio_pci_device *vdev,
+                             struct vm_area_struct *vma)
+{
+       struct vfio_pci_mmap_vma *mmap_vma;
+
+       mmap_vma = kmalloc(sizeof(*mmap_vma), GFP_KERNEL);
+       if (!mmap_vma)
+               return -ENOMEM;
+
+       mmap_vma->vma = vma;
+       list_add(&mmap_vma->vma_next, &vdev->vma_list);
+
+       return 0;
+}
+
+/*
+ * Zap mmaps on open so that we can fault them in on access and therefore
+ * our vma_list only tracks mappings accessed since last zap.
+ */
+static void vfio_pci_mmap_open(struct vm_area_struct *vma)
+{
+       zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
+}
+
+static void vfio_pci_mmap_close(struct vm_area_struct *vma)
+{
+       struct vfio_pci_device *vdev = vma->vm_private_data;
+       struct vfio_pci_mmap_vma *mmap_vma;
+
+       mutex_lock(&vdev->vma_lock);
+       list_for_each_entry(mmap_vma, &vdev->vma_list, vma_next) {
+               if (mmap_vma->vma == vma) {
+                       list_del(&mmap_vma->vma_next);
+                       kfree(mmap_vma);
+                       break;
+               }
+       }
+       mutex_unlock(&vdev->vma_lock);
+}
+
+static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf)
+{
+       struct vm_area_struct *vma = vmf->vma;
+       struct vfio_pci_device *vdev = vma->vm_private_data;
+       vm_fault_t ret = VM_FAULT_NOPAGE;
+
+       mutex_lock(&vdev->vma_lock);
+       down_read(&vdev->memory_lock);
+
+       if (!__vfio_pci_memory_enabled(vdev)) {
+               ret = VM_FAULT_SIGBUS;
+               mutex_unlock(&vdev->vma_lock);
+               goto up_out;
+       }
+
+       if (__vfio_pci_add_vma(vdev, vma)) {
+               ret = VM_FAULT_OOM;
+               mutex_unlock(&vdev->vma_lock);
+               goto up_out;
+       }
+
+       mutex_unlock(&vdev->vma_lock);
+
+       if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+                           vma->vm_end - vma->vm_start, vma->vm_page_prot))
+               ret = VM_FAULT_SIGBUS;
+
+up_out:
+       up_read(&vdev->memory_lock);
+       return ret;
+}
+
+static const struct vm_operations_struct vfio_pci_mmap_ops = {
+       .open = vfio_pci_mmap_open,
+       .close = vfio_pci_mmap_close,
+       .fault = vfio_pci_mmap_fault,
+};
+
 static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
 {
        struct vfio_pci_device *vdev = device_data;
@@ -1170,8 +1431,14 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        vma->vm_pgoff = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff;
 
-       return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
-                              req_len, vma->vm_page_prot);
+       /*
+        * See remap_pfn_range(), called from vfio_pci_mmap_fault() but we can't
+        * change vm_flags within the fault handler.  Set them now.
+        */
+       vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+       vma->vm_ops = &vfio_pci_mmap_ops;
+
+       return 0;
 }
 
 static void vfio_pci_request(void *device_data, unsigned int count)
@@ -1243,6 +1510,9 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        spin_lock_init(&vdev->irqlock);
        mutex_init(&vdev->ioeventfds_lock);
        INIT_LIST_HEAD(&vdev->ioeventfds_list);
+       mutex_init(&vdev->vma_lock);
+       INIT_LIST_HEAD(&vdev->vma_list);
+       init_rwsem(&vdev->memory_lock);
 
        ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev);
        if (ret) {
@@ -1338,12 +1608,6 @@ static struct pci_driver vfio_pci_driver = {
        .err_handler    = &vfio_err_handlers,
 };
 
-struct vfio_devices {
-       struct vfio_device **devices;
-       int cur_index;
-       int max_index;
-};
-
 static int vfio_pci_get_devs(struct pci_dev *pdev, void *data)
 {
        struct vfio_devices *devs = data;
@@ -1365,6 +1629,39 @@ static int vfio_pci_get_devs(struct pci_dev *pdev, void *data)
        return 0;
 }
 
+static int vfio_pci_try_zap_and_vma_lock_cb(struct pci_dev *pdev, void *data)
+{
+       struct vfio_devices *devs = data;
+       struct vfio_device *device;
+       struct vfio_pci_device *vdev;
+
+       if (devs->cur_index == devs->max_index)
+               return -ENOSPC;
+
+       device = vfio_device_get_from_dev(&pdev->dev);
+       if (!device)
+               return -EINVAL;
+
+       if (pci_dev_driver(pdev) != &vfio_pci_driver) {
+               vfio_device_put(device);
+               return -EBUSY;
+       }
+
+       vdev = vfio_device_data(device);
+
+       /*
+        * Locking multiple devices is prone to deadlock; run away and
+        * unwind if we hit contention.
+        */
+       if (!vfio_pci_zap_and_vma_lock(vdev, true)) {
+               vfio_device_put(device);
+               return -EBUSY;
+       }
+
+       devs->devices[devs->cur_index++] = device;
+       return 0;
+}
+
 /*
  * Attempt to do a bus/slot reset if there are devices affected by a reset for
  * this device that are needs_reset and all of the affected devices are unused
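
Taken together, the vfio_pci.c hunks above establish one ordering for the new
locks: mmap_sem (taken by the mm for a fault) -> vdev->vma_lock (protects
vma_list and zapping) -> vdev->memory_lock (protects the memory-decode state).
A condensed sketch of how the two sides pair up, using only helpers introduced
in this patch:

	/* Writer side: anything that can turn off memory decode
	 * (function/bus reset, FLR, clearing PCI_COMMAND_MEMORY). */
	vfio_pci_zap_and_down_write_memory_lock(vdev);
	/* ... perform the reset or the config write ... */
	up_write(&vdev->memory_lock);

	/* Reader side: the mmap fault handler and the BAR read/write paths. */
	down_read(&vdev->memory_lock);
	if (!__vfio_pci_memory_enabled(vdev)) {
		/* fault path returns VM_FAULT_SIGBUS, BAR rw returns -EIO */
	}
	up_read(&vdev->memory_lock);
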
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index 36bc8f104e42..a1a26465d224 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -398,6 +398,20 @@ static inline void p_setd(struct perm_bits *p, int off, u32 virt, u32 write)
        *(__le32 *)(&p->write[off]) = cpu_to_le32(write);
 }
 
+/* Caller should hold memory_lock semaphore */
+bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev)
+{
+       struct pci_dev *pdev = vdev->pdev;
+       u16 cmd = le16_to_cpu(*(__le16 *)&vdev->vconfig[PCI_COMMAND]);
+
+       /*
+        * SR-IOV VF memory enable is handled by the MSE bit in the
+        * PF SR-IOV capability, there's therefore no need to trigger
+        * faults based on the virtual value.
+        */
+       return pdev->is_virtfn || (cmd & PCI_COMMAND_MEMORY);
+}
+
 /*
  * Restore the *real* BARs after we detect a FLR or backdoor reset.
  * (backdoor = some device specific technique that we didn't catch)
@@ -558,13 +572,18 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos,
 
                new_cmd = le32_to_cpu(val);
 
+               phys_io = !!(phys_cmd & PCI_COMMAND_IO);
+               virt_io = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_IO);
+               new_io = !!(new_cmd & PCI_COMMAND_IO);
+
                phys_mem = !!(phys_cmd & PCI_COMMAND_MEMORY);
                virt_mem = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_MEMORY);
                new_mem = !!(new_cmd & PCI_COMMAND_MEMORY);
 
-               phys_io = !!(phys_cmd & PCI_COMMAND_IO);
-               virt_io = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_IO);
-               new_io = !!(new_cmd & PCI_COMMAND_IO);
+               if (!new_mem)
+                       vfio_pci_zap_and_down_write_memory_lock(vdev);
+               else
+                       down_write(&vdev->memory_lock);
 
                /*
                 * If the user is writing mem/io enable (new_mem/io) and we
@@ -581,8 +600,11 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos,
        }
 
        count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
-       if (count < 0)
+       if (count < 0) {
+               if (offset == PCI_COMMAND)
+                       up_write(&vdev->memory_lock);
                return count;
+       }
 
        /*
         * Save current memory/io enable bits in vconfig to allow for
@@ -593,6 +615,8 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos,
 
                *virt_cmd &= cpu_to_le16(~mask);
                *virt_cmd |= cpu_to_le16(new_cmd & mask);
+
+               up_write(&vdev->memory_lock);
        }
 
        /* Emulate INTx disable */
@@ -830,8 +854,11 @@ static int vfio_exp_config_write(struct vfio_pci_device *vdev, int pos,
                                                 pos - offset + PCI_EXP_DEVCAP,
                                                 &cap);
 
-               if (!ret && (cap & PCI_EXP_DEVCAP_FLR))
+               if (!ret && (cap & PCI_EXP_DEVCAP_FLR)) {
+                       vfio_pci_zap_and_down_write_memory_lock(vdev);
                        pci_try_reset_function(vdev->pdev);
+                       up_write(&vdev->memory_lock);
+               }
        }
 
        /*
@@ -909,8 +936,11 @@ static int vfio_af_config_write(struct vfio_pci_device *vdev, int pos,
                                                pos - offset + PCI_AF_CAP,
                                                &cap);
 
-               if (!ret && (cap & PCI_AF_CAP_FLR) && (cap & PCI_AF_CAP_TP))
+               if (!ret && (cap & PCI_AF_CAP_FLR) && (cap & PCI_AF_CAP_TP)) {
+                       vfio_pci_zap_and_down_write_memory_lock(vdev);
                        pci_try_reset_function(vdev->pdev);
+                       up_write(&vdev->memory_lock);
+               }
        }
 
        return count;
@@ -1708,6 +1738,15 @@ int vfio_config_init(struct vfio_pci_device *vdev)
                                 vconfig[PCI_INTERRUPT_PIN]);
 
                vconfig[PCI_INTERRUPT_PIN] = 0; /* Gratuitous for good VFs */
+
+               /*
+                * VFs do not implement the memory enable bit of the COMMAND
+                * register, therefore we'll not have it set in our initial
+                * copy of config space after pci_enable_device().  For
+                * consistency with PFs, set the virtual enable bit here.
+                */
+               *(__le16 *)&vconfig[PCI_COMMAND] |=
+                                       cpu_to_le16(PCI_COMMAND_MEMORY);
        }
 
        if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) || vdev->nointx)
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index 94594dc63c41..bdfdd506bc58 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -252,6 +252,7 @@ static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
        struct pci_dev *pdev = vdev->pdev;
        unsigned int flag = msix ? PCI_IRQ_MSIX : PCI_IRQ_MSI;
        int ret;
+       u16 cmd;
 
        if (!is_irq_none(vdev))
                return -EINVAL;
@@ -261,13 +262,16 @@ static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
                return -ENOMEM;
 
        /* return the number of supported vectors if we can't get all: */
+       cmd = vfio_pci_memory_lock_and_enable(vdev);
        ret = pci_alloc_irq_vectors(pdev, 1, nvec, flag);
        if (ret < nvec) {
                if (ret > 0)
                        pci_free_irq_vectors(pdev);
+               vfio_pci_memory_unlock_and_restore(vdev, cmd);
                kfree(vdev->ctx);
                return ret;
        }
+       vfio_pci_memory_unlock_and_restore(vdev, cmd);
 
        vdev->num_ctx = nvec;
        vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX :
@@ -290,6 +294,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
        struct pci_dev *pdev = vdev->pdev;
        struct eventfd_ctx *trigger;
        int irq, ret;
+       u16 cmd;
 
        if (vector < 0 || vector >= vdev->num_ctx)
                return -EINVAL;
@@ -298,7 +303,11 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
 
        if (vdev->ctx[vector].trigger) {
                irq_bypass_unregister_producer(&vdev->ctx[vector].producer);
+
+               cmd = vfio_pci_memory_lock_and_enable(vdev);
                free_irq(irq, vdev->ctx[vector].trigger);
+               vfio_pci_memory_unlock_and_restore(vdev, cmd);
+
                kfree(vdev->ctx[vector].name);
                eventfd_ctx_put(vdev->ctx[vector].trigger);
                vdev->ctx[vector].trigger = NULL;
@@ -326,6 +335,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
         * such a reset it would be unsuccessful. To avoid this, restore the
         * cached value of the message prior to enabling.
         */
+       cmd = vfio_pci_memory_lock_and_enable(vdev);
        if (msix) {
                struct msi_msg msg;
 
@@ -335,6 +345,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
 
        ret = request_irq(irq, vfio_msihandler, 0,
                          vdev->ctx[vector].name, trigger);
+       vfio_pci_memory_unlock_and_restore(vdev, cmd);
        if (ret) {
                kfree(vdev->ctx[vector].name);
                eventfd_ctx_put(trigger);
@@ -379,6 +390,7 @@ static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix)
 {
        struct pci_dev *pdev = vdev->pdev;
        int i;
+       u16 cmd;
 
        for (i = 0; i < vdev->num_ctx; i++) {
                vfio_virqfd_disable(&vdev->ctx[i].unmask);
@@ -387,7 +399,9 @@ static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix)
 
        vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);
 
+       cmd = vfio_pci_memory_lock_and_enable(vdev);
        pci_free_irq_vectors(pdev);
+       vfio_pci_memory_unlock_and_restore(vdev, cmd);
 
        /*
         * Both disable paths above use pci_intx_for_msi() to clear DisINTx
diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h
index cde3b5d3441a..17d2bae5b013 100644
--- a/drivers/vfio/pci/vfio_pci_private.h
+++ b/drivers/vfio/pci/vfio_pci_private.h
@@ -76,6 +76,11 @@ struct vfio_pci_dummy_resource {
        struct list_head        res_next;
 };
 
+struct vfio_pci_mmap_vma {
+       struct vm_area_struct   *vma;
+       struct list_head        vma_next;
+};
+
 struct vfio_pci_device {
        struct pci_dev          *pdev;
        void __iomem            *barmap[PCI_STD_RESOURCE_END + 1];
@@ -111,6 +116,9 @@ struct vfio_pci_device {
        struct list_head        dummy_resources_list;
        struct mutex            ioeventfds_lock;
        struct list_head        ioeventfds_list;
+       struct mutex            vma_lock;
+       struct list_head        vma_list;
+       struct rw_semaphore     memory_lock;
 };
 
 #define is_intx(vdev) (vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX)
@@ -149,6 +157,14 @@ extern int vfio_pci_register_dev_region(struct vfio_pci_device *vdev,
                                        unsigned int type, unsigned int subtype,
                                        const struct vfio_pci_regops *ops,
                                        size_t size, u32 flags, void *data);
+
+extern bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev);
+extern void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_device
+                                                   *vdev);
+extern u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_device *vdev);
+extern void vfio_pci_memory_unlock_and_restore(struct vfio_pci_device *vdev,
+                                              u16 cmd);
+
 #ifdef CONFIG_VFIO_PCI_IGD
 extern int vfio_pci_igd_init(struct vfio_pci_device *vdev);
 #else
diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c
index a6029d0a5524..3d0ec2bbe131 100644
--- a/drivers/vfio/pci/vfio_pci_rdwr.c
+++ b/drivers/vfio/pci/vfio_pci_rdwr.c
@@ -165,6 +165,7 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
        size_t x_start = 0, x_end = 0;
        resource_size_t end;
        void __iomem *io;
+       struct resource *res = &vdev->pdev->resource[bar];
        ssize_t done;
 
        if (pci_resource_start(pdev, bar))
@@ -180,6 +181,14 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
 
        count = min(count, (size_t)(end - pos));
 
+       if (res->flags & IORESOURCE_MEM) {
+               down_read(&vdev->memory_lock);
+               if (!__vfio_pci_memory_enabled(vdev)) {
+                       up_read(&vdev->memory_lock);
+                       return -EIO;
+               }
+       }
+
        if (bar == PCI_ROM_RESOURCE) {
                /*
                 * The ROM can fill less space than the BAR, so we start the
@@ -187,13 +196,17 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
                 * filling large ROM BARs much faster.
                 */
                io = pci_map_rom(pdev, &x_start);
-               if (!io)
-                       return -ENOMEM;
+               if (!io) {
+                       done = -ENOMEM;
+                       goto out;
+               }
                x_end = end;
        } else {
                int ret = vfio_pci_setup_barmap(vdev, bar);
-               if (ret)
-                       return ret;
+               if (ret) {
+                       done = ret;
+                       goto out;
+               }
 
                io = vdev->barmap[bar];
        }
@@ -210,6 +223,9 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
 
        if (bar == PCI_ROM_RESOURCE)
                pci_unmap_rom(pdev, io);
+out:
+       if (res->flags & IORESOURCE_MEM)
+               up_read(&vdev->memory_lock);
 
        return done;
 }
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 52083b710b87..05d8553635ee 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -343,6 +343,32 @@ static int put_pfn(unsigned long pfn, int prot)
        return 0;
 }
 
+static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
+                           unsigned long vaddr, unsigned long *pfn,
+                           bool write_fault)
+{
+       int ret;
+
+       ret = follow_pfn(vma, vaddr, pfn);
+       if (ret) {
+               bool unlocked = false;
+
+               ret = fixup_user_fault(NULL, mm, vaddr,
+                                      FAULT_FLAG_REMOTE |
+                                      (write_fault ?  FAULT_FLAG_WRITE : 0),
+                                      &unlocked);
+               if (unlocked)
+                       return -EAGAIN;
+
+               if (ret)
+                       return ret;
+
+               ret = follow_pfn(vma, vaddr, pfn);
+       }
+
+       return ret;
+}
+
 static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
                         int prot, unsigned long *pfn)
 {
@@ -382,12 +408,16 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
 
        down_read(&mm->mmap_sem);
 
+retry:
        vma = find_vma_intersection(mm, vaddr, vaddr + 1);
 
        if (vma && vma->vm_flags & VM_PFNMAP) {
-               if (!follow_pfn(vma, vaddr, pfn) &&
-                   is_invalid_reserved_pfn(*pfn))
-                       ret = 0;
+               ret = follow_fault_pfn(vma, mm, vaddr, pfn, prot & IOMMU_WRITE);
+               if (ret == -EAGAIN)
+                       goto retry;
+
+               if (!ret && !is_invalid_reserved_pfn(*pfn))
+                       ret = -EFAULT;
        }
 
        up_read(&mm->mmap_sem);
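
follow_pfn() only succeeds when the PTE is already present, and with the
vfio-pci change above BAR mappings are now faulted in lazily, so the pinning
path has to be able to fault the mapping in itself.  fixup_user_fault() does
that; when it reports that it had to drop mmap_sem (unlocked == true) the VMA
looked up earlier may be stale, hence the -EAGAIN and the retry label that
repeats find_vma_intersection().  A condensed view of the contract, mirroring
the hunks above rather than adding new code:

	retry:
		vma = find_vma_intersection(mm, vaddr, vaddr + 1);
		...
		ret = follow_fault_pfn(vma, mm, vaddr, pfn, prot & IOMMU_WRITE);
		if (ret == -EAGAIN)	/* mmap_sem was dropped; vma may be stale */
			goto retry;
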
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index e94a61eaeceb..f7b553faadb1 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -365,8 +365,14 @@ int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr,
        int i, j;
 
        for (i = 0; i < nr_pages; i++) {
-               err = gnttab_grant_foreign_access(dev->otherend_id,
-                                                 virt_to_gfn(vaddr), 0);
+               unsigned long gfn;
+
+               if (is_vmalloc_addr(vaddr))
+                       gfn = pfn_to_gfn(vmalloc_to_pfn(vaddr));
+               else
+                       gfn = virt_to_gfn(vaddr);
+
+               err = gnttab_grant_foreign_access(dev->otherend_id, gfn, 0);
                if (err < 0) {
                        xenbus_dev_fatal(dev, err,
                                         "granting access to ring page");
diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c
index 14a6c1b90c9f..9a1e761b64a2 100644
--- a/fs/affs/amigaffs.c
+++ b/fs/affs/amigaffs.c
@@ -420,24 +420,51 @@ affs_mode_to_prot(struct inode *inode)
        u32 prot = AFFS_I(inode)->i_protect;
        umode_t mode = inode->i_mode;
 
+       /*
+        * First, clear all RWED bits for owner, group, other.
+        * Then, recalculate them afresh.
+        *
+        * We'll always clear the delete-inhibit bit for the owner, as that is
+        * the classic single-user mode AmigaOS protection bit and we need to
+        * stay compatible with all scenarios.
+        *
+        * Since multi-user AmigaOS is an extension, we'll only set the
+        * delete-allow bit if any of the other bits in the same user class
+        * (group/other) are used.
+        */
+       prot &= ~(FIBF_NOEXECUTE | FIBF_NOREAD
+                 | FIBF_NOWRITE | FIBF_NODELETE
+                 | FIBF_GRP_EXECUTE | FIBF_GRP_READ
+                 | FIBF_GRP_WRITE   | FIBF_GRP_DELETE
+                 | FIBF_OTR_EXECUTE | FIBF_OTR_READ
+                 | FIBF_OTR_WRITE   | FIBF_OTR_DELETE);
+
+       /* Classic single-user AmigaOS flags. These are inverted. */
        if (!(mode & 0100))
                prot |= FIBF_NOEXECUTE;
        if (!(mode & 0400))
                prot |= FIBF_NOREAD;
        if (!(mode & 0200))
                prot |= FIBF_NOWRITE;
+
+       /* Multi-user extended flags. Not inverted. */
        if (mode & 0010)
                prot |= FIBF_GRP_EXECUTE;
        if (mode & 0040)
                prot |= FIBF_GRP_READ;
        if (mode & 0020)
                prot |= FIBF_GRP_WRITE;
+       if (mode & 0070)
+               prot |= FIBF_GRP_DELETE;
+
        if (mode & 0001)
                prot |= FIBF_OTR_EXECUTE;
        if (mode & 0004)
                prot |= FIBF_OTR_READ;
        if (mode & 0002)
                prot |= FIBF_OTR_WRITE;
+       if (mode & 0007)
+               prot |= FIBF_OTR_DELETE;
 
        AFFS_I(inode)->i_protect = prot;
 }
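
A worked example, derived only from the code above: mode 0644 (rw-r--r--).

	owner (6 = rw-): FIBF_NOREAD, FIBF_NOWRITE and FIBF_NODELETE stay
	                 clear, FIBF_NOEXECUTE is set (owner flags are
	                 inverted "inhibit" bits)
	group (4 = r--): FIBF_GRP_READ is set, and because the 0070 test sees
	                 a non-zero group class, FIBF_GRP_DELETE is set too
	other (4 = r--): FIBF_OTR_READ and FIBF_OTR_DELETE are set, for the
	                 same reason

Group and other flags are positive "allow" bits, so nothing else in those
classes is set.
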
diff --git a/fs/affs/file.c b/fs/affs/file.c
index a85817f54483..ba084b0b214b 100644
--- a/fs/affs/file.c
+++ b/fs/affs/file.c
@@ -428,6 +428,24 @@ static int affs_write_begin(struct file *file, struct address_space *mapping,
        return ret;
 }
 
+static int affs_write_end(struct file *file, struct address_space *mapping,
+                         loff_t pos, unsigned int len, unsigned int copied,
+                         struct page *page, void *fsdata)
+{
+       struct inode *inode = mapping->host;
+       int ret;
+
+       ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
+
+       /* Clear Archived bit on file writes, as AmigaOS would do */
+       if (AFFS_I(inode)->i_protect & FIBF_ARCHIVED) {
+               AFFS_I(inode)->i_protect &= ~FIBF_ARCHIVED;
+               mark_inode_dirty(inode);
+       }
+
+       return ret;
+}
+
 static sector_t _affs_bmap(struct address_space *mapping, sector_t block)
 {
        return generic_block_bmap(mapping,block,affs_get_block);
@@ -437,7 +455,7 @@ const struct address_space_operations affs_aops = {
        .readpage = affs_readpage,
        .writepage = affs_writepage,
        .write_begin = affs_write_begin,
-       .write_end = generic_write_end,
+       .write_end = affs_write_end,
        .direct_IO = affs_direct_IO,
        .bmap = _affs_bmap
 };
@@ -794,6 +812,12 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
        if (tmp > inode->i_size)
                inode->i_size = AFFS_I(inode)->mmu_private = tmp;
 
+       /* Clear Archived bit on file writes, as AmigaOS would do */
+       if (AFFS_I(inode)->i_protect & FIBF_ARCHIVED) {
+               AFFS_I(inode)->i_protect &= ~FIBF_ARCHIVED;
+               mark_inode_dirty(inode);
+       }
+
 err_first_bh:
        unlock_page(page);
        put_page(page);
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index c9943d70e2cb..8007b6aacec6 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -1347,7 +1347,8 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
        btrfs_tree_read_unlock_blocking(eb);
        free_extent_buffer(eb);
 
-       extent_buffer_get(eb_rewin);
+       btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb_rewin),
+                                      eb_rewin, btrfs_header_level(eb_rewin));
        btrfs_tree_read_lock(eb_rewin);
        __tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
        WARN_ON(btrfs_header_nritems(eb_rewin) >
@@ -1421,8 +1422,6 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
 
        if (!eb)
                return NULL;
-       extent_buffer_get(eb);
-       btrfs_tree_read_lock(eb);
        if (old_root) {
                btrfs_set_header_bytenr(eb, eb->start);
                btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
@@ -1430,6 +1429,9 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
                btrfs_set_header_level(eb, old_root->level);
                btrfs_set_header_generation(eb, old_generation);
        }
+       btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb), eb,
+                                      btrfs_header_level(eb));
+       btrfs_tree_read_lock(eb);
        if (tm)
                __tree_mod_log_rewind(fs_info, eb, time_seq, tm);
        else
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index fbcd18d96c52..82d597b16152 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -5377,9 +5377,9 @@ void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
        }
 }
 
-int read_extent_buffer_to_user(const struct extent_buffer *eb,
-                              void __user *dstv,
-                              unsigned long start, unsigned long len)
+int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
+                                      void __user *dstv,
+                                      unsigned long start, unsigned long len)
 {
        size_t cur;
        size_t offset;
@@ -5400,7 +5400,7 @@ int read_extent_buffer_to_user(const struct extent_buffer *eb,
 
                cur = min(len, (PAGE_SIZE - offset));
                kaddr = page_address(page);
-               if (copy_to_user(dst, kaddr + offset, cur)) {
+               if (probe_user_write(dst, kaddr + offset, cur)) {
                        ret = -EFAULT;
                        break;
                }
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index a3598b24441e..d5089cadd7c4 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -448,9 +448,9 @@ int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
 void read_extent_buffer(const struct extent_buffer *eb, void *dst,
                        unsigned long start,
                        unsigned long len);
-int read_extent_buffer_to_user(const struct extent_buffer *eb,
-                              void __user *dst, unsigned long start,
-                              unsigned long len);
+int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
+                                      void __user *dst, unsigned long start,
+                                      unsigned long len);
 void write_extent_buffer_fsid(struct extent_buffer *eb, const void *src);
 void write_extent_buffer_chunk_tree_uuid(struct extent_buffer *eb,
                const void *src);
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index a5ae02bf3652..85990755edd9 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -2079,9 +2079,14 @@ static noinline int copy_to_sk(struct btrfs_path *path,
                sh.len = item_len;
                sh.transid = found_transid;
 
-               /* copy search result header */
-               if (copy_to_user(ubuf + *sk_offset, &sh, sizeof(sh))) {
-                       ret = -EFAULT;
+               /*
+                * Copy search result header. If we fault then loop again so we
+                * can fault in the pages and -EFAULT there if there's a
+                * problem. Otherwise we'll fault and then copy the buffer in
+                        * properly this next time through.
+                */
+               if (probe_user_write(ubuf + *sk_offset, &sh, sizeof(sh))) {
+                       ret = 0;
                        goto out;
                }
 
@@ -2089,10 +2094,14 @@ static noinline int copy_to_sk(struct btrfs_path *path,
 
                if (item_len) {
                        char __user *up = ubuf + *sk_offset;
-                       /* copy the item */
-                       if (read_extent_buffer_to_user(leaf, up,
-                                                      item_off, item_len)) {
-                               ret = -EFAULT;
+                       /*
+                        * Copy the item, same behavior as above, but reset
+                        * *sk_offset so we copy the full thing again.
+                        */
+                       if (read_extent_buffer_to_user_nofault(leaf, up,
+                                               item_off, item_len)) {
+                               ret = 0;
+                               *sk_offset -= sizeof(sh);
                                goto out;
                        }
 
@@ -2180,6 +2189,10 @@ static noinline int search_ioctl(struct inode *inode,
        key.offset = sk->min_offset;
 
        while (1) {
+               ret = fault_in_pages_writeable(ubuf, *buf_size - sk_offset);
+               if (ret)
+                       break;
+
                ret = btrfs_search_forward(root, &key, path, sk->min_transid);
                if (ret != 0) {
                        if (ret > 0)
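
The three btrfs hunks above implement the usual "no page faults while holding
tree locks" pattern: the user buffer is pre-faulted with
fault_in_pages_writeable() before the search, the copies inside the locked
section use the non-faulting probe_user_write() /
read_extent_buffer_to_user_nofault(), and a failed non-faulting copy returns 0
so the outer loop pre-faults and retries instead of returning -EFAULT with
locks held (the fault could need those very locks if the destination buffer is
an mmap of the same filesystem).  A minimal sketch of the shape of that loop,
with lock_tree()/unlock_tree() as stand-ins for the real extent-buffer
locking:

	while (1) {
		if (fault_in_pages_writeable(ubuf, size))	/* may fault; no locks held */
			break;					/* genuinely bad user pointer */
		lock_tree();
		err = probe_user_write(ubuf, data, size);	/* never faults */
		unlock_tree();
		if (!err)
			break;					/* copied successfully */
		/* otherwise loop: pre-fault again and retry */
	}
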
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 4abb2a155ac5..498ec4b10e61 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -4172,6 +4172,7 @@ static int btrfs_uuid_scan_kthread(void *data)
                        goto skip;
                }
 update_tree:
+               btrfs_release_path(path);
                if (!btrfs_is_empty_uuid(root_item.uuid)) {
                        ret = btrfs_uuid_tree_add(trans, root_item.uuid,
                                                  BTRFS_UUID_KEY_SUBVOL,
@@ -4196,6 +4197,7 @@ static int btrfs_uuid_scan_kthread(void *data)
                }
 
 skip:
+               btrfs_release_path(path);
                if (trans) {
                        ret = btrfs_end_transaction(trans);
                        trans = NULL;
@@ -4203,7 +4205,6 @@ static int btrfs_uuid_scan_kthread(void *data)
                                break;
                }
 
-               btrfs_release_path(path);
                if (key.offset < (u64)-1) {
                        key.offset++;
                } else if (key.type < BTRFS_ROOT_ITEM_KEY) {
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index faca455bd3c6..4ce2752c8b71 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -1819,6 +1819,7 @@ const struct file_operations ceph_file_fops = {
        .mmap = ceph_mmap,
        .fsync = ceph_fsync,
        .lock = ceph_lock,
+       .setlease = simple_nosetlease,
        .flock = ceph_flock,
        .splice_read = generic_file_splice_read,
        .splice_write = iter_file_splice_write,
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index f988ccd064a2..61a52bb26d12 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1891,9 +1891,9 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
                         * during ep_insert().
                         */
                        if (list_empty(&epi->ffd.file->f_tfile_llink)) {
-                               get_file(epi->ffd.file);
-                               list_add(&epi->ffd.file->f_tfile_llink,
-                                        &tfile_check_list);
+                               if (get_file_rcu(epi->ffd.file))
+                                       list_add(&epi->ffd.file->f_tfile_llink,
+                                                &tfile_check_list);
                        }
                }
        }
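
The epoll loop-check walk can run concurrently with the last fput() of a file it finds, and plain get_file() would happily bump a refcount that has already reached zero. get_file_rcu() (roughly an inc-not-zero on f_count in this kernel) only succeeds while the count is still non-zero, so a file that is already being torn down is simply not added to the check list. A minimal sketch of the conditional-get pattern, with file and use() as stand-ins:

	if (get_file_rcu(file)) {
		use(file);	/* we now hold our own reference */
		fput(file);
	}
	/* else: the file was already on its way out, skip it */
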
diff --git a/fs/ext2/file.c b/fs/ext2/file.c
index 28b2609f25c1..d39d90c1b670 100644
--- a/fs/ext2/file.c
+++ b/fs/ext2/file.c
@@ -93,8 +93,10 @@ static vm_fault_t ext2_dax_fault(struct vm_fault *vmf)
        struct inode *inode = file_inode(vmf->vma->vm_file);
        struct ext2_inode_info *ei = EXT2_I(inode);
        vm_fault_t ret;
+       bool write = (vmf->flags & FAULT_FLAG_WRITE) &&
+               (vmf->vma->vm_flags & VM_SHARED);
 
-       if (vmf->flags & FAULT_FLAG_WRITE) {
+       if (write) {
                sb_start_pagefault(inode->i_sb);
                file_update_time(vmf->vma->vm_file);
        }
@@ -103,7 +105,7 @@ static vm_fault_t ext2_dax_fault(struct vm_fault *vmf)
        ret = dax_iomap_fault(vmf, PE_SIZE_PTE, NULL, NULL, &ext2_iomap_ops);
 
        up_read(&ei->dax_sem);
-       if (vmf->flags & FAULT_FLAG_WRITE)
+       if (write)
                sb_end_pagefault(inode->i_sb);
        return ret;
 }
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
index 2652d00842d6..087a5715cf20 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.c
+++ b/fs/xfs/libxfs/xfs_attr_leaf.c
@@ -935,8 +935,10 @@ xfs_attr_shortform_verify(
                 * struct xfs_attr_sf_entry has a variable length.
                 * Check the fixed-offset parts of the structure are
                 * within the data buffer.
+                * xfs_attr_sf_entry is defined with a 1-byte variable
+                * array at the end, so we must subtract that off.
                 */
-               if (((char *)sfep + sizeof(*sfep)) >= endp)
+               if (((char *)sfep + sizeof(*sfep) - 1) >= endp)
                        return __this_address;
 
                /* Don't allow names with known bad length. */
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 0b7145fdb8aa..f35e1801f1c9 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -6130,7 +6130,7 @@ xfs_bmap_validate_extent(
 
        isrt = XFS_IS_REALTIME_INODE(ip);
        endfsb = irec->br_startblock + irec->br_blockcount - 1;
-       if (isrt) {
+       if (isrt && whichfork == XFS_DATA_FORK) {
                if (!xfs_verify_rtbno(mp, irec->br_startblock))
                        return __this_address;
                if (!xfs_verify_rtbno(mp, endfsb))
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 259549698ba7..f22acfd53850 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -1095,6 +1095,14 @@ __xfs_filemap_fault(
        return ret;
 }
 
+static inline bool
+xfs_is_write_fault(
+       struct vm_fault         *vmf)
+{
+       return (vmf->flags & FAULT_FLAG_WRITE) &&
+              (vmf->vma->vm_flags & VM_SHARED);
+}
+
 static vm_fault_t
 xfs_filemap_fault(
        struct vm_fault         *vmf)
@@ -1102,7 +1110,7 @@ xfs_filemap_fault(
        /* DAX can shortcut the normal fault path on write faults! */
        return __xfs_filemap_fault(vmf, PE_SIZE_PTE,
                        IS_DAX(file_inode(vmf->vma->vm_file)) &&
-                       (vmf->flags & FAULT_FLAG_WRITE));
+                       xfs_is_write_fault(vmf));
 }
 
 static vm_fault_t
@@ -1115,7 +1123,7 @@ xfs_filemap_huge_fault(
 
        /* DAX can shortcut the normal fault path on write faults! */
        return __xfs_filemap_fault(vmf, pe_size,
-                       (vmf->flags & FAULT_FLAG_WRITE));
+                       xfs_is_write_fault(vmf));
 }
 
 static vm_fault_t
diff --git a/include/linux/bvec.h b/include/linux/bvec.h
index fe7a22dd133b..bc1f16e9f3f4 100644
--- a/include/linux/bvec.h
+++ b/include/linux/bvec.h
@@ -119,11 +119,18 @@ static inline bool bvec_iter_rewind(const struct bio_vec *bv,
        return true;
 }
 
+static inline void bvec_iter_skip_zero_bvec(struct bvec_iter *iter)
+{
+       iter->bi_bvec_done = 0;
+       iter->bi_idx++;
+}
+
 #define for_each_bvec(bvl, bio_vec, iter, start)                       \
        for (iter = (start);                                            \
             (iter).bi_size &&                                          \
                ((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \
-            bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len))
+            (bvl).bv_len ? (void)bvec_iter_advance((bio_vec), &(iter), \
+                    (bvl).bv_len) : bvec_iter_skip_zero_bvec(&(iter)))
 
 /* for iterating one bio from start to end */
 #define BVEC_ITER_ALL_INIT (struct bvec_iter)                          \
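
The for_each_bvec() change matters because bvec_iter_advance() asked to advance by zero bytes does not move the iterator, so a zero-length bvec previously made the loop spin forever on the same entry. With the fix the macro bumps bi_idx past the empty segment itself; the loop body may still see a bvec with bv_len == 0 once, which callers typically just skip. An illustrative consumer, with bvecs, start_iter and process() as hypothetical stand-ins:

	struct bio_vec bv;
	struct bvec_iter iter;

	for_each_bvec(bv, bvecs, iter, start_iter) {
		if (!bv.bv_len)
			continue;	/* empty segment, nothing to do */
		process(page_address(bv.bv_page) + bv.bv_offset, bv.bv_len);
	}
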
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 8506637f070d..a46b6832b373 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -956,34 +956,49 @@ static inline void hid_device_io_stop(struct hid_device *hid) {
  * @max: maximal valid usage->code to consider later (out parameter)
  * @type: input event type (EV_KEY, EV_REL, ...)
  * @c: code which corresponds to this usage and type
+ *
+ * The value pointed to by @bit will be set to NULL if either @type is
+ * an unhandled event type, or if @c is out of range for @type. This
+ * can be used as an error condition.
  */
 static inline void hid_map_usage(struct hid_input *hidinput,
                struct hid_usage *usage, unsigned long **bit, int *max,
-               __u8 type, __u16 c)
+               __u8 type, unsigned int c)
 {
        struct input_dev *input = hidinput->input;
-
-       usage->type = type;
-       usage->code = c;
+       unsigned long *bmap = NULL;
+       unsigned int limit = 0;
 
        switch (type) {
        case EV_ABS:
-               *bit = input->absbit;
-               *max = ABS_MAX;
+               bmap = input->absbit;
+               limit = ABS_MAX;
                break;
        case EV_REL:
-               *bit = input->relbit;
-               *max = REL_MAX;
+               bmap = input->relbit;
+               limit = REL_MAX;
                break;
        case EV_KEY:
-               *bit = input->keybit;
-               *max = KEY_MAX;
+               bmap = input->keybit;
+               limit = KEY_MAX;
                break;
        case EV_LED:
-               *bit = input->ledbit;
-               *max = LED_MAX;
+               bmap = input->ledbit;
+               limit = LED_MAX;
                break;
        }
+
+       if (unlikely(c > limit || !bmap)) {
+               pr_warn_ratelimited("%s: Invalid code %d type %d\n",
+                                   input->name, c, type);
+               *bit = NULL;
+               return;
+       }
+
+       usage->type = type;
+       usage->code = c;
+       *max = limit;
+       *bit = bmap;
 }
 
 /**
@@ -997,7 +1012,8 @@ static inline void hid_map_usage_clear(struct hid_input *hidinput,
                __u8 type, __u16 c)
 {
        hid_map_usage(hidinput, usage, bit, max, type, c);
-       clear_bit(c, *bit);
+       if (*bit)
+               clear_bit(usage->code, *bit);
 }
 
 /**
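
Because hid_map_usage() now leaves *bit NULL when the code is out of range for the event type, a driver's ->input_mapping() hook can use that as its error check instead of trusting the code blindly. A sketch of such a hook (the function name and chosen key are made up for illustration):

	static int example_input_mapping(struct hid_device *hdev,
					 struct hid_input *hi,
					 struct hid_field *field,
					 struct hid_usage *usage,
					 unsigned long **bit, int *max)
	{
		hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_PLAYPAUSE);
		if (!*bit)
			return -1;	/* rejected mapping: ignore this usage */
		return 1;		/* mapping handled */
	}
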
diff --git a/include/linux/libata.h b/include/linux/libata.h
index ed1453c15041..afc1d72161ba 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -439,6 +439,7 @@ enum {
        ATA_HORKAGE_NO_DMA_LOG  = (1 << 23),    /* don't use DMA for log read */
        ATA_HORKAGE_NOTRIM      = (1 << 24),    /* don't use TRIM */
        ATA_HORKAGE_MAX_SEC_1024 = (1 << 25),   /* Limit max sects to 1024 */
+       ATA_HORKAGE_MAX_TRIM_128M = (1 << 26),  /* Limit max trim size to 128M */
 
         /* DMA mask for user DMA control: User visible values; DO NOT
            renumber */
diff --git a/include/linux/log2.h b/include/linux/log2.h
index 2af7f77866d0..78496801cddf 100644
--- a/include/linux/log2.h
+++ b/include/linux/log2.h
@@ -177,7 +177,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
 #define roundup_pow_of_two(n)                  \
 (                                              \
        __builtin_constant_p(n) ? (             \
-               (n == 1) ? 1 :                  \
+               ((n) == 1) ? 1 :                \
                (1UL << (ilog2((n) - 1) + 1))   \
                                   ) :          \
        __roundup_pow_of_two(n)                 \
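
The added parentheses around n guard against macro arguments containing operators that bind more loosely than ==; without them the comparison attaches to only part of the expression. A small userspace demonstration of the hazard (not kernel code):

#include <stdio.h>

#define BAD_IS_ONE(n)	(n == 1)	/* the unfixed expansion */
#define GOOD_IS_ONE(n)	((n) == 1)	/* the fixed expansion */

int main(void)
{
	unsigned int x = 1, mask = 3;

	/* BAD_IS_ONE(x & mask) expands to (x & mask == 1), which groups as
	 * x & (mask == 1) = 1 & 0 = 0, while the intended (x & mask) == 1 is 1.
	 */
	printf("bad:  %d\n", BAD_IS_ONE(x & mask));	/* prints 0 */
	printf("good: %d\n", GOOD_IS_ONE(x & mask));	/* prints 1 */
	return 0;
}
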
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index cf09ab37b45b..e713476ff29d 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -43,8 +43,7 @@ int nfnetlink_has_listeners(struct net *net, unsigned int group);
 int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid,
                   unsigned int group, int echo, gfp_t flags);
 int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error);
-int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid,
-                     int flags);
+int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid);
 
 static inline u16 nfnl_msg_type(u8 subsys, u8 msg_type)
 {
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index d55b68b113de..db9b0dd0a7a3 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -242,6 +242,17 @@ static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
 extern long probe_kernel_read(void *dst, const void *src, size_t size);
 extern long __probe_kernel_read(void *dst, const void *src, size_t size);
 
+/*
+ * probe_user_read(): safely attempt to read from a location in user space
+ * @dst: pointer to the buffer that shall take the data
+ * @src: address to read from
+ * @size: size of the data chunk
+ *
+ * Safely read from address @src to the buffer at @dst.  If a kernel fault
+ * happens, handle that and return -EFAULT.
+ */
+extern long probe_user_read(void *dst, const void __user *src, size_t size);
+
 /*
  * probe_kernel_write(): safely attempt to write to a location
  * @dst: address to write to
@@ -254,7 +265,22 @@ extern long __probe_kernel_read(void *dst, const void *src, size_t size)
 extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
 extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);
 
+/*
+ * probe_user_write(): safely attempt to write to a location in user space
+ * @dst: address to write to
+ * @src: pointer to the data that shall be written
+ * @size: size of the data chunk
+ *
+ * Safely write to address @dst from the buffer at @src.  If a kernel fault
+ * happens, handle that and return -EFAULT.
+ */
+extern long notrace probe_user_write(void __user *dst, const void *src, size_t size);
+extern long notrace __probe_user_write(void __user *dst, const void *src, size_t size);
+
 extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
+extern long strncpy_from_unsafe_user(char *dst, const void __user *unsafe_addr,
+                                    long count);
+extern long strnlen_unsafe_user(const void __user *unsafe_addr, long count);
 
 /**
  * probe_kernel_address(): safely attempt to read from a location
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 024636c31adc..93253ba1eeac 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -130,6 +130,8 @@ static inline u8 nft_reg_load8(u32 *sreg)
 static inline void nft_data_copy(u32 *dst, const struct nft_data *src,
                                 unsigned int len)
 {
+       if (len % NFT_REG32_SIZE)
+               dst[len / NFT_REG32_SIZE] = 0;
        memcpy(dst, src, len);
 }
 
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index 5eac62e1b68d..cc00be102b9f 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -132,7 +132,7 @@ enum nf_tables_msg_types {
  * @NFTA_LIST_ELEM: list element (NLA_NESTED)
  */
 enum nft_list_attributes {
-       NFTA_LIST_UNPEC,
+       NFTA_LIST_UNSPEC,
        NFTA_LIST_ELEM,
        __NFTA_LIST_MAX
 };
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 8a5708f31aa0..27e49c5ec219 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2918,6 +2918,22 @@ static unsigned int cpuset_mems_nr(unsigned int *array)
 }
 
 #ifdef CONFIG_SYSCTL
+static int proc_hugetlb_doulongvec_minmax(struct ctl_table *table, int write,
+                                         void *buffer, size_t *length,
+                                         loff_t *ppos, unsigned long *out)
+{
+       struct ctl_table dup_table;
+
+       /*
+        * In order to avoid races with __do_proc_doulongvec_minmax(), we
+        * can duplicate the @table and alter the duplicate of it.
+        */
+       dup_table = *table;
+       dup_table.data = out;
+
+       return proc_doulongvec_minmax(&dup_table, write, buffer, length, ppos);
+}
+
 static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
                         struct ctl_table *table, int write,
                         void __user *buffer, size_t *length, loff_t *ppos)
@@ -2929,9 +2945,8 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
        if (!hugepages_supported())
                return -EOPNOTSUPP;
 
-       table->data = &tmp;
-       table->maxlen = sizeof(unsigned long);
-       ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
+       ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
+                                            &tmp);
        if (ret)
                goto out;
 
@@ -2975,9 +2990,8 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
        if (write && hstate_is_gigantic(h))
                return -EINVAL;
 
-       table->data = &tmp;
-       table->maxlen = sizeof(unsigned long);
-       ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
+       ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
+                                            &tmp);
        if (ret)
                goto out;
 
diff --git a/mm/maccess.c b/mm/maccess.c
index ec00be51a24f..6e41ba452e5e 100644
--- a/mm/maccess.c
+++ b/mm/maccess.c
@@ -5,8 +5,32 @@
 #include <linux/mm.h>
 #include <linux/uaccess.h>
 
+static __always_inline long
+probe_read_common(void *dst, const void __user *src, size_t size)
+{
+       long ret;
+
+       pagefault_disable();
+       ret = __copy_from_user_inatomic(dst, src, size);
+       pagefault_enable();
+
+       return ret ? -EFAULT : 0;
+}
+
+static __always_inline long
+probe_write_common(void __user *dst, const void *src, size_t size)
+{
+       long ret;
+
+       pagefault_disable();
+       ret = __copy_to_user_inatomic(dst, src, size);
+       pagefault_enable();
+
+       return ret ? -EFAULT : 0;
+}
+
 /**
- * probe_kernel_read(): safely attempt to read from a location
+ * probe_kernel_read(): safely attempt to read from a kernel-space location
  * @dst: pointer to the buffer that shall take the data
  * @src: address to read from
  * @size: size of the data chunk
@@ -29,16 +53,40 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
        mm_segment_t old_fs = get_fs();
 
        set_fs(KERNEL_DS);
-       pagefault_disable();
-       ret = __copy_from_user_inatomic(dst,
-                       (__force const void __user *)src, size);
-       pagefault_enable();
+       ret = probe_read_common(dst, (__force const void __user *)src, size);
        set_fs(old_fs);
 
-       return ret ? -EFAULT : 0;
+       return ret;
 }
 EXPORT_SYMBOL_GPL(probe_kernel_read);
 
+/**
+ * probe_user_read(): safely attempt to read from a user-space location
+ * @dst: pointer to the buffer that shall take the data
+ * @src: address to read from. This must be a user address.
+ * @size: size of the data chunk
+ *
+ * Safely read from user address @src to the buffer at @dst. If a kernel fault
+ * happens, handle that and return -EFAULT.
+ */
+
+long __weak probe_user_read(void *dst, const void __user *src, size_t size)
+    __attribute__((alias("__probe_user_read")));
+
+long __probe_user_read(void *dst, const void __user *src, size_t size)
+{
+       long ret = -EFAULT;
+       mm_segment_t old_fs = get_fs();
+
+       set_fs(USER_DS);
+       if (access_ok(VERIFY_READ, src, size))
+               ret = probe_read_common(dst, src, size);
+       set_fs(old_fs);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(probe_user_read);
+
 /**
  * probe_kernel_write(): safely attempt to write to a location
  * @dst: address to write to
@@ -48,6 +96,7 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
  * Safely write to address @dst from the buffer at @src.  If a kernel fault
  * happens, handle that and return -EFAULT.
  */
+
 long __weak probe_kernel_write(void *dst, const void *src, size_t size)
     __attribute__((alias("__probe_kernel_write")));
 
@@ -57,15 +106,40 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
        mm_segment_t old_fs = get_fs();
 
        set_fs(KERNEL_DS);
-       pagefault_disable();
-       ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
-       pagefault_enable();
+       ret = probe_write_common((__force void __user *)dst, src, size);
        set_fs(old_fs);
 
-       return ret ? -EFAULT : 0;
+       return ret;
 }
 EXPORT_SYMBOL_GPL(probe_kernel_write);
 
+/**
+ * probe_user_write(): safely attempt to write to a user-space location
+ * @dst: address to write to
+ * @src: pointer to the data that shall be written
+ * @size: size of the data chunk
+ *
+ * Safely write to address @dst from the buffer at @src.  If a kernel fault
+ * happens, handle that and return -EFAULT.
+ */
+
+long __weak probe_user_write(void __user *dst, const void *src, size_t size)
+    __attribute__((alias("__probe_user_write")));
+
+long __probe_user_write(void __user *dst, const void *src, size_t size)
+{
+       long ret = -EFAULT;
+       mm_segment_t old_fs = get_fs();
+
+       set_fs(USER_DS);
+       if (access_ok(VERIFY_WRITE, dst, size))
+               ret = probe_write_common(dst, src, size);
+       set_fs(old_fs);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(probe_user_write);
+
 /**
  * strncpy_from_unsafe: - Copy a NUL terminated string from unsafe address.
  * @dst:   Destination address, in kernel space.  This buffer must be at
@@ -105,3 +179,76 @@ long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count)
 
        return ret ? -EFAULT : src - unsafe_addr;
 }
+
+/**
+ * strncpy_from_unsafe_user: - Copy a NUL terminated string from unsafe user
+ *                             address.
+ * @dst:   Destination address, in kernel space.  This buffer must be at
+ *         least @count bytes long.
+ * @unsafe_addr: Unsafe user address.
+ * @count: Maximum number of bytes to copy, including the trailing NUL.
+ *
+ * Copies a NUL-terminated string from unsafe user address to kernel buffer.
+ *
+ * On success, returns the length of the string INCLUDING the trailing NUL.
+ *
+ * If access fails, returns -EFAULT (some data may have been copied
+ * and the trailing NUL added).
+ *
+ * If @count is smaller than the length of the string, copies @count-1 bytes,
+ * sets the last byte of @dst buffer to NUL and returns @count.
+ */
+long strncpy_from_unsafe_user(char *dst, const void __user *unsafe_addr,
+                             long count)
+{
+       mm_segment_t old_fs = get_fs();
+       long ret;
+
+       if (unlikely(count <= 0))
+               return 0;
+
+       set_fs(USER_DS);
+       pagefault_disable();
+       ret = strncpy_from_user(dst, unsafe_addr, count);
+       pagefault_enable();
+       set_fs(old_fs);
+
+       if (ret >= count) {
+               ret = count;
+               dst[ret - 1] = '\0';
+       } else if (ret > 0) {
+               ret++;
+       }
+
+       return ret;
+}
+
+/**
+ * strnlen_unsafe_user: - Get the size of a user string INCLUDING final NUL.
+ * @unsafe_addr: The string to measure.
+ * @count: Maximum count (including NUL)
+ *
+ * Get the size of a NUL-terminated string in user space without pagefault.
+ *
+ * Returns the size of the string INCLUDING the terminating NUL.
+ *
+ * If the string is too long, returns a number larger than @count. User
+ * has to check the return value against "> count".
+ * On exception (or invalid count), returns 0.
+ *
+ * Unlike strnlen_user, this can be used from IRQ handler etc. because
+ * it disables pagefaults.
+ */
+long strnlen_unsafe_user(const void __user *unsafe_addr, long count)
+{
+       mm_segment_t old_fs = get_fs();
+       int ret;
+
+       set_fs(USER_DS);
+       pagefault_disable();
+       ret = strnlen_user(unsafe_addr, count);
+       pagefault_enable();
+       set_fs(old_fs);
+
+       return ret;
+}
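
The probe_user_*() helpers exist for code that may run with page faults disabled (tracing handlers, IRQ/NMI context): instead of sleeping on a missing page they fail with -EFAULT and leave the caller to cope. A hedged usage sketch, with user_ptr as a stand-in for some untrusted user address:

	u64 value = 0;

	if (probe_user_read(&value, (const void __user *)user_ptr,
			    sizeof(value)))
		value = 0;	/* page not resident or bad pointer: -EFAULT */
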
diff --git a/mm/slub.c b/mm/slub.c
index 882a1e0ae89c..dfc9b4267603 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -646,12 +646,12 @@ static void slab_fix(struct kmem_cache *s, char *fmt, ...)
 }
 
 static bool freelist_corrupted(struct kmem_cache *s, struct page *page,
-                              void *freelist, void *nextfree)
+                              void **freelist, void *nextfree)
 {
        if ((s->flags & SLAB_CONSISTENCY_CHECKS) &&
-           !check_valid_pointer(s, page, nextfree)) {
-               object_err(s, page, freelist, "Freechain corrupt");
-               freelist = NULL;
+           !check_valid_pointer(s, page, nextfree) && freelist) {
+               object_err(s, page, *freelist, "Freechain corrupt");
+               *freelist = NULL;
                slab_fix(s, "Isolate corrupted freechain");
                return true;
        }
@@ -1343,7 +1343,7 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
                                                        int objects) {}
 
 static bool freelist_corrupted(struct kmem_cache *s, struct page *page,
-                              void *freelist, void *nextfree)
+                              void **freelist, void *nextfree)
 {
        return false;
 }
@@ -2037,7 +2037,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
                 * 'freelist' is already corrupted.  So isolate all objects
                 * starting at 'freelist'.
                 */
-               if (freelist_corrupted(s, page, freelist, nextfree))
+               if (freelist_corrupted(s, page, &freelist, nextfree))
                        break;
 
                do {
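
freelist_corrupted() now takes the address of the caller's freelist pointer for a plain C reason: the old version set its local copy to NULL, so deactivate_slab() went on walking the corrupted chain anyway. A tiny standalone illustration of the difference (not kernel code):

#include <stdio.h>

static void clear_by_value(void *p)	{ p = NULL; }	/* changes only the copy */
static void clear_by_address(void **p)	{ *p = NULL; }	/* changes the caller's pointer */

int main(void)
{
	int x = 42;
	void *freelist = &x;

	clear_by_value(freelist);
	printf("by value:   %p\n", freelist);	/* still points at x */

	clear_by_address(&freelist);
	printf("by address: %p\n", freelist);	/* now NULL */
	return 0;
}
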
diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c
index 0458de53cb64..04a620fd1301 100644
--- a/net/batman-adv/bat_v_ogm.c
+++ b/net/batman-adv/bat_v_ogm.c
@@ -716,6 +716,12 @@ static void batadv_v_ogm_process(const struct sk_buff *skb, int ogm_offset,
                   ntohl(ogm_packet->seqno), ogm_throughput, ogm_packet->ttl,
                   ogm_packet->version, ntohs(ogm_packet->tvlv_len));
 
+       if (batadv_is_my_mac(bat_priv, ogm_packet->orig)) {
+               batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+                          "Drop packet: originator packet from ourself\n");
+               return;
+       }
+
        /* If the throughput metric is 0, immediately drop the packet. No need
         * to create orig_node / neigh_node for an unusable route.
         */
@@ -843,11 +849,6 @@ int batadv_v_ogm_packet_recv(struct sk_buff *skb,
        if (batadv_is_my_mac(bat_priv, ethhdr->h_source))
                goto free_skb;
 
-       ogm_packet = (struct batadv_ogm2_packet *)skb->data;
-
-       if (batadv_is_my_mac(bat_priv, ogm_packet->orig))
-               goto free_skb;
-
        batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_RX);
        batadv_add_counter(bat_priv, BATADV_CNT_MGMT_RX_BYTES,
                           skb->len + ETH_HLEN);
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 85faf25c2912..9b8bf06ccb61 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -450,7 +450,10 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac,
        batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
                           skb->len + ETH_HLEN);
 
-       netif_rx(skb);
+       if (in_interrupt())
+               netif_rx(skb);
+       else
+               netif_rx_ni(skb);
 out:
        if (primary_if)
                batadv_hardif_put(primary_if);
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 140c61a3f1ec..0c59fefc1371 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -714,8 +714,10 @@ batadv_gw_dhcp_recipient_get(struct sk_buff *skb, unsigned int *header_len,
 
        chaddr_offset = *header_len + BATADV_DHCP_CHADDR_OFFSET;
        /* store the client address if the message is going to a client */
-       if (ret == BATADV_DHCP_TO_CLIENT &&
-           pskb_may_pull(skb, chaddr_offset + ETH_ALEN)) {
+       if (ret == BATADV_DHCP_TO_CLIENT) {
+               if (!pskb_may_pull(skb, chaddr_offset + ETH_ALEN))
+                       return BATADV_DHCP_NO;
+
                /* check if the DHCP packet carries an Ethernet DHCP */
                p = skb->data + *header_len + BATADV_DHCP_HTYPE_OFFSET;
                if (*p != BATADV_DHCP_HTYPE_ETHERNET)
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 1b8a53081632..5b4632826dc6 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -718,11 +718,11 @@ static int nf_tables_gettable(struct net *net, struct sock *nlsk,
                                        nlh->nlmsg_seq, NFT_MSG_NEWTABLE, 0,
                                        family, table);
        if (err < 0)
-               goto err;
+               goto err_fill_table_info;
 
-       return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
+       return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
 
-err:
+err_fill_table_info:
        kfree_skb(skb2);
        return err;
 }
@@ -1383,11 +1383,11 @@ static int nf_tables_getchain(struct net *net, struct sock *nlsk,
                                        nlh->nlmsg_seq, NFT_MSG_NEWCHAIN, 0,
                                        family, table, chain);
        if (err < 0)
-               goto err;
+               goto err_fill_chain_info;
 
-       return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
+       return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
 
-err:
+err_fill_chain_info:
        kfree_skb(skb2);
        return err;
 }
@@ -2488,11 +2488,11 @@ static int nf_tables_getrule(struct net *net, struct sock *nlsk,
                                       nlh->nlmsg_seq, NFT_MSG_NEWRULE, 0,
                                       family, table, chain, rule);
        if (err < 0)
-               goto err;
+               goto err_fill_rule_info;
 
-       return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
+       return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
 
-err:
+err_fill_rule_info:
        kfree_skb(skb2);
        return err;
 }
@@ -3204,7 +3204,8 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
                        goto nla_put_failure;
        }
 
-       if (nla_put(skb, NFTA_SET_USERDATA, set->udlen, set->udata))
+       if (set->udata &&
+           nla_put(skb, NFTA_SET_USERDATA, set->udlen, set->udata))
                goto nla_put_failure;
 
        desc = nla_nest_start(skb, NFTA_SET_DESC);
@@ -3376,11 +3377,11 @@ static int nf_tables_getset(struct net *net, struct sock *nlsk,
 
        err = nf_tables_fill_set(skb2, &ctx, set, NFT_MSG_NEWSET, 0);
        if (err < 0)
-               goto err;
+               goto err_fill_set_info;
 
-       return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
+       return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
 
-err:
+err_fill_set_info:
        kfree_skb(skb2);
        return err;
 }
@@ -4156,24 +4157,18 @@ static int nft_get_set_elem(struct nft_ctx *ctx, struct nft_set *set,
        err = -ENOMEM;
        skb = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
        if (skb == NULL)
-               goto err1;
+               return err;
 
        err = nf_tables_fill_setelem_info(skb, ctx, ctx->seq, ctx->portid,
                                          NFT_MSG_NEWSETELEM, 0, set, &elem);
        if (err < 0)
-               goto err2;
+               goto err_fill_setelem;
 
-       err = nfnetlink_unicast(skb, ctx->net, ctx->portid, MSG_DONTWAIT);
-       /* This avoids a loop in nfnetlink. */
-       if (err < 0)
-               goto err1;
+       return nfnetlink_unicast(skb, ctx->net, ctx->portid);
 
-       return 0;
-err2:
+err_fill_setelem:
        kfree_skb(skb);
-err1:
-       /* this avoids a loop in nfnetlink. */
-       return err == -EAGAIN ? -ENOBUFS : err;
+       return err;
 }
 
 /* called with rcu_read_lock held */
@@ -5272,10 +5267,11 @@ static int nf_tables_getobj(struct net *net, struct sock *nlsk,
                                      nlh->nlmsg_seq, NFT_MSG_NEWOBJ, 0,
                                      family, table, obj, reset);
        if (err < 0)
-               goto err;
+               goto err_fill_obj_info;
 
-       return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
-err:
+       return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
+
+err_fill_obj_info:
        kfree_skb(skb2);
        return err;
 }
@@ -5932,10 +5928,11 @@ static int nf_tables_getflowtable(struct net *net, struct sock *nlsk,
                                            NFT_MSG_NEWFLOWTABLE, 0, family,
                                            flowtable);
        if (err < 0)
-               goto err;
+               goto err_fill_flowtable_info;
 
-       return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
-err:
+       return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
+
+err_fill_flowtable_info:
        kfree_skb(skb2);
        return err;
 }
@@ -6096,10 +6093,11 @@ static int nf_tables_getgen(struct net *net, struct sock *nlsk,
        err = nf_tables_fill_gen_info(skb2, net, NETLINK_CB(skb).portid,
                                      nlh->nlmsg_seq);
        if (err < 0)
-               goto err;
+               goto err_fill_gen_info;
 
-       return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
-err:
+       return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
+
+err_fill_gen_info:
        kfree_skb(skb2);
        return err;
 }
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 7f2c1915763f..9bacddc761ba 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -148,10 +148,15 @@ int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error)
 }
 EXPORT_SYMBOL_GPL(nfnetlink_set_err);
 
-int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid,
-                     int flags)
+int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid)
 {
-       return netlink_unicast(net->nfnl, skb, portid, flags);
+       int err;
+
+       err = nlmsg_unicast(net->nfnl, skb, portid);
+       if (err == -EAGAIN)
+               err = -ENOBUFS;
+
+       return err;
 }
 EXPORT_SYMBOL_GPL(nfnetlink_unicast);
 
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 332c69d27b47..25298b3eb854 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -359,8 +359,7 @@ __nfulnl_send(struct nfulnl_instance *inst)
                        goto out;
                }
        }
-       nfnetlink_unicast(inst->skb, inst->net, inst->peer_portid,
-                         MSG_DONTWAIT);
+       nfnetlink_unicast(inst->skb, inst->net, inst->peer_portid);
 out:
        inst->qlen = 0;
        inst->skb = NULL;
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index d33094f4ec41..f81a3ce0fe48 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -685,7 +685,7 @@ __nfqnl_enqueue_packet(struct net *net, struct nfqnl_instance *queue,
        *packet_id_ptr = htonl(entry->id);
 
        /* nfnetlink_unicast will either free the nskb or add it to a socket */
-       err = nfnetlink_unicast(nskb, net, queue->peer_portid, MSG_DONTWAIT);
+       err = nfnetlink_unicast(nskb, net, queue->peer_portid);
        if (err < 0) {
                if (queue->flags & NFQA_CFG_F_FAIL_OPEN) {
                        failopen = 1;
diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
index 19446a89a2a8..b1a9f330a51f 100644
--- a/net/netfilter/nft_payload.c
+++ b/net/netfilter/nft_payload.c
@@ -79,7 +79,9 @@ static void nft_payload_eval(const struct nft_expr *expr,
        u32 *dest = &regs->data[priv->dreg];
        int offset;
 
-       dest[priv->len / NFT_REG32_SIZE] = 0;
+       if (priv->len % NFT_REG32_SIZE)
+               dest[priv->len / NFT_REG32_SIZE] = 0;
+
        switch (priv->base) {
        case NFT_PAYLOAD_LL_HEADER:
                if (!skb_mac_header_was_set(skb))
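
Both this hunk and the nft_data_copy() change earlier in the series zero only the final 32-bit register word, and only when the copied length is not a multiple of NFT_REG32_SIZE, so a copy that exactly fills its registers no longer touches the word after them. For example, with len = 6 the word dst[1] is cleared first and memcpy() then fills bytes 0..5, leaving the two trailing bytes zeroed. A small userspace model of that padding (NFT_REG32_SIZE is 4, matching the kernel definition):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define NFT_REG32_SIZE	4

/* Userspace model of nft_data_copy(): pad the last register word with
 * zeroes when len is not a multiple of the register size.
 */
static void data_copy(uint32_t *dst, const void *src, unsigned int len)
{
	if (len % NFT_REG32_SIZE)
		dst[len / NFT_REG32_SIZE] = 0;
	memcpy(dst, src, len);
}

int main(void)
{
	uint32_t regs[2] = { 0xdeadbeef, 0xdeadbeef };
	uint8_t payload[6] = { 1, 2, 3, 4, 5, 6 };

	data_copy(regs, payload, sizeof(payload));	/* len = 6: regs[1] padded */
	printf("%08x %08x\n", regs[0], regs[1]);	/* top bytes of regs[1] are 0 */
	return 0;
}
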
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 32f575857e41..935aebf15010 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -2936,6 +2936,9 @@ int regulatory_hint_user(const char *alpha2,
        if (WARN_ON(!alpha2))
                return -EINVAL;
 
+       if (!is_world_regdom(alpha2) && !is_an_alpha2(alpha2))
+               return -EINVAL;
+
        request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL);
        if (!request)
                return -ENOMEM;
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 161b0224d6ae..7eb944cbbaea 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -2541,8 +2541,8 @@ sub process {
 
 # Check if the commit log has what seems like a diff which can confuse patch
                if ($in_commit_log && !$commit_log_has_diff &&
-                   (($line =~ m@^\s+diff\b.*a/[\w/]+@ &&
-                     $line =~ m@^\s+diff\b.*a/([\w/]+)\s+b/$1\b@) ||
+                   (($line =~ m@^\s+diff\b.*a/([\w/]+)@ &&
+                     $line =~ m@^\s+diff\b.*a/[\w/]+\s+b/$1\b@) ||
                     $line =~ m@^\s*(?:\-\-\-\s+a/|\+\+\+\s+b/)@ ||
                     $line =~ m/^\s*\@\@ \-\d+,\d+ \+\d+,\d+ \@\@/)) {
                        ERROR("DIFF_IN_COMMIT_MSG",
diff --git a/sound/core/oss/mulaw.c b/sound/core/oss/mulaw.c
index 3788906421a7..fe27034f2846 100644
--- a/sound/core/oss/mulaw.c
+++ b/sound/core/oss/mulaw.c
@@ -329,8 +329,8 @@ int snd_pcm_plugin_build_mulaw(struct snd_pcm_substream *plug,
                snd_BUG();
                return -EINVAL;
        }
-       if (snd_BUG_ON(!snd_pcm_format_linear(format->format)))
-               return -ENXIO;
+       if (!snd_pcm_format_linear(format->format))
+               return -EINVAL;
 
        err = snd_pcm_plugin_build(plug, "Mu-Law<->linear conversion",
                                   src_format, dst_format,
diff --git a/sound/firewire/digi00x/digi00x.c b/sound/firewire/digi00x/digi00x.c
index ef689997d6a5..bf53e342788e 100644
--- a/sound/firewire/digi00x/digi00x.c
+++ b/sound/firewire/digi00x/digi00x.c
@@ -15,6 +15,7 @@ MODULE_LICENSE("GPL v2");
 #define VENDOR_DIGIDESIGN      0x00a07e
 #define MODEL_CONSOLE          0x000001
 #define MODEL_RACK             0x000002
+#define SPEC_VERSION           0x000001
 
 static int name_card(struct snd_dg00x *dg00x)
 {
@@ -185,14 +186,18 @@ static const struct ieee1394_device_id snd_dg00x_id_table[] = {
        /* Both of 002/003 use the same ID. */
        {
                .match_flags = IEEE1394_MATCH_VENDOR_ID |
+                              IEEE1394_MATCH_VERSION |
                               IEEE1394_MATCH_MODEL_ID,
                .vendor_id = VENDOR_DIGIDESIGN,
+               .version = SPEC_VERSION,
                .model_id = MODEL_CONSOLE,
        },
        {
                .match_flags = IEEE1394_MATCH_VENDOR_ID |
+                              IEEE1394_MATCH_VERSION |
                               IEEE1394_MATCH_MODEL_ID,
                .vendor_id = VENDOR_DIGIDESIGN,
+               .version = SPEC_VERSION,
                .model_id = MODEL_RACK,
        },
        {}
diff --git a/sound/pci/ca0106/ca0106_main.c b/sound/pci/ca0106/ca0106_main.c
index cd27b5536654..675b812e96d6 100644
--- a/sound/pci/ca0106/ca0106_main.c
+++ b/sound/pci/ca0106/ca0106_main.c
@@ -551,7 +551,8 @@ static int snd_ca0106_pcm_power_dac(struct snd_ca0106 *chip, int channel_id,
                else
                        /* Power down */
                        chip->spi_dac_reg[reg] |= bit;
-               return snd_ca0106_spi_write(chip, chip->spi_dac_reg[reg]);
+               if (snd_ca0106_spi_write(chip, chip->spi_dac_reg[reg]) != 0)
+                       return -ENXIO;
        }
        return 0;
 }
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 419d099b5582..b8e5f2b19ff8 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -2574,6 +2574,7 @@ static void i915_pin_cvt_fixup(struct hda_codec *codec,
                               hda_nid_t cvt_nid)
 {
        if (per_pin) {
+               haswell_verify_D0(codec, per_pin->cvt_nid, per_pin->pin_nid);
                snd_hda_set_dev_select(codec, per_pin->pin_nid,
                               per_pin->dev_id);
                intel_verify_pin_cvt_connect(codec, per_pin);
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 9c5b3d19bfa7..8092fd5617fa 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -2452,6 +2452,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1462, 0x1276, "MSI-GL73", ALC1220_FIXUP_CLEVO_P950),
        SND_PCI_QUIRK(0x1462, 0x1293, "MSI-GP65", ALC1220_FIXUP_CLEVO_P950),
        SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
+       SND_PCI_QUIRK(0x1462, 0x9c37, "MSI X570-A PRO", ALC1220_FIXUP_CLEVO_P950),
        SND_PCI_QUIRK(0x1462, 0xda57, "MSI Z270-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
        SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
        SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX),
diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h
index f35eb72739c0..a45e7b4f0316 100644
--- a/tools/include/uapi/linux/perf_event.h
+++ b/tools/include/uapi/linux/perf_event.h
@@ -1079,7 +1079,7 @@ union perf_mem_data_src {
 
 #define PERF_MEM_SNOOPX_FWD    0x01 /* forward */
 /* 1 free */
-#define PERF_MEM_SNOOPX_SHIFT  37
+#define PERF_MEM_SNOOPX_SHIFT  38
 
 /* locked instruction */
 #define PERF_MEM_LOCK_NA       0x01 /* not available */
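
The new shift value follows from the bit-field layout of union perf_mem_data_src in this header: the fields before mem_snoopx are mem_op (5 bits), mem_lvl (14), mem_snoop (5), mem_lock (2), mem_dtlb (7), mem_lvl_num (4) and mem_remote (1), which sum to 38, so the old value of 37 collided with the mem_remote bit. A trivial check of that arithmetic:

#include <assert.h>

int main(void)
{
	/* widths of mem_op, mem_lvl, mem_snoop, mem_lock, mem_dtlb,
	 * mem_lvl_num and mem_remote, in that order
	 */
	int snoopx_shift = 5 + 14 + 5 + 2 + 7 + 4 + 1;

	assert(snoopx_shift == 38);
	return 0;
}
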
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index 246dee081efd..edf2be251788 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -33,6 +33,10 @@ OPTIONS
         - a raw PMU event (eventsel+umask) in the form of rNNN where NNN is a
          hexadecimal event descriptor.
 
+        - a symbolic or raw PMU event followed by an optional colon
+         and a list of event modifiers, e.g., cpu-cycles:p.  See the
+         linkperf:perf-list[1] man page for details on event modifiers.
+
        - a symbolically formed PMU event like 'pmu/param1=0x3,param2/' where
          'param1', 'param2', etc are defined as formats for the PMU in
          /sys/bus/event_source/devices/<pmu>/format/*.
diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
index b10a90b6a718..239af8f71f79 100644
--- a/tools/perf/Documentation/perf-stat.txt
+++ b/tools/perf/Documentation/perf-stat.txt
@@ -39,6 +39,10 @@ report::
        - a raw PMU event (eventsel+umask) in the form of rNNN where NNN is a
          hexadecimal event descriptor.
 
+        - a symbolic or raw PMU event followed by an optional colon
+         and a list of event modifiers, e.g., cpu-cycles:p.  See the
+         linkperf:perf-list[1] man page for details on event modifiers.
+
        - a symbolically formed event like 'pmu/param1=0x3,param2/' where
          param1 and param2 are defined as formats for the PMU in
          /sys/bus/event_source/devices/<pmu>/format/*
diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c
index 38b5888ef7b3..c17e59404171 100644
--- a/tools/perf/pmu-events/jevents.c
+++ b/tools/perf/pmu-events/jevents.c
@@ -137,7 +137,7 @@ static char *fixregex(char *s)
                return s;
 
        /* allocate space for a new string */
-       fixed = (char *) malloc(len + 1);
+       fixed = (char *) malloc(len + esc_count + 1);
        if (!fixed)
                return NULL;
 
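
fixregex() inserts a backslash in front of each character it escapes, so the output can be up to esc_count bytes longer than the input; allocating only len + 1 overflowed the buffer by one byte per escaped character. A simplified userspace model of the shape of that code, escaping ',' purely as an example (the real function escapes a different character set):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *escape_commas(const char *s)
{
	size_t len = strlen(s), esc_count = 0;
	const char *p;
	char *fixed, *q;

	for (p = s; *p; p++)
		if (*p == ',')
			esc_count++;

	/* one extra byte per escaped character, plus the NUL */
	fixed = malloc(len + esc_count + 1);
	if (!fixed)
		return NULL;

	for (p = s, q = fixed; *p; p++) {
		if (*p == ',')
			*q++ = '\\';
		*q++ = *p;
	}
	*q = '\0';
	return fixed;
}

int main(void)
{
	char *out = escape_commas("event=0x3,umask=0x1");

	if (out)
		printf("%s\n", out);	/* event=0x3\,umask=0x1 */
	free(out);
	return 0;
}
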
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index 9b552c0fc47d..4e202217fae1 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -1017,6 +1017,8 @@ static void __run_parallel(int tasks, void (*fn)(int task, void *data),
        pid_t pid[tasks];
        int i;
 
+       fflush(stdout);
+
        for (i = 0; i < tasks; i++) {
                pid[i] = fork();
                if (pid[i] == 0) {
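
The fflush() matters because fork() duplicates any output still sitting in stdout's stdio buffer, and each child flushes its own copy when it exits, so test output redirected to a file appeared once per worker. A minimal standalone demonstration (run with stdout redirected to a file so it is fully buffered):

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/wait.h>

int main(void)
{
	printf("buffered once\n");	/* still in the stdio buffer when redirected */

	/* Uncomment the next line (what the selftest now does before forking
	 * its workers) and the line above appears only once in the file.
	 */
	/* fflush(stdout); */

	if (fork() == 0)
		exit(0);		/* child: exit() flushes its copy of the buffer */

	wait(NULL);
	return 0;			/* parent flushes its copy too -> duplicate line */
}
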