commit:     333bb572330727d30ce9e4e2b5563e63819eda44
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Aug 30 10:05:12 2017 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Aug 30 10:05:12 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=333bb572

Linux patch 4.12.10

 0000_README              |    4 +
 1009_linux-4.12.10.patch | 3576 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3580 insertions(+)

diff --git a/0000_README b/0000_README
index 90242d0..a64a189 100644
--- a/0000_README
+++ b/0000_README
@@ -79,6 +79,10 @@ Patch:  1008_linux-4.12.9.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.12.9
 
+Patch:  1009_linux-4.12.10.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.12.10
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1009_linux-4.12.10.patch b/1009_linux-4.12.10.patch
new file mode 100644
index 0000000..a2ab6c1
--- /dev/null
+++ b/1009_linux-4.12.10.patch
@@ -0,0 +1,3576 @@
+diff --git a/Makefile b/Makefile
+index a6c2a5e7a48d..6889ec6a091d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 12
+-SUBLEVEL = 9
++SUBLEVEL = 10
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+ 
+diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
+index 19ebddffb279..02fd1cece6ef 100644
+--- a/arch/arc/include/asm/cache.h
++++ b/arch/arc/include/asm/cache.h
+@@ -96,7 +96,9 @@ extern unsigned long perip_base, perip_end;
+ #define ARC_REG_SLC_FLUSH     0x904
+ #define ARC_REG_SLC_INVALIDATE        0x905
+ #define ARC_REG_SLC_RGN_START 0x914
++#define ARC_REG_SLC_RGN_START1        0x915
+ #define ARC_REG_SLC_RGN_END   0x916
++#define ARC_REG_SLC_RGN_END1  0x917
+ 
+ /* Bit val in SLC_CONTROL */
+ #define SLC_CTRL_DIS          0x001
+diff --git a/arch/arc/include/asm/mmu.h b/arch/arc/include/asm/mmu.h
+index db7319e9b506..efb79fafff1d 100644
+--- a/arch/arc/include/asm/mmu.h
++++ b/arch/arc/include/asm/mmu.h
+@@ -94,6 +94,8 @@ static inline int is_pae40_enabled(void)
+       return IS_ENABLED(CONFIG_ARC_HAS_PAE40);
+ }
+ 
++extern int pae40_exist_but_not_enab(void);
++
+ #endif        /* !__ASSEMBLY__ */
+ 
+ #endif
+diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
+index a867575a758b..7db283b46ebd 100644
+--- a/arch/arc/mm/cache.c
++++ b/arch/arc/mm/cache.c
+@@ -665,6 +665,7 @@ noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op)
+       static DEFINE_SPINLOCK(lock);
+       unsigned long flags;
+       unsigned int ctrl;
++      phys_addr_t end;
+ 
+       spin_lock_irqsave(&lock, flags);
+ 
+@@ -694,8 +695,19 @@ noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op)
+        * END needs to be setup before START (latter triggers the operation)
+        * END can't be same as START, so add (l2_line_sz - 1) to sz
+        */
+-      write_aux_reg(ARC_REG_SLC_RGN_END, (paddr + sz + l2_line_sz - 1));
+-      write_aux_reg(ARC_REG_SLC_RGN_START, paddr);
++      end = paddr + sz + l2_line_sz - 1;
++      if (is_pae40_enabled())
++              write_aux_reg(ARC_REG_SLC_RGN_END1, upper_32_bits(end));
++
++      write_aux_reg(ARC_REG_SLC_RGN_END, lower_32_bits(end));
++
++      if (is_pae40_enabled())
++              write_aux_reg(ARC_REG_SLC_RGN_START1, upper_32_bits(paddr));
++
++      write_aux_reg(ARC_REG_SLC_RGN_START, lower_32_bits(paddr));
++
++      /* Make sure "busy" bit reports correct status, see STAR 9001165532 */
++      read_aux_reg(ARC_REG_SLC_CTRL);
+ 
+       while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);
+ 
+@@ -1111,6 +1123,13 @@ noinline void __init arc_ioc_setup(void)
+       __dc_enable();
+ }
+ 
++/*
++ * Cache related boot time checks/setups only needed on master CPU:
++ *  - Geometry checks (kernel build and hardware agree: e.g. L1_CACHE_BYTES)
++ *    Assume SMP only, so all cores will have same cache config. A check on
++ *    one core suffices for all
++ *  - IOC setup / dma callbacks only need to be done once
++ */
+ void __init arc_cache_init_master(void)
+ {
+       unsigned int __maybe_unused cpu = smp_processor_id();
+@@ -1190,12 +1209,27 @@ void __ref arc_cache_init(void)
+ 
+       printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
+ 
+-      /*
+-       * Only master CPU needs to execute rest of function:
+-       *  - Assume SMP so all cores will have same cache config so
+-       *    any geomtry checks will be same for all
+-       *  - IOC setup / dma callbacks only need to be setup once
+-       */
+       if (!cpu)
+               arc_cache_init_master();
++
++      /*
++       * In PAE regime, TLB and cache maintenance ops take wider addresses
++       * And even if PAE is not enabled in kernel, the upper 32-bits still need
++       * to be zeroed to keep the ops sane.
++       * As an optimization for more common !PAE enabled case, zero them out
++       * once at init, rather than checking/setting to 0 for every runtime op
++       */
++      if (is_isa_arcv2() && pae40_exist_but_not_enab()) {
++
++              if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE))
++                      write_aux_reg(ARC_REG_IC_PTAG_HI, 0);
++
++              if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE))
++                      write_aux_reg(ARC_REG_DC_PTAG_HI, 0);
++
++              if (l2_line_sz) {
++                      write_aux_reg(ARC_REG_SLC_RGN_END1, 0);
++                      write_aux_reg(ARC_REG_SLC_RGN_START1, 0);
++              }
++      }
+ }
+diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
+index d0126fdfe2d8..b181f3ee38aa 100644
+--- a/arch/arc/mm/tlb.c
++++ b/arch/arc/mm/tlb.c
+@@ -104,6 +104,8 @@
+ /* A copy of the ASID from the PID reg is kept in asid_cache */
+ DEFINE_PER_CPU(unsigned int, asid_cache) = MM_CTXT_FIRST_CYCLE;
+ 
++static int __read_mostly pae_exists;
++
+ /*
+  * Utility Routine to erase a J-TLB entry
+  * Caller needs to setup Index Reg (manually or via getIndex)
+@@ -784,7 +786,7 @@ void read_decode_mmu_bcr(void)
+               mmu->u_dtlb = mmu4->u_dtlb * 4;
+               mmu->u_itlb = mmu4->u_itlb * 4;
+               mmu->sasid = mmu4->sasid;
+-              mmu->pae = mmu4->pae;
++              pae_exists = mmu->pae = mmu4->pae;
+       }
+ }
+ 
+@@ -809,6 +811,11 @@ char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
+       return buf;
+ }
+ 
++int pae40_exist_but_not_enab(void)
++{
++      return pae_exists && !is_pae40_enabled();
++}
++
+ void arc_mmu_init(void)
+ {
+       char str[256];
+@@ -859,6 +866,9 @@ void arc_mmu_init(void)
+       /* swapper_pg_dir is the pgd for the kernel, used by vmalloc */
+       write_aux_reg(ARC_REG_SCRATCH_DATA0, swapper_pg_dir);
+ #endif
++
++      if (pae40_exist_but_not_enab())
++              write_aux_reg(ARC_REG_TLBPD1HI, 0);
+ }
+ 
+ /*
+diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
+index 06da8ea16bbe..c7b4995868e1 100644
+--- a/arch/arm64/kernel/fpsimd.c
++++ b/arch/arm64/kernel/fpsimd.c
+@@ -161,9 +161,11 @@ void fpsimd_flush_thread(void)
+ {
+       if (!system_supports_fpsimd())
+               return;
++      preempt_disable();
+       memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
+       fpsimd_flush_task_state(current);
+       set_thread_flag(TIF_FOREIGN_FPSTATE);
++      preempt_enable();
+ }
+ 
+ /*
+diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
+index da7e9432fa8f..db80b301c080 100644
+--- a/arch/powerpc/include/asm/mmu_context.h
++++ b/arch/powerpc/include/asm/mmu_context.h
+@@ -80,9 +80,27 @@ static inline void switch_mm_irqs_off(struct mm_struct *prev,
+                                     struct task_struct *tsk)
+ {
+       /* Mark this context has been used on the new CPU */
+-      if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next)))
++      if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next))) {
+               cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
+ 
++              /*
++               * This full barrier orders the store to the cpumask above vs
++               * a subsequent operation which allows this CPU to begin loading
++               * translations for next.
++               *
++               * When using the radix MMU that operation is the load of the
++               * MMU context id, which is then moved to SPRN_PID.
++               *
++               * For the hash MMU it is either the first load from slb_cache
++               * in switch_slb(), and/or the store of paca->mm_ctx_id in
++               * copy_mm_to_paca().
++               *
++               * On the read side the barrier is in pte_xchg(), which orders
++               * the store to the PTE vs the load of mm_cpumask.
++               */
++              smp_mb();
++      }
++
+       /* 32-bit keeps track of the current PGDIR in the thread struct */
+ #ifdef CONFIG_PPC32
+       tsk->thread.pgdir = next->pgd;
+diff --git a/arch/powerpc/include/asm/pgtable-be-types.h b/arch/powerpc/include/asm/pgtable-be-types.h
+index 9c0f5db5cf46..67e7e3d990f4 100644
+--- a/arch/powerpc/include/asm/pgtable-be-types.h
++++ b/arch/powerpc/include/asm/pgtable-be-types.h
+@@ -87,6 +87,7 @@ static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new)
+       unsigned long *p = (unsigned long *)ptep;
+       __be64 prev;
+ 
++      /* See comment in switch_mm_irqs_off() */
+       prev = (__force __be64)__cmpxchg_u64(p, (__force unsigned long)pte_raw(old),
+                                            (__force unsigned long)pte_raw(new));
+ 
+diff --git a/arch/powerpc/include/asm/pgtable-types.h b/arch/powerpc/include/asm/pgtable-types.h
+index 8bd3b13fe2fb..369a164b545c 100644
+--- a/arch/powerpc/include/asm/pgtable-types.h
++++ b/arch/powerpc/include/asm/pgtable-types.h
+@@ -62,6 +62,7 @@ static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new)
+ {
+       unsigned long *p = (unsigned long *)ptep;
+ 
++      /* See comment in switch_mm_irqs_off() */
+       return pte_val(old) == __cmpxchg_u64(p, pte_val(old), pte_val(new));
+ }
+ #endif
+diff --git a/arch/s390/kvm/sthyi.c b/arch/s390/kvm/sthyi.c
+index 926b5244263e..a2e5c24f47a7 100644
+--- a/arch/s390/kvm/sthyi.c
++++ b/arch/s390/kvm/sthyi.c
+@@ -394,7 +394,7 @@ static int sthyi(u64 vaddr)
+               "srl     %[cc],28\n"
+               : [cc] "=d" (cc)
+               : [code] "d" (code), [addr] "a" (addr)
+-              : "memory", "cc");
++              : "3", "memory", "cc");
+       return cc;
+ }
+ 
+@@ -425,7 +425,7 @@ int handle_sthyi(struct kvm_vcpu *vcpu)
+       VCPU_EVENT(vcpu, 3, "STHYI: fc: %llu addr: 0x%016llx", code, addr);
+       trace_kvm_s390_handle_sthyi(vcpu, code, addr);
+ 
+-      if (reg1 == reg2 || reg1 & 1 || reg2 & 1 || addr & ~PAGE_MASK)
++      if (reg1 == reg2 || reg1 & 1 || reg2 & 1)
+               return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+ 
+       if (code & 0xffff) {
+@@ -433,6 +433,9 @@ int handle_sthyi(struct kvm_vcpu *vcpu)
+               goto out;
+       }
+ 
++      if (addr & ~PAGE_MASK)
++              return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
++
+       /*
+        * If the page has not yet been faulted in, we want to do that
+        * now and not after all the expensive calculations.
+diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
+index 68bec7c97cb8..af6ac9c5d32e 100644
+--- a/arch/sparc/kernel/pci_sun4v.c
++++ b/arch/sparc/kernel/pci_sun4v.c
+@@ -1241,8 +1241,6 @@ static int pci_sun4v_probe(struct platform_device *op)
+                        * ATU group, but ATU hcalls won't be available.
+                        */
+                       hv_atu = false;
+-                      pr_err(PFX "Could not register hvapi ATU err=%d\n",
+-                             err);
+               } else {
+                       pr_info(PFX "Registered hvapi ATU major[%lu] minor[%lu]\n",
+                               vatu_major, vatu_minor);
+diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
+index 255645f60ca2..554cdb205d17 100644
+--- a/arch/x86/include/asm/fpu/internal.h
++++ b/arch/x86/include/asm/fpu/internal.h
+@@ -450,10 +450,10 @@ static inline int copy_fpregs_to_fpstate(struct fpu *fpu)
+       return 0;
+ }
+ 
+-static inline void __copy_kernel_to_fpregs(union fpregs_state *fpstate)
++static inline void __copy_kernel_to_fpregs(union fpregs_state *fpstate, u64 mask)
+ {
+       if (use_xsave()) {
+-              copy_kernel_to_xregs(&fpstate->xsave, -1);
++              copy_kernel_to_xregs(&fpstate->xsave, mask);
+       } else {
+               if (use_fxsr())
+                       copy_kernel_to_fxregs(&fpstate->fxsave);
+@@ -477,7 +477,7 @@ static inline void copy_kernel_to_fpregs(union fpregs_state *fpstate)
+                       : : [addr] "m" (fpstate));
+       }
+ 
+-      __copy_kernel_to_fpregs(fpstate);
++      __copy_kernel_to_fpregs(fpstate, -1);
+ }
+ 
+ extern int copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size);
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 695605eb1dfb..ed8fdf86acfb 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -486,6 +486,7 @@ struct kvm_vcpu_arch {
+       unsigned long cr4;
+       unsigned long cr4_guest_owned_bits;
+       unsigned long cr8;
++      u32 pkru;
+       u32 hflags;
+       u64 efer;
+       u64 apic_base;
+diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
+index 68b329d77b3a..8463a136f711 100644
+--- a/arch/x86/include/asm/mmu_context.h
++++ b/arch/x86/include/asm/mmu_context.h
+@@ -116,9 +116,7 @@ static inline int init_new_context(struct task_struct *tsk,
+               mm->context.execute_only_pkey = -1;
+       }
+       #endif
+-      init_new_context_ldt(tsk, mm);
+-
+-      return 0;
++      return init_new_context_ldt(tsk, mm);
+ }
+ static inline void destroy_context(struct mm_struct *mm)
+ {
+diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
+index 59ca2eea522c..19adbb418443 100644
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -469,7 +469,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
+                       entry->ecx &= kvm_cpuid_7_0_ecx_x86_features;
+                       cpuid_mask(&entry->ecx, CPUID_7_ECX);
+                       /* PKU is not yet implemented for shadow paging. */
+-                      if (!tdp_enabled)
++                      if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
+                               entry->ecx &= ~F(PKU);
+                       entry->edx &= kvm_cpuid_7_0_edx_x86_features;
+                       entry->edx &= get_scattered_cpuid_leaf(7, 0, CPUID_EDX);
+diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
+index 762cdf2595f9..e1e89ee4af75 100644
+--- a/arch/x86/kvm/kvm_cache_regs.h
++++ b/arch/x86/kvm/kvm_cache_regs.h
+@@ -84,11 +84,6 @@ static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
+               | ((u64)(kvm_register_read(vcpu, VCPU_REGS_RDX) & -1u) << 32);
+ }
+ 
+-static inline u32 kvm_read_pkru(struct kvm_vcpu *vcpu)
+-{
+-      return kvm_x86_ops->get_pkru(vcpu);
+-}
+-
+ static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
+ {
+       vcpu->arch.hflags |= HF_GUEST_MASK;
+diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
+index 330bf3a811fb..b0d36a229d2e 100644
+--- a/arch/x86/kvm/mmu.h
++++ b/arch/x86/kvm/mmu.h
+@@ -182,7 +182,7 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+               * index of the protection domain, so pte_pkey * 2 is
+               * is the index of the first bit for the domain.
+               */
+-              pkru_bits = (kvm_read_pkru(vcpu) >> (pte_pkey * 2)) & 3;
++              pkru_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3;
+ 
+               /* clear present bit, replace PFEC.RSVD with ACC_USER_MASK. */
+               offset = (pfec & ~1) +
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index ba9891ac5c56..58dbca7f2106 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -1725,11 +1725,6 @@ static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
+       to_svm(vcpu)->vmcb->save.rflags = rflags;
+ }
+ 
+-static u32 svm_get_pkru(struct kvm_vcpu *vcpu)
+-{
+-      return 0;
+-}
+-
+ static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
+ {
+       switch (reg) {
+@@ -5313,8 +5308,6 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
+       .get_rflags = svm_get_rflags,
+       .set_rflags = svm_set_rflags,
+ 
+-      .get_pkru = svm_get_pkru,
+-
+       .tlb_flush = svm_flush_tlb,
+ 
+       .run = svm_vcpu_run,
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 270d83da090c..2461e1a53f8c 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -636,8 +636,6 @@ struct vcpu_vmx {
+ 
+       u64 current_tsc_ratio;
+ 
+-      bool guest_pkru_valid;
+-      u32 guest_pkru;
+       u32 host_pkru;
+ 
+       /*
+@@ -2368,11 +2366,6 @@ static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
+       vmcs_writel(GUEST_RFLAGS, rflags);
+ }
+ 
+-static u32 vmx_get_pkru(struct kvm_vcpu *vcpu)
+-{
+-      return to_vmx(vcpu)->guest_pkru;
+-}
+-
+ static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
+ {
+       u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
+@@ -8860,8 +8853,10 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+       if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
+               vmx_set_interrupt_shadow(vcpu, 0);
+ 
+-      if (vmx->guest_pkru_valid)
+-              __write_pkru(vmx->guest_pkru);
++      if (static_cpu_has(X86_FEATURE_PKU) &&
++          kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
++          vcpu->arch.pkru != vmx->host_pkru)
++              __write_pkru(vcpu->arch.pkru);
+ 
+       atomic_switch_perf_msrs(vmx);
+       debugctlmsr = get_debugctlmsr();
+@@ -9009,13 +9004,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+        * back on host, so it is safe to read guest PKRU from current
+        * XSAVE.
+        */
+-      if (boot_cpu_has(X86_FEATURE_OSPKE)) {
+-              vmx->guest_pkru = __read_pkru();
+-              if (vmx->guest_pkru != vmx->host_pkru) {
+-                      vmx->guest_pkru_valid = true;
++      if (static_cpu_has(X86_FEATURE_PKU) &&
++          kvm_read_cr4_bits(vcpu, X86_CR4_PKE)) {
++              vcpu->arch.pkru = __read_pkru();
++              if (vcpu->arch.pkru != vmx->host_pkru)
+                       __write_pkru(vmx->host_pkru);
+-              } else
+-                      vmx->guest_pkru_valid = false;
+       }
+ 
+       /*
+@@ -11507,8 +11500,6 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
+       .get_rflags = vmx_get_rflags,
+       .set_rflags = vmx_set_rflags,
+ 
+-      .get_pkru = vmx_get_pkru,
+-
+       .tlb_flush = vmx_flush_tlb,
+ 
+       .run = vmx_vcpu_run,
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 0e846f0cb83b..786e47fc6092 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -3236,7 +3236,12 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
+                       u32 size, offset, ecx, edx;
+                       cpuid_count(XSTATE_CPUID, index,
+                                   &size, &offset, &ecx, &edx);
+-                      memcpy(dest + offset, src, size);
++                      if (feature == XFEATURE_MASK_PKRU)
++                              memcpy(dest + offset, &vcpu->arch.pkru,
++                                     sizeof(vcpu->arch.pkru));
++                      else
++                              memcpy(dest + offset, src, size);
++
+               }
+ 
+               valid -= feature;
+@@ -3274,7 +3279,11 @@ static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
+                       u32 size, offset, ecx, edx;
+                       cpuid_count(XSTATE_CPUID, index,
+                                   &size, &offset, &ecx, &edx);
+-                      memcpy(dest, src + offset, size);
++                      if (feature == XFEATURE_MASK_PKRU)
++                              memcpy(&vcpu->arch.pkru, src + offset,
++                                     sizeof(vcpu->arch.pkru));
++                      else
++                              memcpy(dest, src + offset, size);
+               }
+ 
+               valid -= feature;
+@@ -7616,7 +7625,9 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
+        */
+       vcpu->guest_fpu_loaded = 1;
+       __kernel_fpu_begin();
+-      __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state);
++      /* PKRU is separately restored in kvm_x86_ops->run.  */
++      __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state,
++                              ~XFEATURE_MASK_PKRU);
+       trace_kvm_fpu(1);
+ }
+ 
+diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c
+index fc6c416f8724..d5999eb41c00 100644
+--- a/drivers/acpi/acpi_apd.c
++++ b/drivers/acpi/acpi_apd.c
+@@ -180,8 +180,8 @@ static const struct acpi_device_id acpi_apd_device_ids[] = {
+       { "APMC0D0F", APD_ADDR(xgene_i2c_desc) },
+       { "BRCM900D", APD_ADDR(vulcan_spi_desc) },
+       { "CAV900D",  APD_ADDR(vulcan_spi_desc) },
+-      { "HISI0A21", APD_ADDR(hip07_i2c_desc) },
+-      { "HISI0A22", APD_ADDR(hip08_i2c_desc) },
++      { "HISI02A1", APD_ADDR(hip07_i2c_desc) },
++      { "HISI02A2", APD_ADDR(hip08_i2c_desc) },
+ #endif
+       { }
+ };
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index cfad5d9a22f3..d8b2779b0140 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -1703,7 +1703,7 @@ int __init acpi_ec_dsdt_probe(void)
+  * functioning ECDT EC first in order to handle the events.
+  * https://bugzilla.kernel.org/show_bug.cgi?id=115021
+  */
+-int __init acpi_ec_ecdt_start(void)
++static int __init acpi_ec_ecdt_start(void)
+ {
+       acpi_handle handle;
+ 
+@@ -1906,20 +1906,17 @@ static inline void acpi_ec_query_exit(void)
+ int __init acpi_ec_init(void)
+ {
+       int result;
++      int ecdt_fail, dsdt_fail;
+ 
+       /* register workqueue for _Qxx evaluations */
+       result = acpi_ec_query_init();
+       if (result)
+-              goto err_exit;
+-      /* Now register the driver for the EC */
+-      result = acpi_bus_register_driver(&acpi_ec_driver);
+-      if (result)
+-              goto err_exit;
++              return result;
+ 
+-err_exit:
+-      if (result)
+-              acpi_ec_query_exit();
+-      return result;
++      /* Drivers must be started after acpi_ec_query_init() */
++      ecdt_fail = acpi_ec_ecdt_start();
++      dsdt_fail = acpi_bus_register_driver(&acpi_ec_driver);
++      return ecdt_fail && dsdt_fail ? -ENODEV : 0;
+ }
+ 
+ /* EC driver currently not unloadable */
+diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
+index 66229ffa909b..7e66f3c72b81 100644
+--- a/drivers/acpi/internal.h
++++ b/drivers/acpi/internal.h
+@@ -185,7 +185,6 @@ typedef int (*acpi_ec_query_func) (void *data);
+ int acpi_ec_init(void);
+ int acpi_ec_ecdt_probe(void);
+ int acpi_ec_dsdt_probe(void);
+-int acpi_ec_ecdt_start(void);
+ void acpi_ec_block_transactions(void);
+ void acpi_ec_unblock_transactions(void);
+ int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
+diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
+index 9364398204e9..6822ac9f106b 100644
+--- a/drivers/acpi/property.c
++++ b/drivers/acpi/property.c
+@@ -1046,7 +1046,7 @@ static struct fwnode_handle *acpi_graph_get_child_prop_value(
+       fwnode_for_each_child_node(fwnode, child) {
+               u32 nr;
+ 
+-              if (!fwnode_property_read_u32(fwnode, prop_name, &nr))
++              if (fwnode_property_read_u32(child, prop_name, &nr))
+                       continue;
+ 
+               if (val == nr)
+diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
+index d53162997f32..359d16c30002 100644
+--- a/drivers/acpi/scan.c
++++ b/drivers/acpi/scan.c
+@@ -2085,7 +2085,6 @@ int __init acpi_scan_init(void)
+ 
+       acpi_gpe_apply_masked_gpes();
+       acpi_update_all_gpes();
+-      acpi_ec_ecdt_start();
+ 
+       acpi_scan_initialized = true;
+ 
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index aae4d8d4be36..831cdd7d197d 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -2200,8 +2200,12 @@ static void binder_transaction(struct binder_proc *proc,
+       list_add_tail(&t->work.entry, target_list);
+       tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
+       list_add_tail(&tcomplete->entry, &thread->todo);
+-      if (target_wait)
+-              wake_up_interruptible(target_wait);
++      if (target_wait) {
++              if (reply || !(t->flags & TF_ONE_WAY))
++                      wake_up_interruptible_sync(target_wait);
++              else
++                      wake_up_interruptible(target_wait);
++      }
+       return;
+ 
+ err_translate_failed:
+@@ -3247,10 +3251,6 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+       /*pr_info("binder_ioctl: %d:%d %x %lx\n",
+                       proc->pid, current->pid, cmd, arg);*/
+ 
+-      if (unlikely(current->mm != proc->vma_vm_mm)) {
+-              pr_err("current mm mismatch proc mm\n");
+-              return -EINVAL;
+-      }
+       trace_binder_ioctl(cmd, arg);
+ 
+       ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
+@@ -3362,7 +3362,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
+       const char *failure_string;
+       struct binder_buffer *buffer;
+ 
+-      if (proc->tsk != current)
++      if (proc->tsk != current->group_leader)
+               return -EINVAL;
+ 
+       if ((vma->vm_end - vma->vm_start) > SZ_4M)
+@@ -3464,9 +3464,8 @@ static int binder_open(struct inode *nodp, struct file *filp)
+       proc = kzalloc(sizeof(*proc), GFP_KERNEL);
+       if (proc == NULL)
+               return -ENOMEM;
+-      get_task_struct(current);
+-      proc->tsk = current;
+-      proc->vma_vm_mm = current->mm;
++      get_task_struct(current->group_leader);
++      proc->tsk = current->group_leader;
+       INIT_LIST_HEAD(&proc->todo);
+       init_waitqueue_head(&proc->wait);
+       proc->default_priority = task_nice(current);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+index c0a806280257..f4a4efec8737 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+@@ -839,8 +839,6 @@ static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device,
+ 
+       mode_info = info->mode_info;
+       if (mode_info) {
+-              /* if the displays are off, vblank time is max */
+-              mode_info->vblank_time_us = 0xffffffff;
+               /* always set the reference clock */
+               mode_info->ref_clock = adev->clock.spll.reference_freq;
+       }
+diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
+index f32506a7c1d6..422404dbfabb 100644
+--- a/drivers/gpu/drm/drm_atomic.c
++++ b/drivers/gpu/drm/drm_atomic.c
+@@ -1581,6 +1581,9 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
+       if (config->funcs->atomic_check)
+               ret = config->funcs->atomic_check(state->dev, state);
+ 
++      if (ret)
++              return ret;
++
+       if (!state->allow_modeset) {
+               for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+                       if (drm_atomic_crtc_needs_modeset(crtc_state)) {
+@@ -1591,7 +1594,7 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
+               }
+       }
+ 
+-      return ret;
++      return 0;
+ }
+ EXPORT_SYMBOL(drm_atomic_check_only);
+ 
+@@ -2093,10 +2096,10 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
+       struct drm_atomic_state *state;
+       struct drm_modeset_acquire_ctx ctx;
+       struct drm_plane *plane;
+-      struct drm_out_fence_state *fence_state = NULL;
++      struct drm_out_fence_state *fence_state;
+       unsigned plane_mask;
+       int ret = 0;
+-      unsigned int i, j, num_fences = 0;
++      unsigned int i, j, num_fences;
+ 
+       /* disallow for drivers not supporting atomic: */
+       if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
+@@ -2137,6 +2140,8 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
+       plane_mask = 0;
+       copied_objs = 0;
+       copied_props = 0;
++      fence_state = NULL;
++      num_fences = 0;
+ 
+       for (i = 0; i < arg->count_objs; i++) {
+               uint32_t obj_id, count_props;
+diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
+index b1e28c944637..08e6e981104f 100644
+--- a/drivers/gpu/drm/drm_gem.c
++++ b/drivers/gpu/drm/drm_gem.c
+@@ -255,13 +255,13 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
+       struct drm_gem_object *obj = ptr;
+       struct drm_device *dev = obj->dev;
+ 
++      if (dev->driver->gem_close_object)
++              dev->driver->gem_close_object(obj, file_priv);
++
+       if (drm_core_check_feature(dev, DRIVER_PRIME))
+               drm_gem_remove_prime_handles(obj, file_priv);
+       drm_vma_node_revoke(&obj->vma_node, file_priv);
+ 
+-      if (dev->driver->gem_close_object)
+-              dev->driver->gem_close_object(obj, file_priv);
+-
+       drm_gem_object_handle_put_unlocked(obj);
+ 
+       return 0;
+diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
+index 5dc8c4350602..e40c12fabbde 100644
+--- a/drivers/gpu/drm/drm_plane.c
++++ b/drivers/gpu/drm/drm_plane.c
+@@ -601,6 +601,7 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
+ 
+               crtc = drm_crtc_find(dev, plane_req->crtc_id);
+               if (!crtc) {
++                      drm_framebuffer_put(fb);
+                       DRM_DEBUG_KMS("Unknown crtc ID %d\n",
+                                     plane_req->crtc_id);
+                       return -ENOENT;
+diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
+index 41b2c3aaa04a..37258b7d1bce 100644
+--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
++++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
+@@ -2754,7 +2754,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+ unmap_src:
+       i915_gem_object_unpin_map(obj);
+ put_obj:
+-      i915_gem_object_put(wa_ctx->indirect_ctx.obj);
++      i915_gem_object_put(obj);
+       return ret;
+ }
+ 
+diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
+index 639d45c1dd2e..7ea7fd1e8856 100644
+--- a/drivers/gpu/drm/i915/intel_bios.c
++++ b/drivers/gpu/drm/i915/intel_bios.c
+@@ -1120,8 +1120,8 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
+       bool is_dvi, is_hdmi, is_dp, is_edp, is_crt;
+       uint8_t aux_channel, ddc_pin;
+       /* Each DDI port can have more than one value on the "DVO Port" field,
+-       * so look for all the possible values for each port and abort if more
+-       * than one is found. */
++       * so look for all the possible values for each port.
++       */
+       int dvo_ports[][3] = {
+               {DVO_PORT_HDMIA, DVO_PORT_DPA, -1},
+               {DVO_PORT_HDMIB, DVO_PORT_DPB, -1},
+@@ -1130,7 +1130,10 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
+               {DVO_PORT_CRT, DVO_PORT_HDMIE, DVO_PORT_DPE},
+       };
+ 
+-      /* Find the child device to use, abort if more than one found. */
++      /*
++       * Find the first child device to reference the port, report if more
++       * than one found.
++       */
+       for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+               it = dev_priv->vbt.child_dev + i;
+ 
+@@ -1140,11 +1143,11 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
+ 
+                       if (it->common.dvo_port == dvo_ports[port][j]) {
+                               if (child) {
+-                                      DRM_DEBUG_KMS("More than one child device for port %c in VBT.\n",
++                                      DRM_DEBUG_KMS("More than one child device for port %c in VBT, using the first.\n",
+                                                     port_name(port));
+-                                      return;
++                              } else {
++                                      child = it;
+                               }
+-                              child = it;
+                       }
+               }
+       }
+diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
+index 8ddd72cd5873..05601ab27d7c 100644
+--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
++++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
+@@ -25,12 +25,20 @@
+ #include "sun4i_framebuffer.h"
+ #include "sun4i_tcon.h"
+ 
++static void sun4i_drv_lastclose(struct drm_device *dev)
++{
++      struct sun4i_drv *drv = dev->dev_private;
++
++      drm_fbdev_cma_restore_mode(drv->fbdev);
++}
++
+ DEFINE_DRM_GEM_CMA_FOPS(sun4i_drv_fops);
+ 
+ static struct drm_driver sun4i_drv_driver = {
+       .driver_features        = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC,
+ 
+       /* Generic Operations */
++      .lastclose              = sun4i_drv_lastclose,
+       .fops                   = &sun4i_drv_fops,
+       .name                   = "sun4i-drm",
+       .desc                   = "Allwinner sun4i Display Engine",
+diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
+index 0703da1d946a..eea71c4e969d 100644
+--- a/drivers/i2c/busses/i2c-designware-platdrv.c
++++ b/drivers/i2c/busses/i2c-designware-platdrv.c
+@@ -392,7 +392,7 @@ static void dw_i2c_plat_complete(struct device *dev)
+ #endif
+ 
+ #ifdef CONFIG_PM
+-static int dw_i2c_plat_suspend(struct device *dev)
++static int dw_i2c_plat_runtime_suspend(struct device *dev)
+ {
+       struct platform_device *pdev = to_platform_device(dev);
+       struct dw_i2c_dev *i_dev = platform_get_drvdata(pdev);
+@@ -414,11 +414,21 @@ static int dw_i2c_plat_resume(struct device *dev)
+       return 0;
+ }
+ 
++#ifdef CONFIG_PM_SLEEP
++static int dw_i2c_plat_suspend(struct device *dev)
++{
++      pm_runtime_resume(dev);
++      return dw_i2c_plat_runtime_suspend(dev);
++}
++#endif
++
+ static const struct dev_pm_ops dw_i2c_dev_pm_ops = {
+       .prepare = dw_i2c_plat_prepare,
+       .complete = dw_i2c_plat_complete,
+       SET_SYSTEM_SLEEP_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume)
+-      SET_RUNTIME_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume, NULL)
++      SET_RUNTIME_PM_OPS(dw_i2c_plat_runtime_suspend,
++                         dw_i2c_plat_resume,
++                         NULL)
+ };
+ 
+ #define DW_I2C_DEV_PMOPS (&dw_i2c_dev_pm_ops)
+diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+index 0b5dea050239..6dda332f252a 100644
+--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
++++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+@@ -36,8 +36,6 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
+       s32 poll_value = 0;
+ 
+       if (state) {
+-              if (!atomic_read(&st->user_requested_state))
+-                      return 0;
+               if (sensor_hub_device_open(st->hsdev))
+                       return -EIO;
+ 
+@@ -86,6 +84,9 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
+                                      &report_val);
+       }
+ 
++      pr_debug("HID_SENSOR %s set power_state %d report_state %d\n",
++               st->pdev->name, state_val, report_val);
++
+       sensor_hub_get_feature(st->hsdev, st->power_state.report_id,
+                              st->power_state.index,
+                              sizeof(state_val), &state_val);
+@@ -107,6 +108,7 @@ int hid_sensor_power_state(struct hid_sensor_common *st, bool state)
+               ret = pm_runtime_get_sync(&st->pdev->dev);
+       else {
+               pm_runtime_mark_last_busy(&st->pdev->dev);
++              pm_runtime_use_autosuspend(&st->pdev->dev);
+               ret = pm_runtime_put_autosuspend(&st->pdev->dev);
+       }
+       if (ret < 0) {
+@@ -205,8 +207,6 @@ int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name,
+       /* Default to 3 seconds, but can be changed from sysfs */
+       pm_runtime_set_autosuspend_delay(&attrb->pdev->dev,
+                                        3000);
+-      pm_runtime_use_autosuspend(&attrb->pdev->dev);
+-
+       return ret;
+ error_unreg_trigger:
+       iio_trigger_unregister(trig);
+diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
+index 8cf84d3488b2..12898424d838 100644
+--- a/drivers/iio/imu/adis16480.c
++++ b/drivers/iio/imu/adis16480.c
+@@ -696,7 +696,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
+               .gyro_max_val = IIO_RAD_TO_DEGREE(22500),
+               .gyro_max_scale = 450,
+               .accel_max_val = IIO_M_S_2_TO_G(12500),
+-              .accel_max_scale = 5,
++              .accel_max_scale = 10,
+       },
+       [ADIS16485] = {
+               .channels = adis16485_channels,
+diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
+index 8e1b0861fbe4..c38563699984 100644
+--- a/drivers/iio/magnetometer/st_magn_core.c
++++ b/drivers/iio/magnetometer/st_magn_core.c
+@@ -356,9 +356,7 @@ static const struct st_sensor_settings st_magn_sensors_settings[] = {
+               .drdy_irq = {
+                       .addr = 0x62,
+                       .mask_int1 = 0x01,
+-                      .addr_ihl = 0x63,
+-                      .mask_ihl = 0x04,
+-                      .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
++                      .addr_stat_drdy = 0x67,
+               },
+               .multi_read_bit = false,
+               .bootime = 2,
+diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
+index f3f9d0b5dce0..5ea2d80800f9 100644
+--- a/drivers/infiniband/core/uverbs_cmd.c
++++ b/drivers/infiniband/core/uverbs_cmd.c
+@@ -1015,7 +1015,7 @@ static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
+       cq->uobject       = &obj->uobject;
+       cq->comp_handler  = ib_uverbs_comp_handler;
+       cq->event_handler = ib_uverbs_cq_event_handler;
+-      cq->cq_context    = &ev_file->ev_queue;
++      cq->cq_context    = ev_file ? &ev_file->ev_queue : NULL;
+       atomic_set(&cq->usecnt, 0);
+ 
+       obj->uobject.object = cq;
+diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
+index 262d1057c1da..850b00e3ad8e 100644
+--- a/drivers/input/mouse/alps.c
++++ b/drivers/input/mouse/alps.c
+@@ -1215,14 +1215,24 @@ static int alps_decode_ss4_v2(struct alps_fields *f,
+ 
+       case SS4_PACKET_ID_TWO:
+               if (priv->flags & ALPS_BUTTONPAD) {
+-                      f->mt[0].x = SS4_BTL_MF_X_V2(p, 0);
++                      if (IS_SS4PLUS_DEV(priv->dev_id)) {
++                              f->mt[0].x = SS4_PLUS_BTL_MF_X_V2(p, 0);
++                              f->mt[1].x = SS4_PLUS_BTL_MF_X_V2(p, 1);
++                      } else {
++                              f->mt[0].x = SS4_BTL_MF_X_V2(p, 0);
++                              f->mt[1].x = SS4_BTL_MF_X_V2(p, 1);
++                      }
+                       f->mt[0].y = SS4_BTL_MF_Y_V2(p, 0);
+-                      f->mt[1].x = SS4_BTL_MF_X_V2(p, 1);
+                       f->mt[1].y = SS4_BTL_MF_Y_V2(p, 1);
+               } else {
+-                      f->mt[0].x = SS4_STD_MF_X_V2(p, 0);
++                      if (IS_SS4PLUS_DEV(priv->dev_id)) {
++                              f->mt[0].x = SS4_PLUS_STD_MF_X_V2(p, 0);
++                              f->mt[1].x = SS4_PLUS_STD_MF_X_V2(p, 1);
++                      } else {
++                              f->mt[0].x = SS4_STD_MF_X_V2(p, 0);
++                              f->mt[1].x = SS4_STD_MF_X_V2(p, 1);
++                      }
+                       f->mt[0].y = SS4_STD_MF_Y_V2(p, 0);
+-                      f->mt[1].x = SS4_STD_MF_X_V2(p, 1);
+                       f->mt[1].y = SS4_STD_MF_Y_V2(p, 1);
+               }
+               f->pressure = SS4_MF_Z_V2(p, 0) ? 0x30 : 0;
+@@ -1239,16 +1249,27 @@ static int alps_decode_ss4_v2(struct alps_fields *f,
+ 
+       case SS4_PACKET_ID_MULTI:
+               if (priv->flags & ALPS_BUTTONPAD) {
+-                      f->mt[2].x = SS4_BTL_MF_X_V2(p, 0);
++                      if (IS_SS4PLUS_DEV(priv->dev_id)) {
++                              f->mt[0].x = SS4_PLUS_BTL_MF_X_V2(p, 0);
++                              f->mt[1].x = SS4_PLUS_BTL_MF_X_V2(p, 1);
++                      } else {
++                              f->mt[2].x = SS4_BTL_MF_X_V2(p, 0);
++                              f->mt[3].x = SS4_BTL_MF_X_V2(p, 1);
++                      }
++
+                       f->mt[2].y = SS4_BTL_MF_Y_V2(p, 0);
+-                      f->mt[3].x = SS4_BTL_MF_X_V2(p, 1);
+                       f->mt[3].y = SS4_BTL_MF_Y_V2(p, 1);
+                       no_data_x = SS4_MFPACKET_NO_AX_BL;
+                       no_data_y = SS4_MFPACKET_NO_AY_BL;
+               } else {
+-                      f->mt[2].x = SS4_STD_MF_X_V2(p, 0);
++                      if (IS_SS4PLUS_DEV(priv->dev_id)) {
++                              f->mt[0].x = SS4_PLUS_STD_MF_X_V2(p, 0);
++                              f->mt[1].x = SS4_PLUS_STD_MF_X_V2(p, 1);
++                      } else {
++                              f->mt[0].x = SS4_STD_MF_X_V2(p, 0);
++                              f->mt[1].x = SS4_STD_MF_X_V2(p, 1);
++                      }
+                       f->mt[2].y = SS4_STD_MF_Y_V2(p, 0);
+-                      f->mt[3].x = SS4_STD_MF_X_V2(p, 1);
+                       f->mt[3].y = SS4_STD_MF_Y_V2(p, 1);
+                       no_data_x = SS4_MFPACKET_NO_AX;
+                       no_data_y = SS4_MFPACKET_NO_AY;
+@@ -2541,8 +2562,8 @@ static int alps_set_defaults_ss4_v2(struct psmouse *psmouse,
+ 
+       memset(otp, 0, sizeof(otp));
+ 
+-      if (alps_get_otp_values_ss4_v2(psmouse, 0, &otp[0][0]) ||
+-          alps_get_otp_values_ss4_v2(psmouse, 1, &otp[1][0]))
++      if (alps_get_otp_values_ss4_v2(psmouse, 1, &otp[1][0]) ||
++          alps_get_otp_values_ss4_v2(psmouse, 0, &otp[0][0]))
+               return -1;
+ 
+       alps_update_device_area_ss4_v2(otp, priv);
+diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h
+index ed2d6879fa52..c80a7c76cb76 100644
+--- a/drivers/input/mouse/alps.h
++++ b/drivers/input/mouse/alps.h
+@@ -100,6 +100,10 @@ enum SS4_PACKET_ID {
+                                ((_b[1 + _i * 3]  << 5) & 0x1F00)      \
+                               )
+ 
++#define SS4_PLUS_STD_MF_X_V2(_b, _i) (((_b[0 + (_i) * 3] << 4) & 0x0070) | \
++                               ((_b[1 + (_i) * 3]  << 4) & 0x0F80)    \
++                              )
++
+ #define SS4_STD_MF_Y_V2(_b, _i)       (((_b[1 + (_i) * 3] << 3) & 0x0010) |   \
+                                ((_b[2 + (_i) * 3] << 5) & 0x01E0) |   \
+                                ((_b[2 + (_i) * 3] << 4) & 0x0E00)     \
+@@ -109,6 +113,10 @@ enum SS4_PACKET_ID {
+                                ((_b[0 + (_i) * 3] >> 3) & 0x0010)     \
+                               )
+ 
++#define SS4_PLUS_BTL_MF_X_V2(_b, _i) (SS4_PLUS_STD_MF_X_V2(_b, _i) |  \
++                               ((_b[0 + (_i) * 3] >> 4) & 0x0008)     \
++                              )
++
+ #define SS4_BTL_MF_Y_V2(_b, _i)       (SS4_STD_MF_Y_V2(_b, _i) | \
+                                ((_b[0 + (_i) * 3] >> 3) & 0x0008)     \
+                               )
+diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
+index 3b0e9fb33afe..4f3d3543b2fb 100644
+--- a/drivers/input/mouse/elan_i2c_core.c
++++ b/drivers/input/mouse/elan_i2c_core.c
+@@ -1223,6 +1223,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
+       { "ELAN0000", 0 },
+       { "ELAN0100", 0 },
+       { "ELAN0600", 0 },
++      { "ELAN0602", 0 },
+       { "ELAN0605", 0 },
+       { "ELAN0608", 0 },
+       { "ELAN0605", 0 },
+diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c
+index 922ea02edcc3..fb3810d35c44 100644
+--- a/drivers/input/mouse/trackpoint.c
++++ b/drivers/input/mouse/trackpoint.c
+@@ -265,7 +265,8 @@ static int trackpoint_start_protocol(struct psmouse *psmouse, unsigned char *fir
+       if (ps2_command(&psmouse->ps2dev, param, MAKE_PS2_CMD(0, 2, TP_READ_ID)))
+               return -1;
+ 
+-      if (param[0] != TP_MAGIC_IDENT)
++      /* add new TP ID. */
++      if (!(param[0] & TP_MAGIC_IDENT))
+               return -1;
+ 
+       if (firmware_id)
+diff --git a/drivers/input/mouse/trackpoint.h b/drivers/input/mouse/trackpoint.h
+index 5617ed3a7d7a..88055755f82e 100644
+--- a/drivers/input/mouse/trackpoint.h
++++ b/drivers/input/mouse/trackpoint.h
+@@ -21,8 +21,9 @@
+ #define TP_COMMAND            0xE2    /* Commands start with this */
+ 
+ #define TP_READ_ID            0xE1    /* Sent for device identification */
+-#define TP_MAGIC_IDENT                0x01    /* Sent after a TP_READ_ID followed */
++#define TP_MAGIC_IDENT                0x03    /* Sent after a TP_READ_ID followed */
+                                       /* by the firmware ID */
++                                      /* Firmware ID includes 0x1, 0x2, 0x3 */
+ 
+ 
+ /*
+diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
+index 4de8f4160bb8..09f9dd166827 100644
+--- a/drivers/iommu/amd_iommu_types.h
++++ b/drivers/iommu/amd_iommu_types.h
+@@ -571,7 +571,9 @@ struct amd_iommu {
+ 
+ static inline struct amd_iommu *dev_to_amd_iommu(struct device *dev)
+ {
+-      return container_of(dev, struct amd_iommu, iommu.dev);
++      struct iommu_device *iommu = dev_to_iommu_device(dev);
++
++      return container_of(iommu, struct amd_iommu, iommu);
+ }
+ 
+ #define ACPIHID_UID_LEN 256
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index fc2765ccdb57..76791fded8a4 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -4749,7 +4749,9 @@ static void intel_disable_iommus(void)
+ 
+ static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev)
+ {
+-      return container_of(dev, struct intel_iommu, iommu.dev);
++      struct iommu_device *iommu_dev = dev_to_iommu_device(dev);
++
++      return container_of(iommu_dev, struct intel_iommu, iommu);
+ }
+ 
+ static ssize_t intel_iommu_show_version(struct device *dev,
+diff --git a/drivers/iommu/iommu-sysfs.c b/drivers/iommu/iommu-sysfs.c
+index c58351ed61c1..36d1a7ce7fc4 100644
+--- a/drivers/iommu/iommu-sysfs.c
++++ b/drivers/iommu/iommu-sysfs.c
+@@ -62,32 +62,40 @@ int iommu_device_sysfs_add(struct iommu_device *iommu,
+       va_list vargs;
+       int ret;
+ 
+-      device_initialize(&iommu->dev);
++      iommu->dev = kzalloc(sizeof(*iommu->dev), GFP_KERNEL);
++      if (!iommu->dev)
++              return -ENOMEM;
+ 
+-      iommu->dev.class = &iommu_class;
+-      iommu->dev.parent = parent;
+-      iommu->dev.groups = groups;
++      device_initialize(iommu->dev);
++
++      iommu->dev->class = &iommu_class;
++      iommu->dev->parent = parent;
++      iommu->dev->groups = groups;
+ 
+       va_start(vargs, fmt);
+-      ret = kobject_set_name_vargs(&iommu->dev.kobj, fmt, vargs);
++      ret = kobject_set_name_vargs(&iommu->dev->kobj, fmt, vargs);
+       va_end(vargs);
+       if (ret)
+               goto error;
+ 
+-      ret = device_add(&iommu->dev);
++      ret = device_add(iommu->dev);
+       if (ret)
+               goto error;
+ 
++      dev_set_drvdata(iommu->dev, iommu);
++
+       return 0;
+ 
+ error:
+-      put_device(&iommu->dev);
++      put_device(iommu->dev);
+       return ret;
+ }
+ 
+ void iommu_device_sysfs_remove(struct iommu_device *iommu)
+ {
+-      device_unregister(&iommu->dev);
++      dev_set_drvdata(iommu->dev, NULL);
++      device_unregister(iommu->dev);
++      iommu->dev = NULL;
+ }
+ /*
+  * IOMMU drivers can indicate a device is managed by a given IOMMU using
+@@ -102,14 +110,14 @@ int iommu_device_link(struct iommu_device *iommu, struct device *link)
+       if (!iommu || IS_ERR(iommu))
+               return -ENODEV;
+ 
+-      ret = sysfs_add_link_to_group(&iommu->dev.kobj, "devices",
++      ret = sysfs_add_link_to_group(&iommu->dev->kobj, "devices",
+                                     &link->kobj, dev_name(link));
+       if (ret)
+               return ret;
+ 
+-      ret = sysfs_create_link_nowarn(&link->kobj, &iommu->dev.kobj, "iommu");
++      ret = sysfs_create_link_nowarn(&link->kobj, &iommu->dev->kobj, "iommu");
+       if (ret)
+-              sysfs_remove_link_from_group(&iommu->dev.kobj, "devices",
++              sysfs_remove_link_from_group(&iommu->dev->kobj, "devices",
+                                            dev_name(link));
+ 
+       return ret;
+@@ -121,5 +129,5 @@ void iommu_device_unlink(struct iommu_device *iommu, struct device *link)
+               return;
+ 
+       sysfs_remove_link(&link->kobj, "iommu");
+-      sysfs_remove_link_from_group(&iommu->dev.kobj, "devices", dev_name(link));
++      sysfs_remove_link_from_group(&iommu->dev->kobj, "devices", dev_name(link));
+ }
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 224e93aa6d23..510a580e0348 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -1569,7 +1569,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
+       new_slave->delay = 0;
+       new_slave->link_failure_count = 0;
+ 
+-      if (bond_update_speed_duplex(new_slave))
++      if (bond_update_speed_duplex(new_slave) &&
++          bond_needs_speed_duplex(bond))
+               new_slave->link = BOND_LINK_DOWN;
+ 
+       new_slave->last_rx = jiffies -
+@@ -2137,11 +2138,13 @@ static void bond_miimon_commit(struct bonding *bond)
+                       continue;
+ 
+               case BOND_LINK_UP:
+-                      if (bond_update_speed_duplex(slave)) {
++                      if (bond_update_speed_duplex(slave) &&
++                          bond_needs_speed_duplex(bond)) {
+                               slave->link = BOND_LINK_DOWN;
+-                              netdev_warn(bond->dev,
+-                                          "failed to get link speed/duplex for %s\n",
+-                                          slave->dev->name);
++                              if (net_ratelimit())
++                                      netdev_warn(bond->dev,
++                                                  "failed to get link speed/duplex for %s\n",
++                                                  slave->dev->name);
+                               continue;
+                       }
+                       bond_set_slave_link_state(slave, BOND_LINK_UP,
+diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
+index 83aab1e4c8c8..9f214f9fb48c 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/main.c
++++ b/drivers/net/ethernet/mellanox/mlx4/main.c
+@@ -430,7 +430,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
+               /* Virtual PCI function needs to determine UAR page size from
+                * firmware. Only master PCI function can set the uar page size
+                */
+-              if (enable_4k_uar)
++              if (enable_4k_uar || !dev->persist->num_vfs)
+                       dev->uar_page_shift = DEFAULT_UAR_PAGE_SHIFT;
+               else
+                       dev->uar_page_shift = PAGE_SHIFT;
+@@ -2275,7 +2275,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
+ 
+              dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1;
+ 
+-              if (enable_4k_uar) {
++              if (enable_4k_uar || !dev->persist->num_vfs) {
+                       init_hca.log_uar_sz = ilog2(dev->caps.num_uars) +
+                                                  PAGE_SHIFT - DEFAULT_UAR_PAGE_SHIFT;
+                       init_hca.uar_page_sz = DEFAULT_UAR_PAGE_SHIFT - 12;
+diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+index 82bd6b0935f1..fd4a785431ac 100644
+--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
++++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+@@ -881,8 +881,7 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
+       return NETDEV_TX_OK;
+ 
+ err_unmap:
+-      --f;
+-      while (f >= 0) {
++      while (--f >= 0) {
+               frag = &skb_shinfo(skb)->frags[f];
+               dma_unmap_page(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
+                              skb_frag_size(frag), DMA_TO_DEVICE);
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 9ee7d4275640..5bd954d12541 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -1876,6 +1876,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
+ 
+ err_detach:
+       tun_detach_all(dev);
++      /* register_netdevice() already called tun_free_netdev() */
++      goto err_free_dev;
++
+ err_free_flow:
+       tun_flow_uninit(tun);
+       security_tun_dev_free_security(tun->security);
+diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
+index 10e5bf460139..f27d1344d198 100644
+--- a/drivers/ntb/ntb_transport.c
++++ b/drivers/ntb/ntb_transport.c
+@@ -920,10 +920,8 @@ static void ntb_transport_link_work(struct work_struct *work)
+               ntb_free_mw(nt, i);
+ 
+       /* if there's an actual failure, we should just bail */
+-      if (rc < 0) {
+-              ntb_link_disable(ndev);
++      if (rc < 0)
+               return;
+-      }
+ 
+ out:
+       if (ntb_link_is_up(ndev, NULL, NULL) == 1)
+diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+index d283341cfe43..56cd4e5e51b2 100644
+--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
++++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+@@ -45,6 +45,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
+       {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
+       {USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */
+       {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
++      {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */
+       {}      /* Terminating entry */
+ };
+ 
+diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
+index 007a4f366086..1c4797e53f68 100644
+--- a/drivers/virtio/virtio_pci_common.c
++++ b/drivers/virtio/virtio_pci_common.c
+@@ -107,6 +107,7 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
+ {
+       struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+       const char *name = dev_name(&vp_dev->vdev.dev);
++      unsigned flags = PCI_IRQ_MSIX;
+       unsigned i, v;
+       int err = -ENOMEM;
+ 
+@@ -126,10 +127,13 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
+                                       GFP_KERNEL))
+                       goto error;
+ 
++      if (desc) {
++              flags |= PCI_IRQ_AFFINITY;
++              desc->pre_vectors++; /* virtio config vector */
++      }
++
+       err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors,
+-                                           nvectors, PCI_IRQ_MSIX |
+-                                           (desc ? PCI_IRQ_AFFINITY : 0),
+-                                           desc);
++                                           nvectors, flags, desc);
+       if (err < 0)
+               goto error;
+       vp_dev->msix_enabled = 1;
+diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
+index 56366e984076..569d3fb736be 100644
+--- a/fs/cifs/dir.c
++++ b/fs/cifs/dir.c
+@@ -194,15 +194,20 @@ build_path_from_dentry_optional_prefix(struct dentry *direntry, bool prefix)
+ }
+ 
+ /*
++ * Don't allow path components longer than the server max.
+  * Don't allow the separator character in a path component.
+  * The VFS will not allow "/", but "\" is allowed by posix.
+  */
+ static int
+-check_name(struct dentry *direntry)
++check_name(struct dentry *direntry, struct cifs_tcon *tcon)
+ {
+       struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
+       int i;
+ 
++      if (unlikely(direntry->d_name.len >
++                   tcon->fsAttrInfo.MaxPathNameComponentLength))
++              return -ENAMETOOLONG;
++
+       if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)) {
+               for (i = 0; i < direntry->d_name.len; i++) {
+                       if (direntry->d_name.name[i] == '\\') {
+@@ -500,10 +505,6 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
+               return finish_no_open(file, res);
+       }
+ 
+-      rc = check_name(direntry);
+-      if (rc)
+-              return rc;
+-
+       xid = get_xid();
+ 
+       cifs_dbg(FYI, "parent inode = 0x%p name is: %pd and dentry = 0x%p\n",
+@@ -516,6 +517,11 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
+       }
+ 
+       tcon = tlink_tcon(tlink);
++
++      rc = check_name(direntry, tcon);
++      if (rc)
++              goto out_free_xid;
++
+       server = tcon->ses->server;
+ 
+       if (server->ops->new_lease_key)
+@@ -776,7 +782,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
+       }
+       pTcon = tlink_tcon(tlink);
+ 
+-      rc = check_name(direntry);
++      rc = check_name(direntry, pTcon);
+       if (rc)
+               goto lookup_out;
+ 
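
The check moves after tlink_tcon() because it now needs the tcon to see the server's advertised component limit. A rough userspace illustration of the added length test (the 255 limit is a typical value, standing in for tcon->fsAttrInfo.MaxPathNameComponentLength):

	#include <errno.h>
	#include <stdio.h>
	#include <string.h>

	#define MAX_COMPONENT_LEN 255	/* hypothetical server-reported limit */

	static int check_name(const char *component)
	{
		if (strlen(component) > MAX_COMPONENT_LEN)
			return -ENAMETOOLONG;
		if (strchr(component, '\\'))	/* separator not allowed */
			return -EINVAL;
		return 0;
	}

	int main(void)
	{
		char long_name[300];

		memset(long_name, 'a', sizeof(long_name) - 1);
		long_name[sizeof(long_name) - 1] = '\0';

		printf("%d\n", check_name("file.txt"));	/* 0 */
		printf("%d\n", check_name(long_name));	/* -ENAMETOOLONG */
		return 0;
	}
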
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index e4afdaae743f..c398f393f2b3 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -3195,8 +3195,8 @@ copy_fs_info_to_kstatfs(struct smb2_fs_full_size_info *pfs_inf,
+       kst->f_bsize = le32_to_cpu(pfs_inf->BytesPerSector) *
+                         le32_to_cpu(pfs_inf->SectorsPerAllocationUnit);
+       kst->f_blocks = le64_to_cpu(pfs_inf->TotalAllocationUnits);
+-      kst->f_bfree  = le64_to_cpu(pfs_inf->ActualAvailableAllocationUnits);
+-      kst->f_bavail = le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits);
++      kst->f_bfree  = kst->f_bavail =
++                      le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits);
+       return;
+ }
+ 
+diff --git a/fs/dax.c b/fs/dax.c
+index 9187f3b07f3e..f3ac7674b5cb 100644
+--- a/fs/dax.c
++++ b/fs/dax.c
+@@ -1380,6 +1380,16 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf,
+ 
+       trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);
+ 
++      /*
++       * Make sure that the faulting address's PMD offset (color) matches
++       * the PMD offset from the start of the file.  This is necessary so
++       * that a PMD range in the page table overlaps exactly with a PMD
++       * range in the radix tree.
++       */
++      if ((vmf->pgoff & PG_PMD_COLOUR) !=
++          ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
++              goto fallback;
++
+       /* Fall back to PTEs if we're going to COW */
+       if (write && !(vma->vm_flags & VM_SHARED))
+               goto fallback;
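
The new fallback condition compares the PMD "colour" of the faulting virtual address with that of the file offset: a huge-page entry is only usable when the page offset within the 2 MiB window matches on both sides. A small sketch of the arithmetic, assuming x86-64 constants:

	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_SHIFT	12
	#define PMD_SHIFT	21	/* 2 MiB huge pages on x86-64 */
	/* Pages per PMD minus one: the "colour" mask (511 here). */
	#define PG_PMD_COLOUR	((1UL << (PMD_SHIFT - PAGE_SHIFT)) - 1)

	static int colour_matches(uint64_t pgoff, uint64_t vaddr)
	{
		return (pgoff & PG_PMD_COLOUR) ==
		       ((vaddr >> PAGE_SHIFT) & PG_PMD_COLOUR);
	}

	int main(void)
	{
		/* File page 0 mapped at a 2 MiB aligned address: PMD is fine. */
		printf("%d\n", colour_matches(0, 0x40200000));
		/* Same address but file page 1: the PMD range in the page table
		 * would not overlap a PMD range in the radix tree, so the fault
		 * must fall back to PTEs. */
		printf("%d\n", colour_matches(1, 0x40200000));
		return 0;
	}
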
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index 26780d53a6f9..ed8d6b73d12a 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -144,7 +144,7 @@ static void next_decode_page(struct nfsd4_compoundargs *argp)
+       argp->p = page_address(argp->pagelist[0]);
+       argp->pagelist++;
+       if (argp->pagelen < PAGE_SIZE) {
+-              argp->end = argp->p + (argp->pagelen>>2);
++              argp->end = argp->p + XDR_QUADLEN(argp->pagelen);
+               argp->pagelen = 0;
+       } else {
+               argp->end = argp->p + (PAGE_SIZE>>2);
+@@ -1279,9 +1279,7 @@ nfsd4_decode_write(struct nfsd4_compoundargs *argp, struct nfsd4_write *write)
+               argp->pagelen -= pages * PAGE_SIZE;
+               len -= pages * PAGE_SIZE;
+ 
+-              argp->p = (__be32 *)page_address(argp->pagelist[0]);
+-              argp->pagelist++;
+-              argp->end = argp->p + XDR_QUADLEN(PAGE_SIZE);
++              next_decode_page(argp);
+       }
+       argp->p += XDR_QUADLEN(len);
+ 
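
XDR_QUADLEN() rounds a byte count up to whole 4-byte XDR words, whereas the old pagelen>>2 rounded down and could leave argp->end short of the real end of data. The difference in a quick sketch:

	#include <stdio.h>

	/* Kernel definition: 4-byte quads needed to hold l bytes. */
	#define XDR_QUADLEN(l)	(((l) + 3) >> 2)

	int main(void)
	{
		unsigned int pagelen = 5;	/* not a multiple of 4 */

		printf("pagelen >> 2     = %u words (loses the 5th byte)\n",
		       pagelen >> 2);		/* 1 */
		printf("XDR_QUADLEN(len) = %u words (covers all bytes)\n",
		       XDR_QUADLEN(pagelen));	/* 2 */
		return 0;
	}
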
+diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
+index 314a0b9219c6..a06342f11259 100644
+--- a/include/asm-generic/vmlinux.lds.h
++++ b/include/asm-generic/vmlinux.lds.h
+@@ -59,6 +59,22 @@
+ /* Align . to a 8 byte boundary equals to maximum function alignment. */
+ #define ALIGN_FUNCTION()  . = ALIGN(8)
+ 
++/*
++ * LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections, which
++ * generates .data.identifier sections, which need to be pulled in with
++ * .data. We don't want to pull in .data..other sections, which Linux
++ * has defined. Same for text and bss.
++ */
++#ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
++#define TEXT_MAIN .text .text.[0-9a-zA-Z_]*
++#define DATA_MAIN .data .data.[0-9a-zA-Z_]*
++#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]*
++#else
++#define TEXT_MAIN .text
++#define DATA_MAIN .data
++#define BSS_MAIN .bss
++#endif
++
+ /*
+  * Align to a 32 byte boundary equal to the
+  * alignment gcc 4.5 uses for a struct
+@@ -199,12 +215,9 @@
+ 
+ /*
+  * .data section
+- * LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections generates
+- * .data.identifier which needs to be pulled in with .data, but don't want to
+- * pull in .data..stuff which has its own requirements. Same for bss.
+  */
+ #define DATA_DATA                                                     \
+-      *(.data .data.[0-9a-zA-Z_]*)                                    \
++      *(DATA_MAIN)                                                    \
+       *(.ref.data)                                                    \
+       *(.data..shared_aligned) /* percpu related */                   \
+       MEM_KEEP(init.data)                                             \
+@@ -435,16 +448,17 @@
+               VMLINUX_SYMBOL(__security_initcall_end) = .;            \
+       }
+ 
+-/* .text section. Map to function alignment to avoid address changes
++/*
++ * .text section. Map to function alignment to avoid address changes
+  * during second ld run in second ld pass when generating System.map
+- * LD_DEAD_CODE_DATA_ELIMINATION option enables -ffunction-sections generates
+- * .text.identifier which needs to be pulled in with .text , but some
+- * architectures define .text.foo which is not intended to be pulled in here.
+- * Those enabling LD_DEAD_CODE_DATA_ELIMINATION must ensure they don't have
+- * conflicting section names, and must pull in .text.[0-9a-zA-Z_]* */
++ *
++ * TEXT_MAIN here will match .text.fixup and .text.unlikely if dead
++ * code elimination is enabled, so these sections should be converted
++ * to use ".." first.
++ */
+ #define TEXT_TEXT                                                     \
+               ALIGN_FUNCTION();                                       \
+-              *(.text.hot .text .text.fixup .text.unlikely)           \
++              *(.text.hot TEXT_MAIN .text.fixup .text.unlikely)       \
+               *(.ref.text)                                            \
+       MEM_KEEP(init.text)                                             \
+       MEM_KEEP(exit.text)                                             \
+@@ -613,7 +627,7 @@
+               BSS_FIRST_SECTIONS                                      \
+               *(.bss..page_aligned)                                   \
+               *(.dynbss)                                              \
+-              *(.bss .bss.[0-9a-zA-Z_]*)                              \
++              *(BSS_MAIN)                                             \
+               *(COMMON)                                               \
+       }
+ 
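
The TEXT_MAIN/DATA_MAIN wildcards are easier to read once you see what -ffunction-sections/-fdata-sections emit: one .text.<name> or .data.<name> section per symbol, while kernel-reserved sections use a ".." separator that the single-dot wildcard deliberately skips. A quick way to observe this, assuming a gcc toolchain:

	/* Compile with: gcc -ffunction-sections -fdata-sections -c demo.c
	 * then inspect:  objdump -h demo.o
	 * Expect .data.counter and .text.helper sections, which the
	 * .data.[0-9a-zA-Z_]* / .text.[0-9a-zA-Z_]* wildcards match; a
	 * kernel name like .data..percpu (".." separator) would not match. */
	int counter = 42;	/* lands in .data.counter */

	int helper(void)	/* lands in .text.helper */
	{
		return counter;
	}
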
+diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
+index d5093b52b485..88f4289e7eee 100644
+--- a/include/linux/bpf_verifier.h
++++ b/include/linux/bpf_verifier.h
+@@ -43,6 +43,7 @@ struct bpf_reg_state {
+       u32 min_align;
+       u32 aux_off;
+       u32 aux_off_align;
++      bool value_from_signed;
+ };
+ 
+ enum bpf_stack_slot_type {
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 803e5a9b2654..d6d525039496 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -891,9 +891,9 @@ static inline struct file *get_file(struct file *f)
+ /* Page cache limit. The filesystems should put that into their s_maxbytes 
+    limits, otherwise bad things can happen in VM. */ 
+ #if BITS_PER_LONG==32
+-#define MAX_LFS_FILESIZE      (((loff_t)PAGE_SIZE << (BITS_PER_LONG-1))-1)
++#define MAX_LFS_FILESIZE      ((loff_t)ULONG_MAX << PAGE_SHIFT)
+ #elif BITS_PER_LONG==64
+-#define MAX_LFS_FILESIZE      ((loff_t)0x7fffffffffffffffLL)
++#define MAX_LFS_FILESIZE      ((loff_t)LLONG_MAX)
+ #endif
+ 
+ #define FL_POSIX      1
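
On 32-bit, the old expression capped s_maxbytes at 2^43 - 1 even though a page cache indexed by a 32-bit unsigned long of PAGE_SIZE pages can address nearly 2^44 bytes; the new expression uses the full index range. Checking the arithmetic with the usual PAGE_SHIFT of 12:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		const unsigned int PAGE_SHIFT = 12, BITS_PER_LONG = 32;
		const uint64_t PAGE_SIZE = 1ULL << PAGE_SHIFT;
		const uint64_t ULONG_MAX_32 = 0xffffffffULL;

		/* old: ((loff_t)PAGE_SIZE << (BITS_PER_LONG-1)) - 1 */
		uint64_t old_max = (PAGE_SIZE << (BITS_PER_LONG - 1)) - 1;
		/* new: (loff_t)ULONG_MAX << PAGE_SHIFT */
		uint64_t new_max = ULONG_MAX_32 << PAGE_SHIFT;

		printf("old limit: %llu (2^43 - 1)\n",
		       (unsigned long long)old_max);
		printf("new limit: %llu (2^44 - 4096)\n",
		       (unsigned long long)new_max);
		return 0;
	}
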
+diff --git a/include/linux/iommu.h b/include/linux/iommu.h
+index 2cb54adc4a33..176f7569d874 100644
+--- a/include/linux/iommu.h
++++ b/include/linux/iommu.h
+@@ -240,7 +240,7 @@ struct iommu_device {
+       struct list_head list;
+       const struct iommu_ops *ops;
+       struct fwnode_handle *fwnode;
+-      struct device dev;
++      struct device *dev;
+ };
+ 
+ int  iommu_device_register(struct iommu_device *iommu);
+@@ -265,6 +265,11 @@ static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
+       iommu->fwnode = fwnode;
+ }
+ 
++static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
++{
++      return (struct iommu_device *)dev_get_drvdata(dev);
++}
++
+ #define IOMMU_GROUP_NOTIFY_ADD_DEVICE         1 /* Device added */
+ #define IOMMU_GROUP_NOTIFY_DEL_DEVICE         2 /* Pre Device removed */
+ #define IOMMU_GROUP_NOTIFY_BIND_DRIVER                3 /* Pre Driver bind */
+@@ -589,6 +594,11 @@ static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
+ {
+ }
+ 
++static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
++{
++      return NULL;
++}
++
+ static inline void iommu_device_unregister(struct iommu_device *iommu)
+ {
+ }
+diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
+index 6b2e0dd88569..feff771e8ea0 100644
+--- a/include/linux/ptr_ring.h
++++ b/include/linux/ptr_ring.h
+@@ -371,9 +371,9 @@ static inline void *ptr_ring_consume_bh(struct ptr_ring *r)
+       __PTR_RING_PEEK_CALL_v; \
+ })
+ 
+-static inline void **__ptr_ring_init_queue_alloc(int size, gfp_t gfp)
++static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp)
+ {
+-      return kzalloc(ALIGN(size * sizeof(void *), SMP_CACHE_BYTES), gfp);
++      return kcalloc(size, sizeof(void *), gfp);
+ }
+ 
+ static inline void __ptr_ring_set_size(struct ptr_ring *r, int size)
+@@ -462,7 +462,8 @@ static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp,
+  * In particular if you consume ring in interrupt or BH context, you must
+  * disable interrupts/BH when doing so.
+  */
+-static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, int nrings,
++static inline int ptr_ring_resize_multiple(struct ptr_ring **rings,
++                                         unsigned int nrings,
+                                          int size,
+                                          gfp_t gfp, void (*destroy)(void *))
+ {
+@@ -470,7 +471,7 @@ static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, int nrings,
+       void ***queues;
+       int i;
+ 
+-      queues = kmalloc(nrings * sizeof *queues, gfp);
++      queues = kmalloc_array(nrings, sizeof(*queues), gfp);
+       if (!queues)
+               goto noqueues;
+ 
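
kcalloc()/kmalloc_array() differ from the open-coded size * sizeof(void *) in that they refuse multiplications that would overflow, which matters because ring sizes here can be influenced from userspace. A hedged userspace analogue of the check:

	#include <stdio.h>
	#include <stdint.h>
	#include <stdlib.h>

	/* Checked multiply in the spirit of kcalloc(): refuse on overflow. */
	static void *alloc_array(size_t n, size_t elem_size)
	{
		if (elem_size && n > SIZE_MAX / elem_size)
			return NULL;
		return calloc(n, elem_size);	/* calloc() repeats the check */
	}

	int main(void)
	{
		uint32_t n = 1u << 30;	/* 2^30 entries */

		/* In 32-bit unsigned math, n * 8 wraps to 0: an unchecked
		 * kzalloc(n * sizeof(void *)) would "succeed" with a tiny
		 * buffer on a 32-bit size_t. */
		printf("wrapped byte count: %u\n", (uint32_t)(n * 8u));
		printf("checked alloc of huge n: %s\n",
		       alloc_array(SIZE_MAX / 2, sizeof(void *)) ? "ok"
								   : "refused");
		return 0;
	}
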
+diff --git a/include/linux/skb_array.h b/include/linux/skb_array.h
+index f4dfade428f0..be8b902b5845 100644
+--- a/include/linux/skb_array.h
++++ b/include/linux/skb_array.h
+@@ -162,7 +162,8 @@ static inline int skb_array_resize(struct skb_array *a, int size, gfp_t gfp)
+ }
+ 
+ static inline int skb_array_resize_multiple(struct skb_array **rings,
+-                                          int nrings, int size, gfp_t gfp)
++                                          int nrings, unsigned int size,
++                                          gfp_t gfp)
+ {
+       BUILD_BUG_ON(offsetof(struct skb_array, ring));
+       return ptr_ring_resize_multiple((struct ptr_ring **)rings,
+diff --git a/include/net/bonding.h b/include/net/bonding.h
+index b00508d22e0a..b2e68657a216 100644
+--- a/include/net/bonding.h
++++ b/include/net/bonding.h
+@@ -277,6 +277,11 @@ static inline bool bond_is_lb(const struct bonding *bond)
+              BOND_MODE(bond) == BOND_MODE_ALB;
+ }
+ 
++static inline bool bond_needs_speed_duplex(const struct bonding *bond)
++{
++      return BOND_MODE(bond) == BOND_MODE_8023AD || bond_is_lb(bond);
++}
++
+ static inline bool bond_is_nondyn_tlb(const struct bonding *bond)
+ {
+       return (BOND_MODE(bond) == BOND_MODE_TLB)  &&
+diff --git a/include/net/ip.h b/include/net/ip.h
+index 821cedcc8e73..0cf7f5a65fe6 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -352,7 +352,7 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
+           !forwarding)
+               return dst_mtu(dst);
+ 
+-      return min(dst->dev->mtu, IP_MAX_MTU);
++      return min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU);
+ }
+ 
+ static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
+@@ -364,7 +364,7 @@ static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
+               return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding);
+       }
+ 
+-      return min(skb_dst(skb)->dev->mtu, IP_MAX_MTU);
++      return min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU);
+ }
+ 
+ u32 ip_idents_reserve(u32 hash, int segs);
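
READ_ONCE() matters here because dev->mtu can change concurrently (e.g. via an MTU ioctl); without it the compiler is free to reload the field, so the value compared against IP_MAX_MTU and the value ultimately used may differ. A minimal userspace analogue of the annotation:

	#include <stdio.h>

	/* Minimal analogue of the kernel's READ_ONCE(): force a single,
	 * non-reloadable access through a volatile-qualified lvalue. */
	#define READ_ONCE(x)	(*(const volatile __typeof__(x) *)&(x))

	#define IP_MAX_MTU	0xFFFFU

	static unsigned int dev_mtu = 1500;	/* imagine concurrent writers */

	static unsigned int clamped_mtu(void)
	{
		unsigned int mtu = READ_ONCE(dev_mtu);	/* snapshot once */

		return mtu < IP_MAX_MTU ? mtu : IP_MAX_MTU;
	}

	int main(void)
	{
		printf("%u\n", clamped_mtu());
		return 0;
	}
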
+diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
+index 22e52093bfda..db5b6b6346b3 100644
+--- a/include/net/sch_generic.h
++++ b/include/net/sch_generic.h
+@@ -785,8 +785,11 @@ static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
+       old = *pold;
+       *pold = new;
+       if (old != NULL) {
+-              qdisc_tree_reduce_backlog(old, old->q.qlen, old->qstats.backlog);
++              unsigned int qlen = old->q.qlen;
++              unsigned int backlog = old->qstats.backlog;
++
+               qdisc_reset(old);
++              qdisc_tree_reduce_backlog(old, qlen, backlog);
+       }
+       sch_tree_unlock(sch);
+ 
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index a8a725697bed..1e64ee3dd650 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -504,6 +504,7 @@ static void reset_reg_range_values(struct bpf_reg_state *regs, u32 regno)
+ {
+       regs[regno].min_value = BPF_REGISTER_MIN_RANGE;
+       regs[regno].max_value = BPF_REGISTER_MAX_RANGE;
++      regs[regno].value_from_signed = false;
+       regs[regno].min_align = 0;
+ }
+ 
+@@ -777,12 +778,13 @@ static int check_ctx_access(struct bpf_verifier_env *env, int off, int size,
+       return -EACCES;
+ }
+ 
+-static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
++static bool __is_pointer_value(bool allow_ptr_leaks,
++                             const struct bpf_reg_state *reg)
+ {
+-      if (env->allow_ptr_leaks)
++      if (allow_ptr_leaks)
+               return false;
+ 
+-      switch (env->cur_state.regs[regno].type) {
++      switch (reg->type) {
+       case UNKNOWN_VALUE:
+       case CONST_IMM:
+               return false;
+@@ -791,6 +793,11 @@ static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
+       }
+ }
+ 
++static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
++{
++      return __is_pointer_value(env->allow_ptr_leaks, &env->cur_state.regs[regno]);
++}
++
+ static int check_pkt_ptr_alignment(const struct bpf_reg_state *reg,
+                                  int off, int size, bool strict)
+ {
+@@ -1650,6 +1657,65 @@ static int evaluate_reg_alu(struct bpf_verifier_env *env, struct bpf_insn *insn)
+       return 0;
+ }
+ 
++static int evaluate_reg_imm_alu_unknown(struct bpf_verifier_env *env,
++                                      struct bpf_insn *insn)
++{
++      struct bpf_reg_state *regs = env->cur_state.regs;
++      struct bpf_reg_state *dst_reg = &regs[insn->dst_reg];
++      struct bpf_reg_state *src_reg = &regs[insn->src_reg];
++      u8 opcode = BPF_OP(insn->code);
++      s64 imm_log2 = __ilog2_u64((long long)dst_reg->imm);
++
++      /* BPF_X code with src_reg->type UNKNOWN_VALUE here. */
++      if (src_reg->imm > 0 && dst_reg->imm) {
++              switch (opcode) {
++              case BPF_ADD:
++                      /* dreg += sreg
++                       * where both have zero upper bits. Adding them
++                       * can only result making one more bit non-zero
++                       * can only result in making one more bit non-zero
++                       * Ex. 0xffff (imm=48) + 1 (imm=63) = 0x10000 (imm=47)
++                       *     0xffff (imm=48) + 0xffff = 0x1fffe (imm=47)
++                       */
++                      dst_reg->imm = min(src_reg->imm, 63 - imm_log2);
++                      dst_reg->imm--;
++                      break;
++              case BPF_AND:
++                      /* dreg &= sreg
++                       * AND can not extend zero bits, only shrink them
++                       * Ex.  0x00..00ffffff
++                       *    & 0x0f..ffffffff
++                       *     ----------------
++                       *      0x00..00ffffff
++                       */
++                      dst_reg->imm = max(src_reg->imm, 63 - imm_log2);
++                      break;
++              case BPF_OR:
++                      /* dreg |= sreg
++                       * OR can only extend zero bits
++                       * Ex.  0x00..00ffffff
++                       *    | 0x0f..ffffffff
++                       *     ----------------
++                       *      0x0f..00ffffff
++                       */
++                      dst_reg->imm = min(src_reg->imm, 63 - imm_log2);
++                      break;
++              case BPF_SUB:
++              case BPF_MUL:
++              case BPF_RSH:
++              case BPF_LSH:
++                      /* These may be flushed out later */
++              default:
++                      mark_reg_unknown_value(regs, insn->dst_reg);
++              }
++      } else {
++              mark_reg_unknown_value(regs, insn->dst_reg);
++      }
++
++      dst_reg->type = UNKNOWN_VALUE;
++      return 0;
++}
++
+ static int evaluate_reg_imm_alu(struct bpf_verifier_env *env,
+                               struct bpf_insn *insn)
+ {
+@@ -1659,6 +1725,9 @@ static int evaluate_reg_imm_alu(struct bpf_verifier_env *env,
+       u8 opcode = BPF_OP(insn->code);
+       u64 dst_imm = dst_reg->imm;
+ 
++      if (BPF_SRC(insn->code) == BPF_X && src_reg->type == UNKNOWN_VALUE)
++              return evaluate_reg_imm_alu_unknown(env, insn);
++
+       /* dst_reg->type == CONST_IMM here. Simulate execution of insns
+        * containing ALU ops. Don't care about overflow or negative
+        * values, just add/sub/... them; registers are in u64.
+@@ -1763,10 +1832,24 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
+       dst_align = dst_reg->min_align;
+ 
+       /* We don't know anything about what was done to this register, mark it
+-       * as unknown.
++       * as unknown. Also, if both derived bounds came from signed/unsigned
++       * mixed compares and one side is unbounded, we cannot really do anything
++       * with them as boundaries cannot be trusted. Thus, arithmetic of two
++       * regs of such kind will get invalidated bounds on the dst side.
+        */
+-      if (min_val == BPF_REGISTER_MIN_RANGE &&
+-          max_val == BPF_REGISTER_MAX_RANGE) {
++      if ((min_val == BPF_REGISTER_MIN_RANGE &&
++           max_val == BPF_REGISTER_MAX_RANGE) ||
++          (BPF_SRC(insn->code) == BPF_X &&
++           ((min_val != BPF_REGISTER_MIN_RANGE &&
++             max_val == BPF_REGISTER_MAX_RANGE) ||
++            (min_val == BPF_REGISTER_MIN_RANGE &&
++             max_val != BPF_REGISTER_MAX_RANGE) ||
++            (dst_reg->min_value != BPF_REGISTER_MIN_RANGE &&
++             dst_reg->max_value == BPF_REGISTER_MAX_RANGE) ||
++            (dst_reg->min_value == BPF_REGISTER_MIN_RANGE &&
++             dst_reg->max_value != BPF_REGISTER_MAX_RANGE)) &&
++           regs[insn->dst_reg].value_from_signed !=
++           regs[insn->src_reg].value_from_signed)) {
+               reset_reg_range_values(regs, insn->dst_reg);
+               return;
+       }
+@@ -1775,10 +1858,12 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
+        * do our normal operations to the register, we need to set the values
+        * to the min/max since they are undefined.
+        */
+-      if (min_val == BPF_REGISTER_MIN_RANGE)
+-              dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
+-      if (max_val == BPF_REGISTER_MAX_RANGE)
+-              dst_reg->max_value = BPF_REGISTER_MAX_RANGE;
++      if (opcode != BPF_SUB) {
++              if (min_val == BPF_REGISTER_MIN_RANGE)
++                      dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
++              if (max_val == BPF_REGISTER_MAX_RANGE)
++                      dst_reg->max_value = BPF_REGISTER_MAX_RANGE;
++      }
+ 
+       switch (opcode) {
+       case BPF_ADD:
+@@ -1789,10 +1874,17 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
+               dst_reg->min_align = min(src_align, dst_align);
+               break;
+       case BPF_SUB:
++              /* If one of our values was at the end of our ranges, then the
++               * _opposite_ value in the dst_reg goes to the end of our range.
++               */
++              if (min_val == BPF_REGISTER_MIN_RANGE)
++                      dst_reg->max_value = BPF_REGISTER_MAX_RANGE;
++              if (max_val == BPF_REGISTER_MAX_RANGE)
++                      dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
+               if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
+-                      dst_reg->min_value -= min_val;
++                      dst_reg->min_value -= max_val;
+               if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
+-                      dst_reg->max_value -= max_val;
++                      dst_reg->max_value -= min_val;
+               dst_reg->min_align = min(src_align, dst_align);
+               break;
+       case BPF_MUL:
+@@ -1953,6 +2045,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
+                       regs[insn->dst_reg].max_value = insn->imm;
+                       regs[insn->dst_reg].min_value = insn->imm;
+                       regs[insn->dst_reg].min_align = calc_align(insn->imm);
++                      regs[insn->dst_reg].value_from_signed = false;
+               }
+ 
+       } else if (opcode > BPF_END) {
+@@ -2128,40 +2221,63 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
+                           struct bpf_reg_state *false_reg, u64 val,
+                           u8 opcode)
+ {
++      bool value_from_signed = true;
++      bool is_range = true;
++
+       switch (opcode) {
+       case BPF_JEQ:
+               /* If this is false then we know nothing Jon Snow, but if it is
+                * true then we know for sure.
+                */
+               true_reg->max_value = true_reg->min_value = val;
++              is_range = false;
+               break;
+       case BPF_JNE:
+               /* If this is true we know nothing Jon Snow, but if it is false
+                * we know the value for sure;
+                */
+               false_reg->max_value = false_reg->min_value = val;
++              is_range = false;
+               break;
+       case BPF_JGT:
+-              /* Unsigned comparison, the minimum value is 0. */
+-              false_reg->min_value = 0;
++              value_from_signed = false;
+               /* fallthrough */
+       case BPF_JSGT:
++              if (true_reg->value_from_signed != value_from_signed)
++                      reset_reg_range_values(true_reg, 0);
++              if (false_reg->value_from_signed != value_from_signed)
++                      reset_reg_range_values(false_reg, 0);
++              if (opcode == BPF_JGT) {
++                      /* Unsigned comparison, the minimum value is 0. */
++                      false_reg->min_value = 0;
++              }
+               /* If this is false then we know the maximum val is val,
+                * otherwise we know the min val is val+1.
+                */
+               false_reg->max_value = val;
++              false_reg->value_from_signed = value_from_signed;
+               true_reg->min_value = val + 1;
++              true_reg->value_from_signed = value_from_signed;
+               break;
+       case BPF_JGE:
+-              /* Unsigned comparison, the minimum value is 0. */
+-              false_reg->min_value = 0;
++              value_from_signed = false;
+               /* fallthrough */
+       case BPF_JSGE:
++              if (true_reg->value_from_signed != value_from_signed)
++                      reset_reg_range_values(true_reg, 0);
++              if (false_reg->value_from_signed != value_from_signed)
++                      reset_reg_range_values(false_reg, 0);
++              if (opcode == BPF_JGE) {
++                      /* Unsigned comparison, the minimum value is 0. */
++                      false_reg->min_value = 0;
++              }
+               /* If this is false then we know the maximum value is val - 1,
+                * otherwise we know the minimum value is val.
+                */
+               false_reg->max_value = val - 1;
++              false_reg->value_from_signed = value_from_signed;
+               true_reg->min_value = val;
++              true_reg->value_from_signed = value_from_signed;
+               break;
+       default:
+               break;
+@@ -2169,6 +2285,12 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
+ 
+       check_reg_overflow(false_reg);
+       check_reg_overflow(true_reg);
++      if (is_range) {
++              if (__is_pointer_value(false, false_reg))
++                      reset_reg_range_values(false_reg, 0);
++              if (__is_pointer_value(false, true_reg))
++                      reset_reg_range_values(true_reg, 0);
++      }
+ }
+ 
+ /* Same as above, but for the case that dst_reg is a CONST_IMM reg and src_reg
+@@ -2178,41 +2300,64 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
+                               struct bpf_reg_state *false_reg, u64 val,
+                               u8 opcode)
+ {
++      bool value_from_signed = true;
++      bool is_range = true;
++
+       switch (opcode) {
+       case BPF_JEQ:
+               /* If this is false then we know nothing Jon Snow, but if it is
+                * true then we know for sure.
+                */
+               true_reg->max_value = true_reg->min_value = val;
++              is_range = false;
+               break;
+       case BPF_JNE:
+               /* If this is true we know nothing Jon Snow, but if it is false
+                * we know the value for sure;
+                */
+               false_reg->max_value = false_reg->min_value = val;
++              is_range = false;
+               break;
+       case BPF_JGT:
+-              /* Unsigned comparison, the minimum value is 0. */
+-              true_reg->min_value = 0;
++              value_from_signed = false;
+               /* fallthrough */
+       case BPF_JSGT:
++              if (true_reg->value_from_signed != value_from_signed)
++                      reset_reg_range_values(true_reg, 0);
++              if (false_reg->value_from_signed != value_from_signed)
++                      reset_reg_range_values(false_reg, 0);
++              if (opcode == BPF_JGT) {
++                      /* Unsigned comparison, the minimum value is 0. */
++                      true_reg->min_value = 0;
++              }
+               /*
+                * If this is false, then the val is <= the register, if it is
+                * true the register <= to the val.
+                */
+               false_reg->min_value = val;
++              false_reg->value_from_signed = value_from_signed;
+               true_reg->max_value = val - 1;
++              true_reg->value_from_signed = value_from_signed;
+               break;
+       case BPF_JGE:
+-              /* Unsigned comparison, the minimum value is 0. */
+-              true_reg->min_value = 0;
++              value_from_signed = false;
+               /* fallthrough */
+       case BPF_JSGE:
++              if (true_reg->value_from_signed != value_from_signed)
++                      reset_reg_range_values(true_reg, 0);
++              if (false_reg->value_from_signed != value_from_signed)
++                      reset_reg_range_values(false_reg, 0);
++              if (opcode == BPF_JGE) {
++                      /* Unsigned comparison, the minimum value is 0. */
++                      true_reg->min_value = 0;
++              }
+               /* If this is false then constant < register, if it is true then
+                * the register < constant.
+                */
+               false_reg->min_value = val + 1;
++              false_reg->value_from_signed = value_from_signed;
+               true_reg->max_value = val;
++              true_reg->value_from_signed = value_from_signed;
+               break;
+       default:
+               break;
+@@ -2220,6 +2365,12 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
+ 
+       check_reg_overflow(false_reg);
+       check_reg_overflow(true_reg);
++      if (is_range) {
++              if (__is_pointer_value(false, false_reg))
++                      reset_reg_range_values(false_reg, 0);
++              if (__is_pointer_value(false, true_reg))
++                      reset_reg_range_values(true_reg, 0);
++      }
+ }
+ 
+ static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id,
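
The hazard that value_from_signed tracks: a bound proven by a signed comparison says nothing about the unsigned range and vice versa, so the verifier must not mix the two when deriving min/max. One value makes the point:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t x = (uint64_t)-1;	/* 0xffffffffffffffff */

		/* A signed check (BPF_JSGT style) happily bounds it below 4... */
		if ((int64_t)x < 4)
			printf("signed view:   %lld (looks small)\n",
			       (long long)(int64_t)x);

		/* ...but as an unsigned offset (BPF_JGT style) it is maximal,
		 * so a bounds check derived from the signed compare alone is
		 * worthless for memory safety. */
		printf("unsigned view: %llu\n", (unsigned long long)x);
		return 0;
	}
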
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index dbb3d273d497..51ecc01b78ff 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -9996,28 +9996,27 @@ SYSCALL_DEFINE5(perf_event_open,
+                       goto err_context;
+ 
+               /*
+-               * Do not allow to attach to a group in a different
+-               * task or CPU context:
++               * Make sure we're both events for the same CPU;
++               * grouping events for different CPUs is broken; since
++               * you can never concurrently schedule them anyhow.
+                */
+-              if (move_group) {
+-                      /*
+-                       * Make sure we're both on the same task, or both
+-                       * per-cpu events.
+-                       */
+-                      if (group_leader->ctx->task != ctx->task)
+-                              goto err_context;
++              if (group_leader->cpu != event->cpu)
++                      goto err_context;
+ 
+-                      /*
+-                       * Make sure we're both events for the same CPU;
+-                       * grouping events for different CPUs is broken; since
+-                       * you can never concurrently schedule them anyhow.
+-                       */
+-                      if (group_leader->cpu != event->cpu)
+-                              goto err_context;
+-              } else {
+-                      if (group_leader->ctx != ctx)
+-                              goto err_context;
+-              }
++              /*
++               * Make sure we're both on the same task, or both
++               * per-CPU events.
++               */
++              if (group_leader->ctx->task != ctx->task)
++                      goto err_context;
++
++              /*
++               * Do not allow to attach to a group in a different task
++               * or CPU context. If we're moving SW events, we'll fix
++               * this up later, so allow that.
++               */
++              if (!move_group && group_leader->ctx != ctx)
++                      goto err_context;
+ 
+               /*
+                * Only a group leader can be exclusive or pinned
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 6440e0b70cad..9a2b4b4f13b4 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -802,6 +802,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
+       mm_init_cpumask(mm);
+       mm_init_aio(mm);
+       mm_init_owner(mm, p);
++      RCU_INIT_POINTER(mm->exe_file, NULL);
+       mmu_notifier_mm_init(mm);
+       clear_tlb_flush_pending(mm);
+ #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
+diff --git a/kernel/time/timer.c b/kernel/time/timer.c
+index d3f33020a06b..36cec054b8ae 100644
+--- a/kernel/time/timer.c
++++ b/kernel/time/timer.c
+@@ -203,6 +203,7 @@ struct timer_base {
+       bool                    migration_enabled;
+       bool                    nohz_active;
+       bool                    is_idle;
++      bool                    must_forward_clk;
+       DECLARE_BITMAP(pending_map, WHEEL_SIZE);
+       struct hlist_head       vectors[WHEEL_SIZE];
+ } ____cacheline_aligned;
+@@ -856,13 +857,19 @@ get_target_base(struct timer_base *base, unsigned tflags)
+ 
+ static inline void forward_timer_base(struct timer_base *base)
+ {
+-      unsigned long jnow = READ_ONCE(jiffies);
++      unsigned long jnow;
+ 
+       /*
+-       * We only forward the base when it's idle and we have a delta between
+-       * base clock and jiffies.
++       * We only forward the base when we are idle or have just come out of
++       * idle (must_forward_clk logic), and have a delta between base clock
++       * and jiffies. In the common case, run_timers will take care of it.
+        */
+-      if (!base->is_idle || (long) (jnow - base->clk) < 2)
++      if (likely(!base->must_forward_clk))
++              return;
++
++      jnow = READ_ONCE(jiffies);
++      base->must_forward_clk = base->is_idle;
++      if ((long)(jnow - base->clk) < 2)
+               return;
+ 
+       /*
+@@ -938,6 +945,11 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
+        * same array bucket then just return:
+        */
+       if (timer_pending(timer)) {
++              /*
++               * The downside of this optimization is that it can result in
++               * larger granularity than you would get from adding a new
++               * timer with this expiry.
++               */
+               if (timer->expires == expires)
+                       return 1;
+ 
+@@ -948,6 +960,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
+                * dequeue/enqueue dance.
+                */
+               base = lock_timer_base(timer, &flags);
++              forward_timer_base(base);
+ 
+               clk = base->clk;
+               idx = calc_wheel_index(expires, clk);
+@@ -964,6 +977,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
+               }
+       } else {
+               base = lock_timer_base(timer, &flags);
++              forward_timer_base(base);
+       }
+ 
+       ret = detach_if_pending(timer, base, false);
+@@ -991,12 +1005,10 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
+                       spin_lock(&base->lock);
+                       WRITE_ONCE(timer->flags,
+                                  (timer->flags & ~TIMER_BASEMASK) | base->cpu);
++                      forward_timer_base(base);
+               }
+       }
+ 
+-      /* Try to forward a stale timer base clock */
+-      forward_timer_base(base);
+-
+       timer->expires = expires;
+       /*
+        * If 'idx' was calculated above and the base time did not advance
+@@ -1112,6 +1124,7 @@ void add_timer_on(struct timer_list *timer, int cpu)
+               WRITE_ONCE(timer->flags,
+                          (timer->flags & ~TIMER_BASEMASK) | cpu);
+       }
++      forward_timer_base(base);
+ 
+       debug_activate(timer, timer->expires);
+       internal_add_timer(base, timer);
+@@ -1497,10 +1510,16 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
+               if (!is_max_delta)
+                       expires = basem + (u64)(nextevt - basej) * TICK_NSEC;
+               /*
+-               * If we expect to sleep more than a tick, mark the base idle:
++               * If we expect to sleep more than a tick, mark the base idle.
++               * Also the tick is stopped so any added timer must forward
++               * the base clk itself to keep granularity small. This idle
++               * logic is only maintained for the BASE_STD base, deferrable
++               * timers may still see large granularity skew (by design).
+                */
+-              if ((expires - basem) > TICK_NSEC)
++              if ((expires - basem) > TICK_NSEC) {
++                      base->must_forward_clk = true;
+                       base->is_idle = true;
++              }
+       }
+       spin_unlock(&base->lock);
+ 
+@@ -1611,6 +1630,19 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h)
+ {
+       struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
+ 
++      /*
++       * must_forward_clk must be cleared before running timers so that any
++       * timer functions that call mod_timer will not try to forward the
++       * base. Idle tracking / clock forwarding logic is only used with
++       * BASE_STD timers.
++       *
++       * The deferrable base does not do idle tracking at all, so we do
++       * not forward it. This can result in very large variations in
++       * granularity for deferrable timers, but they can be deferred for
++       * long periods due to idle.
++       */
++      base->must_forward_clk = false;
++
+       __run_timers(base);
+       if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active)
+               __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
+diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
+index 460a031c77e5..d521b301dee9 100644
+--- a/kernel/trace/bpf_trace.c
++++ b/kernel/trace/bpf_trace.c
+@@ -203,10 +203,36 @@ BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
+               fmt_cnt++;
+       }
+ 
+-      return __trace_printk(1/* fake ip will not be printed */, fmt,
+-                            mod[0] == 2 ? arg1 : mod[0] == 1 ? (long) arg1 : (u32) arg1,
+-                            mod[1] == 2 ? arg2 : mod[1] == 1 ? (long) arg2 : (u32) arg2,
+-                            mod[2] == 2 ? arg3 : mod[2] == 1 ? (long) arg3 : (u32) arg3);
++/* Horrid workaround for getting va_list handling working with different
++ * argument type combinations generically for 32 and 64 bit archs.
++ */
++#define __BPF_TP_EMIT()       __BPF_ARG3_TP()
++#define __BPF_TP(...)                                                 \
++      __trace_printk(1 /* Fake ip will not be printed. */,            \
++                     fmt, ##__VA_ARGS__)
++
++#define __BPF_ARG1_TP(...)                                            \
++      ((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64))        \
++        ? __BPF_TP(arg1, ##__VA_ARGS__)                               \
++        : ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32))    \
++            ? __BPF_TP((long)arg1, ##__VA_ARGS__)                     \
++            : __BPF_TP((u32)arg1, ##__VA_ARGS__)))
++
++#define __BPF_ARG2_TP(...)                                            \
++      ((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64))        \
++        ? __BPF_ARG1_TP(arg2, ##__VA_ARGS__)                          \
++        : ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32))    \
++            ? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__)                \
++            : __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__)))
++
++#define __BPF_ARG3_TP(...)                                            \
++      ((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64))        \
++        ? __BPF_ARG2_TP(arg3, ##__VA_ARGS__)                          \
++        : ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32))    \
++            ? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__)                \
++            : __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__)))
++
++      return __BPF_TP_EMIT();
+ }
+ 
+ static const struct bpf_func_proto bpf_trace_printk_proto = {
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 28e980d2851b..a2bbce575e88 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -878,6 +878,10 @@ static int profile_graph_entry(struct ftrace_graph_ent *trace)
+ 
+       function_profile_call(trace->func, 0, NULL, NULL);
+ 
++      /* If function graph is shutting down, ret_stack can be NULL */
++      if (!current->ret_stack)
++              return 0;
++
+       if (index >= 0 && index < FTRACE_RETFUNC_DEPTH)
+               current->ret_stack[index].subtime = 0;
+ 
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 4ae268e687fe..912f62df0279 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -4386,15 +4386,19 @@ EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
+  * the page that was allocated, with the read page of the buffer.
+  *
+  * Returns:
+- *  The page allocated, or NULL on error.
++ *  The page allocated, or ERR_PTR
+  */
+ void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
+ {
+-      struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
++      struct ring_buffer_per_cpu *cpu_buffer;
+       struct buffer_data_page *bpage = NULL;
+       unsigned long flags;
+       struct page *page;
+ 
++      if (!cpumask_test_cpu(cpu, buffer->cpumask))
++              return ERR_PTR(-ENODEV);
++
++      cpu_buffer = buffer->buffers[cpu];
+       local_irq_save(flags);
+       arch_spin_lock(&cpu_buffer->lock);
+ 
+@@ -4412,7 +4416,7 @@ void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
+       page = alloc_pages_node(cpu_to_node(cpu),
+                               GFP_KERNEL | __GFP_NORETRY, 0);
+       if (!page)
+-              return NULL;
++              return ERR_PTR(-ENOMEM);
+ 
+       bpage = page_address(page);
+ 
+@@ -4467,8 +4471,8 @@ EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
+  *
+  * for example:
+  *    rpage = ring_buffer_alloc_read_page(buffer, cpu);
+- *    if (!rpage)
+- *            return error;
++ *    if (IS_ERR(rpage))
++ *            return PTR_ERR(rpage);
+  *    ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
+  *    if (ret >= 0)
+  *            process_page(rpage, ret);
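
Returning ERR_PTR(-ENODEV)/ERR_PTR(-ENOMEM) instead of NULL lets callers tell "no such CPU" apart from "out of memory", as the updated kerneldoc example shows. A compact userspace model of the ERR_PTR convention (error values hard-coded for the sketch):

	#include <stdio.h>

	#define MAX_ERRNO	4095

	/* Userspace sketch of the kernel's ERR_PTR helpers: errno values
	 * occupy the top 4095 addresses of the pointer space, which no real
	 * allocation can return. */
	static inline void *ERR_PTR(long error)   { return (void *)error; }
	static inline long PTR_ERR(const void *p) { return (long)p; }
	static inline int IS_ERR(const void *p)
	{
		return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
	}

	static void *alloc_read_page(int cpu_online)
	{
		if (!cpu_online)
			return ERR_PTR(-19);	/* -ENODEV */
		return ERR_PTR(-12);	/* pretend allocation failed: -ENOMEM */
	}

	int main(void)
	{
		void *page = alloc_read_page(0);

		if (IS_ERR(page))	/* caller now sees *why* it failed */
			printf("failed with %ld\n", PTR_ERR(page));
		return 0;
	}
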
+diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c
+index 9fbcaf567886..68ee79afe31c 100644
+--- a/kernel/trace/ring_buffer_benchmark.c
++++ b/kernel/trace/ring_buffer_benchmark.c
+@@ -113,7 +113,7 @@ static enum event_status read_page(int cpu)
+       int i;
+ 
+       bpage = ring_buffer_alloc_read_page(buffer, cpu);
+-      if (!bpage)
++      if (IS_ERR(bpage))
+               return EVENT_DROPPED;
+ 
+       ret = ring_buffer_read_page(buffer, &bpage, PAGE_SIZE, cpu, 1);
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 5764318357de..749a82c6a832 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -6403,7 +6403,7 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
+ {
+       struct ftrace_buffer_info *info = filp->private_data;
+       struct trace_iterator *iter = &info->iter;
+-      ssize_t ret;
++      ssize_t ret = 0;
+       ssize_t size;
+ 
+       if (!count)
+@@ -6417,10 +6417,15 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
+       if (!info->spare) {
+              info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
+                                                         iter->cpu_file);
+-              info->spare_cpu = iter->cpu_file;
++              if (IS_ERR(info->spare)) {
++                      ret = PTR_ERR(info->spare);
++                      info->spare = NULL;
++              } else {
++                      info->spare_cpu = iter->cpu_file;
++              }
+       }
+       if (!info->spare)
+-              return -ENOMEM;
++              return ret;
+ 
+       /* Do we have previous read data to read? */
+       if (info->read < PAGE_SIZE)
+@@ -6595,8 +6600,9 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
+               ref->ref = 1;
+               ref->buffer = iter->trace_buffer->buffer;
+              ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
+-              if (!ref->page) {
+-                      ret = -ENOMEM;
++              if (IS_ERR(ref->page)) {
++                      ret = PTR_ERR(ref->page);
++                      ref->page = NULL;
+                       kfree(ref);
+                       break;
+               }
+@@ -8110,6 +8116,7 @@ __init static int tracer_alloc_buffers(void)
+       if (ret < 0)
+               goto out_free_cpumask;
+       /* Used for event triggers */
++      ret = -ENOMEM;
+       temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
+       if (!temp_buffer)
+               goto out_rm_hp_state;
+@@ -8224,4 +8231,4 @@ __init static int clear_boot_tracer(void)
+ }
+ 
+ fs_initcall(tracer_init_tracefs);
+-late_initcall(clear_boot_tracer);
++late_initcall_sync(clear_boot_tracer);
+diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
+index 59a411ff60c7..181e139a8057 100644
+--- a/kernel/trace/trace_events_filter.c
++++ b/kernel/trace/trace_events_filter.c
+@@ -1959,6 +1959,10 @@ static int create_filter(struct trace_event_call *call,
+               if (err && set_str)
+                       append_filter_err(ps, filter);
+       }
++      if (err && !set_str) {
++              free_event_filter(filter);
++              filter = NULL;
++      }
+       create_filter_finish(ps);
+ 
+       *filterp = filter;
+diff --git a/kernel/trace/tracing_map.c b/kernel/trace/tracing_map.c
+index 0a689bbb78ef..305039b122fa 100644
+--- a/kernel/trace/tracing_map.c
++++ b/kernel/trace/tracing_map.c
+@@ -221,16 +221,19 @@ void tracing_map_array_free(struct tracing_map_array *a)
+       if (!a)
+               return;
+ 
+-      if (!a->pages) {
+-              kfree(a);
+-              return;
+-      }
++      if (!a->pages)
++              goto free;
+ 
+       for (i = 0; i < a->n_pages; i++) {
+               if (!a->pages[i])
+                       break;
+               free_page((unsigned long)a->pages[i]);
+       }
++
++      kfree(a->pages);
++
++ free:
++      kfree(a);
+ }
+ 
+ struct tracing_map_array *tracing_map_array_alloc(unsigned int n_elts,
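
The restructured free path fixes a leak: the array of page pointers (a->pages) itself was never freed, only the pages it referenced. The general shape of the fix, as a hedged userspace sketch:

	#include <stdlib.h>

	struct map_array {
		size_t n_pages;
		void **pages;
	};

	static void map_array_free(struct map_array *a)
	{
		size_t i;

		if (!a)
			return;
		if (!a->pages)
			goto free;

		/* The kernel loop breaks at the first NULL page; free(NULL)
		 * is a no-op here, so a plain loop suffices. */
		for (i = 0; i < a->n_pages; i++)
			free(a->pages[i]);

		free(a->pages);	/* this was the leaked allocation */
	 free:
		free(a);
	}

	int main(void)
	{
		struct map_array *a = calloc(1, sizeof(*a));

		a->n_pages = 2;
		a->pages = calloc(a->n_pages, sizeof(void *));
		a->pages[0] = malloc(64);
		a->pages[1] = malloc(64);
		map_array_free(a);	/* leak-free with the a->pages free */
		return 0;
	}
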
+diff --git a/mm/madvise.c b/mm/madvise.c
+index 75d2cffbe61d..fc6bfbe19a16 100644
+--- a/mm/madvise.c
++++ b/mm/madvise.c
+@@ -368,8 +368,8 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
+                               pte_offset_map_lock(mm, pmd, addr, &ptl);
+                               goto out;
+                       }
+-                      put_page(page);
+                       unlock_page(page);
++                      put_page(page);
+                       pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+                       pte--;
+                       addr -= PAGE_SIZE;
+diff --git a/mm/memblock.c b/mm/memblock.c
+index 7087d5578866..43d0919e29f3 100644
+--- a/mm/memblock.c
++++ b/mm/memblock.c
+@@ -302,7 +302,7 @@ void __init memblock_discard(void)
+               __memblock_free_late(addr, size);
+       }
+ 
+-      if (memblock.memory.regions == memblock_memory_init_regions) {
++      if (memblock.memory.regions != memblock_memory_init_regions) {
+               addr = __pa(memblock.memory.regions);
+               size = PAGE_ALIGN(sizeof(struct memblock_region) *
+                                 memblock.memory.max);
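
The one-character fix inverts a test that decides whether the regions array still points at the static bootstrap storage: only a dynamically allocated replacement may be freed. The same guard in miniature:

	#include <stdlib.h>
	#include <string.h>

	#define INIT_REGIONS 4

	static int init_regions[INIT_REGIONS];	/* static bootstrap array */

	/* Free the table only if it was reallocated away from the static
	 * storage; with '==' (the original bug) the dynamic copy leaked and
	 * the static array would be handed to the allocator. */
	static void discard(int *regions)
	{
		if (regions != init_regions)
			free(regions);
	}

	int main(void)
	{
		int *grown = malloc(2 * sizeof(init_regions));

		memcpy(grown, init_regions, sizeof(init_regions));
		discard(grown);		/* dynamic copy: freed */
		discard(init_regions);	/* static array: left alone */
		return 0;
	}
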
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 4d16ef9d42a9..f553b3a6eca8 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -66,6 +66,7 @@
+ #include <linux/kthread.h>
+ #include <linux/memcontrol.h>
+ #include <linux/ftrace.h>
++#include <linux/nmi.h>
+ 
+ #include <asm/sections.h>
+ #include <asm/tlbflush.h>
+@@ -2495,9 +2496,14 @@ void drain_all_pages(struct zone *zone)
+ 
+ #ifdef CONFIG_HIBERNATION
+ 
++/*
++ * Touch the watchdog for every WD_PAGE_COUNT pages.
++ */
++#define WD_PAGE_COUNT (128*1024)
++
+ void mark_free_pages(struct zone *zone)
+ {
+-      unsigned long pfn, max_zone_pfn;
++      unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT;
+       unsigned long flags;
+       unsigned int order, t;
+       struct page *page;
+@@ -2512,6 +2518,11 @@ void mark_free_pages(struct zone *zone)
+               if (pfn_valid(pfn)) {
+                       page = pfn_to_page(pfn);
+ 
++                      if (!--page_count) {
++                              touch_nmi_watchdog();
++                              page_count = WD_PAGE_COUNT;
++                      }
++
+                       if (page_zone(page) != zone)
+                               continue;
+ 
+@@ -2525,8 +2536,13 @@ void mark_free_pages(struct zone *zone)
+                       unsigned long i;
+ 
+                       pfn = page_to_pfn(page);
+-                      for (i = 0; i < (1UL << order); i++)
++                      for (i = 0; i < (1UL << order); i++) {
++                              if (!--page_count) {
++                                      touch_nmi_watchdog();
++                                      page_count = WD_PAGE_COUNT;
++                              }
+                               swsusp_set_page_free(pfn_to_page(pfn + i));
++                      }
+               }
+       }
+       spin_unlock_irqrestore(&zone->lock, flags);
+diff --git a/mm/shmem.c b/mm/shmem.c
+index 1183e898743b..0474c7a73cfa 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -3964,7 +3964,7 @@ int __init shmem_init(void)
+       }
+ 
+ #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
+-      if (has_transparent_hugepage() && shmem_huge < SHMEM_HUGE_DENY)
++      if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
+               SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
+       else
+               shmem_huge = 0; /* just in case it was patched */
+@@ -4025,7 +4025,7 @@ static ssize_t shmem_enabled_store(struct kobject *kobj,
+               return -EINVAL;
+ 
+       shmem_huge = huge;
+-      if (shmem_huge < SHMEM_HUGE_DENY)
++      if (shmem_huge > SHMEM_HUGE_DENY)
+               SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
+       return count;
+ }
+diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
+index fbf251fef70f..4d6b94d7ce5f 100644
+--- a/net/bluetooth/bnep/core.c
++++ b/net/bluetooth/bnep/core.c
+@@ -484,16 +484,16 @@ static int bnep_session(void *arg)
+       struct net_device *dev = s->dev;
+       struct sock *sk = s->sock->sk;
+       struct sk_buff *skb;
+-      wait_queue_t wait;
++      DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ 
+       BT_DBG("");
+ 
+       set_user_nice(current, -15);
+ 
+-      init_waitqueue_entry(&wait, current);
+       add_wait_queue(sk_sleep(sk), &wait);
+       while (1) {
+-              set_current_state(TASK_INTERRUPTIBLE);
++              /* Ensure session->terminate is updated */
++              smp_mb__before_atomic();
+ 
+               if (atomic_read(&s->terminate))
+                       break;
+@@ -515,9 +515,8 @@ static int bnep_session(void *arg)
+                               break;
+               netif_wake_queue(dev);
+ 
+-              schedule();
++              wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
+       }
+-      __set_current_state(TASK_RUNNING);
+       remove_wait_queue(sk_sleep(sk), &wait);
+ 
+       /* Cleanup session */
+@@ -666,7 +665,7 @@ int bnep_del_connection(struct bnep_conndel_req *req)
+       s = __bnep_get_session(req->dst);
+       if (s) {
+               atomic_inc(&s->terminate);
+-              wake_up_process(s->task);
++              wake_up_interruptible(sk_sleep(s->sock->sk));
+       } else
+               err = -ENOENT;
+ 
+diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
+index 9e59b6654126..1152ce34dad4 100644
+--- a/net/bluetooth/cmtp/core.c
++++ b/net/bluetooth/cmtp/core.c
+@@ -280,16 +280,16 @@ static int cmtp_session(void *arg)
+       struct cmtp_session *session = arg;
+       struct sock *sk = session->sock->sk;
+       struct sk_buff *skb;
+-      wait_queue_t wait;
++      DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ 
+       BT_DBG("session %p", session);
+ 
+       set_user_nice(current, -15);
+ 
+-      init_waitqueue_entry(&wait, current);
+       add_wait_queue(sk_sleep(sk), &wait);
+       while (1) {
+-              set_current_state(TASK_INTERRUPTIBLE);
++              /* Ensure session->terminate is updated */
++              smp_mb__before_atomic();
+ 
+               if (atomic_read(&session->terminate))
+                       break;
+@@ -306,9 +306,8 @@ static int cmtp_session(void *arg)
+ 
+               cmtp_process_transmit(session);
+ 
+-              schedule();
++              wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
+       }
+-      __set_current_state(TASK_RUNNING);
+       remove_wait_queue(sk_sleep(sk), &wait);
+ 
+       down_write(&cmtp_session_sem);
+@@ -393,7 +392,7 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
+               err = cmtp_attach_device(session);
+               if (err < 0) {
+                       atomic_inc(&session->terminate);
+-                      wake_up_process(session->task);
++                      wake_up_interruptible(sk_sleep(session->sock->sk));
+                       up_write(&cmtp_session_sem);
+                       return err;
+               }
+@@ -431,7 +430,11 @@ int cmtp_del_connection(struct cmtp_conndel_req *req)
+ 
+               /* Stop session thread */
+               atomic_inc(&session->terminate);
+-              wake_up_process(session->task);
++
++              /* Ensure session->terminate is updated */
++              smp_mb__after_atomic();
++
++              wake_up_interruptible(sk_sleep(session->sock->sk));
+       } else
+               err = -ENOENT;
+ 
+diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
+index 0bec4588c3c8..1fc076420d1e 100644
+--- a/net/bluetooth/hidp/core.c
++++ b/net/bluetooth/hidp/core.c
+@@ -36,6 +36,7 @@
+ #define VERSION "1.2"
+ 
+ static DECLARE_RWSEM(hidp_session_sem);
++static DECLARE_WAIT_QUEUE_HEAD(hidp_session_wq);
+ static LIST_HEAD(hidp_session_list);
+ 
+ static unsigned char hidp_keycode[256] = {
+@@ -1068,12 +1069,12 @@ static int hidp_session_start_sync(struct hidp_session *session)
+  * Wake up session thread and notify it to stop. This is asynchronous and
+  * returns immediately. Call this whenever a runtime error occurs and you want
+  * the session to stop.
+- * Note: wake_up_process() performs any necessary memory-barriers for us.
++ * Note: wake_up_interruptible() performs any necessary memory-barriers for us.
+  */
+ static void hidp_session_terminate(struct hidp_session *session)
+ {
+       atomic_inc(&session->terminate);
+-      wake_up_process(session->task);
++      wake_up_interruptible(&hidp_session_wq);
+ }
+ 
+ /*
+@@ -1180,7 +1181,9 @@ static void hidp_session_run(struct hidp_session *session)
+       struct sock *ctrl_sk = session->ctrl_sock->sk;
+       struct sock *intr_sk = session->intr_sock->sk;
+       struct sk_buff *skb;
++      DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ 
++      add_wait_queue(&hidp_session_wq, &wait);
+       for (;;) {
+               /*
+                * This thread can be woken up two ways:
+@@ -1188,12 +1191,10 @@ static void hidp_session_run(struct hidp_session *session)
+                *    session->terminate flag and wakes this thread up.
+                *  - Via modifying the socket state of ctrl/intr_sock. This
+                *    thread is woken up by ->sk_state_changed().
+-               *
+-               * Note: set_current_state() performs any necessary
+-               * memory-barriers for us.
+                */
+-              set_current_state(TASK_INTERRUPTIBLE);
+ 
++              /* Ensure session->terminate is updated */
++              smp_mb__before_atomic();
+               if (atomic_read(&session->terminate))
+                       break;
+ 
+@@ -1227,11 +1228,22 @@ static void hidp_session_run(struct hidp_session *session)
+               hidp_process_transmit(session, &session->ctrl_transmit,
+                                     session->ctrl_sock);
+ 
+-              schedule();
++              wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
+       }
++      remove_wait_queue(&hidp_session_wq, &wait);
+ 
+       atomic_inc(&session->terminate);
+-      set_current_state(TASK_RUNNING);
++
++      /* Ensure session->terminate is updated */
++      smp_mb__after_atomic();
++}
++
++static int hidp_session_wake_function(wait_queue_t *wait,
++                                    unsigned int mode,
++                                    int sync, void *key)
++{
++      wake_up_interruptible(&hidp_session_wq);
++      return false;
+ }
+ 
+ /*
+@@ -1244,7 +1256,8 @@ static void hidp_session_run(struct hidp_session *session)
+ static int hidp_session_thread(void *arg)
+ {
+       struct hidp_session *session = arg;
+-      wait_queue_t ctrl_wait, intr_wait;
++      DEFINE_WAIT_FUNC(ctrl_wait, hidp_session_wake_function);
++      DEFINE_WAIT_FUNC(intr_wait, hidp_session_wake_function);
+ 
+       BT_DBG("session %p", session);
+ 
+@@ -1254,8 +1267,6 @@ static int hidp_session_thread(void *arg)
+       set_user_nice(current, -15);
+       hidp_set_timer(session);
+ 
+-      init_waitqueue_entry(&ctrl_wait, current);
+-      init_waitqueue_entry(&intr_wait, current);
+       add_wait_queue(sk_sleep(session->ctrl_sock->sk), &ctrl_wait);
+       add_wait_queue(sk_sleep(session->intr_sock->sk), &intr_wait);
+       /* This memory barrier is paired with wq_has_sleeper(). See
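
The cmtp and hidp hunks above share one idea: instead of waking a cached
task pointer with wake_up_process(), the threads now sleep on wait queues
and re-check session->terminate around the sleep, so a termination request
can no longer slip in between the check and the schedule. Below is a
minimal userspace sketch of that check/sleep/re-check shape using pthreads
rather than the kernel's wait_woken() machinery; all names (session_loop,
session_terminate) are illustrative, not taken from the patch.

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wq = PTHREAD_COND_INITIALIZER;
static bool terminate;          /* analogue of session->terminate */

/* analogue of hidp_session_run(): sleep only while there is nothing to do */
static void session_loop(void)
{
        pthread_mutex_lock(&lock);
        while (!terminate) {
                /* ... drain receive/transmit queues here ... */
                pthread_cond_wait(&wq, &lock);  /* drops the lock atomically */
        }
        pthread_mutex_unlock(&lock);
}

/* analogue of hidp_session_terminate(): set the flag, then wake the sleeper */
static void session_terminate(void)
{
        pthread_mutex_lock(&lock);
        terminate = true;       /* the mutex provides the memory ordering */
        pthread_mutex_unlock(&lock);
        pthread_cond_broadcast(&wq);
}
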
+diff --git a/net/dccp/proto.c b/net/dccp/proto.c
+index 9fe25bf63296..b68168fcc06a 100644
+--- a/net/dccp/proto.c
++++ b/net/dccp/proto.c
+@@ -24,6 +24,7 @@
+ #include <net/checksum.h>
+ 
+ #include <net/inet_sock.h>
++#include <net/inet_common.h>
+ #include <net/sock.h>
+ #include <net/xfrm.h>
+ 
+@@ -170,6 +171,15 @@ const char *dccp_packet_name(const int type)
+ 
+ EXPORT_SYMBOL_GPL(dccp_packet_name);
+ 
++static void dccp_sk_destruct(struct sock *sk)
++{
++      struct dccp_sock *dp = dccp_sk(sk);
++
++      ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
++      dp->dccps_hc_tx_ccid = NULL;
++      inet_sock_destruct(sk);
++}
++
+ int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
+ {
+       struct dccp_sock *dp = dccp_sk(sk);
+@@ -179,6 +189,7 @@ int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
+       icsk->icsk_syn_retries  = sysctl_dccp_request_retries;
+       sk->sk_state            = DCCP_CLOSED;
+       sk->sk_write_space      = dccp_write_space;
++      sk->sk_destruct         = dccp_sk_destruct;
+       icsk->icsk_sync_mss     = dccp_sync_mss;
+       dp->dccps_mss_cache     = 536;
+       dp->dccps_rate_last     = jiffies;
+@@ -201,10 +212,7 @@ void dccp_destroy_sock(struct sock *sk)
+ {
+       struct dccp_sock *dp = dccp_sk(sk);
+ 
+-      /*
+-       * DCCP doesn't use sk_write_queue, just sk_send_head
+-       * for retransmissions
+-       */
++      __skb_queue_purge(&sk->sk_write_queue);
+       if (sk->sk_send_head != NULL) {
+               kfree_skb(sk->sk_send_head);
+               sk->sk_send_head = NULL;
+@@ -222,8 +230,7 @@ void dccp_destroy_sock(struct sock *sk)
+               dp->dccps_hc_rx_ackvec = NULL;
+       }
+       ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
+-      ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
+-      dp->dccps_hc_rx_ccid = dp->dccps_hc_tx_ccid = NULL;
++      dp->dccps_hc_rx_ccid = NULL;
+ 
+       /* clean up feature negotiation state */
+       dccp_feat_list_purge(&dp->dccps_featneg);
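
The dccp hunks move the TX CCID teardown out of dccp_destroy_sock() and
into a new sk_destruct callback, which only runs once the last reference
to the socket is gone, so late packets can no longer touch a freed CCID.
A hedged sketch of that release-time-teardown pattern (types and refcount
are invented, and the counter is non-atomic for brevity):

#include <stdlib.h>

struct conn {
        int refs;
        void *tx_state;         /* resource in-flight users may still need */
};

static void conn_destruct(struct conn *c)
{
        free(c->tx_state);      /* safe: no references remain */
        free(c);
}

static void conn_put(struct conn *c)
{
        if (--c->refs == 0)     /* destructor runs at the final put */
                conn_destruct(c);
}
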
+diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
+index ce7bc2e5175a..ac9a8fbbacfd 100644
+--- a/net/ipv4/fib_semantics.c
++++ b/net/ipv4/fib_semantics.c
+@@ -1033,15 +1033,17 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
+       fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
+       if (!fi)
+               goto failure;
+-      fib_info_cnt++;
+       if (cfg->fc_mx) {
+               fi->fib_metrics = kzalloc(sizeof(*fi->fib_metrics), GFP_KERNEL);
+-              if (!fi->fib_metrics)
+-                      goto failure;
++              if (unlikely(!fi->fib_metrics)) {
++                      kfree(fi);
++                      return ERR_PTR(err);
++              }
+               atomic_set(&fi->fib_metrics->refcnt, 1);
+-      } else
++      } else {
+               fi->fib_metrics = (struct dst_metrics *)&dst_default_metrics;
+-
++      }
++      fib_info_cnt++;
+       fi->fib_net = net;
+       fi->fib_protocol = cfg->fc_protocol;
+       fi->fib_scope = cfg->fc_scope;
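
The fib_create_info() hunk tightens the error path: when the metrics
allocation fails, the partially built fib_info is freed on the spot, and
fib_info_cnt is only incremented once construction can no longer fail, so
the counter and the object can never go out of sync. The same shape as a
standalone sketch (make_entry and its fields are invented):

#include <stdlib.h>

struct entry {
        int *metrics;
};

static struct entry *make_entry(int want_metrics)
{
        struct entry *e = calloc(1, sizeof(*e));

        if (!e)
                return NULL;
        if (want_metrics) {
                e->metrics = calloc(1, sizeof(*e->metrics));
                if (!e->metrics) {
                        free(e);        /* undo the partial construction */
                        return NULL;
                }
        }
        /* only a fully built entry is counted/published */
        return e;
}
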
+diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
+index 3db1adb6b7a0..abdbe79ee175 100644
+--- a/net/ipv4/igmp.c
++++ b/net/ipv4/igmp.c
+@@ -1007,10 +1007,18 @@ int igmp_rcv(struct sk_buff *skb)
+ {
+       /* This basically follows the spec line by line -- see RFC1112 */
+       struct igmphdr *ih;
+-      struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
++      struct net_device *dev = skb->dev;
++      struct in_device *in_dev;
+       int len = skb->len;
+       bool dropped = true;
+ 
++      if (netif_is_l3_master(dev)) {
++              dev = dev_get_by_index_rcu(dev_net(dev), IPCB(skb)->iif);
++              if (!dev)
++                      goto drop;
++      }
++
++      in_dev = __in_dev_get_rcu(dev);
+       if (!in_dev)
+               goto drop;
+ 
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 6883b3d4ba8f..22ba873546c3 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -1268,7 +1268,7 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
+       if (mtu)
+               return mtu;
+ 
+-      mtu = dst->dev->mtu;
++      mtu = READ_ONCE(dst->dev->mtu);
+ 
+       if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
+               if (rt->rt_uses_gateway && mtu > 576)
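
READ_ONCE() in the ipv4_mtu() hunk forces a single untorn load of
dev->mtu, which device configuration can change concurrently; without it
the compiler is free to reload the field, letting two uses of "mtu"
disagree. The closest portable analogue is a relaxed C11 atomic load
(sketch only; dev_mtu stands in for the kernel field):

#include <stdatomic.h>

static _Atomic unsigned int dev_mtu;    /* updated by another thread */

static unsigned int path_mtu(void)
{
        /* one load, used consistently from here on */
        unsigned int mtu = atomic_load_explicit(&dev_mtu,
                                                memory_order_relaxed);

        return mtu ? mtu : 1500;        /* illustrative fallback */
}
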
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 57bcae81fe42..fbaac4423a99 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -3007,8 +3007,7 @@ void tcp_rearm_rto(struct sock *sk)
+                       /* delta may not be positive if the socket is locked
+                        * when the retrans timer fires and is rescheduled.
+                        */
+-                      if (delta > 0)
+-                              rto = delta;
++                      rto = max(delta, 1);
+               }
+               inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto,
+                                         TCP_RTO_MAX);
+diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
+index e4e9f752ebbf..cd8dd8c4e819 100644
+--- a/net/ipv6/ip6_fib.c
++++ b/net/ipv6/ip6_fib.c
+@@ -912,6 +912,8 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
+               }
+               nsiblings = iter->rt6i_nsiblings;
+               fib6_purge_rt(iter, fn, info->nl_net);
++              if (fn->rr_ptr == iter)
++                      fn->rr_ptr = NULL;
+               rt6_release(iter);
+ 
+               if (nsiblings) {
+@@ -924,6 +926,8 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
+                               if (rt6_qualify_for_ecmp(iter)) {
+                                       *ins = iter->dst.rt6_next;
+                                       fib6_purge_rt(iter, fn, info->nl_net);
++                                      if (fn->rr_ptr == iter)
++                                              fn->rr_ptr = NULL;
+                                       rt6_release(iter);
+                                       nsiblings--;
+                               } else {
+@@ -1012,7 +1016,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt,
+                       /* Create subtree root node */
+                       sfn = node_alloc();
+                       if (!sfn)
+-                              goto st_failure;
++                              goto failure;
+ 
+                       sfn->leaf = info->nl_net->ipv6.ip6_null_entry;
+                       atomic_inc(&info->nl_net->ipv6.ip6_null_entry->rt6i_ref);
+@@ -1028,12 +1032,12 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt,
+ 
+                       if (IS_ERR(sn)) {
+                               /* If it is failed, discard just allocated
+-                                 root, and then (in st_failure) stale node
++                                 root, and then (in failure) stale node
+                                  in main tree.
+                                */
+                               node_free(sfn);
+                               err = PTR_ERR(sn);
+-                              goto st_failure;
++                              goto failure;
+                       }
+ 
+                       /* Now link new subtree to main tree */
+@@ -1047,7 +1051,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt,
+ 
+                       if (IS_ERR(sn)) {
+                               err = PTR_ERR(sn);
+-                              goto st_failure;
++                              goto failure;
+                       }
+               }
+ 
+@@ -1089,22 +1093,22 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt,
+                       atomic_inc(&pn->leaf->rt6i_ref);
+               }
+ #endif
+-              if (!(rt->dst.flags & DST_NOCACHE))
+-                      dst_free(&rt->dst);
++              goto failure;
+       }
+       return err;
+ 
+-#ifdef CONFIG_IPV6_SUBTREES
+-      /* Subtree creation failed, probably main tree node
+-         is orphan. If it is, shoot it.
++failure:
++      /* fn->leaf could be NULL if fn is an intermediate node and we
++       * failed to add the new route to it in both subtree creation
++       * failure and fib6_add_rt2node() failure case.
++       * In both cases, fib6_repair_tree() should be called to fix
++       * fn->leaf.
+        */
+-st_failure:
+       if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT)))
+               fib6_repair_tree(info->nl_net, fn);
+       if (!(rt->dst.flags & DST_NOCACHE))
+               dst_free(&rt->dst);
+       return err;
+-#endif
+ }
+ 
+ /*
+diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
+index 8d77ad5cadaf..4cadc29f547c 100644
+--- a/net/irda/af_irda.c
++++ b/net/irda/af_irda.c
+@@ -2225,7 +2225,7 @@ static int irda_getsockopt(struct socket *sock, int level, int optname,
+ {
+       struct sock *sk = sock->sk;
+       struct irda_sock *self = irda_sk(sk);
+-      struct irda_device_list list;
++      struct irda_device_list list = { 0 };
+       struct irda_device_info *discoveries;
+       struct irda_ias_set *   ias_opt;        /* IAS get/query params */
+       struct ias_object *     ias_obj;        /* Object in IAS */
+diff --git a/net/key/af_key.c b/net/key/af_key.c
+index b1432b668033..166e32c93038 100644
+--- a/net/key/af_key.c
++++ b/net/key/af_key.c
+@@ -228,7 +228,7 @@ static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2,
+ #define BROADCAST_ONE         1
+ #define BROADCAST_REGISTERED  2
+ #define BROADCAST_PROMISC_ONLY        4
+-static int pfkey_broadcast(struct sk_buff *skb,
++static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
+                          int broadcast_flags, struct sock *one_sk,
+                          struct net *net)
+ {
+@@ -278,7 +278,7 @@ static int pfkey_broadcast(struct sk_buff *skb,
+       rcu_read_unlock();
+ 
+       if (one_sk != NULL)
+-              err = pfkey_broadcast_one(skb, &skb2, GFP_KERNEL, one_sk);
++              err = pfkey_broadcast_one(skb, &skb2, allocation, one_sk);
+ 
+       kfree_skb(skb2);
+       kfree_skb(skb);
+@@ -311,7 +311,7 @@ static int pfkey_do_dump(struct pfkey_sock *pfk)
+               hdr = (struct sadb_msg *) pfk->dump.skb->data;
+               hdr->sadb_msg_seq = 0;
+               hdr->sadb_msg_errno = rc;
+-              pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE,
++              pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
+                               &pfk->sk, sock_net(&pfk->sk));
+               pfk->dump.skb = NULL;
+       }
+@@ -355,7 +355,7 @@ static int pfkey_error(const struct sadb_msg *orig, int err, struct sock *sk)
+       hdr->sadb_msg_len = (sizeof(struct sadb_msg) /
+                            sizeof(uint64_t));
+ 
+-      pfkey_broadcast(skb, BROADCAST_ONE, sk, sock_net(sk));
++      pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ONE, sk, sock_net(sk));
+ 
+       return 0;
+ }
+@@ -1396,7 +1396,7 @@ static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, const struct sadb_
+ 
+       xfrm_state_put(x);
+ 
+-      pfkey_broadcast(resp_skb, BROADCAST_ONE, sk, net);
++      pfkey_broadcast(resp_skb, GFP_KERNEL, BROADCAST_ONE, sk, net);
+ 
+       return 0;
+ }
+@@ -1483,7 +1483,7 @@ static int key_notify_sa(struct xfrm_state *x, const struct km_event *c)
+       hdr->sadb_msg_seq = c->seq;
+       hdr->sadb_msg_pid = c->portid;
+ 
+-      pfkey_broadcast(skb, BROADCAST_ALL, NULL, xs_net(x));
++      pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xs_net(x));
+ 
+       return 0;
+ }
+@@ -1596,7 +1596,7 @@ static int pfkey_get(struct sock *sk, struct sk_buff *skb, const struct sadb_msg
+       out_hdr->sadb_msg_reserved = 0;
+       out_hdr->sadb_msg_seq = hdr->sadb_msg_seq;
+       out_hdr->sadb_msg_pid = hdr->sadb_msg_pid;
+-      pfkey_broadcast(out_skb, BROADCAST_ONE, sk, sock_net(sk));
++      pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk, sock_net(sk));
+ 
+       return 0;
+ }
+@@ -1701,8 +1701,8 @@ static int pfkey_register(struct sock *sk, struct sk_buff *skb, const struct sad
+               return -ENOBUFS;
+       }
+ 
+-      pfkey_broadcast(supp_skb, BROADCAST_REGISTERED, sk, sock_net(sk));
+-
++      pfkey_broadcast(supp_skb, GFP_KERNEL, BROADCAST_REGISTERED, sk,
++                      sock_net(sk));
+       return 0;
+ }
+ 
+@@ -1720,7 +1720,8 @@ static int unicast_flush_resp(struct sock *sk, const struct sadb_msg *ihdr)
+       hdr->sadb_msg_errno = (uint8_t) 0;
+       hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
+ 
+-      return pfkey_broadcast(skb, BROADCAST_ONE, sk, sock_net(sk));
++      return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ONE, sk,
++                             sock_net(sk));
+ }
+ 
+ static int key_notify_sa_flush(const struct km_event *c)
+@@ -1741,7 +1742,7 @@ static int key_notify_sa_flush(const struct km_event *c)
+       hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
+       hdr->sadb_msg_reserved = 0;
+ 
+-      pfkey_broadcast(skb, BROADCAST_ALL, NULL, c->net);
++      pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
+ 
+       return 0;
+ }
+@@ -1798,7 +1799,7 @@ static int dump_sa(struct xfrm_state *x, int count, void *ptr)
+       out_hdr->sadb_msg_pid = pfk->dump.msg_portid;
+ 
+       if (pfk->dump.skb)
+-              pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE,
++              pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
+                               &pfk->sk, sock_net(&pfk->sk));
+       pfk->dump.skb = out_skb;
+ 
+@@ -1886,7 +1887,7 @@ static int pfkey_promisc(struct sock *sk, struct sk_buff *skb, const struct sadb
+               new_hdr->sadb_msg_errno = 0;
+       }
+ 
+-      pfkey_broadcast(skb, BROADCAST_ALL, NULL, sock_net(sk));
++      pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ALL, NULL, sock_net(sk));
+       return 0;
+ }
+ 
+@@ -2219,7 +2220,7 @@ static int key_notify_policy(struct xfrm_policy *xp, int dir, const struct km_ev
+       out_hdr->sadb_msg_errno = 0;
+       out_hdr->sadb_msg_seq = c->seq;
+       out_hdr->sadb_msg_pid = c->portid;
+-      pfkey_broadcast(out_skb, BROADCAST_ALL, NULL, xp_net(xp));
++      pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xp_net(xp));
+       return 0;
+ 
+ }
+@@ -2439,7 +2440,7 @@ static int key_pol_get_resp(struct sock *sk, struct xfrm_policy *xp, const struc
+       out_hdr->sadb_msg_errno = 0;
+       out_hdr->sadb_msg_seq = hdr->sadb_msg_seq;
+       out_hdr->sadb_msg_pid = hdr->sadb_msg_pid;
+-      pfkey_broadcast(out_skb, BROADCAST_ONE, sk, xp_net(xp));
++      pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk, xp_net(xp));
+       err = 0;
+ 
+ out:
+@@ -2695,7 +2696,7 @@ static int dump_sp(struct xfrm_policy *xp, int dir, int count, void *ptr)
+       out_hdr->sadb_msg_pid = pfk->dump.msg_portid;
+ 
+       if (pfk->dump.skb)
+-              pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE,
++              pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
+                               &pfk->sk, sock_net(&pfk->sk));
+       pfk->dump.skb = out_skb;
+ 
+@@ -2752,7 +2753,7 @@ static int key_notify_policy_flush(const struct km_event *c)
+       hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC;
+       hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
+       hdr->sadb_msg_reserved = 0;
+-      pfkey_broadcast(skb_out, BROADCAST_ALL, NULL, c->net);
++      pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
+       return 0;
+ 
+ }
+@@ -2816,7 +2817,7 @@ static int pfkey_process(struct sock *sk, struct sk_buff *skb, const struct sadb
+       void *ext_hdrs[SADB_EXT_MAX];
+       int err;
+ 
+-      pfkey_broadcast(skb_clone(skb, GFP_KERNEL),
++      pfkey_broadcast(skb_clone(skb, GFP_KERNEL), GFP_KERNEL,
+                       BROADCAST_PROMISC_ONLY, NULL, sock_net(sk));
+ 
+       memset(ext_hdrs, 0, sizeof(ext_hdrs));
+@@ -3038,7 +3039,8 @@ static int key_notify_sa_expire(struct xfrm_state *x, const struct km_event *c)
+       out_hdr->sadb_msg_seq = 0;
+       out_hdr->sadb_msg_pid = 0;
+ 
+-      pfkey_broadcast(out_skb, BROADCAST_REGISTERED, NULL, xs_net(x));
++      pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL,
++                      xs_net(x));
+       return 0;
+ }
+ 
+@@ -3228,7 +3230,8 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
+                      xfrm_ctx->ctx_len);
+       }
+ 
+-      return pfkey_broadcast(skb, BROADCAST_REGISTERED, NULL, xs_net(x));
++      return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL,
++                             xs_net(x));
+ }
+ 
+ static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt,
+@@ -3426,7 +3429,8 @@ static int pfkey_send_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
+       n_port->sadb_x_nat_t_port_port = sport;
+       n_port->sadb_x_nat_t_port_reserved = 0;
+ 
+-      return pfkey_broadcast(skb, BROADCAST_REGISTERED, NULL, xs_net(x));
++      return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL,
++                             xs_net(x));
+ }
+ 
+ #ifdef CONFIG_NET_KEY_MIGRATE
+@@ -3618,7 +3622,7 @@ static int pfkey_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
+       }
+ 
+       /* broadcast migrate message to sockets */
+-      pfkey_broadcast(skb, BROADCAST_ALL, NULL, &init_net);
++      pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, &init_net);
+ 
+       return 0;
+ 
+diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
+index e03d16ed550d..899c2c36da13 100644
+--- a/net/netfilter/nf_conntrack_expect.c
++++ b/net/netfilter/nf_conntrack_expect.c
+@@ -422,7 +422,7 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
+       h = nf_ct_expect_dst_hash(net, &expect->tuple);
+       hlist_for_each_entry_safe(i, next, &nf_ct_expect_hash[h], hnode) {
+               if (expect_matches(i, expect)) {
+-                      if (nf_ct_remove_expect(expect))
++                      if (nf_ct_remove_expect(i))
+                               break;
+               } else if (expect_clash(i, expect)) {
+                       ret = -EBUSY;
+diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
+index 6c72922d20ca..b93a46ef812d 100644
+--- a/net/netfilter/nf_nat_core.c
++++ b/net/netfilter/nf_nat_core.c
+@@ -222,20 +222,21 @@ find_appropriate_src(struct net *net,
+               .tuple = tuple,
+               .zone = zone
+       };
+-      struct rhlist_head *hl;
++      struct rhlist_head *hl, *h;
+ 
+       hl = rhltable_lookup(&nf_nat_bysource_table, &key,
+                            nf_nat_bysource_params);
+-      if (!hl)
+-              return 0;
+ 
+-      ct = container_of(hl, typeof(*ct), nat_bysource);
++      rhl_for_each_entry_rcu(ct, h, hl, nat_bysource) {
++              nf_ct_invert_tuplepr(result,
++                                   &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
++              result->dst = tuple->dst;
+ 
+-      nf_ct_invert_tuplepr(result,
+-                           &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+-      result->dst = tuple->dst;
++              if (in_range(l3proto, l4proto, result, range))
++                      return 1;
++      }
+ 
+-      return in_range(l3proto, l4proto, result, range);
++      return 0;
+ }
+ 
+ /* For [FUTURE] fragmentation handling, we want the least-used
+diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
+index 80f5ecf2c3d7..ff1f4ce6fba4 100644
+--- a/net/netfilter/nfnetlink.c
++++ b/net/netfilter/nfnetlink.c
+@@ -463,8 +463,7 @@ static void nfnetlink_rcv_skb_batch(struct sk_buff *skb, struct nlmsghdr *nlh)
+       if (msglen > skb->len)
+               msglen = skb->len;
+ 
+-      if (nlh->nlmsg_len < NLMSG_HDRLEN ||
+-          skb->len < NLMSG_HDRLEN + sizeof(struct nfgenmsg))
++      if (skb->len < NLMSG_HDRLEN + sizeof(struct nfgenmsg))
+               return;
+ 
+       err = nla_parse(cda, NFNL_BATCH_MAX, attr, attrlen, nfnl_batch_policy,
+@@ -491,7 +490,8 @@ static void nfnetlink_rcv(struct sk_buff *skb)
+ {
+       struct nlmsghdr *nlh = nlmsg_hdr(skb);
+ 
+-      if (nlh->nlmsg_len < NLMSG_HDRLEN ||
++      if (skb->len < NLMSG_HDRLEN ||
++          nlh->nlmsg_len < NLMSG_HDRLEN ||
+           skb->len < nlh->nlmsg_len)
+               return;
+ 
+diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
+index e4610676299b..a54a556fcdb5 100644
+--- a/net/openvswitch/actions.c
++++ b/net/openvswitch/actions.c
+@@ -1337,6 +1337,7 @@ int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
+               goto out;
+       }
+ 
++      OVS_CB(skb)->acts_origlen = acts->orig_len;
+       err = do_execute_actions(dp, skb, key,
+                                acts->actions, acts->actions_len);
+ 
+diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
+index 7b17da9a94a0..57ce10b6cf6b 100644
+--- a/net/openvswitch/datapath.c
++++ b/net/openvswitch/datapath.c
+@@ -381,7 +381,7 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
+ }
+ 
+ static size_t upcall_msg_size(const struct dp_upcall_info *upcall_info,
+-                            unsigned int hdrlen)
++                            unsigned int hdrlen, int actions_attrlen)
+ {
+       size_t size = NLMSG_ALIGN(sizeof(struct ovs_header))
+               + nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */
+@@ -398,7 +398,7 @@ static size_t upcall_msg_size(const struct dp_upcall_info *upcall_info,
+ 
+       /* OVS_PACKET_ATTR_ACTIONS */
+       if (upcall_info->actions_len)
+-              size += nla_total_size(upcall_info->actions_len);
++              size += nla_total_size(actions_attrlen);
+ 
+       /* OVS_PACKET_ATTR_MRU */
+       if (upcall_info->mru)
+@@ -465,7 +465,8 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
+       else
+               hlen = skb->len;
+ 
+-      len = upcall_msg_size(upcall_info, hlen - cutlen);
++      len = upcall_msg_size(upcall_info, hlen - cutlen,
++                            OVS_CB(skb)->acts_origlen);
+       user_skb = genlmsg_new(len, GFP_ATOMIC);
+       if (!user_skb) {
+               err = -ENOMEM;
+diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
+index da931bdef8a7..98a28f78aff2 100644
+--- a/net/openvswitch/datapath.h
++++ b/net/openvswitch/datapath.h
+@@ -98,12 +98,14 @@ struct datapath {
+  * @input_vport: The original vport packet came in on. This value is cached
+  * when a packet is received by OVS.
+  * @mru: The maximum received fragement size; 0 if the packet is not
++ * @acts_origlen: The netlink size of the flow actions applied to this skb.
+  * @cutlen: The number of bytes from the packet end to be removed.
+  * fragmented.
+  */
+ struct ovs_skb_cb {
+       struct vport            *input_vport;
+       u16                     mru;
++      u16                     acts_origlen;
+       u32                     cutlen;
+ };
+ #define OVS_CB(skb) ((struct ovs_skb_cb *)(skb)->cb)
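
OVS_CB() overlays struct ovs_skb_cb on the fixed 48-byte skb->cb scratch
area, which is why the new acts_origlen field is a cheap place to carry
per-packet state but must never push the struct past the buffer. A sketch
of that control-block idiom with the size constraint made explicit (packet
and packet_cb are invented stand-ins):

#include <assert.h>
#include <stdint.h>

struct packet {
        uint8_t cb[48];         /* per-packet scratch, like skb->cb */
};

struct packet_cb {
        uint16_t mru;
        uint16_t acts_origlen;
        uint32_t cutlen;
};

/* the overlay must never outgrow the scratch area */
static_assert(sizeof(struct packet_cb) <= sizeof(((struct packet *)0)->cb),
              "control block too large");

#define PKT_CB(p) ((struct packet_cb *)(p)->cb)
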
+diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
+index d516ba8178b8..541707802a23 100644
+--- a/net/sched/act_ipt.c
++++ b/net/sched/act_ipt.c
+@@ -41,6 +41,7 @@ static int ipt_init_target(struct net *net, struct xt_entry_target *t,
+ {
+       struct xt_tgchk_param par;
+       struct xt_target *target;
++      struct ipt_entry e = {};
+       int ret = 0;
+ 
+       target = xt_request_find_target(AF_INET, t->u.user.name,
+@@ -52,6 +53,7 @@ static int ipt_init_target(struct net *net, struct xt_entry_target *t,
+       memset(&par, 0, sizeof(par));
+       par.net       = net;
+       par.table     = table;
++      par.entryinfo = &e;
+       par.target    = target;
+       par.targinfo  = t->data;
+       par.hook_mask = hook;
+diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
+index cfdbfa18a95e..fdbbdfd8e9a8 100644
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -286,9 +286,6 @@ static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
+ void qdisc_hash_add(struct Qdisc *q, bool invisible)
+ {
+       if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
+-              struct Qdisc *root = qdisc_dev(q)->qdisc;
+-
+-              WARN_ON_ONCE(root == &noop_qdisc);
+               ASSERT_RTNL();
+               hash_add_rcu(qdisc_dev(q)->qdisc_hash, &q->hash, q->handle);
+               if (invisible)
+diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
+index 332d94be6e1c..22451a9eb89d 100644
+--- a/net/sched/sch_sfq.c
++++ b/net/sched/sch_sfq.c
+@@ -435,6 +435,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
+               qdisc_drop(head, sch, to_free);
+ 
+               slot_queue_add(slot, skb);
++              qdisc_tree_reduce_backlog(sch, 0, delta);
+               return NET_XMIT_CN;
+       }
+ 
+@@ -466,8 +467,10 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
+       /* Return Congestion Notification only if we dropped a packet
+        * from this flow.
+        */
+-      if (qlen != slot->qlen)
++      if (qlen != slot->qlen) {
++              qdisc_tree_reduce_backlog(sch, 0, dropped - qdisc_pkt_len(skb));
+               return NET_XMIT_CN;
++      }
+ 
+       /* As we dropped a packet, better let upper stack know this */
+       qdisc_tree_reduce_backlog(sch, 1, dropped);
+diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
+index f5b45b8b8b16..0de5f5f8ddbc 100644
+--- a/net/sctp/ipv6.c
++++ b/net/sctp/ipv6.c
+@@ -510,7 +510,9 @@ static void sctp_v6_to_addr(union sctp_addr *addr, struct in6_addr *saddr,
+ {
+       addr->sa.sa_family = AF_INET6;
+       addr->v6.sin6_port = port;
++      addr->v6.sin6_flowinfo = 0;
+       addr->v6.sin6_addr = *saddr;
++      addr->v6.sin6_scope_id = 0;
+ }
+ 
+ /* Compare addresses exactly.
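
The sctp hunk zeroes sin6_flowinfo and sin6_scope_id so the converted
address carries no stale field contents, and the af_irda hunk earlier does
the same job wholesale with a "= { 0 }" initializer; both ensure that
later exact comparisons, or copies out to userspace, never see
uninitialized bytes. The safe userspace pattern:

#include <netinet/in.h>
#include <string.h>

static void fill_addr(struct sockaddr_in6 *sa,
                      const struct in6_addr *addr, in_port_t port)
{
        memset(sa, 0, sizeof(*sa));     /* fields and padding all zeroed */
        sa->sin6_family = AF_INET6;
        sa->sin6_port = port;
        sa->sin6_addr = *addr;
        /* sin6_flowinfo and sin6_scope_id stay 0 unless set explicitly */
}
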
+diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
+index 2b720fa35c4f..e18500151236 100644
+--- a/net/sunrpc/svcsock.c
++++ b/net/sunrpc/svcsock.c
+@@ -421,6 +421,9 @@ static void svc_data_ready(struct sock *sk)
+               dprintk("svc: socket %p(inet %p), busy=%d\n",
+                       svsk, sk,
+                       test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags));
++
++              /* Refer to svc_setup_socket() for details. */
++              rmb();
+               svsk->sk_odata(sk);
+               if (!test_and_set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags))
+                       svc_xprt_enqueue(&svsk->sk_xprt);
+@@ -437,6 +440,9 @@ static void svc_write_space(struct sock *sk)
+       if (svsk) {
+               dprintk("svc: socket %p(inet %p), write_space busy=%d\n",
+                       svsk, sk, test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags));
++
++              /* Refer to svc_setup_socket() for details. */
++              rmb();
+               svsk->sk_owspace(sk);
+               svc_xprt_enqueue(&svsk->sk_xprt);
+       }
+@@ -760,8 +766,12 @@ static void svc_tcp_listen_data_ready(struct sock *sk)
+       dprintk("svc: socket %p TCP (listen) state change %d\n",
+               sk, sk->sk_state);
+ 
+-      if (svsk)
++      if (svsk) {
++              /* Refer to svc_setup_socket() for details. */
++              rmb();
+               svsk->sk_odata(sk);
++      }
++
+       /*
+        * This callback may called twice when a new connection
+        * is established as a child socket inherits everything
+@@ -794,6 +804,8 @@ static void svc_tcp_state_change(struct sock *sk)
+       if (!svsk)
+               printk("svc: socket %p: no user data\n", sk);
+       else {
++              /* Refer to svc_setup_socket() for details. */
++              rmb();
+               svsk->sk_ostate(sk);
+               if (sk->sk_state != TCP_ESTABLISHED) {
+                       set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
+@@ -1381,12 +1393,18 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
+               return ERR_PTR(err);
+       }
+ 
+-      inet->sk_user_data = svsk;
+       svsk->sk_sock = sock;
+       svsk->sk_sk = inet;
+       svsk->sk_ostate = inet->sk_state_change;
+       svsk->sk_odata = inet->sk_data_ready;
+       svsk->sk_owspace = inet->sk_write_space;
++      /*
++       * This barrier is necessary in order to prevent race condition
++       * with svc_data_ready(), svc_listen_data_ready() and others
++       * when calling callbacks above.
++       */
++      wmb();
++      inet->sk_user_data = svsk;
+ 
+       /* Initialize the socket */
+       if (sock->type == SOCK_DGRAM)
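
The sunrpc hunks order a classic publish: every svsk field, including the
saved sk_odata/sk_owspace/sk_ostate callbacks, is written first, wmb()
fences those stores, and only then is sk_user_data set; readers issue
rmb() after seeing the pointer before touching the fields. In C11 the same
pairing is a release store matched with an acquire load (sketch;
svc_sock_like is an invented stand-in):

#include <stdatomic.h>
#include <stddef.h>

struct svc_sock_like {
        void (*odata)(void);    /* saved callback, as in svsk->sk_odata */
};

static _Atomic(struct svc_sock_like *) user_data; /* like sk_user_data */

static void publish(struct svc_sock_like *svsk, void (*cb)(void))
{
        svsk->odata = cb;       /* fully initialize first */
        /* release: the store above is visible before the pointer is */
        atomic_store_explicit(&user_data, svsk, memory_order_release);
}

static void on_data_ready(void)
{
        /* acquire pairs with the release store in publish() */
        struct svc_sock_like *svsk =
                atomic_load_explicit(&user_data, memory_order_acquire);

        if (svsk && svsk->odata)
                svsk->odata();
}
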
+diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
+index 9bfe886ab330..750949dfc1d7 100644
+--- a/net/tipc/netlink_compat.c
++++ b/net/tipc/netlink_compat.c
+@@ -258,13 +258,15 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
+       arg = nlmsg_new(0, GFP_KERNEL);
+       if (!arg) {
+               kfree_skb(msg->rep);
++              msg->rep = NULL;
+               return -ENOMEM;
+       }
+ 
+       err = __tipc_nl_compat_dumpit(cmd, msg, arg);
+-      if (err)
++      if (err) {
+               kfree_skb(msg->rep);
+-
++              msg->rep = NULL;
++      }
+       kfree_skb(arg);
+ 
+       return err;
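
The tipc hunk nulls msg->rep immediately after kfree_skb() so that a later
error path cannot free the same buffer a second time. The idiom in plain C:

#include <stdlib.h>

struct reply {
        void *rep;      /* buffer that several error paths may release */
};

static void drop_reply(struct reply *msg)
{
        free(msg->rep);
        msg->rep = NULL;        /* a repeated drop_reply() is now harmless */
}
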
+diff --git a/sound/core/control.c b/sound/core/control.c
+index c109b82eef4b..7b43b0f74b84 100644
+--- a/sound/core/control.c
++++ b/sound/core/control.c
+@@ -1157,7 +1157,7 @@ static int snd_ctl_elem_user_tlv(struct snd_kcontrol *kcontrol,
+               mutex_lock(&ue->card->user_ctl_lock);
+               change = ue->tlv_data_size != size;
+               if (!change)
+-                      change = memcmp(ue->tlv_data, new_data, size);
++                      change = memcmp(ue->tlv_data, new_data, size) != 0;
+               kfree(ue->tlv_data);
+               ue->tlv_data = new_data;
+               ue->tlv_data_size = size;
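
The control.c hunk matters because memcmp() returns the sign of the first
differing byte, and snd_ctl_elem_user_tlv() ultimately propagates "change"
to callers for whom a negative value reads as an error code; comparing
"!= 0" collapses the result to 0 or 1. A small demonstration of the hazard
(buffers are invented):

#include <stdio.h>
#include <string.h>

int main(void)
{
        char a[] = { 0x10 }, b[] = { 0x7f };
        int raw = memcmp(a, b, 1);              /* negative: a[0] < b[0] */
        int changed = memcmp(a, b, 1) != 0;     /* normalized to 0 or 1 */

        /* returning raw where "< 0 means error" would misfire */
        printf("raw=%d changed=%d\n", raw, changed);
        return 0;
}
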
+diff --git a/sound/firewire/iso-resources.c b/sound/firewire/iso-resources.c
+index f0e4d502d604..066b5df666f4 100644
+--- a/sound/firewire/iso-resources.c
++++ b/sound/firewire/iso-resources.c
+@@ -210,9 +210,14 @@ EXPORT_SYMBOL(fw_iso_resources_update);
+  */
+ void fw_iso_resources_free(struct fw_iso_resources *r)
+ {
+-      struct fw_card *card = fw_parent_device(r->unit)->card;
++      struct fw_card *card;
+       int bandwidth, channel;
+ 
++      /* Not initialized. */
++      if (r->unit == NULL)
++              return;
++      card = fw_parent_device(r->unit)->card;
++
+       mutex_lock(&r->mutex);
+ 
+       if (r->allocated) {
+diff --git a/sound/firewire/motu/motu.c b/sound/firewire/motu/motu.c
+index bf779cfeef0d..59a270406353 100644
+--- a/sound/firewire/motu/motu.c
++++ b/sound/firewire/motu/motu.c
+@@ -128,6 +128,7 @@ static void do_registration(struct work_struct *work)
+       return;
+ error:
+       snd_motu_transaction_unregister(motu);
++      snd_motu_stream_destroy_duplex(motu);
+       snd_card_free(motu->card);
+       dev_info(&motu->unit->device,
+                "Sound card registration failed: %d\n", err);
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 8c1289963c80..a81aacf684b2 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -947,6 +947,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
+       SND_PCI_QUIRK(0x17aa, 0x390b, "Lenovo G50-80", CXT_FIXUP_STEREO_DMIC),
+       SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC),
+       SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC),
++      SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo G50-70", CXT_FIXUP_STEREO_DMIC),
+       SND_PCI_QUIRK(0x17aa, 0x397b, "Lenovo S205", CXT_FIXUP_STEREO_DMIC),
+       SND_PCI_QUIRK_VENDOR(0x17aa, "Thinkpad", CXT_FIXUP_THINKPAD_ACPI),
+       SND_PCI_QUIRK(0x1c06, 0x2011, "Lemote A1004", CXT_PINCFG_LEMOTE_A1004),
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 6a03f9697039..5d2a63248b1d 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1309,10 +1309,13 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
+           && (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
+               mdelay(20);
+ 
+-      /* Zoom R16/24 needs a tiny delay here, otherwise requests like
+-       * get/set frequency return as failed despite actually succeeding.
++      /* Zoom R16/24, Logitech H650e, Jabra 550a needs a tiny delay here,
++       * otherwise requests like get/set frequency return as failed despite
++       * actually succeeding.
+        */
+-      if (chip->usb_id == USB_ID(0x1686, 0x00dd) &&
++      if ((chip->usb_id == USB_ID(0x1686, 0x00dd) ||
++           chip->usb_id == USB_ID(0x046d, 0x0a46) ||
++           chip->usb_id == USB_ID(0x0b0e, 0x0349)) &&
+           (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
+               mdelay(1);
+ }
+diff --git a/tools/testing/selftests/ntb/ntb_test.sh b/tools/testing/selftests/ntb/ntb_test.sh
+index 13f5198ba0ee..b3c48fc6ea4b 100755
+--- a/tools/testing/selftests/ntb/ntb_test.sh
++++ b/tools/testing/selftests/ntb/ntb_test.sh
+@@ -326,6 +326,10 @@ function ntb_tool_tests()
+       link_test $LOCAL_TOOL $REMOTE_TOOL
+       link_test $REMOTE_TOOL $LOCAL_TOOL
+ 
++      #Ensure the link is up on both sides before continuing
++      write_file Y $LOCAL_TOOL/link_event
++      write_file Y $REMOTE_TOOL/link_event
++
+       for PEER_TRANS in $(ls $LOCAL_TOOL/peer_trans*); do
+               PT=$(basename $PEER_TRANS)
+               write_file $MW_SIZE $LOCAL_TOOL/$PT
