commit:     e5428f0f7a5db952b6fc85c2ce3f447fdffccafe
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed May 16 10:24:30 2018 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed May 16 10:24:30 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=e5428f0f

Linux patch 4.14.41

 0000_README              |    4 +
 1040_linux-4.14.41.patch | 2289 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2293 insertions(+)

diff --git a/0000_README b/0000_README
index 35b5351..196f1c1 100644
--- a/0000_README
+++ b/0000_README
@@ -203,6 +203,10 @@ Patch:  1039_linux-4.14.40.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.14.40
 
+Patch:  1040_linux-4.14.41.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.14.41
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1040_linux-4.14.41.patch b/1040_linux-4.14.41.patch
new file mode 100644
index 0000000..dea50ad
--- /dev/null
+++ b/1040_linux-4.14.41.patch
@@ -0,0 +1,2289 @@
+diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt
+index f3d0d316d5f1..e4fe6adc372b 100644
+--- a/Documentation/arm64/silicon-errata.txt
++++ b/Documentation/arm64/silicon-errata.txt
+@@ -55,6 +55,7 @@ stable kernels.
+ | ARM            | Cortex-A57      | #834220         | ARM64_ERRATUM_834220        |
+ | ARM            | Cortex-A72      | #853709         | N/A                         |
+ | ARM            | Cortex-A73      | #858921         | ARM64_ERRATUM_858921        |
++| ARM            | Cortex-A55      | #1024718        | ARM64_ERRATUM_1024718       |
+ | ARM            | MMU-500         | #841119,#826419 | N/A                         |
+ |                |                 |                 |                             |
+ | Cavium         | ThunderX ITS    | #22375, #24313  | CAVIUM_ERRATUM_22375        |
+diff --git a/Makefile b/Makefile
+index 6eede39f898a..c23d0b0c6c45 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 40
++SUBLEVEL = 41
+ EXTRAVERSION =
+ NAME = Petit Gorille
+ 
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index c2abb4e88ff2..2d5f7aca156d 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -443,6 +443,20 @@ config ARM64_ERRATUM_843419
+ 
+         If unsure, say Y.
+ 
++config ARM64_ERRATUM_1024718
++      bool "Cortex-A55: 1024718: Update of DBM/AP bits without break before make might result in incorrect update"
++      default y
++      help
++        This option adds work around for Arm Cortex-A55 Erratum 1024718.
++
++        Affected Cortex-A55 cores (r0p0, r0p1, r1p0) could cause incorrect
++        update of the hardware dirty bit when the DBM/AP bits are updated
++        without a break-before-make. The work around is to disable the usage
++        of hardware DBM locally on the affected cores. CPUs not affected by
++        erratum will continue to use the feature.
++
++        If unsure, say Y.
++
+ config CAVIUM_ERRATUM_22375
+       bool "Cavium erratum 22375, 24313"
+       default y
+diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
+index 463619dcadd4..25b2a4161c7a 100644
+--- a/arch/arm64/include/asm/assembler.h
++++ b/arch/arm64/include/asm/assembler.h
+@@ -25,6 +25,7 @@
+ 
+ #include <asm/asm-offsets.h>
+ #include <asm/cpufeature.h>
++#include <asm/cputype.h>
+ #include <asm/page.h>
+ #include <asm/pgtable-hwdef.h>
+ #include <asm/ptrace.h>
+@@ -495,4 +496,43 @@ alternative_endif
+       and     \phys, \pte, #(((1 << (48 - PAGE_SHIFT)) - 1) << PAGE_SHIFT)
+       .endm
+ 
++/*
++ * Check the MIDR_EL1 of the current CPU for a given model and a range of
++ * variant/revision. See asm/cputype.h for the macros used below.
++ *
++ *    model:          MIDR_CPU_MODEL of CPU
++ *    rv_min:         Minimum of MIDR_CPU_VAR_REV()
++ *    rv_max:         Maximum of MIDR_CPU_VAR_REV()
++ *    res:            Result register.
++ *    tmp1, tmp2, tmp3: Temporary registers
++ *
++ * Corrupts: res, tmp1, tmp2, tmp3
++ * Returns:  0, if the CPU id doesn't match. Non-zero otherwise
++ */
++      .macro  cpu_midr_match model, rv_min, rv_max, res, tmp1, tmp2, tmp3
++      mrs             \res, midr_el1
++      mov_q           \tmp1, (MIDR_REVISION_MASK | MIDR_VARIANT_MASK)
++      mov_q           \tmp2, MIDR_CPU_MODEL_MASK
++      and             \tmp3, \res, \tmp2      // Extract model
++      and             \tmp1, \res, \tmp1      // rev & variant
++      mov_q           \tmp2, \model
++      cmp             \tmp3, \tmp2
++      cset            \res, eq
++      cbz             \res, .Ldone\@          // Model matches ?
++
++      .if (\rv_min != 0)                      // Skip min check if rv_min == 0
++      mov_q           \tmp3, \rv_min
++      cmp             \tmp1, \tmp3
++      cset            \res, ge
++      .endif                                  // \rv_min != 0
++      /* Skip rv_max check if rv_min == rv_max && rv_min != 0 */
++      .if ((\rv_min != \rv_max) || \rv_min == 0)
++      mov_q           \tmp2, \rv_max
++      cmp             \tmp1, \tmp2
++      cset            \tmp2, le
++      and             \res, \res, \tmp2
++      .endif
++.Ldone\@:
++      .endm
++
+ #endif        /* __ASM_ASSEMBLER_H */
+diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
+index be7bd19c87ec..30da0918d046 100644
+--- a/arch/arm64/include/asm/cputype.h
++++ b/arch/arm64/include/asm/cputype.h
+@@ -78,6 +78,7 @@
+ 
+ #define ARM_CPU_PART_AEM_V8           0xD0F
+ #define ARM_CPU_PART_FOUNDATION               0xD00
++#define ARM_CPU_PART_CORTEX_A55               0xD05
+ #define ARM_CPU_PART_CORTEX_A57               0xD07
+ #define ARM_CPU_PART_CORTEX_A72               0xD08
+ #define ARM_CPU_PART_CORTEX_A53               0xD03
+@@ -98,6 +99,7 @@
+ #define QCOM_CPU_PART_KRYO            0x200
+ 
+ #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
++#define MIDR_CORTEX_A55 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A55)
+ #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
+ #define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72)
+ #define MIDR_CORTEX_A73 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A73)
+diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
+index 139320a7f7a2..e338165000e6 100644
+--- a/arch/arm64/mm/proc.S
++++ b/arch/arm64/mm/proc.S
+@@ -438,6 +438,11 @@ ENTRY(__cpu_setup)
+       cbz     x9, 2f
+       cmp     x9, #2
+       b.lt    1f
++#ifdef CONFIG_ARM64_ERRATUM_1024718
++      /* Disable hardware DBM on Cortex-A55 r0p0, r0p1 & r1p0 */
++      cpu_midr_match MIDR_CORTEX_A55, MIDR_CPU_VAR_REV(0, 0), MIDR_CPU_VAR_REV(1, 0), x1, x2, x3, x4
++      cbnz    x1, 1f
++#endif
+       orr     x10, x10, #TCR_HD               // hardware Dirty flag update
+ 1:    orr     x10, x10, #TCR_HA               // hardware Access flag update
+ 2:
+diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
+index c5d7435455f1..27a41695fcfd 100644
+--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
++++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
+@@ -19,6 +19,9 @@
+ #include <asm/pgalloc.h>
+ #include <asm/pte-walk.h>
+ 
++static void mark_pages_dirty(struct kvm *kvm, struct kvm_memory_slot *memslot,
++                           unsigned long gfn, unsigned int order);
++
+ /*
+  * Supported radix tree geometry.
+  * Like p9, we support either 5 or 9 bits at the first (lowest) level,
+@@ -195,6 +198,12 @@ static void kvmppc_pte_free(pte_t *ptep)
+       kmem_cache_free(kvm_pte_cache, ptep);
+ }
+ 
++/* Like pmd_huge() and pmd_large(), but works regardless of config options */
++static inline int pmd_is_leaf(pmd_t pmd)
++{
++      return !!(pmd_val(pmd) & _PAGE_PTE);
++}
++
+ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
+                            unsigned int level, unsigned long mmu_seq)
+ {
+@@ -219,7 +228,7 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
+       else
+               new_pmd = pmd_alloc_one(kvm->mm, gpa);
+ 
+-      if (level == 0 && !(pmd && pmd_present(*pmd)))
++      if (level == 0 && !(pmd && pmd_present(*pmd) && !pmd_is_leaf(*pmd)))
+               new_ptep = kvmppc_pte_alloc();
+ 
+       /* Check if we might have been invalidated; let the guest retry if so */
+@@ -244,12 +253,30 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
+               new_pmd = NULL;
+       }
+       pmd = pmd_offset(pud, gpa);
+-      if (pmd_large(*pmd)) {
+-              /* Someone else has instantiated a large page here; retry */
+-              ret = -EAGAIN;
+-              goto out_unlock;
+-      }
+-      if (level == 1 && !pmd_none(*pmd)) {
++      if (pmd_is_leaf(*pmd)) {
++              unsigned long lgpa = gpa & PMD_MASK;
++
++              /*
++               * If we raced with another CPU which has just put
++               * a 2MB pte in after we saw a pte page, try again.
++               */
++              if (level == 0 && !new_ptep) {
++                      ret = -EAGAIN;
++                      goto out_unlock;
++              }
++              /* Valid 2MB page here already, remove it */
++              old = kvmppc_radix_update_pte(kvm, pmdp_ptep(pmd),
++                                            ~0UL, 0, lgpa, PMD_SHIFT);
++              kvmppc_radix_tlbie_page(kvm, lgpa, PMD_SHIFT);
++              if (old & _PAGE_DIRTY) {
++                      unsigned long gfn = lgpa >> PAGE_SHIFT;
++                      struct kvm_memory_slot *memslot;
++                      memslot = gfn_to_memslot(kvm, gfn);
++                      if (memslot)
++                              mark_pages_dirty(kvm, memslot, gfn,
++                                               PMD_SHIFT - PAGE_SHIFT);
++              }
++      } else if (level == 1 && !pmd_none(*pmd)) {
+               /*
+                * There's a page table page here, but we wanted
+                * to install a large page.  Tell the caller and let
+@@ -412,28 +439,24 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
+       } else {
+               page = pages[0];
+               pfn = page_to_pfn(page);
+-              if (PageHuge(page)) {
+-                      page = compound_head(page);
+-                      pte_size <<= compound_order(page);
++              if (PageCompound(page)) {
++                      pte_size <<= compound_order(compound_head(page));
+                       /* See if we can insert a 2MB large-page PTE here */
+                       if (pte_size >= PMD_SIZE &&
+-                          (gpa & PMD_MASK & PAGE_MASK) ==
+-                          (hva & PMD_MASK & PAGE_MASK)) {
++                          (gpa & (PMD_SIZE - PAGE_SIZE)) ==
++                          (hva & (PMD_SIZE - PAGE_SIZE))) {
+                               level = 1;
+                               pfn &= ~((PMD_SIZE >> PAGE_SHIFT) - 1);
+                       }
+               }
+               /* See if we can provide write access */
+               if (writing) {
+-                      /*
+-                       * We assume gup_fast has set dirty on the host PTE.
+-                       */
+                       pgflags |= _PAGE_WRITE;
+               } else {
+                       local_irq_save(flags);
+                       ptep = find_current_mm_pte(current->mm->pgd,
+                                                  hva, NULL, NULL);
+-                      if (ptep && pte_write(*ptep) && pte_dirty(*ptep))
++                      if (ptep && pte_write(*ptep))
+                               pgflags |= _PAGE_WRITE;
+                       local_irq_restore(flags);
+               }
+@@ -459,18 +482,15 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
+               pte = pfn_pte(pfn, __pgprot(pgflags));
+               ret = kvmppc_create_pte(kvm, pte, gpa, level, mmu_seq);
+       }
+-      if (ret == 0 || ret == -EAGAIN)
+-              ret = RESUME_GUEST;
+ 
+       if (page) {
+-              /*
+-               * We drop pages[0] here, not page because page might
+-               * have been set to the head page of a compound, but
+-               * we have to drop the reference on the correct tail
+-               * page to match the get inside gup()
+-               */
+-              put_page(pages[0]);
++              if (!ret && (pgflags & _PAGE_WRITE))
++                      set_page_dirty_lock(page);
++              put_page(page);
+       }
++
++      if (ret == 0 || ret == -EAGAIN)
++              ret = RESUME_GUEST;
+       return ret;
+ }
+ 
+@@ -676,7 +696,7 @@ void kvmppc_free_radix(struct kvm *kvm)
+                               continue;
+                       pmd = pmd_offset(pud, 0);
+                       for (im = 0; im < PTRS_PER_PMD; ++im, ++pmd) {
+-                              if (pmd_huge(*pmd)) {
++                              if (pmd_is_leaf(*pmd)) {
+                                       pmd_clear(pmd);
+                                       continue;
+                               }
+diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
+index e094dc90ff1b..377d1420bd02 100644
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -2847,7 +2847,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
+        */
+       trace_hardirqs_on();
+ 
+-      guest_enter();
++      guest_enter_irqoff();
+ 
+       srcu_idx = srcu_read_lock(&vc->kvm->srcu);
+ 
+@@ -2855,8 +2855,6 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
+ 
+       srcu_read_unlock(&vc->kvm->srcu, srcu_idx);
+ 
+-      guest_exit();
+-
+       trace_hardirqs_off();
+       set_irq_happened(trap);
+ 
+@@ -2890,6 +2888,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
+       kvmppc_set_host_core(pcpu);
+ 
+       local_irq_enable();
++      guest_exit();
+ 
+       /* Let secondaries go back to the offline loop */
+       for (i = 0; i < controlled_threads; ++i) {
+@@ -3619,15 +3618,17 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
+               goto up_out;
+ 
+       psize = vma_kernel_pagesize(vma);
+-      porder = __ilog2(psize);
+ 
+       up_read(&current->mm->mmap_sem);
+ 
+       /* We can handle 4k, 64k or 16M pages in the VRMA */
+-      err = -EINVAL;
+-      if (!(psize == 0x1000 || psize == 0x10000 ||
+-            psize == 0x1000000))
+-              goto out_srcu;
++      if (psize >= 0x1000000)
++              psize = 0x1000000;
++      else if (psize >= 0x10000)
++              psize = 0x10000;
++      else
++              psize = 0x1000;
++      porder = __ilog2(psize);
+ 
+       senc = slb_pgsize_encoding(psize);
+       kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
+diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+index 2b3194b9608f..663a398449b7 100644
+--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+@@ -308,7 +308,6 @@ kvm_novcpu_exit:
+       stw     r12, STACK_SLOT_TRAP(r1)
+       bl      kvmhv_commence_exit
+       nop
+-      lwz     r12, STACK_SLOT_TRAP(r1)
+       b       kvmhv_switch_to_host
+ 
+ /*
+@@ -1136,6 +1135,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+ 
+ secondary_too_late:
+       li      r12, 0
++      stw     r12, STACK_SLOT_TRAP(r1)
+       cmpdi   r4, 0
+       beq     11f
+       stw     r12, VCPU_TRAP(r4)
+@@ -1445,12 +1445,12 @@ mc_cont:
+ 1:
+ #endif /* CONFIG_KVM_XICS */
+ 
++      stw     r12, STACK_SLOT_TRAP(r1)
+       mr      r3, r12
+       /* Increment exit count, poke other threads to exit */
+       bl      kvmhv_commence_exit
+       nop
+       ld      r9, HSTATE_KVM_VCPU(r13)
+-      lwz     r12, VCPU_TRAP(r9)
+ 
+       /* Stop others sending VCPU interrupts to this physical CPU */
+       li      r0, -1
+@@ -1816,6 +1816,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)
+        * POWER7/POWER8 guest -> host partition switch code.
+        * We don't have to lock against tlbies but we do
+        * have to coordinate the hardware threads.
++       * Here STACK_SLOT_TRAP(r1) contains the trap number.
+        */
+ kvmhv_switch_to_host:
+       /* Secondary threads wait for primary to do partition switch */
+@@ -1868,11 +1869,11 @@ BEGIN_FTR_SECTION
+ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+ 
+       /* If HMI, call kvmppc_realmode_hmi_handler() */
++      lwz     r12, STACK_SLOT_TRAP(r1)
+       cmpwi   r12, BOOK3S_INTERRUPT_HMI
+       bne     27f
+       bl      kvmppc_realmode_hmi_handler
+       nop
+-      li      r12, BOOK3S_INTERRUPT_HMI
+       /*
+        * At this point kvmppc_realmode_hmi_handler would have resync-ed
+        * the TB. Hence it is not required to subtract guest timebase
+@@ -1950,6 +1951,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
+       li      r0, KVM_GUEST_MODE_NONE
+       stb     r0, HSTATE_IN_GUEST(r13)
+ 
++      lwz     r12, STACK_SLOT_TRAP(r1)        /* return trap # in r12 */
+       ld      r0, SFS+PPC_LR_STKOFF(r1)
+       addi    r1, r1, SFS
+       mtlr    r0
+diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
+index 589af1eec7c1..011a47b4587c 100644
+--- a/arch/x86/events/core.c
++++ b/arch/x86/events/core.c
+@@ -27,6 +27,7 @@
+ #include <linux/cpu.h>
+ #include <linux/bitops.h>
+ #include <linux/device.h>
++#include <linux/nospec.h>
+ 
+ #include <asm/apic.h>
+ #include <asm/stacktrace.h>
+@@ -304,17 +305,20 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
+ 
+       config = attr->config;
+ 
+-      cache_type = (config >>  0) & 0xff;
++      cache_type = (config >> 0) & 0xff;
+       if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
+               return -EINVAL;
++      cache_type = array_index_nospec(cache_type, PERF_COUNT_HW_CACHE_MAX);
+ 
+       cache_op = (config >>  8) & 0xff;
+       if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
+               return -EINVAL;
++      cache_op = array_index_nospec(cache_op, PERF_COUNT_HW_CACHE_OP_MAX);
+ 
+       cache_result = (config >> 16) & 0xff;
+       if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
+               return -EINVAL;
++      cache_result = array_index_nospec(cache_result, PERF_COUNT_HW_CACHE_RESULT_MAX);
+ 
+       val = hw_cache_event_ids[cache_type][cache_op][cache_result];
+ 
+@@ -421,6 +425,8 @@ int x86_setup_perfctr(struct perf_event *event)
+       if (attr->config >= x86_pmu.max_events)
+               return -EINVAL;
+ 
++      attr->config = array_index_nospec((unsigned long)attr->config, x86_pmu.max_events);
++
+       /*
+        * The generic map:
+        */
+diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
+index 72db0664a53d..357e82dc0e2a 100644
+--- a/arch/x86/events/intel/cstate.c
++++ b/arch/x86/events/intel/cstate.c
+@@ -91,6 +91,7 @@
+ #include <linux/module.h>
+ #include <linux/slab.h>
+ #include <linux/perf_event.h>
++#include <linux/nospec.h>
+ #include <asm/cpu_device_id.h>
+ #include <asm/intel-family.h>
+ #include "../perf_event.h"
+@@ -301,6 +302,7 @@ static int cstate_pmu_event_init(struct perf_event *event)
+       } else if (event->pmu == &cstate_pkg_pmu) {
+               if (cfg >= PERF_CSTATE_PKG_EVENT_MAX)
+                       return -EINVAL;
++              cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_PKG_EVENT_MAX);
+               if (!pkg_msr[cfg].attr)
+                       return -EINVAL;
+               event->hw.event_base = pkg_msr[cfg].msr;
+diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c
+index 14efaa0e8684..81dd57280441 100644
+--- a/arch/x86/events/msr.c
++++ b/arch/x86/events/msr.c
+@@ -1,5 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0
+ #include <linux/perf_event.h>
++#include <linux/nospec.h>
+ #include <asm/intel-family.h>
+ 
+ enum perf_msr_id {
+@@ -145,9 +146,6 @@ static int msr_event_init(struct perf_event *event)
+       if (event->attr.type != event->pmu->type)
+               return -ENOENT;
+ 
+-      if (cfg >= PERF_MSR_EVENT_MAX)
+-              return -EINVAL;
+-
+       /* unsupported modes and filters */
+       if (event->attr.exclude_user   ||
+           event->attr.exclude_kernel ||
+@@ -158,6 +156,11 @@ static int msr_event_init(struct perf_event *event)
+           event->attr.sample_period) /* no sampling */
+               return -EINVAL;
+ 
++      if (cfg >= PERF_MSR_EVENT_MAX)
++              return -EINVAL;
++
++      cfg = array_index_nospec((unsigned long)cfg, PERF_MSR_EVENT_MAX);
++
+       if (!msr[cfg].attr)
+               return -EINVAL;
+ 
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index 8cfdb6484fd0..ab8993fe58cc 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -1418,23 +1418,6 @@ static void start_sw_tscdeadline(struct kvm_lapic *apic)
+       local_irq_restore(flags);
+ }
+ 
+-static void start_sw_period(struct kvm_lapic *apic)
+-{
+-      if (!apic->lapic_timer.period)
+-              return;
+-
+-      if (apic_lvtt_oneshot(apic) &&
+-          ktime_after(ktime_get(),
+-                      apic->lapic_timer.target_expiration)) {
+-              apic_timer_expired(apic);
+-              return;
+-      }
+-
+-      hrtimer_start(&apic->lapic_timer.timer,
+-              apic->lapic_timer.target_expiration,
+-              HRTIMER_MODE_ABS_PINNED);
+-}
+-
+ static bool set_target_expiration(struct kvm_lapic *apic)
+ {
+       ktime_t now;
+@@ -1491,6 +1474,26 @@ static void advance_periodic_target_expiration(struct kvm_lapic *apic)
+                               apic->lapic_timer.period);
+ }
+ 
++static void start_sw_period(struct kvm_lapic *apic)
++{
++      if (!apic->lapic_timer.period)
++              return;
++
++      if (ktime_after(ktime_get(),
++                      apic->lapic_timer.target_expiration)) {
++              apic_timer_expired(apic);
++
++              if (apic_lvtt_oneshot(apic))
++                      return;
++
++              advance_periodic_target_expiration(apic);
++      }
++
++      hrtimer_start(&apic->lapic_timer.timer,
++              apic->lapic_timer.target_expiration,
++              HRTIMER_MODE_ABS_PINNED);
++}
++
+ bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu)
+ {
+       if (!lapic_in_kernel(vcpu))
+diff --git a/crypto/af_alg.c b/crypto/af_alg.c
+index 4e4640bb82b9..815ee1075574 100644
+--- a/crypto/af_alg.c
++++ b/crypto/af_alg.c
+@@ -158,16 +158,16 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+       void *private;
+       int err;
+ 
+-      /* If caller uses non-allowed flag, return error. */
+-      if ((sa->salg_feat & ~allowed) || (sa->salg_mask & ~allowed))
+-              return -EINVAL;
+-
+       if (sock->state == SS_CONNECTED)
+               return -EINVAL;
+ 
+       if (addr_len < sizeof(*sa))
+               return -EINVAL;
+ 
++      /* If caller uses non-allowed flag, return error. */
++      if ((sa->salg_feat & ~allowed) || (sa->salg_mask & ~allowed))
++              return -EINVAL;
++
+       sa->salg_type[sizeof(sa->salg_type) - 1] = 0;
+       sa->salg_name[sizeof(sa->salg_name) + addr_len - sizeof(*sa) - 1] = 0;
+ 
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index c6fe2974b336..473f150d6b22 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4539,6 +4539,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+                                               ATA_HORKAGE_ZERO_AFTER_TRIM |
+                                               ATA_HORKAGE_NOLPM, },
+ 
++      /* Sandisk devices which are known to not handle LPM well */
++      { "SanDisk SD7UB3Q*G1001",      NULL,   ATA_HORKAGE_NOLPM, },
++
+       /* devices that don't properly handle queued TRIM commands */
+       { "Micron_M500_*",              NULL,   ATA_HORKAGE_NO_NCQ_TRIM |
+                                               ATA_HORKAGE_ZERO_AFTER_TRIM, },
+diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
+index 1ef67db03c8e..9c9a22958717 100644
+--- a/drivers/atm/zatm.c
++++ b/drivers/atm/zatm.c
+@@ -28,6 +28,7 @@
+ #include <asm/io.h>
+ #include <linux/atomic.h>
+ #include <linux/uaccess.h>
++#include <linux/nospec.h>
+ 
+ #include "uPD98401.h"
+ #include "uPD98402.h"
+@@ -1458,6 +1459,8 @@ static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
+                                       return -EFAULT;
+                               if (pool < 0 || pool > ZATM_LAST_POOL)
+                                       return -EINVAL;
++                              pool = array_index_nospec(pool,
++                                                        ZATM_LAST_POOL + 1);
+                               spin_lock_irqsave(&zatm_dev->lock, flags);
+                               info = zatm_dev->pool_info[pool];
+                               if (cmd == ZATM_GETPOOLZ) {
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index e9dff868c028..7fcc4d7f4909 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -235,6 +235,7 @@ static const struct usb_device_id blacklist_table[] = {
+       { USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 },
++      { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 },
+@@ -267,7 +268,6 @@ static const struct usb_device_id blacklist_table[] = {
+       { USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 },
+ 
+       /* QCA ROME chipset */
+-      { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_QCA_ROME },
+       { USB_DEVICE(0x0cf3, 0xe007), .driver_info = BTUSB_QCA_ROME },
+       { USB_DEVICE(0x0cf3, 0xe009), .driver_info = BTUSB_QCA_ROME },
+       { USB_DEVICE(0x0cf3, 0xe300), .driver_info = BTUSB_QCA_ROME },
+@@ -395,6 +395,13 @@ static const struct dmi_system_id btusb_needs_reset_resume_table[] = {
+                       DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 3060"),
+               },
+       },
++      {
++              /* Dell XPS 9360 (QCA ROME device 0cf3:e300) */
++              .matches = {
++                      DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++                      DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9360"),
++              },
++      },
+       {}
+ };
+ 
+@@ -2895,6 +2902,12 @@ static int btusb_config_oob_wake(struct hci_dev *hdev)
+ }
+ #endif
+ 
++static void btusb_check_needs_reset_resume(struct usb_interface *intf)
++{
++      if (dmi_check_system(btusb_needs_reset_resume_table))
++              interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
++}
++
+ static int btusb_probe(struct usb_interface *intf,
+                      const struct usb_device_id *id)
+ {
+@@ -3030,9 +3043,6 @@ static int btusb_probe(struct usb_interface *intf,
+       hdev->send   = btusb_send_frame;
+       hdev->notify = btusb_notify;
+ 
+-      if (dmi_check_system(btusb_needs_reset_resume_table))
+-              interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
+-
+ #ifdef CONFIG_PM
+       err = btusb_config_oob_wake(hdev);
+       if (err)
+@@ -3119,6 +3129,7 @@ static int btusb_probe(struct usb_interface *intf,
+       if (id->driver_info & BTUSB_QCA_ROME) {
+               data->setup_on_usb = btusb_setup_qca;
+               hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
++              btusb_check_needs_reset_resume(intf);
+       }
+ 
+ #ifdef CONFIG_BT_HCIBTUSB_RTL
+diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
+index bfc53995064a..f03fe916eb9d 100644
+--- a/drivers/gpio/gpio-aspeed.c
++++ b/drivers/gpio/gpio-aspeed.c
+@@ -375,7 +375,7 @@ static void aspeed_gpio_irq_set_mask(struct irq_data *d, bool set)
+       if (set)
+               reg |= bit;
+       else
+-              reg &= bit;
++              reg &= ~bit;
+       iowrite32(reg, addr);
+ 
+       spin_unlock_irqrestore(&gpio->lock, flags);
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index b4c8b25453a6..68ea6e712bf9 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -443,7 +443,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
+       struct gpiohandle_request handlereq;
+       struct linehandle_state *lh;
+       struct file *file;
+-      int fd, i, ret;
++      int fd, i, count = 0, ret;
+ 
+       if (copy_from_user(&handlereq, ip, sizeof(handlereq)))
+               return -EFAULT;
+@@ -489,6 +489,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
+               if (ret)
+                       goto out_free_descs;
+               lh->descs[i] = desc;
++              count = i;
+ 
+               if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW)
+                       set_bit(FLAG_ACTIVE_LOW, &desc->flags);
+@@ -555,7 +556,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
+ out_put_unused_fd:
+       put_unused_fd(fd);
+ out_free_descs:
+-      for (; i >= 0; i--)
++      for (i = 0; i < count; i++)
+               gpiod_free(lh->descs[i]);
+       kfree(lh->label);
+ out_free_lh:
+@@ -812,7 +813,7 @@ static int lineevent_create(struct gpio_device *gdev, void 
__user *ip)
+       desc = &gdev->descs[offset];
+       ret = gpiod_request(desc, le->label);
+       if (ret)
+-              goto out_free_desc;
++              goto out_free_label;
+       le->desc = desc;
+       le->eflags = eflags;
+ 
+diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
+index 2fd383d7253a..0d8a417e2cd6 100644
+--- a/drivers/gpu/drm/drm_atomic.c
++++ b/drivers/gpu/drm/drm_atomic.c
+@@ -151,6 +151,8 @@ void drm_atomic_state_default_clear(struct 
drm_atomic_state *state)
+                                                      
state->connectors[i].state);
+               state->connectors[i].ptr = NULL;
+               state->connectors[i].state = NULL;
++              state->connectors[i].old_state = NULL;
++              state->connectors[i].new_state = NULL;
+               drm_connector_put(connector);
+       }
+ 
+@@ -172,6 +174,8 @@ void drm_atomic_state_default_clear(struct 
drm_atomic_state *state)
+               state->crtcs[i].commit = NULL;
+               state->crtcs[i].ptr = NULL;
+               state->crtcs[i].state = NULL;
++              state->crtcs[i].old_state = NULL;
++              state->crtcs[i].new_state = NULL;
+       }
+ 
+       for (i = 0; i < config->num_total_plane; i++) {
+@@ -184,6 +188,8 @@ void drm_atomic_state_default_clear(struct 
drm_atomic_state *state)
+                                                  state->planes[i].state);
+               state->planes[i].ptr = NULL;
+               state->planes[i].state = NULL;
++              state->planes[i].old_state = NULL;
++              state->planes[i].new_state = NULL;
+       }
+ 
+       for (i = 0; i < state->num_private_objs; i++) {
+@@ -196,6 +202,8 @@ void drm_atomic_state_default_clear(struct 
drm_atomic_state *state)
+                                                state->private_objs[i].state);
+               state->private_objs[i].ptr = NULL;
+               state->private_objs[i].state = NULL;
++              state->private_objs[i].old_state = NULL;
++              state->private_objs[i].new_state = NULL;
+       }
+       state->num_private_objs = 0;
+ 
+diff --git a/drivers/gpu/drm/i915/intel_lvds.c 
b/drivers/gpu/drm/i915/intel_lvds.c
+index 8e215777c7f4..240308f1b6dd 100644
+--- a/drivers/gpu/drm/i915/intel_lvds.c
++++ b/drivers/gpu/drm/i915/intel_lvds.c
+@@ -317,7 +317,8 @@ static void intel_enable_lvds(struct intel_encoder 
*encoder,
+ 
+       I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) | PANEL_POWER_ON);
+       POSTING_READ(lvds_encoder->reg);
+-      if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, PP_ON, 1000))
++
++      if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, PP_ON, 5000))
+               DRM_ERROR("timed out waiting for panel to power on\n");
+ 
+       intel_panel_enable_backlight(pipe_config, conn_state);
+diff --git a/drivers/gpu/drm/nouveau/nv50_display.c 
b/drivers/gpu/drm/nouveau/nv50_display.c
+index 6e196bc01118..a29474528e85 100644
+--- a/drivers/gpu/drm/nouveau/nv50_display.c
++++ b/drivers/gpu/drm/nouveau/nv50_display.c
+@@ -3216,10 +3216,11 @@ nv50_mstm_destroy_connector(struct 
drm_dp_mst_topology_mgr *mgr,
+ 
+       drm_connector_unregister(&mstc->connector);
+ 
+-      drm_modeset_lock_all(drm->dev);
+       drm_fb_helper_remove_one_connector(&drm->fbcon->helper, 
&mstc->connector);
++
++      drm_modeset_lock(&drm->dev->mode_config.connection_mutex, NULL);
+       mstc->port = NULL;
+-      drm_modeset_unlock_all(drm->dev);
++      drm_modeset_unlock(&drm->dev->mode_config.connection_mutex);
+ 
+       drm_connector_unreference(&mstc->connector);
+ }
+@@ -3229,9 +3230,7 @@ nv50_mstm_register_connector(struct drm_connector 
*connector)
+ {
+       struct nouveau_drm *drm = nouveau_drm(connector->dev);
+ 
+-      drm_modeset_lock_all(drm->dev);
+       drm_fb_helper_add_one_connector(&drm->fbcon->helper, connector);
+-      drm_modeset_unlock_all(drm->dev);
+ 
+       drm_connector_register(connector);
+ }
+diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
+index 2968b3ebb895..77c56264c05b 100644
+--- a/drivers/gpu/drm/vc4/vc4_plane.c
++++ b/drivers/gpu/drm/vc4/vc4_plane.c
+@@ -535,7 +535,7 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
+        * the scl fields here.
+        */
+       if (num_planes == 1) {
+-              scl0 = vc4_get_scl_field(state, 1);
++              scl0 = vc4_get_scl_field(state, 0);
+               scl1 = scl0;
+       } else {
+               scl0 = vc4_get_scl_field(state, 1);
+diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
+index 3cc2052f972c..cbc56372ff97 100644
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -2439,7 +2439,7 @@ static void dm_integrity_free_journal_scatterlist(struct 
dm_integrity_c *ic, str
+       unsigned i;
+       for (i = 0; i < ic->journal_sections; i++)
+               kvfree(sl[i]);
+-      kfree(sl);
++      kvfree(sl);
+ }
+ 
+ static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct 
dm_integrity_c *ic, struct page_list *pl)
+diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c
+index 5590c559a8ca..53e320c92a8b 100644
+--- a/drivers/net/can/spi/hi311x.c
++++ b/drivers/net/can/spi/hi311x.c
+@@ -91,6 +91,7 @@
+ #define HI3110_STAT_BUSOFF BIT(2)
+ #define HI3110_STAT_ERRP BIT(3)
+ #define HI3110_STAT_ERRW BIT(4)
++#define HI3110_STAT_TXMTY BIT(7)
+ 
+ #define HI3110_BTR0_SJW_SHIFT 6
+ #define HI3110_BTR0_BRP_SHIFT 0
+@@ -427,8 +428,10 @@ static int hi3110_get_berr_counter(const struct 
net_device *net,
+       struct hi3110_priv *priv = netdev_priv(net);
+       struct spi_device *spi = priv->spi;
+ 
++      mutex_lock(&priv->hi3110_lock);
+       bec->txerr = hi3110_read(spi, HI3110_READ_TEC);
+       bec->rxerr = hi3110_read(spi, HI3110_READ_REC);
++      mutex_unlock(&priv->hi3110_lock);
+ 
+       return 0;
+ }
+@@ -735,10 +738,7 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id)
+                       }
+               }
+ 
+-              if (intf == 0)
+-                      break;
+-
+-              if (intf & HI3110_INT_TXCPLT) {
++              if (priv->tx_len && statf & HI3110_STAT_TXMTY) {
+                       net->stats.tx_packets++;
+                       net->stats.tx_bytes += priv->tx_len - 1;
+                       can_led_event(net, CAN_LED_EVENT_TX);
+@@ -748,6 +748,9 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id)
+                       }
+                       netif_wake_queue(net);
+               }
++
++              if (intf == 0)
++                      break;
+       }
+       mutex_unlock(&priv->hi3110_lock);
+       return IRQ_HANDLED;
+diff --git a/drivers/net/can/usb/kvaser_usb.c 
b/drivers/net/can/usb/kvaser_usb.c
+index 63587b8e6825..daed57d3d209 100644
+--- a/drivers/net/can/usb/kvaser_usb.c
++++ b/drivers/net/can/usb/kvaser_usb.c
+@@ -1179,7 +1179,7 @@ static void kvaser_usb_rx_can_msg(const struct 
kvaser_usb *dev,
+ 
+       skb = alloc_can_skb(priv->netdev, &cf);
+       if (!skb) {
+-              stats->tx_dropped++;
++              stats->rx_dropped++;
+               return;
+       }
+ 
+diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
+index 044af553204c..7ef0a8e1c3e8 100644
+--- a/drivers/nvme/host/nvme.h
++++ b/drivers/nvme/host/nvme.h
+@@ -80,6 +80,11 @@ enum nvme_quirks {
+        * Supports the LighNVM command set if indicated in vs[1].
+        */
+       NVME_QUIRK_LIGHTNVM                     = (1 << 6),
++
++      /*
++       * Set MEDIUM priority on SQ creation
++       */
++      NVME_QUIRK_MEDIUM_PRIO_SQ               = (1 << 7),
+ };
+ 
+ /*
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index cdd2fd509ddc..eab17405e815 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -947,9 +947,18 @@ static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
+ static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
+                                               struct nvme_queue *nvmeq)
+ {
++      struct nvme_ctrl *ctrl = &dev->ctrl;
+       struct nvme_command c;
+       int flags = NVME_QUEUE_PHYS_CONTIG;
+ 
++      /*
++       * Some drives have a bug that auto-enables WRRU if MEDIUM isn't
++       * set. Since URGENT priority is zeroes, it makes all queues
++       * URGENT.
++       */
++      if (ctrl->quirks & NVME_QUIRK_MEDIUM_PRIO_SQ)
++              flags |= NVME_SQ_PRIO_MEDIUM;
++
+       /*
+        * Note: we (ab)use the fact the the prp fields survive if no data
+        * is attached to the request.
+@@ -2523,7 +2532,8 @@ static const struct pci_device_id nvme_id_table[] = {
+               .driver_data = NVME_QUIRK_STRIPE_SIZE |
+                               NVME_QUIRK_DEALLOCATE_ZEROES, },
+       { PCI_VDEVICE(INTEL, 0xf1a5),   /* Intel 600P/P3100 */
+-              .driver_data = NVME_QUIRK_NO_DEEPEST_PS },
++              .driver_data = NVME_QUIRK_NO_DEEPEST_PS |
++                              NVME_QUIRK_MEDIUM_PRIO_SQ },
+       { PCI_VDEVICE(INTEL, 0x5845),   /* Qemu emulated controller */
+               .driver_data = NVME_QUIRK_IDENTIFY_CNS, },
+       { PCI_DEVICE(0x1c58, 0x0003),   /* HGST adapter */
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 74f1c57ab93b..62a0677b32f1 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -1892,7 +1892,7 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
+ EXPORT_SYMBOL(pci_pme_active);
+ 
+ /**
+- * pci_enable_wake - enable PCI device as wakeup event source
++ * __pci_enable_wake - enable PCI device as wakeup event source
+  * @dev: PCI device affected
+  * @state: PCI state from which device will issue wakeup events
+  * @enable: True to enable event generation; false to disable
+@@ -1910,7 +1910,7 @@ EXPORT_SYMBOL(pci_pme_active);
+  * Error code depending on the platform is returned if both the platform and
+  * the native mechanism fail to enable the generation of wake-up events
+  */
+-int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
++static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool 
enable)
+ {
+       int ret = 0;
+ 
+@@ -1951,6 +1951,23 @@ int pci_enable_wake(struct pci_dev *dev, pci_power_t 
state, bool enable)
+ 
+       return ret;
+ }
++
++/**
++ * pci_enable_wake - change wakeup settings for a PCI device
++ * @pci_dev: Target device
++ * @state: PCI state from which device will issue wakeup events
++ * @enable: Whether or not to enable event generation
++ *
++ * If @enable is set, check device_may_wakeup() for the device before calling
++ * __pci_enable_wake() for it.
++ */
++int pci_enable_wake(struct pci_dev *pci_dev, pci_power_t state, bool enable)
++{
++      if (enable && !device_may_wakeup(&pci_dev->dev))
++              return -EINVAL;
++
++      return __pci_enable_wake(pci_dev, state, enable);
++}
+ EXPORT_SYMBOL(pci_enable_wake);
+ 
+ /**
+@@ -1963,9 +1980,9 @@ EXPORT_SYMBOL(pci_enable_wake);
+  * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
+  * ordering constraints.
+  *
+- * This function only returns error code if the device is not capable of
+- * generating PME# from both D3_hot and D3_cold, and the platform is unable to
+- * enable wake-up power for it.
++ * This function only returns error code if the device is not allowed to wake
++ * up the system from sleep or it is not capable of generating PME# from both
++ * D3_hot and D3_cold and the platform is unable to enable wake-up power for 
it.
+  */
+ int pci_wake_from_d3(struct pci_dev *dev, bool enable)
+ {
+@@ -2096,7 +2113,7 @@ int pci_finish_runtime_suspend(struct pci_dev *dev)
+ 
+       dev->runtime_d3cold = target_state == PCI_D3cold;
+ 
+-      pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));
++      __pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));
+ 
+       error = pci_set_power_state(dev, target_state);
+ 
+@@ -2120,16 +2137,16 @@ bool pci_dev_run_wake(struct pci_dev *dev)
+ {
+       struct pci_bus *bus = dev->bus;
+ 
+-      if (device_can_wakeup(&dev->dev))
+-              return true;
+-
+       if (!dev->pme_support)
+               return false;
+ 
+       /* PME-capable in principle, but not from the target power state */
+-      if (!pci_pme_capable(dev, pci_target_state(dev, false)))
++      if (!pci_pme_capable(dev, pci_target_state(dev, true)))
+               return false;
+ 
++      if (device_can_wakeup(&dev->dev))
++              return true;
++
+       while (bus->parent) {
+               struct pci_dev *bridge = bus->self;
+ 
+diff --git a/drivers/thermal/samsung/exynos_tmu.c 
b/drivers/thermal/samsung/exynos_tmu.c
+index ed805c7c5ace..ac83f721db24 100644
+--- a/drivers/thermal/samsung/exynos_tmu.c
++++ b/drivers/thermal/samsung/exynos_tmu.c
+@@ -185,6 +185,7 @@
+  * @regulator: pointer to the TMU regulator structure.
+  * @reg_conf: pointer to structure to register with core thermal.
+  * @ntrip: number of supported trip points.
++ * @enabled: current status of TMU device
+  * @tmu_initialize: SoC specific TMU initialization method
+  * @tmu_control: SoC specific TMU control method
+  * @tmu_read: SoC specific TMU temperature read method
+@@ -205,6 +206,7 @@ struct exynos_tmu_data {
+       struct regulator *regulator;
+       struct thermal_zone_device *tzd;
+       unsigned int ntrip;
++      bool enabled;
+ 
+       int (*tmu_initialize)(struct platform_device *pdev);
+       void (*tmu_control)(struct platform_device *pdev, bool on);
+@@ -398,6 +400,7 @@ static void exynos_tmu_control(struct platform_device 
*pdev, bool on)
+       mutex_lock(&data->lock);
+       clk_enable(data->clk);
+       data->tmu_control(pdev, on);
++      data->enabled = on;
+       clk_disable(data->clk);
+       mutex_unlock(&data->lock);
+ }
+@@ -889,19 +892,24 @@ static void exynos7_tmu_control(struct platform_device 
*pdev, bool on)
+ static int exynos_get_temp(void *p, int *temp)
+ {
+       struct exynos_tmu_data *data = p;
++      int value, ret = 0;
+ 
+-      if (!data || !data->tmu_read)
++      if (!data || !data->tmu_read || !data->enabled)
+               return -EINVAL;
+ 
+       mutex_lock(&data->lock);
+       clk_enable(data->clk);
+ 
+-      *temp = code_to_temp(data, data->tmu_read(data)) * MCELSIUS;
++      value = data->tmu_read(data);
++      if (value < 0)
++              ret = value;
++      else
++              *temp = code_to_temp(data, value) * MCELSIUS;
+ 
+       clk_disable(data->clk);
+       mutex_unlock(&data->lock);
+ 
+-      return 0;
++      return ret;
+ }
+ 
+ #ifdef CONFIG_THERMAL_EMULATION
+diff --git a/fs/ceph/file.c b/fs/ceph/file.c
+index 0024d3e61bcd..6d653235e323 100644
+--- a/fs/ceph/file.c
++++ b/fs/ceph/file.c
+@@ -873,6 +873,11 @@ ceph_direct_read_write(struct kiocb *iocb, struct 
iov_iter *iter,
+               size_t start = 0;
+               ssize_t len;
+ 
++              if (write)
++                      size = min_t(u64, size, fsc->mount_options->wsize);
++              else
++                      size = min_t(u64, size, fsc->mount_options->rsize);
++
+               vino = ceph_vino(inode);
+               req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
+                                           vino, pos, &size, 0,
+@@ -888,11 +893,6 @@ ceph_direct_read_write(struct kiocb *iocb, struct 
iov_iter *iter,
+                       break;
+               }
+ 
+-              if (write)
+-                      size = min_t(u64, size, fsc->mount_options->wsize);
+-              else
+-                      size = min_t(u64, size, fsc->mount_options->rsize);
+-
+               len = size;
+               pages = dio_get_pages_alloc(iter, len, &start, &num_pages);
+               if (IS_ERR(pages)) {
+diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
+index dbcd2e066066..490c5fc9e69c 100644
+--- a/fs/cifs/cifsfs.c
++++ b/fs/cifs/cifsfs.c
+@@ -1045,6 +1045,18 @@ ssize_t cifs_file_copychunk_range(unsigned int xid,
+       return rc;
+ }
+ 
++/*
++ * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync()
++ * is a dummy operation.
++ */
++static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int 
datasync)
++{
++      cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",
++               file, datasync);
++
++      return 0;
++}
++
+ static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
+                               struct file *dst_file, loff_t destoff,
+                               size_t len, unsigned int flags)
+@@ -1173,6 +1185,7 @@ const struct file_operations cifs_dir_ops = {
+       .copy_file_range = cifs_copy_file_range,
+       .clone_file_range = cifs_clone_file_range,
+       .llseek = generic_file_llseek,
++      .fsync = cifs_dir_fsync,
+ };
+ 
+ static void
+diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
+index 8a7ef9378bf6..3244932f4d5c 100644
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -1940,7 +1940,7 @@ void wb_workfn(struct work_struct *work)
+       }
+ 
+       if (!list_empty(&wb->work_list))
+-              mod_delayed_work(bdi_wq, &wb->dwork, 0);
++              wb_wakeup(wb);
+       else if (wb_has_dirty_io(wb) && dirty_writeback_interval)
+               wb_wakeup_delayed(wb);
+ 
+diff --git a/include/linux/oom.h b/include/linux/oom.h
+index 5bad038ac012..6adac113e96d 100644
+--- a/include/linux/oom.h
++++ b/include/linux/oom.h
+@@ -95,6 +95,8 @@ static inline int check_stable_address_space(struct 
mm_struct *mm)
+       return 0;
+ }
+ 
++void __oom_reap_task_mm(struct mm_struct *mm);
++
+ extern unsigned long oom_badness(struct task_struct *p,
+               struct mem_cgroup *memcg, const nodemask_t *nodemask,
+               unsigned long totalpages);
+diff --git a/include/linux/wait_bit.h b/include/linux/wait_bit.h
+index af0d495430d7..bc96d90bcafd 100644
+--- a/include/linux/wait_bit.h
++++ b/include/linux/wait_bit.h
+@@ -259,4 +259,21 @@ int wait_on_atomic_t(atomic_t *val, int 
(*action)(atomic_t *), unsigned mode)
+       return out_of_line_wait_on_atomic_t(val, action, mode);
+ }
+ 
++/**
++ * clear_and_wake_up_bit - clear a bit and wake up anyone waiting on that bit
++ *
++ * @bit: the bit of the word being waited on
++ * @word: the word being waited on, a kernel virtual address
++ *
++ * You can use this helper if bitflags are manipulated atomically rather than
++ * non-atomically under a lock.
++ */
++static inline void clear_and_wake_up_bit(int bit, void *word)
++{
++      clear_bit_unlock(bit, word);
++      /* See wake_up_bit() for which memory barrier you need to use. */
++      smp_mb__after_atomic();
++      wake_up_bit(word, bit);
++}
++
+ #endif /* _LINUX_WAIT_BIT_H */
+diff --git a/include/net/inet_timewait_sock.h 
b/include/net/inet_timewait_sock.h
+index 1356fa6a7566..fb439db7fa45 100644
+--- a/include/net/inet_timewait_sock.h
++++ b/include/net/inet_timewait_sock.h
+@@ -43,6 +43,7 @@ struct inet_timewait_sock {
+ #define tw_family             __tw_common.skc_family
+ #define tw_state              __tw_common.skc_state
+ #define tw_reuse              __tw_common.skc_reuse
++#define tw_reuseport          __tw_common.skc_reuseport
+ #define tw_ipv6only           __tw_common.skc_ipv6only
+ #define tw_bound_dev_if               __tw_common.skc_bound_dev_if
+ #define tw_node                       __tw_common.skc_nulls_node
+diff --git a/include/net/nexthop.h b/include/net/nexthop.h
+index 36bb794f5cd6..902ff382a6dc 100644
+--- a/include/net/nexthop.h
++++ b/include/net/nexthop.h
+@@ -7,7 +7,7 @@
+ 
+ static inline int rtnh_ok(const struct rtnexthop *rtnh, int remaining)
+ {
+-      return remaining >= sizeof(*rtnh) &&
++      return remaining >= (int)sizeof(*rtnh) &&
+              rtnh->rtnh_len >= sizeof(*rtnh) &&
+              rtnh->rtnh_len <= remaining;
+ }
+diff --git a/kernel/compat.c b/kernel/compat.c
+index 772e038d04d9..7e83733d4c95 100644
+--- a/kernel/compat.c
++++ b/kernel/compat.c
+@@ -34,6 +34,7 @@ int compat_get_timex(struct timex *txc, const struct 
compat_timex __user *utp)
+ {
+       struct compat_timex tx32;
+ 
++      memset(txc, 0, sizeof(struct timex));
+       if (copy_from_user(&tx32, utp, sizeof(struct compat_timex)))
+               return -EFAULT;
+ 
+diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
+index fa4f47a0a631..b23294e21e76 100644
+--- a/kernel/events/callchain.c
++++ b/kernel/events/callchain.c
+@@ -131,14 +131,8 @@ int get_callchain_buffers(int event_max_stack)
+               goto exit;
+       }
+ 
+-      if (count > 1) {
+-              /* If the allocation failed, give up */
+-              if (!callchain_cpus_entries)
+-                      err = -ENOMEM;
+-              goto exit;
+-      }
+-
+-      err = alloc_callchain_buffers();
++      if (count == 1)
++              err = alloc_callchain_buffers();
+ exit:
+       if (err)
+               atomic_dec(&nr_callchain_events);
+diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
+index f684d8e5fa2b..c573c7339223 100644
+--- a/kernel/events/ring_buffer.c
++++ b/kernel/events/ring_buffer.c
+@@ -14,6 +14,7 @@
+ #include <linux/slab.h>
+ #include <linux/circ_buf.h>
+ #include <linux/poll.h>
++#include <linux/nospec.h>
+ 
+ #include "internal.h"
+ 
+@@ -863,8 +864,10 @@ perf_mmap_to_page(struct ring_buffer *rb, unsigned long 
pgoff)
+                       return NULL;
+ 
+               /* AUX space */
+-              if (pgoff >= rb->aux_pgoff)
+-                      return virt_to_page(rb->aux_pages[pgoff - 
rb->aux_pgoff]);
++              if (pgoff >= rb->aux_pgoff) {
++                      int aux_pgoff = array_index_nospec(pgoff - 
rb->aux_pgoff, rb->aux_nr_pages);
++                      return virt_to_page(rb->aux_pages[aux_pgoff]);
++              }
+       }
+ 
+       return __perf_mmap_to_page(rb, pgoff);
+diff --git a/kernel/sched/autogroup.c b/kernel/sched/autogroup.c
+index a43df5193538..1d179ab9ef8d 100644
+--- a/kernel/sched/autogroup.c
++++ b/kernel/sched/autogroup.c
+@@ -7,6 +7,7 @@
+ #include <linux/utsname.h>
+ #include <linux/security.h>
+ #include <linux/export.h>
++#include <linux/nospec.h>
+ 
+ unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
+ static struct autogroup autogroup_default;
+@@ -213,7 +214,7 @@ int proc_sched_autogroup_set_nice(struct task_struct *p, 
int nice)
+       static unsigned long next = INITIAL_JIFFIES;
+       struct autogroup *ag;
+       unsigned long shares;
+-      int err;
++      int err, idx;
+ 
+       if (nice < MIN_NICE || nice > MAX_NICE)
+               return -EINVAL;
+@@ -231,7 +232,9 @@ int proc_sched_autogroup_set_nice(struct task_struct *p, 
int nice)
+ 
+       next = HZ / 10 + jiffies;
+       ag = autogroup_task_get(p);
+-      shares = scale_load(sched_prio_to_weight[nice + 20]);
++
++      idx = array_index_nospec(nice + 20, 40);
++      shares = scale_load(sched_prio_to_weight[idx]);
+ 
+       down_write(&ag->lock);
+       err = sched_group_set_shares(ag->tg, shares);
+diff --git a/kernel/sched/cpufreq_schedutil.c 
b/kernel/sched/cpufreq_schedutil.c
+index d6717a3331a1..81eb7899c7c8 100644
+--- a/kernel/sched/cpufreq_schedutil.c
++++ b/kernel/sched/cpufreq_schedutil.c
+@@ -282,7 +282,8 @@ static void sugov_update_single(struct update_util_data 
*hook, u64 time,
+                * Do not reduce the frequency if the CPU has not been idle
+                * recently, as the reduction is likely to be premature then.
+                */
+-              if (busy && next_f < sg_policy->next_freq) {
++              if (busy && next_f < sg_policy->next_freq &&
++                  sg_policy->next_freq != UINT_MAX) {
+                       next_f = sg_policy->next_freq;
+ 
+                       /* Reset cached freq as next_freq has changed */
+diff --git a/kernel/trace/trace_events_filter.c 
b/kernel/trace/trace_events_filter.c
+index a764aec3c9a1..55008fa93097 100644
+--- a/kernel/trace/trace_events_filter.c
++++ b/kernel/trace/trace_events_filter.c
+@@ -338,6 +338,9 @@ static int regex_match_full(char *str, struct regex *r, 
int len)
+ 
+ static int regex_match_front(char *str, struct regex *r, int len)
+ {
++      if (len < r->len)
++              return 0;
++
+       if (strncmp(str, r->pattern, r->len) == 0)
+               return 1;
+       return 0;
+diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
+index 14d3af6a2953..7197ff9f0bbd 100644
+--- a/kernel/trace/trace_uprobe.c
++++ b/kernel/trace/trace_uprobe.c
+@@ -152,6 +152,8 @@ static void FETCH_FUNC_NAME(memory, string)(struct pt_regs 
*regs,
+               return;
+ 
+       ret = strncpy_from_user(dst, src, maxlen);
++      if (ret == maxlen)
++              dst[--ret] = '\0';
+ 
+       if (ret < 0) {  /* Failed to fetch string */
+               ((u8 *)get_rloc_data(dest))[0] = '\0';
+diff --git a/mm/backing-dev.c b/mm/backing-dev.c
+index e19606bb41a0..dee049a0ec5b 100644
+--- a/mm/backing-dev.c
++++ b/mm/backing-dev.c
+@@ -381,7 +381,7 @@ static void wb_shutdown(struct bdi_writeback *wb)
+        * the barrier provided by test_and_clear_bit() above.
+        */
+       smp_wmb();
+-      clear_bit(WB_shutting_down, &wb->state);
++      clear_and_wake_up_bit(WB_shutting_down, &wb->state);
+ }
+ 
+ static void wb_exit(struct bdi_writeback *wb)
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 66e7efabf0a1..546cd481a2ca 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -4187,6 +4187,9 @@ static void free_mem_cgroup_per_node_info(struct 
mem_cgroup *memcg, int node)
+ {
+       struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
+ 
++      if (!pn)
++              return;
++
+       free_percpu(pn->lruvec_stat);
+       kfree(pn);
+ }
+diff --git a/mm/mmap.c b/mm/mmap.c
+index 0de87a376aaa..11f96fad5271 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -2982,6 +2982,32 @@ void exit_mmap(struct mm_struct *mm)
+       /* mm's last user has gone, and its about to be pulled down */
+       mmu_notifier_release(mm);
+ 
++      if (unlikely(mm_is_oom_victim(mm))) {
++              /*
++               * Manually reap the mm to free as much memory as possible.
++               * Then, as the oom reaper does, set MMF_OOM_SKIP to disregard
++               * this mm from further consideration.  Taking mm->mmap_sem for
++               * write after setting MMF_OOM_SKIP will guarantee that the oom
++               * reaper will not run on this mm again after mmap_sem is
++               * dropped.
++               *
++               * Nothing can be holding mm->mmap_sem here and the above call
++               * to mmu_notifier_release(mm) ensures mmu notifier callbacks in
++               * __oom_reap_task_mm() will not block.
++               *
++               * This needs to be done before calling munlock_vma_pages_all(),
++               * which clears VM_LOCKED, otherwise the oom reaper cannot
++               * reliably test it.
++               */
++              mutex_lock(&oom_lock);
++              __oom_reap_task_mm(mm);
++              mutex_unlock(&oom_lock);
++
++              set_bit(MMF_OOM_SKIP, &mm->flags);
++              down_write(&mm->mmap_sem);
++              up_write(&mm->mmap_sem);
++      }
++
+       if (mm->locked_vm) {
+               vma = mm->mmap;
+               while (vma) {
+@@ -3003,24 +3029,6 @@ void exit_mmap(struct mm_struct *mm)
+       /* update_hiwater_rss(mm) here? but nobody should be looking */
+       /* Use -1 here to ensure all VMAs in the mm are unmapped */
+       unmap_vmas(&tlb, vma, 0, -1);
+-
+-      if (unlikely(mm_is_oom_victim(mm))) {
+-              /*
+-               * Wait for oom_reap_task() to stop working on this
+-               * mm. Because MMF_OOM_SKIP is already set before
+-               * calling down_read(), oom_reap_task() will not run
+-               * on this "mm" post up_write().
+-               *
+-               * mm_is_oom_victim() cannot be set from under us
+-               * either because victim->mm is already set to NULL
+-               * under task_lock before calling mmput and oom_mm is
+-               * set not NULL by the OOM killer only if victim->mm
+-               * is found not NULL while holding the task_lock.
+-               */
+-              set_bit(MMF_OOM_SKIP, &mm->flags);
+-              down_write(&mm->mmap_sem);
+-              up_write(&mm->mmap_sem);
+-      }
+       free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
+       tlb_finish_mmu(&tlb, 0, -1);
+ 
+diff --git a/mm/oom_kill.c b/mm/oom_kill.c
+index 10aed8d8c080..58977f634ced 100644
+--- a/mm/oom_kill.c
++++ b/mm/oom_kill.c
+@@ -456,7 +456,6 @@ bool process_shares_mm(struct task_struct *p, struct 
mm_struct *mm)
+       return false;
+ }
+ 
+-
+ #ifdef CONFIG_MMU
+ /*
+  * OOM Reaper kernel thread which tries to reap the memory used by the OOM
+@@ -467,16 +466,51 @@ static DECLARE_WAIT_QUEUE_HEAD(oom_reaper_wait);
+ static struct task_struct *oom_reaper_list;
+ static DEFINE_SPINLOCK(oom_reaper_lock);
+ 
+-static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
++void __oom_reap_task_mm(struct mm_struct *mm)
+ {
+-      struct mmu_gather tlb;
+       struct vm_area_struct *vma;
++
++      /*
++       * Tell all users of get_user/copy_from_user etc... that the content
++       * is no longer stable. No barriers really needed because unmapping
++       * should imply barriers already and the reader would hit a page fault
++       * if it stumbled over a reaped memory.
++       */
++      set_bit(MMF_UNSTABLE, &mm->flags);
++
++      for (vma = mm->mmap ; vma; vma = vma->vm_next) {
++              if (!can_madv_dontneed_vma(vma))
++                      continue;
++
++              /*
++               * Only anonymous pages have a good chance to be dropped
++               * without additional steps which we cannot afford as we
++               * are OOM already.
++               *
++               * We do not even care about fs backed pages because all
++               * which are reclaimable have already been reclaimed and
++               * we do not want to block exit_mmap by keeping mm ref
++               * count elevated without a good reason.
++               */
++              if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) {
++                      struct mmu_gather tlb;
++
++                      tlb_gather_mmu(&tlb, mm, vma->vm_start, vma->vm_end);
++                      unmap_page_range(&tlb, vma, vma->vm_start, vma->vm_end,
++                                       NULL);
++                      tlb_finish_mmu(&tlb, vma->vm_start, vma->vm_end);
++              }
++      }
++}
++
++static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
++{
+       bool ret = true;
+ 
+       /*
+        * We have to make sure to not race with the victim exit path
+        * and cause premature new oom victim selection:
+-       * __oom_reap_task_mm           exit_mm
++       * oom_reap_task_mm             exit_mm
+        *   mmget_not_zero
+        *                                mmput
+        *                                  atomic_dec_and_test
+@@ -524,35 +558,8 @@ static bool __oom_reap_task_mm(struct task_struct *tsk, 
struct mm_struct *mm)
+ 
+       trace_start_task_reaping(tsk->pid);
+ 
+-      /*
+-       * Tell all users of get_user/copy_from_user etc... that the content
+-       * is no longer stable. No barriers really needed because unmapping
+-       * should imply barriers already and the reader would hit a page fault
+-       * if it stumbled over a reaped memory.
+-       */
+-      set_bit(MMF_UNSTABLE, &mm->flags);
+-
+-      for (vma = mm->mmap ; vma; vma = vma->vm_next) {
+-              if (!can_madv_dontneed_vma(vma))
+-                      continue;
++      __oom_reap_task_mm(mm);
+ 
+-              /*
+-               * Only anonymous pages have a good chance to be dropped
+-               * without additional steps which we cannot afford as we
+-               * are OOM already.
+-               *
+-               * We do not even care about fs backed pages because all
+-               * which are reclaimable have already been reclaimed and
+-               * we do not want to block exit_mmap by keeping mm ref
+-               * count elevated without a good reason.
+-               */
+-              if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) {
+-                      tlb_gather_mmu(&tlb, mm, vma->vm_start, vma->vm_end);
+-                      unmap_page_range(&tlb, vma, vma->vm_start, vma->vm_end,
+-                                       NULL);
+-                      tlb_finish_mmu(&tlb, vma->vm_start, vma->vm_end);
+-              }
+-      }
+       pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, 
file-rss:%lukB, shmem-rss:%lukB\n",
+                       task_pid_nr(tsk), tsk->comm,
+                       K(get_mm_counter(mm, MM_ANONPAGES)),
+@@ -573,13 +580,12 @@ static void oom_reap_task(struct task_struct *tsk)
+       struct mm_struct *mm = tsk->signal->oom_mm;
+ 
+       /* Retry the down_read_trylock(mmap_sem) a few times */
+-      while (attempts++ < MAX_OOM_REAP_RETRIES && !__oom_reap_task_mm(tsk, 
mm))
++      while (attempts++ < MAX_OOM_REAP_RETRIES && !oom_reap_task_mm(tsk, mm))
+               schedule_timeout_idle(HZ/10);
+ 
+       if (attempts <= MAX_OOM_REAP_RETRIES)
+               goto done;
+ 
+-
+       pr_info("oom_reaper: unable to reap pid:%d (%s)\n",
+               task_pid_nr(tsk), tsk->comm);
+       debug_show_all_locks();
+diff --git a/mm/sparse.c b/mm/sparse.c
+index 30e56a100ee8..76ed2f4a8a3e 100644
+--- a/mm/sparse.c
++++ b/mm/sparse.c
+@@ -661,7 +661,7 @@ void offline_mem_sections(unsigned long start_pfn, 
unsigned long end_pfn)
+       unsigned long pfn;
+ 
+       for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
+-              unsigned long section_nr = pfn_to_section_nr(start_pfn);
++              unsigned long section_nr = pfn_to_section_nr(pfn);
+               struct mem_section *ms;
+ 
+               /*
+diff --git a/mm/z3fold.c b/mm/z3fold.c
+index 39e19125d6a0..ddfb20cfd9af 100644
+--- a/mm/z3fold.c
++++ b/mm/z3fold.c
+@@ -144,7 +144,8 @@ enum z3fold_page_flags {
+       PAGE_HEADLESS = 0,
+       MIDDLE_CHUNK_MAPPED,
+       NEEDS_COMPACTING,
+-      PAGE_STALE
++      PAGE_STALE,
++      UNDER_RECLAIM
+ };
+ 
+ /*****************
+@@ -173,6 +174,7 @@ static struct z3fold_header *init_z3fold_page(struct page 
*page,
+       clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
+       clear_bit(NEEDS_COMPACTING, &page->private);
+       clear_bit(PAGE_STALE, &page->private);
++      clear_bit(UNDER_RECLAIM, &page->private);
+ 
+       spin_lock_init(&zhdr->page_lock);
+       kref_init(&zhdr->refcount);
+@@ -748,6 +750,10 @@ static void z3fold_free(struct z3fold_pool *pool, 
unsigned long handle)
+               atomic64_dec(&pool->pages_nr);
+               return;
+       }
++      if (test_bit(UNDER_RECLAIM, &page->private)) {
++              z3fold_page_unlock(zhdr);
++              return;
++      }
+       if (test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
+               z3fold_page_unlock(zhdr);
+               return;
+@@ -832,6 +838,8 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, 
unsigned int retries)
+                       kref_get(&zhdr->refcount);
+                       list_del_init(&zhdr->buddy);
+                       zhdr->cpu = -1;
++                      set_bit(UNDER_RECLAIM, &page->private);
++                      break;
+               }
+ 
+               list_del_init(&page->lru);
+@@ -879,25 +887,35 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, 
unsigned int retries)
+                               goto next;
+               }
+ next:
+-              spin_lock(&pool->lock);
+               if (test_bit(PAGE_HEADLESS, &page->private)) {
+                       if (ret == 0) {
+-                              spin_unlock(&pool->lock);
+                               free_z3fold_page(page);
+                               return 0;
+                       }
+-              } else if (kref_put(&zhdr->refcount, release_z3fold_page)) {
+-                      atomic64_dec(&pool->pages_nr);
++                      spin_lock(&pool->lock);
++                      list_add(&page->lru, &pool->lru);
++                      spin_unlock(&pool->lock);
++              } else {
++                      z3fold_page_lock(zhdr);
++                      clear_bit(UNDER_RECLAIM, &page->private);
++                      if (kref_put(&zhdr->refcount,
++                                      release_z3fold_page_locked)) {
++                              atomic64_dec(&pool->pages_nr);
++                              return 0;
++                      }
++                      /*
++                       * if we are here, the page is still not completely
++                       * free. Take the global pool lock then to be able
++                       * to add it back to the lru list
++                       */
++                      spin_lock(&pool->lock);
++                      list_add(&page->lru, &pool->lru);
+                       spin_unlock(&pool->lock);
+-                      return 0;
++                      z3fold_page_unlock(zhdr);
+               }
+ 
+-              /*
+-               * Add to the beginning of LRU.
+-               * Pool lock has to be kept here to ensure the page has
+-               * not already been released
+-               */
+-              list_add(&page->lru, &pool->lru);
++              /* We started off locked to we need to lock the pool back */
++              spin_lock(&pool->lock);
+       }
+       spin_unlock(&pool->lock);
+       return -EAGAIN;
+diff --git a/net/atm/lec.c b/net/atm/lec.c
+index a3d93a1bb133..5741b6474dd9 100644
+--- a/net/atm/lec.c
++++ b/net/atm/lec.c
+@@ -41,6 +41,9 @@ static unsigned char bridge_ula_lec[] = { 0x01, 0x80, 0xc2, 
0x00, 0x00 };
+ #include <linux/module.h>
+ #include <linux/init.h>
+ 
++/* Hardening for Spectre-v1 */
++#include <linux/nospec.h>
++
+ #include "lec.h"
+ #include "lec_arpc.h"
+ #include "resources.h"
+@@ -687,8 +690,10 @@ static int lec_vcc_attach(struct atm_vcc *vcc, void 
__user *arg)
+       bytes_left = copy_from_user(&ioc_data, arg, sizeof(struct atmlec_ioc));
+       if (bytes_left != 0)
+               pr_info("copy from user failed for %d bytes\n", bytes_left);
+-      if (ioc_data.dev_num < 0 || ioc_data.dev_num >= MAX_LEC_ITF ||
+-          !dev_lec[ioc_data.dev_num])
++      if (ioc_data.dev_num < 0 || ioc_data.dev_num >= MAX_LEC_ITF)
++              return -EINVAL;
++      ioc_data.dev_num = array_index_nospec(ioc_data.dev_num, MAX_LEC_ITF);
++      if (!dev_lec[ioc_data.dev_num])
+               return -EINVAL;
+       vpriv = kmalloc(sizeof(struct lec_vcc_priv), GFP_KERNEL);
+       if (!vpriv)
+diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
+index 014a73b46064..2800c4c4978c 100644
+--- a/net/bridge/netfilter/ebtables.c
++++ b/net/bridge/netfilter/ebtables.c
+@@ -1819,13 +1819,14 @@ static int compat_table_info(const struct 
ebt_table_info *info,
+ {
+       unsigned int size = info->entries_size;
+       const void *entries = info->entries;
+-      int ret;
+ 
+       newinfo->entries_size = size;
+-
+-      ret = xt_compat_init_offsets(NFPROTO_BRIDGE, info->nentries);
+-      if (ret)
+-              return ret;
++      if (info->nentries) {
++              int ret = xt_compat_init_offsets(NFPROTO_BRIDGE,
++                                               info->nentries);
++              if (ret)
++                      return ret;
++      }
+ 
+       return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
+                                                       entries, newinfo);
+diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
+index c0548d268e1a..e3e6a3e2ca22 100644
+--- a/net/core/dev_addr_lists.c
++++ b/net/core/dev_addr_lists.c
+@@ -57,8 +57,8 @@ static int __hw_addr_add_ex(struct netdev_hw_addr_list *list,
+               return -EINVAL;
+ 
+       list_for_each_entry(ha, &list->list, list) {
+-              if (!memcmp(ha->addr, addr, addr_len) &&
+-                  ha->type == addr_type) {
++              if (ha->type == addr_type &&
++                  !memcmp(ha->addr, addr, addr_len)) {
+                       if (global) {
+                               /* check if addr is already used as global */
+                               if (ha->global_use)
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 564beb7e6d1c..ef734ad1d852 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -857,6 +857,7 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, 
struct sk_buff *skb)
+       n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
+       n->cloned = 1;
+       n->nohdr = 0;
++      n->peeked = 0;
+       n->destructor = NULL;
+       C(tail);
+       C(end);
+diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
+index e65fcb45c3f6..b08feb219b44 100644
+--- a/net/dccp/ipv4.c
++++ b/net/dccp/ipv4.c
+@@ -614,6 +614,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff 
*skb)
+       ireq = inet_rsk(req);
+       sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
+       sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
++      ireq->ir_mark = inet_request_mark(sk, skb);
+       ireq->ireq_family = AF_INET;
+       ireq->ir_iif = sk->sk_bound_dev_if;
+ 
+diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
+index 5df7857fc0f3..6344f1b18a6a 100644
+--- a/net/dccp/ipv6.c
++++ b/net/dccp/ipv6.c
+@@ -351,6 +351,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct 
sk_buff *skb)
+       ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
+       ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
+       ireq->ireq_family = AF_INET6;
++      ireq->ir_mark = inet_request_mark(sk, skb);
+ 
+       if (ipv6_opt_accepted(sk, skb, IP6CB(skb)) ||
+           np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
+diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
+index d451b9f19b59..2341c1401681 100644
+--- a/net/ipv4/inet_timewait_sock.c
++++ b/net/ipv4/inet_timewait_sock.c
+@@ -179,6 +179,7 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct 
sock *sk,
+               tw->tw_dport        = inet->inet_dport;
+               tw->tw_family       = sk->sk_family;
+               tw->tw_reuse        = sk->sk_reuse;
++              tw->tw_reuseport    = sk->sk_reuseport;
+               tw->tw_hash         = sk->sk_hash;
+               tw->tw_ipv6only     = 0;
+               tw->tw_transparent  = inet->transparent;
+diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
+index b20c8ac64081..64007ce87273 100644
+--- a/net/ipv4/inetpeer.c
++++ b/net/ipv4/inetpeer.c
+@@ -210,6 +210,7 @@ struct inet_peer *inet_getpeer(struct inet_peer_base *base,
+               p = kmem_cache_alloc(peer_cachep, GFP_ATOMIC);
+               if (p) {
+                       p->daddr = *daddr;
++                      p->dtime = (__u32)jiffies;
+                       refcount_set(&p->refcnt, 2);
+                       atomic_set(&p->rid, 0);
+                       p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 9ff06c5051ae..5ea559f8c456 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -2288,13 +2288,14 @@ struct rtable *ip_route_output_key_hash(struct net 
*net, struct flowi4 *fl4,
+                                       const struct sk_buff *skb)
+ {
+       __u8 tos = RT_FL_TOS(fl4);
+-      struct fib_result res;
++      struct fib_result res = {
++              .type           = RTN_UNSPEC,
++              .fi             = NULL,
++              .table          = NULL,
++              .tclassid       = 0,
++      };
+       struct rtable *rth;
+ 
+-      res.tclassid    = 0;
+-      res.fi          = NULL;
+-      res.table       = NULL;
+-
+       fl4->flowi4_iif = LOOPBACK_IFINDEX;
+       fl4->flowi4_tos = tos & IPTOS_RT_MASK;
+       fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index d023f879e7bb..b694fbf44a35 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -2601,7 +2601,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
+       case TCP_REPAIR_QUEUE:
+               if (!tp->repair)
+                       err = -EPERM;
+-              else if (val < TCP_QUEUES_NR)
++              else if ((unsigned int)val < TCP_QUEUES_NR)
+                       tp->repair_queue = val;
+               else
+                       err = -EINVAL;
+diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
+index 9db49805b7be..01a4ff3df60b 100644
+--- a/net/kcm/kcmsock.c
++++ b/net/kcm/kcmsock.c
+@@ -1425,6 +1425,7 @@ static int kcm_attach(struct socket *sock, struct socket 
*csock,
+        */
+       if (csk->sk_user_data) {
+               write_unlock_bh(&csk->sk_callback_lock);
++              strp_stop(&psock->strp);
+               strp_done(&psock->strp);
+               kmem_cache_free(kcm_psockp, psock);
+               err = -EALREADY;
+diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
+index b3245f9a37d1..e8f1556fa446 100644
+--- a/net/netfilter/ipvs/ip_vs_ctl.c
++++ b/net/netfilter/ipvs/ip_vs_ctl.c
+@@ -2387,11 +2387,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user 
*user, unsigned int len)
+                       strlcpy(cfg.mcast_ifn, dm->mcast_ifn,
+                               sizeof(cfg.mcast_ifn));
+                       cfg.syncid = dm->syncid;
+-                      rtnl_lock();
+-                      mutex_lock(&ipvs->sync_mutex);
+                       ret = start_sync_thread(ipvs, &cfg, dm->state);
+-                      mutex_unlock(&ipvs->sync_mutex);
+-                      rtnl_unlock();
+               } else {
+                       mutex_lock(&ipvs->sync_mutex);
+                       ret = stop_sync_thread(ipvs, dm->state);
+@@ -3484,12 +3480,8 @@ static int ip_vs_genl_new_daemon(struct netns_ipvs 
*ipvs, struct nlattr **attrs)
+       if (ipvs->mixed_address_family_dests > 0)
+               return -EINVAL;
+ 
+-      rtnl_lock();
+-      mutex_lock(&ipvs->sync_mutex);
+       ret = start_sync_thread(ipvs, &c,
+                               nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]));
+-      mutex_unlock(&ipvs->sync_mutex);
+-      rtnl_unlock();
+       return ret;
+ }
+ 
+diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
+index 13f740875507..5e07f7a6794e 100644
+--- a/net/netfilter/ipvs/ip_vs_sync.c
++++ b/net/netfilter/ipvs/ip_vs_sync.c
+@@ -49,6 +49,7 @@
+ #include <linux/kthread.h>
+ #include <linux/wait.h>
+ #include <linux/kernel.h>
++#include <linux/sched/signal.h>
+ 
+ #include <asm/unaligned.h>            /* Used for ntoh_seq and hton_seq */
+ 
+@@ -1360,15 +1361,9 @@ static void set_mcast_pmtudisc(struct sock *sk, int val)
+ /*
+  *      Specifiy default interface for outgoing multicasts
+  */
+-static int set_mcast_if(struct sock *sk, char *ifname)
++static int set_mcast_if(struct sock *sk, struct net_device *dev)
+ {
+-      struct net_device *dev;
+       struct inet_sock *inet = inet_sk(sk);
+-      struct net *net = sock_net(sk);
+-
+-      dev = __dev_get_by_name(net, ifname);
+-      if (!dev)
+-              return -ENODEV;
+ 
+       if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
+               return -EINVAL;
+@@ -1396,19 +1391,14 @@ static int set_mcast_if(struct sock *sk, char *ifname)
+  *      in the in_addr structure passed in as a parameter.
+  */
+ static int
+-join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname)
++join_mcast_group(struct sock *sk, struct in_addr *addr, struct net_device 
*dev)
+ {
+-      struct net *net = sock_net(sk);
+       struct ip_mreqn mreq;
+-      struct net_device *dev;
+       int ret;
+ 
+       memset(&mreq, 0, sizeof(mreq));
+       memcpy(&mreq.imr_multiaddr, addr, sizeof(struct in_addr));
+ 
+-      dev = __dev_get_by_name(net, ifname);
+-      if (!dev)
+-              return -ENODEV;
+       if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
+               return -EINVAL;
+ 
+@@ -1423,15 +1413,10 @@ join_mcast_group(struct sock *sk, struct in_addr 
*addr, char *ifname)
+ 
+ #ifdef CONFIG_IP_VS_IPV6
+ static int join_mcast_group6(struct sock *sk, struct in6_addr *addr,
+-                           char *ifname)
++                           struct net_device *dev)
+ {
+-      struct net *net = sock_net(sk);
+-      struct net_device *dev;
+       int ret;
+ 
+-      dev = __dev_get_by_name(net, ifname);
+-      if (!dev)
+-              return -ENODEV;
+       if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
+               return -EINVAL;
+ 
+@@ -1443,24 +1428,18 @@ static int join_mcast_group6(struct sock *sk, struct 
in6_addr *addr,
+ }
+ #endif
+ 
+-static int bind_mcastif_addr(struct socket *sock, char *ifname)
++static int bind_mcastif_addr(struct socket *sock, struct net_device *dev)
+ {
+-      struct net *net = sock_net(sock->sk);
+-      struct net_device *dev;
+       __be32 addr;
+       struct sockaddr_in sin;
+ 
+-      dev = __dev_get_by_name(net, ifname);
+-      if (!dev)
+-              return -ENODEV;
+-
+       addr = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
+       if (!addr)
+               pr_err("You probably need to specify IP address on "
+                      "multicast interface.\n");
+ 
+       IP_VS_DBG(7, "binding socket with (%s) %pI4\n",
+-                ifname, &addr);
++                dev->name, &addr);
+ 
+       /* Now bind the socket with the address of multicast interface */
+       sin.sin_family       = AF_INET;
+@@ -1493,7 +1472,8 @@ static void get_mcast_sockaddr(union ipvs_sockaddr *sa, 
int *salen,
+ /*
+  *      Set up sending multicast socket over UDP
+  */
+-static struct socket *make_send_sock(struct netns_ipvs *ipvs, int id)
++static int make_send_sock(struct netns_ipvs *ipvs, int id,
++                        struct net_device *dev, struct socket **sock_ret)
+ {
+       /* multicast addr */
+       union ipvs_sockaddr mcast_addr;
+@@ -1505,9 +1485,10 @@ static struct socket *make_send_sock(struct netns_ipvs 
*ipvs, int id)
+                                 IPPROTO_UDP, &sock);
+       if (result < 0) {
+               pr_err("Error during creation of socket; terminating\n");
+-              return ERR_PTR(result);
++              goto error;
+       }
+-      result = set_mcast_if(sock->sk, ipvs->mcfg.mcast_ifn);
++      *sock_ret = sock;
++      result = set_mcast_if(sock->sk, dev);
+       if (result < 0) {
+               pr_err("Error setting outbound mcast interface\n");
+               goto error;
+@@ -1522,7 +1503,7 @@ static struct socket *make_send_sock(struct netns_ipvs 
*ipvs, int id)
+               set_sock_size(sock->sk, 1, result);
+ 
+       if (AF_INET == ipvs->mcfg.mcast_af)
+-              result = bind_mcastif_addr(sock, ipvs->mcfg.mcast_ifn);
++              result = bind_mcastif_addr(sock, dev);
+       else
+               result = 0;
+       if (result < 0) {
+@@ -1538,19 +1519,18 @@ static struct socket *make_send_sock(struct netns_ipvs 
*ipvs, int id)
+               goto error;
+       }
+ 
+-      return sock;
++      return 0;
+ 
+ error:
+-      sock_release(sock);
+-      return ERR_PTR(result);
++      return result;
+ }
+ 
+ 
+ /*
+  *      Set up receiving multicast socket over UDP
+  */
+-static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id,
+-                                      int ifindex)
++static int make_receive_sock(struct netns_ipvs *ipvs, int id,
++                           struct net_device *dev, struct socket **sock_ret)
+ {
+       /* multicast addr */
+       union ipvs_sockaddr mcast_addr;
+@@ -1562,8 +1542,9 @@ static struct socket *make_receive_sock(struct 
netns_ipvs *ipvs, int id,
+                                 IPPROTO_UDP, &sock);
+       if (result < 0) {
+               pr_err("Error during creation of socket; terminating\n");
+-              return ERR_PTR(result);
++              goto error;
+       }
++      *sock_ret = sock;
+       /* it is equivalent to the REUSEADDR option in user-space */
+       sock->sk->sk_reuse = SK_CAN_REUSE;
+       result = sysctl_sync_sock_size(ipvs);
+@@ -1571,7 +1552,7 @@ static struct socket *make_receive_sock(struct 
netns_ipvs *ipvs, int id,
+               set_sock_size(sock->sk, 0, result);
+ 
+       get_mcast_sockaddr(&mcast_addr, &salen, &ipvs->bcfg, id);
+-      sock->sk->sk_bound_dev_if = ifindex;
++      sock->sk->sk_bound_dev_if = dev->ifindex;
+       result = sock->ops->bind(sock, (struct sockaddr *)&mcast_addr, salen);
+       if (result < 0) {
+               pr_err("Error binding to the multicast addr\n");
+@@ -1582,21 +1563,20 @@ static struct socket *make_receive_sock(struct 
netns_ipvs *ipvs, int id,
+ #ifdef CONFIG_IP_VS_IPV6
+       if (ipvs->bcfg.mcast_af == AF_INET6)
+               result = join_mcast_group6(sock->sk, &mcast_addr.in6.sin6_addr,
+-                                         ipvs->bcfg.mcast_ifn);
++                                         dev);
+       else
+ #endif
+               result = join_mcast_group(sock->sk, &mcast_addr.in.sin_addr,
+-                                        ipvs->bcfg.mcast_ifn);
++                                        dev);
+       if (result < 0) {
+               pr_err("Error joining to the multicast group\n");
+               goto error;
+       }
+ 
+-      return sock;
++      return 0;
+ 
+ error:
+-      sock_release(sock);
+-      return ERR_PTR(result);
++      return result;
+ }
+ 
+ 
+@@ -1781,13 +1761,12 @@ static int sync_thread_backup(void *data)
+ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
+                     int state)
+ {
+-      struct ip_vs_sync_thread_data *tinfo;
++      struct ip_vs_sync_thread_data *tinfo = NULL;
+       struct task_struct **array = NULL, *task;
+-      struct socket *sock;
+       struct net_device *dev;
+       char *name;
+       int (*threadfn)(void *data);
+-      int id, count, hlen;
++      int id = 0, count, hlen;
+       int result = -ENOMEM;
+       u16 mtu, min_mtu;
+ 
+@@ -1795,6 +1774,18 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct 
ipvs_sync_daemon_cfg *c,
+       IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %zd bytes\n",
+                 sizeof(struct ip_vs_sync_conn_v0));
+ 
++      /* Do not hold one mutex and then to block on another */
++      for (;;) {
++              rtnl_lock();
++              if (mutex_trylock(&ipvs->sync_mutex))
++                      break;
++              rtnl_unlock();
++              mutex_lock(&ipvs->sync_mutex);
++              if (rtnl_trylock())
++                      break;
++              mutex_unlock(&ipvs->sync_mutex);
++      }
++
+       if (!ipvs->sync_state) {
+               count = clamp(sysctl_sync_ports(ipvs), 1, IPVS_SYNC_PORTS_MAX);
+               ipvs->threads_mask = count - 1;
+@@ -1813,7 +1804,8 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct 
ipvs_sync_daemon_cfg *c,
+       dev = __dev_get_by_name(ipvs->net, c->mcast_ifn);
+       if (!dev) {
+               pr_err("Unknown mcast interface: %s\n", c->mcast_ifn);
+-              return -ENODEV;
++              result = -ENODEV;
++              goto out_early;
+       }
+       hlen = (AF_INET6 == c->mcast_af) ?
+              sizeof(struct ipv6hdr) + sizeof(struct udphdr) :
+@@ -1830,26 +1822,30 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct 
ipvs_sync_daemon_cfg *c,
+               c->sync_maxlen = mtu - hlen;
+ 
+       if (state == IP_VS_STATE_MASTER) {
++              result = -EEXIST;
+               if (ipvs->ms)
+-                      return -EEXIST;
++                      goto out_early;
+ 
+               ipvs->mcfg = *c;
+               name = "ipvs-m:%d:%d";
+               threadfn = sync_thread_master;
+       } else if (state == IP_VS_STATE_BACKUP) {
++              result = -EEXIST;
+               if (ipvs->backup_threads)
+-                      return -EEXIST;
++                      goto out_early;
+ 
+               ipvs->bcfg = *c;
+               name = "ipvs-b:%d:%d";
+               threadfn = sync_thread_backup;
+       } else {
+-              return -EINVAL;
++              result = -EINVAL;
++              goto out_early;
+       }
+ 
+       if (state == IP_VS_STATE_MASTER) {
+               struct ipvs_master_sync_state *ms;
+ 
++              result = -ENOMEM;
+               ipvs->ms = kcalloc(count, sizeof(ipvs->ms[0]), GFP_KERNEL);
+               if (!ipvs->ms)
+                       goto out;
+@@ -1865,39 +1861,38 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct 
ipvs_sync_daemon_cfg *c,
+       } else {
+               array = kcalloc(count, sizeof(struct task_struct *),
+                               GFP_KERNEL);
++              result = -ENOMEM;
+               if (!array)
+                       goto out;
+       }
+ 
+-      tinfo = NULL;
+       for (id = 0; id < count; id++) {
+-              if (state == IP_VS_STATE_MASTER)
+-                      sock = make_send_sock(ipvs, id);
+-              else
+-                      sock = make_receive_sock(ipvs, id, dev->ifindex);
+-              if (IS_ERR(sock)) {
+-                      result = PTR_ERR(sock);
+-                      goto outtinfo;
+-              }
++              result = -ENOMEM;
+               tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL);
+               if (!tinfo)
+-                      goto outsocket;
++                      goto out;
+               tinfo->ipvs = ipvs;
+-              tinfo->sock = sock;
++              tinfo->sock = NULL;
+               if (state == IP_VS_STATE_BACKUP) {
+                       tinfo->buf = kmalloc(ipvs->bcfg.sync_maxlen,
+                                            GFP_KERNEL);
+                       if (!tinfo->buf)
+-                              goto outtinfo;
++                              goto out;
+               } else {
+                       tinfo->buf = NULL;
+               }
+               tinfo->id = id;
++              if (state == IP_VS_STATE_MASTER)
++                      result = make_send_sock(ipvs, id, dev, &tinfo->sock);
++              else
++                      result = make_receive_sock(ipvs, id, dev, &tinfo->sock);
++              if (result < 0)
++                      goto out;
+ 
+               task = kthread_run(threadfn, tinfo, name, ipvs->gen, id);
+               if (IS_ERR(task)) {
+                       result = PTR_ERR(task);
+-                      goto outtinfo;
++                      goto out;
+               }
+               tinfo = NULL;
+               if (state == IP_VS_STATE_MASTER)
+@@ -1914,20 +1909,20 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct 
ipvs_sync_daemon_cfg *c,
+       ipvs->sync_state |= state;
+       spin_unlock_bh(&ipvs->sync_buff_lock);
+ 
++      mutex_unlock(&ipvs->sync_mutex);
++      rtnl_unlock();
++
+       /* increase the module use count */
+       ip_vs_use_count_inc();
+ 
+       return 0;
+ 
+-outsocket:
+-      sock_release(sock);
+-
+-outtinfo:
+-      if (tinfo) {
+-              sock_release(tinfo->sock);
+-              kfree(tinfo->buf);
+-              kfree(tinfo);
+-      }
++out:
++      /* We do not need RTNL lock anymore, release it here so that
++       * sock_release below and in the kthreads can use rtnl_lock
++       * to leave the mcast group.
++       */
++      rtnl_unlock();
+       count = id;
+       while (count-- > 0) {
+               if (state == IP_VS_STATE_MASTER)
+@@ -1935,13 +1930,23 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct 
ipvs_sync_daemon_cfg *c,
+               else
+                       kthread_stop(array[count]);
+       }
+-      kfree(array);
+-
+-out:
+       if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) {
+               kfree(ipvs->ms);
+               ipvs->ms = NULL;
+       }
++      mutex_unlock(&ipvs->sync_mutex);
++      if (tinfo) {
++              if (tinfo->sock)
++                      sock_release(tinfo->sock);
++              kfree(tinfo->buf);
++              kfree(tinfo);
++      }
++      kfree(array);
++      return result;
++
++out_early:
++      mutex_unlock(&ipvs->sync_mutex);
++      rtnl_unlock();
+       return result;
+ }
+ 
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 1b86eccf94b6..b3932846f6c4 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -1813,6 +1813,8 @@ static int netlink_sendmsg(struct socket *sock, struct 
msghdr *msg, size_t len)
+ 
+       if (msg->msg_namelen) {
+               err = -EINVAL;
++              if (msg->msg_namelen < sizeof(struct sockaddr_nl))
++                      goto out;
+               if (addr->nl_family != AF_NETLINK)
+                       goto out;
+               dst_portid = addr->nl_pid;
+diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c
+index 41bd496531d4..00192a996be0 100644
+--- a/net/rfkill/rfkill-gpio.c
++++ b/net/rfkill/rfkill-gpio.c
+@@ -137,13 +137,18 @@ static int rfkill_gpio_probe(struct platform_device 
*pdev)
+ 
+       ret = rfkill_register(rfkill->rfkill_dev);
+       if (ret < 0)
+-              return ret;
++              goto err_destroy;
+ 
+       platform_set_drvdata(pdev, rfkill);
+ 
+       dev_info(&pdev->dev, "%s device registered.\n", rfkill->name);
+ 
+       return 0;
++
++err_destroy:
++      rfkill_destroy(rfkill->rfkill_dev);
++
++      return ret;
+ }
+ 
+ static int rfkill_gpio_remove(struct platform_device *pdev)

Reply via email to