commit:     84f693cf79638d3350eda3ac45f50eebb618b0af
Author:     Thomas Deutschmann <whissi <AT> gentoo <DOT> org>
AuthorDate: Wed Aug  5 14:45:25 2020 +0000
Commit:     Thomas Deutschmann <whissi <AT> gentoo <DOT> org>
CommitDate: Wed Aug  5 14:45:25 2020 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=84f693cf

Linux patch 5.4.56

Signed-off-by: Thomas Deutschmann <whissi <AT> gentoo.org>

 0000_README             |    4 +
 1055_linux-5.4.56.patch | 2697 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2701 insertions(+)

diff --git a/0000_README b/0000_README
index 9289fff..0a219d2 100644
--- a/0000_README
+++ b/0000_README
@@ -263,6 +263,10 @@ Patch:  1054_linux-5.4.55.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.4.55
 
+Patch:  1055_linux-5.4.56.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.4.56
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1055_linux-5.4.56.patch b/1055_linux-5.4.56.patch
new file mode 100644
index 0000000..fd7909a
--- /dev/null
+++ b/1055_linux-5.4.56.patch
@@ -0,0 +1,2697 @@
+diff --git a/Makefile b/Makefile
+index 072fe0eaa740..c33fb4eebd4d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 4
+-SUBLEVEL = 55
++SUBLEVEL = 56
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+ 
+diff --git a/arch/arm/boot/dts/armada-38x.dtsi b/arch/arm/boot/dts/armada-38x.dtsi
+index 3f4bb44d85f0..669da3a33d82 100644
+--- a/arch/arm/boot/dts/armada-38x.dtsi
++++ b/arch/arm/boot/dts/armada-38x.dtsi
+@@ -339,7 +339,8 @@
+ 
+                       comphy: phy@18300 {
+                               compatible = "marvell,armada-380-comphy";
+-                              reg = <0x18300 0x100>;
++                              reg-names = "comphy", "conf";
++                              reg = <0x18300 0x100>, <0x18460 4>;
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+ 
+diff --git a/arch/arm/boot/dts/imx6qdl-icore.dtsi b/arch/arm/boot/dts/imx6qdl-icore.dtsi
+index 7814f1ef0804..fde56f98398d 100644
+--- a/arch/arm/boot/dts/imx6qdl-icore.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-icore.dtsi
+@@ -384,7 +384,7 @@
+ 
+       pinctrl_usbotg: usbotggrp {
+               fsl,pins = <
+-                      MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x17059
++                      MX6QDL_PAD_ENET_RX_ER__USB_OTG_ID 0x17059
+               >;
+       };
+ 
+@@ -396,6 +396,7 @@
+                       MX6QDL_PAD_SD1_DAT1__SD1_DATA1 0x17070
+                       MX6QDL_PAD_SD1_DAT2__SD1_DATA2 0x17070
+                       MX6QDL_PAD_SD1_DAT3__SD1_DATA3 0x17070
++                      MX6QDL_PAD_GPIO_1__GPIO1_IO01  0x1b0b0
+               >;
+       };
+ 
+diff --git a/arch/arm/boot/dts/imx6sx-sabreauto.dts b/arch/arm/boot/dts/imx6sx-sabreauto.dts
+index 315044ccd65f..e4719566133c 100644
+--- a/arch/arm/boot/dts/imx6sx-sabreauto.dts
++++ b/arch/arm/boot/dts/imx6sx-sabreauto.dts
+@@ -99,7 +99,7 @@
+ &fec2 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_enet2>;
+-      phy-mode = "rgmii";
++      phy-mode = "rgmii-id";
+       phy-handle = <&ethphy0>;
+       fsl,magic-packet;
+       status = "okay";
+diff --git a/arch/arm/boot/dts/imx6sx-sdb.dtsi b/arch/arm/boot/dts/imx6sx-sdb.dtsi
+index f6972deb5e39..865528b134d8 100644
+--- a/arch/arm/boot/dts/imx6sx-sdb.dtsi
++++ b/arch/arm/boot/dts/imx6sx-sdb.dtsi
+@@ -213,7 +213,7 @@
+ &fec2 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_enet2>;
+-      phy-mode = "rgmii";
++      phy-mode = "rgmii-id";
+       phy-handle = <&ethphy2>;
+       status = "okay";
+ };
+diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
+index 4c268b70b735..e0a9b371c248 100644
+--- a/arch/arm/boot/dts/sun4i-a10.dtsi
++++ b/arch/arm/boot/dts/sun4i-a10.dtsi
+@@ -198,7 +198,7 @@
+               default-pool {
+                       compatible = "shared-dma-pool";
+                       size = <0x6000000>;
+-                      alloc-ranges = <0x4a000000 0x6000000>;
++                      alloc-ranges = <0x40000000 0x10000000>;
+                       reusable;
+                       linux,cma-default;
+               };
+diff --git a/arch/arm/boot/dts/sun5i.dtsi b/arch/arm/boot/dts/sun5i.dtsi
+index 6befa236ba99..fd31da8fd311 100644
+--- a/arch/arm/boot/dts/sun5i.dtsi
++++ b/arch/arm/boot/dts/sun5i.dtsi
+@@ -117,7 +117,7 @@
+               default-pool {
+                       compatible = "shared-dma-pool";
+                       size = <0x6000000>;
+-                      alloc-ranges = <0x4a000000 0x6000000>;
++                      alloc-ranges = <0x40000000 0x10000000>;
+                       reusable;
+                       linux,cma-default;
+               };
+diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
+index 8aebefd6accf..1f8b45f07e58 100644
+--- a/arch/arm/boot/dts/sun7i-a20.dtsi
++++ b/arch/arm/boot/dts/sun7i-a20.dtsi
+@@ -180,7 +180,7 @@
+               default-pool {
+                       compatible = "shared-dma-pool";
+                       size = <0x6000000>;
+-                      alloc-ranges = <0x4a000000 0x6000000>;
++                      alloc-ranges = <0x40000000 0x10000000>;
+                       reusable;
+                       linux,cma-default;
+               };
+diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
+index b0c195e3a06d..5f95e4b911a0 100644
+--- a/arch/arm/kernel/hw_breakpoint.c
++++ b/arch/arm/kernel/hw_breakpoint.c
+@@ -680,6 +680,12 @@ static void disable_single_step(struct perf_event *bp)
+       arch_install_hw_breakpoint(bp);
+ }
+ 
++static int watchpoint_fault_on_uaccess(struct pt_regs *regs,
++                                     struct arch_hw_breakpoint *info)
++{
++      return !user_mode(regs) && info->ctrl.privilege == ARM_BREAKPOINT_USER;
++}
++
+ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
+                              struct pt_regs *regs)
+ {
+@@ -739,16 +745,27 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
+               }
+ 
+               pr_debug("watchpoint fired: address = 0x%x\n", info->trigger);
++
++              /*
++               * If we triggered a user watchpoint from a uaccess routine,
++               * then handle the stepping ourselves since userspace really
++               * can't help us with this.
++               */
++              if (watchpoint_fault_on_uaccess(regs, info))
++                      goto step;
++
+               perf_bp_event(wp, regs);
+ 
+               /*
+-               * If no overflow handler is present, insert a temporary
+-               * mismatch breakpoint so we can single-step over the
+-               * watchpoint trigger.
++               * Defer stepping to the overflow handler if one is installed.
++               * Otherwise, insert a temporary mismatch breakpoint so that
++               * we can single-step over the watchpoint trigger.
+                */
+-              if (is_default_overflow_handler(wp))
+-                      enable_single_step(wp, instruction_pointer(regs));
++              if (!is_default_overflow_handler(wp))
++                      goto unlock;
+ 
++step:
++              enable_single_step(wp, instruction_pointer(regs));
+ unlock:
+               rcu_read_unlock();
+       }
+diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
+index 12f0eb56a1cc..619db9b4c9d5 100644
+--- a/arch/arm64/include/asm/alternative.h
++++ b/arch/arm64/include/asm/alternative.h
+@@ -77,9 +77,9 @@ static inline void apply_alternatives_module(void *start, size_t length) { }
+       "663:\n\t"                                                      \
+       newinstr "\n"                                                   \
+       "664:\n\t"                                                      \
+-      ".previous\n\t"                                                 \
+       ".org   . - (664b-663b) + (662b-661b)\n\t"                      \
+-      ".org   . - (662b-661b) + (664b-663b)\n"                        \
++      ".org   . - (662b-661b) + (664b-663b)\n\t"                      \
++      ".previous\n"                                                   \
+       ".endif\n"
+ 
+ #define __ALTERNATIVE_CFG_CB(oldinstr, feature, cfg_enabled, cb)      \
+diff --git a/arch/arm64/include/asm/checksum.h b/arch/arm64/include/asm/checksum.h
+index d064a50deb5f..5665a3fc14be 100644
+--- a/arch/arm64/include/asm/checksum.h
++++ b/arch/arm64/include/asm/checksum.h
+@@ -19,16 +19,17 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
+ {
+       __uint128_t tmp;
+       u64 sum;
++      int n = ihl; /* we want it signed */
+ 
+       tmp = *(const __uint128_t *)iph;
+       iph += 16;
+-      ihl -= 4;
++      n -= 4;
+       tmp += ((tmp >> 64) | (tmp << 64));
+       sum = tmp >> 64;
+       do {
+               sum += *(const u32 *)iph;
+               iph += 4;
+-      } while (--ihl);
++      } while (--n > 0);
+ 
+       sum += ((sum >> 32) | (sum << 32));
+       return csum_fold((__force u32)(sum >> 32));
+diff --git a/arch/parisc/include/asm/cmpxchg.h b/arch/parisc/include/asm/cmpxchg.h
+index ab5c215cf46c..068958575871 100644
+--- a/arch/parisc/include/asm/cmpxchg.h
++++ b/arch/parisc/include/asm/cmpxchg.h
+@@ -60,6 +60,7 @@ extern void __cmpxchg_called_with_bad_pointer(void);
+ extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old,
+                                  unsigned int new_);
+ extern u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new_);
++extern u8 __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new_);
+ 
+ /* don't worry...optimizer will get rid of most of this */
+ static inline unsigned long
+@@ -71,6 +72,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
+ #endif
+       case 4: return __cmpxchg_u32((unsigned int *)ptr,
+                                    (unsigned int)old, (unsigned int)new_);
++      case 1: return __cmpxchg_u8((u8 *)ptr, (u8)old, (u8)new_);
+       }
+       __cmpxchg_called_with_bad_pointer();
+       return old;
+diff --git a/arch/parisc/lib/bitops.c b/arch/parisc/lib/bitops.c
+index 70ffbcf889b8..2e4d1f05a926 100644
+--- a/arch/parisc/lib/bitops.c
++++ b/arch/parisc/lib/bitops.c
+@@ -79,3 +79,15 @@ unsigned long __cmpxchg_u32(volatile unsigned int *ptr, unsigned int old, unsign
+       _atomic_spin_unlock_irqrestore(ptr, flags);
+       return (unsigned long)prev;
+ }
++
++u8 __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new)
++{
++      unsigned long flags;
++      u8 prev;
++
++      _atomic_spin_lock_irqsave(ptr, flags);
++      if ((prev = *ptr) == old)
++              *ptr = new;
++      _atomic_spin_unlock_irqrestore(ptr, flags);
++      return prev;
++}
+diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
+index 319812923012..b1eb6a041118 100644
+--- a/arch/riscv/mm/init.c
++++ b/arch/riscv/mm/init.c
+@@ -115,9 +115,9 @@ void __init setup_bootmem(void)
+       /* Reserve from the start of the kernel to the end of the kernel */
+       memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
+ 
+-      set_max_mapnr(PFN_DOWN(mem_size));
+       max_pfn = PFN_DOWN(memblock_end_of_DRAM());
+       max_low_pfn = max_pfn;
++      set_max_mapnr(max_low_pfn);
+ 
+ #ifdef CONFIG_BLK_DEV_INITRD
+       setup_initrd();
+diff --git a/arch/sh/include/asm/pgalloc.h b/arch/sh/include/asm/pgalloc.h
+index 22d968bfe9bb..d770da3f8b6f 100644
+--- a/arch/sh/include/asm/pgalloc.h
++++ b/arch/sh/include/asm/pgalloc.h
+@@ -12,6 +12,7 @@ extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
+ extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
+ extern pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address);
+ extern void pmd_free(struct mm_struct *mm, pmd_t *pmd);
++#define __pmd_free_tlb(tlb, pmdp, addr)               pmd_free((tlb)->mm, (pmdp))
+ #endif
+ 
+ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
+@@ -33,13 +34,4 @@ do {                                                        \
+       tlb_remove_page((tlb), (pte));                  \
+ } while (0)
+ 
+-#if CONFIG_PGTABLE_LEVELS > 2
+-#define __pmd_free_tlb(tlb, pmdp, addr)                       \
+-do {                                                  \
+-      struct page *page = virt_to_page(pmdp);         \
+-      pgtable_pmd_page_dtor(page);                    \
+-      tlb_remove_page((tlb), page);                   \
+-} while (0);
+-#endif
+-
+ #endif /* __ASM_SH_PGALLOC_H */
+diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
+index d31f66e82ce5..4a8ec9e40cc2 100644
+--- a/arch/sh/kernel/entry-common.S
++++ b/arch/sh/kernel/entry-common.S
+@@ -199,7 +199,7 @@ syscall_trace_entry:
+       mov.l   @(OFF_R7,r15), r7   ! arg3
+       mov.l   @(OFF_R3,r15), r3   ! syscall_nr
+       !
+-      mov.l   2f, r10                 ! Number of syscalls
++      mov.l   6f, r10                 ! Number of syscalls
+       cmp/hs  r10, r3
+       bf      syscall_call
+       mov     #-ENOSYS, r0
+@@ -353,7 +353,7 @@ ENTRY(system_call)
+       tst     r9, r8
+       bf      syscall_trace_entry
+       !
+-      mov.l   2f, r8                  ! Number of syscalls
++      mov.l   6f, r8                  ! Number of syscalls
+       cmp/hs  r8, r3
+       bt      syscall_badsys
+       !
+@@ -392,7 +392,7 @@ syscall_exit:
+ #if !defined(CONFIG_CPU_SH2)
+ 1:    .long   TRA
+ #endif
+-2:    .long   NR_syscalls
++6:    .long   NR_syscalls
+ 3:    .long   sys_call_table
+ 7:    .long   do_syscall_trace_enter
+ 8:    .long   do_syscall_trace_leave
+diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
+index 519649ddf100..fe522691ac71 100644
+--- a/arch/x86/kernel/i8259.c
++++ b/arch/x86/kernel/i8259.c
+@@ -207,7 +207,7 @@ spurious_8259A_irq:
+                * lets ACK and report it. [once per IRQ]
+                */
+               if (!(spurious_irq_mask & irqmask)) {
+-                      printk(KERN_DEBUG
++                      printk_deferred(KERN_DEBUG
+                              "spurious 8259A interrupt: IRQ%d.\n", irq);
+                       spurious_irq_mask |= irqmask;
+               }
+diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
+index 2d6898c2cb64..6d83b4b857e6 100644
+--- a/arch/x86/kernel/stacktrace.c
++++ b/arch/x86/kernel/stacktrace.c
+@@ -58,7 +58,6 @@ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
+                        * or a page fault), which can make frame pointers
+                        * unreliable.
+                        */
+-
+                       if (IS_ENABLED(CONFIG_FRAME_POINTER))
+                               return -EINVAL;
+               }
+@@ -81,10 +80,6 @@ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
+       if (unwind_error(&state))
+               return -EINVAL;
+ 
+-      /* Success path for non-user tasks, i.e. kthreads and idle tasks */
+-      if (!(task->flags & (PF_KTHREAD | PF_IDLE)))
+-              return -EINVAL;
+-
+       return 0;
+ }
+ 
+diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c
+index aa0f39dc8129..187a86e0e753 100644
+--- a/arch/x86/kernel/unwind_orc.c
++++ b/arch/x86/kernel/unwind_orc.c
+@@ -431,8 +431,11 @@ bool unwind_next_frame(struct unwind_state *state)
+       /*
+        * Find the orc_entry associated with the text address.
+        *
+-       * Decrement call return addresses by one so they work for sibling
+-       * calls and calls to noreturn functions.
++       * For a call frame (as opposed to a signal frame), state->ip points to
++       * the instruction after the call.  That instruction's stack layout
++       * could be different from the call instruction's layout, for example
++       * if the call was to a noreturn function.  So get the ORC data for the
++       * call instruction itself.
+        */
+       orc = orc_find(state->signal ? state->ip : state->ip - 1);
+       if (!orc) {
+@@ -653,6 +656,7 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
+               state->sp = task->thread.sp;
+               state->bp = READ_ONCE_NOCHECK(frame->bp);
+               state->ip = READ_ONCE_NOCHECK(frame->ret_addr);
++              state->signal = (void *)state->ip == ret_from_fork;
+       }
+ 
+       if (get_stack_info((unsigned long *)state->sp, state->task,
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index 5d2587005d0e..6920f1d3b66f 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -2085,7 +2085,7 @@ void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data)
+ {
+       struct kvm_lapic *apic = vcpu->arch.apic;
+ 
+-      if (!lapic_in_kernel(vcpu) || apic_lvtt_oneshot(apic) ||
++      if (!kvm_apic_present(vcpu) || apic_lvtt_oneshot(apic) ||
+                       apic_lvtt_period(apic))
+               return;
+ 
+diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
+index c8da8eb160da..422193690fd4 100644
+--- a/drivers/crypto/ccp/ccp-ops.c
++++ b/drivers/crypto/ccp/ccp-ops.c
+@@ -1777,8 +1777,9 @@ ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+                              LSB_ITEM_SIZE);
+                       break;
+               default:
++                      kfree(hmac_buf);
+                       ret = -EINVAL;
+-                      goto e_ctx;
++                      goto e_data;
+               }
+ 
+               memset(&hmac_cmd, 0, sizeof(hmac_cmd));
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+index a73206784cba..2a7da26008a2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -667,9 +667,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
+               return n ? -EFAULT : 0;
+       }
+       case AMDGPU_INFO_DEV_INFO: {
+-              struct drm_amdgpu_info_device dev_info = {};
++              struct drm_amdgpu_info_device dev_info;
+               uint64_t vm_size;
+ 
++              memset(&dev_info, 0, sizeof(dev_info));
+               dev_info.device_id = dev->pdev->device;
+               dev_info.chip_rev = adev->rev_id;
+               dev_info.external_rev = adev->external_rev_id;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+index b66554b40db4..3f744e72912f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+@@ -691,7 +691,8 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
+               tmp_str++;
+       while (isspace(*++tmp_str));
+ 
+-      while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
++      while (tmp_str[0]) {
++              sub_str = strsep(&tmp_str, delimiter);
+               ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
+               if (ret)
+                       return -EINVAL;
+@@ -882,7 +883,8 @@ static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
+       memcpy(buf_cpy, buf, bytes);
+       buf_cpy[bytes] = '\0';
+       tmp = buf_cpy;
+-      while ((sub_str = strsep(&tmp, delimiter)) != NULL) {
++      while (tmp[0]) {
++              sub_str = strsep(&tmp, delimiter);
+               if (strlen(sub_str)) {
+                       ret = kstrtol(sub_str, 0, &level);
+                       if (ret)
+@@ -1298,7 +1300,8 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
+                       i++;
+               memcpy(buf_cpy, buf, count-i);
+               tmp_str = buf_cpy;
+-              while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
++              while (tmp_str[0]) {
++                      sub_str = strsep(&tmp_str, delimiter);
+                       ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
+                       if (ret) {
+                               count = -EINVAL;
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index c7d8edf450d3..6091194a3955 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -7464,20 +7464,38 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
+                * the same resource. If we have a new DC context as part of
+                * the DM atomic state from validation we need to free it and
+                * retain the existing one instead.
++               *
++               * Furthermore, since the DM atomic state only contains the DC
++               * context and can safely be annulled, we can free the state
++               * and clear the associated private object now to free
++               * some memory and avoid a possible use-after-free later.
+                */
+-              struct dm_atomic_state *new_dm_state, *old_dm_state;
+ 
+-              new_dm_state = dm_atomic_get_new_state(state);
+-              old_dm_state = dm_atomic_get_old_state(state);
++              for (i = 0; i < state->num_private_objs; i++) {
++                      struct drm_private_obj *obj = state->private_objs[i].ptr;
+ 
+-              if (new_dm_state && old_dm_state) {
+-                      if (new_dm_state->context)
+-                              dc_release_state(new_dm_state->context);
++                      if (obj->funcs == adev->dm.atomic_obj.funcs) {
++                              int j = state->num_private_objs-1;
+ 
+-                      new_dm_state->context = old_dm_state->context;
++                              dm_atomic_destroy_state(obj,
++                                              state->private_objs[i].state);
++
++                              /* If i is not at the end of the array then the
++                               * last element needs to be moved to where i was
++                               * before the array can safely be truncated.
++                               */
++                              if (i != j)
++                                      state->private_objs[i] =
++                                              state->private_objs[j];
+ 
+-                      if (old_dm_state->context)
+-                              dc_retain_state(old_dm_state->context);
++                              state->private_objs[j].ptr = NULL;
++                              state->private_objs[j].state = NULL;
++                              state->private_objs[j].old_state = NULL;
++                              state->private_objs[j].new_state = NULL;
++
++                              state->num_private_objs = j;
++                              break;
++                      }
+               }
+       }
+ 
+diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
+index 6854f5867d51..46ad14470d06 100644
+--- a/drivers/gpu/drm/drm_gem.c
++++ b/drivers/gpu/drm/drm_gem.c
+@@ -872,9 +872,6 @@ err:
+  * @file_priv: drm file-private structure
+  *
+  * Open an object using the global name, returning a handle and the size.
+- *
+- * This handle (of course) holds a reference to the object, so the object
+- * will not go away until the handle is deleted.
+  */
+ int
+ drm_gem_open_ioctl(struct drm_device *dev, void *data,
+@@ -899,14 +896,15 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data,
+ 
+       /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
+       ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
+-      drm_gem_object_put_unlocked(obj);
+       if (ret)
+-              return ret;
++              goto err;
+ 
+       args->handle = handle;
+       args->size = obj->size;
+ 
+-      return 0;
++err:
++      drm_gem_object_put_unlocked(obj);
++      return ret;
+ }
+ 
+ /**
+diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c
+index a05e64e3d80b..4042f5b39765 100644
+--- a/drivers/gpu/drm/drm_mipi_dbi.c
++++ b/drivers/gpu/drm/drm_mipi_dbi.c
+@@ -937,7 +937,7 @@ static int mipi_dbi_spi1_transfer(struct mipi_dbi *dbi, int dc,
+                       }
+               }
+ 
+-              tr.len = chunk;
++              tr.len = chunk * 2;
+               len -= chunk;
+ 
+               ret = spi_sync(spi, &m);
+diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
+index 9d71ce15db05..a51d3b795770 100644
+--- a/drivers/i2c/busses/i2c-cadence.c
++++ b/drivers/i2c/busses/i2c-cadence.c
+@@ -377,10 +377,8 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id)
+        * Check for the message size against FIFO depth and set the
+        * 'hold bus' bit if it is greater than FIFO depth.
+        */
+-      if ((id->recv_count > CDNS_I2C_FIFO_DEPTH)  || id->bus_hold_flag)
++      if (id->recv_count > CDNS_I2C_FIFO_DEPTH)
+               ctrl_reg |= CDNS_I2C_CR_HOLD;
+-      else
+-              ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD;
+ 
+       cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
+ 
+@@ -437,11 +435,8 @@ static void cdns_i2c_msend(struct cdns_i2c *id)
+        * Check for the message size against FIFO depth and set the
+        * 'hold bus' bit if it is greater than FIFO depth.
+        */
+-      if ((id->send_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag)
++      if (id->send_count > CDNS_I2C_FIFO_DEPTH)
+               ctrl_reg |= CDNS_I2C_CR_HOLD;
+-      else
+-              ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD;
+-
+       cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
+ 
+       /* Clear the interrupts in interrupt status register. */
+diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
+index 19556c62c7ea..d14ad523f96c 100644
+--- a/drivers/infiniband/sw/rdmavt/qp.c
++++ b/drivers/infiniband/sw/rdmavt/qp.c
+@@ -898,8 +898,6 @@ static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
+       qp->s_tail_ack_queue = 0;
+       qp->s_acked_ack_queue = 0;
+       qp->s_num_rd_atomic = 0;
+-      if (qp->r_rq.kwq)
+-              qp->r_rq.kwq->count = qp->r_rq.size;
+       qp->r_sge.num_sge = 0;
+       atomic_set(&qp->s_reserved_used, 0);
+ }
+@@ -2352,31 +2350,6 @@ bad_lkey:
+       return 0;
+ }
+ 
+-/**
+- * get_count - count numbers of request work queue entries
+- * in circular buffer
+- * @rq: data structure for request queue entry
+- * @tail: tail indices of the circular buffer
+- * @head: head indices of the circular buffer
+- *
+- * Return - total number of entries in the circular buffer
+- */
+-static u32 get_count(struct rvt_rq *rq, u32 tail, u32 head)
+-{
+-      u32 count;
+-
+-      count = head;
+-
+-      if (count >= rq->size)
+-              count = 0;
+-      if (count < tail)
+-              count += rq->size - tail;
+-      else
+-              count -= tail;
+-
+-      return count;
+-}
+-
+ /**
+  * get_rvt_head - get head indices of the circular buffer
+  * @rq: data structure for request queue entry
+@@ -2451,7 +2424,7 @@ int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only)
+ 
+       if (kwq->count < RVT_RWQ_COUNT_THRESHOLD) {
+               head = get_rvt_head(rq, ip);
+-              kwq->count = get_count(rq, tail, head);
++              kwq->count = rvt_get_rq_count(rq, head, tail);
+       }
+       if (unlikely(kwq->count == 0)) {
+               ret = 0;
+@@ -2486,7 +2459,9 @@ int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only)
+                * the number of remaining WQEs.
+                */
+               if (kwq->count < srq->limit) {
+-                      kwq->count = get_count(rq, tail, get_rvt_head(rq, ip));
++                      kwq->count =
++                              rvt_get_rq_count(rq,
++                                               get_rvt_head(rq, ip), tail);
+                       if (kwq->count < srq->limit) {
+                               struct ib_event ev;
+ 
+diff --git a/drivers/infiniband/sw/rdmavt/rc.c b/drivers/infiniband/sw/rdmavt/rc.c
+index 890d7b760d2e..27415185d862 100644
+--- a/drivers/infiniband/sw/rdmavt/rc.c
++++ b/drivers/infiniband/sw/rdmavt/rc.c
+@@ -127,9 +127,7 @@ __be32 rvt_compute_aeth(struct rvt_qp *qp)
+                        * not atomic, which is OK, since the fuzziness is
+                        * resolved as further ACKs go out.
+                        */
+-                      credits = head - tail;
+-                      if ((int)credits < 0)
+-                              credits += qp->r_rq.size;
++                      credits = rvt_get_rq_count(&qp->r_rq, head, tail);
+               }
+               /*
+                * Binary search the credit table to find the code to
+diff --git a/drivers/media/pci/cx23885/cx23888-ir.c b/drivers/media/pci/cx23885/cx23888-ir.c
+index e880afe37f15..d59ca3601785 100644
+--- a/drivers/media/pci/cx23885/cx23888-ir.c
++++ b/drivers/media/pci/cx23885/cx23888-ir.c
+@@ -1167,8 +1167,11 @@ int cx23888_ir_probe(struct cx23885_dev *dev)
+               return -ENOMEM;
+ 
+       spin_lock_init(&state->rx_kfifo_lock);
+-      if (kfifo_alloc(&state->rx_kfifo, CX23888_IR_RX_KFIFO_SIZE, GFP_KERNEL))
++      if (kfifo_alloc(&state->rx_kfifo, CX23888_IR_RX_KFIFO_SIZE,
++                      GFP_KERNEL)) {
++              kfree(state);
+               return -ENOMEM;
++      }
+ 
+       state->dev = dev;
+       sd = &state->sd;
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
+index 506170fe3a8b..049f1bbe27ab 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
+@@ -2441,6 +2441,7 @@ static inline int uld_send(struct adapter *adap, struct sk_buff *skb,
+       txq_info = adap->sge.uld_txq_info[tx_uld_type];
+       if (unlikely(!txq_info)) {
+               WARN_ON(true);
++              kfree_skb(skb);
+               return NET_XMIT_DROP;
+       }
+ 
+diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
+index 2814b96751b4..01ae113f122a 100644
+--- a/drivers/net/ethernet/cortina/gemini.c
++++ b/drivers/net/ethernet/cortina/gemini.c
+@@ -2445,6 +2445,7 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
+       port->reset = devm_reset_control_get_exclusive(dev, NULL);
+       if (IS_ERR(port->reset)) {
+               dev_err(dev, "no reset\n");
++              clk_disable_unprepare(port->pclk);
+               return PTR_ERR(port->reset);
+       }
+       reset_control_reset(port->reset);
+@@ -2500,8 +2501,10 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
+                                       IRQF_SHARED,
+                                       port_names[port->id],
+                                       port);
+-      if (ret)
++      if (ret) {
++              clk_disable_unprepare(port->pclk);
+               return ret;
++      }
+ 
+       ret = register_netdev(netdev);
+       if (!ret) {
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+index 506381224559..a8ce6ca0f508 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -4014,8 +4014,8 @@ static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
+               return;
+ 
+       if (linkup) {
+-              netif_carrier_on(netdev);
+               netif_tx_wake_all_queues(netdev);
++              netif_carrier_on(netdev);
+               if (netif_msg_link(handle))
+                       netdev_info(netdev, "link up\n");
+       } else {
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+index d4652dea4569..6c3d13110993 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -5627,9 +5627,9 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
+       /* to avoid rule conflict, when user configure rule by ethtool,
+        * we need to clear all arfs rules
+        */
++      spin_lock_bh(&hdev->fd_rule_lock);
+       hclge_clear_arfs_rules(handle);
+ 
+-      spin_lock_bh(&hdev->fd_rule_lock);
+       ret = hclge_fd_config_rule(hdev, rule);
+ 
+       spin_unlock_bh(&hdev->fd_rule_lock);
+@@ -5672,6 +5672,7 @@ static int hclge_del_fd_entry(struct hnae3_handle *handle,
+       return ret;
+ }
+ 
++/* make sure being called after lock up with fd_rule_lock */
+ static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
+                                    bool clear_list)
+ {
+@@ -5684,7 +5685,6 @@ static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
+       if (!hnae3_dev_fd_supported(hdev))
+               return;
+ 
+-      spin_lock_bh(&hdev->fd_rule_lock);
+       for_each_set_bit(location, hdev->fd_bmap,
+                        hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
+               hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
+@@ -5701,8 +5701,6 @@ static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
+               bitmap_zero(hdev->fd_bmap,
+                           hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
+       }
+-
+-      spin_unlock_bh(&hdev->fd_rule_lock);
+ }
+ 
+ static int hclge_restore_fd_entries(struct hnae3_handle *handle)
+@@ -6069,7 +6067,7 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
+                                     u16 flow_id, struct flow_keys *fkeys)
+ {
+       struct hclge_vport *vport = hclge_get_vport(handle);
+-      struct hclge_fd_rule_tuples new_tuples;
++      struct hclge_fd_rule_tuples new_tuples = {};
+       struct hclge_dev *hdev = vport->back;
+       struct hclge_fd_rule *rule;
+       u16 tmp_queue_id;
+@@ -6079,20 +6077,18 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
+       if (!hnae3_dev_fd_supported(hdev))
+               return -EOPNOTSUPP;
+ 
+-      memset(&new_tuples, 0, sizeof(new_tuples));
+-      hclge_fd_get_flow_tuples(fkeys, &new_tuples);
+-
+-      spin_lock_bh(&hdev->fd_rule_lock);
+-
+       /* when there is already fd rule existed add by user,
+        * arfs should not work
+        */
++      spin_lock_bh(&hdev->fd_rule_lock);
+       if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
+               spin_unlock_bh(&hdev->fd_rule_lock);
+ 
+               return -EOPNOTSUPP;
+       }
+ 
++      hclge_fd_get_flow_tuples(fkeys, &new_tuples);
++
+       /* check is there flow director filter existed for this flow,
+        * if not, create a new filter for it;
+        * if filter exist with different queue id, modify the filter;
+@@ -6177,6 +6173,7 @@ static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
+ #endif
+ }
+ 
++/* make sure being called after lock up with fd_rule_lock */
+ static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
+ {
+ #ifdef CONFIG_RFS_ACCEL
+@@ -6221,10 +6218,14 @@ static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
+ 
+       hdev->fd_en = enable;
+       clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
+-      if (!enable)
++
++      if (!enable) {
++              spin_lock_bh(&hdev->fd_rule_lock);
+               hclge_del_all_fd_entries(handle, clear);
+-      else
++              spin_unlock_bh(&hdev->fd_rule_lock);
++      } else {
+               hclge_restore_fd_entries(handle);
++      }
+ }
+ 
+ static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
+@@ -6678,8 +6679,9 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
+       int i;
+ 
+       set_bit(HCLGE_STATE_DOWN, &hdev->state);
+-
++      spin_lock_bh(&hdev->fd_rule_lock);
+       hclge_clear_arfs_rules(handle);
++      spin_unlock_bh(&hdev->fd_rule_lock);
+ 
+       /* If it is not PF reset, the firmware will disable the MAC,
+        * so it only need to stop phy here.
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index d58597360699..2d20a48f0ba0 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -3086,7 +3086,7 @@ req_rx_irq_failed:
+ req_tx_irq_failed:
+       for (j = 0; j < i; j++) {
+               free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
+-              irq_dispose_mapping(adapter->rx_scrq[j]->irq);
++              irq_dispose_mapping(adapter->tx_scrq[j]->irq);
+       }
+       release_sub_crqs(adapter, 1);
+       return rc;
+diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
+index 87c2e8de6102..942646fb2256 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/main.c
++++ b/drivers/net/ethernet/mellanox/mlx4/main.c
+@@ -4354,12 +4354,14 @@ end:
+ static void mlx4_shutdown(struct pci_dev *pdev)
+ {
+       struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
++      struct mlx4_dev *dev = persist->dev;
+ 
+       mlx4_info(persist->dev, "mlx4_shutdown was called\n");
+       mutex_lock(&persist->interface_state_mutex);
+       if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
+               mlx4_unload_one(pdev);
+       mutex_unlock(&persist->interface_state_mutex);
++      mlx4_pci_disable_device(dev);
+ }
+ 
+ static const struct pci_error_handlers mlx4_err_handler = {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index c133beb6a7a5..ee0d78f801af 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -432,7 +432,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
+               err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq,
+                                       &rq->wq_ctrl);
+               if (err)
+-                      return err;
++                      goto err_rq_wq_destroy;
+ 
+               rq->mpwqe.wq.db = &rq->mpwqe.wq.db[MLX5_RCV_DBR];
+ 
+@@ -485,7 +485,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
+               err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq,
+                                        &rq->wq_ctrl);
+               if (err)
+-                      return err;
++                      goto err_rq_wq_destroy;
+ 
+               rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR];
+ 
+@@ -3038,6 +3038,25 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv)
+       priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE;
+ }
+ 
++static void mlx5e_modify_admin_state(struct mlx5_core_dev *mdev,
++                                   enum mlx5_port_status state)
++{
++      struct mlx5_eswitch *esw = mdev->priv.eswitch;
++      int vport_admin_state;
++
++      mlx5_set_port_admin_status(mdev, state);
++
++      if (!MLX5_ESWITCH_MANAGER(mdev) ||  mlx5_eswitch_mode(esw) == MLX5_ESWITCH_OFFLOADS)
++              return;
++
++      if (state == MLX5_PORT_UP)
++              vport_admin_state = MLX5_VPORT_ADMIN_STATE_AUTO;
++      else
++              vport_admin_state = MLX5_VPORT_ADMIN_STATE_DOWN;
++
++      mlx5_eswitch_set_vport_state(esw, MLX5_VPORT_UPLINK, vport_admin_state);
++}
++
+ int mlx5e_open_locked(struct net_device *netdev)
+ {
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+@@ -3070,7 +3089,7 @@ int mlx5e_open(struct net_device *netdev)
+       mutex_lock(&priv->state_lock);
+       err = mlx5e_open_locked(netdev);
+       if (!err)
+-              mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP);
++              mlx5e_modify_admin_state(priv->mdev, MLX5_PORT_UP);
+       mutex_unlock(&priv->state_lock);
+ 
+       if (mlx5_vxlan_allowed(priv->mdev->vxlan))
+@@ -3107,7 +3126,7 @@ int mlx5e_close(struct net_device *netdev)
+               return -ENODEV;
+ 
+       mutex_lock(&priv->state_lock);
+-      mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_DOWN);
++      mlx5e_modify_admin_state(priv->mdev, MLX5_PORT_DOWN);
+       err = mlx5e_close_locked(netdev);
+       mutex_unlock(&priv->state_lock);
+ 
+@@ -5172,7 +5191,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
+ 
+       /* Marking the link as currently not needed by the Driver */
+       if (!netif_running(netdev))
+-              mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN);
++              mlx5e_modify_admin_state(mdev, MLX5_PORT_DOWN);
+ 
+       mlx5e_set_netdev_mtu_boundaries(priv);
+       mlx5e_set_dev_port_mtu(priv);
+@@ -5356,6 +5375,8 @@ err_cleanup_tx:
+       profile->cleanup_tx(priv);
+ 
+ out:
++      set_bit(MLX5E_STATE_DESTROYING, &priv->state);
++      cancel_work_sync(&priv->update_stats_work);
+       return err;
+ }
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+index 9b232ef36d53..88b51f64a64e 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+@@ -1736,6 +1736,8 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
+       INIT_WORK(&rpriv->uplink_priv.reoffload_flows_work,
+                 mlx5e_tc_reoffload_flows_work);
+ 
++      mlx5_modify_vport_admin_state(mdev, MLX5_VPORT_STATE_OP_MOD_UPLINK,
++                                    0, 0, MLX5_VPORT_ADMIN_STATE_AUTO);
+       mlx5_lag_add(mdev, netdev);
+       priv->events_nb.notifier_call = uplink_rep_async_event;
+       mlx5_notifier_register(mdev, &priv->events_nb);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+index c6ed4b7f4f97..8e6ab8201939 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+@@ -1919,7 +1919,7 @@ abort:
+               mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+               mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
+       }
+-
++      esw_destroy_tsar(esw);
+       return err;
+ }
+ 
+@@ -2094,6 +2094,8 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
+                                u16 vport, int link_state)
+ {
+       struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
++      int opmod = MLX5_VPORT_STATE_OP_MOD_ESW_VPORT;
++      int other_vport = 1;
+       int err = 0;
+ 
+       if (!ESW_ALLOWED(esw))
+@@ -2101,15 +2103,17 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
+       if (IS_ERR(evport))
+               return PTR_ERR(evport);
+ 
++      if (vport == MLX5_VPORT_UPLINK) {
++              opmod = MLX5_VPORT_STATE_OP_MOD_UPLINK;
++              other_vport = 0;
++              vport = 0;
++      }
+       mutex_lock(&esw->state_lock);
+ 
+-      err = mlx5_modify_vport_admin_state(esw->dev,
+-                                          MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
+-                                          vport, 1, link_state);
++      err = mlx5_modify_vport_admin_state(esw->dev, opmod, vport, other_vport, link_state);
+       if (err) {
+-              mlx5_core_warn(esw->dev,
+-                             "Failed to set vport %d link state, err = %d",
+-                             vport, err);
++              mlx5_core_warn(esw->dev, "Failed to set vport %d link state, opmod = %d, err = %d",
++                             vport, opmod, err);
+               goto unlock;
+       }
+ 
+@@ -2151,8 +2155,6 @@ int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
+       struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
+       int err = 0;
+ 
+-      if (!ESW_ALLOWED(esw))
+-              return -EPERM;
+       if (IS_ERR(evport))
+               return PTR_ERR(evport);
+       if (vlan > 4095 || qos > 7)
+@@ -2180,6 +2182,9 @@ int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
+       u8 set_flags = 0;
+       int err;
+ 
++      if (!ESW_ALLOWED(esw))
++              return -EPERM;
++
+       if (vlan || qos)
+               set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT;
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+index 6bd6f5895244..0ddbae1e64fa 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+@@ -606,6 +606,8 @@ static inline int  mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode) { ret
+ static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw) {}
+ static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; }
+ static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
++static inline
++int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; }
+ static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
+ {
+       return ERR_PTR(-EOPNOTSUPP);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+index 43f97601b500..75fc283cacc3 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+@@ -388,10 +388,31 @@ static int mlx5_ptp_enable(struct ptp_clock_info *ptp,
+       return 0;
+ }
+ 
++enum {
++      MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN = BIT(0),
++      MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT = BIT(1),
++};
++
+ static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
+                          enum ptp_pin_function func, unsigned int chan)
+ {
+-      return (func == PTP_PF_PHYSYNC) ? -EOPNOTSUPP : 0;
++      struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
++                                              ptp_info);
++
++      switch (func) {
++      case PTP_PF_NONE:
++              return 0;
++      case PTP_PF_EXTTS:
++              return !(clock->pps_info.pin_caps[pin] &
++                       MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN);
++      case PTP_PF_PEROUT:
++              return !(clock->pps_info.pin_caps[pin] &
++                       MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT);
++      default:
++              return -EOPNOTSUPP;
++      }
++
++      return -EOPNOTSUPP;
+ }
+ 
+ static const struct ptp_clock_info mlx5_ptp_clock_info = {
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
+index 1b204ce30ee4..c7c3fc7d1126 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
+@@ -1577,7 +1577,7 @@ static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
+       err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans,
+                                   bulk_list, cb, cb_priv, tid);
+       if (err) {
+-              kfree(trans);
++              kfree_rcu(trans, rcu);
+               return err;
+       }
+       return 0;
+@@ -1802,11 +1802,13 @@ void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
+                       break;
+               }
+       }
+-      rcu_read_unlock();
+-      if (!found)
++      if (!found) {
++              rcu_read_unlock();
+               goto drop;
++      }
+ 
+       rxl->func(skb, local_port, rxl_item->priv);
++      rcu_read_unlock();
+       return;
+ 
+ drop:
+diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
+index 2761f3a3ae50..56f285985b43 100644
+--- a/drivers/net/ethernet/ni/nixge.c
++++ b/drivers/net/ethernet/ni/nixge.c
+@@ -1318,19 +1318,21 @@ static int nixge_probe(struct platform_device *pdev)
+       netif_napi_add(ndev, &priv->napi, nixge_poll, NAPI_POLL_WEIGHT);
+       err = nixge_of_get_resources(pdev);
+       if (err)
+-              return err;
++              goto free_netdev;
+       __nixge_hw_set_mac_address(ndev);
+ 
+       priv->tx_irq = platform_get_irq_byname(pdev, "tx");
+       if (priv->tx_irq < 0) {
+               netdev_err(ndev, "could not find 'tx' irq");
+-              return priv->tx_irq;
++              err = priv->tx_irq;
++              goto free_netdev;
+       }
+ 
+       priv->rx_irq = platform_get_irq_byname(pdev, "rx");
+       if (priv->rx_irq < 0) {
+               netdev_err(ndev, "could not find 'rx' irq");
+-              return priv->rx_irq;
++              err = priv->rx_irq;
++              goto free_netdev;
+       }
+ 
+       priv->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
+index 8d106063e927..666e43748a5f 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
+@@ -1180,7 +1180,8 @@ static int qed_int_attentions(struct qed_hwfn *p_hwfn)
+                       index, attn_bits, attn_acks, asserted_bits,
+                       deasserted_bits, p_sb_attn_sw->known_attn);
+       } else if (asserted_bits == 0x100) {
+-              DP_INFO(p_hwfn, "MFW indication via attention\n");
++              DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
++                         "MFW indication via attention\n");
+       } else {
+               DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+                          "MFW indication [deassertion]\n");
+diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
+index 3f165c137236..30cdabf64ccc 100644
+--- a/drivers/net/ethernet/renesas/ravb_main.c
++++ b/drivers/net/ethernet/renesas/ravb_main.c
+@@ -1444,6 +1444,7 @@ static void ravb_tx_timeout_work(struct work_struct *work)
+       struct ravb_private *priv = container_of(work, struct ravb_private,
+                                                work);
+       struct net_device *ndev = priv->ndev;
++      int error;
+ 
+       netif_tx_stop_all_queues(ndev);
+ 
+@@ -1452,15 +1453,36 @@ static void ravb_tx_timeout_work(struct work_struct *work)
+               ravb_ptp_stop(ndev);
+ 
+       /* Wait for DMA stopping */
+-      ravb_stop_dma(ndev);
++      if (ravb_stop_dma(ndev)) {
++              /* If ravb_stop_dma() fails, the hardware is still operating
++               * for TX and/or RX. So, this should not call the following
++               * functions because ravb_dmac_init() is possible to fail too.
++               * Also, this should not retry ravb_stop_dma() again and again
++               * here because it's possible to wait forever. So, this just
++               * re-enables the TX and RX and skip the following
++               * re-initialization procedure.
++               */
++              ravb_rcv_snd_enable(ndev);
++              goto out;
++      }
+ 
+       ravb_ring_free(ndev, RAVB_BE);
+       ravb_ring_free(ndev, RAVB_NC);
+ 
+       /* Device init */
+-      ravb_dmac_init(ndev);
++      error = ravb_dmac_init(ndev);
++      if (error) {
++              /* If ravb_dmac_init() fails, descriptors are freed. So, this
++               * should return here to avoid re-enabling the TX and RX in
++               * ravb_emac_init().
++               */
++              netdev_err(ndev, "%s: ravb_dmac_init() failed, error %d\n",
++                         __func__, error);
++              return;
++      }
+       ravb_emac_init(ndev);
+ 
++out:
+       /* Initialise PTP Clock driver */
+       if (priv->chip_id == RCAR_GEN2)
+               ravb_ptp_init(ndev, priv->pdev);
+diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
+index 74849da031fa..66a8b835aa94 100644
+--- a/drivers/net/usb/hso.c
++++ b/drivers/net/usb/hso.c
+@@ -1389,8 +1389,9 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
+       unsigned long flags;
+ 
+       if (old)
+-              hso_dbg(0x16, "Termios called with: cflags new[%d] - old[%d]\n",
+-                      tty->termios.c_cflag, old->c_cflag);
++              hso_dbg(0x16, "Termios called with: cflags new[%u] - old[%u]\n",
++                      (unsigned int)tty->termios.c_cflag,
++                      (unsigned int)old->c_cflag);
+ 
+       /* the actual setup */
+       spin_lock_irqsave(&serial->serial_lock, flags);
+diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
+index 0170a441208a..1da99abc6ed1 100644
+--- a/drivers/net/usb/lan78xx.c
++++ b/drivers/net/usb/lan78xx.c
+@@ -3767,6 +3767,11 @@ static int lan78xx_probe(struct usb_interface *intf,
+       netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
+       netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER);
+ 
++      if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
++              ret = -ENODEV;
++              goto out3;
++      }
++
+       dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
+       dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
+       dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
+@@ -3791,6 +3796,7 @@ static int lan78xx_probe(struct usb_interface *intf,
+                       usb_fill_int_urb(dev->urb_intr, dev->udev,
+                                        dev->pipe_intr, buf, maxp,
+                                        intr_complete, dev, period);
++                      dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
+               }
+       }
+ 
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index 03434db36b5c..b49b6f0cee50 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -2863,8 +2863,10 @@ static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all)
+                       if (!do_all && (f->state & (NUD_PERMANENT | NUD_NOARP)))
+                               continue;
+                       /* the all_zeros_mac entry is deleted at vxlan_uninit */
+-                      if (!is_zero_ether_addr(f->eth_addr))
+-                              vxlan_fdb_destroy(vxlan, f, true, true);
++                      if (is_zero_ether_addr(f->eth_addr) &&
++                          f->vni == vxlan->cfg.vni)
++                              continue;
++                      vxlan_fdb_destroy(vxlan, f, true, true);
+               }
+               spin_unlock_bh(&vxlan->hash_lock[h]);
+       }
+diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
+index bf78073ee7fd..e2a83f4cd3bb 100644
+--- a/drivers/net/wan/hdlc_x25.c
++++ b/drivers/net/wan/hdlc_x25.c
+@@ -62,8 +62,10 @@ static int x25_data_indication(struct net_device *dev, struct sk_buff *skb)
+ {
+       unsigned char *ptr;
+ 
+-      if (skb_cow(skb, 1))
++      if (skb_cow(skb, 1)) {
++              kfree_skb(skb);
+               return NET_RX_DROP;
++      }
+ 
+       skb_push(skb, 1);
+       skb_reset_network_header(skb);
+diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
+index 5a6f27298b90..134e4dd916c1 100644
+--- a/drivers/net/wan/lapbether.c
++++ b/drivers/net/wan/lapbether.c
+@@ -128,10 +128,12 @@ static int lapbeth_data_indication(struct net_device *dev, struct sk_buff *skb)
+ {
+       unsigned char *ptr;
+ 
+-      skb_push(skb, 1);
+-
+-      if (skb_cow(skb, 1))
++      if (skb_cow(skb, 1)) {
++              kfree_skb(skb);
+               return NET_RX_DROP;
++      }
++
++      skb_push(skb, 1);
+ 
+       ptr  = skb->data;
+       *ptr = X25_IFACE_DATA;
+diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
+index c415e971735b..004af89a02b8 100644
+--- a/drivers/net/wireless/ath/ath10k/hw.c
++++ b/drivers/net/wireless/ath/ath10k/hw.c
+@@ -1145,6 +1145,7 @@ static bool ath10k_qca99x0_rx_desc_msdu_limit_error(struct htt_rx_desc *rxd)
+ const struct ath10k_hw_ops qca99x0_ops = {
+       .rx_desc_get_l3_pad_bytes = ath10k_qca99x0_rx_desc_get_l3_pad_bytes,
+       .rx_desc_get_msdu_limit_error = ath10k_qca99x0_rx_desc_msdu_limit_error,
++      .is_rssi_enable = ath10k_htt_tx_rssi_enable,
+ };
+ 
+ const struct ath10k_hw_ops qca6174_ops = {
+diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
+index 482c6c8b0fb7..88280057e032 100644
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -63,6 +63,8 @@ module_param_named(max_queues, xennet_max_queues, uint, 0644);
+ MODULE_PARM_DESC(max_queues,
+                "Maximum number of queues per virtual interface");
+ 
++#define XENNET_TIMEOUT  (5 * HZ)
++
+ static const struct ethtool_ops xennet_ethtool_ops;
+ 
+ struct netfront_cb {
+@@ -1334,12 +1336,15 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
+ 
+       netif_carrier_off(netdev);
+ 
+-      xenbus_switch_state(dev, XenbusStateInitialising);
+-      wait_event(module_wq,
+-                 xenbus_read_driver_state(dev->otherend) !=
+-                 XenbusStateClosed &&
+-                 xenbus_read_driver_state(dev->otherend) !=
+-                 XenbusStateUnknown);
++      do {
++              xenbus_switch_state(dev, XenbusStateInitialising);
++              err = wait_event_timeout(module_wq,
++                               xenbus_read_driver_state(dev->otherend) !=
++                               XenbusStateClosed &&
++                               xenbus_read_driver_state(dev->otherend) !=
++                               XenbusStateUnknown, XENNET_TIMEOUT);
++      } while (!err);
++
+       return netdev;
+ 
+  exit:
+@@ -2139,28 +2144,43 @@ static const struct attribute_group xennet_dev_group = {
+ };
+ #endif /* CONFIG_SYSFS */
+ 
+-static int xennet_remove(struct xenbus_device *dev)
++static void xennet_bus_close(struct xenbus_device *dev)
+ {
+-      struct netfront_info *info = dev_get_drvdata(&dev->dev);
+-
+-      dev_dbg(&dev->dev, "%s\n", dev->nodename);
++      int ret;
+ 
+-      if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) {
++      if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed)
++              return;
++      do {
+               xenbus_switch_state(dev, XenbusStateClosing);
+-              wait_event(module_wq,
+-                         xenbus_read_driver_state(dev->otherend) ==
+-                         XenbusStateClosing ||
+-                         xenbus_read_driver_state(dev->otherend) ==
+-                         XenbusStateUnknown);
++              ret = wait_event_timeout(module_wq,
++                                 xenbus_read_driver_state(dev->otherend) ==
++                                 XenbusStateClosing ||
++                                 xenbus_read_driver_state(dev->otherend) ==
++                                 XenbusStateClosed ||
++                                 xenbus_read_driver_state(dev->otherend) ==
++                                 XenbusStateUnknown,
++                                 XENNET_TIMEOUT);
++      } while (!ret);
++
++      if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed)
++              return;
+ 
++      do {
+               xenbus_switch_state(dev, XenbusStateClosed);
+-              wait_event(module_wq,
+-                         xenbus_read_driver_state(dev->otherend) ==
+-                         XenbusStateClosed ||
+-                         xenbus_read_driver_state(dev->otherend) ==
+-                         XenbusStateUnknown);
+-      }
++              ret = wait_event_timeout(module_wq,
++                                 xenbus_read_driver_state(dev->otherend) ==
++                                 XenbusStateClosed ||
++                                 xenbus_read_driver_state(dev->otherend) ==
++                                 XenbusStateUnknown,
++                                 XENNET_TIMEOUT);
++      } while (!ret);
++}
++
++static int xennet_remove(struct xenbus_device *dev)
++{
++      struct netfront_info *info = dev_get_drvdata(&dev->dev);
+ 
++      xennet_bus_close(dev);
+       xennet_disconnect_backend(info);
+ 
+       if (info->netdev->reg_state == NETREG_REGISTERED)
+diff --git a/drivers/nfc/s3fwrn5/core.c b/drivers/nfc/s3fwrn5/core.c
+index 91d4d5b28a7d..ba6c486d6465 100644
+--- a/drivers/nfc/s3fwrn5/core.c
++++ b/drivers/nfc/s3fwrn5/core.c
+@@ -198,6 +198,7 @@ int s3fwrn5_recv_frame(struct nci_dev *ndev, struct sk_buff *skb,
+       case S3FWRN5_MODE_FW:
+               return s3fwrn5_fw_recv_frame(ndev, skb);
+       default:
++              kfree_skb(skb);
+               return -ENODEV;
+       }
+ }
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
+index 7900814355c2..53e113a18a54 100644
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -1319,6 +1319,9 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
+               }
+       }
+ 
++      /* Set 10 seconds timeout for icresp recvmsg */
++      queue->sock->sk->sk_rcvtimeo = 10 * HZ;
++
+       queue->sock->sk->sk_allocation = GFP_ATOMIC;
+       if (!qid)
+               n = 0;
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index a1ec8a1977d3..4ac4b28e0ebb 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -2330,6 +2330,19 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s);
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s);
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s);
+ 
++static void quirk_disable_aspm_l0s_l1(struct pci_dev *dev)
++{
++      pci_info(dev, "Disabling ASPM L0s/L1\n");
++      pci_disable_link_state(dev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
++}
++
++/*
++ * ASM1083/1085 PCIe-PCI bridge devices cause AER timeout errors on the
++ * upstream PCIe root port when ASPM is enabled. At least L0s mode is affected;
++ * disable both L0s and L1 for now to be safe.
++ */
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASMEDIA, 0x1080, quirk_disable_aspm_l0s_l1);
++
+ /*
+  * Some Pericom PCIe-to-PCI bridges in reverse mode need the PCIe Retrain
+  * Link bit cleared after starting the link retrain process to allow this
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index 206c9f53e9e7..e6944e1cba2b 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -568,6 +568,15 @@ static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
+       scsi_del_cmd_from_list(cmd);
+ }
+ 
++static void scsi_run_queue_async(struct scsi_device *sdev)
++{
++      if (scsi_target(sdev)->single_lun ||
++          !list_empty(&sdev->host->starved_list))
++              kblockd_schedule_work(&sdev->requeue_work);
++      else
++              blk_mq_run_hw_queues(sdev->request_queue, true);
++}
++
+ /* Returns false when no more bytes to process, true if there are more */
+ static bool scsi_end_request(struct request *req, blk_status_t error,
+               unsigned int bytes)
+@@ -612,11 +621,7 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
+ 
+       __blk_mq_end_request(req, error);
+ 
+-      if (scsi_target(sdev)->single_lun ||
+-          !list_empty(&sdev->host->starved_list))
+-              kblockd_schedule_work(&sdev->requeue_work);
+-      else
+-              blk_mq_run_hw_queues(q, true);
++      scsi_run_queue_async(sdev);
+ 
+       percpu_ref_put(&q->q_usage_counter);
+       return false;
+@@ -1729,6 +1734,7 @@ out_put_budget:
+                */
+               if (req->rq_flags & RQF_DONTPREP)
+                       scsi_mq_uninit_cmd(cmd);
++              scsi_run_queue_async(sdev);
+               break;
+       }
+       return ret;
+diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
+index 88ce114790d7..f63f84a25725 100644
+--- a/drivers/vhost/scsi.c
++++ b/drivers/vhost/scsi.c
+@@ -1215,7 +1215,7 @@ vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
+                       continue;
+               }
+ 
+-              switch (v_req.type) {
++              switch (vhost32_to_cpu(vq, v_req.type)) {
+               case VIRTIO_SCSI_T_TMF:
+                       vc.req = &v_req.tmf;
+                       vc.req_size = sizeof(struct virtio_scsi_ctrl_tmf_req);
+diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
+index 7aaf150f89ba..1e444826a66e 100644
+--- a/drivers/virtio/virtio_balloon.c
++++ b/drivers/virtio/virtio_balloon.c
+@@ -529,10 +529,14 @@ static int init_vqs(struct virtio_balloon *vb)
+ static u32 virtio_balloon_cmd_id_received(struct virtio_balloon *vb)
+ {
+       if (test_and_clear_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID,
+-                             &vb->config_read_bitmap))
++                             &vb->config_read_bitmap)) {
+               virtio_cread(vb->vdev, struct virtio_balloon_config,
+                            free_page_report_cmd_id,
+                            &vb->cmd_id_received_cache);
++              /* Legacy balloon config space is LE, unlike all other devices. */
++              if (!virtio_has_feature(vb->vdev, VIRTIO_F_VERSION_1))
++                      vb->cmd_id_received_cache = le32_to_cpu((__force __le32)vb->cmd_id_received_cache);
++      }
+ 
+       return vb->cmd_id_received_cache;
+ }
+diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
+index acd859ea09d4..aba56077cfda 100644
+--- a/include/linux/mlx5/mlx5_ifc.h
++++ b/include/linux/mlx5/mlx5_ifc.h
+@@ -4177,6 +4177,7 @@ struct mlx5_ifc_query_vport_state_out_bits {
+ enum {
+       MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT  = 0x0,
+       MLX5_VPORT_STATE_OP_MOD_ESW_VPORT   = 0x1,
++      MLX5_VPORT_STATE_OP_MOD_UPLINK      = 0x2,
+ };
+ 
+ struct mlx5_ifc_arm_monitor_counter_in_bits {
+diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
+index beb9a9da1699..c5bf21261bb1 100644
+--- a/include/linux/rhashtable.h
++++ b/include/linux/rhashtable.h
+@@ -349,11 +349,11 @@ static inline void rht_unlock(struct bucket_table *tbl,
+       local_bh_enable();
+ }
+ 
+-static inline struct rhash_head __rcu *__rht_ptr(
+-      struct rhash_lock_head *const *bkt)
++static inline struct rhash_head *__rht_ptr(
++      struct rhash_lock_head *p, struct rhash_lock_head __rcu *const *bkt)
+ {
+-      return (struct rhash_head __rcu *)
+-              ((unsigned long)*bkt & ~BIT(0) ?:
++      return (struct rhash_head *)
++              ((unsigned long)p & ~BIT(0) ?:
+                (unsigned long)RHT_NULLS_MARKER(bkt));
+ }
+ 
+@@ -365,25 +365,26 @@ static inline struct rhash_head __rcu *__rht_ptr(
+  *            access is guaranteed, such as when destroying the table.
+  */
+ static inline struct rhash_head *rht_ptr_rcu(
+-      struct rhash_lock_head *const *bkt)
++      struct rhash_lock_head *const *p)
+ {
+-      struct rhash_head __rcu *p = __rht_ptr(bkt);
+-
+-      return rcu_dereference(p);
++      struct rhash_lock_head __rcu *const *bkt = (void *)p;
++      return __rht_ptr(rcu_dereference(*bkt), bkt);
+ }
+ 
+ static inline struct rhash_head *rht_ptr(
+-      struct rhash_lock_head *const *bkt,
++      struct rhash_lock_head *const *p,
+       struct bucket_table *tbl,
+       unsigned int hash)
+ {
+-      return rht_dereference_bucket(__rht_ptr(bkt), tbl, hash);
++      struct rhash_lock_head __rcu *const *bkt = (void *)p;
++      return __rht_ptr(rht_dereference_bucket(*bkt, tbl, hash), bkt);
+ }
+ 
+ static inline struct rhash_head *rht_ptr_exclusive(
+-      struct rhash_lock_head *const *bkt)
++      struct rhash_lock_head *const *p)
+ {
+-      return rcu_dereference_protected(__rht_ptr(bkt), 1);
++      struct rhash_lock_head __rcu *const *bkt = (void *)p;
++      return __rht_ptr(rcu_dereference_protected(*bkt, 1), bkt);
+ }
+ 
+ static inline void rht_assign_locked(struct rhash_lock_head **bkt,
+diff --git a/include/net/xfrm.h b/include/net/xfrm.h
+index fb391c00c19a..12aa6e15e43f 100644
+--- a/include/net/xfrm.h
++++ b/include/net/xfrm.h
+@@ -945,7 +945,7 @@ struct xfrm_dst {
+ static inline struct dst_entry *xfrm_dst_path(const struct dst_entry *dst)
+ {
+ #ifdef CONFIG_XFRM
+-      if (dst->xfrm) {
++      if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) {
+               const struct xfrm_dst *xdst = (const struct xfrm_dst *) dst;
+ 
+               return xdst->path;
+@@ -957,7 +957,7 @@ static inline struct dst_entry *xfrm_dst_path(const struct dst_entry *dst)
+ static inline struct dst_entry *xfrm_dst_child(const struct dst_entry *dst)
+ {
+ #ifdef CONFIG_XFRM
+-      if (dst->xfrm) {
++      if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) {
+               struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
+               return xdst->child;
+       }
+@@ -1636,13 +1636,16 @@ int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
+                    void *);
+ void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net);
+ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl);
+-struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id,
+-                                        u8 type, int dir,
++struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net,
++                                        const struct xfrm_mark *mark,
++                                        u32 if_id, u8 type, int dir,
+                                         struct xfrm_selector *sel,
+                                         struct xfrm_sec_ctx *ctx, int delete,
+                                         int *err);
+-struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u32 if_id, u8,
+-                                   int dir, u32 id, int delete, int *err);
++struct xfrm_policy *xfrm_policy_byid(struct net *net,
++                                   const struct xfrm_mark *mark, u32 if_id,
++                                   u8 type, int dir, u32 id, int delete,
++                                   int *err);
+ int xfrm_policy_flush(struct net *net, u8 type, bool task_valid);
+ void xfrm_policy_hash_rebuild(struct net *net);
+ u32 xfrm_get_acqseq(void);
+diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
+index b550ae89bf85..6dd3b5284fd1 100644
+--- a/include/rdma/rdmavt_qp.h
++++ b/include/rdma/rdmavt_qp.h
+@@ -278,6 +278,25 @@ struct rvt_rq {
+       spinlock_t lock ____cacheline_aligned_in_smp;
+ };
+ 
++/**
++ * rvt_get_rq_count - count numbers of request work queue entries
++ * in circular buffer
++ * @rq: data structure for request queue entry
++ * @head: head indices of the circular buffer
++ * @tail: tail indices of the circular buffer
++ *
++ * Return - total number of entries in the Receive Queue
++ */
++
++static inline u32 rvt_get_rq_count(struct rvt_rq *rq, u32 head, u32 tail)
++{
++      u32 count = head - tail;
++
++      if ((s32)count < 0)
++              count += rq->size;
++      return count;
++}
++
+ /*
+  * This structure holds the information that the send tasklet needs
+  * to send a RDMA read response or atomic operation.
+diff --git a/include/uapi/linux/wireless.h b/include/uapi/linux/wireless.h
+index 86eca3208b6b..a2c006a364e0 100644
+--- a/include/uapi/linux/wireless.h
++++ b/include/uapi/linux/wireless.h
+@@ -74,6 +74,8 @@
+ #include <linux/socket.h>             /* for "struct sockaddr" et al  */
+ #include <linux/if.h>                 /* for IFNAMSIZ and co... */
+ 
++#include <stddef.h>                     /* for offsetof */
++
+ /***************************** VERSION *****************************/
+ /*
+  * This constant is used to know the availability of the wireless
+@@ -1090,8 +1092,7 @@ struct iw_event {
+ /* iw_point events are special. First, the payload (extra data) come at
+  * the end of the event, so they are bigger than IW_EV_POINT_LEN. Second,
+  * we omit the pointer, so start at an offset. */
+-#define IW_EV_POINT_OFF (((char *) &(((struct iw_point *) NULL)->length)) - \
+-                        (char *) NULL)
++#define IW_EV_POINT_OFF offsetof(struct iw_point, length)
+ #define IW_EV_POINT_LEN       (IW_EV_LCP_LEN + sizeof(struct iw_point) - \
+                        IW_EV_POINT_OFF)
+ 
+diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
+index 22066a62c8c9..039d64b1bfb7 100644
+--- a/kernel/bpf/hashtab.c
++++ b/kernel/bpf/hashtab.c
+@@ -675,15 +675,20 @@ static void htab_elem_free_rcu(struct rcu_head *head)
+       preempt_enable();
+ }
+ 
+-static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
++static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l)
+ {
+       struct bpf_map *map = &htab->map;
++      void *ptr;
+ 
+       if (map->ops->map_fd_put_ptr) {
+-              void *ptr = fd_htab_map_get_ptr(map, l);
+-
++              ptr = fd_htab_map_get_ptr(map, l);
+               map->ops->map_fd_put_ptr(ptr);
+       }
++}
++
++static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
++{
++      htab_put_fd_value(htab, l);
+ 
+       if (htab_is_prealloc(htab)) {
+               __pcpu_freelist_push(&htab->freelist, &l->fnode);
+@@ -735,6 +740,7 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
+                        */
+                       pl_new = this_cpu_ptr(htab->extra_elems);
+                       l_new = *pl_new;
++                      htab_put_fd_value(htab, old_elem);
+                       *pl_new = old_elem;
+               } else {
+                       struct pcpu_freelist_node *l;
+diff --git a/mm/filemap.c b/mm/filemap.c
+index 1f5731768222..18c1f5830074 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -2438,7 +2438,7 @@ static struct file *do_async_mmap_readahead(struct vm_fault *vmf,
+       pgoff_t offset = vmf->pgoff;
+ 
+       /* If we don't want any read-ahead, don't bother */
+-      if (vmf->vma->vm_flags & VM_RAND_READ)
++      if (vmf->vma->vm_flags & VM_RAND_READ || !ra->ra_pages)
+               return fpin;
+       if (ra->mmap_miss > 0)
+               ra->mmap_miss--;
+diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
+index 13cd683a658a..3f67803123be 100644
+--- a/net/9p/trans_fd.c
++++ b/net/9p/trans_fd.c
+@@ -362,6 +362,10 @@ static void p9_read_work(struct work_struct *work)
+               if (m->rreq->status == REQ_STATUS_SENT) {
+                       list_del(&m->rreq->req_list);
+                       p9_client_cb(m->client, m->rreq, REQ_STATUS_RCVD);
++              } else if (m->rreq->status == REQ_STATUS_FLSHD) {
++                      /* Ignore replies associated with a cancelled request. */
++                      p9_debug(P9_DEBUG_TRANS,
++                               "Ignore replies associated with a cancelled request\n");
+               } else {
+                       spin_unlock(&m->client->lock);
+                       p9_debug(P9_DEBUG_ERROR,
+@@ -703,11 +707,20 @@ static int p9_fd_cancelled(struct p9_client *client, struct p9_req_t *req)
+ {
+       p9_debug(P9_DEBUG_TRANS, "client %p req %p\n", client, req);
+ 
++      spin_lock(&client->lock);
++      /* Ignore cancelled request if message has been received
++       * before lock.
++       */
++      if (req->status == REQ_STATUS_RCVD) {
++              spin_unlock(&client->lock);
++              return 0;
++      }
++
+       /* we haven't received a response for oldreq,
+        * remove it from the list.
+        */
+-      spin_lock(&client->lock);
+       list_del(&req->req_list);
++      req->status = REQ_STATUS_FLSHD;
+       spin_unlock(&client->lock);
+       p9_req_put(req);
+ 
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 88cd410e5728..44385252d7b6 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -1274,6 +1274,9 @@ static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ {
+       struct discovery_state *d = &hdev->discovery;
+ 
++      if (len > HCI_MAX_AD_LENGTH)
++              return;
++
+       bacpy(&d->last_adv_addr, bdaddr);
+       d->last_adv_addr_type = bdaddr_type;
+       d->last_adv_rssi = rssi;
+@@ -5231,7 +5234,8 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
+ 
+ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
+                              u8 bdaddr_type, bdaddr_t *direct_addr,
+-                             u8 direct_addr_type, s8 rssi, u8 *data, u8 len)
++                             u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
++                             bool ext_adv)
+ {
+       struct discovery_state *d = &hdev->discovery;
+       struct smp_irk *irk;
+@@ -5253,6 +5257,11 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
+               return;
+       }
+ 
++      if (!ext_adv && len > HCI_MAX_AD_LENGTH) {
++              bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes");
++              return;
++      }
++
+       /* Find the end of the data in case the report contains padded zero
+        * bytes at the end causing an invalid length value.
+        *
+@@ -5312,7 +5321,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
+        */
+       conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
+                                                               direct_addr);
+-      if (conn && type == LE_ADV_IND) {
++      if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) {
+               /* Store report for later inclusion by
+                * mgmt_device_connected
+                */
+@@ -5366,7 +5375,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
+        * event or send an immediate device found event if the data
+        * should not be stored for later.
+        */
+-      if (!has_pending_adv_report(hdev)) {
++      if (!ext_adv && !has_pending_adv_report(hdev)) {
+               /* If the report will trigger a SCAN_REQ store it for
+                * later merging.
+                */
+@@ -5401,7 +5410,8 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
+               /* If the new report will trigger a SCAN_REQ store it for
+                * later merging.
+                */
+-              if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
++              if (!ext_adv && (type == LE_ADV_IND ||
++                               type == LE_ADV_SCAN_IND)) {
+                       store_pending_adv_report(hdev, bdaddr, bdaddr_type,
+                                                rssi, flags, data, len);
+                       return;
+@@ -5441,7 +5451,7 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
+                       rssi = ev->data[ev->length];
+                       process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
+                                          ev->bdaddr_type, NULL, 0, rssi,
+-                                         ev->data, ev->length);
++                                         ev->data, ev->length, false);
+               } else {
+                       bt_dev_err(hdev, "Dropping invalid advertising data");
+               }
+@@ -5515,7 +5525,8 @@ static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
+               if (legacy_evt_type != LE_ADV_INVALID) {
+                       process_adv_report(hdev, legacy_evt_type, &ev->bdaddr,
+                                          ev->bdaddr_type, NULL, 0, ev->rssi,
+-                                         ev->data, ev->length);
++                                         ev->data, ev->length,
++                                         !(evt_type & LE_EXT_ADV_LEGACY_PDU));
+               }
+ 
+               ptr += sizeof(*ev) + ev->length;
+@@ -5713,7 +5724,8 @@ static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
+ 
+               process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
+                                  ev->bdaddr_type, &ev->direct_addr,
+-                                 ev->direct_addr_type, ev->rssi, NULL, 0);
++                                 ev->direct_addr_type, ev->rssi, NULL, 0,
++                                 false);
+ 
+               ptr += sizeof(*ev);
+       }
+diff --git a/net/key/af_key.c b/net/key/af_key.c
+index b67ed3a8486c..979c579afc63 100644
+--- a/net/key/af_key.c
++++ b/net/key/af_key.c
+@@ -2400,7 +2400,7 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sa
+                       return err;
+       }
+ 
+-      xp = xfrm_policy_bysel_ctx(net, DUMMY_MARK, 0, XFRM_POLICY_TYPE_MAIN,
++      xp = xfrm_policy_bysel_ctx(net, &dummy_mark, 0, XFRM_POLICY_TYPE_MAIN,
+                                  pol->sadb_x_policy_dir - 1, &sel, pol_ctx,
+                                  1, &err);
+       security_xfrm_policy_free(pol_ctx);
+@@ -2651,7 +2651,7 @@ static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, const struct sadb_
+               return -EINVAL;
+ 
+       delete = (hdr->sadb_msg_type == SADB_X_SPDDELETE2);
+-      xp = xfrm_policy_byid(net, DUMMY_MARK, 0, XFRM_POLICY_TYPE_MAIN,
++      xp = xfrm_policy_byid(net, &dummy_mark, 0, XFRM_POLICY_TYPE_MAIN,
+                             dir, pol->sadb_x_policy_id, delete, &err);
+       if (xp == NULL)
+               return -ENOENT;
+diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
+index 0daaf7e37a21..a9dda5c228f6 100644
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -2140,6 +2140,7 @@ static int ieee80211_leave_mesh(struct wiphy *wiphy, struct net_device *dev)
+       ieee80211_stop_mesh(sdata);
+       mutex_lock(&sdata->local->mtx);
+       ieee80211_vif_release_channel(sdata);
++      kfree(sdata->u.mesh.ie);
+       mutex_unlock(&sdata->local->mtx);
+ 
+       return 0;
+diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
+index 117519bf33d6..aca608ae313f 100644
+--- a/net/mac80211/mesh_pathtbl.c
++++ b/net/mac80211/mesh_pathtbl.c
+@@ -521,6 +521,7 @@ static void mesh_path_free_rcu(struct mesh_table *tbl,
+       del_timer_sync(&mpath->timer);
+       atomic_dec(&sdata->u.mesh.mpaths);
+       atomic_dec(&tbl->entries);
++      mesh_path_flush_pending(mpath);
+       kfree_rcu(mpath, rcu);
+ }
+ 
+diff --git a/net/rds/recv.c b/net/rds/recv.c
+index c8404971d5ab..aba4afe4dfed 100644
+--- a/net/rds/recv.c
++++ b/net/rds/recv.c
+@@ -450,12 +450,13 @@ static int rds_still_queued(struct rds_sock *rs, struct rds_incoming *inc,
+ int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msghdr)
+ {
+       struct rds_notifier *notifier;
+-      struct rds_rdma_notify cmsg = { 0 }; /* fill holes with zero */
++      struct rds_rdma_notify cmsg;
+       unsigned int count = 0, max_messages = ~0U;
+       unsigned long flags;
+       LIST_HEAD(copy);
+       int err = 0;
+ 
++      memset(&cmsg, 0, sizeof(cmsg)); /* fill holes with zero */
+ 
+       /* put_cmsg copies to user space and thus may sleep. We can't do this
+        * with rs_lock held, so first grab as many notifications as we can stuff
+diff --git a/net/sunrpc/sunrpc.h b/net/sunrpc/sunrpc.h
+index c9bacb3c930f..82035fa65b8f 100644
+--- a/net/sunrpc/sunrpc.h
++++ b/net/sunrpc/sunrpc.h
+@@ -56,4 +56,5 @@ int svc_send_common(struct socket *sock, struct xdr_buf *xdr,
+ 
+ int rpc_clients_notifier_register(void);
+ void rpc_clients_notifier_unregister(void);
++void auth_domain_cleanup(void);
+ #endif /* _NET_SUNRPC_SUNRPC_H */
+diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c
+index f9edaa9174a4..236fadc4a439 100644
+--- a/net/sunrpc/sunrpc_syms.c
++++ b/net/sunrpc/sunrpc_syms.c
+@@ -23,6 +23,7 @@
+ #include <linux/sunrpc/rpc_pipe_fs.h>
+ #include <linux/sunrpc/xprtsock.h>
+ 
++#include "sunrpc.h"
+ #include "netns.h"
+ 
+ unsigned int sunrpc_net_id;
+@@ -131,6 +132,7 @@ cleanup_sunrpc(void)
+       unregister_rpc_pipefs();
+       rpc_destroy_mempool();
+       unregister_pernet_subsys(&sunrpc_net_ops);
++      auth_domain_cleanup();
+ #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
+       rpc_unregister_sysctl();
+ #endif
+diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c
+index 550b214cb001..998b196b6176 100644
+--- a/net/sunrpc/svcauth.c
++++ b/net/sunrpc/svcauth.c
+@@ -19,6 +19,10 @@
+ #include <linux/err.h>
+ #include <linux/hash.h>
+ 
++#include <trace/events/sunrpc.h>
++
++#include "sunrpc.h"
++
+ #define RPCDBG_FACILITY       RPCDBG_AUTH
+ 
+ 
+@@ -203,3 +207,26 @@ struct auth_domain *auth_domain_find(char *name)
+       return NULL;
+ }
+ EXPORT_SYMBOL_GPL(auth_domain_find);
++
++/**
++ * auth_domain_cleanup - check that the auth_domain table is empty
++ *
++ * On module unload the auth_domain_table must be empty.  To make it
++ * easier to catch bugs which don't clean up domains properly, we
++ * warn if anything remains in the table at cleanup time.
++ *
++ * Note that we cannot proactively remove the domains at this stage.
++ * The ->release() function might be in a module that has already been
++ * unloaded.
++ */
++
++void auth_domain_cleanup(void)
++{
++      int h;
++      struct auth_domain *hp;
++
++      for (h = 0; h < DN_HASHMAX; h++)
++              hlist_for_each_entry(hp, &auth_domain_table[h], hash)
++                      pr_warn("svc: domain %s still present at module unload.\n",
++                              hp->name);
++}
+diff --git a/net/x25/x25_subr.c b/net/x25/x25_subr.c
+index 8aa415a38814..0285aaa1e93c 100644
+--- a/net/x25/x25_subr.c
++++ b/net/x25/x25_subr.c
+@@ -357,6 +357,12 @@ void x25_disconnect(struct sock *sk, int reason, unsigned char cause,
+               sk->sk_state_change(sk);
+               sock_set_flag(sk, SOCK_DEAD);
+       }
++      if (x25->neighbour) {
++              read_lock_bh(&x25_list_lock);
++              x25_neigh_put(x25->neighbour);
++              x25->neighbour = NULL;
++              read_unlock_bh(&x25_list_lock);
++      }
+ }
+ 
+ /*
+diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
+index 6a1a21ae47bb..2917711ff8ab 100644
+--- a/net/xfrm/xfrm_policy.c
++++ b/net/xfrm/xfrm_policy.c
+@@ -1430,14 +1430,10 @@ static void xfrm_policy_requeue(struct xfrm_policy *old,
+       spin_unlock_bh(&pq->hold_queue.lock);
+ }
+ 
+-static bool xfrm_policy_mark_match(struct xfrm_policy *policy,
+-                                 struct xfrm_policy *pol)
++static inline bool xfrm_policy_mark_match(const struct xfrm_mark *mark,
++                                        struct xfrm_policy *pol)
+ {
+-      if (policy->mark.v == pol->mark.v &&
+-          policy->priority == pol->priority)
+-              return true;
+-
+-      return false;
++      return mark->v == pol->mark.v && mark->m == pol->mark.m;
+ }
+ 
+ static u32 xfrm_pol_bin_key(const void *data, u32 len, u32 seed)
+@@ -1500,7 +1496,7 @@ static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
+               if (pol->type == policy->type &&
+                   pol->if_id == policy->if_id &&
+                   !selector_cmp(&pol->selector, &policy->selector) &&
+-                  xfrm_policy_mark_match(policy, pol) &&
++                  xfrm_policy_mark_match(&policy->mark, pol) &&
+                   xfrm_sec_ctx_match(pol->security, policy->security) &&
+                   !WARN_ON(delpol)) {
+                       delpol = pol;
+@@ -1535,7 +1531,7 @@ static struct xfrm_policy *xfrm_policy_insert_list(struct hlist_head *chain,
+               if (pol->type == policy->type &&
+                   pol->if_id == policy->if_id &&
+                   !selector_cmp(&pol->selector, &policy->selector) &&
+-                  xfrm_policy_mark_match(policy, pol) &&
++                  xfrm_policy_mark_match(&policy->mark, pol) &&
+                   xfrm_sec_ctx_match(pol->security, policy->security) &&
+                   !WARN_ON(delpol)) {
+                       if (excl)
+@@ -1607,9 +1603,8 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
+ EXPORT_SYMBOL(xfrm_policy_insert);
+ 
+ static struct xfrm_policy *
+-__xfrm_policy_bysel_ctx(struct hlist_head *chain, u32 mark, u32 if_id,
+-                      u8 type, int dir,
+-                      struct xfrm_selector *sel,
++__xfrm_policy_bysel_ctx(struct hlist_head *chain, const struct xfrm_mark *mark,
++                      u32 if_id, u8 type, int dir, struct xfrm_selector *sel,
+                       struct xfrm_sec_ctx *ctx)
+ {
+       struct xfrm_policy *pol;
+@@ -1620,7 +1615,7 @@ __xfrm_policy_bysel_ctx(struct hlist_head *chain, u32 mark, u32 if_id,
+       hlist_for_each_entry(pol, chain, bydst) {
+               if (pol->type == type &&
+                   pol->if_id == if_id &&
+-                  (mark & pol->mark.m) == pol->mark.v &&
++                  xfrm_policy_mark_match(mark, pol) &&
+                   !selector_cmp(sel, &pol->selector) &&
+                   xfrm_sec_ctx_match(ctx, pol->security))
+                       return pol;
+@@ -1629,11 +1624,10 @@ __xfrm_policy_bysel_ctx(struct hlist_head *chain, u32 mark, u32 if_id,
+       return NULL;
+ }
+ 
+-struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id,
+-                                        u8 type, int dir,
+-                                        struct xfrm_selector *sel,
+-                                        struct xfrm_sec_ctx *ctx, int delete,
+-                                        int *err)
++struct xfrm_policy *
++xfrm_policy_bysel_ctx(struct net *net, const struct xfrm_mark *mark, u32 if_id,
++                    u8 type, int dir, struct xfrm_selector *sel,
++                    struct xfrm_sec_ctx *ctx, int delete, int *err)
+ {
+       struct xfrm_pol_inexact_bin *bin = NULL;
+       struct xfrm_policy *pol, *ret = NULL;
+@@ -1700,9 +1694,9 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id,
+ }
+ EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
+ 
+-struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u32 if_id,
+-                                   u8 type, int dir, u32 id, int delete,
+-                                   int *err)
++struct xfrm_policy *
++xfrm_policy_byid(struct net *net, const struct xfrm_mark *mark, u32 if_id,
++               u8 type, int dir, u32 id, int delete, int *err)
+ {
+       struct xfrm_policy *pol, *ret;
+       struct hlist_head *chain;
+@@ -1717,8 +1711,7 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u32 if_id,
+       ret = NULL;
+       hlist_for_each_entry(pol, chain, byidx) {
+               if (pol->type == type && pol->index == id &&
+-                  pol->if_id == if_id &&
+-                  (mark & pol->mark.m) == pol->mark.v) {
++                  pol->if_id == if_id && xfrm_policy_mark_match(mark, pol)) {
+                       xfrm_pol_hold(pol);
+                       if (delete) {
+                               *err = security_xfrm_policy_delete(
+diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
+index e6cfaa680ef3..fbb7d9d06478 100644
+--- a/net/xfrm/xfrm_user.c
++++ b/net/xfrm/xfrm_user.c
+@@ -1863,7 +1863,6 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
+       struct km_event c;
+       int delete;
+       struct xfrm_mark m;
+-      u32 mark = xfrm_mark_get(attrs, &m);
+       u32 if_id = 0;
+ 
+       p = nlmsg_data(nlh);
+@@ -1880,8 +1879,11 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
+       if (attrs[XFRMA_IF_ID])
+               if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
+ 
++      xfrm_mark_get(attrs, &m);
++
+       if (p->index)
+-              xp = xfrm_policy_byid(net, mark, if_id, type, p->dir, p->index, delete, &err);
++              xp = xfrm_policy_byid(net, &m, if_id, type, p->dir,
++                                    p->index, delete, &err);
+       else {
+               struct nlattr *rt = attrs[XFRMA_SEC_CTX];
+               struct xfrm_sec_ctx *ctx;
+@@ -1898,8 +1900,8 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
+                       if (err)
+                               return err;
+               }
+-              xp = xfrm_policy_bysel_ctx(net, mark, if_id, type, p->dir, &p->sel,
+-                                         ctx, delete, &err);
++              xp = xfrm_policy_bysel_ctx(net, &m, if_id, type, p->dir,
++                                         &p->sel, ctx, delete, &err);
+               security_xfrm_policy_free(ctx);
+       }
+       if (xp == NULL)
+@@ -2166,7 +2168,6 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
+       u8 type = XFRM_POLICY_TYPE_MAIN;
+       int err = -ENOENT;
+       struct xfrm_mark m;
+-      u32 mark = xfrm_mark_get(attrs, &m);
+       u32 if_id = 0;
+ 
+       err = copy_from_user_policy_type(&type, attrs);
+@@ -2180,8 +2181,11 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
+       if (attrs[XFRMA_IF_ID])
+               if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
+ 
++      xfrm_mark_get(attrs, &m);
++
+       if (p->index)
+-              xp = xfrm_policy_byid(net, mark, if_id, type, p->dir, p->index, 0, &err);
++              xp = xfrm_policy_byid(net, &m, if_id, type, p->dir, p->index,
++                                    0, &err);
+       else {
+               struct nlattr *rt = attrs[XFRMA_SEC_CTX];
+               struct xfrm_sec_ctx *ctx;
+@@ -2198,7 +2202,7 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
+                       if (err)
+                               return err;
+               }
+-              xp = xfrm_policy_bysel_ctx(net, mark, if_id, type, p->dir,
++              xp = xfrm_policy_bysel_ctx(net, &m, if_id, type, p->dir,
+                                          &p->sel, ctx, 0, &err);
+               security_xfrm_policy_free(ctx);
+       }
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 820f534a67b1..908b68fda24c 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -2483,6 +2483,7 @@ static void generic_acomp_notifier_set(struct drm_audio_component *acomp,
+       mutex_lock(&spec->bind_lock);
+       spec->use_acomp_notifier = use_acomp;
+       spec->codec->relaxed_resume = use_acomp;
++      spec->codec->bus->keep_power = 0;
+       /* reprogram each jack detection logic depending on the notifier */
+       if (spec->use_jack_detect) {
+               for (i = 0; i < spec->num_pins; i++)
+@@ -2578,7 +2579,6 @@ static void generic_acomp_init(struct hda_codec *codec,
+       if (!snd_hdac_acomp_init(&codec->bus->core, &spec->drm_audio_ops,
+                                match_bound_vga, 0)) {
+               spec->acomp_registered = true;
+-              codec->bus->keep_power = 0;
+       }
+ }
+ 
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index bf205621d7ac..f50d71da1226 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -5940,6 +5940,16 @@ static void alc_fixup_disable_mic_vref(struct hda_codec *codec,
+               snd_hda_codec_set_pin_target(codec, 0x19, PIN_VREFHIZ);
+ }
+ 
++static void  alc285_fixup_hp_gpio_amp_init(struct hda_codec *codec,
++                            const struct hda_fixup *fix, int action)
++{
++      if (action != HDA_FIXUP_ACT_INIT)
++              return;
++
++      msleep(100);
++      alc_write_coef_idx(codec, 0x65, 0x0);
++}
++
+ /* for hda_fixup_thinkpad_acpi() */
+ #include "thinkpad_helper.c"
+ 
+@@ -6117,8 +6127,10 @@ enum {
+       ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS,
+       ALC269VC_FIXUP_ACER_HEADSET_MIC,
+       ALC269VC_FIXUP_ACER_MIC_NO_PRESENCE,
+-      ALC289_FIXUP_ASUS_G401,
++      ALC289_FIXUP_ASUS_GA401,
++      ALC289_FIXUP_ASUS_GA502,
+       ALC256_FIXUP_ACER_MIC_NO_PRESENCE,
++      ALC285_FIXUP_HP_GPIO_AMP_INIT,
+ };
+ 
+ static const struct hda_fixup alc269_fixups[] = {
+@@ -7328,7 +7340,14 @@ static const struct hda_fixup alc269_fixups[] = {
+               .chained = true,
+               .chain_id = ALC269_FIXUP_HEADSET_MIC
+       },
+-      [ALC289_FIXUP_ASUS_G401] = {
++      [ALC289_FIXUP_ASUS_GA401] = {
++              .type = HDA_FIXUP_PINS,
++              .v.pins = (const struct hda_pintbl[]) {
++                      { 0x19, 0x03a11020 }, /* headset mic with jack detect */
++                      { }
++              },
++      },
++      [ALC289_FIXUP_ASUS_GA502] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x19, 0x03a11020 }, /* headset mic with jack detect */
+@@ -7344,6 +7363,12 @@ static const struct hda_fixup alc269_fixups[] = {
+               .chained = true,
+               .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE
+       },
++      [ALC285_FIXUP_HP_GPIO_AMP_INIT] = {
++              .type = HDA_FIXUP_FUNC,
++              .v.func = alc285_fixup_hp_gpio_amp_init,
++              .chained = true,
++              .chain_id = ALC285_FIXUP_HP_GPIO_LED
++      },
+ };
+ 
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+@@ -7494,7 +7519,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+       SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+       SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
+       SND_PCI_QUIRK(0x103c, 0x8729, "HP", ALC285_FIXUP_HP_GPIO_LED),
+-      SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_LED),
++      SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_AMP_INIT),
+       SND_PCI_QUIRK(0x103c, 0x877a, "HP", ALC285_FIXUP_HP_MUTE_LED),
+       SND_PCI_QUIRK(0x103c, 0x877d, "HP", ALC236_FIXUP_HP_MUTE_LED),
+       SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
+@@ -7526,7 +7551,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+       SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+       SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
+-      SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_G401),
++      SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502),
++      SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401),
+       SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
+       SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
+       SND_PCI_QUIRK(0x1043, 0x834a, "ASUS S101", ALC269_FIXUP_STEREO_DMIC),
+@@ -7546,7 +7572,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+       SND_PCI_QUIRK(0x10cf, 0x1629, "Lifebook U7x7", ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC),
+       SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
+       SND_PCI_QUIRK(0x10ec, 0x10f2, "Intel Reference board", ALC700_FIXUP_INTEL_REFERENCE),
+-      SND_PCI_QUIRK(0x10ec, 0x1230, "Intel Reference board", ALC225_FIXUP_HEADSET_JACK),
++      SND_PCI_QUIRK(0x10ec, 0x1230, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
+       SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-SZ6", ALC269_FIXUP_HEADSET_MODE),
+       SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC),
+       SND_PCI_QUIRK(0x144d, 0xc169, "Samsung Notebook 9 Pen (NP930SBE-K01US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
+diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
+index 086244c70743..d11d00efc574 100644
+--- a/sound/usb/pcm.c
++++ b/sound/usb/pcm.c
+@@ -354,6 +354,7 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
+               ifnum = 0;
+               goto add_sync_ep_from_ifnum;
+       case USB_ID(0x07fd, 0x0008): /* MOTU M Series */
++      case USB_ID(0x31e9, 0x0001): /* Solid State Logic SSL2 */
+       case USB_ID(0x31e9, 0x0002): /* Solid State Logic SSL2+ */
+       case USB_ID(0x0d9a, 0x00df): /* RTX6001 */
+               ep = 0x81;
+diff --git a/tools/lib/traceevent/plugins/Makefile b/tools/lib/traceevent/plugins/Makefile
+index f440989fa55e..23c3535bcbd6 100644
+--- a/tools/lib/traceevent/plugins/Makefile
++++ b/tools/lib/traceevent/plugins/Makefile
+@@ -196,7 +196,7 @@ define do_generate_dynamic_list_file
+       xargs echo "U w W" | tr 'w ' 'W\n' | sort -u | xargs echo`;\
+       if [ "$$symbol_type" = "U W" ];then                             \
+               (echo '{';                                              \
+-              $(NM) -u -D $1 | awk 'NF>1 {print "\t"$$2";"}' | sort -u;\
++              $(NM) -u -D $1 | awk 'NF>1 {sub("@.*", "", $$2); print "\t"$$2";"}' | sort -u;\
+               echo '};';                                              \
+               ) > $2;                                                 \
+       else                                                            \
+diff --git a/tools/perf/arch/arm/util/auxtrace.c b/tools/perf/arch/arm/util/auxtrace.c
+index 0a6e75b8777a..28a5d0c18b1d 100644
+--- a/tools/perf/arch/arm/util/auxtrace.c
++++ b/tools/perf/arch/arm/util/auxtrace.c
+@@ -56,7 +56,7 @@ struct auxtrace_record
+       struct perf_pmu *cs_etm_pmu;
+       struct evsel *evsel;
+       bool found_etm = false;
+-      bool found_spe = false;
++      struct perf_pmu *found_spe = NULL;
+       static struct perf_pmu **arm_spe_pmus = NULL;
+       static int nr_spes = 0;
+       int i = 0;
+@@ -74,12 +74,12 @@ struct auxtrace_record
+                   evsel->core.attr.type == cs_etm_pmu->type)
+                       found_etm = true;
+ 
+-              if (!nr_spes)
++              if (!nr_spes || found_spe)
+                       continue;
+ 
+               for (i = 0; i < nr_spes; i++) {
+                       if (evsel->core.attr.type == arm_spe_pmus[i]->type) {
+-                              found_spe = true;
++                              found_spe = arm_spe_pmus[i];
+                               break;
+                       }
+               }
+@@ -96,7 +96,7 @@ struct auxtrace_record
+ 
+ #if defined(__aarch64__)
+       if (found_spe)
+-              return arm_spe_recording_init(err, arm_spe_pmus[i]);
++              return arm_spe_recording_init(err, found_spe);
+ #endif
+ 
+       /*
+diff --git a/tools/perf/bench/bench.h b/tools/perf/bench/bench.h
+index fddb3ced9db6..4aa6de1aa67d 100644
+--- a/tools/perf/bench/bench.h
++++ b/tools/perf/bench/bench.h
+@@ -2,6 +2,10 @@
+ #ifndef BENCH_H
+ #define BENCH_H
+ 
++#include <sys/time.h>
++
++extern struct timeval bench__start, bench__end, bench__runtime;
++
+ /*
+  * The madvise transparent hugepage constants were added in glibc
+  * 2.13. For compatibility with older versions of glibc, define these
+diff --git a/tools/perf/bench/epoll-ctl.c b/tools/perf/bench/epoll-ctl.c
+index bb617e568841..a7526c05df38 100644
+--- a/tools/perf/bench/epoll-ctl.c
++++ b/tools/perf/bench/epoll-ctl.c
+@@ -35,7 +35,6 @@
+ 
+ static unsigned int nthreads = 0;
+ static unsigned int nsecs    = 8;
+-struct timeval start, end, runtime;
+ static bool done, __verbose, randomize;
+ 
+ /*
+@@ -94,8 +93,8 @@ static void toggle_done(int sig __maybe_unused,
+ {
+       /* inform all threads that we're done for the day */
+       done = true;
+-      gettimeofday(&end, NULL);
+-      timersub(&end, &start, &runtime);
++      gettimeofday(&bench__end, NULL);
++      timersub(&bench__end, &bench__start, &bench__runtime);
+ }
+ 
+ static void nest_epollfd(void)
+@@ -361,7 +360,7 @@ int bench_epoll_ctl(int argc, const char **argv)
+ 
+       threads_starting = nthreads;
+ 
+-      gettimeofday(&start, NULL);
++      gettimeofday(&bench__start, NULL);
+ 
+       do_threads(worker, cpu);
+ 
+diff --git a/tools/perf/bench/epoll-wait.c b/tools/perf/bench/epoll-wait.c
+index 7af694437f4e..d1c5cb526b9f 100644
+--- a/tools/perf/bench/epoll-wait.c
++++ b/tools/perf/bench/epoll-wait.c
+@@ -90,7 +90,6 @@
+ 
+ static unsigned int nthreads = 0;
+ static unsigned int nsecs    = 8;
+-struct timeval start, end, runtime;
+ static bool wdone, done, __verbose, randomize, nonblocking;
+ 
+ /*
+@@ -276,8 +275,8 @@ static void toggle_done(int sig __maybe_unused,
+ {
+       /* inform all threads that we're done for the day */
+       done = true;
+-      gettimeofday(&end, NULL);
+-      timersub(&end, &start, &runtime);
++      gettimeofday(&bench__end, NULL);
++      timersub(&bench__end, &bench__start, &bench__runtime);
+ }
+ 
+ static void print_summary(void)
+@@ -287,7 +286,7 @@ static void print_summary(void)
+ 
+       printf("\nAveraged %ld operations/sec (+- %.2f%%), total secs = %d\n",
+              avg, rel_stddev_stats(stddev, avg),
+-             (int) runtime.tv_sec);
++             (int)bench__runtime.tv_sec);
+ }
+ 
+ static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)
+@@ -479,7 +478,7 @@ int bench_epoll_wait(int argc, const char **argv)
+ 
+       threads_starting = nthreads;
+ 
+-      gettimeofday(&start, NULL);
++      gettimeofday(&bench__start, NULL);
+ 
+       do_threads(worker, cpu);
+ 
+@@ -519,7 +518,7 @@ int bench_epoll_wait(int argc, const char **argv)
+               qsort(worker, nthreads, sizeof(struct worker), cmpworker);
+ 
+       for (i = 0; i < nthreads; i++) {
+-              unsigned long t = worker[i].ops/runtime.tv_sec;
++              unsigned long t = worker[i].ops / bench__runtime.tv_sec;
+ 
+               update_stats(&throughput_stats, t);
+ 
+diff --git a/tools/perf/bench/futex-hash.c b/tools/perf/bench/futex-hash.c
+index 8ba0c3330a9a..21776862e940 100644
+--- a/tools/perf/bench/futex-hash.c
++++ b/tools/perf/bench/futex-hash.c
+@@ -37,7 +37,7 @@ static unsigned int nfutexes = 1024;
+ static bool fshared = false, done = false, silent = false;
+ static int futex_flag = 0;
+ 
+-struct timeval start, end, runtime;
++struct timeval bench__start, bench__end, bench__runtime;
+ static pthread_mutex_t thread_lock;
+ static unsigned int threads_starting;
+ static struct stats throughput_stats;
+@@ -103,8 +103,8 @@ static void toggle_done(int sig __maybe_unused,
+ {
+       /* inform all threads that we're done for the day */
+       done = true;
+-      gettimeofday(&end, NULL);
+-      timersub(&end, &start, &runtime);
++      gettimeofday(&bench__end, NULL);
++      timersub(&bench__end, &bench__start, &bench__runtime);
+ }
+ 
+ static void print_summary(void)
+@@ -114,7 +114,7 @@ static void print_summary(void)
+ 
+       printf("%sAveraged %ld operations/sec (+- %.2f%%), total secs = %d\n",
+              !silent ? "\n" : "", avg, rel_stddev_stats(stddev, avg),
+-             (int) runtime.tv_sec);
++             (int)bench__runtime.tv_sec);
+ }
+ 
+ int bench_futex_hash(int argc, const char **argv)
+@@ -161,7 +161,7 @@ int bench_futex_hash(int argc, const char **argv)
+ 
+       threads_starting = nthreads;
+       pthread_attr_init(&thread_attr);
+-      gettimeofday(&start, NULL);
++      gettimeofday(&bench__start, NULL);
+       for (i = 0; i < nthreads; i++) {
+               worker[i].tid = i;
+               worker[i].futex = calloc(nfutexes, sizeof(*worker[i].futex));
+@@ -204,7 +204,7 @@ int bench_futex_hash(int argc, const char **argv)
+       pthread_mutex_destroy(&thread_lock);
+ 
+       for (i = 0; i < nthreads; i++) {
+-              unsigned long t = worker[i].ops/runtime.tv_sec;
++              unsigned long t = worker[i].ops / bench__runtime.tv_sec;
+               update_stats(&throughput_stats, t);
+               if (!silent) {
+                       if (nfutexes == 1)
+diff --git a/tools/perf/bench/futex-lock-pi.c b/tools/perf/bench/futex-lock-pi.c
+index d0cae8125423..30d97121dc4f 100644
+--- a/tools/perf/bench/futex-lock-pi.c
++++ b/tools/perf/bench/futex-lock-pi.c
+@@ -37,7 +37,6 @@ static bool silent = false, multi = false;
+ static bool done = false, fshared = false;
+ static unsigned int nthreads = 0;
+ static int futex_flag = 0;
+-struct timeval start, end, runtime;
+ static pthread_mutex_t thread_lock;
+ static unsigned int threads_starting;
+ static struct stats throughput_stats;
+@@ -64,7 +63,7 @@ static void print_summary(void)
+ 
+       printf("%sAveraged %ld operations/sec (+- %.2f%%), total secs = %d\n",
+              !silent ? "\n" : "", avg, rel_stddev_stats(stddev, avg),
+-             (int) runtime.tv_sec);
++             (int)bench__runtime.tv_sec);
+ }
+ 
+ static void toggle_done(int sig __maybe_unused,
+@@ -73,8 +72,8 @@ static void toggle_done(int sig __maybe_unused,
+ {
+       /* inform all threads that we're done for the day */
+       done = true;
+-      gettimeofday(&end, NULL);
+-      timersub(&end, &start, &runtime);
++      gettimeofday(&bench__end, NULL);
++      timersub(&bench__end, &bench__start, &bench__runtime);
+ }
+ 
+ static void *workerfn(void *arg)
+@@ -185,7 +184,7 @@ int bench_futex_lock_pi(int argc, const char **argv)
+ 
+       threads_starting = nthreads;
+       pthread_attr_init(&thread_attr);
+-      gettimeofday(&start, NULL);
++      gettimeofday(&bench__start, NULL);
+ 
+       create_threads(worker, thread_attr, cpu);
+       pthread_attr_destroy(&thread_attr);
+@@ -211,7 +210,7 @@ int bench_futex_lock_pi(int argc, const char **argv)
+       pthread_mutex_destroy(&thread_lock);
+ 
+       for (i = 0; i < nthreads; i++) {
+-              unsigned long t = worker[i].ops/runtime.tv_sec;
++              unsigned long t = worker[i].ops / bench__runtime.tv_sec;
+ 
+               update_stats(&throughput_stats, t);
+               if (!silent)
+diff --git a/tools/perf/tests/bp_account.c b/tools/perf/tests/bp_account.c
+index 016bba2c142d..55a9de311d7b 100644
+--- a/tools/perf/tests/bp_account.c
++++ b/tools/perf/tests/bp_account.c
+@@ -23,7 +23,7 @@
+ #include "../perf-sys.h"
+ #include "cloexec.h"
+ 
+-volatile long the_var;
++static volatile long the_var;
+ 
+ static noinline int test_function(void)
+ {
+diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
+index 3baca06786fb..018ecf7b6da9 100644
+--- a/tools/perf/util/env.c
++++ b/tools/perf/util/env.c
+@@ -326,11 +326,11 @@ static const char *normalize_arch(char *arch)
+ 
+ const char *perf_env__arch(struct perf_env *env)
+ {
+-      struct utsname uts;
+       char *arch_name;
+ 
+       if (!env || !env->arch) { /* Assume local operation */
+-              if (uname(&uts) < 0)
++              static struct utsname uts = { .machine[0] = '\0', };
++              if (uts.machine[0] == '\0' && uname(&uts) < 0)
+                       return NULL;
+               arch_name = uts.machine;
+       } else
+diff --git a/tools/testing/selftests/net/fib_nexthop_multiprefix.sh b/tools/testing/selftests/net/fib_nexthop_multiprefix.sh
+index 9dc35a16e415..51df5e305855 100755
+--- a/tools/testing/selftests/net/fib_nexthop_multiprefix.sh
++++ b/tools/testing/selftests/net/fib_nexthop_multiprefix.sh
+@@ -144,7 +144,7 @@ setup()
+ 
+ cleanup()
+ {
+-      for n in h1 r1 h2 h3 h4
++      for n in h0 r1 h1 h2 h3
+       do
+               ip netns del ${n} 2>/dev/null
+       done
+diff --git a/tools/testing/selftests/net/ip_defrag.sh b/tools/testing/selftests/net/ip_defrag.sh
+index 15d3489ecd9c..ceb7ad4dbd94 100755
+--- a/tools/testing/selftests/net/ip_defrag.sh
++++ b/tools/testing/selftests/net/ip_defrag.sh
+@@ -6,6 +6,8 @@
+ set +x
+ set -e
+ 
++modprobe -q nf_defrag_ipv6
++
+ readonly NETNS="ns-$(mktemp -u XXXXXX)"
+ 
+ setup() {
+diff --git a/tools/testing/selftests/net/psock_fanout.c b/tools/testing/selftests/net/psock_fanout.c
+index 8c8c7d79c38d..2c522f7a0aec 100644
+--- a/tools/testing/selftests/net/psock_fanout.c
++++ b/tools/testing/selftests/net/psock_fanout.c
+@@ -350,7 +350,8 @@ static int test_datapath(uint16_t typeflags, int port_off,
+       int fds[2], fds_udp[2][2], ret;
+ 
+       fprintf(stderr, "\ntest: datapath 0x%hx ports %hu,%hu\n",
+-              typeflags, PORT_BASE, PORT_BASE + port_off);
++              typeflags, (uint16_t)PORT_BASE,
++              (uint16_t)(PORT_BASE + port_off));
+ 
+       fds[0] = sock_fanout_open(typeflags, 0);
+       fds[1] = sock_fanout_open(typeflags, 0);
+diff --git a/tools/testing/selftests/net/so_txtime.c b/tools/testing/selftests/net/so_txtime.c
+index ceaad78e9667..3155fbbf644b 100644
+--- a/tools/testing/selftests/net/so_txtime.c
++++ b/tools/testing/selftests/net/so_txtime.c
+@@ -121,7 +121,7 @@ static bool do_recv_one(int fdr, struct timed_send *ts)
+       if (rbuf[0] != ts->data)
+               error(1, 0, "payload mismatch. expected %c", ts->data);
+ 
+-      if (labs(tstop - texpect) > cfg_variance_us)
++      if (llabs(tstop - texpect) > cfg_variance_us)
+               error(1, 0, "exceeds variance (%d us)", cfg_variance_us);
+ 
+       return false;
+diff --git a/tools/testing/selftests/networking/timestamping/rxtimestamp.c b/tools/testing/selftests/networking/timestamping/rxtimestamp.c
+index 422e7761254d..bcb79ba1f214 100644
+--- a/tools/testing/selftests/networking/timestamping/rxtimestamp.c
++++ b/tools/testing/selftests/networking/timestamping/rxtimestamp.c
+@@ -329,8 +329,7 @@ int main(int argc, char **argv)
+       bool all_tests = true;
+       int arg_index = 0;
+       int failures = 0;
+-      int s, t;
+-      char opt;
++      int s, t, opt;
+ 
+       while ((opt = getopt_long(argc, argv, "", long_options,
+                                 &arg_index)) != -1) {
+diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
+index ce7fa37987e1..767ac4eab4fe 100644
+--- a/virt/kvm/arm/mmu.c
++++ b/virt/kvm/arm/mmu.c
+@@ -1199,7 +1199,7 @@ static bool stage2_get_leaf_entry(struct kvm *kvm, phys_addr_t addr,
+       return true;
+ }
+ 
+-static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr)
++static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr, unsigned long sz)
+ {
+       pud_t *pudp;
+       pmd_t *pmdp;
+@@ -1211,11 +1211,11 @@ static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr)
+               return false;
+ 
+       if (pudp)
+-              return kvm_s2pud_exec(pudp);
++              return sz <= PUD_SIZE && kvm_s2pud_exec(pudp);
+       else if (pmdp)
+-              return kvm_s2pmd_exec(pmdp);
++              return sz <= PMD_SIZE && kvm_s2pmd_exec(pmdp);
+       else
+-              return kvm_s2pte_exec(ptep);
++              return sz == PAGE_SIZE && kvm_s2pte_exec(ptep);
+ }
+ 
+ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
+@@ -1805,7 +1805,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
+        * execute permissions, and we preserve whatever we have.
+        */
+       needs_exec = exec_fault ||
+-              (fault_status == FSC_PERM && stage2_is_exec(kvm, fault_ipa));
++              (fault_status == FSC_PERM &&
++               stage2_is_exec(kvm, fault_ipa, vma_pagesize));
+ 
+       if (vma_pagesize == PUD_SIZE) {
+               pud_t new_pud = kvm_pfn_pud(pfn, mem_type);
