commit:     669b6ce0423f386158d729e907a47a74fbd2c10f
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Apr 21 11:42:37 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Apr 21 11:42:37 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=669b6ce0

Linux patch 5.10.32

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1031_linux-5.10.32.patch | 3223 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3227 insertions(+)

diff --git a/0000_README b/0000_README
index 69e2945..68ff733 100644
--- a/0000_README
+++ b/0000_README
@@ -167,6 +167,10 @@ Patch:  1030_linux-5.10.31.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.10.31
 
+Patch:  1031_linux-5.10.32.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.10.32
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1031_linux-5.10.32.patch b/1031_linux-5.10.32.patch
new file mode 100644
index 0000000..354de13
--- /dev/null
+++ b/1031_linux-5.10.32.patch
@@ -0,0 +1,3223 @@
+diff --git a/Makefile b/Makefile
+index c4c0b47e6edea..cad90171b4b9b 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 10
+-SUBLEVEL = 31
++SUBLEVEL = 32
+ EXTRAVERSION =
+ NAME = Dare mighty things
+ 
+diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
+index 2be55fb96d870..98e575dbcce51 100644
+--- a/arch/arc/kernel/signal.c
++++ b/arch/arc/kernel/signal.c
+@@ -96,7 +96,7 @@ stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
+                            sizeof(sf->uc.uc_mcontext.regs.scratch));
+       err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t));
+ 
+-      return err;
++      return err ? -EFAULT : 0;
+ }
+ 
+ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
+@@ -110,7 +110,7 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
+                               &(sf->uc.uc_mcontext.regs.scratch),
+                               sizeof(sf->uc.uc_mcontext.regs.scratch));
+       if (err)
+-              return err;
++              return -EFAULT;
+ 
+       set_current_blocked(&set);
+       regs->bta       = uregs.scratch.bta;
+diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi
+index d6475cc6a91a7..049174086756d 100644
+--- a/arch/arm/boot/dts/omap4.dtsi
++++ b/arch/arm/boot/dts/omap4.dtsi
+@@ -22,6 +22,11 @@
+               i2c1 = &i2c2;
+               i2c2 = &i2c3;
+               i2c3 = &i2c4;
++              mmc0 = &mmc1;
++              mmc1 = &mmc2;
++              mmc2 = &mmc3;
++              mmc3 = &mmc4;
++              mmc4 = &mmc5;
+               serial0 = &uart1;
+               serial1 = &uart2;
+               serial2 = &uart3;
+diff --git a/arch/arm/boot/dts/omap44xx-clocks.dtsi b/arch/arm/boot/dts/omap44xx-clocks.dtsi
+index 532868591107b..1f1c04d8f4721 100644
+--- a/arch/arm/boot/dts/omap44xx-clocks.dtsi
++++ b/arch/arm/boot/dts/omap44xx-clocks.dtsi
+@@ -770,14 +770,6 @@
+               ti,max-div = <2>;
+       };
+ 
+-      sha2md5_fck: sha2md5_fck@15c8 {
+-              #clock-cells = <0>;
+-              compatible = "ti,gate-clock";
+-              clocks = <&l3_div_ck>;
+-              ti,bit-shift = <1>;
+-              reg = <0x15c8>;
+-      };
+-
+       usb_phy_cm_clk32k: usb_phy_cm_clk32k@640 {
+               #clock-cells = <0>;
+               compatible = "ti,gate-clock";
+diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
+index 2bf2e5839a7f1..530210db27198 100644
+--- a/arch/arm/boot/dts/omap5.dtsi
++++ b/arch/arm/boot/dts/omap5.dtsi
+@@ -25,6 +25,11 @@
+               i2c2 = &i2c3;
+               i2c3 = &i2c4;
+               i2c4 = &i2c5;
++              mmc0 = &mmc1;
++              mmc1 = &mmc2;
++              mmc2 = &mmc3;
++              mmc3 = &mmc4;
++              mmc4 = &mmc5;
+               serial0 = &uart1;
+               serial1 = &uart2;
+               serial2 = &uart3;
+diff --git a/arch/arm/mach-footbridge/cats-pci.c b/arch/arm/mach-footbridge/cats-pci.c
+index 0b2fd7e2e9b42..90b1e9be430e9 100644
+--- a/arch/arm/mach-footbridge/cats-pci.c
++++ b/arch/arm/mach-footbridge/cats-pci.c
+@@ -15,14 +15,14 @@
+ #include <asm/mach-types.h>
+ 
+ /* cats host-specific stuff */
+-static int irqmap_cats[] __initdata = { IRQ_PCI, IRQ_IN0, IRQ_IN1, IRQ_IN3 };
++static int irqmap_cats[] = { IRQ_PCI, IRQ_IN0, IRQ_IN1, IRQ_IN3 };
+ 
+ static u8 cats_no_swizzle(struct pci_dev *dev, u8 *pin)
+ {
+       return 0;
+ }
+ 
+-static int __init cats_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
++static int cats_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+ {
+       if (dev->irq >= 255)
+               return -1;      /* not a valid interrupt. */
+diff --git a/arch/arm/mach-footbridge/ebsa285-pci.c b/arch/arm/mach-footbridge/ebsa285-pci.c
+index 6f28aaa9ca79b..c3f280d08fa7f 100644
+--- a/arch/arm/mach-footbridge/ebsa285-pci.c
++++ b/arch/arm/mach-footbridge/ebsa285-pci.c
+@@ -14,9 +14,9 @@
+ #include <asm/mach/pci.h>
+ #include <asm/mach-types.h>
+ 
+-static int irqmap_ebsa285[] __initdata = { IRQ_IN3, IRQ_IN1, IRQ_IN0, IRQ_PCI };
++static int irqmap_ebsa285[] = { IRQ_IN3, IRQ_IN1, IRQ_IN0, IRQ_PCI };
+ 
+-static int __init ebsa285_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
++static int ebsa285_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+ {
+       if (dev->vendor == PCI_VENDOR_ID_CONTAQ &&
+           dev->device == PCI_DEVICE_ID_CONTAQ_82C693)
+diff --git a/arch/arm/mach-footbridge/netwinder-pci.c b/arch/arm/mach-footbridge/netwinder-pci.c
+index 9473aa0305e5f..e8304392074b8 100644
+--- a/arch/arm/mach-footbridge/netwinder-pci.c
++++ b/arch/arm/mach-footbridge/netwinder-pci.c
+@@ -18,7 +18,7 @@
+  * We now use the slot ID instead of the device identifiers to select
+  * which interrupt is routed where.
+  */
+-static int __init netwinder_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
++static int netwinder_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+ {
+       switch (slot) {
+       case 0:  /* host bridge */
+diff --git a/arch/arm/mach-footbridge/personal-pci.c b/arch/arm/mach-footbridge/personal-pci.c
+index 4391e433a4b2f..9d19aa98a663e 100644
+--- a/arch/arm/mach-footbridge/personal-pci.c
++++ b/arch/arm/mach-footbridge/personal-pci.c
+@@ -14,13 +14,12 @@
+ #include <asm/mach/pci.h>
+ #include <asm/mach-types.h>
+ 
+-static int irqmap_personal_server[] __initdata = {
++static int irqmap_personal_server[] = {
+       IRQ_IN0, IRQ_IN1, IRQ_IN2, IRQ_IN3, 0, 0, 0,
+       IRQ_DOORBELLHOST, IRQ_DMA1, IRQ_DMA2, IRQ_PCI
+ };
+ 
+-static int __init personal_server_map_irq(const struct pci_dev *dev, u8 slot,
+-      u8 pin)
++static int personal_server_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+ {
+       unsigned char line;
+ 
+diff --git a/arch/arm/mach-keystone/keystone.c b/arch/arm/mach-keystone/keystone.c
+index 09a65c2dfd732..b8fa01f9516eb 100644
+--- a/arch/arm/mach-keystone/keystone.c
++++ b/arch/arm/mach-keystone/keystone.c
+@@ -65,7 +65,7 @@ static void __init keystone_init(void)
+ static long long __init keystone_pv_fixup(void)
+ {
+       long long offset;
+-      phys_addr_t mem_start, mem_end;
++      u64 mem_start, mem_end;
+ 
+       mem_start = memblock_start_of_DRAM();
+       mem_end = memblock_end_of_DRAM();
+@@ -78,7 +78,7 @@ static long long __init keystone_pv_fixup(void)
+       if (mem_start < KEYSTONE_HIGH_PHYS_START ||
+           mem_end   > KEYSTONE_HIGH_PHYS_END) {
+               pr_crit("Invalid address space for memory (%08llx-%08llx)\n",
+-                      (u64)mem_start, (u64)mem_end);
++                      mem_start, mem_end);
+               return 0;
+       }
+ 
+diff --git a/arch/arm/mach-omap1/ams-delta-fiq-handler.S b/arch/arm/mach-omap1/ams-delta-fiq-handler.S
+index 14a6c3eb32985..f745a65d3bd7a 100644
+--- a/arch/arm/mach-omap1/ams-delta-fiq-handler.S
++++ b/arch/arm/mach-omap1/ams-delta-fiq-handler.S
+@@ -15,6 +15,7 @@
+ #include <linux/platform_data/gpio-omap.h>
+ 
+ #include <asm/assembler.h>
++#include <asm/irq.h>
+ 
+ #include "ams-delta-fiq.h"
+ #include "board-ams-delta.h"
+diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
+index 7290f033fd2da..1610c567a6a3a 100644
+--- a/arch/arm/mach-omap2/board-generic.c
++++ b/arch/arm/mach-omap2/board-generic.c
+@@ -33,7 +33,7 @@ static void __init __maybe_unused omap_generic_init(void)
+ }
+ 
+ /* Clocks are needed early, see drivers/clocksource for the rest */
+-void __init __maybe_unused omap_init_time_of(void)
++static void __init __maybe_unused omap_init_time_of(void)
+ {
+       omap_clk_init();
+       timer_probe();
+diff --git a/arch/arm/mach-omap2/sr_device.c b/arch/arm/mach-omap2/sr_device.c
+index 17b66f0d0deef..605925684b0aa 100644
+--- a/arch/arm/mach-omap2/sr_device.c
++++ b/arch/arm/mach-omap2/sr_device.c
+@@ -188,7 +188,7 @@ static const char * const dra7_sr_instances[] = {
+ 
+ int __init omap_devinit_smartreflex(void)
+ {
+-      const char * const *sr_inst;
++      const char * const *sr_inst = NULL;
+       int i, nr_sr = 0;
+ 
+       if (soc_is_omap44xx()) {
+diff --git a/arch/arm/mm/pmsa-v7.c b/arch/arm/mm/pmsa-v7.c
+index 88950e41a3a9e..59d916ccdf25f 100644
+--- a/arch/arm/mm/pmsa-v7.c
++++ b/arch/arm/mm/pmsa-v7.c
+@@ -235,6 +235,7 @@ void __init pmsav7_adjust_lowmem_bounds(void)
+       phys_addr_t mem_end;
+       phys_addr_t reg_start, reg_end;
+       unsigned int mem_max_regions;
++      bool first = true;
+       int num;
+       u64 i;
+ 
+@@ -263,7 +264,7 @@ void __init pmsav7_adjust_lowmem_bounds(void)
+ #endif
+ 
+       for_each_mem_range(i, &reg_start, &reg_end) {
+-              if (i == 0) {
++              if (first) {
+                       phys_addr_t phys_offset = PHYS_OFFSET;
+ 
+                       /*
+@@ -275,6 +276,7 @@ void __init pmsav7_adjust_lowmem_bounds(void)
+                       mem_start = reg_start;
+                       mem_end = reg_end;
+                       specified_mem_size = mem_end - mem_start;
++                      first = false;
+               } else {
+                       /*
+                        * memblock auto merges contiguous blocks, remove
+diff --git a/arch/arm/mm/pmsa-v8.c b/arch/arm/mm/pmsa-v8.c
+index 2de019f7503e8..8359748a19a11 100644
+--- a/arch/arm/mm/pmsa-v8.c
++++ b/arch/arm/mm/pmsa-v8.c
+@@ -95,10 +95,11 @@ void __init pmsav8_adjust_lowmem_bounds(void)
+ {
+       phys_addr_t mem_end;
+       phys_addr_t reg_start, reg_end;
++      bool first = true;
+       u64 i;
+ 
+       for_each_mem_range(i, &reg_start, &reg_end) {
+-              if (i == 0) {
++              if (first) {
+                       phys_addr_t phys_offset = PHYS_OFFSET;
+ 
+                       /*
+@@ -107,6 +108,7 @@ void __init pmsav8_adjust_lowmem_bounds(void)
+                       if (reg_start != phys_offset)
+                               panic("First memory bank must be contiguous from PHYS_OFFSET");
+                       mem_end = reg_end;
++                      first = false;
+               } else {
+                       /*
+                        * memblock auto merges contiguous blocks, remove
+diff --git a/arch/arm/probes/uprobes/core.c b/arch/arm/probes/uprobes/core.c
+index c4b49b322e8a8..f5f790c6e5f89 100644
+--- a/arch/arm/probes/uprobes/core.c
++++ b/arch/arm/probes/uprobes/core.c
+@@ -204,7 +204,7 @@ unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
+ static struct undef_hook uprobes_arm_break_hook = {
+       .instr_mask     = 0x0fffffff,
+       .instr_val      = (UPROBE_SWBP_ARM_INSN & 0x0fffffff),
+-      .cpsr_mask      = MODE_MASK,
++      .cpsr_mask      = (PSR_T_BIT | MODE_MASK),
+       .cpsr_val       = USR_MODE,
+       .fn             = uprobe_trap_handler,
+ };
+@@ -212,7 +212,7 @@ static struct undef_hook uprobes_arm_break_hook = {
+ static struct undef_hook uprobes_arm_ss_hook = {
+       .instr_mask     = 0x0fffffff,
+       .instr_val      = (UPROBE_SS_ARM_INSN & 0x0fffffff),
+-      .cpsr_mask      = MODE_MASK,
++      .cpsr_mask      = (PSR_T_BIT | MODE_MASK),
+       .cpsr_val       = USR_MODE,
+       .fn             = uprobe_trap_handler,
+ };
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index c1be64228327c..5e5cf3af63515 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -1390,10 +1390,13 @@ config ARM64_PAN
+        The feature is detected at runtime, and will remain as a 'nop'
+        instruction if the cpu does not implement the feature.
+ 
++config AS_HAS_LSE_ATOMICS
++      def_bool $(as-instr,.arch_extension lse)
++
+ config ARM64_LSE_ATOMICS
+       bool
+       default ARM64_USE_LSE_ATOMICS
+-      depends on $(as-instr,.arch_extension lse)
++      depends on AS_HAS_LSE_ATOMICS
+ 
+ config ARM64_USE_LSE_ATOMICS
+       bool "Atomic instructions"
+@@ -1667,6 +1670,7 @@ config ARM64_MTE
+       bool "Memory Tagging Extension support"
+       default y
+       depends on ARM64_AS_HAS_MTE && ARM64_TAGGED_ADDR_ABI
++      depends on AS_HAS_LSE_ATOMICS
+       select ARCH_USES_HIGH_VMA_FLAGS
+       help
+         Memory Tagging (part of the ARMv8.5 Extensions) provides
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts
+index 302e24be0a318..a1f621b388fe7 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts
++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts
+@@ -8,3 +8,7 @@
+       compatible = "pine64,pine64-lts", "allwinner,sun50i-r18",
+                    "allwinner,sun50i-a64";
+ };
++
++&mmc0 {
++      cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; /* PF6 push-push switch */
++};
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi
+index 3402cec87035b..df62044ff7a7a 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi
++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi
+@@ -34,7 +34,7 @@
+       vmmc-supply = <&reg_dcdc1>;
+       disable-wp;
+       bus-width = <4>;
+-      cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; /* PF6 */
++      cd-gpios = <&pio 5 6 GPIO_ACTIVE_HIGH>; /* PF6 push-pull switch */
+       status = "okay";
+ };
+ 
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
+index 7c9dbde645b52..e8163c572daba 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
++++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
+@@ -289,10 +289,6 @@
+       vcc-pm-supply = <&reg_aldo1>;
+ };
+ 
+-&rtc {
+-      clocks = <&ext_osc32k>;
+-};
+-
+ &spdif {
+       status = "okay";
+ };
+diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
+index 619db9b4c9d5c..3cb3c4ab3ea56 100644
+--- a/arch/arm64/include/asm/alternative.h
++++ b/arch/arm64/include/asm/alternative.h
+@@ -119,9 +119,9 @@ static inline void apply_alternatives_module(void *start, size_t length) { }
+       .popsection
+       .subsection 1
+ 663:  \insn2
+-664:  .previous
+-      .org    . - (664b-663b) + (662b-661b)
++664:  .org    . - (664b-663b) + (662b-661b)
+       .org    . - (662b-661b) + (664b-663b)
++      .previous
+       .endif
+ .endm
+ 
+@@ -191,11 +191,11 @@ static inline void apply_alternatives_module(void *start, size_t length) { }
+  */
+ .macro alternative_endif
+ 664:
++      .org    . - (664b-663b) + (662b-661b)
++      .org    . - (662b-661b) + (664b-663b)
+       .if .Lasm_alt_mode==0
+       .previous
+       .endif
+-      .org    . - (664b-663b) + (662b-661b)
+-      .org    . - (662b-661b) + (664b-663b)
+ .endm
+ 
+ /*
+diff --git a/arch/arm64/include/asm/word-at-a-time.h b/arch/arm64/include/asm/word-at-a-time.h
+index 3333950b59093..ea487218db790 100644
+--- a/arch/arm64/include/asm/word-at-a-time.h
++++ b/arch/arm64/include/asm/word-at-a-time.h
+@@ -53,7 +53,7 @@ static inline unsigned long find_zero(unsigned long mask)
+  */
+ static inline unsigned long load_unaligned_zeropad(const void *addr)
+ {
+-      unsigned long ret, offset;
++      unsigned long ret, tmp;
+ 
+       /* Load word from unaligned pointer addr */
+       asm(
+@@ -61,9 +61,9 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
+       "2:\n"
+       "       .pushsection .fixup,\"ax\"\n"
+       "       .align 2\n"
+-      "3:     and     %1, %2, #0x7\n"
+-      "       bic     %2, %2, #0x7\n"
+-      "       ldr     %0, [%2]\n"
++      "3:     bic     %1, %2, #0x7\n"
++      "       ldr     %0, [%1]\n"
++      "       and     %1, %2, #0x7\n"
+       "       lsl     %1, %1, #0x3\n"
+ #ifndef __AARCH64EB__
+       "       lsr     %0, %0, %1\n"
+@@ -73,7 +73,7 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
+       "       b       2b\n"
+       "       .popsection\n"
+       _ASM_EXTABLE(1b, 3b)
+-      : "=&r" (ret), "=&r" (offset)
++      : "=&r" (ret), "=&r" (tmp)
+       : "r" (addr), "Q" (*(unsigned long *)addr));
+ 
+       return ret;
+diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
+index d72c818b019ca..2da82c139e1cd 100644
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -148,16 +148,18 @@ alternative_cb_end
+       .endm
+ 
+       /* Check for MTE asynchronous tag check faults */
+-      .macro check_mte_async_tcf, flgs, tmp
++      .macro check_mte_async_tcf, tmp, ti_flags
+ #ifdef CONFIG_ARM64_MTE
++      .arch_extension lse
+ alternative_if_not ARM64_MTE
+       b       1f
+ alternative_else_nop_endif
+       mrs_s   \tmp, SYS_TFSRE0_EL1
+       tbz     \tmp, #SYS_TFSR_EL1_TF0_SHIFT, 1f
+       /* Asynchronous TCF occurred for TTBR0 access, set the TI flag */
+-      orr     \flgs, \flgs, #_TIF_MTE_ASYNC_FAULT
+-      str     \flgs, [tsk, #TSK_TI_FLAGS]
++      mov     \tmp, #_TIF_MTE_ASYNC_FAULT
++      add     \ti_flags, tsk, #TSK_TI_FLAGS
++      stset   \tmp, [\ti_flags]
+       msr_s   SYS_TFSRE0_EL1, xzr
+ 1:
+ #endif
+@@ -207,7 +209,7 @@ alternative_else_nop_endif
+       disable_step_tsk x19, x20
+ 
+       /* Check for asynchronous tag check faults in user space */
+-      check_mte_async_tcf x19, x22
++      check_mte_async_tcf x22, x23
+       apply_ssbd 1, x22, x23
+ 
+       ptrauth_keys_install_kernel tsk, x20, x22, x23
+diff --git a/arch/ia64/configs/generic_defconfig b/arch/ia64/configs/generic_defconfig
+index ca0d596c800d8..8916a2850c48b 100644
+--- a/arch/ia64/configs/generic_defconfig
++++ b/arch/ia64/configs/generic_defconfig
+@@ -55,8 +55,6 @@ CONFIG_CHR_DEV_SG=m
+ CONFIG_SCSI_FC_ATTRS=y
+ CONFIG_SCSI_SYM53C8XX_2=y
+ CONFIG_SCSI_QLOGIC_1280=y
+-CONFIG_ATA=y
+-CONFIG_ATA_PIIX=y
+ CONFIG_SATA_VITESSE=y
+ CONFIG_MD=y
+ CONFIG_BLK_DEV_MD=m
+diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
+index df7fccf76df69..f7abd118d23d3 100644
+--- a/arch/riscv/Kconfig
++++ b/arch/riscv/Kconfig
+@@ -144,7 +144,7 @@ config ARCH_FLATMEM_ENABLE
+ config ARCH_SPARSEMEM_ENABLE
+       def_bool y
+       depends on MMU
+-      select SPARSEMEM_STATIC if 32BIT && SPARSMEM
++      select SPARSEMEM_STATIC if 32BIT && SPARSEMEM
+       select SPARSEMEM_VMEMMAP_ENABLE if 64BIT
+ 
+ config ARCH_SELECT_MEMORY_MODEL
+diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
+index d23795057c4f1..28c89fce0dab8 100644
+--- a/arch/x86/kernel/setup.c
++++ b/arch/x86/kernel/setup.c
+@@ -1051,9 +1051,6 @@ void __init setup_arch(char **cmdline_p)
+ 
+       cleanup_highmap();
+ 
+-      /* Look for ACPI tables and reserve memory occupied by them. */
+-      acpi_boot_table_init();
+-
+       memblock_set_current_limit(ISA_END_ADDRESS);
+       e820__memblock_setup();
+ 
+@@ -1132,6 +1129,8 @@ void __init setup_arch(char **cmdline_p)
+       reserve_initrd();
+ 
+       acpi_table_upgrade();
++      /* Look for ACPI tables and reserve memory occupied by them. */
++      acpi_boot_table_init();
+ 
+       vsmp_init();
+ 
+diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
+index f3eca45267781..15532feb19f10 100644
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -3329,7 +3329,11 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
+       struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+       enum vm_entry_failure_code entry_failure_code;
+       bool evaluate_pending_interrupts;
+-      u32 exit_reason, failed_index;
++      union vmx_exit_reason exit_reason = {
++              .basic = EXIT_REASON_INVALID_STATE,
++              .failed_vmentry = 1,
++      };
++      u32 failed_index;
+ 
+       if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
+               kvm_vcpu_flush_tlb_current(vcpu);
+@@ -3381,7 +3385,7 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
+ 
+               if (nested_vmx_check_guest_state(vcpu, vmcs12,
+                                                &entry_failure_code)) {
+-                      exit_reason = EXIT_REASON_INVALID_STATE;
++                      exit_reason.basic = EXIT_REASON_INVALID_STATE;
+                       vmcs12->exit_qualification = entry_failure_code;
+                       goto vmentry_fail_vmexit;
+               }
+@@ -3392,7 +3396,7 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
+               vcpu->arch.tsc_offset += vmcs12->tsc_offset;
+ 
+       if (prepare_vmcs02(vcpu, vmcs12, &entry_failure_code)) {
+-              exit_reason = EXIT_REASON_INVALID_STATE;
++              exit_reason.basic = EXIT_REASON_INVALID_STATE;
+               vmcs12->exit_qualification = entry_failure_code;
+               goto vmentry_fail_vmexit_guest_mode;
+       }
+@@ -3402,7 +3406,7 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
+                                                  vmcs12->vm_entry_msr_load_addr,
+                                                  vmcs12->vm_entry_msr_load_count);
+               if (failed_index) {
+-                      exit_reason = EXIT_REASON_MSR_LOAD_FAIL;
++                      exit_reason.basic = EXIT_REASON_MSR_LOAD_FAIL;
+                       vmcs12->exit_qualification = failed_index;
+                       goto vmentry_fail_vmexit_guest_mode;
+               }
+@@ -3470,7 +3474,7 @@ vmentry_fail_vmexit:
+               return NVMX_VMENTRY_VMEXIT;
+ 
+       load_vmcs12_host_state(vcpu, vmcs12);
+-      vmcs12->vm_exit_reason = exit_reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
++      vmcs12->vm_exit_reason = exit_reason.full;
+       if (enable_shadow_vmcs || vmx->nested.hv_evmcs)
+               vmx->nested.need_vmcs12_to_shadow_sync = true;
+       return NVMX_VMENTRY_VMEXIT;
+@@ -5533,7 +5537,12 @@ static int handle_vmfunc(struct kvm_vcpu *vcpu)
+       return kvm_skip_emulated_instruction(vcpu);
+ 
+ fail:
+-      nested_vmx_vmexit(vcpu, vmx->exit_reason,
++      /*
++       * This is effectively a reflected VM-Exit, as opposed to a synthesized
++       * nested VM-Exit.  Pass the original exit reason, i.e. don't hardcode
++       * EXIT_REASON_VMFUNC as the exit reason.
++       */
++      nested_vmx_vmexit(vcpu, vmx->exit_reason.full,
+                         vmx_get_intr_info(vcpu),
+                         vmx_get_exit_qual(vcpu));
+       return 1;
+@@ -5601,7 +5610,8 @@ static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
+  * MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps.
+  */
+ static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
+-      struct vmcs12 *vmcs12, u32 exit_reason)
++                                      struct vmcs12 *vmcs12,
++                                      union vmx_exit_reason exit_reason)
+ {
+       u32 msr_index = kvm_rcx_read(vcpu);
+       gpa_t bitmap;
+@@ -5615,7 +5625,7 @@ static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
+        * First we need to figure out which of the four to use:
+        */
+       bitmap = vmcs12->msr_bitmap;
+-      if (exit_reason == EXIT_REASON_MSR_WRITE)
++      if (exit_reason.basic == EXIT_REASON_MSR_WRITE)
+               bitmap += 2048;
+       if (msr_index >= 0xc0000000) {
+               msr_index -= 0xc0000000;
+@@ -5752,11 +5762,12 @@ static bool nested_vmx_exit_handled_mtf(struct vmcs12 *vmcs12)
+  * Return true if L0 wants to handle an exit from L2 regardless of whether or not
+  * L1 wants the exit.  Only call this when in is_guest_mode (L2).
+  */
+-static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu, u32 exit_reason)
++static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu,
++                                   union vmx_exit_reason exit_reason)
+ {
+       u32 intr_info;
+ 
+-      switch ((u16)exit_reason) {
++      switch ((u16)exit_reason.basic) {
+       case EXIT_REASON_EXCEPTION_NMI:
+               intr_info = vmx_get_intr_info(vcpu);
+               if (is_nmi(intr_info))
+@@ -5812,12 +5823,13 @@ static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu, u32 exit_reason)
+  * Return 1 if L1 wants to intercept an exit from L2.  Only call this when in
+  * is_guest_mode (L2).
+  */
+-static bool nested_vmx_l1_wants_exit(struct kvm_vcpu *vcpu, u32 exit_reason)
++static bool nested_vmx_l1_wants_exit(struct kvm_vcpu *vcpu,
++                                   union vmx_exit_reason exit_reason)
+ {
+       struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+       u32 intr_info;
+ 
+-      switch ((u16)exit_reason) {
++      switch ((u16)exit_reason.basic) {
+       case EXIT_REASON_EXCEPTION_NMI:
+               intr_info = vmx_get_intr_info(vcpu);
+               if (is_nmi(intr_info))
+@@ -5936,7 +5948,7 @@ static bool nested_vmx_l1_wants_exit(struct kvm_vcpu *vcpu, u32 exit_reason)
+ bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu)
+ {
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+-      u32 exit_reason = vmx->exit_reason;
++      union vmx_exit_reason exit_reason = vmx->exit_reason;
+       unsigned long exit_qual;
+       u32 exit_intr_info;
+ 
+@@ -5955,7 +5967,7 @@ bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu)
+               goto reflect_vmexit;
+       }
+ 
+-      trace_kvm_nested_vmexit(exit_reason, vcpu, KVM_ISA_VMX);
++      trace_kvm_nested_vmexit(exit_reason.full, vcpu, KVM_ISA_VMX);
+ 
+       /* If L0 (KVM) wants the exit, it trumps L1's desires. */
+       if (nested_vmx_l0_wants_exit(vcpu, exit_reason))
+@@ -5981,7 +5993,7 @@ bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu)
+       exit_qual = vmx_get_exit_qual(vcpu);
+ 
+ reflect_vmexit:
+-      nested_vmx_vmexit(vcpu, exit_reason, exit_intr_info, exit_qual);
++      nested_vmx_vmexit(vcpu, exit_reason.full, exit_intr_info, exit_qual);
+       return true;
+ }
+ 
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 82af43e14b09c..f8835cabf29f3 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -1578,7 +1578,7 @@ static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
+        * i.e. we end up advancing IP with some random value.
+        */
+       if (!static_cpu_has(X86_FEATURE_HYPERVISOR) ||
+-          to_vmx(vcpu)->exit_reason != EXIT_REASON_EPT_MISCONFIG) {
++          to_vmx(vcpu)->exit_reason.basic != EXIT_REASON_EPT_MISCONFIG) {
+               orig_rip = kvm_rip_read(vcpu);
+               rip = orig_rip + vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
+ #ifdef CONFIG_X86_64
+@@ -5687,7 +5687,7 @@ static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2,
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+ 
+       *info1 = vmx_get_exit_qual(vcpu);
+-      if (!(vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) {
++      if (!(vmx->exit_reason.failed_vmentry)) {
+               *info2 = vmx->idt_vectoring_info;
+               *intr_info = vmx_get_intr_info(vcpu);
+               if (is_exception_with_error_code(*intr_info))
+@@ -5931,8 +5931,9 @@ void dump_vmcs(void)
+ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
+ {
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+-      u32 exit_reason = vmx->exit_reason;
++      union vmx_exit_reason exit_reason = vmx->exit_reason;
+       u32 vectoring_info = vmx->idt_vectoring_info;
++      u16 exit_handler_index;
+ 
+       /*
+        * Flush logged GPAs PML buffer, this will make dirty_bitmap more
+@@ -5974,11 +5975,11 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
+                       return 1;
+       }
+ 
+-      if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) {
++      if (exit_reason.failed_vmentry) {
+               dump_vmcs();
+               vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+               vcpu->run->fail_entry.hardware_entry_failure_reason
+-                      = exit_reason;
++                      = exit_reason.full;
+               vcpu->run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu;
+               return 0;
+       }
+@@ -6000,24 +6001,24 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
+        * will cause infinite loop.
+        */
+       if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
+-                      (exit_reason != EXIT_REASON_EXCEPTION_NMI &&
+-                      exit_reason != EXIT_REASON_EPT_VIOLATION &&
+-                      exit_reason != EXIT_REASON_PML_FULL &&
+-                      exit_reason != EXIT_REASON_APIC_ACCESS &&
+-                      exit_reason != EXIT_REASON_TASK_SWITCH)) {
++          (exit_reason.basic != EXIT_REASON_EXCEPTION_NMI &&
++           exit_reason.basic != EXIT_REASON_EPT_VIOLATION &&
++           exit_reason.basic != EXIT_REASON_PML_FULL &&
++           exit_reason.basic != EXIT_REASON_APIC_ACCESS &&
++           exit_reason.basic != EXIT_REASON_TASK_SWITCH)) {
++              int ndata = 3;
++
+               vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
+-              vcpu->run->internal.ndata = 3;
+               vcpu->run->internal.data[0] = vectoring_info;
+-              vcpu->run->internal.data[1] = exit_reason;
++              vcpu->run->internal.data[1] = exit_reason.full;
+               vcpu->run->internal.data[2] = vcpu->arch.exit_qualification;
+-              if (exit_reason == EXIT_REASON_EPT_MISCONFIG) {
+-                      vcpu->run->internal.ndata++;
+-                      vcpu->run->internal.data[3] =
++              if (exit_reason.basic == EXIT_REASON_EPT_MISCONFIG) {
++                      vcpu->run->internal.data[ndata++] =
+                               vmcs_read64(GUEST_PHYSICAL_ADDRESS);
+               }
+-              vcpu->run->internal.data[vcpu->run->internal.ndata++] =
+-                      vcpu->arch.last_vmentry_cpu;
++              vcpu->run->internal.data[ndata++] = vcpu->arch.last_vmentry_cpu;
++              vcpu->run->internal.ndata = ndata;
+               return 0;
+       }
+ 
+@@ -6043,38 +6044,39 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
+       if (exit_fastpath != EXIT_FASTPATH_NONE)
+               return 1;
+ 
+-      if (exit_reason >= kvm_vmx_max_exit_handlers)
++      if (exit_reason.basic >= kvm_vmx_max_exit_handlers)
+               goto unexpected_vmexit;
+ #ifdef CONFIG_RETPOLINE
+-      if (exit_reason == EXIT_REASON_MSR_WRITE)
++      if (exit_reason.basic == EXIT_REASON_MSR_WRITE)
+               return kvm_emulate_wrmsr(vcpu);
+-      else if (exit_reason == EXIT_REASON_PREEMPTION_TIMER)
++      else if (exit_reason.basic == EXIT_REASON_PREEMPTION_TIMER)
+               return handle_preemption_timer(vcpu);
+-      else if (exit_reason == EXIT_REASON_INTERRUPT_WINDOW)
++      else if (exit_reason.basic == EXIT_REASON_INTERRUPT_WINDOW)
+               return handle_interrupt_window(vcpu);
+-      else if (exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT)
++      else if (exit_reason.basic == EXIT_REASON_EXTERNAL_INTERRUPT)
+               return handle_external_interrupt(vcpu);
+-      else if (exit_reason == EXIT_REASON_HLT)
++      else if (exit_reason.basic == EXIT_REASON_HLT)
+               return kvm_emulate_halt(vcpu);
+-      else if (exit_reason == EXIT_REASON_EPT_MISCONFIG)
++      else if (exit_reason.basic == EXIT_REASON_EPT_MISCONFIG)
+               return handle_ept_misconfig(vcpu);
+ #endif
+ 
+-      exit_reason = array_index_nospec(exit_reason,
+-                                       kvm_vmx_max_exit_handlers);
+-      if (!kvm_vmx_exit_handlers[exit_reason])
++      exit_handler_index = array_index_nospec((u16)exit_reason.basic,
++                                              kvm_vmx_max_exit_handlers);
++      if (!kvm_vmx_exit_handlers[exit_handler_index])
+               goto unexpected_vmexit;
+ 
+-      return kvm_vmx_exit_handlers[exit_reason](vcpu);
++      return kvm_vmx_exit_handlers[exit_handler_index](vcpu);
+ 
+ unexpected_vmexit:
+-      vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n", exit_reason);
++      vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n",
++                  exit_reason.full);
+       dump_vmcs();
+       vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+       vcpu->run->internal.suberror =
+                       KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
+       vcpu->run->internal.ndata = 2;
+-      vcpu->run->internal.data[0] = exit_reason;
++      vcpu->run->internal.data[0] = exit_reason.full;
+       vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;
+       return 0;
+ }
+@@ -6393,9 +6395,9 @@ static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
+ {
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+ 
+-      if (vmx->exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT)
++      if (vmx->exit_reason.basic == EXIT_REASON_EXTERNAL_INTERRUPT)
+               handle_external_interrupt_irqoff(vcpu);
+-      else if (vmx->exit_reason == EXIT_REASON_EXCEPTION_NMI)
++      else if (vmx->exit_reason.basic == EXIT_REASON_EXCEPTION_NMI)
+               handle_exception_nmi_irqoff(vmx);
+ }
+ 
+@@ -6583,7 +6585,7 @@ void noinstr vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp)
+ 
+ static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
+ {
+-      switch (to_vmx(vcpu)->exit_reason) {
++      switch (to_vmx(vcpu)->exit_reason.basic) {
+       case EXIT_REASON_MSR_WRITE:
+               return handle_fastpath_set_msr_irqoff(vcpu);
+       case EXIT_REASON_PREEMPTION_TIMER:
+@@ -6782,17 +6784,17 @@ reenter_guest:
+       vmx->idt_vectoring_info = 0;
+ 
+       if (unlikely(vmx->fail)) {
+-              vmx->exit_reason = 0xdead;
++              vmx->exit_reason.full = 0xdead;
+               return EXIT_FASTPATH_NONE;
+       }
+ 
+-      vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
+-      if (unlikely((u16)vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY))
++      vmx->exit_reason.full = vmcs_read32(VM_EXIT_REASON);
++      if (unlikely((u16)vmx->exit_reason.basic == EXIT_REASON_MCE_DURING_VMENTRY))
+               kvm_machine_check();
+ 
+-      trace_kvm_exit(vmx->exit_reason, vcpu, KVM_ISA_VMX);
++      trace_kvm_exit(vmx->exit_reason.full, vcpu, KVM_ISA_VMX);
+ 
+-      if (unlikely(vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY))
++      if (unlikely(vmx->exit_reason.failed_vmentry))
+               return EXIT_FASTPATH_NONE;
+ 
+       vmx->loaded_vmcs->launched = 1;
+diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
+index f6f66e5c65104..ae3a89ac0600d 100644
+--- a/arch/x86/kvm/vmx/vmx.h
++++ b/arch/x86/kvm/vmx/vmx.h
+@@ -70,6 +70,29 @@ struct pt_desc {
+       struct pt_ctx guest;
+ };
+ 
++union vmx_exit_reason {
++      struct {
++              u32     basic                   : 16;
++              u32     reserved16              : 1;
++              u32     reserved17              : 1;
++              u32     reserved18              : 1;
++              u32     reserved19              : 1;
++              u32     reserved20              : 1;
++              u32     reserved21              : 1;
++              u32     reserved22              : 1;
++              u32     reserved23              : 1;
++              u32     reserved24              : 1;
++              u32     reserved25              : 1;
++              u32     reserved26              : 1;
++              u32     enclave_mode            : 1;
++              u32     smi_pending_mtf         : 1;
++              u32     smi_from_vmx_root       : 1;
++              u32     reserved30              : 1;
++              u32     failed_vmentry          : 1;
++      };
++      u32 full;
++};
++
+ /*
+  * The nested_vmx structure is part of vcpu_vmx, and holds information we need
+  * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
+@@ -244,7 +267,7 @@ struct vcpu_vmx {
+       int vpid;
+       bool emulation_required;
+ 
+-      u32 exit_reason;
++      union vmx_exit_reason exit_reason;
+ 
+       /* Posted interrupt descriptor */
+       struct pi_desc pi_desc;
+diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
+index fe6a460c43735..af3ee288bc117 100644
+--- a/drivers/dma/dmaengine.c
++++ b/drivers/dma/dmaengine.c
+@@ -1086,6 +1086,7 @@ static int __dma_async_device_channel_register(struct dma_device *device,
+       kfree(chan->dev);
+  err_free_local:
+       free_percpu(chan->local);
++      chan->local = NULL;
+       return rc;
+ }
+ 
+diff --git a/drivers/dma/dw/Kconfig b/drivers/dma/dw/Kconfig
+index e5162690de8f1..db25f9b7778c9 100644
+--- a/drivers/dma/dw/Kconfig
++++ b/drivers/dma/dw/Kconfig
+@@ -10,6 +10,7 @@ config DW_DMAC_CORE
+ 
+ config DW_DMAC
+       tristate "Synopsys DesignWare AHB DMA platform driver"
++      depends on HAS_IOMEM
+       select DW_DMAC_CORE
+       help
+         Support the Synopsys DesignWare AHB DMA controller. This
+@@ -18,6 +19,7 @@ config DW_DMAC
+ config DW_DMAC_PCI
+       tristate "Synopsys DesignWare AHB DMA PCI driver"
+       depends on PCI
++      depends on HAS_IOMEM
+       select DW_DMAC_CORE
+       help
+         Support the Synopsys DesignWare AHB DMA controller on the
+diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
+index a6704838ffcb7..459e9fbc2253a 100644
+--- a/drivers/dma/idxd/device.c
++++ b/drivers/dma/idxd/device.c
+@@ -263,6 +263,22 @@ void idxd_wq_drain(struct idxd_wq *wq)
+       idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_WQ, operand, NULL);
+ }
+ 
++void idxd_wq_reset(struct idxd_wq *wq)
++{
++      struct idxd_device *idxd = wq->idxd;
++      struct device *dev = &idxd->pdev->dev;
++      u32 operand;
++
++      if (wq->state != IDXD_WQ_ENABLED) {
++              dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
++              return;
++      }
++
++      operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
++      idxd_cmd_exec(idxd, IDXD_CMD_RESET_WQ, operand, NULL);
++      wq->state = IDXD_WQ_DISABLED;
++}
++
+ int idxd_wq_map_portal(struct idxd_wq *wq)
+ {
+       struct idxd_device *idxd = wq->idxd;
+@@ -291,8 +307,6 @@ void idxd_wq_unmap_portal(struct idxd_wq *wq)
+ void idxd_wq_disable_cleanup(struct idxd_wq *wq)
+ {
+       struct idxd_device *idxd = wq->idxd;
+-      struct device *dev = &idxd->pdev->dev;
+-      int i, wq_offset;
+ 
+       lockdep_assert_held(&idxd->dev_lock);
+       memset(wq->wqcfg, 0, idxd->wqcfg_size);
+@@ -303,14 +317,6 @@ void idxd_wq_disable_cleanup(struct idxd_wq *wq)
+       wq->priority = 0;
+       clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
+       memset(wq->name, 0, WQ_NAME_SIZE);
+-
+-      for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
+-              wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
+-              iowrite32(0, idxd->reg_base + wq_offset);
+-              dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
+-                      wq->id, i, wq_offset,
+-                      ioread32(idxd->reg_base + wq_offset));
+-      }
+ }
+ 
+ /* Device control bits */
+@@ -560,7 +566,14 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
+       if (!wq->group)
+               return 0;
+ 
+-      memset(wq->wqcfg, 0, idxd->wqcfg_size);
++      /*
++       * Instead of memset the entire shadow copy of WQCFG, copy from the hardware after
++       * wq reset. This will copy back the sticky values that are present on some devices.
++       */
++      for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
++              wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
++              wq->wqcfg->bits[i] = ioread32(idxd->reg_base + wq_offset);
++      }
+ 
+       /* byte 0-3 */
+       wq->wqcfg->wq_size = wq->size;
+diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
+index 953ef6536aac4..1d7849cb91004 100644
+--- a/drivers/dma/idxd/idxd.h
++++ b/drivers/dma/idxd/idxd.h
+@@ -295,6 +295,7 @@ void idxd_wq_free_resources(struct idxd_wq *wq);
+ int idxd_wq_enable(struct idxd_wq *wq);
+ int idxd_wq_disable(struct idxd_wq *wq);
+ void idxd_wq_drain(struct idxd_wq *wq);
++void idxd_wq_reset(struct idxd_wq *wq);
+ int idxd_wq_map_portal(struct idxd_wq *wq);
+ void idxd_wq_unmap_portal(struct idxd_wq *wq);
+ void idxd_wq_disable_cleanup(struct idxd_wq *wq);
+diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
+index 552e2e2707058..6bb1c1773aae6 100644
+--- a/drivers/dma/idxd/irq.c
++++ b/drivers/dma/idxd/irq.c
+@@ -66,7 +66,9 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
+               for (i = 0; i < 4; i++)
+                       idxd->sw_err.bits[i] = ioread64(idxd->reg_base +
+                                       IDXD_SWERR_OFFSET + i * sizeof(u64));
+-              iowrite64(IDXD_SWERR_ACK, idxd->reg_base + IDXD_SWERR_OFFSET);
++
++              iowrite64(idxd->sw_err.bits[0] & IDXD_SWERR_ACK,
++                        idxd->reg_base + IDXD_SWERR_OFFSET);
+ 
+               if (idxd->sw_err.valid && idxd->sw_err.wq_idx_valid) {
+                       int id = idxd->sw_err.wq_idx;
+diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
+index fb97c9f319a55..7566b573d546e 100644
+--- a/drivers/dma/idxd/sysfs.c
++++ b/drivers/dma/idxd/sysfs.c
+@@ -241,7 +241,6 @@ static void disable_wq(struct idxd_wq *wq)
+ {
+       struct idxd_device *idxd = wq->idxd;
+       struct device *dev = &idxd->pdev->dev;
+-      int rc;
+ 
+       mutex_lock(&wq->wq_lock);
+       dev_dbg(dev, "%s removing WQ %s\n", __func__, dev_name(&wq->conf_dev));
+@@ -262,17 +261,13 @@ static void disable_wq(struct idxd_wq *wq)
+       idxd_wq_unmap_portal(wq);
+ 
+       idxd_wq_drain(wq);
+-      rc = idxd_wq_disable(wq);
++      idxd_wq_reset(wq);
+ 
+       idxd_wq_free_resources(wq);
+       wq->client_count = 0;
+       mutex_unlock(&wq->wq_lock);
+ 
+-      if (rc < 0)
+-              dev_warn(dev, "Failed to disable %s: %d\n",
+-                       dev_name(&wq->conf_dev), rc);
+-      else
+-              dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
++      dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
+ }
+ 
+ static int idxd_config_bus_remove(struct device *dev)
+@@ -923,7 +918,7 @@ static ssize_t wq_size_store(struct device *dev,
+       if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+               return -EPERM;
+ 
+-      if (wq->state != IDXD_WQ_DISABLED)
++      if (idxd->state == IDXD_DEV_ENABLED)
+               return -EPERM;
+ 
+       if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size)
+@@ -1259,8 +1254,14 @@ static ssize_t op_cap_show(struct device *dev,
+ {
+       struct idxd_device *idxd =
+               container_of(dev, struct idxd_device, conf_dev);
++      int i, rc = 0;
++
++      for (i = 0; i < 4; i++)
++              rc += sysfs_emit_at(buf, rc, "%#llx ", idxd->hw.opcap.bits[i]);
+ 
+-      return sprintf(buf, "%#llx\n", idxd->hw.opcap.bits[0]);
++      rc--;
++      rc += sysfs_emit_at(buf, rc, "\n");
++      return rc;
+ }
+ static DEVICE_ATTR_RO(op_cap);
+ 
+diff --git a/drivers/dma/plx_dma.c b/drivers/dma/plx_dma.c
+index f387c5bbc170c..1669345441619 100644
+--- a/drivers/dma/plx_dma.c
++++ b/drivers/dma/plx_dma.c
+@@ -507,10 +507,8 @@ static int plx_dma_create(struct pci_dev *pdev)
+ 
+       rc = request_irq(pci_irq_vector(pdev, 0), plx_dma_isr, 0,
+                        KBUILD_MODNAME, plxdev);
+-      if (rc) {
+-              kfree(plxdev);
+-              return rc;
+-      }
++      if (rc)
++              goto free_plx;
+ 
+       spin_lock_init(&plxdev->ring_lock);
+       tasklet_setup(&plxdev->desc_task, plx_dma_desc_task);
+@@ -540,14 +538,20 @@ static int plx_dma_create(struct pci_dev *pdev)
+       rc = dma_async_device_register(dma);
+       if (rc) {
+               pci_err(pdev, "Failed to register dma device: %d\n", rc);
+-              free_irq(pci_irq_vector(pdev, 0),  plxdev);
+-              kfree(plxdev);
+-              return rc;
++              goto put_device;
+       }
+ 
+       pci_set_drvdata(pdev, plxdev);
+ 
+       return 0;
++
++put_device:
++      put_device(&pdev->dev);
++      free_irq(pci_irq_vector(pdev, 0),  plxdev);
++free_plx:
++      kfree(plxdev);
++
++      return rc;
+ }
+ 
+ static int plx_dma_probe(struct pci_dev *pdev,
+diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
+index 728f6c6871824..fa5d945b2f286 100644
+--- a/drivers/gpio/gpiolib-sysfs.c
++++ b/drivers/gpio/gpiolib-sysfs.c
+@@ -458,6 +458,8 @@ static ssize_t export_store(struct class *class,
+       long                    gpio;
+       struct gpio_desc        *desc;
+       int                     status;
++      struct gpio_chip        *gc;
++      int                     offset;
+ 
+       status = kstrtol(buf, 0, &gpio);
+       if (status < 0)
+@@ -469,6 +471,12 @@ static ssize_t export_store(struct class *class,
+               pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
+               return -EINVAL;
+       }
++      gc = desc->gdev->chip;
++      offset = gpio_chip_hwgpio(desc);
++      if (!gpiochip_line_is_valid(gc, offset)) {
++              pr_warn("%s: GPIO %ld masked\n", __func__, gpio);
++              return -EINVAL;
++      }
+ 
+       /* No extra locking here; FLAG_SYSFS just signifies that the
+        * request and export were done by on behalf of userspace, so
+diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+index 5e11cdb207d83..0ca7e53db112a 100644
+--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+@@ -1240,8 +1240,8 @@ static int a5xx_pm_suspend(struct msm_gpu *gpu)
+ 
+ static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
+ {
+-      *value = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_CP_0_LO,
+-              REG_A5XX_RBBM_PERFCTR_CP_0_HI);
++      *value = gpu_read64(gpu, REG_A5XX_RBBM_ALWAYSON_COUNTER_LO,
++              REG_A5XX_RBBM_ALWAYSON_COUNTER_HI);
+ 
+       return 0;
+ }
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+index 83b50f6d6bb78..722c2fe3bfd56 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+@@ -1073,8 +1073,8 @@ static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
+       /* Force the GPU power on so we can read this register */
+       a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
+ 
+-      *value = gpu_read64(gpu, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
+-              REG_A6XX_RBBM_PERFCTR_CP_0_HI);
++      *value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
++              REG_A6XX_CP_ALWAYS_ON_COUNTER_HI);
+ 
+       a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
+       mutex_unlock(&perfcounter_oob);
+diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c
+index cc93a8c9547bc..8ea91542b567a 100644
+--- a/drivers/gpu/drm/xen/xen_drm_front.c
++++ b/drivers/gpu/drm/xen/xen_drm_front.c
+@@ -531,7 +531,7 @@ static int xen_drm_drv_init(struct xen_drm_front_info *front_info)
+       drm_dev = drm_dev_alloc(&xen_drm_driver, dev);
+       if (IS_ERR(drm_dev)) {
+               ret = PTR_ERR(drm_dev);
+-              goto fail;
++              goto fail_dev;
+       }
+ 
+       drm_info->drm_dev = drm_dev;
+@@ -561,8 +561,10 @@ fail_modeset:
+       drm_kms_helper_poll_fini(drm_dev);
+       drm_mode_config_cleanup(drm_dev);
+       drm_dev_put(drm_dev);
+-fail:
++fail_dev:
+       kfree(drm_info);
++      front_info->drm_info = NULL;
++fail:
+       return ret;
+ }
+ 
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index 44d715c12f6ab..6cda5935fc09c 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -3574,8 +3574,6 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
+ {
+       struct wacom_features *features = &wacom_wac->features;
+ 
+-      input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+-
+       if (!(features->device_type & WACOM_DEVICETYPE_PEN))
+               return -ENODEV;
+ 
+@@ -3590,6 +3588,7 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
+               return 0;
+       }
+ 
++      input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+       __set_bit(BTN_TOUCH, input_dev->keybit);
+       __set_bit(ABS_MISC, input_dev->absbit);
+ 
+@@ -3742,8 +3741,6 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev,
+ {
+       struct wacom_features *features = &wacom_wac->features;
+ 
+-      input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+-
+       if (!(features->device_type & WACOM_DEVICETYPE_TOUCH))
+               return -ENODEV;
+ 
+@@ -3756,6 +3753,7 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev,
+               /* setup has already been done */
+               return 0;
+ 
++      input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+       __set_bit(BTN_TOUCH, input_dev->keybit);
+ 
+       if (features->touch_max == 1) {
+diff --git a/drivers/input/keyboard/nspire-keypad.c b/drivers/input/keyboard/nspire-keypad.c
+index 63d5e488137dc..e9fa1423f1360 100644
+--- a/drivers/input/keyboard/nspire-keypad.c
++++ b/drivers/input/keyboard/nspire-keypad.c
+@@ -93,9 +93,15 @@ static irqreturn_t nspire_keypad_irq(int irq, void *dev_id)
+       return IRQ_HANDLED;
+ }
+ 
+-static int nspire_keypad_chip_init(struct nspire_keypad *keypad)
++static int nspire_keypad_open(struct input_dev *input)
+ {
++      struct nspire_keypad *keypad = input_get_drvdata(input);
+       unsigned long val = 0, cycles_per_us, delay_cycles, row_delay_cycles;
++      int error;
++
++      error = clk_prepare_enable(keypad->clk);
++      if (error)
++              return error;
+ 
+       cycles_per_us = (clk_get_rate(keypad->clk) / 1000000);
+       if (cycles_per_us == 0)
+@@ -121,30 +127,6 @@ static int nspire_keypad_chip_init(struct nspire_keypad *keypad)
+       keypad->int_mask = 1 << 1;
+       writel(keypad->int_mask, keypad->reg_base + KEYPAD_INTMSK);
+ 
+-      /* Disable GPIO interrupts to prevent hanging on touchpad */
+-      /* Possibly used to detect touchpad events */
+-      writel(0, keypad->reg_base + KEYPAD_UNKNOWN_INT);
+-      /* Acknowledge existing interrupts */
+-      writel(~0, keypad->reg_base + KEYPAD_UNKNOWN_INT_STS);
+-
+-      return 0;
+-}
+-
+-static int nspire_keypad_open(struct input_dev *input)
+-{
+-      struct nspire_keypad *keypad = input_get_drvdata(input);
+-      int error;
+-
+-      error = clk_prepare_enable(keypad->clk);
+-      if (error)
+-              return error;
+-
+-      error = nspire_keypad_chip_init(keypad);
+-      if (error) {
+-              clk_disable_unprepare(keypad->clk);
+-              return error;
+-      }
+-
+       return 0;
+ }
+ 
+@@ -152,6 +134,11 @@ static void nspire_keypad_close(struct input_dev *input)
+ {
+       struct nspire_keypad *keypad = input_get_drvdata(input);
+ 
++      /* Disable interrupts */
++      writel(0, keypad->reg_base + KEYPAD_INTMSK);
++      /* Acknowledge existing interrupts */
++      writel(~0, keypad->reg_base + KEYPAD_INT);
++
+       clk_disable_unprepare(keypad->clk);
+ }
+ 
+@@ -210,6 +197,25 @@ static int nspire_keypad_probe(struct platform_device *pdev)
+               return -ENOMEM;
+       }
+ 
++      error = clk_prepare_enable(keypad->clk);
++      if (error) {
++              dev_err(&pdev->dev, "failed to enable clock\n");
++              return error;
++      }
++
++      /* Disable interrupts */
++      writel(0, keypad->reg_base + KEYPAD_INTMSK);
++      /* Acknowledge existing interrupts */
++      writel(~0, keypad->reg_base + KEYPAD_INT);
++
++      /* Disable GPIO interrupts to prevent hanging on touchpad */
++      /* Possibly used to detect touchpad events */
++      writel(0, keypad->reg_base + KEYPAD_UNKNOWN_INT);
++      /* Acknowledge existing GPIO interrupts */
++      writel(~0, keypad->reg_base + KEYPAD_UNKNOWN_INT_STS);
++
++      clk_disable_unprepare(keypad->clk);
++
+       input_set_drvdata(input, keypad);
+ 
+       input->id.bustype = BUS_HOST;
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
+index 9119e12a57784..a5a0035536462 100644
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -588,6 +588,7 @@ static const struct dmi_system_id i8042_dmi_noselftest_table[] = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+                       DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */
+               },
++      }, {
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+                       DMI_MATCH(DMI_CHASSIS_TYPE, "31"), /* Convertible Notebook */
+diff --git a/drivers/input/touchscreen/s6sy761.c b/drivers/input/touchscreen/s6sy761.c
+index b63d7fdf0cd20..85a1f465c097e 100644
+--- a/drivers/input/touchscreen/s6sy761.c
++++ b/drivers/input/touchscreen/s6sy761.c
+@@ -145,8 +145,8 @@ static void s6sy761_report_coordinates(struct s6sy761_data *sdata,
+       u8 major = event[4];
+       u8 minor = event[5];
+       u8 z = event[6] & S6SY761_MASK_Z;
+-      u16 x = (event[1] << 3) | ((event[3] & S6SY761_MASK_X) >> 4);
+-      u16 y = (event[2] << 3) | (event[3] & S6SY761_MASK_Y);
++      u16 x = (event[1] << 4) | ((event[3] & S6SY761_MASK_X) >> 4);
++      u16 y = (event[2] << 4) | (event[3] & S6SY761_MASK_Y);
+ 
+       input_mt_slot(sdata->input, tid);
+ 
+diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
+index 66f4c6398f670..cea2b37897367 100644
+--- a/drivers/md/dm-verity-fec.c
++++ b/drivers/md/dm-verity-fec.c
+@@ -65,7 +65,7 @@ static u8 *fec_read_parity(struct dm_verity *v, u64 rsb, int index,
+       u8 *res;
+ 
+       position = (index + rsb) * v->fec->roots;
+-      block = div64_u64_rem(position, v->fec->roots << SECTOR_SHIFT, &rem);
++      block = div64_u64_rem(position, v->fec->io_size, &rem);
+       *offset = (unsigned)rem;
+ 
+       res = dm_bufio_read(v->fec->bufio, block, buf);
+@@ -154,7 +154,7 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio,
+ 
+               /* read the next block when we run out of parity bytes */
+               offset += v->fec->roots;
+-              if (offset >= v->fec->roots << SECTOR_SHIFT) {
++              if (offset >= v->fec->io_size) {
+                       dm_bufio_release(buf);
+ 
+                       par = fec_read_parity(v, rsb, block_offset, &offset, &buf);
+@@ -742,8 +742,13 @@ int verity_fec_ctr(struct dm_verity *v)
+               return -E2BIG;
+       }
+ 
++      if ((f->roots << SECTOR_SHIFT) & ((1 << v->data_dev_block_bits) - 1))
++              f->io_size = 1 << v->data_dev_block_bits;
++      else
++              f->io_size = v->fec->roots << SECTOR_SHIFT;
++
+       f->bufio = dm_bufio_client_create(f->dev->bdev,
+-                                        f->roots << SECTOR_SHIFT,
++                                        f->io_size,
+                                         1, 0, NULL, NULL);
+       if (IS_ERR(f->bufio)) {
+               ti->error = "Cannot initialize FEC bufio client";
+diff --git a/drivers/md/dm-verity-fec.h b/drivers/md/dm-verity-fec.h
+index 42fbd3a7fc9f1..3c46c8d618833 100644
+--- a/drivers/md/dm-verity-fec.h
++++ b/drivers/md/dm-verity-fec.h
+@@ -36,6 +36,7 @@ struct dm_verity_fec {
+       struct dm_dev *dev;     /* parity data device */
+       struct dm_bufio_client *data_bufio;     /* for data dev access */
+       struct dm_bufio_client *bufio;          /* for parity data access */
++      size_t io_size;         /* IO size for roots */
+       sector_t start;         /* parity data start in blocks */
+       sector_t blocks;        /* number of blocks covered */
+       sector_t rounds;        /* number of interleaving rounds */
+diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c
+index 57f1f17089946..5c5c92132287d 100644
+--- a/drivers/mtd/nand/raw/mtk_nand.c
++++ b/drivers/mtd/nand/raw/mtk_nand.c
+@@ -488,8 +488,8 @@ static int mtk_nfc_exec_instr(struct nand_chip *chip,
+               return 0;
+       case NAND_OP_WAITRDY_INSTR:
+               return readl_poll_timeout(nfc->regs + NFI_STA, status,
+-                                        status & STA_BUSY, 20,
+-                                        instr->ctx.waitrdy.timeout_ms);
++                                        !(status & STA_BUSY), 20,
++                                        instr->ctx.waitrdy.timeout_ms * 1000);
+       default:
+               break;
+       }
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c 
b/drivers/net/dsa/mv88e6xxx/chip.c
+index 87160e723dfcf..70ec17f3c3007 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -2994,10 +2994,17 @@ out_resources:
+       return err;
+ }
+ 
++/* prod_id for switch families which do not have a PHY model number */
++static const u16 family_prod_id_table[] = {
++      [MV88E6XXX_FAMILY_6341] = MV88E6XXX_PORT_SWITCH_ID_PROD_6341,
++      [MV88E6XXX_FAMILY_6390] = MV88E6XXX_PORT_SWITCH_ID_PROD_6390,
++};
++
+ static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg)
+ {
+       struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
+       struct mv88e6xxx_chip *chip = mdio_bus->chip;
++      u16 prod_id;
+       u16 val;
+       int err;
+ 
+@@ -3008,23 +3015,12 @@ static int mv88e6xxx_mdio_read(struct mii_bus *bus, 
int phy, int reg)
+       err = chip->info->ops->phy_read(chip, bus, phy, reg, &val);
+       mv88e6xxx_reg_unlock(chip);
+ 
+-      if (reg == MII_PHYSID2) {
+-              /* Some internal PHYs don't have a model number. */
+-              if (chip->info->family != MV88E6XXX_FAMILY_6165)
+-                      /* Then there is the 6165 family. It gets is
+-                       * PHYs correct. But it can also have two
+-                       * SERDES interfaces in the PHY address
+-                       * space. And these don't have a model
+-                       * number. But they are not PHYs, so we don't
+-                       * want to give them something a PHY driver
+-                       * will recognise.
+-                       *
+-                       * Use the mv88e6390 family model number
+-                       * instead, for anything which really could be
+-                       * a PHY,
+-                       */
+-                      if (!(val & 0x3f0))
+-                              val |= MV88E6XXX_PORT_SWITCH_ID_PROD_6390 >> 4;
++      /* Some internal PHYs don't have a model number. */
++      if (reg == MII_PHYSID2 && !(val & 0x3f0) &&
++          chip->info->family < ARRAY_SIZE(family_prod_id_table)) {
++              prod_id = family_prod_id_table[chip->info->family];
++              if (prod_id)
++                      val |= prod_id >> 4;
+       }
+ 
+       return err ? err : val;
+diff --git a/drivers/net/ethernet/amd/pcnet32.c 
b/drivers/net/ethernet/amd/pcnet32.c
+index 187b0b9a6e1df..f78daba60b35c 100644
+--- a/drivers/net/ethernet/amd/pcnet32.c
++++ b/drivers/net/ethernet/amd/pcnet32.c
+@@ -1534,8 +1534,7 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct 
pci_device_id *ent)
+       }
+       pci_set_master(pdev);
+ 
+-      ioaddr = pci_resource_start(pdev, 0);
+-      if (!ioaddr) {
++      if (!pci_resource_len(pdev, 0)) {
+               if (pcnet32_debug & NETIF_MSG_PROBE)
+                       pr_err("card has no PCI IO resources, aborting\n");
+               err = -ENODEV;
+@@ -1548,6 +1547,8 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct 
pci_device_id *ent)
+                       pr_err("architecture does not support 32bit PCI 
busmaster DMA\n");
+               goto err_disable_dev;
+       }
++
++      ioaddr = pci_resource_start(pdev, 0);
+       if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) {
+               if (pcnet32_debug & NETIF_MSG_PROBE)
+                       pr_err("io address range already allocated\n");
+diff --git a/drivers/net/ethernet/cadence/macb_main.c 
b/drivers/net/ethernet/cadence/macb_main.c
+index 48a6bda2a8cc7..390f45e49eaf7 100644
+--- a/drivers/net/ethernet/cadence/macb_main.c
++++ b/drivers/net/ethernet/cadence/macb_main.c
+@@ -3777,6 +3777,7 @@ static int macb_init(struct platform_device *pdev)
+       reg = gem_readl(bp, DCFG8);
+       bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3),
+                       GEM_BFEXT(T2SCR, reg));
++      INIT_LIST_HEAD(&bp->rx_fs_list.list);
+       if (bp->max_tuples > 0) {
+               /* also needs one ethtype match to check IPv4 */
+               if (GEM_BFEXT(SCR2ETH, reg) > 0) {
+@@ -3787,7 +3788,6 @@ static int macb_init(struct platform_device *pdev)
+                       /* Filtering is supported in hw but don't enable it in 
kernel now */
+                       dev->hw_features |= NETIF_F_NTUPLE;
+                       /* init Rx flow definitions */
+-                      INIT_LIST_HEAD(&bp->rx_fs_list.list);
+                       bp->rx_fs_list.count = 0;
+                       spin_lock_init(&bp->rx_fs_lock);
+               } else
+diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c 
b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
+index 423d6d78d15c7..3a50d5a62aceb 100644
+--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
++++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
+@@ -354,18 +354,6 @@ static int chcr_set_tcb_field(struct chcr_ktls_info 
*tx_info, u16 word,
+       return cxgb4_ofld_send(tx_info->netdev, skb);
+ }
+ 
+-/*
+- * chcr_ktls_mark_tcb_close: mark tcb state to CLOSE
+- * @tx_info - driver specific tls info.
+- * return: NET_TX_OK/NET_XMIT_DROP.
+- */
+-static int chcr_ktls_mark_tcb_close(struct chcr_ktls_info *tx_info)
+-{
+-      return chcr_set_tcb_field(tx_info, TCB_T_STATE_W,
+-                                TCB_T_STATE_V(TCB_T_STATE_M),
+-                                CHCR_TCB_STATE_CLOSED, 1);
+-}
+-
+ /*
+  * chcr_ktls_dev_del:  call back for tls_dev_del.
+  * Remove the tid and l2t entry and close the connection.
+@@ -400,8 +388,6 @@ static void chcr_ktls_dev_del(struct net_device *netdev,
+ 
+       /* clear tid */
+       if (tx_info->tid != -1) {
+-              /* clear tcb state and then release tid */
+-              chcr_ktls_mark_tcb_close(tx_info);
+               cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
+                                tx_info->tid, tx_info->ip_family);
+       }
+@@ -579,7 +565,6 @@ static int chcr_ktls_dev_add(struct net_device *netdev, 
struct sock *sk,
+       return 0;
+ 
+ free_tid:
+-      chcr_ktls_mark_tcb_close(tx_info);
+ #if IS_ENABLED(CONFIG_IPV6)
+       /* clear clip entry */
+       if (tx_info->ip_family == AF_INET6)
+@@ -677,10 +662,6 @@ static int chcr_ktls_cpl_act_open_rpl(struct adapter 
*adap,
+       if (tx_info->pending_close) {
+               spin_unlock(&tx_info->lock);
+               if (!status) {
+-                      /* it's a late success, tcb status is establised,
+-                       * mark it close.
+-                       */
+-                      chcr_ktls_mark_tcb_close(tx_info);
+                       cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
+                                        tid, tx_info->ip_family);
+               }
+@@ -1668,54 +1649,6 @@ static void chcr_ktls_copy_record_in_skb(struct sk_buff 
*nskb,
+       refcount_add(nskb->truesize, &nskb->sk->sk_wmem_alloc);
+ }
+ 
+-/*
+- * chcr_ktls_update_snd_una:  Reset the SEND_UNA. It will be done to avoid
+- * sending the same segment again. It will discard the segment which is before
+- * the current tx max.
+- * @tx_info - driver specific tls info.
+- * @q - TX queue.
+- * return: NET_TX_OK/NET_XMIT_DROP.
+- */
+-static int chcr_ktls_update_snd_una(struct chcr_ktls_info *tx_info,
+-                                  struct sge_eth_txq *q)
+-{
+-      struct fw_ulptx_wr *wr;
+-      unsigned int ndesc;
+-      int credits;
+-      void *pos;
+-      u32 len;
+-
+-      len = sizeof(*wr) + roundup(CHCR_SET_TCB_FIELD_LEN, 16);
+-      ndesc = DIV_ROUND_UP(len, 64);
+-
+-      credits = chcr_txq_avail(&q->q) - ndesc;
+-      if (unlikely(credits < 0)) {
+-              chcr_eth_txq_stop(q);
+-              return NETDEV_TX_BUSY;
+-      }
+-
+-      pos = &q->q.desc[q->q.pidx];
+-
+-      wr = pos;
+-      /* ULPTX wr */
+-      wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
+-      wr->cookie = 0;
+-      /* fill len in wr field */
+-      wr->flowid_len16 = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(len, 16)));
+-
+-      pos += sizeof(*wr);
+-
+-      pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
+-                                       TCB_SND_UNA_RAW_W,
+-                                       TCB_SND_UNA_RAW_V(TCB_SND_UNA_RAW_M),
+-                                       TCB_SND_UNA_RAW_V(0), 0);
+-
+-      chcr_txq_advance(&q->q, ndesc);
+-      cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
+-
+-      return 0;
+-}
+-
+ /*
+  * chcr_end_part_handler: This handler will handle the record which
+  * is complete or if record's end part is received. T6 adapter has a issue 
that
+@@ -1740,7 +1673,9 @@ static int chcr_end_part_handler(struct chcr_ktls_info 
*tx_info,
+                                struct sge_eth_txq *q, u32 skb_offset,
+                                u32 tls_end_offset, bool last_wr)
+ {
++      bool free_skb_if_tx_fails = false;
+       struct sk_buff *nskb = NULL;
++
+       /* check if it is a complete record */
+       if (tls_end_offset == record->len) {
+               nskb = skb;
+@@ -1763,6 +1698,8 @@ static int chcr_end_part_handler(struct chcr_ktls_info 
*tx_info,
+ 
+               if (last_wr)
+                       dev_kfree_skb_any(skb);
++              else
++                      free_skb_if_tx_fails = true;
+ 
+               last_wr = true;
+ 
+@@ -1774,6 +1711,8 @@ static int chcr_end_part_handler(struct chcr_ktls_info 
*tx_info,
+                                      record->num_frags,
+                                      (last_wr && tcp_push_no_fin),
+                                      mss)) {
++              if (free_skb_if_tx_fails)
++                      dev_kfree_skb_any(skb);
+               goto out;
+       }
+       tx_info->prev_seq = record->end_seq;
+@@ -1910,11 +1849,6 @@ static int chcr_short_record_handler(struct 
chcr_ktls_info *tx_info,
+                       /* reset tcp_seq as per the prior_data_required len */
+                       tcp_seq -= prior_data_len;
+               }
+-              /* reset snd una, so the middle record won't send the already
+-               * sent part.
+-               */
+-              if (chcr_ktls_update_snd_una(tx_info, q))
+-                      goto out;
+               atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_middle_pkts);
+       } else {
+               atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_start_pkts);
+@@ -2015,12 +1949,11 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct 
net_device *dev)
+        * we will send the complete record again.
+        */
+ 
++      spin_lock_irqsave(&tx_ctx->base.lock, flags);
++
+       do {
+-              int i;
+ 
+               cxgb4_reclaim_completed_tx(adap, &q->q, true);
+-              /* lock taken */
+-              spin_lock_irqsave(&tx_ctx->base.lock, flags);
+               /* fetch the tls record */
+               record = tls_get_record(&tx_ctx->base, tcp_seq,
+                                       &tx_info->record_no);
+@@ -2079,11 +2012,11 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct 
net_device *dev)
+                                                   tls_end_offset, skb_offset,
+                                                   0);
+ 
+-                      spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
+                       if (ret) {
+                               /* free the refcount taken earlier */
+                               if (tls_end_offset < data_len)
+                                       dev_kfree_skb_any(skb);
++                              spin_unlock_irqrestore(&tx_ctx->base.lock, 
flags);
+                               goto out;
+                       }
+ 
+@@ -2093,16 +2026,6 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct 
net_device *dev)
+                       continue;
+               }
+ 
+-              /* increase page reference count of the record, so that there
+-               * won't be any chance of page free in middle if in case stack
+-               * receives ACK and try to delete the record.
+-               */
+-              for (i = 0; i < record->num_frags; i++)
+-                      __skb_frag_ref(&record->frags[i]);
+-              /* lock cleared */
+-              spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
+-
+-
+               /* if a tls record is finishing in this SKB */
+               if (tls_end_offset <= data_len) {
+                       ret = chcr_end_part_handler(tx_info, skb, record,
+@@ -2127,13 +2050,9 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct 
net_device *dev)
+                       data_len = 0;
+               }
+ 
+-              /* clear the frag ref count which increased locally before */
+-              for (i = 0; i < record->num_frags; i++) {
+-                      /* clear the frag ref count */
+-                      __skb_frag_unref(&record->frags[i]);
+-              }
+               /* if any failure, come out from the loop. */
+               if (ret) {
++                      spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
+                       if (th->fin)
+                               dev_kfree_skb_any(skb);
+ 
+@@ -2148,6 +2067,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct 
net_device *dev)
+ 
+       } while (data_len > 0);
+ 
++      spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
+       atomic64_inc(&port_stats->ktls_tx_encrypted_packets);
+       atomic64_add(skb_data_len, &port_stats->ktls_tx_encrypted_bytes);
+ 
+diff --git a/drivers/net/ethernet/davicom/dm9000.c 
b/drivers/net/ethernet/davicom/dm9000.c
+index ae09cac876028..afc4a103c5080 100644
+--- a/drivers/net/ethernet/davicom/dm9000.c
++++ b/drivers/net/ethernet/davicom/dm9000.c
+@@ -1474,8 +1474,10 @@ dm9000_probe(struct platform_device *pdev)
+ 
+       /* Init network device */
+       ndev = alloc_etherdev(sizeof(struct board_info));
+-      if (!ndev)
+-              return -ENOMEM;
++      if (!ndev) {
++              ret = -ENOMEM;
++              goto out_regulator_disable;
++      }
+ 
+       SET_NETDEV_DEV(ndev, &pdev->dev);
+ 
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c 
b/drivers/net/ethernet/ibm/ibmvnic.c
+index 4a4cb62b73320..8cc444684491a 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -1159,19 +1159,13 @@ static int __ibmvnic_open(struct net_device *netdev)
+ 
+       rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
+       if (rc) {
+-              for (i = 0; i < adapter->req_rx_queues; i++)
+-                      napi_disable(&adapter->napi[i]);
++              ibmvnic_napi_disable(adapter);
+               release_resources(adapter);
+               return rc;
+       }
+ 
+       netif_tx_start_all_queues(netdev);
+ 
+-      if (prev_state == VNIC_CLOSED) {
+-              for (i = 0; i < adapter->req_rx_queues; i++)
+-                      napi_schedule(&adapter->napi[i]);
+-      }
+-
+       adapter->state = VNIC_OPEN;
+       return rc;
+ }
+@@ -1942,7 +1936,7 @@ static int do_reset(struct ibmvnic_adapter *adapter,
+       u64 old_num_rx_queues, old_num_tx_queues;
+       u64 old_num_rx_slots, old_num_tx_slots;
+       struct net_device *netdev = adapter->netdev;
+-      int i, rc;
++      int rc;
+ 
+       netdev_dbg(adapter->netdev,
+                  "[S:%d FOP:%d] Reset reason %d, reset_state %d\n",
+@@ -2088,10 +2082,6 @@ static int do_reset(struct ibmvnic_adapter *adapter,
+       /* refresh device's multicast list */
+       ibmvnic_set_multi(netdev);
+ 
+-      /* kick napi */
+-      for (i = 0; i < adapter->req_rx_queues; i++)
+-              napi_schedule(&adapter->napi[i]);
+-
+       if (adapter->reset_reason == VNIC_RESET_FAILOVER ||
+           adapter->reset_reason == VNIC_RESET_MOBILITY) {
+               call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c 
b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index 7fab60128c76d..f0edea7cdbccc 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -11863,6 +11863,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
+ {
+       int err = 0;
+       int size;
++      u16 pow;
+ 
+       /* Set default capability flags */
+       pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
+@@ -11881,6 +11882,11 @@ static int i40e_sw_init(struct i40e_pf *pf)
+       pf->rss_table_size = pf->hw.func_caps.rss_table_size;
+       pf->rss_size_max = min_t(int, pf->rss_size_max,
+                                pf->hw.func_caps.num_tx_qp);
++
++      /* find the next higher power-of-2 of num cpus */
++      pow = roundup_pow_of_two(num_online_cpus());
++      pf->rss_size_max = min_t(int, pf->rss_size_max, pow);
++
+       if (pf->hw.func_caps.rss) {
+               pf->flags |= I40E_FLAG_RSS_ENABLED;
+               pf->alloc_rss_size = min_t(int, pf->rss_size_max,
+diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c 
b/drivers/net/ethernet/intel/ice/ice_dcb.c
+index 211ac6f907adb..28e834a128c07 100644
+--- a/drivers/net/ethernet/intel/ice/ice_dcb.c
++++ b/drivers/net/ethernet/intel/ice/ice_dcb.c
+@@ -747,8 +747,8 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp 
*cee_cfg,
+                  struct ice_port_info *pi)
+ {
+       u32 status, tlv_status = le32_to_cpu(cee_cfg->tlv_status);
+-      u32 ice_aqc_cee_status_mask, ice_aqc_cee_status_shift;
+-      u8 i, j, err, sync, oper, app_index, ice_app_sel_type;
++      u32 ice_aqc_cee_status_mask, ice_aqc_cee_status_shift, j;
++      u8 i, err, sync, oper, app_index, ice_app_sel_type;
+       u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio);
+       u16 ice_aqc_cee_app_mask, ice_aqc_cee_app_shift;
+       struct ice_dcbx_cfg *cmp_dcbcfg, *dcbcfg;
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 
b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+index 278fc866fad49..0b9fddbc5db4f 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+@@ -6896,6 +6896,11 @@ static int __maybe_unused ixgbe_resume(struct device 
*dev_d)
+ 
+       adapter->hw.hw_addr = adapter->io_addr;
+ 
++      err = pci_enable_device_mem(pdev);
++      if (err) {
++              e_dev_err("Cannot enable PCI device from suspend\n");
++              return err;
++      }
+       smp_mb__before_atomic();
+       clear_bit(__IXGBE_DISABLED, &adapter->state);
+       pci_set_master(pdev);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c 
b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
+index 308fd279669ec..89510cac46c22 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
+@@ -387,21 +387,6 @@ enum mlx5e_fec_supported_link_mode {
+                       *_policy = MLX5_GET(pplm_reg, _buf, 
fec_override_admin_##link); \
+       } while (0)
+ 
+-#define MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(buf, policy, write, link)         
        \
+-      do {                                                                    
        \
+-              unsigned long policy_long;                                      
        \
+-              u16 *__policy = &(policy);                                      
        \
+-              bool _write = (write);                                          
        \
+-                                                                              
        \
+-              policy_long = *__policy;                                        
        \
+-              if (_write && *__policy)                                        
        \
+-                      *__policy = find_first_bit(&policy_long,                
        \
+-                                                 sizeof(policy_long) * 
BITS_PER_BYTE);\
+-              MLX5E_FEC_OVERRIDE_ADMIN_POLICY(buf, *__policy, _write, link);  
        \
+-              if (!_write && *__policy)                                       
        \
+-                      *__policy = 1 << *__policy;                             
        \
+-      } while (0)
+-
+ /* get/set FEC admin field for a given speed */
+ static int mlx5e_fec_admin_field(u32 *pplm, u16 *fec_policy, bool write,
+                                enum mlx5e_fec_supported_link_mode link_mode)
+@@ -423,16 +408,16 @@ static int mlx5e_fec_admin_field(u32 *pplm, u16 
*fec_policy, bool write,
+               MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 100g);
+               break;
+       case MLX5E_FEC_SUPPORTED_LINK_MODE_50G_1X:
+-              MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 
50g_1x);
++              MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 
50g_1x);
+               break;
+       case MLX5E_FEC_SUPPORTED_LINK_MODE_100G_2X:
+-              MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 
100g_2x);
++              MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 
100g_2x);
+               break;
+       case MLX5E_FEC_SUPPORTED_LINK_MODE_200G_4X:
+-              MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 
200g_4x);
++              MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 
200g_4x);
+               break;
+       case MLX5E_FEC_SUPPORTED_LINK_MODE_400G_8X:
+-              MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 
400g_8x);
++              MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 
400g_8x);
+               break;
+       default:
+               return -EINVAL;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 
b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+index 930f19c598bb6..3079a82f1f412 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -2196,6 +2196,9 @@ static int mlx5e_flower_parse_meta(struct net_device 
*filter_dev,
+               return 0;
+ 
+       flow_rule_match_meta(rule, &match);
++      if (!match.mask->ingress_ifindex)
++              return 0;
++
+       if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
+               NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask");
+               return -EOPNOTSUPP;
+diff --git a/drivers/net/ethernet/realtek/r8169_main.c 
b/drivers/net/ethernet/realtek/r8169_main.c
+index d634da20b4f94..3bb36f4a984e8 100644
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -2378,13 +2378,14 @@ static void r8168b_1_hw_jumbo_disable(struct 
rtl8169_private *tp)
+ static void rtl_jumbo_config(struct rtl8169_private *tp)
+ {
+       bool jumbo = tp->dev->mtu > ETH_DATA_LEN;
++      int readrq = 4096;
+ 
+       rtl_unlock_config_regs(tp);
+       switch (tp->mac_version) {
+       case RTL_GIGA_MAC_VER_12:
+       case RTL_GIGA_MAC_VER_17:
+               if (jumbo) {
+-                      pcie_set_readrq(tp->pci_dev, 512);
++                      readrq = 512;
+                       r8168b_1_hw_jumbo_enable(tp);
+               } else {
+                       r8168b_1_hw_jumbo_disable(tp);
+@@ -2392,7 +2393,7 @@ static void rtl_jumbo_config(struct rtl8169_private *tp)
+               break;
+       case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_26:
+               if (jumbo) {
+-                      pcie_set_readrq(tp->pci_dev, 512);
++                      readrq = 512;
+                       r8168c_hw_jumbo_enable(tp);
+               } else {
+                       r8168c_hw_jumbo_disable(tp);
+@@ -2417,8 +2418,15 @@ static void rtl_jumbo_config(struct rtl8169_private *tp)
+       }
+       rtl_lock_config_regs(tp);
+ 
+-      if (!jumbo && pci_is_pcie(tp->pci_dev) && tp->supports_gmii)
+-              pcie_set_readrq(tp->pci_dev, 4096);
++      if (pci_is_pcie(tp->pci_dev) && tp->supports_gmii)
++              pcie_set_readrq(tp->pci_dev, readrq);
++
++      /* Chip doesn't support pause in jumbo mode */
++      linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT,
++                       tp->phydev->advertising, !jumbo);
++      linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
++                       tp->phydev->advertising, !jumbo);
++      phy_start_aneg(tp->phydev);
+ }
+ 
+ DECLARE_RTL_COND(rtl_chipcmd_cond)
+@@ -4710,8 +4718,6 @@ static int r8169_phy_connect(struct rtl8169_private *tp)
+       if (!tp->supports_gmii)
+               phy_set_max_speed(phydev, SPEED_100);
+ 
+-      phy_support_asym_pause(phydev);
+-
+       phy_attached_info(phydev);
+ 
+       return 0;
+diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
+index 5dbdaf0f5f09c..823a89354466d 100644
+--- a/drivers/net/phy/marvell.c
++++ b/drivers/net/phy/marvell.c
+@@ -2913,9 +2913,35 @@ static struct phy_driver marvell_drivers[] = {
+               .get_stats = marvell_get_stats,
+       },
+       {
+-              .phy_id = MARVELL_PHY_ID_88E6390,
++              .phy_id = MARVELL_PHY_ID_88E6341_FAMILY,
+               .phy_id_mask = MARVELL_PHY_ID_MASK,
+-              .name = "Marvell 88E6390",
++              .name = "Marvell 88E6341 Family",
++              /* PHY_GBIT_FEATURES */
++              .flags = PHY_POLL_CABLE_TEST,
++              .probe = m88e1510_probe,
++              .config_init = marvell_config_init,
++              .config_aneg = m88e6390_config_aneg,
++              .read_status = marvell_read_status,
++              .ack_interrupt = marvell_ack_interrupt,
++              .config_intr = marvell_config_intr,
++              .did_interrupt = m88e1121_did_interrupt,
++              .resume = genphy_resume,
++              .suspend = genphy_suspend,
++              .read_page = marvell_read_page,
++              .write_page = marvell_write_page,
++              .get_sset_count = marvell_get_sset_count,
++              .get_strings = marvell_get_strings,
++              .get_stats = marvell_get_stats,
++              .get_tunable = m88e1540_get_tunable,
++              .set_tunable = m88e1540_set_tunable,
++              .cable_test_start = marvell_vct7_cable_test_start,
++              .cable_test_tdr_start = marvell_vct5_cable_test_tdr_start,
++              .cable_test_get_status = marvell_vct7_cable_test_get_status,
++      },
++      {
++              .phy_id = MARVELL_PHY_ID_88E6390_FAMILY,
++              .phy_id_mask = MARVELL_PHY_ID_MASK,
++              .name = "Marvell 88E6390 Family",
+               /* PHY_GBIT_FEATURES */
+               .flags = PHY_POLL_CABLE_TEST,
+               .probe = m88e6390_probe,
+@@ -3001,7 +3027,8 @@ static struct mdio_device_id __maybe_unused 
marvell_tbl[] = {
+       { MARVELL_PHY_ID_88E1540, MARVELL_PHY_ID_MASK },
+       { MARVELL_PHY_ID_88E1545, MARVELL_PHY_ID_MASK },
+       { MARVELL_PHY_ID_88E3016, MARVELL_PHY_ID_MASK },
+-      { MARVELL_PHY_ID_88E6390, MARVELL_PHY_ID_MASK },
++      { MARVELL_PHY_ID_88E6341_FAMILY, MARVELL_PHY_ID_MASK },
++      { MARVELL_PHY_ID_88E6390_FAMILY, MARVELL_PHY_ID_MASK },
+       { MARVELL_PHY_ID_88E1340S, MARVELL_PHY_ID_MASK },
+       { MARVELL_PHY_ID_88E1548P, MARVELL_PHY_ID_MASK },
+       { }
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c 
b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+index fa32f9045c0cb..500fdb0b6c42b 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+@@ -684,6 +684,7 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
+       IWL_DEV_INFO(0x4DF0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL),
+       IWL_DEV_INFO(0x4DF0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
+       IWL_DEV_INFO(0x4DF0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
++      IWL_DEV_INFO(0x4DF0, 0x6074, iwl_ax201_cfg_qu_hr, NULL),
+ 
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c 
b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+index 50133c09a7805..133371385056d 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+@@ -1181,6 +1181,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
+       u32 cmd_pos;
+       const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
+       u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
++      unsigned long flags;
+ 
+       if (WARN(!trans->wide_cmd_header &&
+                group_id > IWL_ALWAYS_LONG_GROUP,
+@@ -1264,10 +1265,10 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans 
*trans,
+               goto free_dup_buf;
+       }
+ 
+-      spin_lock_bh(&txq->lock);
++      spin_lock_irqsave(&txq->lock, flags);
+ 
+       if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
+-              spin_unlock_bh(&txq->lock);
++              spin_unlock_irqrestore(&txq->lock, flags);
+ 
+               IWL_ERR(trans, "No space in command queue\n");
+               iwl_op_mode_cmd_queue_full(trans->op_mode);
+@@ -1427,7 +1428,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
+  unlock_reg:
+       spin_unlock(&trans_pcie->reg_lock);
+  out:
+-      spin_unlock_bh(&txq->lock);
++      spin_unlock_irqrestore(&txq->lock, flags);
+  free_dup_buf:
+       if (idx < 0)
+               kfree(dup_buf);
+diff --git a/drivers/net/wireless/virt_wifi.c 
b/drivers/net/wireless/virt_wifi.c
+index c878097f0ddaf..1df959532c7d3 100644
+--- a/drivers/net/wireless/virt_wifi.c
++++ b/drivers/net/wireless/virt_wifi.c
+@@ -12,6 +12,7 @@
+ #include <net/cfg80211.h>
+ #include <net/rtnetlink.h>
+ #include <linux/etherdevice.h>
++#include <linux/math64.h>
+ #include <linux/module.h>
+ 
+ static struct wiphy *common_wiphy;
+@@ -168,11 +169,11 @@ static void virt_wifi_scan_result(struct work_struct 
*work)
+                            scan_result.work);
+       struct wiphy *wiphy = priv_to_wiphy(priv);
+       struct cfg80211_scan_info scan_info = { .aborted = false };
++      u64 tsf = div_u64(ktime_get_boottime_ns(), 1000);
+ 
+       informed_bss = cfg80211_inform_bss(wiphy, &channel_5ghz,
+                                          CFG80211_BSS_FTYPE_PRESP,
+-                                         fake_router_bssid,
+-                                         ktime_get_boottime_ns(),
++                                         fake_router_bssid, tsf,
+                                          WLAN_CAPABILITY_ESS, 0,
+                                          (void *)&ssid, sizeof(ssid),
+                                          DBM_TO_MBM(-50), GFP_KERNEL);
+diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
+index ef23119db5746..e05cc9f8a9fd1 100644
+--- a/drivers/nvdimm/region_devs.c
++++ b/drivers/nvdimm/region_devs.c
+@@ -1239,6 +1239,11 @@ int nvdimm_has_flush(struct nd_region *nd_region)
+                       || !IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API))
+               return -ENXIO;
+ 
++      /* Test if an explicit flush function is defined */
++      if (test_bit(ND_REGION_ASYNC, &nd_region->flags) && nd_region->flush)
++              return 1;
++
++      /* Test if any flush hints for the region are available */
+       for (i = 0; i < nd_region->ndr_mappings; i++) {
+               struct nd_mapping *nd_mapping = &nd_region->mapping[i];
+               struct nvdimm *nvdimm = nd_mapping->nvdimm;
+@@ -1249,8 +1254,8 @@ int nvdimm_has_flush(struct nd_region *nd_region)
+       }
+ 
+       /*
+-       * The platform defines dimm devices without hints, assume
+-       * platform persistence mechanism like ADR
++       * The platform defines dimm devices without hints nor explicit flush,
++       * assume platform persistence mechanism like ADR
+        */
+       return 0;
+ }
+diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
+index 024e5a550759c..8b9a39077dbab 100644
+--- a/drivers/scsi/libsas/sas_ata.c
++++ b/drivers/scsi/libsas/sas_ata.c
+@@ -201,18 +201,17 @@ static unsigned int sas_ata_qc_issue(struct 
ata_queued_cmd *qc)
+               memcpy(task->ata_task.atapi_packet, qc->cdb, qc->dev->cdb_len);
+               task->total_xfer_len = qc->nbytes;
+               task->num_scatter = qc->n_elem;
++              task->data_dir = qc->dma_dir;
++      } else if (qc->tf.protocol == ATA_PROT_NODATA) {
++              task->data_dir = DMA_NONE;
+       } else {
+               for_each_sg(qc->sg, sg, qc->n_elem, si)
+                       xfer += sg_dma_len(sg);
+ 
+               task->total_xfer_len = xfer;
+               task->num_scatter = si;
+-      }
+-
+-      if (qc->tf.protocol == ATA_PROT_NODATA)
+-              task->data_dir = DMA_NONE;
+-      else
+               task->data_dir = qc->dma_dir;
++      }
+       task->scatter = qc->sg;
+       task->ata_task.retry_count = 1;
+       task->task_state_flags = SAS_TASK_STATE_PENDING;
+diff --git a/drivers/scsi/scsi_transport_srp.c 
b/drivers/scsi/scsi_transport_srp.c
+index 1e939a2a387f3..98a34ed10f1a0 100644
+--- a/drivers/scsi/scsi_transport_srp.c
++++ b/drivers/scsi/scsi_transport_srp.c
+@@ -541,7 +541,7 @@ int srp_reconnect_rport(struct srp_rport *rport)
+       res = mutex_lock_interruptible(&rport->mutex);
+       if (res)
+               goto out;
+-      if (rport->state != SRP_RPORT_FAIL_FAST)
++      if (rport->state != SRP_RPORT_FAIL_FAST && rport->state != 
SRP_RPORT_LOST)
+               /*
+                * sdev state must be SDEV_TRANSPORT_OFFLINE, transition
+                * to SDEV_BLOCK is illegal. Calling scsi_target_unblock()
+diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
+index 706de3ef94bbf..465f646e33298 100644
+--- a/drivers/vfio/pci/vfio_pci.c
++++ b/drivers/vfio/pci/vfio_pci.c
+@@ -1658,6 +1658,8 @@ static int vfio_pci_mmap(void *device_data, struct 
vm_area_struct *vma)
+ 
+       index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
+ 
++      if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
++              return -EINVAL;
+       if (vma->vm_end < vma->vm_start)
+               return -EINVAL;
+       if ((vma->vm_flags & VM_SHARED) == 0)
+@@ -1666,7 +1668,7 @@ static int vfio_pci_mmap(void *device_data, struct 
vm_area_struct *vma)
+               int regnum = index - VFIO_PCI_NUM_REGIONS;
+               struct vfio_pci_region *region = vdev->region + regnum;
+ 
+-              if (region && region->ops && region->ops->mmap &&
++              if (region->ops && region->ops->mmap &&
+                   (region->flags & VFIO_REGION_INFO_FLAG_MMAP))
+                       return region->ops->mmap(vdev, region, vma);
+               return -EINVAL;
+diff --git a/fs/readdir.c b/fs/readdir.c
+index 19434b3c982cd..09e8ed7d41614 100644
+--- a/fs/readdir.c
++++ b/fs/readdir.c
+@@ -150,6 +150,9 @@ static int fillonedir(struct dir_context *ctx, const char 
*name, int namlen,
+ 
+       if (buf->result)
+               return -EINVAL;
++      buf->result = verify_dirent_name(name, namlen);
++      if (buf->result < 0)
++              return buf->result;
+       d_ino = ino;
+       if (sizeof(d_ino) < sizeof(ino) && d_ino != ino) {
+               buf->result = -EOVERFLOW;
+@@ -405,6 +408,9 @@ static int compat_fillonedir(struct dir_context *ctx, 
const char *name,
+ 
+       if (buf->result)
+               return -EINVAL;
++      buf->result = verify_dirent_name(name, namlen);
++      if (buf->result < 0)
++              return buf->result;
+       d_ino = ino;
+       if (sizeof(d_ino) < sizeof(ino) && d_ino != ino) {
+               buf->result = -EOVERFLOW;
+diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h
+index ff7b7607c8cf5..f5cf19d197763 100644
+--- a/include/linux/marvell_phy.h
++++ b/include/linux/marvell_phy.h
+@@ -25,11 +25,12 @@
+ #define MARVELL_PHY_ID_88X3310                0x002b09a0
+ #define MARVELL_PHY_ID_88E2110                0x002b09b0
+ 
+-/* The MV88e6390 Ethernet switch contains embedded PHYs. These PHYs do
++/* These Ethernet switch families contain embedded PHYs, but they do
+  * not have a model ID. So the switch driver traps reads to the ID2
+  * register and returns the switch family ID
+  */
+-#define MARVELL_PHY_ID_88E6390                0x01410f90
++#define MARVELL_PHY_ID_88E6341_FAMILY 0x01410f41
++#define MARVELL_PHY_ID_88E6390_FAMILY 0x01410f90
+ 
+ #define MARVELL_PHY_FAMILY_ID(id)     ((id) >> 4)
+ 
+diff --git a/include/linux/netfilter_arp/arp_tables.h 
b/include/linux/netfilter_arp/arp_tables.h
+index 7d3537c40ec95..26a13294318cf 100644
+--- a/include/linux/netfilter_arp/arp_tables.h
++++ b/include/linux/netfilter_arp/arp_tables.h
+@@ -52,8 +52,9 @@ extern void *arpt_alloc_initial_table(const struct xt_table 
*);
+ int arpt_register_table(struct net *net, const struct xt_table *table,
+                       const struct arpt_replace *repl,
+                       const struct nf_hook_ops *ops, struct xt_table **res);
+-void arpt_unregister_table(struct net *net, struct xt_table *table,
+-                         const struct nf_hook_ops *ops);
++void arpt_unregister_table(struct net *net, struct xt_table *table);
++void arpt_unregister_table_pre_exit(struct net *net, struct xt_table *table,
++                                  const struct nf_hook_ops *ops);
+ extern unsigned int arpt_do_table(struct sk_buff *skb,
+                                 const struct nf_hook_state *state,
+                                 struct xt_table *table);
+diff --git a/include/linux/netfilter_bridge/ebtables.h 
b/include/linux/netfilter_bridge/ebtables.h
+index 2f5c4e6ecd8a4..3a956145a25cb 100644
+--- a/include/linux/netfilter_bridge/ebtables.h
++++ b/include/linux/netfilter_bridge/ebtables.h
+@@ -110,8 +110,9 @@ extern int ebt_register_table(struct net *net,
+                             const struct ebt_table *table,
+                             const struct nf_hook_ops *ops,
+                             struct ebt_table **res);
+-extern void ebt_unregister_table(struct net *net, struct ebt_table *table,
+-                               const struct nf_hook_ops *);
++extern void ebt_unregister_table(struct net *net, struct ebt_table *table);
++void ebt_unregister_table_pre_exit(struct net *net, const char *tablename,
++                                 const struct nf_hook_ops *ops);
+ extern unsigned int ebt_do_table(struct sk_buff *skb,
+                                const struct nf_hook_state *state,
+                                struct ebt_table *table);
+diff --git a/include/uapi/linux/idxd.h b/include/uapi/linux/idxd.h
+index fdcdfe414223e..9d9ecc0f4c383 100644
+--- a/include/uapi/linux/idxd.h
++++ b/include/uapi/linux/idxd.h
+@@ -187,8 +187,8 @@ struct dsa_completion_record {
+                       uint32_t        rsvd2:8;
+               };
+ 
+-              uint16_t        delta_rec_size;
+-              uint16_t        crc_val;
++              uint32_t        delta_rec_size;
++              uint32_t        crc_val;
+ 
+               /* DIF check & strip */
+               struct {
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 12cd2997f982a..3370f0d476e97 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -5328,12 +5328,26 @@ static struct bpf_insn_aux_data *cur_aux(struct 
bpf_verifier_env *env)
+       return &env->insn_aux_data[env->insn_idx];
+ }
+ 
++enum {
++      REASON_BOUNDS   = -1,
++      REASON_TYPE     = -2,
++      REASON_PATHS    = -3,
++      REASON_LIMIT    = -4,
++      REASON_STACK    = -5,
++};
++
+ static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
+-                            u32 *ptr_limit, u8 opcode, bool off_is_neg)
++                            const struct bpf_reg_state *off_reg,
++                            u32 *alu_limit, u8 opcode)
+ {
++      bool off_is_neg = off_reg->smin_value < 0;
+       bool mask_to_left = (opcode == BPF_ADD &&  off_is_neg) ||
+                           (opcode == BPF_SUB && !off_is_neg);
+-      u32 off, max;
++      u32 off, max = 0, ptr_limit = 0;
++
++      if (!tnum_is_const(off_reg->var_off) &&
++          (off_reg->smin_value < 0) != (off_reg->smax_value < 0))
++              return REASON_BOUNDS;
+ 
+       switch (ptr_reg->type) {
+       case PTR_TO_STACK:
+@@ -5346,22 +5360,27 @@ static int retrieve_ptr_limit(const struct 
bpf_reg_state *ptr_reg,
+                */
+               off = ptr_reg->off + ptr_reg->var_off.value;
+               if (mask_to_left)
+-                      *ptr_limit = MAX_BPF_STACK + off;
++                      ptr_limit = MAX_BPF_STACK + off;
+               else
+-                      *ptr_limit = -off - 1;
+-              return *ptr_limit >= max ? -ERANGE : 0;
++                      ptr_limit = -off - 1;
++              break;
+       case PTR_TO_MAP_VALUE:
+               max = ptr_reg->map_ptr->value_size;
+               if (mask_to_left) {
+-                      *ptr_limit = ptr_reg->umax_value + ptr_reg->off;
++                      ptr_limit = ptr_reg->umax_value + ptr_reg->off;
+               } else {
+                       off = ptr_reg->smin_value + ptr_reg->off;
+-                      *ptr_limit = ptr_reg->map_ptr->value_size - off - 1;
++                      ptr_limit = ptr_reg->map_ptr->value_size - off - 1;
+               }
+-              return *ptr_limit >= max ? -ERANGE : 0;
++              break;
+       default:
+-              return -EINVAL;
++              return REASON_TYPE;
+       }
++
++      if (ptr_limit >= max)
++              return REASON_LIMIT;
++      *alu_limit = ptr_limit;
++      return 0;
+ }
+ 
+ static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env,
+@@ -5379,7 +5398,7 @@ static int update_alu_sanitation_state(struct 
bpf_insn_aux_data *aux,
+       if (aux->alu_state &&
+           (aux->alu_state != alu_state ||
+            aux->alu_limit != alu_limit))
+-              return -EACCES;
++              return REASON_PATHS;
+ 
+       /* Corresponding fixup done in fixup_bpf_calls(). */
+       aux->alu_state = alu_state;
+@@ -5398,14 +5417,20 @@ static int sanitize_val_alu(struct bpf_verifier_env 
*env,
+       return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0);
+ }
+ 
++static bool sanitize_needed(u8 opcode)
++{
++      return opcode == BPF_ADD || opcode == BPF_SUB;
++}
++
+ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
+                           struct bpf_insn *insn,
+                           const struct bpf_reg_state *ptr_reg,
+-                          struct bpf_reg_state *dst_reg,
+-                          bool off_is_neg)
++                          const struct bpf_reg_state *off_reg,
++                          struct bpf_reg_state *dst_reg)
+ {
+       struct bpf_verifier_state *vstate = env->cur_state;
+       struct bpf_insn_aux_data *aux = cur_aux(env);
++      bool off_is_neg = off_reg->smin_value < 0;
+       bool ptr_is_dst_reg = ptr_reg == dst_reg;
+       u8 opcode = BPF_OP(insn->code);
+       u32 alu_state, alu_limit;
+@@ -5427,7 +5452,7 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
+       alu_state |= ptr_is_dst_reg ?
+                    BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
+ 
+-      err = retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg);
++      err = retrieve_ptr_limit(ptr_reg, off_reg, &alu_limit, opcode);
+       if (err < 0)
+               return err;
+ 
+@@ -5451,7 +5476,46 @@ do_sim:
+       ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
+       if (!ptr_is_dst_reg && ret)
+               *dst_reg = tmp;
+-      return !ret ? -EFAULT : 0;
++      return !ret ? REASON_STACK : 0;
++}
++
++static int sanitize_err(struct bpf_verifier_env *env,
++                      const struct bpf_insn *insn, int reason,
++                      const struct bpf_reg_state *off_reg,
++                      const struct bpf_reg_state *dst_reg)
++{
++      static const char *err = "pointer arithmetic with it prohibited for 
!root";
++      const char *op = BPF_OP(insn->code) == BPF_ADD ? "add" : "sub";
++      u32 dst = insn->dst_reg, src = insn->src_reg;
++
++      switch (reason) {
++      case REASON_BOUNDS:
++              verbose(env, "R%d has unknown scalar with mixed signed bounds, 
%s\n",
++                      off_reg == dst_reg ? dst : src, err);
++              break;
++      case REASON_TYPE:
++              verbose(env, "R%d has pointer with unsupported alu operation, 
%s\n",
++                      off_reg == dst_reg ? src : dst, err);
++              break;
++      case REASON_PATHS:
++              verbose(env, "R%d tried to %s from different maps, paths or 
scalars, %s\n",
++                      dst, op, err);
++              break;
++      case REASON_LIMIT:
++              verbose(env, "R%d tried to %s beyond pointer bounds, %s\n",
++                      dst, op, err);
++              break;
++      case REASON_STACK:
++              verbose(env, "R%d could not be pushed for speculative 
verification, %s\n",
++                      dst, err);
++              break;
++      default:
++              verbose(env, "verifier internal error: unknown reason (%d)\n",
++                      reason);
++              break;
++      }
++
++      return -EACCES;
+ }
+ 
+ /* Handles arithmetic on a pointer and a scalar: computes new min/max and 
var_off.
+@@ -5472,8 +5536,8 @@ static int adjust_ptr_min_max_vals(struct 
bpf_verifier_env *env,
+           smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
+       u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
+           umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
+-      u32 dst = insn->dst_reg, src = insn->src_reg;
+       u8 opcode = BPF_OP(insn->code);
++      u32 dst = insn->dst_reg;
+       int ret;
+ 
+       dst_reg = &regs[dst];
+@@ -5521,13 +5585,6 @@ static int adjust_ptr_min_max_vals(struct 
bpf_verifier_env *env,
+               verbose(env, "R%d pointer arithmetic on %s prohibited\n",
+                       dst, reg_type_str[ptr_reg->type]);
+               return -EACCES;
+-      case PTR_TO_MAP_VALUE:
+-              if (!env->allow_ptr_leaks && !known && (smin_val < 0) != 
(smax_val < 0)) {
+-                      verbose(env, "R%d has unknown scalar with mixed signed 
bounds, pointer arithmetic with it prohibited for !root\n",
+-                              off_reg == dst_reg ? dst : src);
+-                      return -EACCES;
+-              }
+-              fallthrough;
+       default:
+               break;
+       }
+@@ -5547,11 +5604,10 @@ static int adjust_ptr_min_max_vals(struct 
bpf_verifier_env *env,
+ 
+       switch (opcode) {
+       case BPF_ADD:
+-              ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 
0);
+-              if (ret < 0) {
+-                      verbose(env, "R%d tried to add from different maps, 
paths, or prohibited types\n", dst);
+-                      return ret;
+-              }
++              ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg);
++              if (ret < 0)
++                      return sanitize_err(env, insn, ret, off_reg, dst_reg);
++
+               /* We can take a fixed offset as long as it doesn't overflow
+                * the s32 'off' field
+                */
+@@ -5602,11 +5658,10 @@ static int adjust_ptr_min_max_vals(struct 
bpf_verifier_env *env,
+               }
+               break;
+       case BPF_SUB:
+-              ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 
0);
+-              if (ret < 0) {
+-                      verbose(env, "R%d tried to sub from different maps, 
paths, or prohibited types\n", dst);
+-                      return ret;
+-              }
++              ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg);
++              if (ret < 0)
++                      return sanitize_err(env, insn, ret, off_reg, dst_reg);
++
+               if (dst_reg == off_reg) {
+                       /* scalar -= pointer.  Creates an unknown scalar */
+                       verbose(env, "R%d tried to subtract pointer from 
scalar\n",
+@@ -6296,9 +6351,8 @@ static int adjust_scalar_min_max_vals(struct 
bpf_verifier_env *env,
+       s32 s32_min_val, s32_max_val;
+       u32 u32_min_val, u32_max_val;
+       u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
+-      u32 dst = insn->dst_reg;
+-      int ret;
+       bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64);
++      int ret;
+ 
+       smin_val = src_reg.smin_value;
+       smax_val = src_reg.smax_value;
+@@ -6340,6 +6394,12 @@ static int adjust_scalar_min_max_vals(struct 
bpf_verifier_env *env,
+               return 0;
+       }
+ 
++      if (sanitize_needed(opcode)) {
++              ret = sanitize_val_alu(env, insn);
++              if (ret < 0)
++                      return sanitize_err(env, insn, ret, NULL, NULL);
++      }
++
+       /* Calculate sign/unsigned bounds and tnum for alu32 and alu64 bit ops.
+        * There are two classes of instructions: The first class we track both
+        * alu32 and alu64 sign/unsigned bounds independently this provides the
+@@ -6356,21 +6416,11 @@ static int adjust_scalar_min_max_vals(struct 
bpf_verifier_env *env,
+        */
+       switch (opcode) {
+       case BPF_ADD:
+-              ret = sanitize_val_alu(env, insn);
+-              if (ret < 0) {
+-                      verbose(env, "R%d tried to add from different pointers 
or scalars\n", dst);
+-                      return ret;
+-              }
+               scalar32_min_max_add(dst_reg, &src_reg);
+               scalar_min_max_add(dst_reg, &src_reg);
+               dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
+               break;
+       case BPF_SUB:
+-              ret = sanitize_val_alu(env, insn);
+-              if (ret < 0) {
+-                      verbose(env, "R%d tried to sub from different pointers 
or scalars\n", dst);
+-                      return ret;
+-              }
+               scalar32_min_max_sub(dst_reg, &src_reg);
+               scalar_min_max_sub(dst_reg, &src_reg);
+               dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off);
+diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
+index eead7efbe7e5d..38d7c03e694cd 100644
+--- a/kernel/locking/lockdep.c
++++ b/kernel/locking/lockdep.c
+@@ -930,7 +930,8 @@ static bool assign_lock_key(struct lockdep_map *lock)
+               /* Debug-check: all keys must be persistent! */
+               debug_locks_off();
+               pr_err("INFO: trying to register non-static key.\n");
+-              pr_err("the code is fine but needs lockdep annotation.\n");
++              pr_err("The code is fine but needs lockdep annotation, or 
maybe\n");
++              pr_err("you didn't initialize this object before use?\n");
+               pr_err("turning off the locking correctness validator.\n");
+               dump_stack();
+               return false;
+diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
+index c789b39ed5271..dcf4a9028e165 100644
+--- a/lib/Kconfig.debug
++++ b/lib/Kconfig.debug
+@@ -1302,7 +1302,7 @@ config LOCKDEP
+       bool
+       depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
+       select STACKTRACE
+-      select FRAME_POINTER if !MIPS && !PPC && !ARM && !S390 && !MICROBLAZE 
&& !ARC && !X86
++      depends on FRAME_POINTER || MIPS || PPC || S390 || MICROBLAZE || ARM || 
ARC || X86
+       select KALLSYMS
+       select KALLSYMS_ALL
+ 
+@@ -1596,7 +1596,7 @@ config LATENCYTOP
+       depends on DEBUG_KERNEL
+       depends on STACKTRACE_SUPPORT
+       depends on PROC_FS
+-      select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM 
&& !ARC && !X86
++      depends on FRAME_POINTER || MIPS || PPC || S390 || MICROBLAZE || ARM || 
ARC || X86
+       select KALLSYMS
+       select KALLSYMS_ALL
+       select STACKTRACE
+@@ -1849,7 +1849,7 @@ config FAULT_INJECTION_STACKTRACE_FILTER
+       depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
+       depends on !X86_64
+       select STACKTRACE
+-      select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM 
&& !ARC && !X86
++      depends on FRAME_POINTER || MIPS || PPC || S390 || MICROBLAZE || ARM || 
ARC || X86
+       help
+         Provide stacktrace filter for fault-injection capabilities
+ 
+diff --git a/mm/ptdump.c b/mm/ptdump.c
+index ba88ec43ff218..93f2f63dc52dc 100644
+--- a/mm/ptdump.c
++++ b/mm/ptdump.c
+@@ -108,7 +108,7 @@ static int ptdump_pte_entry(pte_t *pte, unsigned long addr,
+                           unsigned long next, struct mm_walk *walk)
+ {
+       struct ptdump_state *st = walk->private;
+-      pte_t val = READ_ONCE(*pte);
++      pte_t val = ptep_get(pte);
+ 
+       if (st->effective_prot)
+               st->effective_prot(st, 4, pte_val(val));
+diff --git a/net/bridge/netfilter/ebtable_broute.c 
b/net/bridge/netfilter/ebtable_broute.c
+index 66e7af1654943..32bc2821027f3 100644
+--- a/net/bridge/netfilter/ebtable_broute.c
++++ b/net/bridge/netfilter/ebtable_broute.c
+@@ -105,14 +105,20 @@ static int __net_init broute_net_init(struct net *net)
+                                 &net->xt.broute_table);
+ }
+ 
++static void __net_exit broute_net_pre_exit(struct net *net)
++{
++      ebt_unregister_table_pre_exit(net, "broute", &ebt_ops_broute);
++}
++
+ static void __net_exit broute_net_exit(struct net *net)
+ {
+-      ebt_unregister_table(net, net->xt.broute_table, &ebt_ops_broute);
++      ebt_unregister_table(net, net->xt.broute_table);
+ }
+ 
+ static struct pernet_operations broute_net_ops = {
+       .init = broute_net_init,
+       .exit = broute_net_exit,
++      .pre_exit = broute_net_pre_exit,
+ };
+ 
+ static int __init ebtable_broute_init(void)
+diff --git a/net/bridge/netfilter/ebtable_filter.c 
b/net/bridge/netfilter/ebtable_filter.c
+index 78cb9b21022d0..bcf982e12f16b 100644
+--- a/net/bridge/netfilter/ebtable_filter.c
++++ b/net/bridge/netfilter/ebtable_filter.c
+@@ -99,14 +99,20 @@ static int __net_init frame_filter_net_init(struct net 
*net)
+                                 &net->xt.frame_filter);
+ }
+ 
++static void __net_exit frame_filter_net_pre_exit(struct net *net)
++{
++      ebt_unregister_table_pre_exit(net, "filter", ebt_ops_filter);
++}
++
+ static void __net_exit frame_filter_net_exit(struct net *net)
+ {
+-      ebt_unregister_table(net, net->xt.frame_filter, ebt_ops_filter);
++      ebt_unregister_table(net, net->xt.frame_filter);
+ }
+ 
+ static struct pernet_operations frame_filter_net_ops = {
+       .init = frame_filter_net_init,
+       .exit = frame_filter_net_exit,
++      .pre_exit = frame_filter_net_pre_exit,
+ };
+ 
+ static int __init ebtable_filter_init(void)
+diff --git a/net/bridge/netfilter/ebtable_nat.c 
b/net/bridge/netfilter/ebtable_nat.c
+index 0888936ef8537..0d092773f8161 100644
+--- a/net/bridge/netfilter/ebtable_nat.c
++++ b/net/bridge/netfilter/ebtable_nat.c
+@@ -99,14 +99,20 @@ static int __net_init frame_nat_net_init(struct net *net)
+                                 &net->xt.frame_nat);
+ }
+ 
++static void __net_exit frame_nat_net_pre_exit(struct net *net)
++{
++      ebt_unregister_table_pre_exit(net, "nat", ebt_ops_nat);
++}
++
+ static void __net_exit frame_nat_net_exit(struct net *net)
+ {
+-      ebt_unregister_table(net, net->xt.frame_nat, ebt_ops_nat);
++      ebt_unregister_table(net, net->xt.frame_nat);
+ }
+ 
+ static struct pernet_operations frame_nat_net_ops = {
+       .init = frame_nat_net_init,
+       .exit = frame_nat_net_exit,
++      .pre_exit = frame_nat_net_pre_exit,
+ };
+ 
+ static int __init ebtable_nat_init(void)
+diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
+index ebe33b60efd6b..d481ff24a1501 100644
+--- a/net/bridge/netfilter/ebtables.c
++++ b/net/bridge/netfilter/ebtables.c
+@@ -1232,10 +1232,34 @@ out:
+       return ret;
+ }
+ 
+-void ebt_unregister_table(struct net *net, struct ebt_table *table,
+-                        const struct nf_hook_ops *ops)
++static struct ebt_table *__ebt_find_table(struct net *net, const char *name)
++{
++      struct ebt_table *t;
++
++      mutex_lock(&ebt_mutex);
++
++      list_for_each_entry(t, &net->xt.tables[NFPROTO_BRIDGE], list) {
++              if (strcmp(t->name, name) == 0) {
++                      mutex_unlock(&ebt_mutex);
++                      return t;
++              }
++      }
++
++      mutex_unlock(&ebt_mutex);
++      return NULL;
++}
++
++void ebt_unregister_table_pre_exit(struct net *net, const char *name, const struct nf_hook_ops *ops)
++{
++      struct ebt_table *table = __ebt_find_table(net, name);
++
++      if (table)
++              nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
++}
++EXPORT_SYMBOL(ebt_unregister_table_pre_exit);
++
++void ebt_unregister_table(struct net *net, struct ebt_table *table)
+ {
+-      nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
+       __ebt_unregister_table(net, table);
+ }
+ 
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 62ff7121b22d3..64f4c7ec729dc 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -5867,7 +5867,8 @@ static void skb_gro_reset_offset(struct sk_buff *skb)
+       NAPI_GRO_CB(skb)->frag0_len = 0;
+ 
+       if (!skb_headlen(skb) && pinfo->nr_frags &&
+-          !PageHighMem(skb_frag_page(frag0))) {
++          !PageHighMem(skb_frag_page(frag0)) &&
++          (!NET_IP_ALIGN || !(skb_frag_off(frag0) & 3))) {
+               NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
+               NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
+                                                   skb_frag_size(frag0),
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index 2fe4bbb6b80cf..8339978d46ff8 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -1380,7 +1380,7 @@ static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
+                        * we can reinject the packet there.
+                        */
+                       n2 = NULL;
+-                      if (dst) {
++                      if (dst && dst->obsolete != DST_OBSOLETE_DEAD) {
+                               n2 = dst_neigh_lookup_skb(dst, skb);
+                               if (n2)
+                                       n1 = n2;
+diff --git a/net/ethtool/pause.c b/net/ethtool/pause.c
+index 09998dc5c185f..d4ac02718b72a 100644
+--- a/net/ethtool/pause.c
++++ b/net/ethtool/pause.c
+@@ -38,16 +38,16 @@ static int pause_prepare_data(const struct ethnl_req_info *req_base,
+       if (!dev->ethtool_ops->get_pauseparam)
+               return -EOPNOTSUPP;
+ 
++      ethtool_stats_init((u64 *)&data->pausestat,
++                         sizeof(data->pausestat) / 8);
++
+       ret = ethnl_ops_begin(dev);
+       if (ret < 0)
+               return ret;
+       dev->ethtool_ops->get_pauseparam(dev, &data->pauseparam);
+       if (req_base->flags & ETHTOOL_FLAG_STATS &&
+-          dev->ethtool_ops->get_pause_stats) {
+-              ethtool_stats_init((u64 *)&data->pausestat,
+-                                 sizeof(data->pausestat) / 8);
++          dev->ethtool_ops->get_pause_stats)
+               dev->ethtool_ops->get_pause_stats(dev, &data->pausestat);
+-      }
+       ethnl_ops_complete(dev);
+ 
+       return 0;
+diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c
+index d1b6a9665b170..f0b47d43c9f6e 100644
+--- a/net/ieee802154/nl802154.c
++++ b/net/ieee802154/nl802154.c
+@@ -1498,6 +1498,11 @@ nl802154_dump_llsec_key(struct sk_buff *skb, struct netlink_callback *cb)
+       if (err)
+               return err;
+ 
++      if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) {
++              err = skb->len;
++              goto out_err;
++      }
++
+       if (!wpan_dev->netdev) {
+               err = -EINVAL;
+               goto out_err;
+@@ -1552,6 +1557,9 @@ static int nl802154_add_llsec_key(struct sk_buff *skb, struct genl_info *info)
+       struct ieee802154_llsec_key_id id = { };
+       u32 commands[NL802154_CMD_FRAME_NR_IDS / 32] = { };
+ 
++      if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
++              return -EOPNOTSUPP;
++
+       if (!info->attrs[NL802154_ATTR_SEC_KEY] ||
+           nla_parse_nested_deprecated(attrs, NL802154_KEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_KEY], nl802154_key_policy, info->extack))
+               return -EINVAL;
+@@ -1601,6 +1609,9 @@ static int nl802154_del_llsec_key(struct sk_buff *skb, struct genl_info *info)
+       struct nlattr *attrs[NL802154_KEY_ATTR_MAX + 1];
+       struct ieee802154_llsec_key_id id;
+ 
++      if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
++              return -EOPNOTSUPP;
++
+       if (!info->attrs[NL802154_ATTR_SEC_KEY] ||
+           nla_parse_nested_deprecated(attrs, NL802154_KEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_KEY], nl802154_key_policy, info->extack))
+               return -EINVAL;
+@@ -1666,6 +1677,11 @@ nl802154_dump_llsec_dev(struct sk_buff *skb, struct netlink_callback *cb)
+       if (err)
+               return err;
+ 
++      if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) {
++              err = skb->len;
++              goto out_err;
++      }
++
+       if (!wpan_dev->netdev) {
+               err = -EINVAL;
+               goto out_err;
+@@ -1752,6 +1768,9 @@ static int nl802154_add_llsec_dev(struct sk_buff *skb, struct genl_info *info)
+       struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+       struct ieee802154_llsec_device dev_desc;
+ 
++      if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
++              return -EOPNOTSUPP;
++
+       if (ieee802154_llsec_parse_device(info->attrs[NL802154_ATTR_SEC_DEVICE],
+                                         &dev_desc) < 0)
+               return -EINVAL;
+@@ -1767,6 +1786,9 @@ static int nl802154_del_llsec_dev(struct sk_buff *skb, struct genl_info *info)
+       struct nlattr *attrs[NL802154_DEV_ATTR_MAX + 1];
+       __le64 extended_addr;
+ 
++      if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
++              return -EOPNOTSUPP;
++
+       if (!info->attrs[NL802154_ATTR_SEC_DEVICE] ||
+           nla_parse_nested_deprecated(attrs, NL802154_DEV_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVICE], nl802154_dev_policy, info->extack))
+               return -EINVAL;
+@@ -1836,6 +1858,11 @@ nl802154_dump_llsec_devkey(struct sk_buff *skb, struct netlink_callback *cb)
+       if (err)
+               return err;
+ 
++      if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) {
++              err = skb->len;
++              goto out_err;
++      }
++
+       if (!wpan_dev->netdev) {
+               err = -EINVAL;
+               goto out_err;
+@@ -1893,6 +1920,9 @@ static int nl802154_add_llsec_devkey(struct sk_buff *skb, struct genl_info *info
+       struct ieee802154_llsec_device_key key;
+       __le64 extended_addr;
+ 
++      if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
++              return -EOPNOTSUPP;
++
+       if (!info->attrs[NL802154_ATTR_SEC_DEVKEY] ||
+           nla_parse_nested_deprecated(attrs, NL802154_DEVKEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVKEY], nl802154_devkey_policy, info->extack) < 0)
+               return -EINVAL;
+@@ -1924,6 +1954,9 @@ static int nl802154_del_llsec_devkey(struct sk_buff *skb, struct genl_info *info
+       struct ieee802154_llsec_device_key key;
+       __le64 extended_addr;
+ 
++      if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
++              return -EOPNOTSUPP;
++
+       if (!info->attrs[NL802154_ATTR_SEC_DEVKEY] ||
+           nla_parse_nested_deprecated(attrs, NL802154_DEVKEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVKEY], nl802154_devkey_policy, info->extack))
+               return -EINVAL;
+@@ -1998,6 +2031,11 @@ nl802154_dump_llsec_seclevel(struct sk_buff *skb, struct netlink_callback *cb)
+       if (err)
+               return err;
+ 
++      if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) {
++              err = skb->len;
++              goto out_err;
++      }
++
+       if (!wpan_dev->netdev) {
+               err = -EINVAL;
+               goto out_err;
+@@ -2082,6 +2120,9 @@ static int nl802154_add_llsec_seclevel(struct sk_buff *skb,
+       struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+       struct ieee802154_llsec_seclevel sl;
+ 
++      if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
++              return -EOPNOTSUPP;
++
+       if (llsec_parse_seclevel(info->attrs[NL802154_ATTR_SEC_LEVEL],
+                                &sl) < 0)
+               return -EINVAL;
+diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
+index e0093411d85d6..d6d45d820d79a 100644
+--- a/net/ipv4/netfilter/arp_tables.c
++++ b/net/ipv4/netfilter/arp_tables.c
+@@ -1541,10 +1541,15 @@ out_free:
+       return ret;
+ }
+ 
+-void arpt_unregister_table(struct net *net, struct xt_table *table,
+-                         const struct nf_hook_ops *ops)
++void arpt_unregister_table_pre_exit(struct net *net, struct xt_table *table,
++                                  const struct nf_hook_ops *ops)
+ {
+       nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
++}
++EXPORT_SYMBOL(arpt_unregister_table_pre_exit);
++
++void arpt_unregister_table(struct net *net, struct xt_table *table)
++{
+       __arpt_unregister_table(net, table);
+ }
+ 
+diff --git a/net/ipv4/netfilter/arptable_filter.c b/net/ipv4/netfilter/arptable_filter.c
+index c216b9ad3bb24..6c300ba5634e2 100644
+--- a/net/ipv4/netfilter/arptable_filter.c
++++ b/net/ipv4/netfilter/arptable_filter.c
+@@ -56,16 +56,24 @@ static int __net_init arptable_filter_table_init(struct net *net)
+       return err;
+ }
+ 
++static void __net_exit arptable_filter_net_pre_exit(struct net *net)
++{
++      if (net->ipv4.arptable_filter)
++              arpt_unregister_table_pre_exit(net, net->ipv4.arptable_filter,
++                                             arpfilter_ops);
++}
++
+ static void __net_exit arptable_filter_net_exit(struct net *net)
+ {
+       if (!net->ipv4.arptable_filter)
+               return;
+-      arpt_unregister_table(net, net->ipv4.arptable_filter, arpfilter_ops);
++      arpt_unregister_table(net, net->ipv4.arptable_filter);
+       net->ipv4.arptable_filter = NULL;
+ }
+ 
+ static struct pernet_operations arptable_filter_net_ops = {
+       .exit = arptable_filter_net_exit,
++      .pre_exit = arptable_filter_net_pre_exit,
+ };
+ 
+ static int __init arptable_filter_init(void)
+diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
+index 3e5f4f2e705e8..08829809e88b7 100644
+--- a/net/ipv4/sysctl_net_ipv4.c
++++ b/net/ipv4/sysctl_net_ipv4.c
+@@ -1369,9 +1369,19 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
+               if (!table)
+                       goto err_alloc;
+ 
+-              /* Update the variables to point into the current struct net */
+-              for (i = 0; i < ARRAY_SIZE(ipv4_net_table) - 1; i++)
+-                      table[i].data += (void *)net - (void *)&init_net;
++              for (i = 0; i < ARRAY_SIZE(ipv4_net_table) - 1; i++) {
++                      if (table[i].data) {
++                              /* Update the variables to point into
++                               * the current struct net
++                               */
++                              table[i].data += (void *)net - (void *)&init_net;
++                      } else {
++                              /* Entries without data pointer are global;
++                               * Make them read-only in non-init_net ns
++                               */
++                              table[i].mode &= ~0222;
++                      }
++              }
+       }
+ 
+       net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index 5d27b5c631217..ecc1abfca0650 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -2275,6 +2275,16 @@ static void __net_exit ip6_tnl_destroy_tunnels(struct net *net, struct list_head
+                       t = rtnl_dereference(t->next);
+               }
+       }
++
++      t = rtnl_dereference(ip6n->tnls_wc[0]);
++      while (t) {
++              /* If dev is in the same netns, it has already
++               * been added to the list by the previous loop.
++               */
++              if (!net_eq(dev_net(t->dev), net))
++                      unregister_netdevice_queue(t->dev, list);
++              t = rtnl_dereference(t->next);
++      }
+ }
+ 
+ static int __net_init ip6_tnl_init_net(struct net *net)
+diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
+index b26f469a3fb8c..146ba7fa5bf62 100644
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -1867,9 +1867,9 @@ static void __net_exit sit_destroy_tunnels(struct net *net,
+               if (dev->rtnl_link_ops == &sit_link_ops)
+                       unregister_netdevice_queue(dev, head);
+ 
+-      for (prio = 1; prio < 4; prio++) {
++      for (prio = 0; prio < 4; prio++) {
+               int h;
+-              for (h = 0; h < IP6_SIT_HASH_SIZE; h++) {
++              for (h = 0; h < (prio ? IP6_SIT_HASH_SIZE : 1); h++) {
+                       struct ip_tunnel *t;
+ 
+                       t = rtnl_dereference(sitn->tunnels[prio][h]);
+diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
+index 2bf6271d9e3f6..6a96deded7632 100644
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -1789,8 +1789,10 @@ static int ieee80211_change_station(struct wiphy *wiphy,
+               }
+ 
+               if (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
+-                  sta->sdata->u.vlan.sta)
++                  sta->sdata->u.vlan.sta) {
++                      ieee80211_clear_fast_rx(sta);
+                       RCU_INIT_POINTER(sta->sdata->u.vlan.sta, NULL);
++              }
+ 
+               if (test_sta_flag(sta, WLAN_STA_AUTHORIZED))
+                       ieee80211_vif_dec_num_mcast(sta->sdata);
+diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
+index 0ee702d374b02..c6c0cb4656645 100644
+--- a/net/netfilter/nf_conntrack_standalone.c
++++ b/net/netfilter/nf_conntrack_standalone.c
+@@ -266,6 +266,7 @@ static const char* l4proto_name(u16 proto)
+       case IPPROTO_GRE: return "gre";
+       case IPPROTO_SCTP: return "sctp";
+       case IPPROTO_UDPLITE: return "udplite";
++      case IPPROTO_ICMPV6: return "icmpv6";
+       }
+ 
+       return "unknown";
+diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c
+index 2a6993fa40d78..1c5460e7bce87 100644
+--- a/net/netfilter/nf_flow_table_offload.c
++++ b/net/netfilter/nf_flow_table_offload.c
+@@ -305,12 +305,12 @@ static void flow_offload_ipv6_mangle(struct nf_flow_rule *flow_rule,
+                                    const __be32 *addr, const __be32 *mask)
+ {
+       struct flow_action_entry *entry;
+-      int i;
++      int i, j;
+ 
+-      for (i = 0; i < sizeof(struct in6_addr) / sizeof(u32); i += sizeof(u32)) {
++      for (i = 0, j = 0; i < sizeof(struct in6_addr) / sizeof(u32); i += sizeof(u32), j++) {
+               entry = flow_action_entry_next(flow_rule);
+               flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP6,
+-                                  offset + i, &addr[i], mask);
++                                  offset + i, &addr[j], mask);
+       }
+ }
+ 
+diff --git a/net/netfilter/nft_limit.c b/net/netfilter/nft_limit.c
+index 0e2c315c3b5ed..82ec27bdf9412 100644
+--- a/net/netfilter/nft_limit.c
++++ b/net/netfilter/nft_limit.c
+@@ -76,13 +76,13 @@ static int nft_limit_init(struct nft_limit *limit,
+               return -EOVERFLOW;
+ 
+       if (pkts) {
+-              tokens = div_u64(limit->nsecs, limit->rate) * limit->burst;
++              tokens = div64_u64(limit->nsecs, limit->rate) * limit->burst;
+       } else {
+               /* The token bucket size limits the number of tokens can be
+                * accumulated. tokens_max specifies the bucket size.
+                * tokens_max = unit * (rate + burst) / rate.
+                */
+-              tokens = div_u64(limit->nsecs * (limit->rate + limit->burst),
++              tokens = div64_u64(limit->nsecs * (limit->rate + limit->burst),
+                                limit->rate);
+       }
+ 
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 53d0a4161df3f..9463c54c465af 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -1520,11 +1520,9 @@ static void sctp_close(struct sock *sk, long timeout)
+ 
+       /* Supposedly, no process has access to the socket, but
+        * the net layers still may.
+-       * Also, sctp_destroy_sock() needs to be called with addr_wq_lock
+-       * held and that should be grabbed before socket lock.
+        */
+-      spin_lock_bh(&net->sctp.addr_wq_lock);
+-      bh_lock_sock_nested(sk);
++      local_bh_disable();
++      bh_lock_sock(sk);
+ 
+       /* Hold the sock, since sk_common_release() will put sock_put()
+        * and we have just a little more cleanup.
+@@ -1533,7 +1531,7 @@ static void sctp_close(struct sock *sk, long timeout)
+       sk_common_release(sk);
+ 
+       bh_unlock_sock(sk);
+-      spin_unlock_bh(&net->sctp.addr_wq_lock);
++      local_bh_enable();
+ 
+       sock_put(sk);
+ 
+@@ -4939,9 +4937,6 @@ static int sctp_init_sock(struct sock *sk)
+       sk_sockets_allocated_inc(sk);
+       sock_prot_inuse_add(net, sk->sk_prot, 1);
+ 
+-      /* Nothing can fail after this block, otherwise
+-       * sctp_destroy_sock() will be called without addr_wq_lock held
+-       */
+       if (net->sctp.default_auto_asconf) {
+               spin_lock(&sock_net(sk)->sctp.addr_wq_lock);
+               list_add_tail(&sp->auto_asconf_list,
+@@ -4976,7 +4971,9 @@ static void sctp_destroy_sock(struct sock *sk)
+ 
+       if (sp->do_auto_asconf) {
+               sp->do_auto_asconf = 0;
++              spin_lock_bh(&sock_net(sk)->sctp.addr_wq_lock);
+               list_del(&sp->auto_asconf_list);
++              spin_unlock_bh(&sock_net(sk)->sctp.addr_wq_lock);
+       }
+       sctp_endpoint_free(sp->ep);
+       local_bh_disable();
+diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
+index b81ca117dac7a..e4cb0ff4dcf41 100644
+--- a/net/xfrm/xfrm_output.c
++++ b/net/xfrm/xfrm_output.c
+@@ -660,6 +660,12 @@ static int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb)
+ {
+       int err;
+ 
++      if (x->outer_mode.encap == XFRM_MODE_BEET &&
++          ip_is_fragment(ip_hdr(skb))) {
++              net_warn_ratelimited("BEET mode doesn't support inner IPv4 fragments\n");
++              return -EAFNOSUPPORT;
++      }
++
+       err = xfrm4_tunnel_check_size(skb);
+       if (err)
+               return err;
+@@ -705,8 +711,15 @@ out:
+ static int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb)
+ {
+ #if IS_ENABLED(CONFIG_IPV6)
++      unsigned int ptr = 0;
+       int err;
+ 
++      if (x->outer_mode.encap == XFRM_MODE_BEET &&
++          ipv6_find_hdr(skb, &ptr, NEXTHDR_FRAGMENT, NULL, NULL) >= 0) {
++              net_warn_ratelimited("BEET mode doesn't support inner IPv6 fragments\n");
++              return -EAFNOSUPPORT;
++      }
++
+       err = xfrm6_tunnel_check_size(skb);
+       if (err)
+               return err;
+diff --git a/sound/soc/codecs/max98373-i2c.c b/sound/soc/codecs/max98373-i2c.c
+index 92921e34f9486..32b0c1d983650 100644
+--- a/sound/soc/codecs/max98373-i2c.c
++++ b/sound/soc/codecs/max98373-i2c.c
+@@ -440,6 +440,7 @@ static bool max98373_volatile_reg(struct device *dev, unsigned int reg)
+       case MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK:
+       case MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK:
+       case MAX98373_R20B6_BDE_CUR_STATE_READBACK:
++      case MAX98373_R20FF_GLOBAL_SHDN:
+       case MAX98373_R21FF_REV_ID:
+               return true;
+       default:
+diff --git a/sound/soc/codecs/max98373-sdw.c b/sound/soc/codecs/max98373-sdw.c
+index fa589d834f9aa..14fd2f9a0bf3a 100644
+--- a/sound/soc/codecs/max98373-sdw.c
++++ b/sound/soc/codecs/max98373-sdw.c
+@@ -214,6 +214,7 @@ static bool max98373_volatile_reg(struct device *dev, unsigned int reg)
+       case MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK:
+       case MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK:
+       case MAX98373_R20B6_BDE_CUR_STATE_READBACK:
++      case MAX98373_R20FF_GLOBAL_SHDN:
+       case MAX98373_R21FF_REV_ID:
+       /* SoundWire Control Port Registers */
+       case MAX98373_R0040_SCP_INIT_STAT_1 ... MAX98373_R0070_SCP_FRAME_CTLR:
+diff --git a/sound/soc/codecs/max98373.c b/sound/soc/codecs/max98373.c
+index 929bb1798c43f..1fd4dbbb4ecf4 100644
+--- a/sound/soc/codecs/max98373.c
++++ b/sound/soc/codecs/max98373.c
+@@ -28,11 +28,13 @@ static int max98373_dac_event(struct snd_soc_dapm_widget *w,
+               regmap_update_bits(max98373->regmap,
+                       MAX98373_R20FF_GLOBAL_SHDN,
+                       MAX98373_GLOBAL_EN_MASK, 1);
++              usleep_range(30000, 31000);
+               break;
+       case SND_SOC_DAPM_POST_PMD:
+               regmap_update_bits(max98373->regmap,
+                       MAX98373_R20FF_GLOBAL_SHDN,
+                       MAX98373_GLOBAL_EN_MASK, 0);
++              usleep_range(30000, 31000);
+               max98373->tdm_mode = false;
+               break;
+       default:
+diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
+index 39637ca78cdbb..9f5f217a96077 100644
+--- a/sound/soc/fsl/fsl_esai.c
++++ b/sound/soc/fsl/fsl_esai.c
+@@ -524,11 +524,13 @@ static int fsl_esai_startup(struct snd_pcm_substream *substream,
+                                  ESAI_SAICR_SYNC, esai_priv->synchronous ?
+                                  ESAI_SAICR_SYNC : 0);
+ 
+-              /* Set a default slot number -- 2 */
++              /* Set slots count */
+               regmap_update_bits(esai_priv->regmap, REG_ESAI_TCCR,
+-                                 ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(2));
++                                 ESAI_xCCR_xDC_MASK,
++                                 ESAI_xCCR_xDC(esai_priv->slots));
+               regmap_update_bits(esai_priv->regmap, REG_ESAI_RCCR,
+-                                 ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(2));
++                                 ESAI_xCCR_xDC_MASK,
++                                 ESAI_xCCR_xDC(esai_priv->slots));
+       }
+ 
+       return 0;
+diff --git a/tools/include/uapi/asm/errno.h b/tools/include/uapi/asm/errno.h
+index 637189ec1ab99..d30439b4b8ab4 100644
+--- a/tools/include/uapi/asm/errno.h
++++ b/tools/include/uapi/asm/errno.h
+@@ -9,8 +9,6 @@
+ #include "../../../arch/alpha/include/uapi/asm/errno.h"
+ #elif defined(__mips__)
+ #include "../../../arch/mips/include/uapi/asm/errno.h"
+-#elif defined(__ia64__)
+-#include "../../../arch/ia64/include/uapi/asm/errno.h"
+ #elif defined(__xtensa__)
+ #include "../../../arch/xtensa/include/uapi/asm/errno.h"
+ #else
+diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c
+index 5f7b85fba39d0..7150e34cf2afb 100644
+--- a/tools/lib/bpf/xsk.c
++++ b/tools/lib/bpf/xsk.c
+@@ -703,18 +703,19 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
+                             struct xsk_ring_cons *comp,
+                             const struct xsk_socket_config *usr_config)
+ {
++      bool unmap, rx_setup_done = false, tx_setup_done = false;
+       void *rx_map = NULL, *tx_map = NULL;
+       struct sockaddr_xdp sxdp = {};
+       struct xdp_mmap_offsets off;
+       struct xsk_socket *xsk;
+       struct xsk_ctx *ctx;
+       int err, ifindex;
+-      bool unmap = umem->fill_save != fill;
+-      bool rx_setup_done = false, tx_setup_done = false;
+ 
+       if (!umem || !xsk_ptr || !(rx || tx))
+               return -EFAULT;
+ 
++      unmap = umem->fill_save != fill;
++
+       xsk = calloc(1, sizeof(*xsk));
+       if (!xsk)
+               return -ENOMEM;
