commit:     7a397352aed0d1e1fdec8f3eb9b2c26f8d4620ff
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Apr 19 19:52:50 2019 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Apr 19 19:52:50 2019 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=7a397352

Linux patch 4.14.112

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README               |    4 +
 1111_linux-4.14.112.patch | 2505 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2509 insertions(+)

diff --git a/0000_README b/0000_README
index f77da1f..ea26cf5 100644
--- a/0000_README
+++ b/0000_README
@@ -487,6 +487,10 @@ Patch:  1110_linux-4.14.111.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.14.111
 
+Patch:  1111_linux-4.14.112.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.14.112
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1111_linux-4.14.112.patch b/1111_linux-4.14.112.patch
new file mode 100644
index 0000000..1ad2301
--- /dev/null
+++ b/1111_linux-4.14.112.patch
@@ -0,0 +1,2505 @@
+diff --git a/Makefile b/Makefile
+index da223c660c9a..94673d2a6a27 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 111
++SUBLEVEL = 112
+ EXTRAVERSION =
+ NAME = Petit Gorille
+ 
+@@ -480,7 +480,7 @@ endif
+ ifeq ($(cc-name),clang)
+ ifneq ($(CROSS_COMPILE),)
+ CLANG_FLAGS   := --target=$(notdir $(CROSS_COMPILE:%-=%))
+-GCC_TOOLCHAIN_DIR := $(dir $(shell which $(LD)))
++GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)elfedit))
+ CLANG_FLAGS   += --prefix=$(GCC_TOOLCHAIN_DIR)
+ GCC_TOOLCHAIN := $(realpath $(GCC_TOOLCHAIN_DIR)/..)
+ endif
+diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts
+index ddd897556e03..478434ebff92 100644
+--- a/arch/arm/boot/dts/am335x-evm.dts
++++ b/arch/arm/boot/dts/am335x-evm.dts
+@@ -57,6 +57,24 @@
+               enable-active-high;
+       };
+ 
++      /* TPS79501 */
++      v1_8d_reg: fixedregulator-v1_8d {
++              compatible = "regulator-fixed";
++              regulator-name = "v1_8d";
++              vin-supply = <&vbat>;
++              regulator-min-microvolt = <1800000>;
++              regulator-max-microvolt = <1800000>;
++      };
++
++      /* TPS79501 */
++      v3_3d_reg: fixedregulator-v3_3d {
++              compatible = "regulator-fixed";
++              regulator-name = "v3_3d";
++              vin-supply = <&vbat>;
++              regulator-min-microvolt = <3300000>;
++              regulator-max-microvolt = <3300000>;
++      };
++
+       matrix_keypad: matrix_keypad0 {
+               compatible = "gpio-matrix-keypad";
+               debounce-delay-ms = <5>;
+@@ -492,10 +510,10 @@
+               status = "okay";
+ 
+               /* Regulators */
+-              AVDD-supply = <&vaux2_reg>;
+-              IOVDD-supply = <&vaux2_reg>;
+-              DRVDD-supply = <&vaux2_reg>;
+-              DVDD-supply = <&vbat>;
++              AVDD-supply = <&v3_3d_reg>;
++              IOVDD-supply = <&v3_3d_reg>;
++              DRVDD-supply = <&v3_3d_reg>;
++              DVDD-supply = <&v1_8d_reg>;
+       };
+ };
+ 
+diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts
+index 9ba4b18c0cb2..bbd828892fcb 100644
+--- a/arch/arm/boot/dts/am335x-evmsk.dts
++++ b/arch/arm/boot/dts/am335x-evmsk.dts
+@@ -73,6 +73,24 @@
+               enable-active-high;
+       };
+ 
++      /* TPS79518 */
++      v1_8d_reg: fixedregulator-v1_8d {
++              compatible = "regulator-fixed";
++              regulator-name = "v1_8d";
++              vin-supply = <&vbat>;
++              regulator-min-microvolt = <1800000>;
++              regulator-max-microvolt = <1800000>;
++      };
++
++      /* TPS78633 */
++      v3_3d_reg: fixedregulator-v3_3d {
++              compatible = "regulator-fixed";
++              regulator-name = "v3_3d";
++              vin-supply = <&vbat>;
++              regulator-min-microvolt = <3300000>;
++              regulator-max-microvolt = <3300000>;
++      };
++
+       leds {
+               pinctrl-names = "default";
+               pinctrl-0 = <&user_leds_s0>;
+@@ -493,10 +511,10 @@
+               status = "okay";
+ 
+               /* Regulators */
+-              AVDD-supply = <&vaux2_reg>;
+-              IOVDD-supply = <&vaux2_reg>;
+-              DRVDD-supply = <&vaux2_reg>;
+-              DVDD-supply = <&vbat>;
++              AVDD-supply = <&v3_3d_reg>;
++              IOVDD-supply = <&v3_3d_reg>;
++              DRVDD-supply = <&v3_3d_reg>;
++              DVDD-supply = <&v1_8d_reg>;
+       };
+ };
+ 
+diff --git a/arch/arm/boot/dts/sama5d2-pinfunc.h b/arch/arm/boot/dts/sama5d2-pinfunc.h
+index e57191fb83de..9daa6dfd71e0 100644
+--- a/arch/arm/boot/dts/sama5d2-pinfunc.h
++++ b/arch/arm/boot/dts/sama5d2-pinfunc.h
+@@ -518,7 +518,7 @@
+ #define PIN_PC9__GPIO                 PINMUX_PIN(PIN_PC9, 0, 0)
+ #define PIN_PC9__FIQ                  PINMUX_PIN(PIN_PC9, 1, 3)
+ #define PIN_PC9__GTSUCOMP             PINMUX_PIN(PIN_PC9, 2, 1)
+-#define PIN_PC9__ISC_D0                       PINMUX_PIN(PIN_PC9, 2, 1)
++#define PIN_PC9__ISC_D0                       PINMUX_PIN(PIN_PC9, 3, 1)
+ #define PIN_PC9__TIOA4                        PINMUX_PIN(PIN_PC9, 4, 2)
+ #define PIN_PC10                      74
+ #define PIN_PC10__GPIO                        PINMUX_PIN(PIN_PC10, 0, 0)
+diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
+index 28257724a56e..e720f40bbd5d 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
+@@ -82,8 +82,7 @@
+ 
+       vcc_host1_5v: vcc_otg_5v: vcc-host1-5v-regulator {
+               compatible = "regulator-fixed";
+-              enable-active-high;
+-              gpio = <&gpio0 RK_PD3 GPIO_ACTIVE_HIGH>;
++              gpio = <&gpio0 RK_PA2 GPIO_ACTIVE_LOW>;
+               pinctrl-names = "default";
+               pinctrl-0 = <&usb20_host_drv>;
+               regulator-name = "vcc_host1_5v";
+@@ -275,7 +274,7 @@
+ 
+       usb2 {
+               usb20_host_drv: usb20-host-drv {
+-                      rockchip,pins = <0 RK_PD3 RK_FUNC_GPIO &pcfg_pull_none>;
++                      rockchip,pins = <0 RK_PA2 RK_FUNC_GPIO &pcfg_pull_none>;
+               };
+       };
+ 
+diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+index efac2202b16e..f6b4b8f0260f 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+@@ -1333,11 +1333,11 @@
+ 
+               sdmmc0 {
+                       sdmmc0_clk: sdmmc0-clk {
+-                              rockchip,pins = <1 RK_PA6 1 &pcfg_pull_none_4ma>;
++                              rockchip,pins = <1 RK_PA6 1 &pcfg_pull_none_8ma>;
+                       };
+ 
+                       sdmmc0_cmd: sdmmc0-cmd {
+-                              rockchip,pins = <1 RK_PA4 1 &pcfg_pull_up_4ma>;
++                              rockchip,pins = <1 RK_PA4 1 &pcfg_pull_up_8ma>;
+                       };
+ 
+                       sdmmc0_dectn: sdmmc0-dectn {
+@@ -1349,14 +1349,14 @@
+                       };
+ 
+                       sdmmc0_bus1: sdmmc0-bus1 {
+-                              rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_4ma>;
++                              rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_8ma>;
+                       };
+ 
+                       sdmmc0_bus4: sdmmc0-bus4 {
+-                              rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_4ma>,
+-                                              <1 RK_PA1 1 &pcfg_pull_up_4ma>,
+-                                              <1 RK_PA2 1 &pcfg_pull_up_4ma>,
+-                                              <1 RK_PA3 1 &pcfg_pull_up_4ma>;
++                              rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_8ma>,
++                                              <1 RK_PA1 1 &pcfg_pull_up_8ma>,
++                                              <1 RK_PA2 1 &pcfg_pull_up_8ma>,
++                                              <1 RK_PA3 1 &pcfg_pull_up_8ma>;
+                       };
+ 
+                       sdmmc0_gpio: sdmmc0-gpio {
+@@ -1530,50 +1530,50 @@
+                       rgmiim1_pins: rgmiim1-pins {
+                               rockchip,pins =
+                                       /* mac_txclk */
+-                                      <1 RK_PB4 2 &pcfg_pull_none_12ma>,
++                                      <1 RK_PB4 2 &pcfg_pull_none_8ma>,
+                                       /* mac_rxclk */
+-                                      <1 RK_PB5 2 &pcfg_pull_none_2ma>,
++                                      <1 RK_PB5 2 &pcfg_pull_none_4ma>,
+                                       /* mac_mdio */
+-                                      <1 RK_PC3 2 &pcfg_pull_none_2ma>,
++                                      <1 RK_PC3 2 &pcfg_pull_none_4ma>,
+                                       /* mac_txen */
+-                                      <1 RK_PD1 2 &pcfg_pull_none_12ma>,
++                                      <1 RK_PD1 2 &pcfg_pull_none_8ma>,
+                                       /* mac_clk */
+-                                      <1 RK_PC5 2 &pcfg_pull_none_2ma>,
++                                      <1 RK_PC5 2 &pcfg_pull_none_4ma>,
+                                       /* mac_rxdv */
+-                                      <1 RK_PC6 2 &pcfg_pull_none_2ma>,
++                                      <1 RK_PC6 2 &pcfg_pull_none_4ma>,
+                                       /* mac_mdc */
+-                                      <1 RK_PC7 2 &pcfg_pull_none_2ma>,
++                                      <1 RK_PC7 2 &pcfg_pull_none_4ma>,
+                                       /* mac_rxd1 */
+-                                      <1 RK_PB2 2 &pcfg_pull_none_2ma>,
++                                      <1 RK_PB2 2 &pcfg_pull_none_4ma>,
+                                       /* mac_rxd0 */
+-                                      <1 RK_PB3 2 &pcfg_pull_none_2ma>,
++                                      <1 RK_PB3 2 &pcfg_pull_none_4ma>,
+                                       /* mac_txd1 */
+-                                      <1 RK_PB0 2 &pcfg_pull_none_12ma>,
++                                      <1 RK_PB0 2 &pcfg_pull_none_8ma>,
+                                       /* mac_txd0 */
+-                                      <1 RK_PB1 2 &pcfg_pull_none_12ma>,
++                                      <1 RK_PB1 2 &pcfg_pull_none_8ma>,
+                                       /* mac_rxd3 */
+-                                      <1 RK_PB6 2 &pcfg_pull_none_2ma>,
++                                      <1 RK_PB6 2 &pcfg_pull_none_4ma>,
+                                       /* mac_rxd2 */
+-                                      <1 RK_PB7 2 &pcfg_pull_none_2ma>,
++                                      <1 RK_PB7 2 &pcfg_pull_none_4ma>,
+                                       /* mac_txd3 */
+-                                      <1 RK_PC0 2 &pcfg_pull_none_12ma>,
++                                      <1 RK_PC0 2 &pcfg_pull_none_8ma>,
+                                       /* mac_txd2 */
+-                                      <1 RK_PC1 2 &pcfg_pull_none_12ma>,
++                                      <1 RK_PC1 2 &pcfg_pull_none_8ma>,
+ 
+                                       /* mac_txclk */
+-                                      <0 RK_PB0 1 &pcfg_pull_none>,
++                                      <0 RK_PB0 1 &pcfg_pull_none_8ma>,
+                                       /* mac_txen */
+-                                      <0 RK_PB4 1 &pcfg_pull_none>,
++                                      <0 RK_PB4 1 &pcfg_pull_none_8ma>,
+                                       /* mac_clk */
+-                                      <0 RK_PD0 1 &pcfg_pull_none>,
++                                      <0 RK_PD0 1 &pcfg_pull_none_4ma>,
+                                       /* mac_txd1 */
+-                                      <0 RK_PC0 1 &pcfg_pull_none>,
++                                      <0 RK_PC0 1 &pcfg_pull_none_8ma>,
+                                       /* mac_txd0 */
+-                                      <0 RK_PC1 1 &pcfg_pull_none>,
++                                      <0 RK_PC1 1 &pcfg_pull_none_8ma>,
+                                       /* mac_txd3 */
+-                                      <0 RK_PC7 1 &pcfg_pull_none>,
++                                      <0 RK_PC7 1 &pcfg_pull_none_8ma>,
+                                       /* mac_txd2 */
+-                                      <0 RK_PC6 1 &pcfg_pull_none>;
++                                      <0 RK_PC6 1 &pcfg_pull_none_8ma>;
+                       };
+ 
+                       rmiim1_pins: rmiim1-pins {
+diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
+index 07fe2479d310..b447b4db423a 100644
+--- a/arch/arm64/include/asm/futex.h
++++ b/arch/arm64/include/asm/futex.h
+@@ -30,8 +30,8 @@ do {                                                         \
+ "     prfm    pstl1strm, %2\n"                                        \
+ "1:   ldxr    %w1, %2\n"                                              \
+       insn "\n"                                                       \
+-"2:   stlxr   %w3, %w0, %2\n"                                         \
+-"     cbnz    %w3, 1b\n"                                              \
++"2:   stlxr   %w0, %w3, %2\n"                                         \
++"     cbnz    %w0, 1b\n"                                              \
+ "     dmb     ish\n"                                                  \
+ "3:\n"                                                                        
\
+ "     .pushsection .fixup,\"ax\"\n"                                   \
+@@ -50,30 +50,30 @@ do {                                                                       \
+ static inline int
+ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr)
+ {
+-      int oldval = 0, ret, tmp;
++      int oldval, ret, tmp;
+       u32 __user *uaddr = __uaccess_mask_ptr(_uaddr);
+ 
+       pagefault_disable();
+ 
+       switch (op) {
+       case FUTEX_OP_SET:
+-              __futex_atomic_op("mov  %w0, %w4",
++              __futex_atomic_op("mov  %w3, %w4",
+                                 ret, oldval, uaddr, tmp, oparg);
+               break;
+       case FUTEX_OP_ADD:
+-              __futex_atomic_op("add  %w0, %w1, %w4",
++              __futex_atomic_op("add  %w3, %w1, %w4",
+                                 ret, oldval, uaddr, tmp, oparg);
+               break;
+       case FUTEX_OP_OR:
+-              __futex_atomic_op("orr  %w0, %w1, %w4",
++              __futex_atomic_op("orr  %w3, %w1, %w4",
+                                 ret, oldval, uaddr, tmp, oparg);
+               break;
+       case FUTEX_OP_ANDN:
+-              __futex_atomic_op("and  %w0, %w1, %w4",
++              __futex_atomic_op("and  %w3, %w1, %w4",
+                                 ret, oldval, uaddr, tmp, ~oparg);
+               break;
+       case FUTEX_OP_XOR:
+-              __futex_atomic_op("eor  %w0, %w1, %w4",
++              __futex_atomic_op("eor  %w3, %w1, %w4",
+                                 ret, oldval, uaddr, tmp, oparg);
+               break;
+       default:
+diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
+index 4fc0e958770b..4cacc33d07ce 100644
+--- a/arch/arm64/kernel/traps.c
++++ b/arch/arm64/kernel/traps.c
+@@ -145,10 +145,16 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
+ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
+ {
+       struct stackframe frame;
+-      int skip;
++      int skip = 0;
+ 
+       pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
+ 
++      if (regs) {
++              if (user_mode(regs))
++                      return;
++              skip = 1;
++      }
++
+       if (!tsk)
+               tsk = current;
+ 
+@@ -169,7 +175,6 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
+       frame.graph = tsk->curr_ret_stack;
+ #endif
+ 
+-      skip = !!regs;
+       printk("Call trace:\n");
+       while (1) {
+               unsigned long stack;
+@@ -232,15 +237,13 @@ static int __die(const char *str, int err, struct pt_regs *regs)
+               return ret;
+ 
+       print_modules();
+-      __show_regs(regs);
+       pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
+                TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk),
+                end_of_stack(tsk));
++      show_regs(regs);
+ 
+-      if (!user_mode(regs)) {
+-              dump_backtrace(regs, tsk);
++      if (!user_mode(regs))
+               dump_instr(KERN_EMERG, regs);
+-      }
+ 
+       return ret;
+ }
+diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
+index caa295cd5d09..9e6c822d458d 100644
+--- a/arch/arm64/mm/init.c
++++ b/arch/arm64/mm/init.c
+@@ -447,7 +447,7 @@ void __init arm64_memblock_init(void)
+                * memory spans, randomize the linear region as well.
+                */
+               if (memstart_offset_seed > 0 && range >= ARM64_MEMSTART_ALIGN) {
+-                      range = range / ARM64_MEMSTART_ALIGN + 1;
++                      range /= ARM64_MEMSTART_ALIGN;
+                       memstart_addr -= ARM64_MEMSTART_ALIGN *
+                                        ((range * memstart_offset_seed) >> 16);
+               }
+diff --git a/arch/parisc/include/asm/ptrace.h b/arch/parisc/include/asm/ptrace.h
+index 46da07670c2b..c8f70f965e8e 100644
+--- a/arch/parisc/include/asm/ptrace.h
++++ b/arch/parisc/include/asm/ptrace.h
+@@ -22,7 +22,7 @@ unsigned long profile_pc(struct pt_regs *);
+ 
+ static inline unsigned long regs_return_value(struct pt_regs *regs)
+ {
+-      return regs->gr[20];
++      return regs->gr[28];
+ }
+ 
+ #endif
+diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
+index cad3e8661cd6..4d712c1d64b8 100644
+--- a/arch/parisc/kernel/process.c
++++ b/arch/parisc/kernel/process.c
+@@ -209,12 +209,6 @@ void __cpuidle arch_cpu_idle(void)
+ 
+ static int __init parisc_idle_init(void)
+ {
+-      const char *marker;
+-
+-      /* check QEMU/SeaBIOS marker in PAGE0 */
+-      marker = (char *) &PAGE0->pad0;
+-      running_on_qemu = (memcmp(marker, "SeaBIOS", 8) == 0);
+-
+       if (!running_on_qemu)
+               cpu_idle_poll_ctrl(1);
+ 
+diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
+index f7d0c3b33d70..550f80ae9c8f 100644
+--- a/arch/parisc/kernel/setup.c
++++ b/arch/parisc/kernel/setup.c
+@@ -406,6 +406,9 @@ void __init start_parisc(void)
+       int ret, cpunum;
+       struct pdc_coproc_cfg coproc_cfg;
+ 
++      /* check QEMU/SeaBIOS marker in PAGE0 */
++      running_on_qemu = (memcmp(&PAGE0->pad0, "SeaBIOS", 8) == 0);
++
+       cpunum = smp_processor_id();
+ 
+       set_firmware_width_unlocked();
+diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
+index 979b9463e17b..927384d85faf 100644
+--- a/arch/powerpc/kernel/signal_64.c
++++ b/arch/powerpc/kernel/signal_64.c
+@@ -746,12 +746,25 @@ int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
+               if (restore_tm_sigcontexts(current, &uc->uc_mcontext,
+                                          &uc_transact->uc_mcontext))
+                       goto badframe;
+-      }
+-      else
+-      /* Fall through, for non-TM restore */
++      } else
+ #endif
+-      if (restore_sigcontext(current, NULL, 1, &uc->uc_mcontext))
+-              goto badframe;
++      {
++              /*
++               * Fall through, for non-TM restore
++               *
++               * Unset MSR[TS] on the thread regs since MSR from user
++               * context does not have MSR active, and recheckpoint was
++               * not called since restore_tm_sigcontexts() was not called
++               * also.
++               *
++               * If not unsetting it, the code can RFID to userspace with
++               * MSR[TS] set, but without CPU in the proper state,
++               * causing a TM bad thing.
++               */
++              current->thread.regs->msr &= ~MSR_TS_MASK;
++              if (restore_sigcontext(current, NULL, 1, &uc->uc_mcontext))
++                      goto badframe;
++      }
+ 
+       if (restore_altstack(&uc->uc_stack))
+               goto badframe;
+diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
+index 0a550dc5c525..839015f1b0de 100644
+--- a/arch/x86/entry/vdso/Makefile
++++ b/arch/x86/entry/vdso/Makefile
+@@ -48,10 +48,8 @@ targets += $(vdso_img_sodbg)
+ 
+ export CPPFLAGS_vdso.lds += -P -C
+ 
+-VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \
+-                      -Wl,--no-undefined \
+-                      -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096 \
+-                      $(DISABLE_LTO)
++VDSO_LDFLAGS_vdso.lds = -m elf_x86_64 -soname linux-vdso.so.1 --no-undefined \
++                      -z max-page-size=4096
+ 
+ $(obj)/vdso64.so.dbg: $(src)/vdso.lds $(vobjs) FORCE
+       $(call if_changed,vdso)
+@@ -103,10 +101,8 @@ CFLAGS_REMOVE_vvar.o = -pg
+ #
+ 
+ CPPFLAGS_vdsox32.lds = $(CPPFLAGS_vdso.lds)
+-VDSO_LDFLAGS_vdsox32.lds = -Wl,-m,elf32_x86_64 \
+-                         -Wl,-soname=linux-vdso.so.1 \
+-                         -Wl,-z,max-page-size=4096 \
+-                         -Wl,-z,common-page-size=4096
++VDSO_LDFLAGS_vdsox32.lds = -m elf32_x86_64 -soname linux-vdso.so.1 \
++                         -z max-page-size=4096
+ 
+ # 64-bit objects to re-brand as x32
+ vobjs64-for-x32 := $(filter-out $(vobjs-nox32),$(vobjs-y))
+@@ -134,7 +130,7 @@ $(obj)/vdsox32.so.dbg: $(src)/vdsox32.lds $(vobjx32s) FORCE
+       $(call if_changed,vdso)
+ 
+ CPPFLAGS_vdso32.lds = $(CPPFLAGS_vdso.lds)
+-VDSO_LDFLAGS_vdso32.lds = -m32 -Wl,-m,elf_i386 -Wl,-soname=linux-gate.so.1
++VDSO_LDFLAGS_vdso32.lds = -m elf_i386 -soname linux-gate.so.1
+ 
+ # This makes sure the $(obj) subdirectory exists even though vdso32/
+ # is not a kbuild sub-make subdirectory.
+@@ -180,13 +176,13 @@ $(obj)/vdso32.so.dbg: FORCE \
+ # The DSO images are built using a special linker script.
+ #
+ quiet_cmd_vdso = VDSO    $@
+-      cmd_vdso = $(CC) -nostdlib -o $@ \
++      cmd_vdso = $(LD) -nostdlib -o $@ \
+                      $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
+-                     -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
++                     -T $(filter %.lds,$^) $(filter %.o,$^) && \
+                sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
+ 
+-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=both) \
+-      $(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS)
++VDSO_LDFLAGS = -shared $(call ld-option, --hash-style=both) \
++      $(call ld-option, --build-id) -Bsymbolic
+ GCOV_PROFILE := n
+ 
+ #
+diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
+index c84584bb9402..3e5dd85b019a 100644
+--- a/arch/x86/events/amd/core.c
++++ b/arch/x86/events/amd/core.c
+@@ -3,10 +3,14 @@
+ #include <linux/types.h>
+ #include <linux/init.h>
+ #include <linux/slab.h>
++#include <linux/delay.h>
+ #include <asm/apicdef.h>
++#include <asm/nmi.h>
+ 
+ #include "../perf_event.h"
+ 
++static DEFINE_PER_CPU(unsigned int, perf_nmi_counter);
++
+ static __initconst const u64 amd_hw_cache_event_ids
+                               [PERF_COUNT_HW_CACHE_MAX]
+                               [PERF_COUNT_HW_CACHE_OP_MAX]
+@@ -429,6 +433,132 @@ static void amd_pmu_cpu_dead(int cpu)
+       }
+ }
+ 
++/*
++ * When a PMC counter overflows, an NMI is used to process the event and
++ * reset the counter. NMI latency can result in the counter being updated
++ * before the NMI can run, which can result in what appear to be spurious
++ * NMIs. This function is intended to wait for the NMI to run and reset
++ * the counter to avoid possible unhandled NMI messages.
++ */
++#define OVERFLOW_WAIT_COUNT   50
++
++static void amd_pmu_wait_on_overflow(int idx)
++{
++      unsigned int i;
++      u64 counter;
++
++      /*
++       * Wait for the counter to be reset if it has overflowed. This loop
++       * should exit very, very quickly, but just in case, don't wait
++       * forever...
++       */
++      for (i = 0; i < OVERFLOW_WAIT_COUNT; i++) {
++              rdmsrl(x86_pmu_event_addr(idx), counter);
++              if (counter & (1ULL << (x86_pmu.cntval_bits - 1)))
++                      break;
++
++              /* Might be in IRQ context, so can't sleep */
++              udelay(1);
++      }
++}
++
++static void amd_pmu_disable_all(void)
++{
++      struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
++      int idx;
++
++      x86_pmu_disable_all();
++
++      /*
++       * This shouldn't be called from NMI context, but add a safeguard here
++       * to return, since if we're in NMI context we can't wait for an NMI
++       * to reset an overflowed counter value.
++       */
++      if (in_nmi())
++              return;
++
++      /*
++       * Check each counter for overflow and wait for it to be reset by the
++       * NMI if it has overflowed. This relies on the fact that all active
++       * counters are always enabled when this function is called and
++       * ARCH_PERFMON_EVENTSEL_INT is always set.
++       */
++      for (idx = 0; idx < x86_pmu.num_counters; idx++) {
++              if (!test_bit(idx, cpuc->active_mask))
++                      continue;
++
++              amd_pmu_wait_on_overflow(idx);
++      }
++}
++
++static void amd_pmu_disable_event(struct perf_event *event)
++{
++      x86_pmu_disable_event(event);
++
++      /*
++       * This can be called from NMI context (via x86_pmu_stop). The counter
++       * may have overflowed, but either way, we'll never see it get reset
++       * by the NMI if we're already in the NMI. And the NMI latency support
++       * below will take care of any pending NMI that might have been
++       * generated by the overflow.
++       */
++      if (in_nmi())
++              return;
++
++      amd_pmu_wait_on_overflow(event->hw.idx);
++}
++
++/*
++ * Because of NMI latency, if multiple PMC counters are active or other sources
++ * of NMIs are received, the perf NMI handler can handle one or more overflowed
++ * PMC counters outside of the NMI associated with the PMC overflow. If the NMI
++ * doesn't arrive at the LAPIC in time to become a pending NMI, then the kernel
++ * back-to-back NMI support won't be active. This PMC handler needs to take into
++ * account that this can occur, otherwise this could result in unknown NMI
++ * messages being issued. Examples of this is PMC overflow while in the NMI
++ * handler when multiple PMCs are active or PMC overflow while handling some
++ * other source of an NMI.
++ *
++ * Attempt to mitigate this by using the number of active PMCs to determine
++ * whether to return NMI_HANDLED if the perf NMI handler did not handle/reset
++ * any PMCs. The per-CPU perf_nmi_counter variable is set to a minimum of the
++ * number of active PMCs or 2. The value of 2 is used in case an NMI does not
++ * arrive at the LAPIC in time to be collapsed into an already pending NMI.
++ */
++static int amd_pmu_handle_irq(struct pt_regs *regs)
++{
++      struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
++      int active, handled;
++
++      /*
++       * Obtain the active count before calling x86_pmu_handle_irq() since
++       * it is possible that x86_pmu_handle_irq() may make a counter
++       * inactive (through x86_pmu_stop).
++       */
++      active = __bitmap_weight(cpuc->active_mask, X86_PMC_IDX_MAX);
++
++      /* Process any counter overflows */
++      handled = x86_pmu_handle_irq(regs);
++
++      /*
++       * If a counter was handled, record the number of possible remaining
++       * NMIs that can occur.
++       */
++      if (handled) {
++              this_cpu_write(perf_nmi_counter,
++                             min_t(unsigned int, 2, active));
++
++              return handled;
++      }
++
++      if (!this_cpu_read(perf_nmi_counter))
++              return NMI_DONE;
++
++      this_cpu_dec(perf_nmi_counter);
++
++      return NMI_HANDLED;
++}
++
+ static struct event_constraint *
+ amd_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
+                         struct perf_event *event)
+@@ -621,11 +751,11 @@ static ssize_t amd_event_sysfs_show(char *page, u64 config)
+ 
+ static __initconst const struct x86_pmu amd_pmu = {
+       .name                   = "AMD",
+-      .handle_irq             = x86_pmu_handle_irq,
+-      .disable_all            = x86_pmu_disable_all,
++      .handle_irq             = amd_pmu_handle_irq,
++      .disable_all            = amd_pmu_disable_all,
+       .enable_all             = x86_pmu_enable_all,
+       .enable                 = x86_pmu_enable_event,
+-      .disable                = x86_pmu_disable_event,
++      .disable                = amd_pmu_disable_event,
+       .hw_config              = amd_pmu_hw_config,
+       .schedule_events        = x86_schedule_events,
+       .eventsel               = MSR_K7_EVNTSEL0,
+@@ -728,7 +858,7 @@ void amd_pmu_enable_virt(void)
+       cpuc->perf_ctr_virt_mask = 0;
+ 
+       /* Reload all events */
+-      x86_pmu_disable_all();
++      amd_pmu_disable_all();
+       x86_pmu_enable_all(0);
+ }
+ EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);
+@@ -746,7 +876,7 @@ void amd_pmu_disable_virt(void)
+       cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
+ 
+       /* Reload all events */
+-      x86_pmu_disable_all();
++      amd_pmu_disable_all();
+       x86_pmu_enable_all(0);
+ }
+ EXPORT_SYMBOL_GPL(amd_pmu_disable_virt);
+diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
+index 65e44f0588e2..6ed99de2ddf5 100644
+--- a/arch/x86/events/core.c
++++ b/arch/x86/events/core.c
+@@ -1328,8 +1328,9 @@ void x86_pmu_stop(struct perf_event *event, int flags)
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+       struct hw_perf_event *hwc = &event->hw;
+ 
+-      if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
++      if (test_bit(hwc->idx, cpuc->active_mask)) {
+               x86_pmu.disable(event);
++              __clear_bit(hwc->idx, cpuc->active_mask);
+               cpuc->events[hwc->idx] = NULL;
+               WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
+               hwc->state |= PERF_HES_STOPPED;
+@@ -1426,16 +1427,8 @@ int x86_pmu_handle_irq(struct pt_regs *regs)
+       apic_write(APIC_LVTPC, APIC_DM_NMI);
+ 
+       for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+-              if (!test_bit(idx, cpuc->active_mask)) {
+-                      /*
+-                       * Though we deactivated the counter some cpus
+-                       * might still deliver spurious interrupts still
+-                       * in flight. Catch them:
+-                       */
+-                      if (__test_and_clear_bit(idx, cpuc->running))
+-                              handled++;
++              if (!test_bit(idx, cpuc->active_mask))
+                       continue;
+-              }
+ 
+               event = cpuc->events[idx];
+ 
+diff --git a/arch/x86/include/asm/suspend_32.h b/arch/x86/include/asm/suspend_32.h
+index 982c325dad33..8be6afb58471 100644
+--- a/arch/x86/include/asm/suspend_32.h
++++ b/arch/x86/include/asm/suspend_32.h
+@@ -12,7 +12,13 @@
+ 
+ /* image of the saved processor state */
+ struct saved_context {
+-      u16 es, fs, gs, ss;
++      /*
++       * On x86_32, all segment registers, with the possible exception of
++       * gs, are saved at kernel entry in pt_regs.
++       */
++#ifdef CONFIG_X86_32_LAZY_GS
++      u16 gs;
++#endif
+       unsigned long cr0, cr2, cr3, cr4;
+       u64 misc_enable;
+       bool misc_enable_saved;
+diff --git a/arch/x86/include/asm/suspend_64.h b/arch/x86/include/asm/suspend_64.h
+index 7306e911faee..a7af9f53c0cb 100644
+--- a/arch/x86/include/asm/suspend_64.h
++++ b/arch/x86/include/asm/suspend_64.h
+@@ -20,8 +20,20 @@
+  */
+ struct saved_context {
+       struct pt_regs regs;
+-      u16 ds, es, fs, gs, ss;
+-      unsigned long gs_base, gs_kernel_base, fs_base;
++
++      /*
++       * User CS and SS are saved in current_pt_regs().  The rest of the
++       * segment selectors need to be saved and restored here.
++       */
++      u16 ds, es, fs, gs;
++
++      /*
++       * Usermode FSBASE and GSBASE may not match the fs and gs selectors,
++       * so we save them separately.  We save the kernelmode GSBASE to
++       * restore percpu access after resume.
++       */
++      unsigned long kernelmode_gs_base, usermode_gs_base, fs_base;
++
+       unsigned long cr0, cr2, cr3, cr4, cr8;
+       u64 misc_enable;
+       bool misc_enable_saved;
+@@ -30,8 +42,7 @@ struct saved_context {
+       u16 gdt_pad; /* Unused */
+       struct desc_ptr gdt_desc;
+       u16 idt_pad;
+-      u16 idt_limit;
+-      unsigned long idt_base;
++      struct desc_ptr idt;
+       u16 ldt;
+       u16 tss;
+       unsigned long tr;
+diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
+index bfd882617613..e7e625448008 100644
+--- a/arch/x86/include/asm/xen/hypercall.h
++++ b/arch/x86/include/asm/xen/hypercall.h
+@@ -217,6 +217,9 @@ privcmd_call(unsigned call,
+       __HYPERCALL_DECLS;
+       __HYPERCALL_5ARG(a1, a2, a3, a4, a5);
+ 
++      if (call >= PAGE_SIZE / sizeof(hypercall_page[0]))
++              return -EINVAL;
++
+       stac();
+       asm volatile(CALL_NOSPEC
+                    : __HYPERCALL_5PARAM
+diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
+index 04d5157fe7f8..a7d966964c6f 100644
+--- a/arch/x86/power/cpu.c
++++ b/arch/x86/power/cpu.c
+@@ -82,12 +82,8 @@ static void __save_processor_state(struct saved_context *ctxt)
+       /*
+        * descriptor tables
+        */
+-#ifdef CONFIG_X86_32
+       store_idt(&ctxt->idt);
+-#else
+-/* CONFIG_X86_64 */
+-      store_idt((struct desc_ptr *)&ctxt->idt_limit);
+-#endif
++
+       /*
+        * We save it here, but restore it only in the hibernate case.
+        * For ACPI S3 resume, this is loaded via 'early_gdt_desc' in 64-bit
+@@ -103,22 +99,18 @@ static void __save_processor_state(struct saved_context *ctxt)
+       /*
+        * segment registers
+        */
+-#ifdef CONFIG_X86_32
+-      savesegment(es, ctxt->es);
+-      savesegment(fs, ctxt->fs);
++#ifdef CONFIG_X86_32_LAZY_GS
+       savesegment(gs, ctxt->gs);
+-      savesegment(ss, ctxt->ss);
+-#else
+-/* CONFIG_X86_64 */
+-      asm volatile ("movw %%ds, %0" : "=m" (ctxt->ds));
+-      asm volatile ("movw %%es, %0" : "=m" (ctxt->es));
+-      asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs));
+-      asm volatile ("movw %%gs, %0" : "=m" (ctxt->gs));
+-      asm volatile ("movw %%ss, %0" : "=m" (ctxt->ss));
++#endif
++#ifdef CONFIG_X86_64
++      savesegment(gs, ctxt->gs);
++      savesegment(fs, ctxt->fs);
++      savesegment(ds, ctxt->ds);
++      savesegment(es, ctxt->es);
+ 
+       rdmsrl(MSR_FS_BASE, ctxt->fs_base);
+-      rdmsrl(MSR_GS_BASE, ctxt->gs_base);
+-      rdmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
++      rdmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
++      rdmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
+       mtrr_save_fixed_ranges(NULL);
+ 
+       rdmsrl(MSR_EFER, ctxt->efer);
+@@ -180,6 +172,9 @@ static void fix_processor_context(void)
+       write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
+ 
+       syscall_init();                         /* This sets MSR_*STAR and related */
++#else
++      if (boot_cpu_has(X86_FEATURE_SEP))
++              enable_sep_cpu();
+ #endif
+       load_TR_desc();                         /* This does ltr */
+       load_mm_ldt(current->active_mm);        /* This does lldt */
+@@ -192,9 +187,12 @@ static void fix_processor_context(void)
+ }
+ 
+ /**
+- *    __restore_processor_state - restore the contents of CPU registers saved
+- *            by __save_processor_state()
+- *    @ctxt - structure to load the registers contents from
++ * __restore_processor_state - restore the contents of CPU registers saved
++ *                             by __save_processor_state()
++ * @ctxt - structure to load the registers contents from
++ *
++ * The asm code that gets us here will have restored a usable GDT, although
++ * it will be pointing to the wrong alias.
+  */
+ static void notrace __restore_processor_state(struct saved_context *ctxt)
+ {
+@@ -217,46 +215,52 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
+       write_cr2(ctxt->cr2);
+       write_cr0(ctxt->cr0);
+ 
++      /* Restore the IDT. */
++      load_idt(&ctxt->idt);
++
+       /*
+-       * now restore the descriptor tables to their proper values
+-       * ltr is done i fix_processor_context().
++       * Just in case the asm code got us here with the SS, DS, or ES
++       * out of sync with the GDT, update them.
+        */
+-#ifdef CONFIG_X86_32
+-      load_idt(&ctxt->idt);
++      loadsegment(ss, __KERNEL_DS);
++      loadsegment(ds, __USER_DS);
++      loadsegment(es, __USER_DS);
++
++      /*
++       * Restore percpu access.  Percpu access can happen in exception
++       * handlers or in complicated helpers like load_gs_index().
++       */
++#ifdef CONFIG_X86_64
++      wrmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
+ #else
+-/* CONFIG_X86_64 */
+-      load_idt((const struct desc_ptr *)&ctxt->idt_limit);
++      loadsegment(fs, __KERNEL_PERCPU);
++      loadsegment(gs, __KERNEL_STACK_CANARY);
+ #endif
+ 
++      /* Restore the TSS, RO GDT, LDT, and usermode-relevant MSRs. */
++      fix_processor_context();
++
+       /*
+-       * segment registers
++       * Now that we have descriptor tables fully restored and working
++       * exception handling, restore the usermode segments.
+        */
+-#ifdef CONFIG_X86_32
++#ifdef CONFIG_X86_64
++      loadsegment(ds, ctxt->es);
+       loadsegment(es, ctxt->es);
+       loadsegment(fs, ctxt->fs);
+-      loadsegment(gs, ctxt->gs);
+-      loadsegment(ss, ctxt->ss);
++      load_gs_index(ctxt->gs);
+ 
+       /*
+-       * sysenter MSRs
++       * Restore FSBASE and GSBASE after restoring the selectors, since
++       * restoring the selectors clobbers the bases.  Keep in mind
++       * that MSR_KERNEL_GS_BASE is horribly misnamed.
+        */
+-      if (boot_cpu_has(X86_FEATURE_SEP))
+-              enable_sep_cpu();
+-#else
+-/* CONFIG_X86_64 */
+-      asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds));
+-      asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
+-      asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs));
+-      load_gs_index(ctxt->gs);
+-      asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss));
+-
+       wrmsrl(MSR_FS_BASE, ctxt->fs_base);
+-      wrmsrl(MSR_GS_BASE, ctxt->gs_base);
+-      wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
++      wrmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
++#elif defined(CONFIG_X86_32_LAZY_GS)
++      loadsegment(gs, ctxt->gs);
+ #endif
+ 
+-      fix_processor_context();
+-
+       do_fpu_end();
+       tsc_verify_tsc_adjust(true);
+       x86_platform.restore_sched_clock_state();
+diff --git a/arch/xtensa/kernel/stacktrace.c b/arch/xtensa/kernel/stacktrace.c
+index 0df4080fa20f..a94da7dd3eae 100644
+--- a/arch/xtensa/kernel/stacktrace.c
++++ b/arch/xtensa/kernel/stacktrace.c
+@@ -253,10 +253,14 @@ static int return_address_cb(struct stackframe *frame, void *data)
+       return 1;
+ }
+ 
++/*
++ * level == 0 is for the return address from the caller of this function,
++ * not from this function itself.
++ */
+ unsigned long return_address(unsigned level)
+ {
+       struct return_addr_data r = {
+-              .skip = level + 1,
++              .skip = level,
+       };
+       walk_stackframe(stack_pointer(NULL), return_address_cb, &r);
+       return r.addr;
+diff --git a/block/bio.c b/block/bio.c
+index 2e5d881423b8..d01ab919b313 100644
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -1280,8 +1280,11 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
+                       }
+               }
+ 
+-              if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
++              if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes) {
++                      if (!map_data)
++                              __free_page(page);
+                       break;
++              }
+ 
+               len -= bytes;
+               offset = 0;
+diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
+index c28dca0c613d..88316f86cc95 100644
+--- a/drivers/char/Kconfig
++++ b/drivers/char/Kconfig
+@@ -380,7 +380,7 @@ config XILINX_HWICAP
+ 
+ config R3964
+       tristate "Siemens R3964 line discipline"
+-      depends on TTY
++      depends on TTY && BROKEN
+       ---help---
+         This driver allows synchronous communication with devices using the
+         Siemens R3964 packet protocol. Unless you are dealing with special
+diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
+index dadacbe558ab..1a1f7eb46d1e 100644
+--- a/drivers/gpu/drm/i915/gvt/gtt.c
++++ b/drivers/gpu/drm/i915/gvt/gtt.c
+@@ -1629,7 +1629,7 @@ void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
+       if (WARN_ON(mm->type != INTEL_GVT_MM_PPGTT))
+               return;
+ 
+-      atomic_dec(&mm->pincount);
++      atomic_dec_if_positive(&mm->pincount);
+ }
+ 
+ /**
+diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
+index 31421b6b586e..b45ac6bc8add 100644
+--- a/drivers/gpu/drm/udl/udl_drv.c
++++ b/drivers/gpu/drm/udl/udl_drv.c
+@@ -47,6 +47,7 @@ static struct drm_driver driver = {
+       .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
+       .load = udl_driver_load,
+       .unload = udl_driver_unload,
++      .release = udl_driver_release,
+ 
+       /* gem hooks */
+       .gem_free_object = udl_gem_free_object,
+diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
+index 2c149b841cf1..307455dd6526 100644
+--- a/drivers/gpu/drm/udl/udl_drv.h
++++ b/drivers/gpu/drm/udl/udl_drv.h
+@@ -101,6 +101,7 @@ void udl_urb_completion(struct urb *urb);
+ 
+ int udl_driver_load(struct drm_device *dev, unsigned long flags);
+ void udl_driver_unload(struct drm_device *dev);
++void udl_driver_release(struct drm_device *dev);
+ 
+ int udl_fbdev_init(struct drm_device *dev);
+ void udl_fbdev_cleanup(struct drm_device *dev);
+diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
+index f8ea3c99b523..60866b422f81 100644
+--- a/drivers/gpu/drm/udl/udl_main.c
++++ b/drivers/gpu/drm/udl/udl_main.c
+@@ -378,6 +378,12 @@ void udl_driver_unload(struct drm_device *dev)
+               udl_free_urb_list(dev);
+ 
+       udl_fbdev_cleanup(dev);
+-      udl_modeset_cleanup(dev);
+       kfree(udl);
+ }
++
++void udl_driver_release(struct drm_device *dev)
++{
++      udl_modeset_cleanup(dev);
++      drm_dev_fini(dev);
++      kfree(dev);
++}
+diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
+index f9cd81375f28..d76e685206b3 100644
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -1789,6 +1789,36 @@ static bool dm_table_supports_discards(struct dm_table *t)
+       return true;
+ }
+ 
++static int device_requires_stable_pages(struct dm_target *ti,
++                                      struct dm_dev *dev, sector_t start,
++                                      sector_t len, void *data)
++{
++      struct request_queue *q = bdev_get_queue(dev->bdev);
++
++      return q && bdi_cap_stable_pages_required(q->backing_dev_info);
++}
++
++/*
++ * If any underlying device requires stable pages, a table must require
++ * them as well.  Only targets that support iterate_devices are considered:
++ * don't want error, zero, etc to require stable pages.
++ */
++static bool dm_table_requires_stable_pages(struct dm_table *t)
++{
++      struct dm_target *ti;
++      unsigned i;
++
++      for (i = 0; i < dm_table_get_num_targets(t); i++) {
++              ti = dm_table_get_target(t, i);
++
++              if (ti->type->iterate_devices &&
++                  ti->type->iterate_devices(ti, device_requires_stable_pages, NULL))
++                      return true;
++      }
++
++      return false;
++}
++
+ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
+                              struct queue_limits *limits)
+ {
+@@ -1837,6 +1867,15 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
+ 
+       dm_table_verify_integrity(t);
+ 
++      /*
++       * Some devices don't use blk_integrity but still want stable pages
++       * because they do their own checksumming.
++       */
++      if (dm_table_requires_stable_pages(t))
++              q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
++      else
++              q->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES;
++
+       /*
+        * Determine whether or not this queue's I/O timings contribute
+        * to the entropy pool, Only request-based targets use this.
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 15ad247955f7..446577a1a6a5 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -1076,6 +1076,8 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+       tpa_info = &rxr->rx_tpa[agg_id];
+ 
+       if (unlikely(cons != rxr->rx_next_cons)) {
++              netdev_warn(bp->dev, "TPA cons %x != expected cons %x\n",
++                          cons, rxr->rx_next_cons);
+               bnxt_sched_reset(bp, rxr);
+               return;
+       }
+@@ -1528,15 +1530,17 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
+       }
+ 
+       cons = rxcmp->rx_cmp_opaque;
+-      rx_buf = &rxr->rx_buf_ring[cons];
+-      data = rx_buf->data;
+-      data_ptr = rx_buf->data_ptr;
+       if (unlikely(cons != rxr->rx_next_cons)) {
+               int rc1 = bnxt_discard_rx(bp, bnapi, raw_cons, rxcmp);
+ 
++              netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
++                          cons, rxr->rx_next_cons);
+               bnxt_sched_reset(bp, rxr);
+               return rc1;
+       }
++      rx_buf = &rxr->rx_buf_ring[cons];
++      data = rx_buf->data;
++      data_ptr = rx_buf->data_ptr;
+       prefetch(data_ptr);
+ 
+       misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
+@@ -1553,11 +1557,17 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
+ 
+       rx_buf->data = NULL;
+       if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
++              u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
++
+               bnxt_reuse_rx_data(rxr, cons, data);
+               if (agg_bufs)
+                       bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
+ 
+               rc = -EIO;
++              if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
++                      netdev_warn(bp->dev, "RX buffer error %x\n", rx_err);
++                      bnxt_sched_reset(bp, rxr);
++              }
+               goto next_rx;
+       }
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+index ece3fb147e3e..36ae0b2519d2 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+@@ -45,7 +45,9 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev,
+       if (err)
+               return err;
+ 
++      mutex_lock(&mdev->mlx5e_res.td.list_lock);
+       list_add(&tir->list, &mdev->mlx5e_res.td.tirs_list);
++      mutex_unlock(&mdev->mlx5e_res.td.list_lock);
+ 
+       return 0;
+ }
+@@ -53,8 +55,10 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev,
+ void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
+                      struct mlx5e_tir *tir)
+ {
++      mutex_lock(&mdev->mlx5e_res.td.list_lock);
+       mlx5_core_destroy_tir(mdev, tir->tirn);
+       list_del(&tir->list);
++      mutex_unlock(&mdev->mlx5e_res.td.list_lock);
+ }
+ 
+ static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
+@@ -114,6 +118,7 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
+       }
+ 
+       INIT_LIST_HEAD(&mdev->mlx5e_res.td.tirs_list);
++      mutex_init(&mdev->mlx5e_res.td.list_lock);
+ 
+       return 0;
+ 
+@@ -140,15 +145,17 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
+ {
+       struct mlx5_core_dev *mdev = priv->mdev;
+       struct mlx5e_tir *tir;
+-      int err  = -ENOMEM;
++      int err  = 0;
+       u32 tirn = 0;
+       int inlen;
+       void *in;
+ 
+       inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
+       in = kvzalloc(inlen, GFP_KERNEL);
+-      if (!in)
++      if (!in) {
++              err = -ENOMEM;
+               goto out;
++      }
+ 
+       if (enable_uc_lb)
+               MLX5_SET(modify_tir_in, in, ctx.self_lb_block,
+@@ -156,6 +163,7 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
+ 
+       MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
+ 
++      mutex_lock(&mdev->mlx5e_res.td.list_lock);
+       list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) {
+               tirn = tir->tirn;
+               err = mlx5_core_modify_tir(mdev, tirn, in, inlen);
+@@ -167,6 +175,7 @@ out:
+       kvfree(in);
+       if (err)
+               netdev_err(priv->netdev, "refresh tir(0x%x) failed, %d\n", tirn, err);
++      mutex_unlock(&mdev->mlx5e_res.td.list_lock);
+ 
+       return err;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index 558fc6a05e2a..826d1a4600f3 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -155,26 +155,6 @@ static struct mlx5_profile profile[] = {
+                       .size   = 8,
+                       .limit  = 4
+               },
+-              .mr_cache[16]   = {
+-                      .size   = 8,
+-                      .limit  = 4
+-              },
+-              .mr_cache[17]   = {
+-                      .size   = 8,
+-                      .limit  = 4
+-              },
+-              .mr_cache[18]   = {
+-                      .size   = 8,
+-                      .limit  = 4
+-              },
+-              .mr_cache[19]   = {
+-                      .size   = 4,
+-                      .limit  = 2
+-              },
+-              .mr_cache[20]   = {
+-                      .size   = 4,
+-                      .limit  = 2
+-              },
+       },
+ };
+ 
+diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+index 9a7655560629..1910ca21a1bc 100644
+--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
++++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+@@ -200,7 +200,7 @@ static netdev_tx_t nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev)
+       ret = dev_queue_xmit(skb);
+       nfp_repr_inc_tx_stats(netdev, len, ret);
+ 
+-      return ret;
++      return NETDEV_TX_OK;
+ }
+ 
+ static int nfp_repr_stop(struct net_device *netdev)
+diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
+index e33a6c672a0a..0f07b5978fa1 100644
+--- a/drivers/net/hyperv/hyperv_net.h
++++ b/drivers/net/hyperv/hyperv_net.h
+@@ -779,6 +779,7 @@ struct netvsc_device {
+ 
+       wait_queue_head_t wait_drain;
+       bool destroy;
++      bool tx_disable; /* if true, do not wake up queue again */
+ 
+       /* Receive buffer allocated by us but manages by NetVSP */
+       void *recv_buf;
+diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
+index 806239b89990..a3bb4d5c64f5 100644
+--- a/drivers/net/hyperv/netvsc.c
++++ b/drivers/net/hyperv/netvsc.c
+@@ -107,6 +107,7 @@ static struct netvsc_device *alloc_net_device(void)
+ 
+       init_waitqueue_head(&net_device->wait_drain);
+       net_device->destroy = false;
++      net_device->tx_disable = false;
+       atomic_set(&net_device->open_cnt, 0);
+       net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
+       net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
+@@ -712,7 +713,7 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device,
+       } else {
+               struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);
+ 
+-              if (netif_tx_queue_stopped(txq) &&
++              if (netif_tx_queue_stopped(txq) && !net_device->tx_disable &&
+                   (hv_ringbuf_avail_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER ||
+                    queue_sends < 1)) {
+                       netif_tx_wake_queue(txq);
+@@ -865,7 +866,8 @@ static inline int netvsc_send_pkt(
+                       netif_tx_stop_queue(txq);
+       } else if (ret == -EAGAIN) {
+               netif_tx_stop_queue(txq);
+-              if (atomic_read(&nvchan->queue_sends) < 1) {
++              if (atomic_read(&nvchan->queue_sends) < 1 &&
++                  !net_device->tx_disable) {
+                       netif_tx_wake_queue(txq);
+                       ret = -ENOSPC;
+               }
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index 74b9e51b2b47..eb92720dd1c4 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -108,6 +108,15 @@ static void netvsc_set_rx_mode(struct net_device *net)
+       rcu_read_unlock();
+ }
+ 
++static void netvsc_tx_enable(struct netvsc_device *nvscdev,
++                           struct net_device *ndev)
++{
++      nvscdev->tx_disable = false;
++      virt_wmb(); /* ensure queue wake up mechanism is on */
++
++      netif_tx_wake_all_queues(ndev);
++}
++
+ static int netvsc_open(struct net_device *net)
+ {
+       struct net_device_context *ndev_ctx = netdev_priv(net);
+@@ -128,7 +137,7 @@ static int netvsc_open(struct net_device *net)
+       rdev = nvdev->extension;
+       if (!rdev->link_state) {
+               netif_carrier_on(net);
+-              netif_tx_wake_all_queues(net);
++              netvsc_tx_enable(nvdev, net);
+       }
+ 
+       if (vf_netdev) {
+@@ -183,6 +192,17 @@ static int netvsc_wait_until_empty(struct netvsc_device *nvdev)
+       }
+ }
+ 
++static void netvsc_tx_disable(struct netvsc_device *nvscdev,
++                            struct net_device *ndev)
++{
++      if (nvscdev) {
++              nvscdev->tx_disable = true;
++              virt_wmb(); /* ensure txq will not wake up after stop */
++      }
++
++      netif_tx_disable(ndev);
++}
++
+ static int netvsc_close(struct net_device *net)
+ {
+       struct net_device_context *net_device_ctx = netdev_priv(net);
+@@ -191,7 +211,7 @@ static int netvsc_close(struct net_device *net)
+       struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
+       int ret;
+ 
+-      netif_tx_disable(net);
++      netvsc_tx_disable(nvdev, net);
+ 
+       /* No need to close rndis filter if it is removed already */
+       if (!nvdev)
+@@ -893,7 +913,7 @@ static int netvsc_detach(struct net_device *ndev,
+ 
+       /* If device was up (receiving) then shutdown */
+       if (netif_running(ndev)) {
+-              netif_tx_disable(ndev);
++              netvsc_tx_disable(nvdev, ndev);
+ 
+               ret = rndis_filter_close(nvdev);
+               if (ret) {
+@@ -1720,7 +1740,7 @@ static void netvsc_link_change(struct work_struct *w)
+               if (rdev->link_state) {
+                       rdev->link_state = false;
+                       netif_carrier_on(net);
+-                      netif_tx_wake_all_queues(net);
++                      netvsc_tx_enable(net_device, net);
+               } else {
+                       notify = true;
+               }
+@@ -1730,7 +1750,7 @@ static void netvsc_link_change(struct work_struct *w)
+               if (!rdev->link_state) {
+                       rdev->link_state = true;
+                       netif_carrier_off(net);
+-                      netif_tx_stop_all_queues(net);
++                      netvsc_tx_disable(net_device, net);
+               }
+               kfree(event);
+               break;
+@@ -1739,7 +1759,7 @@ static void netvsc_link_change(struct work_struct *w)
+               if (!rdev->link_state) {
+                       rdev->link_state = true;
+                       netif_carrier_off(net);
+-                      netif_tx_stop_all_queues(net);
++                      netvsc_tx_disable(net_device, net);
+                       event->event = RNDIS_STATUS_MEDIA_CONNECT;
+                       spin_lock_irqsave(&ndev_ctx->lock, flags);
+                       list_add(&event->list, &ndev_ctx->reconfig_events);
+diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
+index a1b68b19d912..5ab725a571a8 100644
+--- a/drivers/net/phy/sfp.c
++++ b/drivers/net/phy/sfp.c
+@@ -878,6 +878,10 @@ static int sfp_probe(struct platform_device *pdev)
+       if (poll)
+               mod_delayed_work(system_wq, &sfp->poll, poll_jiffies);
+ 
++      sfp->sfp_bus = sfp_register_socket(sfp->dev, sfp, &sfp_module_ops);
++      if (!sfp->sfp_bus)
++              return -ENOMEM;
++
+       return 0;
+ }
+ 
+@@ -887,10 +891,6 @@ static int sfp_remove(struct platform_device *pdev)
+ 
+       sfp_unregister_socket(sfp->sfp_bus);
+ 
+-      sfp->sfp_bus = sfp_register_socket(sfp->dev, sfp, &sfp_module_ops);
+-      if (!sfp->sfp_bus)
+-              return -ENOMEM;
+-
+       return 0;
+ }
+ 
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 65e47cc52d14..01abe8eea753 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1188,6 +1188,7 @@ static const struct usb_device_id products[] = {
+       {QMI_FIXED_INTF(0x19d2, 0x2002, 4)},    /* ZTE (Vodafone) K3765-Z */
+       {QMI_FIXED_INTF(0x2001, 0x7e19, 4)},    /* D-Link DWM-221 B1 */
+       {QMI_FIXED_INTF(0x2001, 0x7e35, 4)},    /* D-Link DWM-222 */
++      {QMI_FIXED_INTF(0x2020, 0x2031, 4)},    /* Olicard 600 */
+       {QMI_FIXED_INTF(0x2020, 0x2033, 4)},    /* BroadMobi BM806U */
+       {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)},    /* Sierra Wireless MC7700 */
+       {QMI_FIXED_INTF(0x114f, 0x68a2, 8)},    /* Sierra Wireless MC7750 */
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index d442afa195ab..867056395d48 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -3888,6 +3888,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9128,
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c14 */
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9130,
+                        quirk_dma_func1_alias);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9170,
++                       quirk_dma_func1_alias);
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c47 + c57 */
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9172,
+                        quirk_dma_func1_alias);
+diff --git a/drivers/staging/ccree/ssi_hash.c b/drivers/staging/ccree/ssi_hash.c
+index e266a70a1b32..13291aeaf350 100644
+--- a/drivers/staging/ccree/ssi_hash.c
++++ b/drivers/staging/ccree/ssi_hash.c
+@@ -1781,7 +1781,7 @@ static int ssi_ahash_import(struct ahash_request *req, const void *in)
+       struct device *dev = &ctx->drvdata->plat_dev->dev;
+       struct ahash_req_ctx *state = ahash_request_ctx(req);
+       u32 tmp;
+-      int rc = 0;
++      int rc;
+ 
+       memcpy(&tmp, in, sizeof(u32));
+       if (tmp != CC_EXPORT_MAGIC) {
+@@ -1790,12 +1790,9 @@ static int ssi_ahash_import(struct ahash_request *req, const void *in)
+       }
+       in += sizeof(u32);
+ 
+-      /* call init() to allocate bufs if the user hasn't */
+-      if (!state->digest_buff) {
+-              rc = ssi_hash_init(state, ctx);
+-              if (rc)
+-                      goto out;
+-      }
++      rc = ssi_hash_init(state, ctx);
++      if (rc)
++              goto out;
+ 
+       dma_sync_single_for_cpu(dev, state->digest_buff_dma_addr,
+                               ctx->inter_digestsize, DMA_BIDIRECTIONAL);
+diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
+index b811442c5ce6..9788a25a34f4 100644
+--- a/drivers/tty/Kconfig
++++ b/drivers/tty/Kconfig
+@@ -467,4 +467,28 @@ config VCC
+       depends on SUN_LDOMS
+       help
+         Support for Sun logical domain consoles.
++
++config LDISC_AUTOLOAD
++      bool "Automatically load TTY Line Disciplines"
++      default y
++      help
++        Historically the kernel has always automatically loaded any
++        line discipline that is in a kernel module when a user asks
++        for it to be loaded with the TIOCSETD ioctl, or through other
++        means.  This is not always the best thing to do on systems
++        where you know you will not be using some of the more
++        "ancient" line disciplines, so prevent the kernel from doing
++        this unless the request is coming from a process with the
++        CAP_SYS_MODULE permissions.
++
++        Say 'Y' here if you trust your userspace users to do the right
++        thing, or if you have only provided the line disciplines that
++        you know you will be using, or if you wish to continue to use
++        the traditional method of on-demand loading of these modules
++        by any user.
++
++        This functionality can be changed at runtime with the
++        dev.tty.ldisc_autoload sysctl, this configuration option will
++        only set the default value of this functionality.
++
+ endif # TTY
+diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
+index 7e351d205393..dba4f53a7fff 100644
+--- a/drivers/tty/tty_io.c
++++ b/drivers/tty/tty_io.c
+@@ -511,6 +511,8 @@ static const struct file_operations hung_up_tty_fops = {
+ static DEFINE_SPINLOCK(redirect_lock);
+ static struct file *redirect;
+ 
++extern void tty_sysctl_init(void);
++
+ /**
+  *    tty_wakeup      -       request more data
+  *    @tty: terminal
+@@ -3332,6 +3334,7 @@ void console_sysfs_notify(void)
+  */
+ int __init tty_init(void)
+ {
++      tty_sysctl_init();
+       cdev_init(&tty_cdev, &tty_fops);
+       if (cdev_add(&tty_cdev, MKDEV(TTYAUX_MAJOR, 0), 1) ||
+           register_chrdev_region(MKDEV(TTYAUX_MAJOR, 0), 1, "/dev/tty") < 0)
+diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
+index ca656ef8de64..01fcdc7ff077 100644
+--- a/drivers/tty/tty_ldisc.c
++++ b/drivers/tty/tty_ldisc.c
+@@ -155,6 +155,13 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
+  *            takes tty_ldiscs_lock to guard against ldisc races
+  */
+ 
++#if defined(CONFIG_LDISC_AUTOLOAD)
++      #define INITIAL_AUTOLOAD_STATE  1
++#else
++      #define INITIAL_AUTOLOAD_STATE  0
++#endif
++static int tty_ldisc_autoload = INITIAL_AUTOLOAD_STATE;
++
+ static struct tty_ldisc *tty_ldisc_get(struct tty_struct *tty, int disc)
+ {
+       struct tty_ldisc *ld;
+@@ -169,6 +176,8 @@ static struct tty_ldisc *tty_ldisc_get(struct tty_struct *tty, int disc)
+        */
+       ldops = get_ldops(disc);
+       if (IS_ERR(ldops)) {
++              if (!capable(CAP_SYS_MODULE) && !tty_ldisc_autoload)
++                      return ERR_PTR(-EPERM);
+               request_module("tty-ldisc-%d", disc);
+               ldops = get_ldops(disc);
+               if (IS_ERR(ldops))
+@@ -841,3 +850,41 @@ void tty_ldisc_deinit(struct tty_struct *tty)
+               tty_ldisc_put(tty->ldisc);
+       tty->ldisc = NULL;
+ }
++
++static int zero;
++static int one = 1;
++static struct ctl_table tty_table[] = {
++      {
++              .procname       = "ldisc_autoload",
++              .data           = &tty_ldisc_autoload,
++              .maxlen         = sizeof(tty_ldisc_autoload),
++              .mode           = 0644,
++              .proc_handler   = proc_dointvec,
++              .extra1         = &zero,
++              .extra2         = &one,
++      },
++      { }
++};
++
++static struct ctl_table tty_dir_table[] = {
++      {
++              .procname       = "tty",
++              .mode           = 0555,
++              .child          = tty_table,
++      },
++      { }
++};
++
++static struct ctl_table tty_root_table[] = {
++      {
++              .procname       = "dev",
++              .mode           = 0555,
++              .child          = tty_dir_table,
++      },
++      { }
++};
++
++void tty_sysctl_init(void)
++{
++      register_sysctl_table(tty_root_table);
++}
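
(For illustration only, not part of the patch: a minimal userspace sketch of
the TIOCSETD path that this knob gates. The device path and the choice of
N_SLIP are arbitrary; any line discipline built as a module behaves the same.
With dev.tty.ldisc_autoload=0 and no CAP_SYS_MODULE, the ioctl now fails with
EPERM instead of triggering request_module().)

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/tty.h>          /* N_SLIP, historically built as a module */

int main(void)
{
	int ldisc = N_SLIP;
	int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);

	if (fd < 0)
		return 1;
	/* EPERM here when autoload is disabled for unprivileged callers */
	if (ioctl(fd, TIOCSETD, &ldisc) < 0)
		perror("TIOCSETD");
	close(fd);
	return 0;
}
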
+diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
+index 71458f493cf8..cc9d421c0929 100644
+--- a/drivers/virtio/virtio_ring.c
++++ b/drivers/virtio/virtio_ring.c
+@@ -1087,6 +1087,8 @@ struct virtqueue *vring_create_virtqueue(
+                                         GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
+               if (queue)
+                       break;
++              if (!may_reduce_num)
++                      return NULL;
+       }
+ 
+       if (!num)
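
(Context, illustrative only: the loop patched above halves the ring size until
an allocation succeeds. A simplified stand-in for the fixed shape, with
invented names; the real code lives in vring_create_virtqueue():)

#include <stdlib.h>
#include <stdbool.h>

static void *alloc_ring(unsigned int *num, size_t entry_size,
			bool may_reduce_num)
{
	void *queue = NULL;

	for (; *num; *num /= 2) {
		queue = calloc(*num, entry_size);
		if (queue)
			break;
		if (!may_reduce_num)	/* the fix: honour the caller's flag */
			return NULL;
	}
	return queue;
}
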
+diff --git a/fs/block_dev.c b/fs/block_dev.c
+index 3911c1a80219..61949e3446e5 100644
+--- a/fs/block_dev.c
++++ b/fs/block_dev.c
+@@ -306,10 +306,10 @@ static void blkdev_bio_end_io(struct bio *bio)
+       struct blkdev_dio *dio = bio->bi_private;
+       bool should_dirty = dio->should_dirty;
+ 
+-      if (dio->multi_bio && !atomic_dec_and_test(&dio->ref)) {
+-              if (bio->bi_status && !dio->bio.bi_status)
+-                      dio->bio.bi_status = bio->bi_status;
+-      } else {
++      if (bio->bi_status && !dio->bio.bi_status)
++              dio->bio.bi_status = bio->bi_status;
++
++      if (!dio->multi_bio || atomic_dec_and_test(&dio->ref)) {
+               if (!dio->is_sync) {
+                       struct kiocb *iocb = dio->iocb;
+                       ssize_t ret;
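
(Why the reorder matters, sketched with hypothetical types: the old code only
latched an error from non-final bios, so an error reported by the last
completing bio was silently dropped. The corrected pattern records the first
error from any fragment and completes the parent only on the final reference:)

#include <stdatomic.h>
#include <stdbool.h>

struct dio_state {
	atomic_int ref;		/* one count per in-flight fragment */
	bool multi;		/* more than one fragment was issued */
	int status;		/* first error seen, 0 if none */
};

static bool frag_end_io(struct dio_state *d, int frag_status)
{
	if (frag_status && !d->status)
		d->status = frag_status;	/* latch errors unconditionally */

	/* only the final completion finishes the parent request */
	return !d->multi || atomic_fetch_sub(&d->ref, 1) == 1;
}
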
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index cddd63b9103f..dd3b4820ac30 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -357,6 +357,16 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+ 
++      /*
++       * If the fs is mounted with nologreplay, which requires it to be
++       * mounted in RO mode as well, we can not allow discard on free space
++       * inside block groups, because log trees refer to extents that are not
++       * pinned in a block group's free space cache (pinning the extents is
++       * precisely the first phase of replaying a log tree).
++       */
++      if (btrfs_test_opt(fs_info, NOLOGREPLAY))
++              return -EROFS;
++
+       rcu_read_lock();
+       list_for_each_entry_rcu(device, &fs_info->fs_devices->devices,
+                               dev_list) {
+diff --git a/fs/btrfs/props.c b/fs/btrfs/props.c
+index cbabc6f2b322..266f9069307b 100644
+--- a/fs/btrfs/props.c
++++ b/fs/btrfs/props.c
+@@ -386,11 +386,11 @@ int btrfs_subvol_inherit_props(struct btrfs_trans_handle *trans,
+ 
+ static int prop_compression_validate(const char *value, size_t len)
+ {
+-      if (!strncmp("lzo", value, len))
++      if (!strncmp("lzo", value, 3))
+               return 0;
+-      else if (!strncmp("zlib", value, len))
++      else if (!strncmp("zlib", value, 4))
+               return 0;
+-      else if (!strncmp("zstd", value, len))
++      else if (!strncmp("zstd", value, 4))
+               return 0;
+ 
+       return -EINVAL;
+@@ -416,7 +416,7 @@ static int prop_compression_apply(struct inode *inode,
+               btrfs_set_fs_incompat(fs_info, COMPRESS_LZO);
+       } else if (!strncmp("zlib", value, 4)) {
+               type = BTRFS_COMPRESS_ZLIB;
+-      } else if (!strncmp("zstd", value, len)) {
++      } else if (!strncmp("zstd", value, 4)) {
+               type = BTRFS_COMPRESS_ZSTD;
+               btrfs_set_fs_incompat(fs_info, COMPRESS_ZSTD);
+       } else {
+diff --git a/include/linux/bitrev.h b/include/linux/bitrev.h
+index 50fb0dee23e8..d35b8ec1c485 100644
+--- a/include/linux/bitrev.h
++++ b/include/linux/bitrev.h
+@@ -34,41 +34,41 @@ static inline u32 __bitrev32(u32 x)
+ 
+ #define __constant_bitrev32(x)        \
+ ({                                    \
+-      u32 __x = x;                    \
+-      __x = (__x >> 16) | (__x << 16);        \
+-      __x = ((__x & (u32)0xFF00FF00UL) >> 8) | ((__x & (u32)0x00FF00FFUL) << 8);      \
+-      __x = ((__x & (u32)0xF0F0F0F0UL) >> 4) | ((__x & (u32)0x0F0F0F0FUL) << 4);      \
+-      __x = ((__x & (u32)0xCCCCCCCCUL) >> 2) | ((__x & (u32)0x33333333UL) << 2);      \
+-      __x = ((__x & (u32)0xAAAAAAAAUL) >> 1) | ((__x & (u32)0x55555555UL) << 1);      \
+-      __x;                                                            \
++      u32 ___x = x;                   \
++      ___x = (___x >> 16) | (___x << 16);     \
++      ___x = ((___x & (u32)0xFF00FF00UL) >> 8) | ((___x & (u32)0x00FF00FFUL) << 8);   \
++      ___x = ((___x & (u32)0xF0F0F0F0UL) >> 4) | ((___x & (u32)0x0F0F0F0FUL) << 4);   \
++      ___x = ((___x & (u32)0xCCCCCCCCUL) >> 2) | ((___x & (u32)0x33333333UL) << 2);   \
++      ___x = ((___x & (u32)0xAAAAAAAAUL) >> 1) | ((___x & (u32)0x55555555UL) << 1);   \
++      ___x;                                                           \
+ })
+ 
+ #define __constant_bitrev16(x)        \
+ ({                                    \
+-      u16 __x = x;                    \
+-      __x = (__x >> 8) | (__x << 8);  \
+-      __x = ((__x & (u16)0xF0F0U) >> 4) | ((__x & (u16)0x0F0FU) << 4);        \
+-      __x = ((__x & (u16)0xCCCCU) >> 2) | ((__x & (u16)0x3333U) << 2);        \
+-      __x = ((__x & (u16)0xAAAAU) >> 1) | ((__x & (u16)0x5555U) << 1);        \
+-      __x;                                                            \
++      u16 ___x = x;                   \
++      ___x = (___x >> 8) | (___x << 8);       \
++      ___x = ((___x & (u16)0xF0F0U) >> 4) | ((___x & (u16)0x0F0FU) << 4);     \
++      ___x = ((___x & (u16)0xCCCCU) >> 2) | ((___x & (u16)0x3333U) << 2);     \
++      ___x = ((___x & (u16)0xAAAAU) >> 1) | ((___x & (u16)0x5555U) << 1);     \
++      ___x;                                                           \
+ })
+ 
+ #define __constant_bitrev8x4(x) \
+ ({                    \
+-      u32 __x = x;    \
+-      __x = ((__x & (u32)0xF0F0F0F0UL) >> 4) | ((__x & (u32)0x0F0F0F0FUL) << 4);      \
+-      __x = ((__x & (u32)0xCCCCCCCCUL) >> 2) | ((__x & (u32)0x33333333UL) << 2);      \
+-      __x = ((__x & (u32)0xAAAAAAAAUL) >> 1) | ((__x & (u32)0x55555555UL) << 1);      \
+-      __x;                                                            \
++      u32 ___x = x;   \
++      ___x = ((___x & (u32)0xF0F0F0F0UL) >> 4) | ((___x & (u32)0x0F0F0F0FUL) << 4);   \
++      ___x = ((___x & (u32)0xCCCCCCCCUL) >> 2) | ((___x & (u32)0x33333333UL) << 2);   \
++      ___x = ((___x & (u32)0xAAAAAAAAUL) >> 1) | ((___x & (u32)0x55555555UL) << 1);   \
++      ___x;                                                           \
+ })
+ 
+ #define __constant_bitrev8(x) \
+ ({                                    \
+-      u8 __x = x;                     \
+-      __x = (__x >> 4) | (__x << 4);  \
+-      __x = ((__x & (u8)0xCCU) >> 2) | ((__x & (u8)0x33U) << 2);      \
+-      __x = ((__x & (u8)0xAAU) >> 1) | ((__x & (u8)0x55U) << 1);      \
+-      __x;                                                            \
++      u8 ___x = x;                    \
++      ___x = (___x >> 4) | (___x << 4);       \
++      ___x = ((___x & (u8)0xCCU) >> 2) | ((___x & (u8)0x33U) << 2);   \
++      ___x = ((___x & (u8)0xAAU) >> 1) | ((___x & (u8)0x55U) << 1);   \
++      ___x;                                                           \
+ })
+ 
+ #define bitrev32(x) \
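
(The extra underscore is not cosmetic. These are GCC statement expressions,
and when one macro expands inside another that also declares __x, the inner
initializer binds to the freshly declared, still uninitialized inner variable.
A minimal reproduction of the hazard, illustrative only:)

/* OUTER(v) expands INNER(__x) into `unsigned __x = (__x);`, a
 * self-initialization from garbage; renaming one side to ___x, as the
 * patch does, breaks the accidental capture. */
#define INNER(x) ({ unsigned __x = (x); __x >> 1; })
#define OUTER(x) ({ unsigned __x = (x); INNER(__x) + 1; })

unsigned broken(unsigned v)
{
	return OUTER(v);
}
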
+diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
+index 88f0c530fe9c..32d445315128 100644
+--- a/include/linux/mlx5/driver.h
++++ b/include/linux/mlx5/driver.h
+@@ -743,6 +743,8 @@ struct mlx5_pagefault {
+ };
+ 
+ struct mlx5_td {
++      /* protects tirs list changes while tirs refresh */
++      struct mutex     list_lock;
+       struct list_head tirs_list;
+       u32              tdn;
+ };
+diff --git a/include/linux/string.h b/include/linux/string.h
+index 96115bf561b4..3d43329c20be 100644
+--- a/include/linux/string.h
++++ b/include/linux/string.h
+@@ -142,6 +142,9 @@ extern void * memscan(void *,int,__kernel_size_t);
+ #ifndef __HAVE_ARCH_MEMCMP
+ extern int memcmp(const void *,const void *,__kernel_size_t);
+ #endif
++#ifndef __HAVE_ARCH_BCMP
++extern int bcmp(const void *,const void *,__kernel_size_t);
++#endif
+ #ifndef __HAVE_ARCH_MEMCHR
+ extern void * memchr(const void *,int,__kernel_size_t);
+ #endif
+diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h
+index bbf32524ab27..75007e648dfa 100644
+--- a/include/linux/virtio_ring.h
++++ b/include/linux/virtio_ring.h
+@@ -63,7 +63,7 @@ struct virtqueue;
+ /*
+  * Creates a virtqueue and allocates the descriptor ring.  If
+  * may_reduce_num is set, then this may allocate a smaller ring than
+- * expected.  The caller should query virtqueue_get_ring_size to learn
++ * expected.  The caller should query virtqueue_get_vring_size to learn
+  * the actual size of the ring.
+  */
+ struct virtqueue *vring_create_virtqueue(unsigned int index,
+diff --git a/include/net/ip.h b/include/net/ip.h
+index 80575db4e304..b8ebee43941f 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -603,7 +603,7 @@ int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp,
+                            unsigned char __user *data, int optlen);
+ void ip_options_undo(struct ip_options *opt);
+ void ip_forward_options(struct sk_buff *skb);
+-int ip_options_rcv_srr(struct sk_buff *skb);
++int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev);
+ 
+ /*
+  *    Functions provided by ip_sockglue.c
+diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
+index f4bf75fac349..d96c9d9cca96 100644
+--- a/include/net/net_namespace.h
++++ b/include/net/net_namespace.h
+@@ -56,6 +56,7 @@ struct net {
+                                                */
+       spinlock_t              rules_mod_lock;
+ 
++      u32                     hash_mix;
+       atomic64_t              cookie_gen;
+ 
+       struct list_head        list;           /* list of network namespaces */
+diff --git a/include/net/netns/hash.h b/include/net/netns/hash.h
+index 24c78183a4c2..d9b665151f3d 100644
+--- a/include/net/netns/hash.h
++++ b/include/net/netns/hash.h
+@@ -2,21 +2,10 @@
+ #ifndef __NET_NS_HASH_H__
+ #define __NET_NS_HASH_H__
+ 
+-#include <asm/cache.h>
+-
+-struct net;
++#include <net/net_namespace.h>
+ 
+ static inline u32 net_hash_mix(const struct net *net)
+ {
+-#ifdef CONFIG_NET_NS
+-      /*
+-       * shift this right to eliminate bits, that are
+-       * always zeroed
+-       */
+-
+-      return (u32)(((unsigned long)net) >> L1_CACHE_SHIFT);
+-#else
+-      return 0;
+-#endif
++      return net->hash_mix;
+ }
+ #endif
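
(Background: deriving the mix from the struct net pointer leaked kernel
address bits into hash values and made bucket placement predictable. A random
per-namespace salt fixes both. Typical consumer shape, illustrative;
jhash_1word() is the real kernel helper, bucket() is invented:)

#include <linux/jhash.h>
#include <net/netns/hash.h>

static inline unsigned int bucket(const struct net *net, u32 key, u32 mask)
{
	/* per-netns salt: different layout per namespace, no pointer bits */
	return jhash_1word(key, net_hash_mix(net)) & mask;
}
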
+diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
+index 0fa7ef74303b..317fc759de76 100644
+--- a/kernel/irq/chip.c
++++ b/kernel/irq/chip.c
+@@ -1363,6 +1363,10 @@ int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info)
+ int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
+ {
+       data = data->parent_data;
++
++      if (data->chip->flags & IRQCHIP_SKIP_SET_WAKE)
++              return 0;
++
+       if (data->chip->irq_set_wake)
+               return data->chip->irq_set_wake(data, on);
+ 
+diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
+index c2bfb11a9d05..aa08d4184608 100644
+--- a/kernel/irq/irqdesc.c
++++ b/kernel/irq/irqdesc.c
+@@ -535,6 +535,7 @@ int __init early_irq_init(void)
+               alloc_masks(&desc[i], node);
+               raw_spin_lock_init(&desc[i].lock);
+               lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
++              mutex_init(&desc[i].request_mutex);
+               desc_set_defaults(i, &desc[i], node, NULL, NULL);
+       }
+       return arch_early_irq_init();
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 4d54c1fe9623..9829ede00498 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -7018,10 +7018,10 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
+       if (cfs_rq->last_h_load_update == now)
+               return;
+ 
+-      cfs_rq->h_load_next = NULL;
++      WRITE_ONCE(cfs_rq->h_load_next, NULL);
+       for_each_sched_entity(se) {
+               cfs_rq = cfs_rq_of(se);
+-              cfs_rq->h_load_next = se;
++              WRITE_ONCE(cfs_rq->h_load_next, se);
+               if (cfs_rq->last_h_load_update == now)
+                       break;
+       }
+@@ -7031,7 +7031,7 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
+               cfs_rq->last_h_load_update = now;
+       }
+ 
+-      while ((se = cfs_rq->h_load_next) != NULL) {
++      while ((se = READ_ONCE(cfs_rq->h_load_next)) != NULL) {
+               load = cfs_rq->h_load;
+               load = div64_ul(load * se->avg.load_avg,
+                       cfs_rq_load_avg(cfs_rq) + 1);
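
(The h_load_next chain is walked by task_h_load() on other CPUs with no lock
held, so the plain stores above were exposed to compiler tearing and reload.
The pairing, sketched with C11 relaxed atomics, the userspace analogue of
WRITE_ONCE/READ_ONCE:)

#include <stdatomic.h>
#include <stddef.h>

struct node {
	_Atomic(struct node *) next;
};

/* writer: publish each link with a single, untearable store */
static void publish(struct node *n, struct node *target)
{
	atomic_store_explicit(&n->next, target, memory_order_relaxed);
}

/* reader: force a fresh load on every access */
static struct node *follow(struct node *n)
{
	return atomic_load_explicit(&n->next, memory_order_relaxed);
}
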
+diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
+index fa5de5e8de61..fdeb9bc6affb 100644
+--- a/kernel/time/alarmtimer.c
++++ b/kernel/time/alarmtimer.c
+@@ -597,7 +597,7 @@ static ktime_t alarm_timer_remaining(struct k_itimer *timr, ktime_t now)
+ {
+       struct alarm *alarm = &timr->it.alarm.alarmtimer;
+ 
+-      return ktime_sub(now, alarm->node.expires);
++      return ktime_sub(alarm->node.expires, now);
+ }
+ 
+ /**
+diff --git a/lib/string.c b/lib/string.c
+index 5e8d410a93df..1530643edf00 100644
+--- a/lib/string.c
++++ b/lib/string.c
+@@ -865,6 +865,26 @@ __visible int memcmp(const void *cs, const void *ct, size_t count)
+ EXPORT_SYMBOL(memcmp);
+ #endif
+ 
++#ifndef __HAVE_ARCH_BCMP
++/**
++ * bcmp - returns 0 if and only if the buffers have identical contents.
++ * @a: pointer to first buffer.
++ * @b: pointer to second buffer.
++ * @len: size of buffers.
++ *
++ * The sign or magnitude of a non-zero return value has no particular
++ * meaning, and architectures may implement their own more efficient bcmp(). So
++ * while this particular implementation is a simple (tail) call to memcmp, do
++ * not rely on anything but whether the return value is zero or non-zero.
++ */
++#undef bcmp
++int bcmp(const void *a, const void *b, size_t len)
++{
++      return memcmp(a, b, len);
++}
++EXPORT_SYMBOL(bcmp);
++#endif
++
+ #ifndef __HAVE_ARCH_MEMSCAN
+ /**
+  * memscan - Find a character in an area of memory.
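
(The kernel-doc contract above is the whole point of the addition: compilers,
clang in particular, lower equality-only memcmp() calls to bcmp(), so callers
may rely on zero versus non-zero and nothing else. A safe usage shape,
illustrative only:)

#include <strings.h>		/* userspace bcmp() declaration */
#include <stdbool.h>
#include <stddef.h>

static bool buf_equal(const void *a, const void *b, size_t len)
{
	return bcmp(a, b, len) == 0;	/* correct: equality test only */
}

/* wrong: `bcmp(a, b, len) < 0`, the sign carries no meaning */
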
+diff --git a/net/core/ethtool.c b/net/core/ethtool.c
+index 3469f5053c79..145cb343c1b0 100644
+--- a/net/core/ethtool.c
++++ b/net/core/ethtool.c
+@@ -1815,11 +1815,15 @@ static int ethtool_get_strings(struct net_device *dev, void __user *useraddr)
+       WARN_ON_ONCE(!ret);
+ 
+       gstrings.len = ret;
+-      data = vzalloc(gstrings.len * ETH_GSTRING_LEN);
+-      if (gstrings.len && !data)
+-              return -ENOMEM;
++      if (gstrings.len) {
++              data = vzalloc(gstrings.len * ETH_GSTRING_LEN);
++              if (!data)
++                      return -ENOMEM;
+ 
+-      __ethtool_get_strings(dev, gstrings.string_set, data);
++              __ethtool_get_strings(dev, gstrings.string_set, data);
++      } else {
++              data = NULL;
++      }
+ 
+       ret = -EFAULT;
+       if (copy_to_user(useraddr, &gstrings, sizeof(gstrings)))
+@@ -1915,11 +1919,14 @@ static int ethtool_get_stats(struct net_device *dev, void __user *useraddr)
+               return -EFAULT;
+ 
+       stats.n_stats = n_stats;
+-      data = vzalloc(n_stats * sizeof(u64));
+-      if (n_stats && !data)
+-              return -ENOMEM;
+-
+-      ops->get_ethtool_stats(dev, &stats, data);
++      if (n_stats) {
++              data = vzalloc(n_stats * sizeof(u64));
++              if (!data)
++                      return -ENOMEM;
++              ops->get_ethtool_stats(dev, &stats, data);
++      } else {
++              data = NULL;
++      }
+ 
+       ret = -EFAULT;
+       if (copy_to_user(useraddr, &stats, sizeof(stats)))
+@@ -1955,13 +1962,17 @@ static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr)
+               return -EFAULT;
+ 
+       stats.n_stats = n_stats;
+-      data = vzalloc(n_stats * sizeof(u64));
+-      if (n_stats && !data)
+-              return -ENOMEM;
++      if (n_stats) {
++              data = vzalloc(n_stats * sizeof(u64));
++              if (!data)
++                      return -ENOMEM;
+ 
+-      mutex_lock(&phydev->lock);
+-      phydev->drv->get_stats(phydev, &stats, data);
+-      mutex_unlock(&phydev->lock);
++              mutex_lock(&phydev->lock);
++              phydev->drv->get_stats(phydev, &stats, data);
++              mutex_unlock(&phydev->lock);
++      } else {
++              data = NULL;
++      }
+ 
+       ret = -EFAULT;
+       if (copy_to_user(useraddr, &stats, sizeof(stats)))
+diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
+index 0dd6359e5924..60b88718b1d4 100644
+--- a/net/core/net_namespace.c
++++ b/net/core/net_namespace.c
+@@ -285,6 +285,7 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
+ 
+       atomic_set(&net->count, 1);
+       refcount_set(&net->passive, 1);
++      get_random_bytes(&net->hash_mix, sizeof(u32));
+       net->dev_base_seq = 1;
+       net->user_ns = user_ns;
+       idr_init(&net->netns_ids);
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 1b39aef5cf82..2b3b0307dd89 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -3808,7 +3808,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
+       struct sk_buff *lp, *p = *head;
+       unsigned int delta_truesize;
+ 
+-      if (unlikely(p->len + len >= 65536))
++      if (unlikely(p->len + len >= 65536 || NAPI_GRO_CB(skb)->flush))
+               return -E2BIG;
+ 
+       lp = NAPI_GRO_CB(p)->last;
+diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
+index 1b160378ea9c..6fc45d3a1f8a 100644
+--- a/net/ipv4/ip_input.c
++++ b/net/ipv4/ip_input.c
+@@ -259,11 +259,10 @@ int ip_local_deliver(struct sk_buff *skb)
+                      ip_local_deliver_finish);
+ }
+ 
+-static inline bool ip_rcv_options(struct sk_buff *skb)
++static inline bool ip_rcv_options(struct sk_buff *skb, struct net_device *dev)
+ {
+       struct ip_options *opt;
+       const struct iphdr *iph;
+-      struct net_device *dev = skb->dev;
+ 
+       /* It looks as overkill, because not all
+          IP options require packet mangling.
+@@ -299,7 +298,7 @@ static inline bool ip_rcv_options(struct sk_buff *skb)
+                       }
+               }
+ 
+-              if (ip_options_rcv_srr(skb))
++              if (ip_options_rcv_srr(skb, dev))
+                       goto drop;
+       }
+ 
+@@ -362,7 +361,7 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+       }
+ #endif
+ 
+-      if (iph->ihl > 5 && ip_rcv_options(skb))
++      if (iph->ihl > 5 && ip_rcv_options(skb, dev))
+               goto drop;
+ 
+       rt = skb_rtable(skb);
+diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
+index 32a35043c9f5..3db31bb9df50 100644
+--- a/net/ipv4/ip_options.c
++++ b/net/ipv4/ip_options.c
+@@ -612,7 +612,7 @@ void ip_forward_options(struct sk_buff *skb)
+       }
+ }
+ 
+-int ip_options_rcv_srr(struct sk_buff *skb)
++int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev)
+ {
+       struct ip_options *opt = &(IPCB(skb)->opt);
+       int srrspace, srrptr;
+@@ -647,7 +647,7 @@ int ip_options_rcv_srr(struct sk_buff *skb)
+ 
+               orefdst = skb->_skb_refdst;
+               skb_dst_set(skb, NULL);
+-              err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, skb->dev);
++              err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, dev);
+               rt2 = skb_rtable(skb);
+               if (err || (rt2->rt_type != RTN_UNICAST && rt2->rt_type != RTN_LOCAL)) {
+                       skb_dst_drop(skb);
+diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
+index 8b637f9f23a2..f0de9fb92f0d 100644
+--- a/net/ipv4/tcp_dctcp.c
++++ b/net/ipv4/tcp_dctcp.c
+@@ -66,11 +66,6 @@ static unsigned int dctcp_alpha_on_init __read_mostly = DCTCP_MAX_ALPHA;
+ module_param(dctcp_alpha_on_init, uint, 0644);
+ MODULE_PARM_DESC(dctcp_alpha_on_init, "parameter for initial alpha value");
+ 
+-static unsigned int dctcp_clamp_alpha_on_loss __read_mostly;
+-module_param(dctcp_clamp_alpha_on_loss, uint, 0644);
+-MODULE_PARM_DESC(dctcp_clamp_alpha_on_loss,
+-               "parameter for clamping alpha on loss");
+-
+ static struct tcp_congestion_ops dctcp_reno;
+ 
+ static void dctcp_reset(const struct tcp_sock *tp, struct dctcp *ca)
+@@ -211,21 +206,23 @@ static void dctcp_update_alpha(struct sock *sk, u32 flags)
+       }
+ }
+ 
+-static void dctcp_state(struct sock *sk, u8 new_state)
++static void dctcp_react_to_loss(struct sock *sk)
+ {
+-      if (dctcp_clamp_alpha_on_loss && new_state == TCP_CA_Loss) {
+-              struct dctcp *ca = inet_csk_ca(sk);
++      struct dctcp *ca = inet_csk_ca(sk);
++      struct tcp_sock *tp = tcp_sk(sk);
+ 
+-              /* If this extension is enabled, we clamp dctcp_alpha to
+-               * max on packet loss; the motivation is that dctcp_alpha
+-               * is an indicator to the extend of congestion and packet
+-               * loss is an indicator of extreme congestion; setting
+-               * this in practice turned out to be beneficial, and
+-               * effectively assumes total congestion which reduces the
+-               * window by half.
+-               */
+-              ca->dctcp_alpha = DCTCP_MAX_ALPHA;
+-      }
++      ca->loss_cwnd = tp->snd_cwnd;
++      tp->snd_ssthresh = max(tp->snd_cwnd >> 1U, 2U);
++}
++
++static void dctcp_state(struct sock *sk, u8 new_state)
++{
++      if (new_state == TCP_CA_Recovery &&
++          new_state != inet_csk(sk)->icsk_ca_state)
++              dctcp_react_to_loss(sk);
++      /* We handle RTO in dctcp_cwnd_event to ensure that we perform only
++       * one loss-adjustment per RTT.
++       */
+ }
+ 
+ static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
+@@ -237,6 +234,9 @@ static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
+       case CA_EVENT_ECN_NO_CE:
+               dctcp_ce_state_1_to_0(sk);
+               break;
++      case CA_EVENT_LOSS:
++              dctcp_react_to_loss(sk);
++              break;
+       default:
+               /* Don't care for the rest. */
+               break;
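
(The rework drops the optional alpha clamp in favour of a mandatory,
once-per-RTT reaction to loss: entering Recovery, or an RTO delivered as
CA_EVENT_LOSS, now performs the standard multiplicative decrease
ssthresh = max(cwnd/2, 2). For example, cwnd 10 gives ssthresh 5, and cwnd 3
is floored at 2. The computation in isolation, illustrative only:)

static unsigned int loss_ssthresh(unsigned int snd_cwnd)
{
	unsigned int half = snd_cwnd >> 1;

	return half > 2 ? half : 2;	/* max(snd_cwnd >> 1U, 2U) */
}
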
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 7ca8264cbdf9..2af849ba33c9 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -611,7 +611,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
+                               inet6_sk(skb->sk) : NULL;
+       struct ipv6hdr *tmp_hdr;
+       struct frag_hdr *fh;
+-      unsigned int mtu, hlen, left, len;
++      unsigned int mtu, hlen, left, len, nexthdr_offset;
+       int hroom, troom;
+       __be32 frag_id;
+       int ptr, offset = 0, err = 0;
+@@ -622,6 +622,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
+               goto fail;
+       hlen = err;
+       nexthdr = *prevhdr;
++      nexthdr_offset = prevhdr - skb_network_header(skb);
+ 
+       mtu = ip6_skb_dst_mtu(skb);
+ 
+@@ -656,6 +657,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
+           (err = skb_checksum_help(skb)))
+               goto fail;
+ 
++      prevhdr = skb_network_header(skb) + nexthdr_offset;
+       hroom = LL_RESERVED_SPACE(rt->dst.dev);
+       if (skb_has_frag_list(skb)) {
+               unsigned int first_len = skb_pagelen(skb);
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index 1812c2a748ff..f71c7915ff0e 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -633,7 +633,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+                                          IPPROTO_IPIP,
+                                          RT_TOS(eiph->tos), 0);
+               if (IS_ERR(rt) ||
+-                  rt->dst.dev->type != ARPHRD_TUNNEL) {
++                  rt->dst.dev->type != ARPHRD_TUNNEL6) {
+                       if (!IS_ERR(rt))
+                               ip_rt_put(rt);
+                       goto out;
+@@ -643,7 +643,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+               ip_rt_put(rt);
+               if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
+                                  skb2->dev) ||
+-                  skb_dst(skb2)->dev->type != ARPHRD_TUNNEL)
++                  skb_dst(skb2)->dev->type != ARPHRD_TUNNEL6)
+                       goto out;
+       }
+ 
+diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
+index e23190725244..f7d080d1cf8e 100644
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -661,6 +661,10 @@ static int ipip6_rcv(struct sk_buff *skb)
+                   !net_eq(tunnel->net, dev_net(tunnel->dev))))
+                       goto out;
+ 
++              /* skb can be uncloned in iptunnel_pull_header, so
++               * old iph is no longer valid
++               */
++              iph = (const struct iphdr *)skb_mac_header(skb);
+               err = IP_ECN_decapsulate(iph, skb);
+               if (unlikely(err)) {
+                       if (log_ecn_error)
+diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
+index 9bf997404918..7b4f3f865861 100644
+--- a/net/kcm/kcmsock.c
++++ b/net/kcm/kcmsock.c
+@@ -2059,14 +2059,14 @@ static int __init kcm_init(void)
+       if (err)
+               goto fail;
+ 
+-      err = sock_register(&kcm_family_ops);
+-      if (err)
+-              goto sock_register_fail;
+-
+       err = register_pernet_device(&kcm_net_ops);
+       if (err)
+               goto net_ops_fail;
+ 
++      err = sock_register(&kcm_family_ops);
++      if (err)
++              goto sock_register_fail;
++
+       err = kcm_proc_init();
+       if (err)
+               goto proc_init_fail;
+@@ -2074,12 +2074,12 @@ static int __init kcm_init(void)
+       return 0;
+ 
+ proc_init_fail:
+-      unregister_pernet_device(&kcm_net_ops);
+-
+-net_ops_fail:
+       sock_unregister(PF_KCM);
+ 
+ sock_register_fail:
++      unregister_pernet_device(&kcm_net_ops);
++
++net_ops_fail:
+       proto_unregister(&kcm_proto);
+ 
+ fail:
+@@ -2095,8 +2095,8 @@ fail:
+ static void __exit kcm_exit(void)
+ {
+       kcm_proc_exit();
+-      unregister_pernet_device(&kcm_net_ops);
+       sock_unregister(PF_KCM);
++      unregister_pernet_device(&kcm_net_ops);
+       proto_unregister(&kcm_proto);
+       destroy_workqueue(kcm_wq);
+ 
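
(The crash came from ordering: the socket family was registered before the
pernet state it depends on existed, and the error unwind mirrored that wrong
order. The idiom in isolation, with placeholder names: register in dependency
order and unwind in exactly the reverse order.)

int register_state(void);		/* placeholder prototypes */
void unregister_state(void);
int register_entry_points(void);

static int subsystem_init(void)
{
	int err;

	err = register_state();		/* pernet state: must exist first */
	if (err)
		goto fail;

	err = register_entry_points();	/* usable immediately after this */
	if (err)
		goto unreg_state;

	return 0;

unreg_state:
	unregister_state();
fail:
	return err;
}
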
+diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
+index e687b89dafe6..f5deae2ccb79 100644
+--- a/net/openvswitch/flow_netlink.c
++++ b/net/openvswitch/flow_netlink.c
+@@ -1967,14 +1967,14 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
+ 
+       struct sw_flow_actions *acts;
+       int new_acts_size;
+-      int req_size = NLA_ALIGN(attr_len);
++      size_t req_size = NLA_ALIGN(attr_len);
+       int next_offset = offsetof(struct sw_flow_actions, actions) +
+                                       (*sfa)->actions_len;
+ 
+       if (req_size <= (ksize(*sfa) - next_offset))
+               goto out;
+ 
+-      new_acts_size = ksize(*sfa) * 2;
++      new_acts_size = max(next_offset + req_size, ksize(*sfa) * 2);
+ 
+       if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
+               if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) {
+diff --git a/net/rds/tcp.c b/net/rds/tcp.c
+index 2a08bf75d008..82e9ffecd90e 100644
+--- a/net/rds/tcp.c
++++ b/net/rds/tcp.c
+@@ -530,7 +530,7 @@ static void rds_tcp_kill_sock(struct net *net)
+       list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
+               struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
+ 
+-              if (net != c_net || !tc->t_sock)
++              if (net != c_net)
+                       continue;
+               if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn)) {
+                       list_move_tail(&tc->t_tcp_node, &tmp_list);
+diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
+index a859b55d7899..64fd1e9818a6 100644
+--- a/net/sched/act_sample.c
++++ b/net/sched/act_sample.c
+@@ -45,6 +45,7 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
+       struct tc_sample *parm;
+       struct tcf_sample *s;
+       bool exists = false;
++      u32 rate;
+       int ret;
+ 
+       if (!nla)
+@@ -73,10 +74,16 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
+               if (!ovr)
+                       return -EEXIST;
+       }
+-      s = to_sample(*a);
+ 
++      rate = nla_get_u32(tb[TCA_SAMPLE_RATE]);
++      if (!rate) {
++              tcf_idr_release(*a, bind);
++              return -EINVAL;
++      }
++
++      s = to_sample(*a);
+       s->tcf_action = parm->action;
+-      s->rate = nla_get_u32(tb[TCA_SAMPLE_RATE]);
++      s->rate = rate;
+       s->psample_group_num = nla_get_u32(tb[TCA_SAMPLE_PSAMPLE_GROUP]);
+       psample_group = psample_group_get(net, s->psample_group_num);
+       if (!psample_group) {
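
(Why a zero rate must be rejected at configure time, sketched from the traffic
path: the per-packet sampling decision is a modulo by the configured rate, so
rate 0 would divide by zero on the first packet. Illustrative, condensed from
the act_sample datapath; prandom_u32() is the real kernel helper:)

#include <linux/random.h>	/* prandom_u32() */
#include <linux/types.h>

static bool should_sample(u32 rate)
{
	/* rate == 0 here is a divide-by-zero; now impossible by construction */
	return prandom_u32() % rate == 0;
}
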
+diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
+index 6499aecfbfc4..d8fd152779c8 100644
+--- a/net/sched/cls_matchall.c
++++ b/net/sched/cls_matchall.c
+@@ -125,6 +125,11 @@ static void mall_destroy(struct tcf_proto *tp)
+ 
+ static void *mall_get(struct tcf_proto *tp, u32 handle)
+ {
++      struct cls_mall_head *head = rtnl_dereference(tp->root);
++
++      if (head && head->handle == handle)
++              return head;
++
+       return NULL;
+ }
+ 
+diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
+index cbb04d66f564..a7529aca2ac8 100644
+--- a/net/sctp/protocol.c
++++ b/net/sctp/protocol.c
+@@ -605,6 +605,7 @@ out:
+ static int sctp_v4_addr_to_user(struct sctp_sock *sp, union sctp_addr *addr)
+ {
+       /* No address mapping for V4 sockets */
++      memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));
+       return sizeof(struct sockaddr_in);
+ }
+ 
+diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
+index 350c33ec82b3..3bcd7a2f0394 100644
+--- a/sound/core/seq/seq_clientmgr.c
++++ b/sound/core/seq/seq_clientmgr.c
+@@ -1249,7 +1249,7 @@ static int snd_seq_ioctl_set_client_info(struct snd_seq_client *client,
+ 
+       /* fill the info fields */
+       if (client_info->name[0])
+-              strlcpy(client->name, client_info->name, sizeof(client->name));
++              strscpy(client->name, client_info->name, sizeof(client->name));
+ 
+       client->filter = client_info->filter;
+       client->event_lost = client_info->event_lost;
+@@ -1527,7 +1527,7 @@ static int snd_seq_ioctl_create_queue(struct snd_seq_client *client, void *arg)
+       /* set queue name */
+       if (!info->name[0])
+               snprintf(info->name, sizeof(info->name), "Queue-%d", q->queue);
+-      strlcpy(q->name, info->name, sizeof(q->name));
++      strscpy(q->name, info->name, sizeof(q->name));
+       snd_use_lock_free(&q->use_lock);
+ 
+       return 0;
+@@ -1589,7 +1589,7 @@ static int snd_seq_ioctl_set_queue_info(struct snd_seq_client *client,
+               queuefree(q);
+               return -EPERM;
+       }
+-      strlcpy(q->name, info->name, sizeof(q->name));
++      strscpy(q->name, info->name, sizeof(q->name));
+       queuefree(q);
+ 
+       return 0;
+diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
+index a23d6a821ff3..6152ae24772b 100644
+--- a/sound/soc/fsl/fsl_esai.c
++++ b/sound/soc/fsl/fsl_esai.c
+@@ -58,6 +58,8 @@ struct fsl_esai {
+       u32 fifo_depth;
+       u32 slot_width;
+       u32 slots;
++      u32 tx_mask;
++      u32 rx_mask;
+       u32 hck_rate[2];
+       u32 sck_rate[2];
+       bool hck_dir[2];
+@@ -358,21 +360,13 @@ static int fsl_esai_set_dai_tdm_slot(struct snd_soc_dai *dai, u32 tx_mask,
+       regmap_update_bits(esai_priv->regmap, REG_ESAI_TCCR,
+                          ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(slots));
+ 
+-      regmap_update_bits(esai_priv->regmap, REG_ESAI_TSMA,
+-                         ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(tx_mask));
+-      regmap_update_bits(esai_priv->regmap, REG_ESAI_TSMB,
+-                         ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(tx_mask));
+-
+       regmap_update_bits(esai_priv->regmap, REG_ESAI_RCCR,
+                          ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(slots));
+ 
+-      regmap_update_bits(esai_priv->regmap, REG_ESAI_RSMA,
+-                         ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(rx_mask));
+-      regmap_update_bits(esai_priv->regmap, REG_ESAI_RSMB,
+-                         ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(rx_mask));
+-
+       esai_priv->slot_width = slot_width;
+       esai_priv->slots = slots;
++      esai_priv->tx_mask = tx_mask;
++      esai_priv->rx_mask = rx_mask;
+ 
+       return 0;
+ }
+@@ -593,6 +587,7 @@ static int fsl_esai_trigger(struct snd_pcm_substream *substream, int cmd,
+       bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
+       u8 i, channels = substream->runtime->channels;
+       u32 pins = DIV_ROUND_UP(channels, esai_priv->slots);
++      u32 mask;
+ 
+       switch (cmd) {
+       case SNDRV_PCM_TRIGGER_START:
+@@ -605,15 +600,38 @@ static int fsl_esai_trigger(struct snd_pcm_substream *substream, int cmd,
+               for (i = 0; tx && i < channels; i++)
+                       regmap_write(esai_priv->regmap, REG_ESAI_ETDR, 0x0);
+ 
++              /*
++               * When set the TE/RE in the end of enablement flow, there
++               * will be channel swap issue for multi data line case.
++               * In order to workaround this issue, we switch the bit
++               * enablement sequence to below sequence
++               * 1) clear the xSMB & xSMA: which is done in probe and
++               *                           stop state.
++               * 2) set TE/RE
++               * 3) set xSMB
++               * 4) set xSMA:  xSMA is the last one in this flow, which
++               *               will trigger esai to start.
++               */
+               regmap_update_bits(esai_priv->regmap, REG_ESAI_xCR(tx),
+                                  tx ? ESAI_xCR_TE_MASK : ESAI_xCR_RE_MASK,
+                                  tx ? ESAI_xCR_TE(pins) : ESAI_xCR_RE(pins));
++              mask = tx ? esai_priv->tx_mask : esai_priv->rx_mask;
++
++              regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMB(tx),
++                                 ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(mask));
++              regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMA(tx),
++                                 ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(mask));
++
+               break;
+       case SNDRV_PCM_TRIGGER_SUSPEND:
+       case SNDRV_PCM_TRIGGER_STOP:
+       case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+               regmap_update_bits(esai_priv->regmap, REG_ESAI_xCR(tx),
+                                  tx ? ESAI_xCR_TE_MASK : ESAI_xCR_RE_MASK, 0);
++              regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMA(tx),
++                                 ESAI_xSMA_xS_MASK, 0);
++              regmap_update_bits(esai_priv->regmap, REG_ESAI_xSMB(tx),
++                                 ESAI_xSMB_xS_MASK, 0);
+ 
+               /* Disable and reset FIFO */
+               regmap_update_bits(esai_priv->regmap, REG_ESAI_xFCR(tx),
+@@ -903,6 +921,15 @@ static int fsl_esai_probe(struct platform_device *pdev)
+               return ret;
+       }
+ 
++      esai_priv->tx_mask = 0xFFFFFFFF;
++      esai_priv->rx_mask = 0xFFFFFFFF;
++
++      /* Clear the TSMA, TSMB, RSMA, RSMB */
++      regmap_write(esai_priv->regmap, REG_ESAI_TSMA, 0);
++      regmap_write(esai_priv->regmap, REG_ESAI_TSMB, 0);
++      regmap_write(esai_priv->regmap, REG_ESAI_RSMA, 0);
++      regmap_write(esai_priv->regmap, REG_ESAI_RSMB, 0);
++
+       ret = devm_snd_soc_register_component(&pdev->dev, &fsl_esai_component,
+                                             &fsl_esai_dai, 1);
+       if (ret) {
