commit:     e6ea672694ccf0bad305b4ffeb7b8dac3e3f804e
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Mar 13 22:10:33 2019 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Mar 13 22:10:33 2019 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=e6ea6726

proj/linux-patches: Linux patch 5.0.2

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README            |    4 +
 1001_linux-5.0.2.patch | 1235 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1239 insertions(+)

diff --git a/0000_README b/0000_README
index 99e0bb6..04daf20 100644
--- a/0000_README
+++ b/0000_README
@@ -47,6 +47,10 @@ Patch:  1000_linux-5.0.1.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.0.1
 
+Patch:  1001_linux-5.0.2.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.0.2
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1001_linux-5.0.2.patch b/1001_linux-5.0.2.patch
new file mode 100644
index 0000000..4fcf3cb
--- /dev/null
+++ b/1001_linux-5.0.2.patch
@@ -0,0 +1,1235 @@
+diff --git a/Makefile b/Makefile
+index 3cd7163fe164..bb2f7664594a 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 0
+-SUBLEVEL = 1
++SUBLEVEL = 2
+ EXTRAVERSION =
+ NAME = Shy Crocodile
+ 
+diff --git a/arch/arm/boot/dts/exynos3250.dtsi b/arch/arm/boot/dts/exynos3250.dtsi
+index 608d17454179..5892a9f7622f 100644
+--- a/arch/arm/boot/dts/exynos3250.dtsi
++++ b/arch/arm/boot/dts/exynos3250.dtsi
+@@ -168,6 +168,9 @@
+                       interrupt-controller;
+                       #interrupt-cells = <3>;
+                       interrupt-parent = <&gic>;
++                      clock-names = "clkout8";
++                      clocks = <&cmu CLK_FIN_PLL>;
++                      #clock-cells = <1>;
+               };
+ 
+               mipi_phy: video-phy {
+diff --git a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
+index 3a9eb1e91c45..8a64c4e8c474 100644
+--- a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
++++ b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
+@@ -49,7 +49,7 @@
+       };
+ 
+       emmc_pwrseq: pwrseq {
+-              pinctrl-0 = <&sd1_cd>;
++              pinctrl-0 = <&emmc_rstn>;
+               pinctrl-names = "default";
+               compatible = "mmc-pwrseq-emmc";
+               reset-gpios = <&gpk1 2 GPIO_ACTIVE_LOW>;
+@@ -165,12 +165,6 @@
+       cpu0-supply = <&buck2_reg>;
+ };
+ 
+-/* RSTN signal for eMMC */
+-&sd1_cd {
+-      samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+-      samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
+-};
+-
+ &pinctrl_1 {
+       gpio_power_key: power_key {
+               samsung,pins = "gpx1-3";
+@@ -188,6 +182,11 @@
+               samsung,pins = "gpx3-7";
+               samsung,pin-pud = <EXYNOS_PIN_PULL_DOWN>;
+       };
++
++      emmc_rstn: emmc-rstn {
++              samsung,pins = "gpk1-2";
++              samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
++      };
+ };
+ 
+ &ehci {
+diff --git a/arch/arm/boot/dts/exynos5422-odroid-core.dtsi b/arch/arm/boot/dts/exynos5422-odroid-core.dtsi
+index bf09eab90f8a..6bf3661293ee 100644
+--- a/arch/arm/boot/dts/exynos5422-odroid-core.dtsi
++++ b/arch/arm/boot/dts/exynos5422-odroid-core.dtsi
+@@ -468,7 +468,7 @@
+                       buck8_reg: BUCK8 {
+                               regulator-name = "vdd_1.8v_ldo";
+                               regulator-min-microvolt = <800000>;
+-                              regulator-max-microvolt = <1500000>;
++                              regulator-max-microvolt = <2000000>;
+                               regulator-always-on;
+                               regulator-boot-on;
+                       };
+diff --git a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
+index 610235028cc7..c14205cd6bf5 100644
+--- a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
++++ b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
+@@ -118,6 +118,7 @@
+               reset-gpios = <&gpio0 5 GPIO_ACTIVE_LOW>;
+               clocks = <&pmic>;
+               clock-names = "ext_clock";
++              post-power-on-delay-ms = <10>;
+               power-off-delay-us = <10>;
+       };
+ 
+@@ -300,7 +301,6 @@
+ 
+               dwmmc_0: dwmmc0@f723d000 {
+                       cap-mmc-highspeed;
+-                      mmc-hs200-1_8v;
+                       non-removable;
+                       bus-width = <0x8>;
+                       vmmc-supply = <&ldo19>;
+diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts
+index 13a0a028df98..e5699d0d91e4 100644
+--- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts
++++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts
+@@ -101,6 +101,7 @@
+       sdio_pwrseq: sdio-pwrseq {
+               compatible = "mmc-pwrseq-simple";
+               reset-gpios = <&gpio 7 GPIO_ACTIVE_LOW>; /* WIFI_EN */
++              post-power-on-delay-ms = <10>;
+       };
+ };
+ 
+diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
+index b684f0294f35..e2b1447192a8 100644
+--- a/arch/x86/events/core.c
++++ b/arch/x86/events/core.c
+@@ -1995,7 +1995,7 @@ static int x86_pmu_commit_txn(struct pmu *pmu)
+  */
+ static void free_fake_cpuc(struct cpu_hw_events *cpuc)
+ {
+-      kfree(cpuc->shared_regs);
++      intel_cpuc_finish(cpuc);
+       kfree(cpuc);
+ }
+ 
+@@ -2007,14 +2007,11 @@ static struct cpu_hw_events *allocate_fake_cpuc(void)
+       cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL);
+       if (!cpuc)
+               return ERR_PTR(-ENOMEM);
+-
+-      /* only needed, if we have extra_regs */
+-      if (x86_pmu.extra_regs) {
+-              cpuc->shared_regs = allocate_shared_regs(cpu);
+-              if (!cpuc->shared_regs)
+-                      goto error;
+-      }
+       cpuc->is_fake = 1;
++
++      if (intel_cpuc_prepare(cpuc, cpu))
++              goto error;
++
+       return cpuc;
+ error:
+       free_fake_cpuc(cpuc);
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index 730978dff63f..dadb8f7e5a0d 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -1999,6 +1999,39 @@ static void intel_pmu_nhm_enable_all(int added)
+       intel_pmu_enable_all(added);
+ }
+ 
++static void intel_set_tfa(struct cpu_hw_events *cpuc, bool on)
++{
++      u64 val = on ? MSR_TFA_RTM_FORCE_ABORT : 0;
++
++      if (cpuc->tfa_shadow != val) {
++              cpuc->tfa_shadow = val;
++              wrmsrl(MSR_TSX_FORCE_ABORT, val);
++      }
++}
++
++static void intel_tfa_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
++{
++      /*
++       * We're going to use PMC3, make sure TFA is set before we touch it.
++       */
++      if (cntr == 3 && !cpuc->is_fake)
++              intel_set_tfa(cpuc, true);
++}
++
++static void intel_tfa_pmu_enable_all(int added)
++{
++      struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
++
++      /*
++       * If we find PMC3 is no longer used when we enable the PMU, we can
++       * clear TFA.
++       */
++      if (!test_bit(3, cpuc->active_mask))
++              intel_set_tfa(cpuc, false);
++
++      intel_pmu_enable_all(added);
++}
++
+ static void enable_counter_freeze(void)
+ {
+       update_debugctlmsr(get_debugctlmsr() |
+@@ -2768,6 +2801,35 @@ intel_stop_scheduling(struct cpu_hw_events *cpuc)
+       raw_spin_unlock(&excl_cntrs->lock);
+ }
+ 
++static struct event_constraint *
++dyn_constraint(struct cpu_hw_events *cpuc, struct event_constraint *c, int idx)
++{
++      WARN_ON_ONCE(!cpuc->constraint_list);
++
++      if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
++              struct event_constraint *cx;
++
++              /*
++               * grab pre-allocated constraint entry
++               */
++              cx = &cpuc->constraint_list[idx];
++
++              /*
++               * initialize dynamic constraint
++               * with static constraint
++               */
++              *cx = *c;
++
++              /*
++               * mark constraint as dynamic
++               */
++              cx->flags |= PERF_X86_EVENT_DYNAMIC;
++              c = cx;
++      }
++
++      return c;
++}
++
+ static struct event_constraint *
+ intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
+                          int idx, struct event_constraint *c)
+@@ -2798,27 +2860,7 @@ intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
+        * only needed when constraint has not yet
+        * been cloned (marked dynamic)
+        */
+-      if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
+-              struct event_constraint *cx;
+-
+-              /*
+-               * grab pre-allocated constraint entry
+-               */
+-              cx = &cpuc->constraint_list[idx];
+-
+-              /*
+-               * initialize dynamic constraint
+-               * with static constraint
+-               */
+-              *cx = *c;
+-
+-              /*
+-               * mark constraint as dynamic, so we
+-               * can free it later on
+-               */
+-              cx->flags |= PERF_X86_EVENT_DYNAMIC;
+-              c = cx;
+-      }
++      c = dyn_constraint(cpuc, c, idx);
+ 
+       /*
+        * From here on, the constraint is dynamic.
+@@ -3345,6 +3387,26 @@ glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
+       return c;
+ }
+ 
++static bool allow_tsx_force_abort = true;
++
++static struct event_constraint *
++tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
++                        struct perf_event *event)
++{
++      struct event_constraint *c = hsw_get_event_constraints(cpuc, idx, event);
++
++      /*
++       * Without TFA we must not use PMC3.
++       */
++      if (!allow_tsx_force_abort && test_bit(3, c->idxmsk)) {
++              c = dyn_constraint(cpuc, c, idx);
++              c->idxmsk64 &= ~(1ULL << 3);
++              c->weight--;
++      }
++
++      return c;
++}
++
+ /*
+  * Broadwell:
+  *
+@@ -3398,7 +3460,7 @@ ssize_t intel_event_sysfs_show(char *page, u64 config)
+       return x86_event_sysfs_show(page, config, event);
+ }
+ 
+-struct intel_shared_regs *allocate_shared_regs(int cpu)
++static struct intel_shared_regs *allocate_shared_regs(int cpu)
+ {
+       struct intel_shared_regs *regs;
+       int i;
+@@ -3430,23 +3492,24 @@ static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu)
+       return c;
+ }
+ 
+-static int intel_pmu_cpu_prepare(int cpu)
+-{
+-      struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+ 
++int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
++{
+       if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
+               cpuc->shared_regs = allocate_shared_regs(cpu);
+               if (!cpuc->shared_regs)
+                       goto err;
+       }
+ 
+-      if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
++      if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA)) {
+               size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);
+ 
+-              cpuc->constraint_list = kzalloc(sz, GFP_KERNEL);
++              cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
+               if (!cpuc->constraint_list)
+                       goto err_shared_regs;
++      }
+ 
++      if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
+               cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
+               if (!cpuc->excl_cntrs)
+                       goto err_constraint_list;
+@@ -3468,6 +3531,11 @@ err:
+       return -ENOMEM;
+ }
+ 
++static int intel_pmu_cpu_prepare(int cpu)
++{
++      return intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu);
++}
++
+ static void flip_smm_bit(void *data)
+ {
+       unsigned long set = *(unsigned long *)data;
+@@ -3542,9 +3610,8 @@ static void intel_pmu_cpu_starting(int cpu)
+       }
+ }
+ 
+-static void free_excl_cntrs(int cpu)
++static void free_excl_cntrs(struct cpu_hw_events *cpuc)
+ {
+-      struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+       struct intel_excl_cntrs *c;
+ 
+       c = cpuc->excl_cntrs;
+@@ -3552,9 +3619,10 @@ static void free_excl_cntrs(int cpu)
+               if (c->core_id == -1 || --c->refcnt == 0)
+                       kfree(c);
+               cpuc->excl_cntrs = NULL;
+-              kfree(cpuc->constraint_list);
+-              cpuc->constraint_list = NULL;
+       }
++
++      kfree(cpuc->constraint_list);
++      cpuc->constraint_list = NULL;
+ }
+ 
+ static void intel_pmu_cpu_dying(int cpu)
+@@ -3565,9 +3633,8 @@ static void intel_pmu_cpu_dying(int cpu)
+               disable_counter_freeze();
+ }
+ 
+-static void intel_pmu_cpu_dead(int cpu)
++void intel_cpuc_finish(struct cpu_hw_events *cpuc)
+ {
+-      struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+       struct intel_shared_regs *pc;
+ 
+       pc = cpuc->shared_regs;
+@@ -3577,7 +3644,12 @@ static void intel_pmu_cpu_dead(int cpu)
+               cpuc->shared_regs = NULL;
+       }
+ 
+-      free_excl_cntrs(cpu);
++      free_excl_cntrs(cpuc);
++}
++
++static void intel_pmu_cpu_dead(int cpu)
++{
++      intel_cpuc_finish(&per_cpu(cpu_hw_events, cpu));
+ }
+ 
+ static void intel_pmu_sched_task(struct perf_event_context *ctx,
+@@ -4070,8 +4142,11 @@ static struct attribute *intel_pmu_caps_attrs[] = {
+        NULL
+ };
+ 
++DEVICE_BOOL_ATTR(allow_tsx_force_abort, 0644, allow_tsx_force_abort);
++
+ static struct attribute *intel_pmu_attrs[] = {
+       &dev_attr_freeze_on_smi.attr,
++      NULL, /* &dev_attr_allow_tsx_force_abort.attr.attr */
+       NULL,
+ };
+ 
+@@ -4564,6 +4639,15 @@ __init int intel_pmu_init(void)
+               tsx_attr = hsw_tsx_events_attrs;
+               intel_pmu_pebs_data_source_skl(
+                       boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X);
++
++              if (boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)) {
++                      x86_pmu.flags |= PMU_FL_TFA;
++                      x86_pmu.get_event_constraints = tfa_get_event_constraints;
++                      x86_pmu.enable_all = intel_tfa_pmu_enable_all;
++                      x86_pmu.commit_scheduling = intel_tfa_commit_scheduling;
++                      intel_pmu_attrs[1] = &dev_attr_allow_tsx_force_abort.attr.attr;
++              }
++
+               pr_cont("Skylake events, ");
+               name = "skylake";
+               break;
+@@ -4715,7 +4799,7 @@ static __init int fixup_ht_bug(void)
+       hardlockup_detector_perf_restart();
+ 
+       for_each_online_cpu(c)
+-              free_excl_cntrs(c);
++              free_excl_cntrs(&per_cpu(cpu_hw_events, c));
+ 
+       cpus_read_unlock();
+       pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n");
+diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
+index d46fd6754d92..a345d079f876 100644
+--- a/arch/x86/events/perf_event.h
++++ b/arch/x86/events/perf_event.h
+@@ -242,6 +242,11 @@ struct cpu_hw_events {
+       struct intel_excl_cntrs         *excl_cntrs;
+       int excl_thread_id; /* 0 or 1 */
+ 
++      /*
++       * SKL TSX_FORCE_ABORT shadow
++       */
++      u64                             tfa_shadow;
++
+       /*
+        * AMD specific bits
+        */
+@@ -681,6 +686,7 @@ do {                                                                       \
+ #define PMU_FL_EXCL_CNTRS     0x4 /* has exclusive counter requirements  */
+ #define PMU_FL_EXCL_ENABLED   0x8 /* exclusive counter active */
+ #define PMU_FL_PEBS_ALL               0x10 /* all events are valid PEBS events */
++#define PMU_FL_TFA            0x20 /* deal with TSX force abort */
+ 
+ #define EVENT_VAR(_id)  event_attr_##_id
+ #define EVENT_PTR(_id) &event_attr_##_id.attr.attr
+@@ -889,7 +895,8 @@ struct event_constraint *
+ x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
+                         struct perf_event *event);
+ 
+-struct intel_shared_regs *allocate_shared_regs(int cpu);
++extern int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu);
++extern void intel_cpuc_finish(struct cpu_hw_events *cpuc);
+ 
+ int intel_pmu_init(void);
+ 
+@@ -1025,9 +1032,13 @@ static inline int intel_pmu_init(void)
+       return 0;
+ }
+ 
+-static inline struct intel_shared_regs *allocate_shared_regs(int cpu)
++static inline int intel_cpuc_prepare(struct cpu_hw_event *cpuc, int cpu)
++{
++      return 0;
++}
++
++static inline void intel_cpuc_finish(struct cpu_hw_event *cpuc)
+ {
+-      return NULL;
+ }
+ 
+ static inline int is_ht_workaround_enabled(void)
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index 6d6122524711..981ff9479648 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -344,6 +344,7 @@
+ /* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
+ #define X86_FEATURE_AVX512_4VNNIW     (18*32+ 2) /* AVX-512 Neural Network Instructions */
+ #define X86_FEATURE_AVX512_4FMAPS     (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
++#define X86_FEATURE_TSX_FORCE_ABORT   (18*32+13) /* "" TSX_FORCE_ABORT */
+ #define X86_FEATURE_PCONFIG           (18*32+18) /* Intel PCONFIG */
+ #define X86_FEATURE_SPEC_CTRL         (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
+ #define X86_FEATURE_INTEL_STIBP               (18*32+27) /* "" Single Thread Indirect Branch Predictors */
+diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
+index 8e40c2446fd1..ca5bc0eacb95 100644
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -666,6 +666,12 @@
+ 
+ #define MSR_IA32_TSC_DEADLINE         0x000006E0
+ 
++
++#define MSR_TSX_FORCE_ABORT           0x0000010F
++
++#define MSR_TFA_RTM_FORCE_ABORT_BIT   0
++#define MSR_TFA_RTM_FORCE_ABORT               BIT_ULL(MSR_TFA_RTM_FORCE_ABORT_BIT)
++
+ /* P4/Xeon+ specific */
+ #define MSR_IA32_MCG_EAX              0x00000180
+ #define MSR_IA32_MCG_EBX              0x00000181
+diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
+index 30a5111ae5fd..527e69b12002 100644
+--- a/arch/x86/pci/fixup.c
++++ b/arch/x86/pci/fixup.c
+@@ -635,6 +635,22 @@ static void quirk_no_aersid(struct pci_dev *pdev)
+ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
+                             PCI_CLASS_BRIDGE_PCI, 8, quirk_no_aersid);
+ 
++static void quirk_intel_th_dnv(struct pci_dev *dev)
++{
++      struct resource *r = &dev->resource[4];
++
++      /*
++       * Denverton reports 2k of RTIT_BAR (intel_th resource 4), which
++       * appears to be 4 MB in reality.
++       */
++      if (r->end == r->start + 0x7ff) {
++              r->start = 0;
++              r->end   = 0x3fffff;
++              r->flags |= IORESOURCE_UNSET;
++      }
++}
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x19e1, quirk_intel_th_dnv);
++
+ #ifdef CONFIG_PHYS_ADDR_T_64BIT
+ 
+ #define AMD_141b_MMIO_BASE(x) (0x80 + (x) * 0x8)
+diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
+index 6bc8e6640d71..c51462f5aa1e 100644
+--- a/drivers/firmware/iscsi_ibft.c
++++ b/drivers/firmware/iscsi_ibft.c
+@@ -542,6 +542,7 @@ static umode_t __init ibft_check_tgt_for(void *data, int type)
+       case ISCSI_BOOT_TGT_NIC_ASSOC:
+       case ISCSI_BOOT_TGT_CHAP_TYPE:
+               rc = S_IRUGO;
++              break;
+       case ISCSI_BOOT_TGT_NAME:
+               if (tgt->tgt_name_len)
+                       rc = S_IRUGO;
+diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
+index 225ae6980182..628ef617bb2f 100644
+--- a/drivers/input/mouse/elan_i2c_core.c
++++ b/drivers/input/mouse/elan_i2c_core.c
+@@ -1337,6 +1337,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
+       { "ELAN0000", 0 },
+       { "ELAN0100", 0 },
+       { "ELAN0600", 0 },
++      { "ELAN0601", 0 },
+       { "ELAN0602", 0 },
+       { "ELAN0605", 0 },
+       { "ELAN0608", 0 },
+diff --git a/drivers/input/tablet/wacom_serial4.c b/drivers/input/tablet/wacom_serial4.c
+index 38bfaca48eab..150f9eecaca7 100644
+--- a/drivers/input/tablet/wacom_serial4.c
++++ b/drivers/input/tablet/wacom_serial4.c
+@@ -187,6 +187,7 @@ enum {
+       MODEL_DIGITIZER_II      = 0x5544, /* UD */
+       MODEL_GRAPHIRE          = 0x4554, /* ET */
+       MODEL_PENPARTNER        = 0x4354, /* CT */
++      MODEL_ARTPAD_II         = 0x4B54, /* KT */
+ };
+ 
+ static void wacom_handle_model_response(struct wacom *wacom)
+@@ -245,6 +246,7 @@ static void wacom_handle_model_response(struct wacom *wacom)
+               wacom->flags = F_HAS_STYLUS2 | F_HAS_SCROLLWHEEL;
+               break;
+ 
++      case MODEL_ARTPAD_II:
+       case MODEL_DIGITIZER_II:
+               wacom->dev->name = "Wacom Digitizer II";
+               wacom->dev->id.version = MODEL_DIGITIZER_II;
+diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
+index 66a174979b3c..81745644f720 100644
+--- a/drivers/media/rc/rc-main.c
++++ b/drivers/media/rc/rc-main.c
+@@ -274,6 +274,7 @@ static unsigned int ir_update_mapping(struct rc_dev *dev,
+                                     unsigned int new_keycode)
+ {
+       int old_keycode = rc_map->scan[index].keycode;
++      int i;
+ 
+       /* Did the user wish to remove the mapping? */
+       if (new_keycode == KEY_RESERVED || new_keycode == KEY_UNKNOWN) {
+@@ -288,9 +289,20 @@ static unsigned int ir_update_mapping(struct rc_dev *dev,
+                       old_keycode == KEY_RESERVED ? "New" : "Replacing",
+                       rc_map->scan[index].scancode, new_keycode);
+               rc_map->scan[index].keycode = new_keycode;
++              __set_bit(new_keycode, dev->input_dev->keybit);
+       }
+ 
+       if (old_keycode != KEY_RESERVED) {
++              /* A previous mapping was updated... */
++              __clear_bit(old_keycode, dev->input_dev->keybit);
++              /* ... but another scancode might use the same keycode */
++              for (i = 0; i < rc_map->len; i++) {
++                      if (rc_map->scan[i].keycode == old_keycode) {
++                              __set_bit(old_keycode, dev->input_dev->keybit);
++                              break;
++                      }
++              }
++
+               /* Possibly shrink the keytable, failure is not a problem */
+               ir_resize_table(dev, rc_map, GFP_ATOMIC);
+       }
+@@ -1750,7 +1762,6 @@ static int rc_prepare_rx_device(struct rc_dev *dev)
+       set_bit(EV_REP, dev->input_dev->evbit);
+       set_bit(EV_MSC, dev->input_dev->evbit);
+       set_bit(MSC_SCAN, dev->input_dev->mscbit);
+-      bitmap_fill(dev->input_dev->keybit, KEY_CNT);
+ 
+       /* Pointer/mouse events */
+       set_bit(EV_REL, dev->input_dev->evbit);
+diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
+index b62cbd800111..33a22c016456 100644
+--- a/drivers/media/usb/uvc/uvc_driver.c
++++ b/drivers/media/usb/uvc/uvc_driver.c
+@@ -1106,11 +1106,19 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
+                       return -EINVAL;
+               }
+ 
+-              /* Make sure the terminal type MSB is not null, otherwise it
+-               * could be confused with a unit.
++              /*
++               * Reject invalid terminal types that would cause issues:
++               *
++               * - The high byte must be non-zero, otherwise it would be
++               *   confused with a unit.
++               *
++               * - Bit 15 must be 0, as we use it internally as a terminal
++               *   direction flag.
++               *
++               * Other unknown types are accepted.
+                */
+               type = get_unaligned_le16(&buffer[4]);
+-              if ((type & 0xff00) == 0) {
++              if ((type & 0x7f00) == 0 || (type & 0x8000) != 0) {
+                       uvc_trace(UVC_TRACE_DESCR, "device %d videocontrol "
+                               "interface %d INPUT_TERMINAL %d has invalid "
+                               "type 0x%04x, skipping\n", udev->devnum,
+diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
+index c070a9e51ebf..fae572b38416 100644
+--- a/drivers/net/wireless/ath/ath9k/init.c
++++ b/drivers/net/wireless/ath/ath9k/init.c
+@@ -636,15 +636,15 @@ static int ath9k_of_init(struct ath_softc *sc)
+               ret = ath9k_eeprom_request(sc, eeprom_name);
+               if (ret)
+                       return ret;
++
++              ah->ah_flags &= ~AH_USE_EEPROM;
++              ah->ah_flags |= AH_NO_EEP_SWAP;
+       }
+ 
+       mac = of_get_mac_address(np);
+       if (mac)
+               ether_addr_copy(common->macaddr, mac);
+ 
+-      ah->ah_flags &= ~AH_USE_EEPROM;
+-      ah->ah_flags |= AH_NO_EEP_SWAP;
+-
+       return 0;
+ }
+ 
+diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
+index 0dbcf429089f..1a8b85051b1b 100644
+--- a/drivers/pci/pcie/pme.c
++++ b/drivers/pci/pcie/pme.c
+@@ -432,31 +432,6 @@ static void pcie_pme_remove(struct pcie_device *srv)
+       kfree(get_service_data(srv));
+ }
+ 
+-static int pcie_pme_runtime_suspend(struct pcie_device *srv)
+-{
+-      struct pcie_pme_service_data *data = get_service_data(srv);
+-
+-      spin_lock_irq(&data->lock);
+-      pcie_pme_interrupt_enable(srv->port, false);
+-      pcie_clear_root_pme_status(srv->port);
+-      data->noirq = true;
+-      spin_unlock_irq(&data->lock);
+-
+-      return 0;
+-}
+-
+-static int pcie_pme_runtime_resume(struct pcie_device *srv)
+-{
+-      struct pcie_pme_service_data *data = get_service_data(srv);
+-
+-      spin_lock_irq(&data->lock);
+-      pcie_pme_interrupt_enable(srv->port, true);
+-      data->noirq = false;
+-      spin_unlock_irq(&data->lock);
+-
+-      return 0;
+-}
+-
+ static struct pcie_port_service_driver pcie_pme_driver = {
+       .name           = "pcie_pme",
+       .port_type      = PCI_EXP_TYPE_ROOT_PORT,
+@@ -464,8 +439,6 @@ static struct pcie_port_service_driver pcie_pme_driver = {
+ 
+       .probe          = pcie_pme_probe,
+       .suspend        = pcie_pme_suspend,
+-      .runtime_suspend = pcie_pme_runtime_suspend,
+-      .runtime_resume = pcie_pme_runtime_resume,
+       .resume         = pcie_pme_resume,
+       .remove         = pcie_pme_remove,
+ };
+diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
+index d5a6aa9676c8..a3adc954f40f 100644
+--- a/drivers/scsi/aacraid/commsup.c
++++ b/drivers/scsi/aacraid/commsup.c
+@@ -1303,8 +1303,9 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
+                                 ADD : DELETE;
+                               break;
+                       }
+-                      case AifBuManagerEvent:
+-                              aac_handle_aif_bu(dev, aifcmd);
++                      break;
++              case AifBuManagerEvent:
++                      aac_handle_aif_bu(dev, aifcmd);
+                       break;
+               }
+ 
+diff --git a/drivers/staging/erofs/namei.c b/drivers/staging/erofs/namei.c
+index 5596c52e246d..ecc51ef0753f 100644
+--- a/drivers/staging/erofs/namei.c
++++ b/drivers/staging/erofs/namei.c
+@@ -15,74 +15,77 @@
+ 
+ #include <trace/events/erofs.h>
+ 
+-/* based on the value of qn->len is accurate */
+-static inline int dirnamecmp(struct qstr *qn,
+-      struct qstr *qd, unsigned int *matched)
++struct erofs_qstr {
++      const unsigned char *name;
++      const unsigned char *end;
++};
++
++/* based on the end of qn is accurate and it must have the trailing '\0' */
++static inline int dirnamecmp(const struct erofs_qstr *qn,
++                           const struct erofs_qstr *qd,
++                           unsigned int *matched)
+ {
+-      unsigned int i = *matched, len = min(qn->len, qd->len);
+-loop:
+-      if (unlikely(i >= len)) {
+-              *matched = i;
+-              if (qn->len < qd->len) {
+-                      /*
+-                       * actually (qn->len == qd->len)
+-                       * when qd->name[i] == '\0'
+-                       */
+-                      return qd->name[i] == '\0' ? 0 : -1;
++      unsigned int i = *matched;
++
++      /*
++       * on-disk error, let's only BUG_ON in the debugging mode.
++       * otherwise, it will return 1 to just skip the invalid name
++       * and go on (in consideration of the lookup performance).
++       */
++      DBG_BUGON(qd->name > qd->end);
++
++      /* qd could not have trailing '\0' */
++      /* However it is absolutely safe if < qd->end */
++      while (qd->name + i < qd->end && qd->name[i] != '\0') {
++              if (qn->name[i] != qd->name[i]) {
++                      *matched = i;
++                      return qn->name[i] > qd->name[i] ? 1 : -1;
+               }
+-              return (qn->len > qd->len);
++              ++i;
+       }
+-
+-      if (qn->name[i] != qd->name[i]) {
+-              *matched = i;
+-              return qn->name[i] > qd->name[i] ? 1 : -1;
+-      }
+-
+-      ++i;
+-      goto loop;
++      *matched = i;
++      /* See comments in __d_alloc on the terminating NUL character */
++      return qn->name[i] == '\0' ? 0 : 1;
+ }
+ 
+-static struct erofs_dirent *find_target_dirent(
+-      struct qstr *name,
+-      u8 *data, int maxsize)
++#define nameoff_from_disk(off, sz)    (le16_to_cpu(off) & ((sz) - 1))
++
++static struct erofs_dirent *find_target_dirent(struct erofs_qstr *name,
++                                             u8 *data,
++                                             unsigned int dirblksize,
++                                             const int ndirents)
+ {
+-      unsigned int ndirents, head, back;
++      int head, back;
+       unsigned int startprfx, endprfx;
+       struct erofs_dirent *const de = (struct erofs_dirent *)data;
+ 
+-      /* make sure that maxsize is valid */
+-      BUG_ON(maxsize < sizeof(struct erofs_dirent));
+-
+-      ndirents = le16_to_cpu(de->nameoff) / sizeof(*de);
+-
+-      /* corrupted dir (may be unnecessary...) */
+-      BUG_ON(!ndirents);
+-
+-      head = 0;
++      /* since the 1st dirent has been evaluated previously */
++      head = 1;
+       back = ndirents - 1;
+       startprfx = endprfx = 0;
+ 
+       while (head <= back) {
+-              unsigned int mid = head + (back - head) / 2;
+-              unsigned int nameoff = le16_to_cpu(de[mid].nameoff);
++              const int mid = head + (back - head) / 2;
++              const int nameoff = nameoff_from_disk(de[mid].nameoff,
++                                                    dirblksize);
+               unsigned int matched = min(startprfx, endprfx);
+-
+-              struct qstr dname = QSTR_INIT(data + nameoff,
+-                      unlikely(mid >= ndirents - 1) ?
+-                              maxsize - nameoff :
+-                              le16_to_cpu(de[mid + 1].nameoff) - nameoff);
++              struct erofs_qstr dname = {
++                      .name = data + nameoff,
++                      .end = unlikely(mid >= ndirents - 1) ?
++                              data + dirblksize :
++                              data + nameoff_from_disk(de[mid + 1].nameoff,
++                                                       dirblksize)
++              };
+ 
+               /* string comparison without already matched prefix */
+               int ret = dirnamecmp(name, &dname, &matched);
+ 
+-              if (unlikely(!ret))
++              if (unlikely(!ret)) {
+                       return de + mid;
+-              else if (ret > 0) {
++              } else if (ret > 0) {
+                       head = mid + 1;
+                       startprfx = matched;
+-              } else if (unlikely(mid < 1))   /* fix "mid" overflow */
+-                      break;
+-              else {
++              } else {
+                       back = mid - 1;
+                       endprfx = matched;
+               }
+@@ -91,12 +94,12 @@ static struct erofs_dirent *find_target_dirent(
+       return ERR_PTR(-ENOENT);
+ }
+ 
+-static struct page *find_target_block_classic(
+-      struct inode *dir,
+-      struct qstr *name, int *_diff)
++static struct page *find_target_block_classic(struct inode *dir,
++                                            struct erofs_qstr *name,
++                                            int *_ndirents)
+ {
+       unsigned int startprfx, endprfx;
+-      unsigned int head, back;
++      int head, back;
+       struct address_space *const mapping = dir->i_mapping;
+       struct page *candidate = ERR_PTR(-ENOENT);
+ 
+@@ -105,41 +108,43 @@ static struct page *find_target_block_classic(
+       back = inode_datablocks(dir) - 1;
+ 
+       while (head <= back) {
+-              unsigned int mid = head + (back - head) / 2;
++              const int mid = head + (back - head) / 2;
+               struct page *page = read_mapping_page(mapping, mid, NULL);
+ 
+-              if (IS_ERR(page)) {
+-exact_out:
+-                      if (!IS_ERR(candidate)) /* valid candidate */
+-                              put_page(candidate);
+-                      return page;
+-              } else {
+-                      int diff;
+-                      unsigned int ndirents, matched;
+-                      struct qstr dname;
++              if (!IS_ERR(page)) {
+                       struct erofs_dirent *de = kmap_atomic(page);
+-                      unsigned int nameoff = le16_to_cpu(de->nameoff);
+-
+-                      ndirents = nameoff / sizeof(*de);
++                      const int nameoff = nameoff_from_disk(de->nameoff,
++                                                            EROFS_BLKSIZ);
++                      const int ndirents = nameoff / sizeof(*de);
++                      int diff;
++                      unsigned int matched;
++                      struct erofs_qstr dname;
+ 
+-                      /* corrupted dir (should have one entry at least) */
+-                      BUG_ON(!ndirents || nameoff > PAGE_SIZE);
++                      if (unlikely(!ndirents)) {
++                              DBG_BUGON(1);
++                              kunmap_atomic(de);
++                              put_page(page);
++                              page = ERR_PTR(-EIO);
++                              goto out;
++                      }
+ 
+                       matched = min(startprfx, endprfx);
+ 
+                       dname.name = (u8 *)de + nameoff;
+-                      dname.len = ndirents == 1 ?
+-                              /* since the rest of the last page is 0 */
+-                              EROFS_BLKSIZ - nameoff
+-                              : le16_to_cpu(de[1].nameoff) - nameoff;
++                      if (ndirents == 1)
++                              dname.end = (u8 *)de + EROFS_BLKSIZ;
++                      else
++                              dname.end = (u8 *)de +
++                                      nameoff_from_disk(de[1].nameoff,
++                                                        EROFS_BLKSIZ);
+ 
+                       /* string comparison without already matched prefix */
+                       diff = dirnamecmp(name, &dname, &matched);
+                       kunmap_atomic(de);
+ 
+                       if (unlikely(!diff)) {
+-                              *_diff = 0;
+-                              goto exact_out;
++                              *_ndirents = 0;
++                              goto out;
+                       } else if (diff > 0) {
+                               head = mid + 1;
+                               startprfx = matched;
+@@ -147,45 +152,51 @@ exact_out:
+                               if (likely(!IS_ERR(candidate)))
+                                       put_page(candidate);
+                               candidate = page;
++                              *_ndirents = ndirents;
+                       } else {
+                               put_page(page);
+ 
+-                              if (unlikely(mid < 1))  /* fix "mid" overflow */
+-                                      break;
+-
+                               back = mid - 1;
+                               endprfx = matched;
+                       }
++                      continue;
+               }
++out:          /* free if the candidate is valid */
++              if (!IS_ERR(candidate))
++                      put_page(candidate);
++              return page;
+       }
+-      *_diff = 1;
+       return candidate;
+ }
+ 
+ int erofs_namei(struct inode *dir,
+-      struct qstr *name,
+-      erofs_nid_t *nid, unsigned int *d_type)
++              struct qstr *name,
++              erofs_nid_t *nid, unsigned int *d_type)
+ {
+-      int diff;
++      int ndirents;
+       struct page *page;
+-      u8 *data;
++      void *data;
+       struct erofs_dirent *de;
++      struct erofs_qstr qn;
+ 
+       if (unlikely(!dir->i_size))
+               return -ENOENT;
+ 
+-      diff = 1;
+-      page = find_target_block_classic(dir, name, &diff);
++      qn.name = name->name;
++      qn.end = name->name + name->len;
++
++      ndirents = 0;
++      page = find_target_block_classic(dir, &qn, &ndirents);
+ 
+       if (unlikely(IS_ERR(page)))
+               return PTR_ERR(page);
+ 
+       data = kmap_atomic(page);
+       /* the target page has been mapped */
+-      de = likely(diff) ?
+-              /* since the rest of the last page is 0 */
+-              find_target_dirent(name, data, EROFS_BLKSIZ) :
+-              (struct erofs_dirent *)data;
++      if (ndirents)
++              de = find_target_dirent(&qn, data, EROFS_BLKSIZ, ndirents);
++      else
++              de = (struct erofs_dirent *)data;
+ 
+       if (likely(!IS_ERR(de))) {
+               *nid = le64_to_cpu(de->nid);
+diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c
+index ca2e8fd78959..ab30d14ded06 100644
+--- a/drivers/staging/erofs/unzip_vle.c
++++ b/drivers/staging/erofs/unzip_vle.c
+@@ -1017,11 +1017,10 @@ repeat:
+       if (llen > grp->llen)
+               llen = grp->llen;
+ 
+-      err = z_erofs_vle_unzip_fast_percpu(compressed_pages,
+-              clusterpages, pages, llen, work->pageofs,
+-              z_erofs_onlinepage_endio);
++      err = z_erofs_vle_unzip_fast_percpu(compressed_pages, clusterpages,
++                                          pages, llen, work->pageofs);
+       if (err != -ENOTSUPP)
+-              goto out_percpu;
++              goto out;
+ 
+       if (sparsemem_pages >= nr_pages)
+               goto skip_allocpage;
+@@ -1042,8 +1041,25 @@ skip_allocpage:
+       erofs_vunmap(vout, nr_pages);
+ 
+ out:
++      /* must handle all compressed pages before endding pages */
++      for (i = 0; i < clusterpages; ++i) {
++              page = compressed_pages[i];
++
++#ifdef EROFS_FS_HAS_MANAGED_CACHE
++              if (page->mapping == MNGD_MAPPING(sbi))
++                      continue;
++#endif
++              /* recycle all individual staging pages */
++              (void)z_erofs_gather_if_stagingpage(page_pool, page);
++
++              WRITE_ONCE(compressed_pages[i], NULL);
++      }
++
+       for (i = 0; i < nr_pages; ++i) {
+               page = pages[i];
++              if (!page)
++                      continue;
++
+               DBG_BUGON(!page->mapping);
+ 
+               /* recycle all individual staging pages */
+@@ -1056,20 +1072,6 @@ out:
+               z_erofs_onlinepage_endio(page);
+       }
+ 
+-out_percpu:
+-      for (i = 0; i < clusterpages; ++i) {
+-              page = compressed_pages[i];
+-
+-#ifdef EROFS_FS_HAS_MANAGED_CACHE
+-              if (page->mapping == MNGD_MAPPING(sbi))
+-                      continue;
+-#endif
+-              /* recycle all individual staging pages */
+-              (void)z_erofs_gather_if_stagingpage(page_pool, page);
+-
+-              WRITE_ONCE(compressed_pages[i], NULL);
+-      }
+-
+       if (pages == z_pagemap_global)
+               mutex_unlock(&z_pagemap_global_lock);
+       else if (unlikely(pages != pages_onstack))
+diff --git a/drivers/staging/erofs/unzip_vle.h b/drivers/staging/erofs/unzip_vle.h
+index 5a4e1b62c0d1..c0dfd6906aa8 100644
+--- a/drivers/staging/erofs/unzip_vle.h
++++ b/drivers/staging/erofs/unzip_vle.h
+@@ -218,8 +218,7 @@ extern int z_erofs_vle_plain_copy(struct page **compressed_pages,
+ 
+ extern int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
+       unsigned clusterpages, struct page **pages,
+-      unsigned outlen, unsigned short pageofs,
+-      void (*endio)(struct page *));
++      unsigned int outlen, unsigned short pageofs);
+ 
+ extern int z_erofs_vle_unzip_vmap(struct page **compressed_pages,
+       unsigned clusterpages, void *vaddr, unsigned llen,
+diff --git a/drivers/staging/erofs/unzip_vle_lz4.c b/drivers/staging/erofs/unzip_vle_lz4.c
+index 52797bd89da1..f471b894c848 100644
+--- a/drivers/staging/erofs/unzip_vle_lz4.c
++++ b/drivers/staging/erofs/unzip_vle_lz4.c
+@@ -125,8 +125,7 @@ int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
+                                 unsigned int clusterpages,
+                                 struct page **pages,
+                                 unsigned int outlen,
+-                                unsigned short pageofs,
+-                                void (*endio)(struct page *))
++                                unsigned short pageofs)
+ {
+       void *vin, *vout;
+       unsigned int nr_pages, i, j;
+@@ -148,19 +147,16 @@ int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
+       ret = z_erofs_unzip_lz4(vin, vout + pageofs,
+                               clusterpages * PAGE_SIZE, outlen);
+ 
+-      if (ret >= 0) {
+-              outlen = ret;
+-              ret = 0;
+-      }
++      if (ret < 0)
++              goto out;
++      ret = 0;
+ 
+       for (i = 0; i < nr_pages; ++i) {
+               j = min((unsigned int)PAGE_SIZE - pageofs, outlen);
+ 
+               if (pages[i]) {
+-                      if (ret < 0) {
+-                              SetPageError(pages[i]);
+-                      } else if (clusterpages == 1 &&
+-                                 pages[i] == compressed_pages[0]) {
++                      if (clusterpages == 1 &&
++                          pages[i] == compressed_pages[0]) {
+                               memcpy(vin + pageofs, vout + pageofs, j);
+                       } else {
+                               void *dst = kmap_atomic(pages[i]);
+@@ -168,12 +164,13 @@ int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
+                               memcpy(dst + pageofs, vout + pageofs, j);
+                               kunmap_atomic(dst);
+                       }
+-                      endio(pages[i]);
+               }
+               vout += PAGE_SIZE;
+               outlen -= j;
+               pageofs = 0;
+       }
++
++out:
+       preempt_enable();
+ 
+       if (clusterpages == 1)
+diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
+index b92740edc416..4b038f25f256 100644
+--- a/fs/gfs2/glock.c
++++ b/fs/gfs2/glock.c
+@@ -107,7 +107,7 @@ static int glock_wake_function(wait_queue_entry_t *wait, unsigned int mode,
+ 
+ static wait_queue_head_t *glock_waitqueue(struct lm_lockname *name)
+ {
+-      u32 hash = jhash2((u32 *)name, sizeof(*name) / 4, 0);
++      u32 hash = jhash2((u32 *)name, ht_parms.key_len / 4, 0);
+ 
+       return glock_wait_table + hash_32(hash, GLOCK_WAIT_TABLE_BITS);
+ }
+diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h
+index bfe1639df02d..97fc498dc767 100644
+--- a/include/drm/drm_cache.h
++++ b/include/drm/drm_cache.h
+@@ -47,6 +47,24 @@ static inline bool drm_arch_can_wc_memory(void)
+       return false;
+ #elif defined(CONFIG_MIPS) && defined(CONFIG_CPU_LOONGSON3)
+       return false;
++#elif defined(CONFIG_ARM) || defined(CONFIG_ARM64)
++      /*
++       * The DRM driver stack is designed to work with cache coherent devices
++       * only, but permits an optimization to be enabled in some cases, where
++       * for some buffers, both the CPU and the GPU use uncached mappings,
++       * removing the need for DMA snooping and allocation in the CPU caches.
++       *
++       * The use of uncached GPU mappings relies on the correct implementation
++       * of the PCIe NoSnoop TLP attribute by the platform, otherwise the GPU
++       * will use cached mappings nonetheless. On x86 platforms, this does not
++       * seem to matter, as uncached CPU mappings will snoop the caches in any
++       * case. However, on ARM and arm64, enabling this optimization on a
++       * platform where NoSnoop is ignored results in loss of coherency, which
++       * breaks correct operation of the device. Since we have no way of
++       * detecting whether NoSnoop works or not, just disable this
++       * optimization entirely for ARM and arm64.
++       */
++      return false;
+ #else
+       return true;
+ #endif
+diff --git a/net/core/skmsg.c b/net/core/skmsg.c
+index 8c826603bf36..8bc0ba1ebabe 100644
+--- a/net/core/skmsg.c
++++ b/net/core/skmsg.c
+@@ -545,6 +545,7 @@ static void sk_psock_destroy_deferred(struct work_struct *gc)
+       struct sk_psock *psock = container_of(gc, struct sk_psock, gc);
+ 
+       /* No sk_callback_lock since already detached. */
++      strp_stop(&psock->parser.strp);
+       strp_done(&psock->parser.strp);
+ 
+       cancel_work_sync(&psock->work);
+diff --git a/scripts/gdb/linux/constants.py.in b/scripts/gdb/linux/constants.py.in
+index 7aad82406422..d3319a80788a 100644
+--- a/scripts/gdb/linux/constants.py.in
++++ b/scripts/gdb/linux/constants.py.in
+@@ -37,12 +37,12 @@
+ import gdb
+ 
+ /* linux/fs.h */
+-LX_VALUE(MS_RDONLY)
+-LX_VALUE(MS_SYNCHRONOUS)
+-LX_VALUE(MS_MANDLOCK)
+-LX_VALUE(MS_DIRSYNC)
+-LX_VALUE(MS_NOATIME)
+-LX_VALUE(MS_NODIRATIME)
++LX_VALUE(SB_RDONLY)
++LX_VALUE(SB_SYNCHRONOUS)
++LX_VALUE(SB_MANDLOCK)
++LX_VALUE(SB_DIRSYNC)
++LX_VALUE(SB_NOATIME)
++LX_VALUE(SB_NODIRATIME)
+ 
+ /* linux/mount.h */
+ LX_VALUE(MNT_NOSUID)
+diff --git a/scripts/gdb/linux/proc.py b/scripts/gdb/linux/proc.py
+index 0aebd7565b03..2f01a958eb22 100644
+--- a/scripts/gdb/linux/proc.py
++++ b/scripts/gdb/linux/proc.py
+@@ -114,11 +114,11 @@ def info_opts(lst, opt):
+     return opts
+ 
+ 
+-FS_INFO = {constants.LX_MS_SYNCHRONOUS: ",sync",
+-           constants.LX_MS_MANDLOCK: ",mand",
+-           constants.LX_MS_DIRSYNC: ",dirsync",
+-           constants.LX_MS_NOATIME: ",noatime",
+-           constants.LX_MS_NODIRATIME: ",nodiratime"}
++FS_INFO = {constants.LX_SB_SYNCHRONOUS: ",sync",
++           constants.LX_SB_MANDLOCK: ",mand",
++           constants.LX_SB_DIRSYNC: ",dirsync",
++           constants.LX_SB_NOATIME: ",noatime",
++           constants.LX_SB_NODIRATIME: ",nodiratime"}
+ 
+ MNT_INFO = {constants.LX_MNT_NOSUID: ",nosuid",
+             constants.LX_MNT_NODEV: ",nodev",
+@@ -184,7 +184,7 @@ values of that process namespace"""
+             fstype = superblock['s_type']['name'].string()
+             s_flags = int(superblock['s_flags'])
+             m_flags = int(vfs['mnt']['mnt_flags'])
+-            rd = "ro" if (s_flags & constants.LX_MS_RDONLY) else "rw"
++            rd = "ro" if (s_flags & constants.LX_SB_RDONLY) else "rw"
+ 
+             gdb.write(
+                 "{} {} {} {}{}{} 0 0\n"

Reply via email to