commit:     d763e643caecaf529b1a9a2cfa9981ed82043b40
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Mar 20 11:57:16 2020 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Mar 20 11:57:16 2020 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=d763e643

Linux patch 4.19.112

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README               |    4 +
 1111_linux-4.19.112.patch | 1986 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1990 insertions(+)

diff --git a/0000_README b/0000_README
index 7eab5d8..e470ab2 100644
--- a/0000_README
+++ b/0000_README
@@ -483,6 +483,10 @@ Patch:  1110_linux-4.19.111.patch
 From:   https://www.kernel.org
 Desc:   Linux 4.19.111
 
+Patch:  1111_linux-4.19.112.patch
+From:   https://www.kernel.org
+Desc:   Linux 4.19.112
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1111_linux-4.19.112.patch b/1111_linux-4.19.112.patch
new file mode 100644
index 0000000..47b3ac8
--- /dev/null
+++ b/1111_linux-4.19.112.patch
@@ -0,0 +1,1986 @@
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index 8bf0c0532046..1a5101b7e853 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -136,6 +136,10 @@
+                       dynamic table installation which will install SSDT
+                       tables to /sys/firmware/acpi/tables/dynamic.
+ 
++      acpi_no_watchdog        [HW,ACPI,WDT]
++                      Ignore the ACPI-based watchdog interface (WDAT) and let
++                      a native driver control the watchdog device instead.
++
+       acpi_rsdp=      [ACPI,EFI,KEXEC]
+                       Pass the RSDP address to the kernel, mostly used
+                       on machines running EFI runtime service to boot the
+diff --git a/Documentation/driver-api/device_link.rst b/Documentation/driver-api/device_link.rst
+index d6763272e747..e8b0a8fd1ae0 100644
+--- a/Documentation/driver-api/device_link.rst
++++ b/Documentation/driver-api/device_link.rst
+@@ -25,8 +25,8 @@ suspend/resume and shutdown ordering.
+ 
+ Device links allow representation of such dependencies in the driver core.
+ 
+-In its standard form, a device link combines *both* dependency types:
+-It guarantees correct suspend/resume and shutdown ordering between a
++In its standard or *managed* form, a device link combines *both* dependency
++types:  It guarantees correct suspend/resume and shutdown ordering between a
+ "supplier" device and its "consumer" devices, and it guarantees driver
+ presence on the supplier.  The consumer devices are not probed before the
+ supplier is bound to a driver, and they're unbound before the supplier
+@@ -59,18 +59,24 @@ device ``->probe`` callback or a boot-time PCI quirk.
+ 
+ Another example for an inconsistent state would be a device link that
+ represents a driver presence dependency, yet is added from the consumer's
+-``->probe`` callback while the supplier hasn't probed yet:  Had the driver
+-core known about the device link earlier, it wouldn't have probed the
++``->probe`` callback while the supplier hasn't started to probe yet:  Had the
++driver core known about the device link earlier, it wouldn't have probed the
+ consumer in the first place.  The onus is thus on the consumer to check
+ presence of the supplier after adding the link, and defer probing on
+-non-presence.
+-
+-If a device link is added in the ``->probe`` callback of the supplier or
+-consumer driver, it is typically deleted in its ``->remove`` callback for
+-symmetry.  That way, if the driver is compiled as a module, the device
+-link is added on module load and orderly deleted on unload.  The same
+-restrictions that apply to device link addition (e.g. exclusion of a
+-parallel suspend/resume transition) apply equally to deletion.
++non-presence.  [Note that it is valid to create a link from the consumer's
++``->probe`` callback while the supplier is still probing, but the consumer must
++know that the supplier is functional already at the link creation time (that is
++the case, for instance, if the consumer has just acquired some resources that
++would not have been available had the supplier not been functional then).]
++
++If a device link with ``DL_FLAG_STATELESS`` set (i.e. a stateless device link)
++is added in the ``->probe`` callback of the supplier or consumer driver, it is
++typically deleted in its ``->remove`` callback for symmetry.  That way, if the
++driver is compiled as a module, the device link is added on module load and
++orderly deleted on unload.  The same restrictions that apply to device link
++addition (e.g. exclusion of a parallel suspend/resume transition) apply equally
++to deletion.  Device links managed by the driver core are deleted automatically
++by it.
+ 
+ Several flags may be specified on device link addition, two of which
+ have already been mentioned above:  ``DL_FLAG_STATELESS`` to express that no
+@@ -83,22 +89,37 @@ link is added from the consumer's ``->probe`` callback:  ``DL_FLAG_RPM_ACTIVE``
+ can be specified to runtime resume the supplier upon addition of the
+ device link.  ``DL_FLAG_AUTOREMOVE_CONSUMER`` causes the device link to be
+ automatically purged when the consumer fails to probe or later unbinds.
+-This obviates the need to explicitly delete the link in the ``->remove``
+-callback or in the error path of the ``->probe`` callback.
+ 
+ Similarly, when the device link is added from supplier's ``->probe`` callback,
+ ``DL_FLAG_AUTOREMOVE_SUPPLIER`` causes the device link to be automatically
+ purged when the supplier fails to probe or later unbinds.
+ 
++If neither ``DL_FLAG_AUTOREMOVE_CONSUMER`` nor ``DL_FLAG_AUTOREMOVE_SUPPLIER``
++is set, ``DL_FLAG_AUTOPROBE_CONSUMER`` can be used to request the driver core
++to probe for a driver for the consumer device on the link automatically after
++a driver has been bound to the supplier device.
++
++Note, however, that any combinations of ``DL_FLAG_AUTOREMOVE_CONSUMER``,
++``DL_FLAG_AUTOREMOVE_SUPPLIER`` or ``DL_FLAG_AUTOPROBE_CONSUMER`` with
++``DL_FLAG_STATELESS`` are invalid and cannot be used.
++
+ Limitations
+ ===========
+ 
+-Driver authors should be aware that a driver presence dependency (i.e. when
+-``DL_FLAG_STATELESS`` is not specified on link addition) may cause probing of
+-the consumer to be deferred indefinitely.  This can become a problem if the
+-consumer is required to probe before a certain initcall level is reached.
+-Worse, if the supplier driver is blacklisted or missing, the consumer will
+-never be probed.
++Driver authors should be aware that a driver presence dependency for managed
++device links (i.e. when ``DL_FLAG_STATELESS`` is not specified on link addition)
++may cause probing of the consumer to be deferred indefinitely.  This can become
++a problem if the consumer is required to probe before a certain initcall level
++is reached.  Worse, if the supplier driver is blacklisted or missing, the
++consumer will never be probed.
++
++Moreover, managed device links cannot be deleted directly.  They are deleted
++by the driver core when they are not necessary any more in accordance with the
++``DL_FLAG_AUTOREMOVE_CONSUMER`` and ``DL_FLAG_AUTOREMOVE_SUPPLIER`` flags.
++However, stateless device links (i.e. device links with ``DL_FLAG_STATELESS``
++set) are expected to be removed by whoever called :c:func:`device_link_add()`
++to add them with the help of either :c:func:`device_link_del()` or
++:c:func:`device_link_remove()`.
+ 
+ Sometimes drivers depend on optional resources.  They are able to operate
+ in a degraded mode (reduced feature set or performance) when those resources
+@@ -282,4 +303,4 @@ API
+ ===
+ 
+ .. kernel-doc:: drivers/base/core.c
+-   :functions: device_link_add device_link_del
++   :functions: device_link_add device_link_del device_link_remove
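
As a sketch of the usage pattern the documentation above describes: a consumer
driver creates a managed link (no DL_FLAG_STATELESS) from its ->probe callback
and lets the driver core delete it. The foo_* names and the supplier lookup are
hypothetical; only device_link_add() and its flags come from the documented API,
and error handling is simplified.

    #include <linux/device.h>

    static int foo_consumer_probe(struct device *dev)
    {
            struct device *supplier = foo_get_supplier(dev); /* hypothetical lookup */
            struct device_link *link;

            /*
             * Managed link: the driver core tracks its state and, because
             * DL_FLAG_AUTOREMOVE_CONSUMER is set, deletes it when this
             * consumer unbinds; no device_link_del() in ->remove is needed.
             */
            link = device_link_add(dev, supplier,
                                   DL_FLAG_AUTOREMOVE_CONSUMER | DL_FLAG_PM_RUNTIME);
            if (!link)
                    return -EINVAL;

            /*
             * Per the text above, the consumer must still verify that the
             * supplier is functional here and return -EPROBE_DEFER if not.
             */
            return 0;
    }
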
+diff --git a/Makefile b/Makefile
+index fed04bdef0a3..bd57e085188d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 19
+-SUBLEVEL = 111
++SUBLEVEL = 112
+ EXTRAVERSION =
+ NAME = "People's Front"
+ 
+diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c
+index e8cda5e02b4e..a51b7ff0a85f 100644
+--- a/arch/arm/kernel/vdso.c
++++ b/arch/arm/kernel/vdso.c
+@@ -103,6 +103,8 @@ static bool __init cntvct_functional(void)
+        * this.
+        */
+       np = of_find_compatible_node(NULL, NULL, "arm,armv7-timer");
++      if (!np)
++              np = of_find_compatible_node(NULL, NULL, "arm,armv8-timer");
+       if (!np)
+               goto out_put;
+ 
+diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
+index 6709a8d33963..f1e34f16cfab 100644
+--- a/arch/arm/lib/copy_from_user.S
++++ b/arch/arm/lib/copy_from_user.S
+@@ -100,7 +100,7 @@ ENTRY(arm_copy_from_user)
+ 
+ ENDPROC(arm_copy_from_user)
+ 
+-      .pushsection .fixup,"ax"
++      .pushsection .text.fixup,"ax"
+       .align 0
+       copy_abort_preamble
+       ldmfd   sp!, {r1, r2, r3}
+diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
+index baa7e36073f9..604a8558752d 100644
+--- a/arch/x86/events/amd/uncore.c
++++ b/arch/x86/events/amd/uncore.c
+@@ -193,20 +193,18 @@ static int amd_uncore_event_init(struct perf_event *event)
+ 
+       /*
+        * NB and Last level cache counters (MSRs) are shared across all cores
+-       * that share the same NB / Last level cache. Interrupts can be directed
+-       * to a single target core, however, event counts generated by processes
+-       * running on other cores cannot be masked out. So we do not support
+-       * sampling and per-thread events.
++       * that share the same NB / Last level cache.  On family 16h and below,
++       * interrupts can be directed to a single target core, however, event
++       * counts generated by processes running on other cores cannot be masked
++       * out. So we do not support sampling and per-thread events via
++       * CAP_NO_INTERRUPT, and we do not enable counter overflow interrupts:
+        */
+-      if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
+-              return -EINVAL;
+ 
+       /* NB and Last level cache counters do not have usr/os/guest/host bits */
+       if (event->attr.exclude_user || event->attr.exclude_kernel ||
+           event->attr.exclude_host || event->attr.exclude_guest)
+               return -EINVAL;
+ 
+-      /* and we do not enable counter overflow interrupts */
+       hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
+       hwc->idx = -1;
+ 
+@@ -314,6 +312,7 @@ static struct pmu amd_nb_pmu = {
+       .start          = amd_uncore_start,
+       .stop           = amd_uncore_stop,
+       .read           = amd_uncore_read,
++      .capabilities   = PERF_PMU_CAP_NO_INTERRUPT,
+ };
+ 
+ static struct pmu amd_llc_pmu = {
+@@ -324,6 +323,7 @@ static struct pmu amd_llc_pmu = {
+       .start          = amd_uncore_start,
+       .stop           = amd_uncore_stop,
+       .read           = amd_uncore_read,
++      .capabilities   = PERF_PMU_CAP_NO_INTERRUPT,
+ };
+ 
+ static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)
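
The hunks above drop the driver's open-coded is_sampling_event()/PERF_ATTACH_TASK
rejection and instead advertise PERF_PMU_CAP_NO_INTERRUPT, letting the perf core
reject sampling events centrally. Roughly, the core-side check looks like the
sketch below (paraphrased from memory of kernel/events/core.c; exact placement
and labels may differ, and note the error code becomes -EOPNOTSUPP rather than
the old driver-local -EINVAL):

    /* In perf_event_open(), once the event is bound to a PMU: */
    if (is_sampling_event(event)) {
            if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) {
                    err = -EOPNOTSUPP;
                    goto err_alloc;
            }
    }
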
+diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c
+index 23cde3d8e8fb..0bd1899a287f 100644
+--- a/drivers/acpi/acpi_watchdog.c
++++ b/drivers/acpi/acpi_watchdog.c
+@@ -58,12 +58,14 @@ static bool acpi_watchdog_uses_rtc(const struct acpi_table_wdat *wdat)
+ }
+ #endif
+ 
++static bool acpi_no_watchdog;
++
+ static const struct acpi_table_wdat *acpi_watchdog_get_wdat(void)
+ {
+       const struct acpi_table_wdat *wdat = NULL;
+       acpi_status status;
+ 
+-      if (acpi_disabled)
++      if (acpi_disabled || acpi_no_watchdog)
+               return NULL;
+ 
+       status = acpi_get_table(ACPI_SIG_WDAT, 0,
+@@ -91,6 +93,14 @@ bool acpi_has_watchdog(void)
+ }
+ EXPORT_SYMBOL_GPL(acpi_has_watchdog);
+ 
++/* ACPI watchdog can be disabled on boot command line */
++static int __init disable_acpi_watchdog(char *str)
++{
++      acpi_no_watchdog = true;
++      return 1;
++}
++__setup("acpi_no_watchdog", disable_acpi_watchdog);
++
+ void __init acpi_watchdog_init(void)
+ {
+       const struct acpi_wdat_entry *entries;
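
The __setup("acpi_no_watchdog", ...) handler added above uses the kernel's
generic early boot-parameter hook. The same pattern in isolation, with
"example_param" as a hypothetical parameter name:

    #include <linux/init.h>

    static bool example_param;

    /* Runs during early boot when "example_param" is on the command line. */
    static int __init parse_example_param(char *str)
    {
            example_param = true;
            return 1;       /* non-zero tells the core the option was handled */
    }
    __setup("example_param", parse_example_param);
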
+diff --git a/drivers/base/core.c b/drivers/base/core.c
+index 742bc60e9cca..928fc1532a70 100644
+--- a/drivers/base/core.c
++++ b/drivers/base/core.c
+@@ -124,6 +124,50 @@ static int device_is_dependent(struct device *dev, void *target)
+       return ret;
+ }
+ 
++static void device_link_init_status(struct device_link *link,
++                                  struct device *consumer,
++                                  struct device *supplier)
++{
++      switch (supplier->links.status) {
++      case DL_DEV_PROBING:
++              switch (consumer->links.status) {
++              case DL_DEV_PROBING:
++                      /*
++                       * A consumer driver can create a link to a supplier
++                       * that has not completed its probing yet as long as it
++                       * knows that the supplier is already functional (for
++                       * example, it has just acquired some resources from the
++                       * supplier).
++                       */
++                      link->status = DL_STATE_CONSUMER_PROBE;
++                      break;
++              default:
++                      link->status = DL_STATE_DORMANT;
++                      break;
++              }
++              break;
++      case DL_DEV_DRIVER_BOUND:
++              switch (consumer->links.status) {
++              case DL_DEV_PROBING:
++                      link->status = DL_STATE_CONSUMER_PROBE;
++                      break;
++              case DL_DEV_DRIVER_BOUND:
++                      link->status = DL_STATE_ACTIVE;
++                      break;
++              default:
++                      link->status = DL_STATE_AVAILABLE;
++                      break;
++              }
++              break;
++      case DL_DEV_UNBINDING:
++              link->status = DL_STATE_SUPPLIER_UNBIND;
++              break;
++      default:
++              link->status = DL_STATE_DORMANT;
++              break;
++      }
++}
++
+ static int device_reorder_to_tail(struct device *dev, void *not_used)
+ {
+       struct device_link *link;
+@@ -165,6 +209,13 @@ void device_pm_move_to_tail(struct device *dev)
+       device_links_read_unlock(idx);
+ }
+ 
++#define DL_MANAGED_LINK_FLAGS (DL_FLAG_AUTOREMOVE_CONSUMER | \
++                             DL_FLAG_AUTOREMOVE_SUPPLIER | \
++                             DL_FLAG_AUTOPROBE_CONSUMER)
++
++#define DL_ADD_VALID_FLAGS (DL_MANAGED_LINK_FLAGS | DL_FLAG_STATELESS | \
++                          DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE)
++
+ /**
+  * device_link_add - Create a link between two devices.
+  * @consumer: Consumer end of the link.
+@@ -179,14 +230,38 @@ void device_pm_move_to_tail(struct device *dev)
+  * of the link.  If DL_FLAG_PM_RUNTIME is not set, DL_FLAG_RPM_ACTIVE will be
+  * ignored.
+  *
+- * If the DL_FLAG_AUTOREMOVE_CONSUMER flag is set, the link will be removed
+- * automatically when the consumer device driver unbinds from it.  Analogously,
+- * if DL_FLAG_AUTOREMOVE_SUPPLIER is set in @flags, the link will be removed
+- * automatically when the supplier device driver unbinds from it.
+- *
+- * The combination of DL_FLAG_STATELESS and either DL_FLAG_AUTOREMOVE_CONSUMER
+- * or DL_FLAG_AUTOREMOVE_SUPPLIER set in @flags at the same time is invalid and
+- * will cause NULL to be returned upfront.
++ * If DL_FLAG_STATELESS is set in @flags, the caller of this function is
++ * expected to release the link returned by it directly with the help of either
++ * device_link_del() or device_link_remove().
++ *
++ * If that flag is not set, however, the caller of this function is handing the
++ * management of the link over to the driver core entirely and its return value
++ * can only be used to check whether or not the link is present.  In that case,
++ * the DL_FLAG_AUTOREMOVE_CONSUMER and DL_FLAG_AUTOREMOVE_SUPPLIER device link
++ * flags can be used to indicate to the driver core when the link can be safely
++ * deleted.  Namely, setting one of them in @flags indicates to the driver core
++ * that the link is not going to be used (by the given caller of this function)
++ * after unbinding the consumer or supplier driver, respectively, from its
++ * device, so the link can be deleted at that point.  If none of them is set,
++ * the link will be maintained until one of the devices pointed to by it (either
++ * the consumer or the supplier) is unregistered.
++ *
++ * Also, if DL_FLAG_STATELESS, DL_FLAG_AUTOREMOVE_CONSUMER and
++ * DL_FLAG_AUTOREMOVE_SUPPLIER are not set in @flags (that is, a persistent
++ * managed device link is being added), the DL_FLAG_AUTOPROBE_CONSUMER flag can
++ * be used to request the driver core to automatically probe for a consumer
++ * driver after successfully binding a driver to the supplier device.
++ *
++ * The combination of DL_FLAG_STATELESS and one of DL_FLAG_AUTOREMOVE_CONSUMER,
++ * DL_FLAG_AUTOREMOVE_SUPPLIER, or DL_FLAG_AUTOPROBE_CONSUMER set in @flags at
++ * the same time is invalid and will cause NULL to be returned upfront.
++ * However, if a device link between the given @consumer and @supplier pair
++ * exists already when this function is called for them, the existing link will
++ * be returned regardless of its current type and status (the link's flags may
++ * be modified then).  The caller of this function is then expected to treat
++ * the link as though it has just been created, so (in particular) if
++ * DL_FLAG_STATELESS was passed in @flags, the link needs to be released
++ * explicitly when not needed any more (as stated above).
+  *
+  * A side effect of the link creation is re-ordering of dpm_list and the
+  * devices_kset list by moving the consumer device and all devices depending
+@@ -202,9 +277,11 @@ struct device_link *device_link_add(struct device *consumer,
+ {
+       struct device_link *link;
+ 
+-      if (!consumer || !supplier ||
+-          (flags & DL_FLAG_STATELESS &&
+-           flags & (DL_FLAG_AUTOREMOVE_CONSUMER | DL_FLAG_AUTOREMOVE_SUPPLIER)))
++      if (!consumer || !supplier || flags & ~DL_ADD_VALID_FLAGS ||
++          (flags & DL_FLAG_STATELESS && flags & DL_MANAGED_LINK_FLAGS) ||
++          (flags & DL_FLAG_AUTOPROBE_CONSUMER &&
++           flags & (DL_FLAG_AUTOREMOVE_CONSUMER |
++                    DL_FLAG_AUTOREMOVE_SUPPLIER)))
+               return NULL;
+ 
+       if (flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) {
+@@ -214,6 +291,9 @@ struct device_link *device_link_add(struct device *consumer,
+               }
+       }
+ 
++      if (!(flags & DL_FLAG_STATELESS))
++              flags |= DL_FLAG_MANAGED;
++
+       device_links_write_lock();
+       device_pm_lock();
+ 
+@@ -228,25 +308,18 @@ struct device_link *device_link_add(struct device *consumer,
+               goto out;
+       }
+ 
++      /*
++       * DL_FLAG_AUTOREMOVE_SUPPLIER indicates that the link will be needed
++       * longer than for DL_FLAG_AUTOREMOVE_CONSUMER and setting them both
++       * together doesn't make sense, so prefer DL_FLAG_AUTOREMOVE_SUPPLIER.
++       */
++      if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
++              flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER;
++
+       list_for_each_entry(link, &supplier->links.consumers, s_node) {
+               if (link->consumer != consumer)
+                       continue;
+ 
+-              /*
+-               * Don't return a stateless link if the caller wants a stateful
+-               * one and vice versa.
+-               */
+-              if (WARN_ON((flags & DL_FLAG_STATELESS) != (link->flags & DL_FLAG_STATELESS))) {
+-                      link = NULL;
+-                      goto out;
+-              }
+-
+-              if (flags & DL_FLAG_AUTOREMOVE_CONSUMER)
+-                      link->flags |= DL_FLAG_AUTOREMOVE_CONSUMER;
+-
+-              if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
+-                      link->flags |= DL_FLAG_AUTOREMOVE_SUPPLIER;
+-
+               if (flags & DL_FLAG_PM_RUNTIME) {
+                       if (!(link->flags & DL_FLAG_PM_RUNTIME)) {
+                               pm_runtime_new_link(consumer);
+@@ -256,7 +329,31 @@ struct device_link *device_link_add(struct device *consumer,
+                               refcount_inc(&link->rpm_active);
+               }
+ 
+-              kref_get(&link->kref);
++              if (flags & DL_FLAG_STATELESS) {
++                      link->flags |= DL_FLAG_STATELESS;
++                      kref_get(&link->kref);
++                      goto out;
++              }
++
++              /*
++               * If the life time of the link following from the new flags is
++               * longer than indicated by the flags of the existing link,
++               * update the existing link to stay around longer.
++               */
++              if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER) {
++                      if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) {
++                              link->flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER;
++                              link->flags |= DL_FLAG_AUTOREMOVE_SUPPLIER;
++                      }
++              } else if (!(flags & DL_FLAG_AUTOREMOVE_CONSUMER)) {
++                      link->flags &= ~(DL_FLAG_AUTOREMOVE_CONSUMER |
++                                       DL_FLAG_AUTOREMOVE_SUPPLIER);
++              }
++              if (!(link->flags & DL_FLAG_MANAGED)) {
++                      kref_get(&link->kref);
++                      link->flags |= DL_FLAG_MANAGED;
++                      device_link_init_status(link, consumer, supplier);
++              }
+               goto out;
+       }
+ 
+@@ -283,39 +380,18 @@ struct device_link *device_link_add(struct device *consumer,
+       kref_init(&link->kref);
+ 
+       /* Determine the initial link state. */
+-      if (flags & DL_FLAG_STATELESS) {
++      if (flags & DL_FLAG_STATELESS)
+               link->status = DL_STATE_NONE;
+-      } else {
+-              switch (supplier->links.status) {
+-              case DL_DEV_DRIVER_BOUND:
+-                      switch (consumer->links.status) {
+-                      case DL_DEV_PROBING:
+-                              /*
+-                               * Some callers expect the link creation during
+-                               * consumer driver probe to resume the supplier
+-                               * even without DL_FLAG_RPM_ACTIVE.
+-                               */
+-                              if (flags & DL_FLAG_PM_RUNTIME)
+-                                      pm_runtime_resume(supplier);
+-
+-                              link->status = DL_STATE_CONSUMER_PROBE;
+-                              break;
+-                      case DL_DEV_DRIVER_BOUND:
+-                              link->status = DL_STATE_ACTIVE;
+-                              break;
+-                      default:
+-                              link->status = DL_STATE_AVAILABLE;
+-                              break;
+-                      }
+-                      break;
+-              case DL_DEV_UNBINDING:
+-                      link->status = DL_STATE_SUPPLIER_UNBIND;
+-                      break;
+-              default:
+-                      link->status = DL_STATE_DORMANT;
+-                      break;
+-              }
+-      }
++      else
++              device_link_init_status(link, consumer, supplier);
++
++      /*
++       * Some callers expect the link creation during consumer driver probe to
++       * resume the supplier even without DL_FLAG_RPM_ACTIVE.
++       */
++      if (link->status == DL_STATE_CONSUMER_PROBE &&
++          flags & DL_FLAG_PM_RUNTIME)
++              pm_runtime_resume(supplier);
+ 
+       /*
+        * Move the consumer and all of the devices depending on it to the end
+@@ -389,8 +465,16 @@ static void __device_link_del(struct kref *kref)
+ }
+ #endif /* !CONFIG_SRCU */
+ 
++static void device_link_put_kref(struct device_link *link)
++{
++      if (link->flags & DL_FLAG_STATELESS)
++              kref_put(&link->kref, __device_link_del);
++      else
++              WARN(1, "Unable to drop a managed device link reference\n");
++}
++
+ /**
+- * device_link_del - Delete a link between two devices.
++ * device_link_del - Delete a stateless link between two devices.
+  * @link: Device link to delete.
+  *
+  * The caller must ensure proper synchronization of this function with runtime
+@@ -402,14 +486,14 @@ void device_link_del(struct device_link *link)
+ {
+       device_links_write_lock();
+       device_pm_lock();
+-      kref_put(&link->kref, __device_link_del);
++      device_link_put_kref(link);
+       device_pm_unlock();
+       device_links_write_unlock();
+ }
+ EXPORT_SYMBOL_GPL(device_link_del);
+ 
+ /**
+- * device_link_remove - remove a link between two devices.
++ * device_link_remove - Delete a stateless link between two devices.
+  * @consumer: Consumer end of the link.
+  * @supplier: Supplier end of the link.
+  *
+@@ -428,7 +512,7 @@ void device_link_remove(void *consumer, struct device *supplier)
+ 
+       list_for_each_entry(link, &supplier->links.consumers, s_node) {
+               if (link->consumer == consumer) {
+-                      kref_put(&link->kref, __device_link_del);
++                      device_link_put_kref(link);
+                       break;
+               }
+       }
+@@ -461,7 +545,7 @@ static void device_links_missing_supplier(struct device *dev)
+  * mark the link as "consumer probe in progress" to make the supplier removal
+  * wait for us to complete (or bad things may happen).
+  *
+- * Links with the DL_FLAG_STATELESS flag set are ignored.
++ * Links without the DL_FLAG_MANAGED flag set are ignored.
+  */
+ int device_links_check_suppliers(struct device *dev)
+ {
+@@ -471,7 +555,7 @@ int device_links_check_suppliers(struct device *dev)
+       device_links_write_lock();
+ 
+       list_for_each_entry(link, &dev->links.suppliers, c_node) {
+-              if (link->flags & DL_FLAG_STATELESS)
++              if (!(link->flags & DL_FLAG_MANAGED))
+                       continue;
+ 
+               if (link->status != DL_STATE_AVAILABLE) {
+@@ -496,7 +580,7 @@ int device_links_check_suppliers(struct device *dev)
+  *
+  * Also change the status of @dev's links to suppliers to "active".
+  *
+- * Links with the DL_FLAG_STATELESS flag set are ignored.
++ * Links without the DL_FLAG_MANAGED flag set are ignored.
+  */
+ void device_links_driver_bound(struct device *dev)
+ {
+@@ -505,15 +589,28 @@ void device_links_driver_bound(struct device *dev)
+       device_links_write_lock();
+ 
+       list_for_each_entry(link, &dev->links.consumers, s_node) {
+-              if (link->flags & DL_FLAG_STATELESS)
++              if (!(link->flags & DL_FLAG_MANAGED))
++                      continue;
++
++              /*
++               * Links created during consumer probe may be in the "consumer
++               * probe" state to start with if the supplier is still probing
++               * when they are created and they may become "active" if the
++               * consumer probe returns first.  Skip them here.
++               */
++              if (link->status == DL_STATE_CONSUMER_PROBE ||
++                  link->status == DL_STATE_ACTIVE)
+                       continue;
+ 
+               WARN_ON(link->status != DL_STATE_DORMANT);
+               WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
++
++              if (link->flags & DL_FLAG_AUTOPROBE_CONSUMER)
++                      driver_deferred_probe_add(link->consumer);
+       }
+ 
+       list_for_each_entry(link, &dev->links.suppliers, c_node) {
+-              if (link->flags & DL_FLAG_STATELESS)
++              if (!(link->flags & DL_FLAG_MANAGED))
+                       continue;
+ 
+               WARN_ON(link->status != DL_STATE_CONSUMER_PROBE);
+@@ -525,6 +622,13 @@ void device_links_driver_bound(struct device *dev)
+       device_links_write_unlock();
+ }
+ 
++static void device_link_drop_managed(struct device_link *link)
++{
++      link->flags &= ~DL_FLAG_MANAGED;
++      WRITE_ONCE(link->status, DL_STATE_NONE);
++      kref_put(&link->kref, __device_link_del);
++}
++
+ /**
+  * __device_links_no_driver - Update links of a device without a driver.
+  * @dev: Device without a driver.
+@@ -535,29 +639,60 @@ void device_links_driver_bound(struct device *dev)
+  * unless they already are in the "supplier unbind in progress" state in which
+  * case they need not be updated.
+  *
+- * Links with the DL_FLAG_STATELESS flag set are ignored.
++ * Links without the DL_FLAG_MANAGED flag set are ignored.
+  */
+ static void __device_links_no_driver(struct device *dev)
+ {
+       struct device_link *link, *ln;
+ 
+       list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
+-              if (link->flags & DL_FLAG_STATELESS)
++              if (!(link->flags & DL_FLAG_MANAGED))
+                       continue;
+ 
+               if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER)
+-                      kref_put(&link->kref, __device_link_del);
+-              else if (link->status != DL_STATE_SUPPLIER_UNBIND)
++                      device_link_drop_managed(link);
++              else if (link->status == DL_STATE_CONSUMER_PROBE ||
++                       link->status == DL_STATE_ACTIVE)
+                       WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
+       }
+ 
+       dev->links.status = DL_DEV_NO_DRIVER;
+ }
+ 
++/**
++ * device_links_no_driver - Update links after failing driver probe.
++ * @dev: Device whose driver has just failed to probe.
++ *
++ * Clean up leftover links to consumers for @dev and invoke
++ * %__device_links_no_driver() to update links to suppliers for it as
++ * appropriate.
++ *
++ * Links without the DL_FLAG_MANAGED flag set are ignored.
++ */
+ void device_links_no_driver(struct device *dev)
+ {
++      struct device_link *link;
++
+       device_links_write_lock();
++
++      list_for_each_entry(link, &dev->links.consumers, s_node) {
++              if (!(link->flags & DL_FLAG_MANAGED))
++                      continue;
++
++              /*
++               * The probe has failed, so if the status of the link is
++               * "consumer probe" or "active", it must have been added by
++               * a probing consumer while this device was still probing.
++               * Change its state to "dormant", as it represents a valid
++               * relationship, but it is not functionally meaningful.
++               */
++              if (link->status == DL_STATE_CONSUMER_PROBE ||
++                  link->status == DL_STATE_ACTIVE)
++                      WRITE_ONCE(link->status, DL_STATE_DORMANT);
++      }
++
+       __device_links_no_driver(dev);
++
+       device_links_write_unlock();
+ }
+ 
+@@ -569,7 +704,7 @@ void device_links_no_driver(struct device *dev)
+  * invoke %__device_links_no_driver() to update links to suppliers for it as
+  * appropriate.
+  *
+- * Links with the DL_FLAG_STATELESS flag set are ignored.
++ * Links without the DL_FLAG_MANAGED flag set are ignored.
+  */
+ void device_links_driver_cleanup(struct device *dev)
+ {
+@@ -578,7 +713,7 @@ void device_links_driver_cleanup(struct device *dev)
+       device_links_write_lock();
+ 
+       list_for_each_entry_safe(link, ln, &dev->links.consumers, s_node) {
+-              if (link->flags & DL_FLAG_STATELESS)
++              if (!(link->flags & DL_FLAG_MANAGED))
+                       continue;
+ 
+               WARN_ON(link->flags & DL_FLAG_AUTOREMOVE_CONSUMER);
+@@ -591,7 +726,7 @@ void device_links_driver_cleanup(struct device *dev)
+                */
+               if (link->status == DL_STATE_SUPPLIER_UNBIND &&
+                   link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
+-                      kref_put(&link->kref, __device_link_del);
++                      device_link_drop_managed(link);
+ 
+               WRITE_ONCE(link->status, DL_STATE_DORMANT);
+       }
+@@ -613,7 +748,7 @@ void device_links_driver_cleanup(struct device *dev)
+  *
+  * Return 'false' if there are no probing or active consumers.
+  *
+- * Links with the DL_FLAG_STATELESS flag set are ignored.
++ * Links without the DL_FLAG_MANAGED flag set are ignored.
+  */
+ bool device_links_busy(struct device *dev)
+ {
+@@ -623,7 +758,7 @@ bool device_links_busy(struct device *dev)
+       device_links_write_lock();
+ 
+       list_for_each_entry(link, &dev->links.consumers, s_node) {
+-              if (link->flags & DL_FLAG_STATELESS)
++              if (!(link->flags & DL_FLAG_MANAGED))
+                       continue;
+ 
+               if (link->status == DL_STATE_CONSUMER_PROBE
+@@ -653,7 +788,7 @@ bool device_links_busy(struct device *dev)
+  * driver to unbind and start over (the consumer will not re-probe as we have
+  * changed the state of the link already).
+  *
+- * Links with the DL_FLAG_STATELESS flag set are ignored.
++ * Links without the DL_FLAG_MANAGED flag set are ignored.
+  */
+ void device_links_unbind_consumers(struct device *dev)
+ {
+@@ -665,7 +800,7 @@ void device_links_unbind_consumers(struct device *dev)
+       list_for_each_entry(link, &dev->links.consumers, s_node) {
+               enum device_link_state status;
+ 
+-              if (link->flags & DL_FLAG_STATELESS)
++              if (!(link->flags & DL_FLAG_MANAGED))
+                       continue;
+ 
+               status = link->status;
+diff --git a/drivers/base/dd.c b/drivers/base/dd.c
+index 5f6416e6ba96..caaeb7910a04 100644
+--- a/drivers/base/dd.c
++++ b/drivers/base/dd.c
+@@ -116,7 +116,7 @@ static void deferred_probe_work_func(struct work_struct *work)
+ }
+ static DECLARE_WORK(deferred_probe_work, deferred_probe_work_func);
+ 
+-static void driver_deferred_probe_add(struct device *dev)
++void driver_deferred_probe_add(struct device *dev)
+ {
+       mutex_lock(&deferred_probe_mutex);
+       if (list_empty(&dev->p->deferred_probe)) {
+diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
+index 303ce7d54a30..2c99f93020bc 100644
+--- a/drivers/base/power/runtime.c
++++ b/drivers/base/power/runtime.c
+@@ -1531,7 +1531,7 @@ void pm_runtime_remove(struct device *dev)
+  * runtime PM references to the device, drop the usage counter of the device
+  * (as many times as needed).
+  *
+- * Links with the DL_FLAG_STATELESS flag set are ignored.
++ * Links with the DL_FLAG_MANAGED flag unset are ignored.
+  *
+  * Since the device is guaranteed to be runtime-active at the point this is
+  * called, nothing else needs to be done here.
+@@ -1548,7 +1548,7 @@ void pm_runtime_clean_up_links(struct device *dev)
+       idx = device_links_read_lock();
+ 
+       list_for_each_entry_rcu(link, &dev->links.consumers, s_node) {
+-              if (link->flags & DL_FLAG_STATELESS)
++              if (!(link->flags & DL_FLAG_MANAGED))
+                       continue;
+ 
+               while (refcount_dec_not_one(&link->rpm_active))
+diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c
+index 92e519d58618..b31e3d3729a6 100644
+--- a/drivers/firmware/efi/runtime-wrappers.c
++++ b/drivers/firmware/efi/runtime-wrappers.c
+@@ -62,7 +62,7 @@ struct efi_runtime_work efi_rts_work;
+       efi_rts_work.status = EFI_ABORTED;                              \
+                                                                       \
+       init_completion(&efi_rts_work.efi_rts_comp);                    \
+-      INIT_WORK_ONSTACK(&efi_rts_work.work, efi_call_rts);            \
++      INIT_WORK(&efi_rts_work.work, efi_call_rts);                    \
+       efi_rts_work.arg1 = _arg1;                                      \
+       efi_rts_work.arg2 = _arg2;                                      \
+       efi_rts_work.arg3 = _arg3;                                      \
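
The one-line change above is a lifetime fix: efi_rts_work is a global (see the
hunk context), so INIT_WORK_ONSTACK(), which is reserved for work items that
live on the enqueuing function's stack, was the wrong initializer. A sketch
contrasting the two; do_work and the surrounding functions are hypothetical:

    #include <linux/workqueue.h>

    static struct work_struct global_work;  /* outlives any one function call */

    static void do_work(struct work_struct *work) { /* ... */ }

    static void enqueue_both(void)
    {
            struct work_struct stack_work;          /* valid only in this frame */

            INIT_WORK(&global_work, do_work);       /* static/heap work items */
            schedule_work(&global_work);

            INIT_WORK_ONSTACK(&stack_work, do_work); /* on-stack items only */
            schedule_work(&stack_work);
            flush_work(&stack_work);                /* must finish before return */
            destroy_work_on_stack(&stack_work);
    }
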
+diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
+index d0a81a03ddbd..8ab8f2350bbc 100644
+--- a/drivers/hid/hid-apple.c
++++ b/drivers/hid/hid-apple.c
+@@ -343,7 +343,8 @@ static int apple_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+               unsigned long **bit, int *max)
+ {
+       if (usage->hid == (HID_UP_CUSTOM | 0x0003) ||
+-                      usage->hid == (HID_UP_MSVENDOR | 0x0003)) {
++                      usage->hid == (HID_UP_MSVENDOR | 0x0003) ||
++                      usage->hid == (HID_UP_HPVENDOR2 | 0x0003)) {
+               /* The fn key on Apple USB keyboards */
+               set_bit(EV_REP, hi->input->evbit);
+               hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_FN);
+diff --git a/drivers/hid/hid-google-hammer.c b/drivers/hid/hid-google-hammer.c
+index 8cb63ea9977d..fab8fd7082e0 100644
+--- a/drivers/hid/hid-google-hammer.c
++++ b/drivers/hid/hid-google-hammer.c
+@@ -124,6 +124,8 @@ static const struct hid_device_id hammer_devices[] = {
+                    USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MAGNEMITE) },
+       { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+                    USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MASTERBALL) },
++      { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
++                   USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MOONBALL) },
+       { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+                    USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_STAFF) },
+       { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index f491092f36ff..b2fff44c8109 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -468,6 +468,7 @@
+ #define USB_DEVICE_ID_GOOGLE_WHISKERS 0x5030
+ #define USB_DEVICE_ID_GOOGLE_MASTERBALL       0x503c
+ #define USB_DEVICE_ID_GOOGLE_MAGNEMITE        0x503d
++#define USB_DEVICE_ID_GOOGLE_MOONBALL 0x5044
+ 
+ #define USB_VENDOR_ID_GOTOP           0x08f2
+ #define USB_DEVICE_ID_SUPER_Q2                0x007f
+diff --git a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
+index 10af8585c820..95052373a828 100644
+--- a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
++++ b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
+@@ -341,6 +341,14 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = {
+               },
+               .driver_data = (void *)&sipodev_desc
+       },
++      {
++              .ident = "Trekstor SURFBOOK E11B",
++              .matches = {
++                      DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TREKSTOR"),
++                      DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "SURFBOOK E11B"),
++              },
++              .driver_data = (void *)&sipodev_desc
++      },
+       {
+               .ident = "Direkt-Tek DTLAPY116-2",
+               .matches = {
+diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
+index 694d0828215d..b7f809aa40c2 100644
+--- a/drivers/mmc/host/Kconfig
++++ b/drivers/mmc/host/Kconfig
+@@ -935,6 +935,8 @@ config MMC_SDHCI_XENON
+ config MMC_SDHCI_OMAP
+       tristate "TI SDHCI Controller Support"
+       depends on MMC_SDHCI_PLTFM && OF
++      select THERMAL
++      imply TI_SOC_THERMAL
+       help
+         This selects the Secure Digital Host Controller Interface (SDHCI)
+         support present in TI's DRA7 SOCs. The controller supports
+diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c
+index d02f5cf76b3d..e9793d8e83a0 100644
+--- a/drivers/mmc/host/sdhci-omap.c
++++ b/drivers/mmc/host/sdhci-omap.c
+@@ -27,6 +27,7 @@
+ #include <linux/regulator/consumer.h>
+ #include <linux/pinctrl/consumer.h>
+ #include <linux/sys_soc.h>
++#include <linux/thermal.h>
+ 
+ #include "sdhci-pltfm.h"
+ 
+@@ -115,6 +116,7 @@ struct sdhci_omap_host {
+ 
+       struct pinctrl          *pinctrl;
+       struct pinctrl_state    **pinctrl_state;
++      bool                    is_tuning;
+ };
+ 
+ static void sdhci_omap_start_clock(struct sdhci_omap_host *omap_host);
+@@ -289,15 +291,19 @@ static int sdhci_omap_execute_tuning(struct mmc_host *mmc, u32 opcode)
+       struct sdhci_host *host = mmc_priv(mmc);
+       struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+       struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
++      struct thermal_zone_device *thermal_dev;
+       struct device *dev = omap_host->dev;
+       struct mmc_ios *ios = &mmc->ios;
+       u32 start_window = 0, max_window = 0;
++      bool single_point_failure = false;
+       bool dcrc_was_enabled = false;
+       u8 cur_match, prev_match = 0;
+       u32 length = 0, max_len = 0;
+       u32 phase_delay = 0;
++      int temperature;
+       int ret = 0;
+       u32 reg;
++      int i;
+ 
+       pltfm_host = sdhci_priv(host);
+       omap_host = sdhci_pltfm_priv(pltfm_host);
+@@ -311,6 +317,16 @@ static int sdhci_omap_execute_tuning(struct mmc_host *mmc, u32 opcode)
+       if (ios->timing == MMC_TIMING_UHS_SDR50 && !(reg & CAPA2_TSDR50))
+               return 0;
+ 
++      thermal_dev = thermal_zone_get_zone_by_name("cpu_thermal");
++      if (IS_ERR(thermal_dev)) {
++              dev_err(dev, "Unable to get thermal zone for tuning\n");
++              return PTR_ERR(thermal_dev);
++      }
++
++      ret = thermal_zone_get_temp(thermal_dev, &temperature);
++      if (ret)
++              return ret;
++
+       reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_DLL);
+       reg |= DLL_SWT;
+       sdhci_omap_writel(omap_host, SDHCI_OMAP_DLL, reg);
+@@ -326,6 +342,13 @@ static int sdhci_omap_execute_tuning(struct mmc_host *mmc, u32 opcode)
+               dcrc_was_enabled = true;
+       }
+ 
++      omap_host->is_tuning = true;
++
++      /*
++       * Stage 1: Search for a maximum pass window ignoring any
++       * single point failures. If the tuning value ends up
++       * near it, move away from it in stage 2 below
++       */
+       while (phase_delay <= MAX_PHASE_DELAY) {
+               sdhci_omap_set_dll(omap_host, phase_delay);
+ 
+@@ -333,10 +356,15 @@ static int sdhci_omap_execute_tuning(struct mmc_host *mmc, u32 opcode)
+               if (cur_match) {
+                       if (prev_match) {
+                               length++;
++                      } else if (single_point_failure) {
++                              /* ignore single point failure */
++                              length++;
+                       } else {
+                               start_window = phase_delay;
+                               length = 1;
+                       }
++              } else {
++                      single_point_failure = prev_match;
+               }
+ 
+               if (length > max_len) {
+@@ -354,18 +382,84 @@ static int sdhci_omap_execute_tuning(struct mmc_host *mmc, u32 opcode)
+               goto tuning_error;
+       }
+ 
++      /*
++       * Assign tuning value as a ratio of maximum pass window based
++       * on temperature
++       */
++      if (temperature < -20000)
++              phase_delay = min(max_window + 4 * (max_len - 1) - 24,
++                                max_window +
++                                DIV_ROUND_UP(13 * max_len, 16) * 4);
++      else if (temperature < 20000)
++              phase_delay = max_window + DIV_ROUND_UP(9 * max_len, 16) * 4;
++      else if (temperature < 40000)
++              phase_delay = max_window + DIV_ROUND_UP(8 * max_len, 16) * 4;
++      else if (temperature < 70000)
++              phase_delay = max_window + DIV_ROUND_UP(7 * max_len, 16) * 4;
++      else if (temperature < 90000)
++              phase_delay = max_window + DIV_ROUND_UP(5 * max_len, 16) * 4;
++      else if (temperature < 120000)
++              phase_delay = max_window + DIV_ROUND_UP(4 * max_len, 16) * 4;
++      else
++              phase_delay = max_window + DIV_ROUND_UP(3 * max_len, 16) * 4;
++
++      /*
++       * Stage 2: Search for a single point failure near the chosen tuning
++       * value in two steps. First in the +3 to +10 range and then in the
++       * +2 to -10 range. If found, move away from it in the appropriate
++       * direction by the appropriate amount depending on the temperature.
++       */
++      for (i = 3; i <= 10; i++) {
++              sdhci_omap_set_dll(omap_host, phase_delay + i);
++
++              if (mmc_send_tuning(mmc, opcode, NULL)) {
++                      if (temperature < 10000)
++                              phase_delay += i + 6;
++                      else if (temperature < 20000)
++                              phase_delay += i - 12;
++                      else if (temperature < 70000)
++                              phase_delay += i - 8;
++                      else
++                              phase_delay += i - 6;
++
++                      goto single_failure_found;
++              }
++      }
++
++      for (i = 2; i >= -10; i--) {
++              sdhci_omap_set_dll(omap_host, phase_delay + i);
++
++              if (mmc_send_tuning(mmc, opcode, NULL)) {
++                      if (temperature < 10000)
++                              phase_delay += i + 12;
++                      else if (temperature < 20000)
++                              phase_delay += i + 8;
++                      else if (temperature < 70000)
++                              phase_delay += i + 8;
++                      else if (temperature < 90000)
++                              phase_delay += i + 10;
++                      else
++                              phase_delay += i + 12;
++
++                      goto single_failure_found;
++              }
++      }
++
++single_failure_found:
+       reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_AC12);
+       if (!(reg & AC12_SCLK_SEL)) {
+               ret = -EIO;
+               goto tuning_error;
+       }
+ 
+-      phase_delay = max_window + 4 * (max_len >> 1);
+       sdhci_omap_set_dll(omap_host, phase_delay);
+ 
++      omap_host->is_tuning = false;
++
+       goto ret;
+ 
+ tuning_error:
++      omap_host->is_tuning = false;
+       dev_err(dev, "Tuning failed\n");
+       sdhci_omap_disable_tuning(omap_host);
+ 
+@@ -695,6 +789,55 @@ static void sdhci_omap_set_uhs_signaling(struct sdhci_host *host,
+       sdhci_omap_start_clock(omap_host);
+ }
+ 
++void sdhci_omap_reset(struct sdhci_host *host, u8 mask)
++{
++      struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
++      struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
++
++      /* Don't reset data lines during tuning operation */
++      if (omap_host->is_tuning)
++              mask &= ~SDHCI_RESET_DATA;
++
++      sdhci_reset(host, mask);
++}
++
++#define CMD_ERR_MASK (SDHCI_INT_CRC | SDHCI_INT_END_BIT | SDHCI_INT_INDEX |\
++                    SDHCI_INT_TIMEOUT)
++#define CMD_MASK (CMD_ERR_MASK | SDHCI_INT_RESPONSE)
++
++static u32 sdhci_omap_irq(struct sdhci_host *host, u32 intmask)
++{
++      struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
++      struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
++
++      if (omap_host->is_tuning && host->cmd && !host->data_early &&
++          (intmask & CMD_ERR_MASK)) {
++
++              /*
++               * Since we are not resetting data lines during tuning
++               * operation, data error or data complete interrupts
++               * might still arrive. Mark this request as a failure
++               * but still wait for the data interrupt
++               */
++              if (intmask & SDHCI_INT_TIMEOUT)
++                      host->cmd->error = -ETIMEDOUT;
++              else
++                      host->cmd->error = -EILSEQ;
++
++              host->cmd = NULL;
++
++              /*
++               * Sometimes command error interrupts and command complete
++               * interrupt will arrive together. Clear all command related
++               * interrupts here.
++               */
++              sdhci_writel(host, intmask & CMD_MASK, SDHCI_INT_STATUS);
++              intmask &= ~CMD_MASK;
++      }
++
++      return intmask;
++}
++
+ static struct sdhci_ops sdhci_omap_ops = {
+       .set_clock = sdhci_omap_set_clock,
+       .set_power = sdhci_omap_set_power,
+@@ -703,8 +846,9 @@ static struct sdhci_ops sdhci_omap_ops = {
+       .get_min_clock = sdhci_omap_get_min_clock,
+       .set_bus_width = sdhci_omap_set_bus_width,
+       .platform_send_init_74_clocks = sdhci_omap_init_74_clocks,
+-      .reset = sdhci_reset,
++      .reset = sdhci_omap_reset,
+       .set_uhs_signaling = sdhci_omap_set_uhs_signaling,
++      .irq = sdhci_omap_irq,
+ };
+ 
+ static int sdhci_omap_set_capabilities(struct sdhci_omap_host *omap_host)
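
To make the temperature-banded arithmetic in the tuning hunks above concrete,
here is a worked example with assumed values: a stage-1 pass window starting at
phase delay 40 with max_len = 16 passing points, read at 80000 millidegrees
(80 C, the 70000-90000 band). The numbers are illustrative only, not from the
patch.

    #include <linux/kernel.h>       /* DIV_ROUND_UP() */

    static u32 pick_tuning_value(void)
    {
            u32 max_window = 40, max_len = 16;      /* assumed stage-1 result */
            int temperature = 80000;                /* 80 C in millidegrees */
            u32 phase_delay;

            if (temperature >= 70000 && temperature < 90000)
                    /* take 5/16 of the window, in 4-unit DLL steps */
                    phase_delay = max_window + DIV_ROUND_UP(5 * max_len, 16) * 4;
            else
                    phase_delay = max_window;       /* other bands elided */

            /* = 40 + 5 * 4 = 60; the removed midpoint formula,
             * max_window + 4 * (max_len >> 1), would have given 72. */
            return phase_delay;
    }
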
+diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
+index 6b19607a4caa..9deec13d98e9 100644
+--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
++++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
+@@ -309,6 +309,7 @@ static int set_hw_ioctxt(struct hinic_hwdev *hwdev, unsigned int rq_depth,
+       }
+ 
+       hw_ioctxt.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
++      hw_ioctxt.ppf_idx = HINIC_HWIF_PPF_IDX(hwif);
+ 
+       hw_ioctxt.set_cmdq_depth = HW_IOCTXT_SET_CMDQ_DEPTH_DEFAULT;
+       hw_ioctxt.cmdq_depth = 0;
+diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
+index 0f5563f3b779..a011fd2d2627 100644
+--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
++++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
+@@ -104,8 +104,8 @@ struct hinic_cmd_hw_ioctxt {
+ 
+       u8      rsvd2;
+       u8      rsvd3;
++      u8      ppf_idx;
+       u8      rsvd4;
+-      u8      rsvd5;
+ 
+       u16     rq_depth;
+       u16     rx_buf_sz_idx;
+diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
+index 5b4760c0e9f5..f683ccbdfca0 100644
+--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
++++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
+@@ -146,6 +146,7 @@
+ #define HINIC_HWIF_FUNC_IDX(hwif)       ((hwif)->attr.func_idx)
+ #define HINIC_HWIF_PCI_INTF(hwif)       ((hwif)->attr.pci_intf_idx)
+ #define HINIC_HWIF_PF_IDX(hwif)         ((hwif)->attr.pf_idx)
++#define HINIC_HWIF_PPF_IDX(hwif)        ((hwif)->attr.ppf_idx)
+ 
+ #define HINIC_FUNC_TYPE(hwif)           ((hwif)->attr.func_type)
+ #define HINIC_IS_PF(hwif)               (HINIC_FUNC_TYPE(hwif) == HINIC_PF)
+diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
+index 6c84f83ec283..d46cfd4fbbbc 100644
+--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
++++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
+@@ -103,6 +103,7 @@ struct hinic_rq {
+ 
+       struct hinic_wq         *wq;
+ 
++      struct cpumask          affinity_mask;
+       u32                     irq;
+       u16                     msix_entry;
+ 
+diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
+index 06b24a92ed7d..3467d84d96c3 100644
+--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c
++++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
+@@ -414,7 +414,6 @@ static int rx_request_irq(struct hinic_rxq *rxq)
+       struct hinic_hwdev *hwdev = nic_dev->hwdev;
+       struct hinic_rq *rq = rxq->rq;
+       struct hinic_qp *qp;
+-      struct cpumask mask;
+       int err;
+ 
+       rx_add_napi(rxq);
+@@ -431,8 +430,8 @@ static int rx_request_irq(struct hinic_rxq *rxq)
+       }
+ 
+       qp = container_of(rq, struct hinic_qp, rq);
+-      cpumask_set_cpu(qp->q_id % num_online_cpus(), &mask);
+-      return irq_set_affinity_hint(rq->irq, &mask);
++      cpumask_set_cpu(qp->q_id % num_online_cpus(), &rq->affinity_mask);
++      return irq_set_affinity_hint(rq->irq, &rq->affinity_mask);
+ }
+ 
+ static void rx_free_irq(struct hinic_rxq *rxq)
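
The hinic_rx.c hunks move the affinity mask from the stack of rx_request_irq()
into struct hinic_rq because irq_set_affinity_hint() stores the pointer it is
given rather than copying the mask, so a stack-based mask becomes a dangling
hint as soon as the function returns. A sketch of the anti-pattern being
removed (bad_request is a hypothetical name):

    #include <linux/interrupt.h>

    static int bad_request(unsigned int irq)
    {
            struct cpumask mask;    /* gone once this function returns */

            cpumask_set_cpu(0, &mask);
            /* The kernel keeps this pointer for /proc/irq/<n>/affinity_hint,
             * so after return it points into a dead stack frame. */
            return irq_set_affinity_hint(irq, &mask);
    }
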
+diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
+index 9de59facec21..a5525bf977e2 100644
+--- a/drivers/net/ethernet/micrel/ks8851_mll.c
++++ b/drivers/net/ethernet/micrel/ks8851_mll.c
+@@ -832,14 +832,17 @@ static irqreturn_t ks_irq(int irq, void *pw)
+ {
+       struct net_device *netdev = pw;
+       struct ks_net *ks = netdev_priv(netdev);
++      unsigned long flags;
+       u16 status;
+ 
++      spin_lock_irqsave(&ks->statelock, flags);
+       /*this should be the first in IRQ handler */
+       ks_save_cmd_reg(ks);
+ 
+       status = ks_rdreg16(ks, KS_ISR);
+       if (unlikely(!status)) {
+               ks_restore_cmd_reg(ks);
++              spin_unlock_irqrestore(&ks->statelock, flags);
+               return IRQ_NONE;
+       }
+ 
+@@ -865,6 +868,7 @@ static irqreturn_t ks_irq(int irq, void *pw)
+               ks->netdev->stats.rx_over_errors++;
+       /* this should be the last in IRQ handler*/
+       ks_restore_cmd_reg(ks);
++      spin_unlock_irqrestore(&ks->statelock, flags);
+       return IRQ_HANDLED;
+ }
+ 
+@@ -934,6 +938,7 @@ static int ks_net_stop(struct net_device *netdev)
+ 
+       /* shutdown RX/TX QMU */
+       ks_disable_qmu(ks);
++      ks_disable_int(ks);
+ 
+       /* set powermode to soft power down to save power */
+       ks_set_powermode(ks, PMECR_PM_SOFTDOWN);
+@@ -990,10 +995,9 @@ static netdev_tx_t ks_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+ {
+       netdev_tx_t retv = NETDEV_TX_OK;
+       struct ks_net *ks = netdev_priv(netdev);
++      unsigned long flags;
+ 
+-      disable_irq(netdev->irq);
+-      ks_disable_int(ks);
+-      spin_lock(&ks->statelock);
++      spin_lock_irqsave(&ks->statelock, flags);
+ 
+       /* Extra space are required:
+       *  4 byte for alignment, 4 for status/length, 4 for CRC
+@@ -1007,9 +1011,7 @@ static netdev_tx_t ks_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+               dev_kfree_skb(skb);
+       } else
+               retv = NETDEV_TX_BUSY;
+-      spin_unlock(&ks->statelock);
+-      ks_enable_int(ks);
+-      enable_irq(netdev->irq);
++      spin_unlock_irqrestore(&ks->statelock, flags);
+       return retv;
+ }
+ 
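
The ks8851_mll changes above converge the xmit path and the interrupt handler
on one spin_lock_irqsave()-protected critical section (ks->statelock) instead
of the heavier disable_irq()/ks_disable_int() bracketing, since both paths
touch the chip's shared command register. A sketch of that generic pattern;
statelock and the function names are stand-ins:

    #include <linux/interrupt.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(statelock);      /* stand-in for ks->statelock */

    static void xmit_path(void)             /* process context */
    {
            unsigned long flags;

            spin_lock_irqsave(&statelock, flags);   /* also masks local IRQs */
            /* ... touch shared device registers ... */
            spin_unlock_irqrestore(&statelock, flags);
    }

    static irqreturn_t irq_path(int irq, void *data)  /* hard-IRQ context */
    {
            unsigned long flags;

            spin_lock_irqsave(&statelock, flags);   /* serializes with xmit_path() */
            /* ... touch the same registers ... */
            spin_unlock_irqrestore(&statelock, flags);
            return IRQ_HANDLED;
    }
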
+diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+index 4c476fac7835..37786affa975 100644
+--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
++++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+@@ -22,25 +22,6 @@
+ #include "rmnet_vnd.h"
+ #include "rmnet_private.h"
+ 
+-/* Locking scheme -
+- * The shared resource which needs to be protected is realdev->rx_handler_data.
+- * For the writer path, this is using rtnl_lock(). The writer paths are
+- * rmnet_newlink(), rmnet_dellink() and rmnet_force_unassociate_device(). These
+- * paths are already called with rtnl_lock() acquired in. There is also an
+- * ASSERT_RTNL() to ensure that we are calling with rtnl acquired. For
+- * dereference here, we will need to use rtnl_dereference(). Dev list writing
+- * needs to happen with rtnl_lock() acquired for netdev_master_upper_dev_link().
+- * For the reader path, the real_dev->rx_handler_data is called in the TX / RX
+- * path. We only need rcu_read_lock() for these scenarios. In these cases,
+- * the rcu_read_lock() is held in __dev_queue_xmit() and
+- * netif_receive_skb_internal(), so readers need to use rcu_dereference_rtnl()
+- * to get the relevant information. For dev list reading, we again acquire
+- * rcu_read_lock() in rmnet_dellink() for netdev_master_upper_dev_get_rcu().
+- * We also use unregister_netdevice_many() to free all rmnet devices in
+- * rmnet_force_unassociate_device() so we dont lose the rtnl_lock() and free in
+- * same context.
+- */
+-
+ /* Local Definitions and Declarations */
+ 
+ static const struct nla_policy rmnet_policy[IFLA_RMNET_MAX + 1] = {
+@@ -60,9 +41,10 @@ rmnet_get_port_rtnl(const struct net_device *real_dev)
+       return rtnl_dereference(real_dev->rx_handler_data);
+ }
+ 
+-static int rmnet_unregister_real_device(struct net_device *real_dev,
+-                                      struct rmnet_port *port)
++static int rmnet_unregister_real_device(struct net_device *real_dev)
+ {
++      struct rmnet_port *port = rmnet_get_port_rtnl(real_dev);
++
+       if (port->nr_rmnet_devs)
+               return -EINVAL;
+ 
+@@ -70,9 +52,6 @@ static int rmnet_unregister_real_device(struct net_device *real_dev,
+ 
+       kfree(port);
+ 
+-      /* release reference on real_dev */
+-      dev_put(real_dev);
+-
+       netdev_dbg(real_dev, "Removed from rmnet\n");
+       return 0;
+ }
+@@ -98,9 +77,6 @@ static int rmnet_register_real_device(struct net_device *real_dev)
+               return -EBUSY;
+       }
+ 
+-      /* hold on to real dev for MAP data */
+-      dev_hold(real_dev);
+-
+       for (entry = 0; entry < RMNET_MAX_LOGICAL_EP; entry++)
+               INIT_HLIST_HEAD(&port->muxed_ep[entry]);
+ 
+@@ -108,28 +84,33 @@ static int rmnet_register_real_device(struct net_device *real_dev)
+       return 0;
+ }
+ 
+-static void rmnet_unregister_bridge(struct net_device *dev,
+-                                  struct rmnet_port *port)
++static void rmnet_unregister_bridge(struct rmnet_port *port)
+ {
+-      struct rmnet_port *bridge_port;
+-      struct net_device *bridge_dev;
++      struct net_device *bridge_dev, *real_dev, *rmnet_dev;
++      struct rmnet_port *real_port;
+ 
+       if (port->rmnet_mode != RMNET_EPMODE_BRIDGE)
+               return;
+ 
+-      /* bridge slave handling */
++      rmnet_dev = port->rmnet_dev;
+       if (!port->nr_rmnet_devs) {
+-              bridge_dev = port->bridge_ep;
++              /* bridge device */
++              real_dev = port->bridge_ep;
++              bridge_dev = port->dev;
+ 
+-              bridge_port = rmnet_get_port_rtnl(bridge_dev);
+-              bridge_port->bridge_ep = NULL;
+-              bridge_port->rmnet_mode = RMNET_EPMODE_VND;
++              real_port = rmnet_get_port_rtnl(real_dev);
++              real_port->bridge_ep = NULL;
++              real_port->rmnet_mode = RMNET_EPMODE_VND;
+       } else {
++              /* real device */
+               bridge_dev = port->bridge_ep;
+ 
+-              bridge_port = rmnet_get_port_rtnl(bridge_dev);
+-              rmnet_unregister_real_device(bridge_dev, bridge_port);
++              port->bridge_ep = NULL;
++              port->rmnet_mode = RMNET_EPMODE_VND;
+       }
++
++      netdev_upper_dev_unlink(bridge_dev, rmnet_dev);
++      rmnet_unregister_real_device(bridge_dev);
+ }
+ 
+ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
+@@ -144,6 +125,11 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
+       int err = 0;
+       u16 mux_id;
+ 
++      if (!tb[IFLA_LINK]) {
++              NL_SET_ERR_MSG_MOD(extack, "link not specified");
++              return -EINVAL;
++      }
++
+       real_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
+       if (!real_dev || !dev)
+               return -ENODEV;
+@@ -166,7 +152,12 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
+       if (err)
+               goto err1;
+ 
++      err = netdev_upper_dev_link(real_dev, dev, extack);
++      if (err < 0)
++              goto err2;
++
+       port->rmnet_mode = mode;
++      port->rmnet_dev = dev;
+ 
+       hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]);
+ 
+@@ -182,8 +173,11 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
+ 
+       return 0;
+ 
++err2:
++      unregister_netdevice(dev);
++      rmnet_vnd_dellink(mux_id, port, ep);
+ err1:
+-      rmnet_unregister_real_device(real_dev, port);
++      rmnet_unregister_real_device(real_dev);
+ err0:
+       kfree(ep);
+       return err;
+@@ -192,77 +186,74 @@ err0:
+ static void rmnet_dellink(struct net_device *dev, struct list_head *head)
+ {
+       struct rmnet_priv *priv = netdev_priv(dev);
+-      struct net_device *real_dev;
++      struct net_device *real_dev, *bridge_dev;
++      struct rmnet_port *real_port, *bridge_port;
+       struct rmnet_endpoint *ep;
+-      struct rmnet_port *port;
+-      u8 mux_id;
++      u8 mux_id = priv->mux_id;
+ 
+       real_dev = priv->real_dev;
+ 
+-      if (!real_dev || !rmnet_is_real_dev_registered(real_dev))
++      if (!rmnet_is_real_dev_registered(real_dev))
+               return;
+ 
+-      port = rmnet_get_port_rtnl(real_dev);
+-
+-      mux_id = rmnet_vnd_get_mux(dev);
++      real_port = rmnet_get_port_rtnl(real_dev);
++      bridge_dev = real_port->bridge_ep;
++      if (bridge_dev) {
++              bridge_port = rmnet_get_port_rtnl(bridge_dev);
++              rmnet_unregister_bridge(bridge_port);
++      }
+ 
+-      ep = rmnet_get_endpoint(port, mux_id);
++      ep = rmnet_get_endpoint(real_port, mux_id);
+       if (ep) {
+               hlist_del_init_rcu(&ep->hlnode);
+-              rmnet_unregister_bridge(dev, port);
+-              rmnet_vnd_dellink(mux_id, port, ep);
++              rmnet_vnd_dellink(mux_id, real_port, ep);
+               kfree(ep);
+       }
+-      rmnet_unregister_real_device(real_dev, port);
+ 
++      netdev_upper_dev_unlink(real_dev, dev);
++      rmnet_unregister_real_device(real_dev);
+       unregister_netdevice_queue(dev, head);
+ }
+ 
+-static void rmnet_force_unassociate_device(struct net_device *dev)
++static void rmnet_force_unassociate_device(struct net_device *real_dev)
+ {
+-      struct net_device *real_dev = dev;
+       struct hlist_node *tmp_ep;
+       struct rmnet_endpoint *ep;
+       struct rmnet_port *port;
+       unsigned long bkt_ep;
+       LIST_HEAD(list);
+ 
+-      if (!rmnet_is_real_dev_registered(real_dev))
+-              return;
+-
+-      ASSERT_RTNL();
+-
+-      port = rmnet_get_port_rtnl(dev);
+-
+-      rcu_read_lock();
+-      rmnet_unregister_bridge(dev, port);
+-
+-      hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) {
+-              unregister_netdevice_queue(ep->egress_dev, &list);
+-              rmnet_vnd_dellink(ep->mux_id, port, ep);
++      port = rmnet_get_port_rtnl(real_dev);
+ 
+-              hlist_del_init_rcu(&ep->hlnode);
+-              kfree(ep);
++      if (port->nr_rmnet_devs) {
++              /* real device */
++              rmnet_unregister_bridge(port);
++              hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) {
++                      unregister_netdevice_queue(ep->egress_dev, &list);
++                      netdev_upper_dev_unlink(real_dev, ep->egress_dev);
++                      rmnet_vnd_dellink(ep->mux_id, port, ep);
++                      hlist_del_init_rcu(&ep->hlnode);
++                      kfree(ep);
++              }
++              rmnet_unregister_real_device(real_dev);
++              unregister_netdevice_many(&list);
++      } else {
++              rmnet_unregister_bridge(port);
+       }
+-
+-      rcu_read_unlock();
+-      unregister_netdevice_many(&list);
+-
+-      rmnet_unregister_real_device(real_dev, port);
+ }
+ 
+ static int rmnet_config_notify_cb(struct notifier_block *nb,
+                                 unsigned long event, void *data)
+ {
+-      struct net_device *dev = netdev_notifier_info_to_dev(data);
++      struct net_device *real_dev = netdev_notifier_info_to_dev(data);
+ 
+-      if (!dev)
++      if (!rmnet_is_real_dev_registered(real_dev))
+               return NOTIFY_DONE;
+ 
+       switch (event) {
+       case NETDEV_UNREGISTER:
+-              netdev_dbg(dev, "Kernel unregister\n");
+-              rmnet_force_unassociate_device(dev);
++              netdev_dbg(real_dev, "Kernel unregister\n");
++              rmnet_force_unassociate_device(real_dev);
+               break;
+ 
+       default:
+@@ -304,16 +295,18 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
+       if (!dev)
+               return -ENODEV;
+ 
+-      real_dev = __dev_get_by_index(dev_net(dev),
+-                                    nla_get_u32(tb[IFLA_LINK]));
+-
+-      if (!real_dev || !rmnet_is_real_dev_registered(real_dev))
++      real_dev = priv->real_dev;
++      if (!rmnet_is_real_dev_registered(real_dev))
+               return -ENODEV;
+ 
+       port = rmnet_get_port_rtnl(real_dev);
+ 
+       if (data[IFLA_RMNET_MUX_ID]) {
+               mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);
++              if (rmnet_get_endpoint(port, mux_id)) {
++                      NL_SET_ERR_MSG_MOD(extack, "MUX ID already exists");
++                      return -EINVAL;
++              }
+               ep = rmnet_get_endpoint(port, priv->mux_id);
+               if (!ep)
+                       return -ENODEV;
+@@ -388,11 +381,10 @@ struct rtnl_link_ops rmnet_link_ops __read_mostly = {
+       .fill_info      = rmnet_fill_info,
+ };
+ 
+-/* Needs either rcu_read_lock() or rtnl lock */
+-struct rmnet_port *rmnet_get_port(struct net_device *real_dev)
++struct rmnet_port *rmnet_get_port_rcu(struct net_device *real_dev)
+ {
+       if (rmnet_is_real_dev_registered(real_dev))
+-              return rcu_dereference_rtnl(real_dev->rx_handler_data);
++              return rcu_dereference_bh(real_dev->rx_handler_data);
+       else
+               return NULL;
+ }
+@@ -418,7 +410,7 @@ int rmnet_add_bridge(struct net_device *rmnet_dev,
+       struct rmnet_port *port, *slave_port;
+       int err;
+ 
+-      port = rmnet_get_port(real_dev);
++      port = rmnet_get_port_rtnl(real_dev);
+ 
+       /* If there is more than one rmnet dev attached, its probably being
+        * used for muxing. Skip the briding in that case
+@@ -426,6 +418,9 @@ int rmnet_add_bridge(struct net_device *rmnet_dev,
+       if (port->nr_rmnet_devs > 1)
+               return -EINVAL;
+ 
++      if (port->rmnet_mode != RMNET_EPMODE_VND)
++              return -EINVAL;
++
+       if (rmnet_is_real_dev_registered(slave_dev))
+               return -EBUSY;
+ 
+@@ -433,9 +428,17 @@ int rmnet_add_bridge(struct net_device *rmnet_dev,
+       if (err)
+               return -EBUSY;
+ 
+-      slave_port = rmnet_get_port(slave_dev);
++      err = netdev_master_upper_dev_link(slave_dev, rmnet_dev, NULL, NULL,
++                                         extack);
++      if (err) {
++              rmnet_unregister_real_device(slave_dev);
++              return err;
++      }
++
++      slave_port = rmnet_get_port_rtnl(slave_dev);
+       slave_port->rmnet_mode = RMNET_EPMODE_BRIDGE;
+       slave_port->bridge_ep = real_dev;
++      slave_port->rmnet_dev = rmnet_dev;
+ 
+       port->rmnet_mode = RMNET_EPMODE_BRIDGE;
+       port->bridge_ep = slave_dev;
+@@ -447,16 +450,9 @@ int rmnet_add_bridge(struct net_device *rmnet_dev,
+ int rmnet_del_bridge(struct net_device *rmnet_dev,
+                    struct net_device *slave_dev)
+ {
+-      struct rmnet_priv *priv = netdev_priv(rmnet_dev);
+-      struct net_device *real_dev = priv->real_dev;
+-      struct rmnet_port *port, *slave_port;
++      struct rmnet_port *port = rmnet_get_port_rtnl(slave_dev);
+ 
+-      port = rmnet_get_port(real_dev);
+-      port->rmnet_mode = RMNET_EPMODE_VND;
+-      port->bridge_ep = NULL;
+-
+-      slave_port = rmnet_get_port(slave_dev);
+-      rmnet_unregister_real_device(slave_dev, slave_port);
++      rmnet_unregister_bridge(port);
+ 
+       netdev_dbg(slave_dev, "removed from rmnet as slave\n");
+       return 0;
+@@ -482,8 +478,8 @@ static int __init rmnet_init(void)
+ 
+ static void __exit rmnet_exit(void)
+ {
+-      unregister_netdevice_notifier(&rmnet_dev_notifier);
+       rtnl_link_unregister(&rmnet_link_ops);
++      unregister_netdevice_notifier(&rmnet_dev_notifier);
+ }
+ 
+ module_init(rmnet_init)
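
The rmnet rework above stops taking a bare reference on the real device (the deleted dev_hold()/dev_put() pair) and instead records the dependency through the netdev upper/lower adjacency API, which holds its own references and emits the usual notifier events. A reduced sketch of the pairing; the helper names are hypothetical, only the two netdev_* calls are the stock 4.19 API:

    static int rmnet_attach(struct net_device *real_dev,
                            struct net_device *rmnet_dev,
                            struct netlink_ext_ack *extack)
    {
            /* The adjacency pins both devices, so no explicit
             * dev_hold(real_dev) is required anymore.
             */
            return netdev_upper_dev_link(real_dev, rmnet_dev, extack);
    }

    static void rmnet_detach(struct net_device *real_dev,
                             struct net_device *rmnet_dev)
    {
            /* Drops the link and its references; runs under RTNL. */
            netdev_upper_dev_unlink(real_dev, rmnet_dev);
    }
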
+diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
+index 34ac45a774e7..7691d1abe6e2 100644
+--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
++++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
+@@ -37,6 +37,7 @@ struct rmnet_port {
+       u8 rmnet_mode;
+       struct hlist_head muxed_ep[RMNET_MAX_LOGICAL_EP];
+       struct net_device *bridge_ep;
++      struct net_device *rmnet_dev;
+ };
+ 
+ extern struct rtnl_link_ops rmnet_link_ops;
+@@ -74,7 +75,7 @@ struct rmnet_priv {
+       struct rmnet_priv_stats stats;
+ };
+ 
+-struct rmnet_port *rmnet_get_port(struct net_device *real_dev);
++struct rmnet_port *rmnet_get_port_rcu(struct net_device *real_dev);
+ struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id);
+ int rmnet_add_bridge(struct net_device *rmnet_dev,
+                    struct net_device *slave_dev,
+diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
+index 11167abe5934..c9d43bad1e2f 100644
+--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
++++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
+@@ -168,6 +168,9 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
+ static void
+ rmnet_bridge_handler(struct sk_buff *skb, struct net_device *bridge_dev)
+ {
++      if (skb_mac_header_was_set(skb))
++              skb_push(skb, skb->mac_len);
++
+       if (bridge_dev) {
+               skb->dev = bridge_dev;
+               dev_queue_xmit(skb);
+@@ -193,7 +196,7 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
+               return RX_HANDLER_PASS;
+ 
+       dev = skb->dev;
+-      port = rmnet_get_port(dev);
++      port = rmnet_get_port_rcu(dev);
+ 
+       switch (port->rmnet_mode) {
+       case RMNET_EPMODE_VND:
+@@ -226,7 +229,7 @@ void rmnet_egress_handler(struct sk_buff *skb)
+       skb->dev = priv->real_dev;
+       mux_id = priv->mux_id;
+ 
+-      port = rmnet_get_port(skb->dev);
++      port = rmnet_get_port_rcu(skb->dev);
+       if (!port)
+               goto drop;
+ 
+diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+index d11c16aeb19a..ed02926f5fe9 100644
+--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
++++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+@@ -276,14 +276,6 @@ int rmnet_vnd_dellink(u8 id, struct rmnet_port *port,
+       return 0;
+ }
+ 
+-u8 rmnet_vnd_get_mux(struct net_device *rmnet_dev)
+-{
+-      struct rmnet_priv *priv;
+-
+-      priv = netdev_priv(rmnet_dev);
+-      return priv->mux_id;
+-}
+-
+ int rmnet_vnd_do_flow_control(struct net_device *rmnet_dev, int enable)
+ {
+       netdev_dbg(rmnet_dev, "Setting VND TX queue state to %d\n", enable);
+diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h
+index 71e4c3286951..22cfdfd5625a 100644
+--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h
++++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h
+@@ -25,6 +25,5 @@ int rmnet_vnd_dellink(u8 id, struct rmnet_port *port,
+                     struct rmnet_endpoint *ep);
+ void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev);
+ void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev);
+-u8 rmnet_vnd_get_mux(struct net_device *rmnet_dev);
+ void rmnet_vnd_setup(struct net_device *dev);
+ #endif /* _RMNET_VND_H_ */
+diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
+index cc8fbf398c0d..d47151dbe804 100644
+--- a/drivers/net/ethernet/sfc/ptp.c
++++ b/drivers/net/ethernet/sfc/ptp.c
+@@ -563,13 +563,45 @@ efx_ptp_mac_nic_to_ktime_correction(struct efx_nic *efx,
+                                   u32 nic_major, u32 nic_minor,
+                                   s32 correction)
+ {
++      u32 sync_timestamp;
+       ktime_t kt = { 0 };
++      s16 delta;
+ 
+       if (!(nic_major & 0x80000000)) {
+               WARN_ON_ONCE(nic_major >> 16);
+-              /* Use the top bits from the latest sync event. */
+-              nic_major &= 0xffff;
+-              nic_major |= (last_sync_timestamp_major(efx) & 0xffff0000);
++
++              /* Medford provides 48 bits of timestamp, so we must get the top
++               * 16 bits from the timesync event state.
++               *
++               * We only have the lower 16 bits of the time now, but we do
++               * have a full resolution timestamp at some point in past. As
++               * long as the difference between the (real) now and the sync
++               * is less than 2^15, then we can reconstruct the difference
++               * between those two numbers using only the lower 16 bits of
++               * each.
++               *
++               * Put another way
++               *
++               * a - b = ((a mod k) - b) mod k
++               *
++               * when -k/2 < (a-b) < k/2. In our case k is 2^16. We know
++               * (a mod k) and b, so can calculate the delta, a - b.
++               *
++               */
++              sync_timestamp = last_sync_timestamp_major(efx);
++
++              /* Because delta is s16 this does an implicit mask down to
++               * 16 bits which is what we need, assuming
++               * MEDFORD_TX_SECS_EVENT_BITS is 16. delta is signed so that
++               * we can deal with the (unlikely) case of sync timestamps
++               * arriving from the future.
++               */
++              delta = nic_major - sync_timestamp;
++
++              /* Recover the fully specified time now, by applying the offset
++               * to the (fully specified) sync time.
++               */
++              nic_major = sync_timestamp + delta;
+ 
+               kt = ptp->nic_to_kernel_time(nic_major, nic_minor,
+                                            correction);
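
The long comment in the sfc hunk describes the modulo-2^16 reconstruction; here is a standalone toy (plain C, made-up values) showing how interpreting the 16-bit difference as a signed s16 recovers the full timestamp, valid while the true distance to the sync value stays below 2^15:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t reconstruct(uint16_t low16, uint32_t sync)
    {
            /* Wraps modulo 2^16; the signed cast turns it into the true
             * (possibly negative) distance when |real - sync| < 2^15.
             */
            int16_t delta = (int16_t)(low16 - (uint16_t)sync);

            return sync + delta;
    }

    int main(void)
    {
            /* sync = 0x1fff0, real = 0x20005: the low 16 bits wrapped. */
            printf("%#x\n", reconstruct(0x0005, 0x0001fff0)); /* 0x20005 */
            return 0;
    }
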
+diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
+index 93f303ec17e2..5d864f812955 100644
+--- a/drivers/net/slip/slip.c
++++ b/drivers/net/slip/slip.c
+@@ -863,7 +863,10 @@ err_free_chan:
+       tty->disc_data = NULL;
+       clear_bit(SLF_INUSE, &sl->flags);
+       sl_free_netdev(sl->dev);
++      /* do not call free_netdev before rtnl_unlock */
++      rtnl_unlock();
+       free_netdev(sl->dev);
++      return err;
+ 
+ err_exit:
+       rtnl_unlock();
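
The slip fix reorders the error path because rtnl_unlock() runs netdev_run_todo(), which may still reference a device whose unregistration was queued while the lock was held; freeing first risks a use-after-free. A sketch of the rule with a hypothetical error path, not the slip code itself:

    static int setup_fail(struct net_device *dev, int err)
    {
            /* caller still holds rtnl_lock() here */
            rtnl_unlock();          /* let netdev_run_todo() drain first */
            free_netdev(dev);       /* only now may the memory go away */
            return err;
    }
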
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index a04f8577d9f2..1d60ccd48ec2 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -274,6 +274,9 @@ static void qmi_wwan_netdev_setup(struct net_device *net)
+               netdev_dbg(net, "mode: raw IP\n");
+       } else if (!net->header_ops) { /* don't bother if already set */
+               ether_setup(net);
++              /* Restoring min/max mtu values set originally by usbnet */
++              net->min_mtu = 0;
++              net->max_mtu = ETH_MAX_MTU;
+               clear_bit(EVENT_NO_IP_ALIGN, &dev->flags);
+               netdev_dbg(net, "mode: Ethernet\n");
+       }
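
The qmi_wwan hunk compensates for a side effect of ether_setup(), which re-clamps the MTU bounds that usbnet had chosen. A sketch of the values involved (the ether_setup() assignments are the stock 4.19 behaviour):

    static void setup_eth_mode(struct net_device *net)
    {
            ether_setup(net);               /* sets min_mtu = ETH_MIN_MTU (68)
                                             * and max_mtu = ETH_DATA_LEN (1500) */
            net->min_mtu = 0;               /* restore usbnet's bounds ... */
            net->max_mtu = ETH_MAX_MTU;     /* ... so MTUs above 1500 work */
    }
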
+diff --git a/drivers/net/wimax/i2400m/op-rfkill.c b/drivers/net/wimax/i2400m/op-rfkill.c
+index b0dba35a8ad2..dc6fe93ce71f 100644
+--- a/drivers/net/wimax/i2400m/op-rfkill.c
++++ b/drivers/net/wimax/i2400m/op-rfkill.c
+@@ -147,6 +147,7 @@ error_msg_to_dev:
+ error_alloc:
+       d_fnend(4, dev, "(wimax_dev %p state %d) = %d\n",
+               wimax_dev, state, result);
++      kfree(cmd);
+       return result;
+ }
+ 
+diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
+index f969a71348ef..8839f509b19a 100644
+--- a/drivers/scsi/libfc/fc_disc.c
++++ b/drivers/scsi/libfc/fc_disc.c
+@@ -640,6 +640,8 @@ redisc:
+       }
+ out:
+       kref_put(&rdata->kref, fc_rport_destroy);
++      if (!IS_ERR(fp))
++              fc_frame_free(fp);
+ }
+ 
+ /**
+diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
+index 04ffef9cea8c..43693b679710 100644
+--- a/fs/jbd2/transaction.c
++++ b/fs/jbd2/transaction.c
+@@ -1045,8 +1045,8 @@ static bool jbd2_write_access_granted(handle_t *handle, struct buffer_head *bh,
+       /* For undo access buffer must have data copied */
+       if (undo && !jh->b_committed_data)
+               goto out;
+-      if (jh->b_transaction != handle->h_transaction &&
+-          jh->b_next_transaction != handle->h_transaction)
++      if (READ_ONCE(jh->b_transaction) != handle->h_transaction &&
++          READ_ONCE(jh->b_next_transaction) != handle->h_transaction)
+               goto out;
+       /*
+        * There are two reasons for the barrier here:
+@@ -2467,8 +2467,8 @@ void __jbd2_journal_refile_buffer(struct journal_head *jh)
+        * our jh reference and thus __jbd2_journal_file_buffer() must not
+        * take a new one.
+        */
+-      jh->b_transaction = jh->b_next_transaction;
+-      jh->b_next_transaction = NULL;
++      WRITE_ONCE(jh->b_transaction, jh->b_next_transaction);
++      WRITE_ONCE(jh->b_next_transaction, NULL);
+       if (buffer_freed(bh))
+               jlist = BJ_Forget;
+       else if (jh->b_modified)
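
The jbd2 hunks pair WRITE_ONCE() on the refile (writer) side with READ_ONCE() in the lockless permission check, so the compiler can neither tear the stores nor cache the loads across the comparison. The pairing, reduced to a sketch with a hypothetical structure:

    #include <linux/compiler.h>
    #include <linux/types.h>

    struct jh_like {
            void *cur;      /* written under the list lock, read locklessly */
            void *next;
    };

    static void refile(struct jh_like *jh)            /* writer, lock held */
    {
            WRITE_ONCE(jh->cur, jh->next);
            WRITE_ONCE(jh->next, NULL);
    }

    static bool matches(struct jh_like *jh, void *h)  /* lockless reader */
    {
            return READ_ONCE(jh->cur) == h || READ_ONCE(jh->next) == h;
    }
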
+diff --git a/include/linux/device.h b/include/linux/device.h
+index c74ce473589a..b1c8150e9ea5 100644
+--- a/include/linux/device.h
++++ b/include/linux/device.h
+@@ -339,6 +339,7 @@ struct device *driver_find_device(struct device_driver *drv,
+                                 struct device *start, void *data,
+                                 int (*match)(struct device *dev, void *data));
+ 
++void driver_deferred_probe_add(struct device *dev);
+ int driver_deferred_probe_check_state(struct device *dev);
+ 
+ /**
+@@ -819,17 +820,21 @@ enum device_link_state {
+ /*
+  * Device link flags.
+  *
+- * STATELESS: The core won't track the presence of supplier/consumer drivers.
++ * STATELESS: The core will not remove this link automatically.
+  * AUTOREMOVE_CONSUMER: Remove the link automatically on consumer driver unbind.
+  * PM_RUNTIME: If set, the runtime PM framework will use this link.
+  * RPM_ACTIVE: Run pm_runtime_get_sync() on the supplier during link creation.
+  * AUTOREMOVE_SUPPLIER: Remove the link automatically on supplier driver unbind.
++ * AUTOPROBE_CONSUMER: Probe consumer driver automatically after supplier binds.
++ * MANAGED: The core tracks presence of supplier/consumer drivers (internal).
+  */
+ #define DL_FLAG_STATELESS             BIT(0)
+ #define DL_FLAG_AUTOREMOVE_CONSUMER   BIT(1)
+ #define DL_FLAG_PM_RUNTIME            BIT(2)
+ #define DL_FLAG_RPM_ACTIVE            BIT(3)
+ #define DL_FLAG_AUTOREMOVE_SUPPLIER   BIT(4)
++#define DL_FLAG_AUTOPROBE_CONSUMER    BIT(5)
++#define DL_FLAG_MANAGED                       BIT(6)
+ 
+ /**
+  * struct device_link - Device link representation.
+diff --git a/kernel/signal.c b/kernel/signal.c
+index 08911bb6fe9a..c42eaf39b572 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -407,27 +407,32 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
+ {
+       struct sigqueue *q = NULL;
+       struct user_struct *user;
++      int sigpending;
+ 
+       /*
+        * Protect access to @t credentials. This can go away when all
+        * callers hold rcu read lock.
++       *
++       * NOTE! A pending signal will hold on to the user refcount,
++       * and we get/put the refcount only when the sigpending count
++       * changes from/to zero.
+        */
+       rcu_read_lock();
+-      user = get_uid(__task_cred(t)->user);
+-      atomic_inc(&user->sigpending);
++      user = __task_cred(t)->user;
++      sigpending = atomic_inc_return(&user->sigpending);
++      if (sigpending == 1)
++              get_uid(user);
+       rcu_read_unlock();
+ 
+-      if (override_rlimit ||
+-          atomic_read(&user->sigpending) <=
+-                      task_rlimit(t, RLIMIT_SIGPENDING)) {
++      if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
+               q = kmem_cache_alloc(sigqueue_cachep, flags);
+       } else {
+               print_dropped_signal(sig);
+       }
+ 
+       if (unlikely(q == NULL)) {
+-              atomic_dec(&user->sigpending);
+-              free_uid(user);
++              if (atomic_dec_and_test(&user->sigpending))
++                      free_uid(user);
+       } else {
+               INIT_LIST_HEAD(&q->list);
+               q->flags = 0;
+@@ -441,8 +446,8 @@ static void __sigqueue_free(struct sigqueue *q)
+ {
+       if (q->flags & SIGQUEUE_PREALLOC)
+               return;
+-      atomic_dec(&q->user->sigpending);
+-      free_uid(q->user);
++      if (atomic_dec_and_test(&q->user->sigpending))
++              free_uid(q->user);
+       kmem_cache_free(sigqueue_cachep, q);
+ }
+ 
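
The signal.c change moves from one uid reference per queued signal to one reference per non-zero window of the sigpending counter, so get_uid()/free_uid() fire only on the 0 -> 1 and 1 -> 0 edges. A reduced sketch with a hypothetical counter and the real uid helpers:

    #include <linux/atomic.h>
    #include <linux/sched/user.h>

    static atomic_t pending = ATOMIC_INIT(0);

    static void inc_pending(struct user_struct *user)
    {
            if (atomic_inc_return(&pending) == 1)
                    get_uid(user);  /* first signal: pin the user */
    }

    static void dec_pending(struct user_struct *user)
    {
            if (atomic_dec_and_test(&pending))
                    free_uid(user); /* last signal gone: unpin */
    }
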
+diff --git a/mm/slub.c b/mm/slub.c
+index 9c3937c5ce38..764a1023ed87 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -3103,6 +3103,15 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
+               void *object = c->freelist;
+ 
+               if (unlikely(!object)) {
++                      /*
++                       * We may have removed an object from c->freelist using
++                       * the fastpath in the previous iteration; in that case,
++                       * c->tid has not been bumped yet.
++                       * Since ___slab_alloc() may reenable interrupts while
++                       * allocating memory, we should bump c->tid now.
++                       */
++                      c->tid = next_tid(c->tid);
++
+                       /*
+                        * Invoking slow path likely have side-effect
+                        * of re-populating per CPU c->freelist
+diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
+index f0165c5f376b..1c21dc5d6dd4 100644
+--- a/net/ipv4/cipso_ipv4.c
++++ b/net/ipv4/cipso_ipv4.c
+@@ -1738,6 +1738,7 @@ void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway)
+ {
+       unsigned char optbuf[sizeof(struct ip_options) + 40];
+       struct ip_options *opt = (struct ip_options *)optbuf;
++      int res;
+ 
+       if (ip_hdr(skb)->protocol == IPPROTO_ICMP || error != -EACCES)
+               return;
+@@ -1749,7 +1750,11 @@ void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway)
+ 
+       memset(opt, 0, sizeof(struct ip_options));
+       opt->optlen = ip_hdr(skb)->ihl*4 - sizeof(struct iphdr);
+-      if (__ip_options_compile(dev_net(skb->dev), opt, skb, NULL))
++      rcu_read_lock();
++      res = __ip_options_compile(dev_net(skb->dev), opt, skb, NULL);
++      rcu_read_unlock();
++
++      if (res)
+               return;
+ 
+       if (gateway)
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index 02d0b22d0114..c7c456c86b0d 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -4042,7 +4042,7 @@ void __ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata)
+ 
+       lockdep_assert_held(&local->sta_mtx);
+ 
+-      list_for_each_entry_rcu(sta, &local->sta_list, list) {
++      list_for_each_entry(sta, &local->sta_list, list) {
+               if (sdata != sta->sdata &&
+                   (!sta->sdata->bss || sta->sdata->bss != sdata->bss))
+                       continue;
+diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
+index 5c75118539bb..82d38f93c81a 100644
+--- a/net/qrtr/qrtr.c
++++ b/net/qrtr/qrtr.c
+@@ -203,7 +203,7 @@ static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb,
+       hdr->size = cpu_to_le32(len);
+       hdr->confirm_rx = 0;
+ 
+-      skb_put_padto(skb, ALIGN(len, 4));
++      skb_put_padto(skb, ALIGN(len, 4) + sizeof(*hdr));
+ 
+       mutex_lock(&node->ep_lock);
+       if (node->ep)
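
The arithmetic behind the qrtr fix, assuming the v1 header is 32 bytes as in this kernel: skb_put_padto() pads an skb to a given *total* length and never trims, and the header has already been pushed by this point, so skb->len is already sizeof(*hdr) + len and the old target was always a no-op.

    /* Example with payload len = 5:
     *   skb->len   = 32 + 5 = 37
     *   old target = ALIGN(5, 4)      =  8   (< 37: no-op, payload unpadded)
     *   new target = ALIGN(5, 4) + 32 = 40   (pads the payload from 5 to 8)
     */
    skb_put_padto(skb, ALIGN(len, 4) + sizeof(*hdr));
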
+diff --git a/net/wireless/reg.c b/net/wireless/reg.c
+index 018c60be153a..32f575857e41 100644
+--- a/net/wireless/reg.c
++++ b/net/wireless/reg.c
+@@ -2269,7 +2269,7 @@ static void handle_channel_custom(struct wiphy *wiphy,
+                       break;
+       }
+ 
+-      if (IS_ERR(reg_rule)) {
++      if (IS_ERR_OR_NULL(reg_rule)) {
+               pr_debug("Disabling freq %d MHz as custom regd has no rule that 
fits it\n",
+                        chan->center_freq);
+               if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED) {
