Muehlenhoff has uploaded a new change for review. ( 
https://gerrit.wikimedia.org/r/328649 )

Change subject: Update to 4.4.39
......................................................................

Update to 4.4.39

Change-Id: Ice952e652eec0d3d4616fddb7ffe6e23c32e3e11
---
M debian/changelog
A debian/patches/bugfix/all/stable-4.4.37.patch
A debian/patches/bugfix/all/stable-4.4.38.patch
A debian/patches/bugfix/all/stable-4.4.39.patch
M debian/patches/series
5 files changed, 1,909 insertions(+), 0 deletions(-)


  git pull ssh://gerrit.wikimedia.org:29418/operations/debs/linux44 
refs/changes/49/328649/1

diff --git a/debian/changelog b/debian/changelog
index e6e59bc..f22bea7 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -39,6 +39,8 @@
     - CVE-2016-8655 [84ac7260236a49c79eede91617700174c2c19b0c]
     - CVE-2016-9576 [a0ac402cfcdc904f9772e1762b3fda112dcc56a0]
     - CVE-2016-9793 [b98b0bc8c431e3ceb4b26b0dfc8db509518fb290]
+  * Update to 4.4.39:
+    https://cdn.kernel.org/pub/linux/kernel/v4.x/ChangeLog-4.4.39
 
  -- Moritz Muehlenhoff <mmuhlenh...@wikimedia.org>  Tue, 15 Nov 2016 14:42:40 
+0100
 
diff --git a/debian/patches/bugfix/all/stable-4.4.37.patch 
b/debian/patches/bugfix/all/stable-4.4.37.patch
new file mode 100644
index 0000000..c41e2df
--- /dev/null
+++ b/debian/patches/bugfix/all/stable-4.4.37.patch
@@ -0,0 +1,377 @@
+diff --git a/Makefile b/Makefile
+index 705eb9e38fce..b57ec79b4941 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 4
+-SUBLEVEL = 36
++SUBLEVEL = 37
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+ 
+diff --git a/arch/arc/include/asm/delay.h b/arch/arc/include/asm/delay.h
+index 08e7e2a16ac1..a36e8601114d 100644
+--- a/arch/arc/include/asm/delay.h
++++ b/arch/arc/include/asm/delay.h
+@@ -22,10 +22,11 @@
+ static inline void __delay(unsigned long loops)
+ {
+       __asm__ __volatile__(
+-      "       lp  1f  \n"
+-      "       nop     \n"
+-      "1:             \n"
+-      : "+l"(loops));
++      "       mov lp_count, %0        \n"
++      "       lp  1f                  \n"
++      "       nop                     \n"
++      "1:                             \n"
++      : : "r"(loops));
+ }
+ 
+ extern void __bad_udelay(void);
+diff --git a/arch/arm64/include/asm/cpufeature.h 
b/arch/arm64/include/asm/cpufeature.h
+index 8136afc9df0d..8884b5d5f48c 100644
+--- a/arch/arm64/include/asm/cpufeature.h
++++ b/arch/arm64/include/asm/cpufeature.h
+@@ -77,7 +77,7 @@ struct arm64_cpu_capabilities {
+       const char *desc;
+       u16 capability;
+       bool (*matches)(const struct arm64_cpu_capabilities *);
+-      void (*enable)(void *);         /* Called on all active CPUs */
++      int (*enable)(void *);          /* Called on all active CPUs */
+       union {
+               struct {        /* To be used for erratum handling only */
+                       u32 midr_model;
+diff --git a/arch/arm64/include/asm/processor.h 
b/arch/arm64/include/asm/processor.h
+index 4acb7ca94fcd..d08559528927 100644
+--- a/arch/arm64/include/asm/processor.h
++++ b/arch/arm64/include/asm/processor.h
+@@ -186,6 +186,6 @@ static inline void spin_lock_prefetch(const void *x)
+ 
+ #endif
+ 
+-void cpu_enable_pan(void *__unused);
++int cpu_enable_pan(void *__unused);
+ 
+ #endif /* __ASM_PROCESSOR_H */
+diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
+index 0669c63281ea..2735bf814592 100644
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -19,7 +19,9 @@
+ #define pr_fmt(fmt) "CPU features: " fmt
+ 
+ #include <linux/bsearch.h>
++#include <linux/cpumask.h>
+ #include <linux/sort.h>
++#include <linux/stop_machine.h>
+ #include <linux/types.h>
+ #include <asm/cpu.h>
+ #include <asm/cpufeature.h>
+@@ -764,7 +766,13 @@ static void enable_cpu_capabilities(const struct 
arm64_cpu_capabilities *caps)
+ 
+       for (i = 0; caps[i].desc; i++)
+               if (caps[i].enable && cpus_have_cap(caps[i].capability))
+-                      on_each_cpu(caps[i].enable, NULL, true);
++                      /*
++                       * Use stop_machine() as it schedules the work allowing
++                       * us to modify PSTATE, instead of on_each_cpu() which
++                       * uses an IPI, giving us a PSTATE that disappears when
++                       * we return.
++                       */
++                      stop_machine(caps[i].enable, NULL, cpu_online_mask);
+ }
+ 
+ #ifdef CONFIG_HOTPLUG_CPU
+diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
+index 1095aa483a1c..00c1372bf57b 100644
+--- a/arch/arm64/kernel/suspend.c
++++ b/arch/arm64/kernel/suspend.c
+@@ -1,7 +1,9 @@
+ #include <linux/ftrace.h>
+ #include <linux/percpu.h>
+ #include <linux/slab.h>
++#include <asm/alternative.h>
+ #include <asm/cacheflush.h>
++#include <asm/cpufeature.h>
+ #include <asm/debug-monitors.h>
+ #include <asm/pgtable.h>
+ #include <asm/memory.h>
+@@ -111,6 +113,13 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned 
long))
+               set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
+ 
+               /*
++               * PSTATE was not saved over suspend/resume, re-enable any
++               * detected features that might not have been set correctly.
++               */
++              asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,
++                              CONFIG_ARM64_PAN));
++
++              /*
+                * Restore HW breakpoint registers to sane values
+                * before debug exceptions are possibly reenabled
+                * through local_dbg_restore.
+diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
+index 4c1a118c1d09..247bae758e1e 100644
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -29,7 +29,9 @@
+ #include <linux/sched.h>
+ #include <linux/highmem.h>
+ #include <linux/perf_event.h>
++#include <linux/preempt.h>
+ 
++#include <asm/bug.h>
+ #include <asm/cpufeature.h>
+ #include <asm/exception.h>
+ #include <asm/debug-monitors.h>
+@@ -606,8 +608,16 @@ asmlinkage int __exception do_debug_exception(unsigned 
long addr,
+ }
+ 
+ #ifdef CONFIG_ARM64_PAN
+-void cpu_enable_pan(void *__unused)
++int cpu_enable_pan(void *__unused)
+ {
++      /*
++       * We modify PSTATE. This won't work from irq context as the PSTATE
++       * is discarded once we return from the exception.
++       */
++      WARN_ON_ONCE(in_interrupt());
++
+       config_sctlr_el1(SCTLR_EL1_SPAN, 0);
++      asm(SET_PSTATE_PAN(1));
++      return 0;
+ }
+ #endif /* CONFIG_ARM64_PAN */
+diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
+index 6bc9ae24b6d2..8f1a3f443f7d 100644
+--- a/arch/x86/kernel/head_32.S
++++ b/arch/x86/kernel/head_32.S
+@@ -571,7 +571,7 @@ early_idt_handler_common:
+       movl %eax,%ds
+       movl %eax,%es
+ 
+-      cmpl $(__KERNEL_CS),32(%esp)
++      cmpw $(__KERNEL_CS),32(%esp)
+       jne 10f
+ 
+       leal 28(%esp),%eax      # Pointer to %eip
+diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
+index 370c2f76016d..1770c455dfdd 100644
+--- a/drivers/block/zram/zram_drv.c
++++ b/drivers/block/zram/zram_drv.c
+@@ -1368,7 +1368,8 @@ static ssize_t hot_remove_store(struct class *class,
+       zram = idr_find(&zram_index_idr, dev_id);
+       if (zram) {
+               ret = zram_remove(zram);
+-              idr_remove(&zram_index_idr, dev_id);
++              if (!ret)
++                      idr_remove(&zram_index_idr, dev_id);
+       } else {
+               ret = -ENODEV;
+       }
+diff --git a/drivers/net/wireless/mwifiex/cfg80211.c 
b/drivers/net/wireless/mwifiex/cfg80211.c
+index 4073116e6e9f..c3331d6201c3 100644
+--- a/drivers/net/wireless/mwifiex/cfg80211.c
++++ b/drivers/net/wireless/mwifiex/cfg80211.c
+@@ -2144,8 +2144,9 @@ done:
+                       is_scanning_required = 1;
+               } else {
+                       mwifiex_dbg(priv->adapter, MSG,
+-                                  "info: trying to associate to '%s' bssid 
%pM\n",
+-                                  (char *)req_ssid.ssid, bss->bssid);
++                                  "info: trying to associate to '%.*s' bssid 
%pM\n",
++                                  req_ssid.ssid_len, (char *)req_ssid.ssid,
++                                  bss->bssid);
+                       memcpy(&priv->cfg_bssid, bss->bssid, ETH_ALEN);
+                       break;
+               }
+@@ -2202,8 +2203,8 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct 
net_device *dev,
+       }
+ 
+       mwifiex_dbg(adapter, INFO,
+-                  "info: Trying to associate to %s and bssid %pM\n",
+-                  (char *)sme->ssid, sme->bssid);
++                  "info: Trying to associate to %.*s and bssid %pM\n",
++                  (int)sme->ssid_len, (char *)sme->ssid, sme->bssid);
+ 
+       ret = mwifiex_cfg80211_assoc(priv, sme->ssid_len, sme->ssid, sme->bssid,
+                                    priv->bss_mode, sme->channel, sme, 0);
+@@ -2333,8 +2334,8 @@ mwifiex_cfg80211_join_ibss(struct wiphy *wiphy, struct 
net_device *dev,
+       }
+ 
+       mwifiex_dbg(priv->adapter, MSG,
+-                  "info: trying to join to %s and bssid %pM\n",
+-                  (char *)params->ssid, params->bssid);
++                  "info: trying to join to %.*s and bssid %pM\n",
++                  params->ssid_len, (char *)params->ssid, params->bssid);
+ 
+       mwifiex_set_ibss_params(priv, params);
+ 
+diff --git a/drivers/pci/pcie/aer/aer_inject.c 
b/drivers/pci/pcie/aer/aer_inject.c
+index 182224acedbe..58f1419a68ae 100644
+--- a/drivers/pci/pcie/aer/aer_inject.c
++++ b/drivers/pci/pcie/aer/aer_inject.c
+@@ -283,20 +283,6 @@ out:
+       return 0;
+ }
+ 
+-static struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
+-{
+-      while (1) {
+-              if (!pci_is_pcie(dev))
+-                      break;
+-              if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
+-                      return dev;
+-              if (!dev->bus->self)
+-                      break;
+-              dev = dev->bus->self;
+-      }
+-      return NULL;
+-}
+-
+ static int find_aer_device_iter(struct device *device, void *data)
+ {
+       struct pcie_device **result = data;
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
+index 9757cf9037a2..b5843c255263 100644
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -1415,6 +1415,21 @@ static void program_hpp_type1(struct pci_dev *dev, 
struct hpp_type1 *hpp)
+               dev_warn(&dev->dev, "PCI-X settings not supported\n");
+ }
+ 
++static bool pcie_root_rcb_set(struct pci_dev *dev)
++{
++      struct pci_dev *rp = pcie_find_root_port(dev);
++      u16 lnkctl;
++
++      if (!rp)
++              return false;
++
++      pcie_capability_read_word(rp, PCI_EXP_LNKCTL, &lnkctl);
++      if (lnkctl & PCI_EXP_LNKCTL_RCB)
++              return true;
++
++      return false;
++}
++
+ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
+ {
+       int pos;
+@@ -1444,9 +1459,20 @@ static void program_hpp_type2(struct pci_dev *dev, 
struct hpp_type2 *hpp)
+                       ~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or);
+ 
+       /* Initialize Link Control Register */
+-      if (pcie_cap_has_lnkctl(dev))
++      if (pcie_cap_has_lnkctl(dev)) {
++
++              /*
++               * If the Root Port supports Read Completion Boundary of
++               * 128, set RCB to 128.  Otherwise, clear it.
++               */
++              hpp->pci_exp_lnkctl_and |= PCI_EXP_LNKCTL_RCB;
++              hpp->pci_exp_lnkctl_or &= ~PCI_EXP_LNKCTL_RCB;
++              if (pcie_root_rcb_set(dev))
++                      hpp->pci_exp_lnkctl_or |= PCI_EXP_LNKCTL_RCB;
++
+               pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
+                       ~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or);
++      }
+ 
+       /* Find Advanced Error Reporting Enhanced Capability */
+       pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c
+index c20163b92991..375008e2be20 100644
+--- a/drivers/pwm/sysfs.c
++++ b/drivers/pwm/sysfs.c
+@@ -366,6 +366,8 @@ void pwmchip_sysfs_unexport_children(struct pwm_chip *chip)
+               if (test_bit(PWMF_EXPORTED, &pwm->flags))
+                       pwm_unexport_child(parent, pwm);
+       }
++
++      put_device(parent);
+ }
+ 
+ static int __init pwm_sysfs_init(void)
+diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
+index eeae401a2412..287e698c28de 100644
+--- a/include/linux/compiler-gcc.h
++++ b/include/linux/compiler-gcc.h
+@@ -251,7 +251,9 @@
+ #endif
+ #endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
+ 
+-#if GCC_VERSION >= 50000
++#if GCC_VERSION >= 70000
++#define KASAN_ABI_VERSION 5
++#elif GCC_VERSION >= 50000
+ #define KASAN_ABI_VERSION 4
+ #elif GCC_VERSION >= 40902
+ #define KASAN_ABI_VERSION 3
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index e89c7ee7e803..5f37614f2451 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -1802,6 +1802,20 @@ static inline int pci_pcie_type(const struct pci_dev 
*dev)
+       return (pcie_caps_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4;
+ }
+ 
++static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
++{
++      while (1) {
++              if (!pci_is_pcie(dev))
++                      break;
++              if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
++                      return dev;
++              if (!dev->bus->self)
++                      break;
++              dev = dev->bus->self;
++      }
++      return NULL;
++}
++
+ void pci_request_acs(void);
+ bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags);
+ bool pci_acs_path_enabled(struct pci_dev *start,
+diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
+index 630c19772630..32cbe72bf545 100644
+--- a/kernel/rcu/tree_plugin.h
++++ b/kernel/rcu/tree_plugin.h
+@@ -2275,6 +2275,7 @@ static int rcu_nocb_kthread(void *arg)
+                               cl++;
+                       c++;
+                       local_bh_enable();
++                      cond_resched_rcu_qs();
+                       list = next;
+               }
+               trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
+diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
+index 4f6c62e5c21e..37ff0ab6a8ff 100644
+--- a/mm/kasan/kasan.h
++++ b/mm/kasan/kasan.h
+@@ -52,6 +52,9 @@ struct kasan_global {
+ #if KASAN_ABI_VERSION >= 4
+       struct kasan_source_location *location;
+ #endif
++#if KASAN_ABI_VERSION >= 5
++      char *odr_indicator;
++#endif
+ };
+ 
+ static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
+diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
+index 3a9b66c6e09c..0aca39762ed0 100644
+--- a/sound/core/pcm_lib.c
++++ b/sound/core/pcm_lib.c
+@@ -1886,8 +1886,8 @@ void snd_pcm_period_elapsed(struct snd_pcm_substream 
*substream)
+               snd_timer_interrupt(substream->timer, 1);
+ #endif
+  _end:
+-      snd_pcm_stream_unlock_irqrestore(substream, flags);
+       kill_fasync(&runtime->fasync, SIGIO, POLL_IN);
++      snd_pcm_stream_unlock_irqrestore(substream, flags);
+ }
+ 
+ EXPORT_SYMBOL(snd_pcm_period_elapsed);
diff --git a/debian/patches/bugfix/all/stable-4.4.38.patch 
b/debian/patches/bugfix/all/stable-4.4.38.patch
new file mode 100644
index 0000000..4185e34
--- /dev/null
+++ b/debian/patches/bugfix/all/stable-4.4.38.patch
@@ -0,0 +1,1016 @@
+diff --git a/Makefile b/Makefile
+index b57ec79b4941..6876efe0d735 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 4
+-SUBLEVEL = 37
++SUBLEVEL = 38
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+ 
+diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
+index c3c12efe0bc0..9c0c8fd0b292 100644
+--- a/arch/sparc/kernel/signal_32.c
++++ b/arch/sparc/kernel/signal_32.c
+@@ -89,7 +89,7 @@ asmlinkage void do_sigreturn(struct pt_regs *regs)
+       sf = (struct signal_frame __user *) regs->u_regs[UREG_FP];
+ 
+       /* 1. Make sure we are not getting garbage from the user */
+-      if (!invalid_frame_pointer(sf, sizeof(*sf)))
++      if (invalid_frame_pointer(sf, sizeof(*sf)))
+               goto segv_and_exit;
+ 
+       if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP]))
+@@ -150,7 +150,7 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
+ 
+       synchronize_user_stack();
+       sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP];
+-      if (!invalid_frame_pointer(sf, sizeof(*sf)))
++      if (invalid_frame_pointer(sf, sizeof(*sf)))
+               goto segv;
+ 
+       if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
+diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
+index a5331c336b2a..3d3414c14792 100644
+--- a/arch/sparc/mm/init_64.c
++++ b/arch/sparc/mm/init_64.c
+@@ -800,8 +800,10 @@ struct mdesc_mblock {
+ };
+ static struct mdesc_mblock *mblocks;
+ static int num_mblocks;
++static int find_numa_node_for_addr(unsigned long pa,
++                                 struct node_mem_mask *pnode_mask);
+ 
+-static unsigned long ra_to_pa(unsigned long addr)
++static unsigned long __init ra_to_pa(unsigned long addr)
+ {
+       int i;
+ 
+@@ -817,8 +819,11 @@ static unsigned long ra_to_pa(unsigned long addr)
+       return addr;
+ }
+ 
+-static int find_node(unsigned long addr)
++static int __init find_node(unsigned long addr)
+ {
++      static bool search_mdesc = true;
++      static struct node_mem_mask last_mem_mask = { ~0UL, ~0UL };
++      static int last_index;
+       int i;
+ 
+       addr = ra_to_pa(addr);
+@@ -828,13 +833,30 @@ static int find_node(unsigned long addr)
+               if ((addr & p->mask) == p->val)
+                       return i;
+       }
+-      /* The following condition has been observed on LDOM guests.*/
+-      WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node"
+-              " rule. Some physical memory will be owned by node 0.");
+-      return 0;
++      /* The following condition has been observed on LDOM guests because
++       * node_masks only contains the best latency mask and value.
++       * LDOM guest's mdesc can contain a single latency group to
++       * cover multiple address range. Print warning message only if the
++       * address cannot be found in node_masks nor mdesc.
++       */
++      if ((search_mdesc) &&
++          ((addr & last_mem_mask.mask) != last_mem_mask.val)) {
++              /* find the available node in the mdesc */
++              last_index = find_numa_node_for_addr(addr, &last_mem_mask);
++              numadbg("find_node: latency group for address 0x%lx is %d\n",
++                      addr, last_index);
++              if ((last_index < 0) || (last_index >= num_node_masks)) {
++                      /* WARN_ONCE() and use default group 0 */
++                      WARN_ONCE(1, "find_node: A physical address doesn't 
match a NUMA node rule. Some physical memory will be owned by node 0.");
++                      search_mdesc = false;
++                      last_index = 0;
++              }
++      }
++
++      return last_index;
+ }
+ 
+-static u64 memblock_nid_range(u64 start, u64 end, int *nid)
++static u64 __init memblock_nid_range(u64 start, u64 end, int *nid)
+ {
+       *nid = find_node(start);
+       start += PAGE_SIZE;
+@@ -1158,6 +1180,41 @@ int __node_distance(int from, int to)
+       return numa_latency[from][to];
+ }
+ 
++static int find_numa_node_for_addr(unsigned long pa,
++                                 struct node_mem_mask *pnode_mask)
++{
++      struct mdesc_handle *md = mdesc_grab();
++      u64 node, arc;
++      int i = 0;
++
++      node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
++      if (node == MDESC_NODE_NULL)
++              goto out;
++
++      mdesc_for_each_node_by_name(md, node, "group") {
++              mdesc_for_each_arc(arc, md, node, MDESC_ARC_TYPE_FWD) {
++                      u64 target = mdesc_arc_target(md, arc);
++                      struct mdesc_mlgroup *m = find_mlgroup(target);
++
++                      if (!m)
++                              continue;
++                      if ((pa & m->mask) == m->match) {
++                              if (pnode_mask) {
++                                      pnode_mask->mask = m->mask;
++                                      pnode_mask->val = m->match;
++                              }
++                              mdesc_release(md);
++                              return i;
++                      }
++              }
++              i++;
++      }
++
++out:
++      mdesc_release(md);
++      return -1;
++}
++
+ static int find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp)
+ {
+       int i;
+diff --git a/block/blk-map.c b/block/blk-map.c
+index f565e11f465a..69953bd97e65 100644
+--- a/block/blk-map.c
++++ b/block/blk-map.c
+@@ -90,6 +90,9 @@ int blk_rq_map_user_iov(struct request_queue *q, struct 
request *rq,
+       if (!iter || !iter->count)
+               return -EINVAL;
+ 
++      if (!iter_is_iovec(iter))
++              return -EINVAL;
++
+       iov_for_each(iov, i, *iter) {
+               unsigned long uaddr = (unsigned long) iov.iov_base;
+ 
+diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
+index 6f946fedbb77..0864f05633a2 100644
+--- a/drivers/net/dsa/bcm_sf2.c
++++ b/drivers/net/dsa/bcm_sf2.c
+@@ -1137,6 +1137,7 @@ static void bcm_sf2_sw_adjust_link(struct dsa_switch 
*ds, int port,
+                                  struct phy_device *phydev)
+ {
+       struct bcm_sf2_priv *priv = ds_to_priv(ds);
++      struct ethtool_eee *p = &priv->port_sts[port].eee;
+       u32 id_mode_dis = 0, port_mode;
+       const char *str = NULL;
+       u32 reg;
+@@ -1211,6 +1212,9 @@ force_link:
+               reg |= DUPLX_MODE;
+ 
+       core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port));
++
++      if (!phydev->is_pseudo_fixed_link)
++              p->eee_enabled = bcm_sf2_eee_init(ds, port, phydev);
+ }
+ 
+ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c 
b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+index 0fb3f8de88e9..91627561c58d 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+@@ -1168,6 +1168,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct 
net_device *dev,
+                                         struct bcmgenet_tx_ring *ring)
+ {
+       struct bcmgenet_priv *priv = netdev_priv(dev);
++      struct device *kdev = &priv->pdev->dev;
+       struct enet_cb *tx_cb_ptr;
+       struct netdev_queue *txq;
+       unsigned int pkts_compl = 0;
+@@ -1195,7 +1196,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct 
net_device *dev,
+                       pkts_compl++;
+                       dev->stats.tx_packets++;
+                       dev->stats.tx_bytes += tx_cb_ptr->skb->len;
+-                      dma_unmap_single(&dev->dev,
++                      dma_unmap_single(kdev,
+                                        dma_unmap_addr(tx_cb_ptr, dma_addr),
+                                        dma_unmap_len(tx_cb_ptr, dma_len),
+                                        DMA_TO_DEVICE);
+@@ -1203,7 +1204,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct 
net_device *dev,
+               } else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
+                       dev->stats.tx_bytes +=
+                               dma_unmap_len(tx_cb_ptr, dma_len);
+-                      dma_unmap_page(&dev->dev,
++                      dma_unmap_page(kdev,
+                                      dma_unmap_addr(tx_cb_ptr, dma_addr),
+                                      dma_unmap_len(tx_cb_ptr, dma_len),
+                                      DMA_TO_DEVICE);
+@@ -1754,6 +1755,7 @@ static int bcmgenet_alloc_rx_buffers(struct 
bcmgenet_priv *priv,
+ 
+ static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
+ {
++      struct device *kdev = &priv->pdev->dev;
+       struct enet_cb *cb;
+       int i;
+ 
+@@ -1761,7 +1763,7 @@ static void bcmgenet_free_rx_buffers(struct 
bcmgenet_priv *priv)
+               cb = &priv->rx_cbs[i];
+ 
+               if (dma_unmap_addr(cb, dma_addr)) {
+-                      dma_unmap_single(&priv->dev->dev,
++                      dma_unmap_single(kdev,
+                                        dma_unmap_addr(cb, dma_addr),
+                                        priv->rx_buf_len, DMA_FROM_DEVICE);
+                       dma_unmap_addr_set(cb, dma_addr, 0);
+diff --git a/drivers/net/ethernet/marvell/sky2.c 
b/drivers/net/ethernet/marvell/sky2.c
+index 5606a043063e..4b62aa1f9ff8 100644
+--- a/drivers/net/ethernet/marvell/sky2.c
++++ b/drivers/net/ethernet/marvell/sky2.c
+@@ -5220,6 +5220,19 @@ static SIMPLE_DEV_PM_OPS(sky2_pm_ops, sky2_suspend, 
sky2_resume);
+ 
+ static void sky2_shutdown(struct pci_dev *pdev)
+ {
++      struct sky2_hw *hw = pci_get_drvdata(pdev);
++      int port;
++
++      for (port = 0; port < hw->ports; port++) {
++              struct net_device *ndev = hw->dev[port];
++
++              rtnl_lock();
++              if (netif_running(ndev)) {
++                      dev_close(ndev);
++                      netif_device_detach(ndev);
++              }
++              rtnl_unlock();
++      }
+       sky2_suspend(&pdev->dev);
+       pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev));
+       pci_set_power_state(pdev, PCI_D3hot);
+diff --git a/drivers/net/ethernet/renesas/sh_eth.c 
b/drivers/net/ethernet/renesas/sh_eth.c
+index 36fc9427418f..480f3dae0780 100644
+--- a/drivers/net/ethernet/renesas/sh_eth.c
++++ b/drivers/net/ethernet/renesas/sh_eth.c
+@@ -832,7 +832,7 @@ static struct sh_eth_cpu_data r7s72100_data = {
+ 
+       .ecsr_value     = ECSR_ICD,
+       .ecsipr_value   = ECSIPR_ICDIP,
+-      .eesipr_value   = 0xff7f009f,
++      .eesipr_value   = 0xe77f009f,
+ 
+       .tx_check       = EESR_TC1 | EESR_FTC,
+       .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
+diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
+index 4827c6987ac3..f0961cbaf87e 100644
+--- a/drivers/net/geneve.c
++++ b/drivers/net/geneve.c
+@@ -815,7 +815,6 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, 
struct net_device *dev,
+       struct geneve_dev *geneve = netdev_priv(dev);
+       struct geneve_sock *gs4 = geneve->sock4;
+       struct rtable *rt = NULL;
+-      const struct iphdr *iip; /* interior IP header */
+       int err = -EINVAL;
+       struct flowi4 fl4;
+       __u8 tos, ttl;
+@@ -842,8 +841,6 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, 
struct net_device *dev,
+       sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
+       skb_reset_mac_header(skb);
+ 
+-      iip = ip_hdr(skb);
+-
+       if (info) {
+               const struct ip_tunnel_key *key = &info->key;
+               u8 *opts = NULL;
+@@ -859,7 +856,7 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, 
struct net_device *dev,
+               if (unlikely(err))
+                       goto err;
+ 
+-              tos = ip_tunnel_ecn_encap(key->tos, iip, skb);
++              tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
+               ttl = key->ttl;
+               df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
+       } else {
+@@ -869,7 +866,7 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, 
struct net_device *dev,
+               if (unlikely(err))
+                       goto err;
+ 
+-              tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, iip, skb);
++              tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, ip_hdr(skb), skb);
+               ttl = geneve->ttl;
+               if (!ttl && IN_MULTICAST(ntohl(fl4.daddr)))
+                       ttl = 1;
+@@ -903,7 +900,6 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, 
struct net_device *dev,
+       struct geneve_dev *geneve = netdev_priv(dev);
+       struct geneve_sock *gs6 = geneve->sock6;
+       struct dst_entry *dst = NULL;
+-      const struct iphdr *iip; /* interior IP header */
+       int err = -EINVAL;
+       struct flowi6 fl6;
+       __u8 prio, ttl;
+@@ -927,8 +923,6 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, 
struct net_device *dev,
+       sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
+       skb_reset_mac_header(skb);
+ 
+-      iip = ip_hdr(skb);
+-
+       if (info) {
+               const struct ip_tunnel_key *key = &info->key;
+               u8 *opts = NULL;
+@@ -945,7 +939,7 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, 
struct net_device *dev,
+               if (unlikely(err))
+                       goto err;
+ 
+-              prio = ip_tunnel_ecn_encap(key->tos, iip, skb);
++              prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
+               ttl = key->ttl;
+       } else {
+               udp_csum = false;
+@@ -954,7 +948,7 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, 
struct net_device *dev,
+               if (unlikely(err))
+                       goto err;
+ 
+-              prio = ip_tunnel_ecn_encap(fl6.flowi6_tos, iip, skb);
++              prio = ip_tunnel_ecn_encap(fl6.flowi6_tos, ip_hdr(skb), skb);
+               ttl = geneve->ttl;
+               if (!ttl && ipv6_addr_is_multicast(&fl6.daddr))
+                       ttl = 1;
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index f94ab786088f..0e2a19e58923 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -1465,6 +1465,11 @@ static void virtnet_free_queues(struct virtnet_info *vi)
+               netif_napi_del(&vi->rq[i].napi);
+       }
+ 
++      /* We called napi_hash_del() before netif_napi_del(),
++       * we need to respect an RCU grace period before freeing vi->rq
++       */
++      synchronize_net();
++
+       kfree(vi->rq);
+       kfree(vi->sq);
+ }
+diff --git a/include/linux/uio.h b/include/linux/uio.h
+index 5f9c59da978b..e2225109b816 100644
+--- a/include/linux/uio.h
++++ b/include/linux/uio.h
+@@ -101,12 +101,12 @@ int iov_iter_npages(const struct iov_iter *i, int 
maxpages);
+ 
+ const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);
+ 
+-static inline size_t iov_iter_count(struct iov_iter *i)
++static inline size_t iov_iter_count(const struct iov_iter *i)
+ {
+       return i->count;
+ }
+ 
+-static inline bool iter_is_iovec(struct iov_iter *i)
++static inline bool iter_is_iovec(const struct iov_iter *i)
+ {
+       return !(i->type & (ITER_BVEC | ITER_KVEC));
+ }
+diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
+index 2c2eb1b629b1..2e9a1c2818c7 100644
+--- a/net/core/net_namespace.c
++++ b/net/core/net_namespace.c
+@@ -217,6 +217,8 @@ int peernet2id_alloc(struct net *net, struct net *peer)
+       bool alloc;
+       int id;
+ 
++      if (atomic_read(&net->count) == 0)
++              return NETNSA_NSID_NOT_ASSIGNED;
+       spin_lock_irqsave(&net->nsid_lock, flags);
+       alloc = atomic_read(&peer->count) == 0 ? false : true;
+       id = __peernet2id_alloc(net, peer, &alloc);
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 87b91ffbdec3..b94e165a4f79 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -2600,7 +2600,10 @@ nla_put_failure:
+ 
+ static inline size_t rtnl_fdb_nlmsg_size(void)
+ {
+-      return NLMSG_ALIGN(sizeof(struct ndmsg)) + nla_total_size(ETH_ALEN);
++      return NLMSG_ALIGN(sizeof(struct ndmsg)) +
++             nla_total_size(ETH_ALEN) +       /* NDA_LLADDR */
++             nla_total_size(sizeof(u16)) +    /* NDA_VLAN */
++             0;
+ }
+ 
+ static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int 
type)
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 88f017854509..f4c0917e66b5 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -745,7 +745,7 @@ int sock_setsockopt(struct socket *sock, int level, int 
optname,
+               val = min_t(u32, val, sysctl_wmem_max);
+ set_sndbuf:
+               sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
+-              sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF);
++              sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
+               /* Wake up sending tasks if we upped the value. */
+               sk->sk_write_space(sk);
+               break;
+@@ -781,7 +781,7 @@ set_rcvbuf:
+                * returning the value we actually used in getsockopt
+                * is the most desirable behavior.
+                */
+-              sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF);
++              sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
+               break;
+ 
+       case SO_RCVBUFFORCE:
+diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
+index 861e1fa25d5e..0759f5b9180e 100644
+--- a/net/dccp/ipv4.c
++++ b/net/dccp/ipv4.c
+@@ -698,6 +698,7 @@ int dccp_invalid_packet(struct sk_buff *skb)
+ {
+       const struct dccp_hdr *dh;
+       unsigned int cscov;
++      u8 dccph_doff;
+ 
+       if (skb->pkt_type != PACKET_HOST)
+               return 1;
+@@ -719,18 +720,19 @@ int dccp_invalid_packet(struct sk_buff *skb)
+       /*
+        * If P.Data Offset is too small for packet type, drop packet and return
+        */
+-      if (dh->dccph_doff < dccp_hdr_len(skb) / sizeof(u32)) {
+-              DCCP_WARN("P.Data Offset(%u) too small\n", dh->dccph_doff);
++      dccph_doff = dh->dccph_doff;
++      if (dccph_doff < dccp_hdr_len(skb) / sizeof(u32)) {
++              DCCP_WARN("P.Data Offset(%u) too small\n", dccph_doff);
+               return 1;
+       }
+       /*
+        * If P.Data Offset is too too large for packet, drop packet and return
+        */
+-      if (!pskb_may_pull(skb, dh->dccph_doff * sizeof(u32))) {
+-              DCCP_WARN("P.Data Offset(%u) too large\n", dh->dccph_doff);
++      if (!pskb_may_pull(skb, dccph_doff * sizeof(u32))) {
++              DCCP_WARN("P.Data Offset(%u) too large\n", dccph_doff);
+               return 1;
+       }
+-
++      dh = dccp_hdr(skb);
+       /*
+        * If P.type is not Data, Ack, or DataAck and P.X == 0 (the packet
+        * has short sequence numbers), drop packet and return
+diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
+index d95631d09248..20fb25e3027b 100644
+--- a/net/ipv4/esp4.c
++++ b/net/ipv4/esp4.c
+@@ -476,7 +476,7 @@ static int esp_input(struct xfrm_state *x, struct sk_buff 
*skb)
+               esph = (void *)skb_push(skb, 4);
+               *seqhi = esph->spi;
+               esph->spi = esph->seq_no;
+-              esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.input.hi);
++              esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
+               aead_request_set_callback(req, 0, esp_input_done_esn, skb);
+       }
+ 
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index f2ad5216c438..2b7283303650 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -102,6 +102,9 @@ int __ip_local_out(struct net *net, struct sock *sk, 
struct sk_buff *skb)
+ 
+       iph->tot_len = htons(skb->len);
+       ip_send_check(iph);
++
++      skb->protocol = htons(ETH_P_IP);
++
+       return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT,
+                      net, sk, skb, NULL, skb_dst(skb)->dev,
+                      dst_output);
+diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
+index aa67e0e64b69..23160d2b3f71 100644
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -660,6 +660,10 @@ int ping_common_sendmsg(int family, struct msghdr *msg, 
size_t len,
+       if (len > 0xFFFF)
+               return -EMSGSIZE;
+ 
++      /* Must have at least a full ICMP header. */
++      if (len < icmph_len)
++              return -EINVAL;
++
+       /*
+        *      Check the flags.
+        */
+diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
+index 060a60b2f8a6..111ba55fd512 100644
+--- a/net/ipv6/esp6.c
++++ b/net/ipv6/esp6.c
+@@ -418,7 +418,7 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff 
*skb)
+               esph = (void *)skb_push(skb, 4);
+               *seqhi = esph->spi;
+               esph->spi = esph->seq_no;
+-              esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.input.hi);
++              esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
+               aead_request_set_callback(req, 0, esp_input_done_esn, skb);
+       }
+ 
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index e8878886eba4..2994d1f1a661 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -1043,6 +1043,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
+       struct ipv6_tel_txoption opt;
+       struct dst_entry *dst = NULL, *ndst = NULL;
+       struct net_device *tdev;
++      bool use_cache = false;
+       int mtu;
+       unsigned int max_headroom = sizeof(struct ipv6hdr);
+       u8 proto;
+@@ -1070,7 +1071,15 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
+ 
+               memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
+               neigh_release(neigh);
+-      } else if (!fl6->flowi6_mark)
++      } else if (!(t->parms.flags &
++                   (IP6_TNL_F_USE_ORIG_TCLASS | IP6_TNL_F_USE_ORIG_FWMARK))) {
++              /* enable the cache only only if the routing decision does
++               * not depend on the current inner header value
++               */
++              use_cache = true;
++      }
++
++      if (use_cache)
+               dst = ip6_tnl_dst_get(t);
+ 
+       if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr))
+@@ -1134,7 +1143,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
+               skb = new_skb;
+       }
+ 
+-      if (!fl6->flowi6_mark && ndst)
++      if (use_cache && ndst)
+               ip6_tnl_dst_set(t, ndst);
+       skb_dst_set(skb, dst);
+ 
+diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
+index 462f2a76b5c2..1d184322a7b1 100644
+--- a/net/ipv6/output_core.c
++++ b/net/ipv6/output_core.c
+@@ -148,6 +148,8 @@ int __ip6_local_out(struct net *net, struct sock *sk, 
struct sk_buff *skb)
+       ipv6_hdr(skb)->payload_len = htons(len);
+       IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);
+ 
++      skb->protocol = htons(ETH_P_IPV6);
++
+       return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
+                      net, sk, skb, NULL, skb_dst(skb)->dev,
+                      dst_output);
+diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
+index 42de4ccd159f..d0e906d39642 100644
+--- a/net/l2tp/l2tp_ip.c
++++ b/net/l2tp/l2tp_ip.c
+@@ -251,8 +251,6 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr 
*uaddr, int addr_len)
+       int ret;
+       int chk_addr_ret;
+ 
+-      if (!sock_flag(sk, SOCK_ZAPPED))
+-              return -EINVAL;
+       if (addr_len < sizeof(struct sockaddr_l2tpip))
+               return -EINVAL;
+       if (addr->l2tp_family != AF_INET)
+@@ -267,6 +265,9 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr 
*uaddr, int addr_len)
+       read_unlock_bh(&l2tp_ip_lock);
+ 
+       lock_sock(sk);
++      if (!sock_flag(sk, SOCK_ZAPPED))
++              goto out;
++
+       if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct 
sockaddr_l2tpip))
+               goto out;
+ 
+diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
+index 9ee4ddb6b397..3c4f867d3633 100644
+--- a/net/l2tp/l2tp_ip6.c
++++ b/net/l2tp/l2tp_ip6.c
+@@ -266,8 +266,6 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr 
*uaddr, int addr_len)
+       int addr_type;
+       int err;
+ 
+-      if (!sock_flag(sk, SOCK_ZAPPED))
+-              return -EINVAL;
+       if (addr->l2tp_family != AF_INET6)
+               return -EINVAL;
+       if (addr_len < sizeof(*addr))
+@@ -293,6 +291,9 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr 
*uaddr, int addr_len)
+       lock_sock(sk);
+ 
+       err = -EINVAL;
++      if (!sock_flag(sk, SOCK_ZAPPED))
++              goto out_unlock;
++
+       if (sk->sk_state != TCP_CLOSE)
+               goto out_unlock;
+ 
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 28fc283c1ec1..360700a2f46c 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -931,7 +931,6 @@ static void netlink_sock_destruct(struct sock *sk)
+       if (nlk->cb_running) {
+               if (nlk->cb.done)
+                       nlk->cb.done(&nlk->cb);
+-
+               module_put(nlk->cb.module);
+               kfree_skb(nlk->cb.skb);
+       }
+@@ -960,6 +959,14 @@ static void netlink_sock_destruct(struct sock *sk)
+       WARN_ON(nlk_sk(sk)->groups);
+ }
+ 
++static void netlink_sock_destruct_work(struct work_struct *work)
++{
++      struct netlink_sock *nlk = container_of(work, struct netlink_sock,
++                                              work);
++
++      sk_free(&nlk->sk);
++}
++
+ /* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
+  * SMP. Look, when several writers sleep and reader wakes them up, all but one
+  * immediately hit write lock and grab all the cpus. Exclusive sleep solves
+@@ -1265,8 +1272,18 @@ out_module:
+ static void deferred_put_nlk_sk(struct rcu_head *head)
+ {
+       struct netlink_sock *nlk = container_of(head, struct netlink_sock, rcu);
++      struct sock *sk = &nlk->sk;
++
++      if (!atomic_dec_and_test(&sk->sk_refcnt))
++              return;
++
++      if (nlk->cb_running && nlk->cb.done) {
++              INIT_WORK(&nlk->work, netlink_sock_destruct_work);
++              schedule_work(&nlk->work);
++              return;
++      }
+ 
+-      sock_put(&nlk->sk);
++      sk_free(sk);
+ }
+ 
+ static int netlink_release(struct socket *sock)
+diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h
+index 14437d9b1965..df32cb92d9fc 100644
+--- a/net/netlink/af_netlink.h
++++ b/net/netlink/af_netlink.h
+@@ -3,6 +3,7 @@
+ 
+ #include <linux/rhashtable.h>
+ #include <linux/atomic.h>
++#include <linux/workqueue.h>
+ #include <net/sock.h>
+ 
+ #define NLGRPSZ(x)    (ALIGN(x, sizeof(unsigned long) * 8) / 8)
+@@ -53,6 +54,7 @@ struct netlink_sock {
+ 
+       struct rhash_head       node;
+       struct rcu_head         rcu;
++      struct work_struct      work;
+ };
+ 
+ static inline struct netlink_sock *nlk_sk(struct sock *sk)
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 34e4fcfd240b..f223d1c80ccf 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -3572,19 +3572,25 @@ packet_setsockopt(struct socket *sock, int level, int 
optname, char __user *optv
+ 
+               if (optlen != sizeof(val))
+                       return -EINVAL;
+-              if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
+-                      return -EBUSY;
+               if (copy_from_user(&val, optval, sizeof(val)))
+                       return -EFAULT;
+               switch (val) {
+               case TPACKET_V1:
+               case TPACKET_V2:
+               case TPACKET_V3:
+-                      po->tp_version = val;
+-                      return 0;
++                      break;
+               default:
+                       return -EINVAL;
+               }
++              lock_sock(sk);
++              if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
++                      ret = -EBUSY;
++              } else {
++                      po->tp_version = val;
++                      ret = 0;
++              }
++              release_sock(sk);
++              return ret;
+       }
+       case PACKET_RESERVE:
+       {
+@@ -4067,6 +4073,7 @@ static int packet_set_ring(struct sock *sk, union 
tpacket_req_u *req_u,
+       /* Added to avoid minimal code churn */
+       struct tpacket_req *req = &req_u->req;
+ 
++      lock_sock(sk);
+       /* Opening a Tx-ring is NOT supported in TPACKET_V3 */
+       if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
+               WARN(1, "Tx-ring is not supported.\n");
+@@ -4148,7 +4155,6 @@ static int packet_set_ring(struct sock *sk, union 
tpacket_req_u *req_u,
+                       goto out;
+       }
+ 
+-      lock_sock(sk);
+ 
+       /* Detach socket from network */
+       spin_lock(&po->bind_lock);
+@@ -4197,11 +4203,11 @@ static int packet_set_ring(struct sock *sk, union 
tpacket_req_u *req_u,
+               if (!tx_ring)
+                       prb_shutdown_retire_blk_timer(po, rb_queue);
+       }
+-      release_sock(sk);
+ 
+       if (pg_vec)
+               free_pg_vec(pg_vec, order, req->tp_block_nr);
+ out:
++      release_sock(sk);
+       return err;
+ }
+ 
+diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
+index e38a7701f154..c3434e902445 100644
+--- a/net/sched/act_pedit.c
++++ b/net/sched/act_pedit.c
+@@ -104,6 +104,17 @@ static void tcf_pedit_cleanup(struct tc_action *a, int 
bind)
+       kfree(keys);
+ }
+ 
++static bool offset_valid(struct sk_buff *skb, int offset)
++{
++      if (offset > 0 && offset > skb->len)
++              return false;
++
++      if  (offset < 0 && -offset > skb_headroom(skb))
++              return false;
++
++      return true;
++}
++
+ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a,
+                    struct tcf_result *res)
+ {
+@@ -130,6 +141,11 @@ static int tcf_pedit(struct sk_buff *skb, const struct 
tc_action *a,
+                       if (tkey->offmask) {
+                               char *d, _d;
+ 
++                              if (!offset_valid(skb, off + tkey->at)) {
++                                      pr_info("tc filter pedit 'at' offset %d 
out of bounds\n",
++                                              off + tkey->at);
++                                      goto bad;
++                              }
+                               d = skb_header_pointer(skb, off + tkey->at, 1,
+                                                      &_d);
+                               if (!d)
+@@ -142,10 +158,10 @@ static int tcf_pedit(struct sk_buff *skb, const struct 
tc_action *a,
+                                       " offset must be on 32 bit 
boundaries\n");
+                               goto bad;
+                       }
+-                      if (offset > 0 && offset > skb->len) {
+-                              pr_info("tc filter pedit"
+-                                      " offset %d can't exceed pkt length 
%d\n",
+-                                     offset, skb->len);
++
++                      if (!offset_valid(skb, off + offset)) {
++                              pr_info("tc filter pedit offset %d out of 
bounds\n",
++                                      offset);
+                               goto bad;
+                       }
+ 
+diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
+index 0b8c3ace671f..1bf1f4517db6 100644
+--- a/net/sched/cls_basic.c
++++ b/net/sched/cls_basic.c
+@@ -62,9 +62,6 @@ static unsigned long basic_get(struct tcf_proto *tp, u32 
handle)
+       struct basic_head *head = rtnl_dereference(tp->root);
+       struct basic_filter *f;
+ 
+-      if (head == NULL)
+-              return 0UL;
+-
+       list_for_each_entry(f, &head->flist, link) {
+               if (f->handle == handle) {
+                       l = (unsigned long) f;
+@@ -109,7 +106,6 @@ static bool basic_destroy(struct tcf_proto *tp, bool force)
+               tcf_unbind_filter(tp, &f->res);
+               call_rcu(&f->rcu, basic_delete_filter);
+       }
+-      RCU_INIT_POINTER(tp->root, NULL);
+       kfree_rcu(head, rcu);
+       return true;
+ }
+diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
+index 5faaa5425f7b..3eef0215e53f 100644
+--- a/net/sched/cls_bpf.c
++++ b/net/sched/cls_bpf.c
+@@ -199,7 +199,6 @@ static bool cls_bpf_destroy(struct tcf_proto *tp, bool 
force)
+               call_rcu(&prog->rcu, __cls_bpf_delete_prog);
+       }
+ 
+-      RCU_INIT_POINTER(tp->root, NULL);
+       kfree_rcu(head, rcu);
+       return true;
+ }
+@@ -210,9 +209,6 @@ static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 
handle)
+       struct cls_bpf_prog *prog;
+       unsigned long ret = 0UL;
+ 
+-      if (head == NULL)
+-              return 0UL;
+-
+       list_for_each_entry(prog, &head->plist, link) {
+               if (prog->handle == handle) {
+                       ret = (unsigned long) prog;
+diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
+index 4c85bd3a750c..c104c2019feb 100644
+--- a/net/sched/cls_cgroup.c
++++ b/net/sched/cls_cgroup.c
+@@ -130,11 +130,10 @@ static bool cls_cgroup_destroy(struct tcf_proto *tp, 
bool force)
+ 
+       if (!force)
+               return false;
+-
+-      if (head) {
+-              RCU_INIT_POINTER(tp->root, NULL);
++      /* Head can still be NULL due to cls_cgroup_init(). */
++      if (head)
+               call_rcu(&head->rcu, cls_cgroup_destroy_rcu);
+-      }
++
+       return true;
+ }
+ 
+diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
+index fbfec6a18839..d7ba2b4ff0f3 100644
+--- a/net/sched/cls_flow.c
++++ b/net/sched/cls_flow.c
+@@ -583,7 +583,6 @@ static bool flow_destroy(struct tcf_proto *tp, bool force)
+               list_del_rcu(&f->list);
+               call_rcu(&f->rcu, flow_destroy_filter);
+       }
+-      RCU_INIT_POINTER(tp->root, NULL);
+       kfree_rcu(head, rcu);
+       return true;
+ }
+diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
+index 95b021243233..e5a58c82728a 100644
+--- a/net/sched/cls_flower.c
++++ b/net/sched/cls_flower.c
+@@ -13,6 +13,7 @@
+ #include <linux/init.h>
+ #include <linux/module.h>
+ #include <linux/rhashtable.h>
++#include <linux/workqueue.h>
+ 
+ #include <linux/if_ether.h>
+ #include <linux/in6.h>
+@@ -55,7 +56,10 @@ struct cls_fl_head {
+       bool mask_assigned;
+       struct list_head filters;
+       struct rhashtable_params ht_params;
+-      struct rcu_head rcu;
++      union {
++              struct work_struct work;
++              struct rcu_head rcu;
++      };
+ };
+ 
+ struct cls_fl_filter {
+@@ -165,6 +169,24 @@ static void fl_destroy_filter(struct rcu_head *head)
+       kfree(f);
+ }
+ 
++static void fl_destroy_sleepable(struct work_struct *work)
++{
++      struct cls_fl_head *head = container_of(work, struct cls_fl_head,
++                                              work);
++      if (head->mask_assigned)
++              rhashtable_destroy(&head->ht);
++      kfree(head);
++      module_put(THIS_MODULE);
++}
++
++static void fl_destroy_rcu(struct rcu_head *rcu)
++{
++      struct cls_fl_head *head = container_of(rcu, struct cls_fl_head, rcu);
++
++      INIT_WORK(&head->work, fl_destroy_sleepable);
++      schedule_work(&head->work);
++}
++
+ static bool fl_destroy(struct tcf_proto *tp, bool force)
+ {
+       struct cls_fl_head *head = rtnl_dereference(tp->root);
+@@ -177,10 +199,9 @@ static bool fl_destroy(struct tcf_proto *tp, bool force)
+               list_del_rcu(&f->list);
+               call_rcu(&f->rcu, fl_destroy_filter);
+       }
+-      RCU_INIT_POINTER(tp->root, NULL);
+-      if (head->mask_assigned)
+-              rhashtable_destroy(&head->ht);
+-      kfree_rcu(head, rcu);
++
++      __module_get(THIS_MODULE);
++      call_rcu(&head->rcu, fl_destroy_rcu);
+       return true;
+ }
+ 
+diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
+index f9c9fc075fe6..9992dfac6938 100644
+--- a/net/sched/cls_rsvp.h
++++ b/net/sched/cls_rsvp.h
+@@ -152,7 +152,8 @@ static int rsvp_classify(struct sk_buff *skb, const struct 
tcf_proto *tp,
+               return -1;
+       nhptr = ip_hdr(skb);
+ #endif
+-
++      if (unlikely(!head))
++              return -1;
+ restart:
+ 
+ #if RSVP_DST_LEN == 4
+diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
+index 944c8ff45055..403746b20263 100644
+--- a/net/sched/cls_tcindex.c
++++ b/net/sched/cls_tcindex.c
+@@ -503,7 +503,6 @@ static bool tcindex_destroy(struct tcf_proto *tp, bool 
force)
+       walker.fn = tcindex_destroy_element;
+       tcindex_walk(tp, &walker);
+ 
+-      RCU_INIT_POINTER(tp->root, NULL);
+       call_rcu(&p->rcu, __tcindex_destroy);
+       return true;
+ }
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 824cc1e160bc..73f75258ce46 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -2194,7 +2194,8 @@ out:
+  *    Sleep until more data has arrived. But check for races..
+  */
+ static long unix_stream_data_wait(struct sock *sk, long timeo,
+-                                struct sk_buff *last, unsigned int last_len)
++                                struct sk_buff *last, unsigned int last_len,
++                                bool freezable)
+ {
+       struct sk_buff *tail;
+       DEFINE_WAIT(wait);
+@@ -2215,7 +2216,10 @@ static long unix_stream_data_wait(struct sock *sk, long 
timeo,
+ 
+               sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
+               unix_state_unlock(sk);
+-              timeo = freezable_schedule_timeout(timeo);
++              if (freezable)
++                      timeo = freezable_schedule_timeout(timeo);
++              else
++                      timeo = schedule_timeout(timeo);
+               unix_state_lock(sk);
+ 
+               if (sock_flag(sk, SOCK_DEAD))
+@@ -2245,7 +2249,8 @@ struct unix_stream_read_state {
+       unsigned int splice_flags;
+ };
+ 
+-static int unix_stream_read_generic(struct unix_stream_read_state *state)
++static int unix_stream_read_generic(struct unix_stream_read_state *state,
++                                  bool freezable)
+ {
+       struct scm_cookie scm;
+       struct socket *sock = state->socket;
+@@ -2324,7 +2329,7 @@ again:
+                       mutex_unlock(&u->iolock);
+ 
+                       timeo = unix_stream_data_wait(sk, timeo, last,
+-                                                    last_len);
++                                                    last_len, freezable);
+ 
+                       if (signal_pending(current)) {
+                               err = sock_intr_errno(timeo);
+@@ -2466,7 +2471,7 @@ static int unix_stream_recvmsg(struct socket *sock, 
struct msghdr *msg,
+               .flags = flags
+       };
+ 
+-      return unix_stream_read_generic(&state);
++      return unix_stream_read_generic(&state, true);
+ }
+ 
+ static ssize_t skb_unix_socket_splice(struct sock *sk,
+@@ -2512,7 +2517,7 @@ static ssize_t unix_stream_splice_read(struct socket 
*sock,  loff_t *ppos,
+           flags & SPLICE_F_NONBLOCK)
+               state.flags = MSG_DONTWAIT;
+ 
+-      return unix_stream_read_generic(&state);
++      return unix_stream_read_generic(&state, false);
+ }
+ 
+ static int unix_shutdown(struct socket *sock, int mode)
diff --git a/debian/patches/bugfix/all/stable-4.4.39.patch 
b/debian/patches/bugfix/all/stable-4.4.39.patch
new file mode 100644
index 0000000..5b3ea9d
--- /dev/null
+++ b/debian/patches/bugfix/all/stable-4.4.39.patch
@@ -0,0 +1,513 @@
+diff --git a/Makefile b/Makefile
+index 6876efe0d735..88d26a632bef 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 4
+-SUBLEVEL = 38
++SUBLEVEL = 39
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+ 
+diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
+index 007a69fc4f40..5f3ab8c1db55 100644
+--- a/arch/arm64/include/asm/futex.h
++++ b/arch/arm64/include/asm/futex.h
+@@ -121,6 +121,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+               return -EFAULT;
+ 
+       asm volatile("// futex_atomic_cmpxchg_inatomic\n"
++ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
+ "     prfm    pstl1strm, %2\n"
+ "1:   ldxr    %w1, %2\n"
+ "     sub     %w3, %w1, %w4\n"
+@@ -137,6 +138,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ "     .align  3\n"
+ "     .quad   1b, 4b, 2b, 4b\n"
+ "     .popsection\n"
++ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
+       : "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp)
+       : "r" (oldval), "r" (newval), "Ir" (-EFAULT)
+       : "memory");
+diff --git a/arch/m68k/include/asm/delay.h b/arch/m68k/include/asm/delay.h
+index d28fa8fe26fe..c598d847d56b 100644
+--- a/arch/m68k/include/asm/delay.h
++++ b/arch/m68k/include/asm/delay.h
+@@ -114,6 +114,6 @@ static inline void __udelay(unsigned long usecs)
+  */
+ #define       HZSCALE         (268435456 / (1000000 / HZ))
+ 
+-#define ndelay(n) __delay(DIV_ROUND_UP((n) * ((((HZSCALE) >> 11) * 
(loops_per_jiffy >> 11)) >> 6), 1000));
++#define ndelay(n) __delay(DIV_ROUND_UP((n) * ((((HZSCALE) >> 11) * 
(loops_per_jiffy >> 11)) >> 6), 1000))
+ 
+ #endif /* defined(_M68K_DELAY_H) */
+diff --git a/arch/parisc/include/asm/pgtable.h 
b/arch/parisc/include/asm/pgtable.h
+index c2c43f714684..3a4ed9f91d57 100644
+--- a/arch/parisc/include/asm/pgtable.h
++++ b/arch/parisc/include/asm/pgtable.h
+@@ -65,9 +65,9 @@ static inline void purge_tlb_entries(struct mm_struct *mm, 
unsigned long addr)
+               unsigned long flags;                            \
+               spin_lock_irqsave(&pa_tlb_lock, flags);         \
+               old_pte = *ptep;                                \
+-              set_pte(ptep, pteval);                          \
+               if (pte_inserted(old_pte))                      \
+                       purge_tlb_entries(mm, addr);            \
++              set_pte(ptep, pteval);                          \
+               spin_unlock_irqrestore(&pa_tlb_lock, flags);    \
+       } while (0)
+ 
+@@ -478,8 +478,8 @@ static inline int ptep_test_and_clear_young(struct 
vm_area_struct *vma, unsigned
+               spin_unlock_irqrestore(&pa_tlb_lock, flags);
+               return 0;
+       }
+-      set_pte(ptep, pte_mkold(pte));
+       purge_tlb_entries(vma->vm_mm, addr);
++      set_pte(ptep, pte_mkold(pte));
+       spin_unlock_irqrestore(&pa_tlb_lock, flags);
+       return 1;
+ }
+@@ -492,9 +492,9 @@ static inline pte_t ptep_get_and_clear(struct mm_struct 
*mm, unsigned long addr,
+ 
+       spin_lock_irqsave(&pa_tlb_lock, flags);
+       old_pte = *ptep;
+-      set_pte(ptep, __pte(0));
+       if (pte_inserted(old_pte))
+               purge_tlb_entries(mm, addr);
++      set_pte(ptep, __pte(0));
+       spin_unlock_irqrestore(&pa_tlb_lock, flags);
+ 
+       return old_pte;
+@@ -504,8 +504,8 @@ static inline void ptep_set_wrprotect(struct mm_struct 
*mm, unsigned long addr,
+ {
+       unsigned long flags;
+       spin_lock_irqsave(&pa_tlb_lock, flags);
+-      set_pte(ptep, pte_wrprotect(*ptep));
+       purge_tlb_entries(mm, addr);
++      set_pte(ptep, pte_wrprotect(*ptep));
+       spin_unlock_irqrestore(&pa_tlb_lock, flags);
+ }
+ 
+diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
+index fd5979f28ada..6857a104b2f9 100644
+--- a/arch/parisc/kernel/cache.c
++++ b/arch/parisc/kernel/cache.c
+@@ -375,6 +375,15 @@ void __init parisc_setup_cache_timing(void)
+ 
+       /* calculate TLB flush threshold */
+ 
++      /* On SMP machines, skip the TLB measure of kernel text which
++       * has been mapped as huge pages. */
++      if (num_online_cpus() > 1 && !parisc_requires_coherency()) {
++              threshold = max(cache_info.it_size, cache_info.dt_size);
++              threshold *= PAGE_SIZE;
++              threshold /= num_online_cpus();
++              goto set_tlb_threshold;
++      }
++
+       alltime = mfctl(16);
+       flush_tlb_all();
+       alltime = mfctl(16) - alltime;
+@@ -393,6 +402,8 @@ void __init parisc_setup_cache_timing(void)
+               alltime, size, rangetime);
+ 
+       threshold = PAGE_ALIGN(num_online_cpus() * size * alltime / rangetime);
++
++set_tlb_threshold:
+       if (threshold)
+               parisc_tlb_flush_threshold = threshold;
+       printk(KERN_INFO "TLB flush threshold set to %lu KiB\n",
+diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
+index 675521919229..a4761b772406 100644
+--- a/arch/parisc/kernel/pacache.S
++++ b/arch/parisc/kernel/pacache.S
+@@ -886,19 +886,10 @@ ENTRY(flush_dcache_page_asm)
+       fdc,m           r31(%r28)
+       fdc,m           r31(%r28)
+       fdc,m           r31(%r28)
+-      cmpb,COND(<<)           %r28, %r25,1b
++      cmpb,COND(<<)   %r28, %r25,1b
+       fdc,m           r31(%r28)
+ 
+       sync
+-
+-#ifdef CONFIG_PA20
+-      pdtlb,l         %r0(%r25)
+-#else
+-      tlb_lock        %r20,%r21,%r22
+-      pdtlb           %r0(%r25)
+-      tlb_unlock      %r20,%r21,%r22
+-#endif
+-
+       bv              %r0(%r2)
+       nop
+       .exit
+@@ -973,17 +964,6 @@ ENTRY(flush_icache_page_asm)
+       fic,m           %r31(%sr4,%r28)
+ 
+       sync
+-
+-#ifdef CONFIG_PA20
+-      pdtlb,l         %r0(%r28)
+-      pitlb,l         %r0(%sr4,%r25)
+-#else
+-      tlb_lock        %r20,%r21,%r22
+-      pdtlb           %r0(%r28)
+-      pitlb           %r0(%sr4,%r25)
+-      tlb_unlock      %r20,%r21,%r22
+-#endif
+-
+       bv              %r0(%r2)
+       nop
+       .exit
+diff --git a/arch/powerpc/kernel/eeh_driver.c 
b/arch/powerpc/kernel/eeh_driver.c
+index c07bfb52275e..300382e5a2cc 100644
+--- a/arch/powerpc/kernel/eeh_driver.c
++++ b/arch/powerpc/kernel/eeh_driver.c
+@@ -612,8 +612,10 @@ static int eeh_reset_device(struct eeh_pe *pe, struct 
pci_bus *bus)
+ 
+       /* Clear frozen state */
+       rc = eeh_clear_pe_frozen_state(pe, false);
+-      if (rc)
++      if (rc) {
++              pci_unlock_rescan_remove();
+               return rc;
++      }
+ 
+       /* Give the system 5 seconds to finish running the user-space
+        * hotplug shutdown scripts, e.g. ifdown for ethernet.  Yes,
+diff --git a/arch/x86/kernel/cpu/perf_event.c 
b/arch/x86/kernel/cpu/perf_event.c
+index a3aeb2cc361e..1a8256dd6729 100644
+--- a/arch/x86/kernel/cpu/perf_event.c
++++ b/arch/x86/kernel/cpu/perf_event.c
+@@ -67,7 +67,7 @@ u64 x86_perf_event_update(struct perf_event *event)
+       int shift = 64 - x86_pmu.cntval_bits;
+       u64 prev_raw_count, new_raw_count;
+       int idx = hwc->idx;
+-      s64 delta;
++      u64 delta;
+ 
+       if (idx == INTEL_PMC_IDX_FIXED_BTS)
+               return 0;
+diff --git a/arch/x86/kernel/cpu/perf_event_intel.c 
b/arch/x86/kernel/cpu/perf_event_intel.c
+index 5f82cd59f0e5..5cc2242d77c6 100644
+--- a/arch/x86/kernel/cpu/perf_event_intel.c
++++ b/arch/x86/kernel/cpu/perf_event_intel.c
+@@ -3636,7 +3636,7 @@ __init int intel_pmu_init(void)
+ 
+       /* Support full width counters using alternative MSR range */
+       if (x86_pmu.intel_cap.full_width_write) {
+-              x86_pmu.max_period = x86_pmu.cntval_mask;
++              x86_pmu.max_period = x86_pmu.cntval_mask >> 1;
+               x86_pmu.perfctr = MSR_IA32_PMC0;
+               pr_cont("full-width counters, ");
+       }
+diff --git a/crypto/Makefile b/crypto/Makefile
+index f7aba923458d..82fbff180ad3 100644
+--- a/crypto/Makefile
++++ b/crypto/Makefile
+@@ -33,6 +33,7 @@ obj-$(CONFIG_CRYPTO_AKCIPHER2) += akcipher.o
+ 
+ $(obj)/rsapubkey-asn1.o: $(obj)/rsapubkey-asn1.c $(obj)/rsapubkey-asn1.h
+ $(obj)/rsaprivkey-asn1.o: $(obj)/rsaprivkey-asn1.c $(obj)/rsaprivkey-asn1.h
++$(obj)/rsa_helper.o: $(obj)/rsapubkey-asn1.h $(obj)/rsaprivkey-asn1.h
+ clean-files += rsapubkey-asn1.c rsapubkey-asn1.h
+ clean-files += rsaprivkey-asn1.c rsaprivkey-asn1.h
+ 
+diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c
+index fe5b495a434d..a0ceb41d5ccc 100644
+--- a/crypto/mcryptd.c
++++ b/crypto/mcryptd.c
+@@ -258,18 +258,22 @@ out_free_inst:
+       goto out;
+ }
+ 
+-static inline void mcryptd_check_internal(struct rtattr **tb, u32 *type,
++static inline bool mcryptd_check_internal(struct rtattr **tb, u32 *type,
+                                         u32 *mask)
+ {
+       struct crypto_attr_type *algt;
+ 
+       algt = crypto_get_attr_type(tb);
+       if (IS_ERR(algt))
+-              return;
+-      if ((algt->type & CRYPTO_ALG_INTERNAL))
+-              *type |= CRYPTO_ALG_INTERNAL;
+-      if ((algt->mask & CRYPTO_ALG_INTERNAL))
+-              *mask |= CRYPTO_ALG_INTERNAL;
++              return false;
++
++      *type |= algt->type & CRYPTO_ALG_INTERNAL;
++      *mask |= algt->mask & CRYPTO_ALG_INTERNAL;
++
++      if (*type & *mask & CRYPTO_ALG_INTERNAL)
++              return true;
++      else
++              return false;
+ }
+ 
+ static int mcryptd_hash_init_tfm(struct crypto_tfm *tfm)
+@@ -498,7 +502,8 @@ static int mcryptd_create_hash(struct crypto_template 
*tmpl, struct rtattr **tb,
+       u32 mask = 0;
+       int err;
+ 
+-      mcryptd_check_internal(tb, &type, &mask);
++      if (!mcryptd_check_internal(tb, &type, &mask))
++              return -EINVAL;
+ 
+       salg = shash_attr_alg(tb[1], type, mask);
+       if (IS_ERR(salg))
+diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
+index 1770c455dfdd..1648de80e230 100644
+--- a/drivers/block/zram/zram_drv.c
++++ b/drivers/block/zram/zram_drv.c
+@@ -1378,8 +1378,14 @@ static ssize_t hot_remove_store(struct class *class,
+       return ret ? ret : count;
+ }
+ 
++/*
++ * NOTE: hot_add attribute is not the usual read-only sysfs attribute. In a
++ * sense that reading from this file does alter the state of your system -- it
++ * creates a new un-initialized zram device and returns back this device's
++ * device_id (or an error code if it fails to create a new device).
++ */
+ static struct class_attribute zram_control_class_attrs[] = {
+-      __ATTR_RO(hot_add),
++      __ATTR(hot_add, 0400, hot_add_show, NULL),
+       __ATTR_WO(hot_remove),
+       __ATTR_NULL,
+ };
+diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c 
b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+index 5a2e341a6d1e..91be4575b524 100644
+--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
++++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+@@ -872,23 +872,25 @@ lbl_free_candev:
+ static void peak_usb_disconnect(struct usb_interface *intf)
+ {
+       struct peak_usb_device *dev;
++      struct peak_usb_device *dev_prev_siblings;
+ 
+       /* unregister as many netdev devices as siblings */
+-      for (dev = usb_get_intfdata(intf); dev; dev = dev->prev_siblings) {
++      for (dev = usb_get_intfdata(intf); dev; dev = dev_prev_siblings) {
+               struct net_device *netdev = dev->netdev;
+               char name[IFNAMSIZ];
+ 
++              dev_prev_siblings = dev->prev_siblings;
+               dev->state &= ~PCAN_USB_STATE_CONNECTED;
+               strncpy(name, netdev->name, IFNAMSIZ);
+ 
+               unregister_netdev(netdev);
+-              free_candev(netdev);
+ 
+               kfree(dev->cmd_buf);
+               dev->next_siblings = NULL;
+               if (dev->adapter->dev_free)
+                       dev->adapter->dev_free(dev);
+ 
++              free_candev(netdev);
+               dev_info(&intf->dev, "%s removed\n", name);
+       }
+ 
+diff --git a/include/linux/cpu.h b/include/linux/cpu.h
+index d2ca8c38f9c4..3ea9aae2387d 100644
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -131,22 +131,16 @@ enum {
+               { .notifier_call = fn, .priority = pri };       \
+       __register_cpu_notifier(&fn##_nb);                      \
+ }
+-#else /* #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
+-#define cpu_notifier(fn, pri) do { (void)(fn); } while (0)
+-#define __cpu_notifier(fn, pri)       do { (void)(fn); } while (0)
+-#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
+ 
+-#ifdef CONFIG_HOTPLUG_CPU
+ extern int register_cpu_notifier(struct notifier_block *nb);
+ extern int __register_cpu_notifier(struct notifier_block *nb);
+ extern void unregister_cpu_notifier(struct notifier_block *nb);
+ extern void __unregister_cpu_notifier(struct notifier_block *nb);
+-#else
+ 
+-#ifndef MODULE
+-extern int register_cpu_notifier(struct notifier_block *nb);
+-extern int __register_cpu_notifier(struct notifier_block *nb);
+-#else
++#else /* #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
++#define cpu_notifier(fn, pri) do { (void)(fn); } while (0)
++#define __cpu_notifier(fn, pri)       do { (void)(fn); } while (0)
++
+ static inline int register_cpu_notifier(struct notifier_block *nb)
+ {
+       return 0;
+@@ -156,7 +150,6 @@ static inline int __register_cpu_notifier(struct notifier_block *nb)
+ {
+       return 0;
+ }
+-#endif
+ 
+ static inline void unregister_cpu_notifier(struct notifier_block *nb)
+ {
+diff --git a/include/uapi/linux/can.h b/include/uapi/linux/can.h
+index 9692cda5f8fc..c48d93a28d1a 100644
+--- a/include/uapi/linux/can.h
++++ b/include/uapi/linux/can.h
+@@ -196,5 +196,6 @@ struct can_filter {
+ };
+ 
+ #define CAN_INV_FILTER 0x20000000U /* to be set in can_filter.can_id */
++#define CAN_RAW_FILTER_MAX 512 /* maximum number of can_filter set via setsockopt() */
+ 
+ #endif /* !_UAPI_CAN_H */
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index 85ff5e26e23b..cd6d1258554e 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -223,8 +223,6 @@ static int cpu_notify(unsigned long val, void *v)
+       return __cpu_notify(val, v, -1, NULL);
+ }
+ 
+-#ifdef CONFIG_HOTPLUG_CPU
+-
+ static void cpu_notify_nofail(unsigned long val, void *v)
+ {
+       BUG_ON(cpu_notify(val, v));
+@@ -246,6 +244,7 @@ void __unregister_cpu_notifier(struct notifier_block *nb)
+ }
+ EXPORT_SYMBOL(__unregister_cpu_notifier);
+ 
++#ifdef CONFIG_HOTPLUG_CPU
+ /**
+  * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
+  * @cpu: a CPU id
+diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
+index 8251e75dd9c0..b066724d7a5b 100644
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -65,8 +65,72 @@ static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
+ 
+ static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
+ {
+-      if (!rt_mutex_has_waiters(lock))
+-              clear_rt_mutex_waiters(lock);
++      unsigned long owner, *p = (unsigned long *) &lock->owner;
++
++      if (rt_mutex_has_waiters(lock))
++              return;
++
++      /*
++       * The rbtree has no waiters enqueued, now make sure that the
++       * lock->owner still has the waiters bit set, otherwise the
++       * following can happen:
++       *
++       * CPU 0        CPU 1           CPU2
++       * l->owner=T1
++       *              rt_mutex_lock(l)
++       *              lock(l->lock)
++       *              l->owner = T1 | HAS_WAITERS;
++       *              enqueue(T2)
++       *              boost()
++       *                unlock(l->lock)
++       *              block()
++       *
++       *                              rt_mutex_lock(l)
++       *                              lock(l->lock)
++       *                              l->owner = T1 | HAS_WAITERS;
++       *                              enqueue(T3)
++       *                              boost()
++       *                                unlock(l->lock)
++       *                              block()
++       *              signal(->T2)    signal(->T3)
++       *              lock(l->lock)
++       *              dequeue(T2)
++       *              deboost()
++       *                unlock(l->lock)
++       *                              lock(l->lock)
++       *                              dequeue(T3)
++       *                               ==> wait list is empty
++       *                              deboost()
++       *                               unlock(l->lock)
++       *              lock(l->lock)
++       *              fixup_rt_mutex_waiters()
++       *                if (wait_list_empty(l) {
++       *                  l->owner = owner
++       *                  owner = l->owner & ~HAS_WAITERS;
++       *                    ==> l->owner = T1
++       *                }
++       *                              lock(l->lock)
++       * rt_mutex_unlock(l)           fixup_rt_mutex_waiters()
++       *                                if (wait_list_empty(l) {
++       *                                  owner = l->owner & ~HAS_WAITERS;
++       * cmpxchg(l->owner, T1, NULL)
++       *  ===> Success (l->owner = NULL)
++       *
++       *                                  l->owner = owner
++       *                                    ==> l->owner = T1
++       *                                }
++       *
++       * With the check for the waiter bit in place T3 on CPU2 will not
++       * overwrite. All tasks fiddling with the waiters bit are
++       * serialized by l->lock, so nothing else can modify the waiters
++       * bit. If the bit is set then nothing can change l->owner either
++       * so the simple RMW is safe. The cmpxchg() will simply fail if it
++       * happens in the middle of the RMW because the waiters bit is
++       * still set.
++       */
++      owner = READ_ONCE(*p);
++      if (owner & RT_MUTEX_HAS_WAITERS)
++              WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
+ }
+ 
+ /*
+diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
+index 4f5f83c7d2d3..e317e1cbb3eb 100644
+--- a/kernel/locking/rtmutex_common.h
++++ b/kernel/locking/rtmutex_common.h
+@@ -75,8 +75,9 @@ task_top_pi_waiter(struct task_struct *p)
+ 
+ static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
+ {
+-      return (struct task_struct *)
+-              ((unsigned long)lock->owner & ~RT_MUTEX_OWNER_MASKALL);
++      unsigned long owner = (unsigned long) READ_ONCE(lock->owner);
++
++      return (struct task_struct *) (owner & ~RT_MUTEX_OWNER_MASKALL);
+ }
+ 
+ /*
+diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
+index 83b0ca27a45e..f2079acb555d 100644
+--- a/net/batman-adv/translation-table.c
++++ b/net/batman-adv/translation-table.c
+@@ -2764,7 +2764,7 @@ static bool batadv_send_my_tt_response(struct batadv_priv *bat_priv,
+                                                            &tvlv_tt_data,
+                                                            &tt_change,
+                                                            &tt_len);
+-              if (!tt_len)
++              if (!tt_len || !tvlv_len)
+                       goto unlock;
+ 
+               /* Copy the last orig_node's OGM buffer */
+@@ -2782,7 +2782,7 @@ static bool batadv_send_my_tt_response(struct batadv_priv *bat_priv,
+                                                            &tvlv_tt_data,
+                                                            &tt_change,
+                                                            &tt_len);
+-              if (!tt_len)
++              if (!tt_len || !tvlv_len)
+                       goto out;
+ 
+               /* fill the rest of the tvlv with the real TT entries */
+diff --git a/net/can/raw.c b/net/can/raw.c
+index 2e67b1423cd3..56af689ca999 100644
+--- a/net/can/raw.c
++++ b/net/can/raw.c
+@@ -499,6 +499,9 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
+               if (optlen % sizeof(struct can_filter) != 0)
+                       return -EINVAL;
+ 
++              if (optlen > CAN_RAW_FILTER_MAX * sizeof(struct can_filter))
++                      return -EINVAL;
++
+               count = optlen / sizeof(struct can_filter);
+ 
+               if (count > 1) {
diff --git a/debian/patches/series b/debian/patches/series
index 6af421a..93b8400 100644
--- a/debian/patches/series
+++ b/debian/patches/series
@@ -139,3 +139,4 @@
 bugfix/all/stable-4.4.36.patch
 bugfix/all/stable-4.4.37.patch
 bugfix/all/stable-4.4.38.patch
+bugfix/all/stable-4.4.39.patch

-- 
To view, visit https://gerrit.wikimedia.org/r/328649
To unsubscribe, visit https://gerrit.wikimedia.org/r/settings

Gerrit-MessageType: newchange
Gerrit-Change-Id: Ice952e652eec0d3d4616fddb7ffe6e23c32e3e11
Gerrit-PatchSet: 1
Gerrit-Project: operations/debs/linux44
Gerrit-Branch: master
Gerrit-Owner: Muehlenhoff <mmuhlenh...@wikimedia.org>

_______________________________________________
MediaWiki-commits mailing list
MediaWiki-commits@lists.wikimedia.org
https://lists.wikimedia.org/mailman/listinfo/mediawiki-commits

Reply via email to