commit:     833529f439f2d25ab9197573f88dad1b0544ebb5
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Mar 11 10:14:28 2020 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Mar 11 10:14:28 2020 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=833529f4

Linux patch 4.4.216

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1215_linux-4.4.216.patch | 2919 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2923 insertions(+)

diff --git a/0000_README b/0000_README
index b4dfa87..5c1fd6b 100644
--- a/0000_README
+++ b/0000_README
@@ -903,6 +903,10 @@ Patch:  1214_linux-4.4.215.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.4.215
 
+Patch:  1215_linux-4.4.216.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.4.216
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1215_linux-4.4.216.patch b/1215_linux-4.4.216.patch
new file mode 100644
index 0000000..04d770a
--- /dev/null
+++ b/1215_linux-4.4.216.patch
@@ -0,0 +1,2919 @@
+diff --git a/Makefile b/Makefile
+index 9118ca43acb4..e0bcd5a0ae9b 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 4
+-SUBLEVEL = 215
++SUBLEVEL = 216
+ EXTRAVERSION =
+ NAME = Blurry Fish Butt
+ 
+diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile
+index fb689d813b09..6358ea48eaf9 100644
+--- a/arch/arm/mach-imx/Makefile
++++ b/arch/arm/mach-imx/Makefile
+@@ -91,6 +91,8 @@ AFLAGS_suspend-imx6.o :=-Wa,-march=armv7-a
+ obj-$(CONFIG_SOC_IMX6) += suspend-imx6.o
+ obj-$(CONFIG_SOC_IMX53) += suspend-imx53.o
+ endif
++AFLAGS_resume-imx6.o :=-Wa,-march=armv7-a
++obj-$(CONFIG_SOC_IMX6) += resume-imx6.o
+ obj-$(CONFIG_SOC_IMX6) += pm-imx6.o
+ 
+ obj-$(CONFIG_SOC_IMX50) += mach-imx50.o
+diff --git a/arch/arm/mach-imx/common.h b/arch/arm/mach-imx/common.h
+index e2d53839fceb..288f57222745 100644
+--- a/arch/arm/mach-imx/common.h
++++ b/arch/arm/mach-imx/common.h
+@@ -115,17 +115,17 @@ void imx_cpu_die(unsigned int cpu);
+ int imx_cpu_kill(unsigned int cpu);
+ 
+ #ifdef CONFIG_SUSPEND
+-void v7_cpu_resume(void);
+ void imx53_suspend(void __iomem *ocram_vbase);
+ extern const u32 imx53_suspend_sz;
+ void imx6_suspend(void __iomem *ocram_vbase);
+ #else
+-static inline void v7_cpu_resume(void) {}
+ static inline void imx53_suspend(void __iomem *ocram_vbase) {}
+ static const u32 imx53_suspend_sz;
+ static inline void imx6_suspend(void __iomem *ocram_vbase) {}
+ #endif
+ 
++void v7_cpu_resume(void);
++
+ void imx6_pm_ccm_init(const char *ccm_compat);
+ void imx6q_pm_init(void);
+ void imx6dl_pm_init(void);
+diff --git a/arch/arm/mach-imx/resume-imx6.S b/arch/arm/mach-imx/resume-imx6.S
+new file mode 100644
+index 000000000000..5bd1ba7ef15b
+--- /dev/null
++++ b/arch/arm/mach-imx/resume-imx6.S
+@@ -0,0 +1,24 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ * Copyright 2014 Freescale Semiconductor, Inc.
++ */
++
++#include <linux/linkage.h>
++#include <asm/assembler.h>
++#include <asm/asm-offsets.h>
++#include <asm/hardware/cache-l2x0.h>
++#include "hardware.h"
++
++/*
++ * The following code must assume it is running from physical address
++ * where absolute virtual addresses to the data section have to be
++ * turned into relative ones.
++ */
++
++ENTRY(v7_cpu_resume)
++      bl      v7_invalidate_l1
++#ifdef CONFIG_CACHE_L2X0
++      bl      l2c310_early_resume
++#endif
++      b       cpu_resume
++ENDPROC(v7_cpu_resume)
+diff --git a/arch/arm/mach-imx/suspend-imx6.S 
b/arch/arm/mach-imx/suspend-imx6.S
+index 76ee2ceec8d5..7d84b617af48 100644
+--- a/arch/arm/mach-imx/suspend-imx6.S
++++ b/arch/arm/mach-imx/suspend-imx6.S
+@@ -333,17 +333,3 @@ resume:
+ 
+       ret     lr
+ ENDPROC(imx6_suspend)
+-
+-/*
+- * The following code must assume it is running from physical address
+- * where absolute virtual addresses to the data section have to be
+- * turned into relative ones.
+- */
+-
+-ENTRY(v7_cpu_resume)
+-      bl      v7_invalidate_l1
+-#ifdef CONFIG_CACHE_L2X0
+-      bl      l2c310_early_resume
+-#endif
+-      b       cpu_resume
+-ENDPROC(v7_cpu_resume)
+diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
+index 9067b651c7a2..ca93984ff5a6 100644
+--- a/arch/mips/kernel/vpe.c
++++ b/arch/mips/kernel/vpe.c
+@@ -134,7 +134,7 @@ void release_vpe(struct vpe *v)
+ {
+       list_del(&v->list);
+       if (v->load_addr)
+-              release_progmem(v);
++              release_progmem(v->load_addr);
+       kfree(v);
+ }
+ 
+diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
+index 7d80bfdfb15e..7ad686009795 100644
+--- a/arch/powerpc/kernel/cputable.c
++++ b/arch/powerpc/kernel/cputable.c
+@@ -2147,11 +2147,13 @@ static struct cpu_spec * __init 
setup_cpu_spec(unsigned long offset,
+                * oprofile_cpu_type already has a value, then we are
+                * possibly overriding a real PVR with a logical one,
+                * and, in that case, keep the current value for
+-               * oprofile_cpu_type.
++               * oprofile_cpu_type. Futhermore, let's ensure that the
++               * fix for the PMAO bug is enabled on compatibility mode.
+                */
+               if (old.oprofile_cpu_type != NULL) {
+                       t->oprofile_cpu_type = old.oprofile_cpu_type;
+                       t->oprofile_type = old.oprofile_type;
++                      t->cpu_features |= old.cpu_features & CPU_FTR_PMAO_BUG;
+               }
+       }
+ 
+diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
+index 7ad41be8b373..4f7dad36b3c1 100644
+--- a/arch/s390/mm/gup.c
++++ b/arch/s390/mm/gup.c
+@@ -37,7 +37,8 @@ static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, 
unsigned long addr,
+                       return 0;
+               VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
+               page = pte_page(pte);
+-              if (!page_cache_get_speculative(page))
++              if (WARN_ON_ONCE(page_ref_count(page) < 0)
++                  || !page_cache_get_speculative(page))
+                       return 0;
+               if (unlikely(pte_val(pte) != pte_val(*ptep))) {
+                       put_page(page);
+@@ -76,7 +77,8 @@ static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, 
unsigned long addr,
+               refs++;
+       } while (addr += PAGE_SIZE, addr != end);
+ 
+-      if (!page_cache_add_speculative(head, refs)) {
++      if (WARN_ON_ONCE(page_ref_count(head) < 0)
++          || !page_cache_add_speculative(head, refs)) {
+               *nr -= refs;
+               return 0;
+       }
+diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
+index 7d2542ad346a..6612d532e42e 100644
+--- a/arch/x86/mm/gup.c
++++ b/arch/x86/mm/gup.c
+@@ -95,7 +95,10 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long 
addr,
+               }
+               VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
+               page = pte_page(pte);
+-              get_page(page);
++              if (unlikely(!try_get_page(page))) {
++                      pte_unmap(ptep);
++                      return 0;
++              }
+               SetPageReferenced(page);
+               pages[*nr] = page;
+               (*nr)++;
+@@ -132,6 +135,8 @@ static noinline int gup_huge_pmd(pmd_t pmd, unsigned long 
addr,
+ 
+       refs = 0;
+       head = pmd_page(pmd);
++      if (WARN_ON_ONCE(page_ref_count(head) <= 0))
++              return 0;
+       page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
+       do {
+               VM_BUG_ON_PAGE(compound_head(page) != head, page);
+@@ -208,6 +213,8 @@ static noinline int gup_huge_pud(pud_t pud, unsigned long 
addr,
+ 
+       refs = 0;
+       head = pud_page(pud);
++      if (WARN_ON_ONCE(page_ref_count(head) <= 0))
++              return 0;
+       page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
+       do {
+               VM_BUG_ON_PAGE(compound_head(page) != head, page);
+diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
+index d12782dc9683..9bd4691cc5c5 100644
+--- a/crypto/algif_skcipher.c
++++ b/crypto/algif_skcipher.c
+@@ -538,7 +538,7 @@ static int skcipher_recvmsg_async(struct socket *sock, 
struct msghdr *msg,
+       lock_sock(sk);
+       tx_nents = skcipher_all_sg_nents(ctx);
+       sreq->tsg = kcalloc(tx_nents, sizeof(*sg), GFP_KERNEL);
+-      if (unlikely(!sreq->tsg))
++      if (unlikely(ZERO_OR_NULL_PTR(sreq->tsg)))
+               goto unlock;
+       sg_init_table(sreq->tsg, tx_nents);
+       memcpy(iv, ctx->iv, ivsize);
+diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
+index 67d23ed2d1a0..29082d99264e 100644
+--- a/drivers/char/ipmi/ipmi_ssif.c
++++ b/drivers/char/ipmi/ipmi_ssif.c
+@@ -742,10 +742,14 @@ static void msg_done_handler(struct ssif_info 
*ssif_info, int result,
+       flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+       msg = ssif_info->curr_msg;
+       if (msg) {
++              if (data) {
++                      if (len > IPMI_MAX_MSG_LENGTH)
++                              len = IPMI_MAX_MSG_LENGTH;
++                      memcpy(msg->rsp, data, len);
++              } else {
++                      len = 0;
++              }
+               msg->rsp_size = len;
+-              if (msg->rsp_size > IPMI_MAX_MSG_LENGTH)
+-                      msg->rsp_size = IPMI_MAX_MSG_LENGTH;
+-              memcpy(msg->rsp, data, msg->rsp_size);
+               ssif_info->curr_msg = NULL;
+       }
+ 
+diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
+index 4746fee4cd48..546a91186de1 100644
+--- a/drivers/dma/coh901318.c
++++ b/drivers/dma/coh901318.c
+@@ -1960,8 +1960,6 @@ static void dma_tc_handle(struct coh901318_chan *cohc)
+               return;
+       }
+ 
+-      spin_lock(&cohc->lock);
+-
+       /*
+        * When we reach this point, at least one queue item
+        * should have been moved over from cohc->queue to
+@@ -1982,8 +1980,6 @@ static void dma_tc_handle(struct coh901318_chan *cohc)
+       if (coh901318_queue_start(cohc) == NULL)
+               cohc->busy = 0;
+ 
+-      spin_unlock(&cohc->lock);
+-
+       /*
+        * This tasklet will remove items from cohc->active
+        * and thus terminates them.
+diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
+index 67f201b8dcda..b5cf5d36de2b 100644
+--- a/drivers/dma/tegra20-apb-dma.c
++++ b/drivers/dma/tegra20-apb-dma.c
+@@ -285,7 +285,7 @@ static struct tegra_dma_desc *tegra_dma_desc_get(
+ 
+       /* Do not allocate if desc are waiting for ack */
+       list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
+-              if (async_tx_test_ack(&dma_desc->txd)) {
++              if (async_tx_test_ack(&dma_desc->txd) && !dma_desc->cb_count) {
+                       list_del(&dma_desc->node);
+                       spin_unlock_irqrestore(&tdc->lock, flags);
+                       dma_desc->txd.flags = 0;
+@@ -754,10 +754,6 @@ static int tegra_dma_terminate_all(struct dma_chan *dc)
+       bool was_busy;
+ 
+       spin_lock_irqsave(&tdc->lock, flags);
+-      if (list_empty(&tdc->pending_sg_req)) {
+-              spin_unlock_irqrestore(&tdc->lock, flags);
+-              return 0;
+-      }
+ 
+       if (!tdc->busy)
+               goto skip_dma_stop;
+diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c 
b/drivers/gpu/drm/msm/dsi/dsi_manager.c
+index 0455ff75074a..34220df1265f 100644
+--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
++++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
+@@ -302,7 +302,7 @@ static int dsi_mgr_connector_get_modes(struct 
drm_connector *connector)
+       return num;
+ }
+ 
+-static int dsi_mgr_connector_mode_valid(struct drm_connector *connector,
++static enum drm_mode_status dsi_mgr_connector_mode_valid(struct drm_connector 
*connector,
+                               struct drm_display_mode *mode)
+ {
+       int id = dsi_mgr_connector_get_id(connector);
+@@ -434,6 +434,7 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge 
*bridge)
+       struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
+       struct mipi_dsi_host *host = msm_dsi->host;
+       struct drm_panel *panel = msm_dsi->panel;
++      struct msm_dsi_pll *src_pll;
+       bool is_dual_dsi = IS_DUAL_DSI();
+       int ret;
+ 
+@@ -467,6 +468,10 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge 
*bridge)
+                                                               id, ret);
+       }
+ 
++      /* Save PLL status if it is a clock source */
++      src_pll = msm_dsi_phy_get_pll(msm_dsi->phy);
++      msm_dsi_pll_save_state(src_pll);
++
+       ret = msm_dsi_host_power_off(host);
+       if (ret)
+               pr_err("%s: host %d power off failed,%d\n", __func__, id, ret);
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index 16ff8d3c7cfe..325adbef134c 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -1508,7 +1508,9 @@ int hid_report_raw_event(struct hid_device *hid, int 
type, u8 *data, u32 size,
+ 
+       rsize = ((report->size - 1) >> 3) + 1;
+ 
+-      if (rsize > HID_MAX_BUFFER_SIZE)
++      if (report_enum->numbered && rsize >= HID_MAX_BUFFER_SIZE)
++              rsize = HID_MAX_BUFFER_SIZE - 1;
++      else if (rsize > HID_MAX_BUFFER_SIZE)
+               rsize = HID_MAX_BUFFER_SIZE;
+ 
+       if (csize < rsize) {
+diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
+index 8903ea09ac58..dbdd265075da 100644
+--- a/drivers/hid/usbhid/hiddev.c
++++ b/drivers/hid/usbhid/hiddev.c
+@@ -962,9 +962,9 @@ void hiddev_disconnect(struct hid_device *hid)
+       hiddev->exist = 0;
+ 
+       if (hiddev->open) {
+-              mutex_unlock(&hiddev->existancelock);
+               usbhid_close(hiddev->hid);
+               wake_up_interruptible(&hiddev->wait);
++              mutex_unlock(&hiddev->existancelock);
+       } else {
+               mutex_unlock(&hiddev->existancelock);
+               kfree(hiddev);
+diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c
+index 5929e126da63..d9923d63eb4f 100644
+--- a/drivers/hwmon/adt7462.c
++++ b/drivers/hwmon/adt7462.c
+@@ -426,7 +426,7 @@ static int ADT7462_REG_VOLT(struct adt7462_data *data, int 
which)
+                       return 0x95;
+               break;
+       }
+-      return -ENODEV;
++      return 0;
+ }
+ 
+ /* Provide labels for sysfs */
+diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
+index 4b58e8aaf5c5..ba3b94505c14 100644
+--- a/drivers/i2c/busses/i2c-jz4780.c
++++ b/drivers/i2c/busses/i2c-jz4780.c
+@@ -82,25 +82,6 @@
+ #define JZ4780_I2C_STA_TFNF           BIT(1)
+ #define JZ4780_I2C_STA_ACT            BIT(0)
+ 
+-static const char * const jz4780_i2c_abrt_src[] = {
+-      "ABRT_7B_ADDR_NOACK",
+-      "ABRT_10ADDR1_NOACK",
+-      "ABRT_10ADDR2_NOACK",
+-      "ABRT_XDATA_NOACK",
+-      "ABRT_GCALL_NOACK",
+-      "ABRT_GCALL_READ",
+-      "ABRT_HS_ACKD",
+-      "SBYTE_ACKDET",
+-      "ABRT_HS_NORSTRT",
+-      "SBYTE_NORSTRT",
+-      "ABRT_10B_RD_NORSTRT",
+-      "ABRT_MASTER_DIS",
+-      "ARB_LOST",
+-      "SLVFLUSH_TXFIFO",
+-      "SLV_ARBLOST",
+-      "SLVRD_INTX",
+-};
+-
+ #define JZ4780_I2C_INTST_IGC          BIT(11)
+ #define JZ4780_I2C_INTST_ISTT         BIT(10)
+ #define JZ4780_I2C_INTST_ISTP         BIT(9)
+@@ -538,21 +519,8 @@ done:
+ 
+ static void jz4780_i2c_txabrt(struct jz4780_i2c *i2c, int src)
+ {
+-      int i;
+-
+-      dev_err(&i2c->adap.dev, "txabrt: 0x%08x\n", src);
+-      dev_err(&i2c->adap.dev, "device addr=%x\n",
+-              jz4780_i2c_readw(i2c, JZ4780_I2C_TAR));
+-      dev_err(&i2c->adap.dev, "send cmd count:%d  %d\n",
+-              i2c->cmd, i2c->cmd_buf[i2c->cmd]);
+-      dev_err(&i2c->adap.dev, "receive data count:%d  %d\n",
+-              i2c->cmd, i2c->data_buf[i2c->cmd]);
+-
+-      for (i = 0; i < 16; i++) {
+-              if (src & BIT(i))
+-                      dev_dbg(&i2c->adap.dev, "I2C TXABRT[%d]=%s\n",
+-                              i, jz4780_i2c_abrt_src[i]);
+-      }
++      dev_dbg(&i2c->adap.dev, "txabrt: 0x%08x, cmd: %d, send: %d, recv: %d\n",
++              src, i2c->cmd, i2c->cmd_buf[i2c->cmd], i2c->data_buf[i2c->cmd]);
+ }
+ 
+ static inline int jz4780_i2c_xfer_read(struct jz4780_i2c *i2c,
+diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
+index 941cd9b83941..53c622c99ee4 100644
+--- a/drivers/infiniband/core/cm.c
++++ b/drivers/infiniband/core/cm.c
+@@ -1073,6 +1073,7 @@ struct ib_cm_id *ib_cm_insert_listen(struct ib_device 
*device,
+                       /* Sharing an ib_cm_id with different handlers is not
+                        * supported */
+                       spin_unlock_irqrestore(&cm.lock, flags);
++                      ib_destroy_cm_id(cm_id);
+                       return ERR_PTR(-EINVAL);
+               }
+               atomic_inc(&cm_id_priv->refcount);
+diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
+index ff9163dc1596..c4b779cc3b94 100644
+--- a/drivers/infiniband/core/iwcm.c
++++ b/drivers/infiniband/core/iwcm.c
+@@ -125,8 +125,10 @@ static void dealloc_work_entries(struct iwcm_id_private 
*cm_id_priv)
+ {
+       struct list_head *e, *tmp;
+ 
+-      list_for_each_safe(e, tmp, &cm_id_priv->work_free_list)
++      list_for_each_safe(e, tmp, &cm_id_priv->work_free_list) {
++              list_del(e);
+               kfree(list_entry(e, struct iwcm_work, free_list));
++      }
+ }
+ 
+ static int alloc_work_entries(struct iwcm_id_private *cm_id_priv, int count)
+diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
+index 531d6f3a786e..c95139fea15c 100644
+--- a/drivers/md/dm-cache-target.c
++++ b/drivers/md/dm-cache-target.c
+@@ -2193,8 +2193,8 @@ static void wait_for_migrations(struct cache *cache)
+ 
+ static void stop_worker(struct cache *cache)
+ {
+-      cancel_delayed_work(&cache->waker);
+-      flush_workqueue(cache->wq);
++      cancel_delayed_work_sync(&cache->waker);
++      drain_workqueue(cache->wq);
+ }
+ 
+ static void requeue_deferred_cells(struct cache *cache)
+diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c 
b/drivers/net/ethernet/micrel/ks8851_mll.c
+index 8dc1f0277117..d94e151cff12 100644
+--- a/drivers/net/ethernet/micrel/ks8851_mll.c
++++ b/drivers/net/ethernet/micrel/ks8851_mll.c
+@@ -474,24 +474,6 @@ static int msg_enable;
+  * chip is busy transferring packet data (RX/TX FIFO accesses).
+  */
+ 
+-/**
+- * ks_rdreg8 - read 8 bit register from device
+- * @ks          : The chip information
+- * @offset: The register address
+- *
+- * Read a 8bit register from the chip, returning the result
+- */
+-static u8 ks_rdreg8(struct ks_net *ks, int offset)
+-{
+-      u16 data;
+-      u8 shift_bit = offset & 0x03;
+-      u8 shift_data = (offset & 1) << 3;
+-      ks->cmd_reg_cache = (u16) offset | (u16)(BE0 << shift_bit);
+-      iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
+-      data  = ioread16(ks->hw_addr);
+-      return (u8)(data >> shift_data);
+-}
+-
+ /**
+  * ks_rdreg16 - read 16 bit register from device
+  * @ks          : The chip information
+@@ -502,27 +484,11 @@ static u8 ks_rdreg8(struct ks_net *ks, int offset)
+ 
+ static u16 ks_rdreg16(struct ks_net *ks, int offset)
+ {
+-      ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
++      ks->cmd_reg_cache = (u16)offset | ((BE3 | BE2) >> (offset & 0x02));
+       iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
+       return ioread16(ks->hw_addr);
+ }
+ 
+-/**
+- * ks_wrreg8 - write 8bit register value to chip
+- * @ks: The chip information
+- * @offset: The register address
+- * @value: The value to write
+- *
+- */
+-static void ks_wrreg8(struct ks_net *ks, int offset, u8 value)
+-{
+-      u8  shift_bit = (offset & 0x03);
+-      u16 value_write = (u16)(value << ((offset & 1) << 3));
+-      ks->cmd_reg_cache = (u16)offset | (BE0 << shift_bit);
+-      iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
+-      iowrite16(value_write, ks->hw_addr);
+-}
+-
+ /**
+  * ks_wrreg16 - write 16bit register value to chip
+  * @ks: The chip information
+@@ -533,7 +499,7 @@ static void ks_wrreg8(struct ks_net *ks, int offset, u8 
value)
+ 
+ static void ks_wrreg16(struct ks_net *ks, int offset, u16 value)
+ {
+-      ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
++      ks->cmd_reg_cache = (u16)offset | ((BE3 | BE2) >> (offset & 0x02));
+       iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
+       iowrite16(value, ks->hw_addr);
+ }
+@@ -549,7 +515,7 @@ static inline void ks_inblk(struct ks_net *ks, u16 *wptr, 
u32 len)
+ {
+       len >>= 1;
+       while (len--)
+-              *wptr++ = (u16)ioread16(ks->hw_addr);
++              *wptr++ = be16_to_cpu(ioread16(ks->hw_addr));
+ }
+ 
+ /**
+@@ -563,7 +529,7 @@ static inline void ks_outblk(struct ks_net *ks, u16 *wptr, 
u32 len)
+ {
+       len >>= 1;
+       while (len--)
+-              iowrite16(*wptr++, ks->hw_addr);
++              iowrite16(cpu_to_be16(*wptr++), ks->hw_addr);
+ }
+ 
+ static void ks_disable_int(struct ks_net *ks)
+@@ -642,8 +608,7 @@ static void ks_read_config(struct ks_net *ks)
+       u16 reg_data = 0;
+ 
+       /* Regardless of bus width, 8 bit read should always work.*/
+-      reg_data = ks_rdreg8(ks, KS_CCR) & 0x00FF;
+-      reg_data |= ks_rdreg8(ks, KS_CCR+1) << 8;
++      reg_data = ks_rdreg16(ks, KS_CCR);
+ 
+       /* addr/data bus are multiplexed */
+       ks->sharedbus = (reg_data & CCR_SHARED) == CCR_SHARED;
+@@ -747,7 +712,7 @@ static inline void ks_read_qmu(struct ks_net *ks, u16 
*buf, u32 len)
+ 
+       /* 1. set sudo DMA mode */
+       ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);
+-      ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);
++      ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA);
+ 
+       /* 2. read prepend data */
+       /**
+@@ -764,7 +729,7 @@ static inline void ks_read_qmu(struct ks_net *ks, u16 
*buf, u32 len)
+       ks_inblk(ks, buf, ALIGN(len, 4));
+ 
+       /* 4. reset sudo DMA Mode */
+-      ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr);
++      ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
+ }
+ 
+ /**
+@@ -997,13 +962,13 @@ static void ks_write_qmu(struct ks_net *ks, u8 *pdata, 
u16 len)
+       ks->txh.txw[1] = cpu_to_le16(len);
+ 
+       /* 1. set sudo-DMA mode */
+-      ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);
++      ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA);
+       /* 2. write status/lenth info */
+       ks_outblk(ks, ks->txh.txw, 4);
+       /* 3. write pkt data */
+       ks_outblk(ks, (u16 *)pdata, ALIGN(len, 4));
+       /* 4. reset sudo-DMA mode */
+-      ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr);
++      ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
+       /* 5. Enqueue Tx(move the pkt from TX buffer into TXQ) */
+       ks_wrreg16(ks, KS_TXQCR, TXQCR_METFE);
+       /* 6. wait until TXQCR_METFE is auto-cleared */
+diff --git a/drivers/net/phy/mdio-bcm-iproc.c 
b/drivers/net/phy/mdio-bcm-iproc.c
+index 46fe1ae919a3..51ce3ea17fb3 100644
+--- a/drivers/net/phy/mdio-bcm-iproc.c
++++ b/drivers/net/phy/mdio-bcm-iproc.c
+@@ -188,6 +188,23 @@ static int iproc_mdio_remove(struct platform_device *pdev)
+       return 0;
+ }
+ 
++#ifdef CONFIG_PM_SLEEP
++int iproc_mdio_resume(struct device *dev)
++{
++      struct platform_device *pdev = to_platform_device(dev);
++      struct iproc_mdio_priv *priv = platform_get_drvdata(pdev);
++
++      /* restore the mii clock configuration */
++      iproc_mdio_config_clk(priv->base);
++
++      return 0;
++}
++
++static const struct dev_pm_ops iproc_mdio_pm_ops = {
++      .resume = iproc_mdio_resume
++};
++#endif /* CONFIG_PM_SLEEP */
++
+ static const struct of_device_id iproc_mdio_of_match[] = {
+       { .compatible = "brcm,iproc-mdio", },
+       { /* sentinel */ },
+@@ -198,6 +215,9 @@ static struct platform_driver iproc_mdio_driver = {
+       .driver = {
+               .name = "iproc-mdio",
+               .of_match_table = iproc_mdio_of_match,
++#ifdef CONFIG_PM_SLEEP
++              .pm = &iproc_mdio_pm_ops,
++#endif
+       },
+       .probe = iproc_mdio_probe,
+       .remove = iproc_mdio_remove,
+diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
+index 0f8d5609ed51..d4a33baa33b6 100644
+--- a/drivers/net/slip/slip.c
++++ b/drivers/net/slip/slip.c
+@@ -868,7 +868,6 @@ err_free_chan:
+       tty->disc_data = NULL;
+       clear_bit(SLF_INUSE, &sl->flags);
+       sl_free_netdev(sl->dev);
+-      free_netdev(sl->dev);
+ 
+ err_exit:
+       rtnl_unlock();
+diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c 
b/drivers/net/wireless/iwlwifi/pcie/rx.c
+index d6f9858ff2de..7fdb3ad9f53d 100644
+--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
++++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
+@@ -708,9 +708,13 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
+               if (err)
+                       return err;
+       }
+-      if (!rba->alloc_wq)
++      if (!rba->alloc_wq) {
+               rba->alloc_wq = alloc_workqueue("rb_allocator",
+                                               WQ_HIGHPRI | WQ_UNBOUND, 1);
++              if (!rba->alloc_wq)
++                      return -ENOMEM;
++      }
++
+       INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work);
+ 
+       cancel_work_sync(&rba->rx_alloc);
+diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c
+index fa75c53f3fa5..22522edb6341 100644
+--- a/drivers/nfc/pn544/i2c.c
++++ b/drivers/nfc/pn544/i2c.c
+@@ -241,6 +241,7 @@ static void pn544_hci_i2c_platform_init(struct 
pn544_i2c_phy *phy)
+ 
+ out:
+       gpio_set_value_cansleep(phy->gpio_en, !phy->en_polarity);
++      usleep_range(10000, 15000);
+ }
+ 
+ static void pn544_hci_i2c_enable_mode(struct pn544_i2c_phy *phy, int run_mode)
+diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
+index 20314aad7ab7..f329459cadf1 100644
+--- a/drivers/s390/cio/blacklist.c
++++ b/drivers/s390/cio/blacklist.c
+@@ -303,8 +303,10 @@ static void *
+ cio_ignore_proc_seq_next(struct seq_file *s, void *it, loff_t *offset)
+ {
+       struct ccwdev_iter *iter;
++      loff_t p = *offset;
+ 
+-      if (*offset >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1))
++      (*offset)++;
++      if (p >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1))
+               return NULL;
+       iter = it;
+       if (iter->devno == __MAX_SUBCHANNEL) {
+@@ -314,7 +316,6 @@ cio_ignore_proc_seq_next(struct seq_file *s, void *it, 
loff_t *offset)
+                       return NULL;
+       } else
+               iter->devno++;
+-      (*offset)++;
+       return iter;
+ }
+ 
+diff --git a/drivers/tty/serial/ar933x_uart.c 
b/drivers/tty/serial/ar933x_uart.c
+index 1519d2ca7705..40194791cde0 100644
+--- a/drivers/tty/serial/ar933x_uart.c
++++ b/drivers/tty/serial/ar933x_uart.c
+@@ -294,6 +294,10 @@ static void ar933x_uart_set_termios(struct uart_port 
*port,
+       ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
+                           AR933X_UART_CS_HOST_INT_EN);
+ 
++      /* enable RX and TX ready overide */
++      ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
++              AR933X_UART_CS_TX_READY_ORIDE | AR933X_UART_CS_RX_READY_ORIDE);
++
+       /* reenable the UART */
+       ar933x_uart_rmw(up, AR933X_UART_CS_REG,
+                       AR933X_UART_CS_IF_MODE_M << AR933X_UART_CS_IF_MODE_S,
+@@ -426,6 +430,10 @@ static int ar933x_uart_startup(struct uart_port *port)
+       ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
+                           AR933X_UART_CS_HOST_INT_EN);
+ 
++      /* enable RX and TX ready overide */
++      ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
++              AR933X_UART_CS_TX_READY_ORIDE | AR933X_UART_CS_RX_READY_ORIDE);
++
+       /* Enable RX interrupts */
+       up->ier = AR933X_UART_INT_RX_VALID;
+       ar933x_uart_write(up, AR933X_UART_INT_EN_REG, up->ier);
+diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
+index ed27fda13387..def99f020d82 100644
+--- a/drivers/tty/sysrq.c
++++ b/drivers/tty/sysrq.c
+@@ -542,7 +542,6 @@ void __handle_sysrq(int key, bool check_mask)
+        */
+       orig_log_level = console_loglevel;
+       console_loglevel = CONSOLE_LOGLEVEL_DEFAULT;
+-      pr_info("SysRq : ");
+ 
+         op_p = __sysrq_get_key_op(key);
+         if (op_p) {
+@@ -551,14 +550,15 @@ void __handle_sysrq(int key, bool check_mask)
+                * should not) and is the invoked operation enabled?
+                */
+               if (!check_mask || sysrq_on_mask(op_p->enable_mask)) {
+-                      pr_cont("%s\n", op_p->action_msg);
++                      pr_info("%s\n", op_p->action_msg);
+                       console_loglevel = orig_log_level;
+                       op_p->handler(key);
+               } else {
+-                      pr_cont("This sysrq operation is disabled.\n");
++                      pr_info("This sysrq operation is disabled.\n");
++                      console_loglevel = orig_log_level;
+               }
+       } else {
+-              pr_cont("HELP : ");
++              pr_info("HELP : ");
+               /* Only print the help msg once per handler */
+               for (i = 0; i < ARRAY_SIZE(sysrq_key_table); i++) {
+                       if (sysrq_key_table[i]) {
+diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
+index 381a2b13682c..9dae02ce4092 100644
+--- a/drivers/tty/vt/selection.c
++++ b/drivers/tty/vt/selection.c
+@@ -13,6 +13,7 @@
+ #include <linux/tty.h>
+ #include <linux/sched.h>
+ #include <linux/mm.h>
++#include <linux/mutex.h>
+ #include <linux/slab.h>
+ #include <linux/types.h>
+ 
+@@ -40,6 +41,7 @@ static volatile int sel_start = -1;  /* cleared by 
clear_selection */
+ static int sel_end;
+ static int sel_buffer_lth;
+ static char *sel_buffer;
++static DEFINE_MUTEX(sel_lock);
+ 
+ /* clear_selection, highlight and highlight_pointer can be called
+    from interrupt (via scrollback/front) */
+@@ -156,14 +158,14 @@ static int store_utf8(u16 c, char *p)
+  *    The entire selection process is managed under the console_lock. It's
+  *     a lot under the lock but its hardly a performance path
+  */
+-int set_selection(const struct tiocl_selection __user *sel, struct tty_struct 
*tty)
++static int __set_selection(const struct tiocl_selection __user *sel, struct 
tty_struct *tty)
+ {
+       struct vc_data *vc = vc_cons[fg_console].d;
+       int sel_mode, new_sel_start, new_sel_end, spc;
+       char *bp, *obp;
+       int i, ps, pe, multiplier;
+       u16 c;
+-      int mode;
++      int mode, ret = 0;
+ 
+       poke_blanked_console();
+ 
+@@ -324,7 +326,21 @@ int set_selection(const struct tiocl_selection __user 
*sel, struct tty_struct *t
+               }
+       }
+       sel_buffer_lth = bp - sel_buffer;
+-      return 0;
++
++      return ret;
++}
++
++int set_selection(const struct tiocl_selection __user *v, struct tty_struct 
*tty)
++{
++      int ret;
++
++      mutex_lock(&sel_lock);
++      console_lock();
++      ret = __set_selection(v, tty);
++      console_unlock();
++      mutex_unlock(&sel_lock);
++
++      return ret;
+ }
+ 
+ /* Insert the contents of the selection buffer into the
+@@ -350,6 +366,7 @@ int paste_selection(struct tty_struct *tty)
+       tty_buffer_lock_exclusive(&vc->port);
+ 
+       add_wait_queue(&vc->paste_wait, &wait);
++      mutex_lock(&sel_lock);
+       while (sel_buffer && sel_buffer_lth > pasted) {
+               set_current_state(TASK_INTERRUPTIBLE);
+               if (test_bit(TTY_THROTTLED, &tty->flags)) {
+@@ -362,6 +379,7 @@ int paste_selection(struct tty_struct *tty)
+                                             count);
+               pasted += count;
+       }
++      mutex_unlock(&sel_lock);
+       remove_wait_queue(&vc->paste_wait, &wait);
+       __set_current_state(TASK_RUNNING);
+ 
+diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
+index 6779f733bb83..5b8b6ebebf3e 100644
+--- a/drivers/tty/vt/vt.c
++++ b/drivers/tty/vt/vt.c
+@@ -2687,9 +2687,7 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
+       switch (type)
+       {
+               case TIOCL_SETSEL:
+-                      console_lock();
+                       ret = set_selection((struct tiocl_selection __user 
*)(p+1), tty);
+-                      console_unlock();
+                       break;
+               case TIOCL_PASTESEL:
+                       ret = paste_selection(tty);
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 4c302424c97a..963dd8a4b540 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -938,13 +938,17 @@ int usb_remove_device(struct usb_device *udev)
+ {
+       struct usb_hub *hub;
+       struct usb_interface *intf;
++      int ret;
+ 
+       if (!udev->parent)      /* Can't remove a root hub */
+               return -EINVAL;
+       hub = usb_hub_to_struct_hub(udev->parent);
+       intf = to_usb_interface(hub->intfdev);
+ 
+-      usb_autopm_get_interface(intf);
++      ret = usb_autopm_get_interface(intf);
++      if (ret < 0)
++              return ret;
++
+       set_bit(udev->portnum, hub->removed_bits);
+       hub_port_logical_disconnect(hub, udev->portnum);
+       usb_autopm_put_interface(intf);
+diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
+index 5487fe308f01..1235e678184b 100644
+--- a/drivers/usb/core/port.c
++++ b/drivers/usb/core/port.c
+@@ -98,7 +98,10 @@ static int usb_port_runtime_resume(struct device *dev)
+       if (!port_dev->is_superspeed && peer)
+               pm_runtime_get_sync(&peer->dev);
+ 
+-      usb_autopm_get_interface(intf);
++      retval = usb_autopm_get_interface(intf);
++      if (retval < 0)
++              return retval;
++
+       retval = usb_hub_set_port_power(hdev, hub, port1, true);
+       msleep(hub_power_on_good_delay(hub));
+       if (udev && !retval) {
+@@ -151,7 +154,10 @@ static int usb_port_runtime_suspend(struct device *dev)
+       if (usb_port_block_power_off)
+               return -EBUSY;
+ 
+-      usb_autopm_get_interface(intf);
++      retval = usb_autopm_get_interface(intf);
++      if (retval < 0)
++              return retval;
++
+       retval = usb_hub_set_port_power(hdev, hub, port1, false);
+       usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_C_CONNECTION);
+       if (!port_dev->is_superspeed)
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index ad8307140df8..64c03e871f2d 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -86,6 +86,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+       /* Logitech PTZ Pro Camera */
+       { USB_DEVICE(0x046d, 0x0853), .driver_info = USB_QUIRK_DELAY_INIT },
+ 
++      /* Logitech Screen Share */
++      { USB_DEVICE(0x046d, 0x086c), .driver_info = USB_QUIRK_NO_LPM },
++
+       /* Logitech Quickcam Fusion */
+       { USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
+diff --git a/drivers/usb/gadget/function/f_fs.c 
b/drivers/usb/gadget/function/f_fs.c
+index 4cb1355271ec..9536c409a90d 100644
+--- a/drivers/usb/gadget/function/f_fs.c
++++ b/drivers/usb/gadget/function/f_fs.c
+@@ -888,18 +888,19 @@ static int ffs_aio_cancel(struct kiocb *kiocb)
+ {
+       struct ffs_io_data *io_data = kiocb->private;
+       struct ffs_epfile *epfile = kiocb->ki_filp->private_data;
++      unsigned long flags;
+       int value;
+ 
+       ENTER();
+ 
+-      spin_lock_irq(&epfile->ffs->eps_lock);
++      spin_lock_irqsave(&epfile->ffs->eps_lock, flags);
+ 
+       if (likely(io_data && io_data->ep && io_data->req))
+               value = usb_ep_dequeue(io_data->ep, io_data->req);
+       else
+               value = -EINVAL;
+ 
+-      spin_unlock_irq(&epfile->ffs->eps_lock);
++      spin_unlock_irqrestore(&epfile->ffs->eps_lock, flags);
+ 
+       return value;
+ }
+diff --git a/drivers/usb/gadget/function/u_serial.c 
b/drivers/usb/gadget/function/u_serial.c
+index 31e08bb3cb41..58a699cfa458 100644
+--- a/drivers/usb/gadget/function/u_serial.c
++++ b/drivers/usb/gadget/function/u_serial.c
+@@ -701,8 +701,10 @@ static int gs_start_io(struct gs_port *port)
+       port->n_read = 0;
+       started = gs_start_rx(port);
+ 
+-      /* unblock any pending writes into our circular buffer */
+       if (started) {
++              gs_start_tx(port);
++              /* Unblock any pending writes into our circular buffer, in case
++               * we didn't in gs_start_tx() */
+               tty_wakeup(port->port.tty);
+       } else {
+               gs_free_requests(ep, head, &port->read_allocated);
+diff --git a/drivers/usb/storage/unusual_devs.h 
b/drivers/usb/storage/unusual_devs.h
+index a98259e136dd..2e0a8088ffe2 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -1206,6 +1206,12 @@ UNUSUAL_DEV( 0x090a, 0x1200, 0x0000, 0x9999,
+               USB_SC_RBC, USB_PR_BULK, NULL,
+               0 ),
+ 
++UNUSUAL_DEV(0x090c, 0x1000, 0x1100, 0x1100,
++              "Samsung",
++              "Flash Drive FIT",
++              USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++              US_FL_MAX_SECTORS_64),
++
+ /* aeb */
+ UNUSUAL_DEV( 0x090c, 0x1132, 0x0000, 0xffff,
+               "Feiya",
+diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
+index 598ec7545e84..e5231dd55e6e 100644
+--- a/drivers/video/console/vgacon.c
++++ b/drivers/video/console/vgacon.c
+@@ -1330,6 +1330,9 @@ static int vgacon_font_get(struct vc_data *c, struct 
console_font *font)
+ static int vgacon_resize(struct vc_data *c, unsigned int width,
+                        unsigned int height, unsigned int user)
+ {
++      if ((width << 1) * height > vga_vram_size)
++              return -EINVAL;
++
+       if (width % 2 || width > screen_info.orig_video_cols ||
+           height > (screen_info.orig_video_lines * vga_default_font_height)/
+           c->vc_font.height)
+diff --git a/drivers/watchdog/da9062_wdt.c b/drivers/watchdog/da9062_wdt.c
+index 7386111220d5..daeb645fcea8 100644
+--- a/drivers/watchdog/da9062_wdt.c
++++ b/drivers/watchdog/da9062_wdt.c
+@@ -126,13 +126,6 @@ static int da9062_wdt_stop(struct watchdog_device *wdd)
+       struct da9062_watchdog *wdt = watchdog_get_drvdata(wdd);
+       int ret;
+ 
+-      ret = da9062_reset_watchdog_timer(wdt);
+-      if (ret) {
+-              dev_err(wdt->hw->dev, "Failed to ping the watchdog (err = 
%d)\n",
+-                      ret);
+-              return ret;
+-      }
+-
+       ret = regmap_update_bits(wdt->hw->regmap,
+                                DA9062AA_CONTROL_D,
+                                DA9062AA_TWDSCALE_MASK,
+diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
+index 3f93125916bf..f5b87a8f75c4 100644
+--- a/fs/cifs/cifsacl.c
++++ b/fs/cifs/cifsacl.c
+@@ -480,7 +480,7 @@ static void access_flags_to_mode(__le32 ace_flags, int 
type, umode_t *pmode,
+                       ((flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS))
+               *pmode |= (S_IXUGO & (*pbits_to_set));
+ 
+-      cifs_dbg(NOISY, "access flags 0x%x mode now 0x%x\n", flags, *pmode);
++      cifs_dbg(NOISY, "access flags 0x%x mode now %04o\n", flags, *pmode);
+       return;
+ }
+ 
+@@ -509,7 +509,7 @@ static void mode_to_access_flags(umode_t mode, umode_t 
bits_to_use,
+       if (mode & S_IXUGO)
+               *pace_flags |= SET_FILE_EXEC_RIGHTS;
+ 
+-      cifs_dbg(NOISY, "mode: 0x%x, access flags now 0x%x\n",
++      cifs_dbg(NOISY, "mode: %04o, access flags now 0x%x\n",
+                mode, *pace_flags);
+       return;
+ }
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 4bde8acca455..cf104bbe30a1 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -3402,7 +3402,7 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
+       cifs_sb->mnt_gid = pvolume_info->linux_gid;
+       cifs_sb->mnt_file_mode = pvolume_info->file_mode;
+       cifs_sb->mnt_dir_mode = pvolume_info->dir_mode;
+-      cifs_dbg(FYI, "file mode: 0x%hx  dir mode: 0x%hx\n",
++      cifs_dbg(FYI, "file mode: %04ho  dir mode: %04ho\n",
+                cifs_sb->mnt_file_mode, cifs_sb->mnt_dir_mode);
+ 
+       cifs_sb->actimeo = pvolume_info->actimeo;
+diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
+index 0a219545940d..c18c26a78453 100644
+--- a/fs/cifs/inode.c
++++ b/fs/cifs/inode.c
+@@ -1540,7 +1540,7 @@ int cifs_mkdir(struct inode *inode, struct dentry 
*direntry, umode_t mode)
+       struct TCP_Server_Info *server;
+       char *full_path;
+ 
+-      cifs_dbg(FYI, "In cifs_mkdir, mode = 0x%hx inode = 0x%p\n",
++      cifs_dbg(FYI, "In cifs_mkdir, mode = %04ho inode = 0x%p\n",
+                mode, inode);
+ 
+       cifs_sb = CIFS_SB(inode->i_sb);
+@@ -1957,6 +1957,7 @@ int cifs_revalidate_dentry_attr(struct dentry *dentry)
+       struct inode *inode = d_inode(dentry);
+       struct super_block *sb = dentry->d_sb;
+       char *full_path = NULL;
++      int count = 0;
+ 
+       if (inode == NULL)
+               return -ENOENT;
+@@ -1978,15 +1979,18 @@ int cifs_revalidate_dentry_attr(struct dentry *dentry)
+                full_path, inode, inode->i_count.counter,
+                dentry, dentry->d_time, jiffies);
+ 
++again:
+       if (cifs_sb_master_tcon(CIFS_SB(sb))->unix_ext)
+               rc = cifs_get_inode_info_unix(&inode, full_path, sb, xid);
+       else
+               rc = cifs_get_inode_info(&inode, full_path, NULL, sb,
+                                        xid, NULL);
+-
++      if (rc == -EAGAIN && count++ < 10)
++              goto again;
+ out:
+       kfree(full_path);
+       free_xid(xid);
++
+       return rc;
+ }
+ 
+diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
+index b272b778aa85..37920394c64c 100644
+--- a/fs/ecryptfs/keystore.c
++++ b/fs/ecryptfs/keystore.c
+@@ -1280,7 +1280,7 @@ parse_tag_1_packet(struct ecryptfs_crypt_stat 
*crypt_stat,
+               printk(KERN_ERR "Enter w/ first byte != 0x%.2x\n",
+                      ECRYPTFS_TAG_1_PACKET_TYPE);
+               rc = -EINVAL;
+-              goto out_free;
++              goto out;
+       }
+       /* Released: wipe_auth_tok_list called in ecryptfs_parse_packet_set or
+        * at end of function upon failure */
+@@ -1330,7 +1330,7 @@ parse_tag_1_packet(struct ecryptfs_crypt_stat 
*crypt_stat,
+               printk(KERN_WARNING "Tag 1 packet contains key larger "
+                      "than ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES");
+               rc = -EINVAL;
+-              goto out;
++              goto out_free;
+       }
+       memcpy((*new_auth_tok)->session_key.encrypted_key,
+              &data[(*packet_size)], (body_size - (ECRYPTFS_SIG_SIZE + 2)));
+diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
+index e0fb7cdcee89..b041a215cd73 100644
+--- a/fs/ext4/balloc.c
++++ b/fs/ext4/balloc.c
+@@ -279,6 +279,7 @@ struct ext4_group_desc * ext4_get_group_desc(struct 
super_block *sb,
+       ext4_group_t ngroups = ext4_get_groups_count(sb);
+       struct ext4_group_desc *desc;
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
++      struct buffer_head *bh_p;
+ 
+       if (block_group >= ngroups) {
+               ext4_error(sb, "block_group >= groups_count - block_group = %u,"
+@@ -289,7 +290,14 @@ struct ext4_group_desc * ext4_get_group_desc(struct 
super_block *sb,
+ 
+       group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
+       offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
+-      if (!sbi->s_group_desc[group_desc]) {
++      bh_p = sbi_array_rcu_deref(sbi, s_group_desc, group_desc);
++      /*
++       * sbi_array_rcu_deref returns with rcu unlocked, this is ok since
++       * the pointer being dereferenced won't be dereferenced again. By
++       * looking at the usage in add_new_gdb() the value isn't modified,
++       * just the pointer, and so it remains valid.
++       */
++      if (!bh_p) {
+               ext4_error(sb, "Group descriptor not loaded - "
+                          "block_group = %u, group_desc = %u, desc = %u",
+                          block_group, group_desc, offset);
+@@ -297,10 +305,10 @@ struct ext4_group_desc * ext4_get_group_desc(struct 
super_block *sb,
+       }
+ 
+       desc = (struct ext4_group_desc *)(
+-              (__u8 *)sbi->s_group_desc[group_desc]->b_data +
++              (__u8 *)bh_p->b_data +
+               offset * EXT4_DESC_SIZE(sb));
+       if (bh)
+-              *bh = sbi->s_group_desc[group_desc];
++              *bh = bh_p;
+       return desc;
+ }
+ 
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 00ab96311487..ab0f08c89d5f 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -1303,7 +1303,7 @@ struct ext4_sb_info {
+       loff_t s_bitmap_maxbytes;       /* max bytes for bitmap files */
+       struct buffer_head * s_sbh;     /* Buffer containing the super block */
+       struct ext4_super_block *s_es;  /* Pointer to the super block in the 
buffer */
+-      struct buffer_head **s_group_desc;
++      struct buffer_head * __rcu *s_group_desc;
+       unsigned int s_mount_opt;
+       unsigned int s_mount_opt2;
+       unsigned int s_mount_flags;
+@@ -1363,7 +1363,7 @@ struct ext4_sb_info {
+ #endif
+ 
+       /* for buddy allocator */
+-      struct ext4_group_info ***s_group_info;
++      struct ext4_group_info ** __rcu *s_group_info;
+       struct inode *s_buddy_cache;
+       spinlock_t s_md_lock;
+       unsigned short *s_mb_offsets;
+@@ -1410,7 +1410,7 @@ struct ext4_sb_info {
+       unsigned int s_extent_max_zeroout_kb;
+ 
+       unsigned int s_log_groups_per_flex;
+-      struct flex_groups *s_flex_groups;
++      struct flex_groups * __rcu *s_flex_groups;
+       ext4_group_t s_flex_groups_allocated;
+ 
+       /* workqueue for reserved extent conversions (buffered io) */
+@@ -1491,6 +1491,23 @@ static inline void ext4_inode_aio_set(struct inode 
*inode, ext4_io_end_t *io)
+       inode->i_private = io;
+ }
+ 
++/*
++ * Returns: sbi->field[index]
++ * Used to access an array element from the following sbi fields which require
++ * rcu protection to avoid dereferencing an invalid pointer due to 
reassignment
++ * - s_group_desc
++ * - s_group_info
++ * - s_flex_group
++ */
++#define sbi_array_rcu_deref(sbi, field, index)                                
   \
++({                                                                       \
++      typeof(*((sbi)->field)) _v;                                        \
++      rcu_read_lock();                                                   \
++      _v = ((typeof(_v)*)rcu_dereference((sbi)->field))[index];          \
++      rcu_read_unlock();                                                 \
++      _v;                                                                \
++})
++
+ /*
+  * Inode dynamic state flags
+  */
+@@ -2555,6 +2572,7 @@ extern int ext4_generic_delete_entry(handle_t *handle,
+ extern int ext4_empty_dir(struct inode *inode);
+ 
+ /* resize.c */
++extern void ext4_kvfree_array_rcu(void *to_free);
+ extern int ext4_group_add(struct super_block *sb,
+                               struct ext4_new_group_data *input);
+ extern int ext4_group_extend(struct super_block *sb,
+@@ -2795,13 +2813,13 @@ static inline
+ struct ext4_group_info *ext4_get_group_info(struct super_block *sb,
+                                           ext4_group_t group)
+ {
+-       struct ext4_group_info ***grp_info;
++       struct ext4_group_info **grp_info;
+        long indexv, indexh;
+        BUG_ON(group >= EXT4_SB(sb)->s_groups_count);
+-       grp_info = EXT4_SB(sb)->s_group_info;
+        indexv = group >> (EXT4_DESC_PER_BLOCK_BITS(sb));
+        indexh = group & ((EXT4_DESC_PER_BLOCK(sb)) - 1);
+-       return grp_info[indexv][indexh];
++       grp_info = sbi_array_rcu_deref(EXT4_SB(sb), s_group_info, indexv);
++       return grp_info[indexh];
+ }
+ 
+ /*
+diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
+index 0963213e9cd3..c31b05f0bd69 100644
+--- a/fs/ext4/ialloc.c
++++ b/fs/ext4/ialloc.c
+@@ -331,11 +331,13 @@ void ext4_free_inode(handle_t *handle, struct inode 
*inode)
+ 
+       percpu_counter_inc(&sbi->s_freeinodes_counter);
+       if (sbi->s_log_groups_per_flex) {
+-              ext4_group_t f = ext4_flex_group(sbi, block_group);
++              struct flex_groups *fg;
+ 
+-              atomic_inc(&sbi->s_flex_groups[f].free_inodes);
++              fg = sbi_array_rcu_deref(sbi, s_flex_groups,
++                                       ext4_flex_group(sbi, block_group));
++              atomic_inc(&fg->free_inodes);
+               if (is_directory)
+-                      atomic_dec(&sbi->s_flex_groups[f].used_dirs);
++                      atomic_dec(&fg->used_dirs);
+       }
+       BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
+       fatal = ext4_handle_dirty_metadata(handle, NULL, bh2);
+@@ -376,12 +378,13 @@ static void get_orlov_stats(struct super_block *sb, 
ext4_group_t g,
+                           int flex_size, struct orlov_stats *stats)
+ {
+       struct ext4_group_desc *desc;
+-      struct flex_groups *flex_group = EXT4_SB(sb)->s_flex_groups;
+ 
+       if (flex_size > 1) {
+-              stats->free_inodes = atomic_read(&flex_group[g].free_inodes);
+-              stats->free_clusters = 
atomic64_read(&flex_group[g].free_clusters);
+-              stats->used_dirs = atomic_read(&flex_group[g].used_dirs);
++              struct flex_groups *fg = sbi_array_rcu_deref(EXT4_SB(sb),
++                                                           s_flex_groups, g);
++              stats->free_inodes = atomic_read(&fg->free_inodes);
++              stats->free_clusters = atomic64_read(&fg->free_clusters);
++              stats->used_dirs = atomic_read(&fg->used_dirs);
+               return;
+       }
+ 
+@@ -981,7 +984,8 @@ got:
+               if (sbi->s_log_groups_per_flex) {
+                       ext4_group_t f = ext4_flex_group(sbi, group);
+ 
+-                      atomic_inc(&sbi->s_flex_groups[f].used_dirs);
++                      atomic_inc(&sbi_array_rcu_deref(sbi, s_flex_groups,
++                                                      f)->used_dirs);
+               }
+       }
+       if (ext4_has_group_desc_csum(sb)) {
+@@ -1004,7 +1008,8 @@ got:
+ 
+       if (sbi->s_log_groups_per_flex) {
+               flex_group = ext4_flex_group(sbi, group);
+-              atomic_dec(&sbi->s_flex_groups[flex_group].free_inodes);
++              atomic_dec(&sbi_array_rcu_deref(sbi, s_flex_groups,
++                                              flex_group)->free_inodes);
+       }
+ 
+       inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 828b4c080c38..fda49f4c5a8e 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -2378,7 +2378,7 @@ int ext4_mb_alloc_groupinfo(struct super_block *sb, 
ext4_group_t ngroups)
+ {
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+       unsigned size;
+-      struct ext4_group_info ***new_groupinfo;
++      struct ext4_group_info ***old_groupinfo, ***new_groupinfo;
+ 
+       size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >>
+               EXT4_DESC_PER_BLOCK_BITS(sb);
+@@ -2391,13 +2391,16 @@ int ext4_mb_alloc_groupinfo(struct super_block *sb, 
ext4_group_t ngroups)
+               ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group");
+               return -ENOMEM;
+       }
+-      if (sbi->s_group_info) {
+-              memcpy(new_groupinfo, sbi->s_group_info,
++      rcu_read_lock();
++      old_groupinfo = rcu_dereference(sbi->s_group_info);
++      if (old_groupinfo)
++              memcpy(new_groupinfo, old_groupinfo,
+                      sbi->s_group_info_size * sizeof(*sbi->s_group_info));
+-              kvfree(sbi->s_group_info);
+-      }
+-      sbi->s_group_info = new_groupinfo;
++      rcu_read_unlock();
++      rcu_assign_pointer(sbi->s_group_info, new_groupinfo);
+       sbi->s_group_info_size = size / sizeof(*sbi->s_group_info);
++      if (old_groupinfo)
++              ext4_kvfree_array_rcu(old_groupinfo);
+       ext4_debug("allocated s_groupinfo array for %d meta_bg's\n", 
+                  sbi->s_group_info_size);
+       return 0;
+@@ -2409,6 +2412,7 @@ int ext4_mb_add_groupinfo(struct super_block *sb, 
ext4_group_t group,
+ {
+       int i;
+       int metalen = 0;
++      int idx = group >> EXT4_DESC_PER_BLOCK_BITS(sb);
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+       struct ext4_group_info **meta_group_info;
+       struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
+@@ -2427,12 +2431,12 @@ int ext4_mb_add_groupinfo(struct super_block *sb, 
ext4_group_t group,
+                                "for a buddy group");
+                       goto exit_meta_group_info;
+               }
+-              sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] =
+-                      meta_group_info;
++              rcu_read_lock();
++              rcu_dereference(sbi->s_group_info)[idx] = meta_group_info;
++              rcu_read_unlock();
+       }
+ 
+-      meta_group_info =
+-              sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)];
++      meta_group_info = sbi_array_rcu_deref(sbi, s_group_info, idx);
+       i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
+ 
+       meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS);
+@@ -2480,8 +2484,13 @@ int ext4_mb_add_groupinfo(struct super_block *sb, 
ext4_group_t group,
+ exit_group_info:
+       /* If a meta_group_info table has been allocated, release it now */
+       if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
+-              kfree(sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]);
+-              sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] = NULL;
++              struct ext4_group_info ***group_info;
++
++              rcu_read_lock();
++              group_info = rcu_dereference(sbi->s_group_info);
++              kfree(group_info[idx]);
++              group_info[idx] = NULL;
++              rcu_read_unlock();
+       }
+ exit_meta_group_info:
+       return -ENOMEM;
+@@ -2494,6 +2503,7 @@ static int ext4_mb_init_backend(struct super_block *sb)
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+       int err;
+       struct ext4_group_desc *desc;
++      struct ext4_group_info ***group_info;
+       struct kmem_cache *cachep;
+ 
+       err = ext4_mb_alloc_groupinfo(sb, ngroups);
+@@ -2528,11 +2538,16 @@ err_freebuddy:
+       while (i-- > 0)
+               kmem_cache_free(cachep, ext4_get_group_info(sb, i));
+       i = sbi->s_group_info_size;
++      rcu_read_lock();
++      group_info = rcu_dereference(sbi->s_group_info);
+       while (i-- > 0)
+-              kfree(sbi->s_group_info[i]);
++              kfree(group_info[i]);
++      rcu_read_unlock();
+       iput(sbi->s_buddy_cache);
+ err_freesgi:
+-      kvfree(sbi->s_group_info);
++      rcu_read_lock();
++      kvfree(rcu_dereference(sbi->s_group_info));
++      rcu_read_unlock();
+       return -ENOMEM;
+ }
+ 
+@@ -2720,7 +2735,7 @@ int ext4_mb_release(struct super_block *sb)
+       ext4_group_t ngroups = ext4_get_groups_count(sb);
+       ext4_group_t i;
+       int num_meta_group_infos;
+-      struct ext4_group_info *grinfo;
++      struct ext4_group_info *grinfo, ***group_info;
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+       struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
+ 
+@@ -2738,9 +2753,12 @@ int ext4_mb_release(struct super_block *sb)
+               num_meta_group_infos = (ngroups +
+                               EXT4_DESC_PER_BLOCK(sb) - 1) >>
+                       EXT4_DESC_PER_BLOCK_BITS(sb);
++              rcu_read_lock();
++              group_info = rcu_dereference(sbi->s_group_info);
+               for (i = 0; i < num_meta_group_infos; i++)
+-                      kfree(sbi->s_group_info[i]);
+-              kvfree(sbi->s_group_info);
++                      kfree(group_info[i]);
++              kvfree(group_info);
++              rcu_read_unlock();
+       }
+       kfree(sbi->s_mb_offsets);
+       kfree(sbi->s_mb_maxs);
+@@ -2995,7 +3013,8 @@ ext4_mb_mark_diskspace_used(struct 
ext4_allocation_context *ac,
+               ext4_group_t flex_group = ext4_flex_group(sbi,
+                                                         ac->ac_b_ex.fe_group);
+               atomic64_sub(ac->ac_b_ex.fe_len,
+-                           &sbi->s_flex_groups[flex_group].free_clusters);
++                           &sbi_array_rcu_deref(sbi, s_flex_groups,
++                                                flex_group)->free_clusters);
+       }
+ 
+       err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
+@@ -4887,7 +4906,8 @@ do_more:
+       if (sbi->s_log_groups_per_flex) {
+               ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
+               atomic64_add(count_clusters,
+-                           &sbi->s_flex_groups[flex_group].free_clusters);
++                           &sbi_array_rcu_deref(sbi, s_flex_groups,
++                                                flex_group)->free_clusters);
+       }
+ 
+       if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
+@@ -5032,7 +5052,8 @@ int ext4_group_add_blocks(handle_t *handle, struct 
super_block *sb,
+       if (sbi->s_log_groups_per_flex) {
+               ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
+               atomic64_add(EXT4_NUM_B2C(sbi, blocks_freed),
+-                           &sbi->s_flex_groups[flex_group].free_clusters);
++                           &sbi_array_rcu_deref(sbi, s_flex_groups,
++                                                flex_group)->free_clusters);
+       }
+ 
+       ext4_mb_unload_buddy(&e4b);
+diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
+index 5223eb25bf59..f5b6667b0ab0 100644
+--- a/fs/ext4/resize.c
++++ b/fs/ext4/resize.c
+@@ -16,6 +16,33 @@
+ 
+ #include "ext4_jbd2.h"
+ 
++struct ext4_rcu_ptr {
++      struct rcu_head rcu;
++      void *ptr;
++};
++
++static void ext4_rcu_ptr_callback(struct rcu_head *head)
++{
++      struct ext4_rcu_ptr *ptr;
++
++      ptr = container_of(head, struct ext4_rcu_ptr, rcu);
++      kvfree(ptr->ptr);
++      kfree(ptr);
++}
++
++void ext4_kvfree_array_rcu(void *to_free)
++{
++      struct ext4_rcu_ptr *ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
++
++      if (ptr) {
++              ptr->ptr = to_free;
++              call_rcu(&ptr->rcu, ext4_rcu_ptr_callback);
++              return;
++      }
++      synchronize_rcu();
++      kvfree(to_free);
++}
++
+ int ext4_resize_begin(struct super_block *sb)
+ {
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+@@ -541,8 +568,8 @@ static int setup_new_flex_group_blocks(struct super_block 
*sb,
+                               brelse(gdb);
+                               goto out;
+                       }
+-                      memcpy(gdb->b_data, sbi->s_group_desc[j]->b_data,
+-                             gdb->b_size);
++                      memcpy(gdb->b_data, sbi_array_rcu_deref(sbi,
++                              s_group_desc, j)->b_data, gdb->b_size);
+                       set_buffer_uptodate(gdb);
+ 
+                       err = ext4_handle_dirty_metadata(handle, NULL, gdb);
+@@ -849,13 +876,15 @@ static int add_new_gdb(handle_t *handle, struct inode 
*inode,
+       }
+       brelse(dind);
+ 
+-      o_group_desc = EXT4_SB(sb)->s_group_desc;
++      rcu_read_lock();
++      o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
+       memcpy(n_group_desc, o_group_desc,
+              EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
++      rcu_read_unlock();
+       n_group_desc[gdb_num] = gdb_bh;
+-      EXT4_SB(sb)->s_group_desc = n_group_desc;
++      rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
+       EXT4_SB(sb)->s_gdb_count++;
+-      kvfree(o_group_desc);
++      ext4_kvfree_array_rcu(o_group_desc);
+ 
+       le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
+       err = ext4_handle_dirty_super(handle, sb);
+@@ -903,9 +932,11 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
+               return err;
+       }
+ 
+-      o_group_desc = EXT4_SB(sb)->s_group_desc;
++      rcu_read_lock();
++      o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
+       memcpy(n_group_desc, o_group_desc,
+              EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
++      rcu_read_unlock();
+       n_group_desc[gdb_num] = gdb_bh;
+ 
+       BUFFER_TRACE(gdb_bh, "get_write_access");
+@@ -916,9 +947,9 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
+               return err;
+       }
+ 
+-      EXT4_SB(sb)->s_group_desc = n_group_desc;
++      rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
+       EXT4_SB(sb)->s_gdb_count++;
+-      kvfree(o_group_desc);
++      ext4_kvfree_array_rcu(o_group_desc);
+       return err;
+ }
+ 
+@@ -1180,7 +1211,8 @@ static int ext4_add_new_descs(handle_t *handle, struct 
super_block *sb,
+                * use non-sparse filesystems anymore.  This is already checked 
above.
+                */
+               if (gdb_off) {
+-                      gdb_bh = sbi->s_group_desc[gdb_num];
++                      gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
++                                                   gdb_num);
+                       BUFFER_TRACE(gdb_bh, "get_write_access");
+                       err = ext4_journal_get_write_access(handle, gdb_bh);
+ 
+@@ -1262,7 +1294,7 @@ static int ext4_setup_new_descs(handle_t *handle, struct 
super_block *sb,
+               /*
+                * get_write_access() has been called on gdb_bh by 
ext4_add_new_desc().
+                */
+-              gdb_bh = sbi->s_group_desc[gdb_num];
++              gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc, gdb_num);
+               /* Update group descriptor block for new group */
+               gdp = (struct ext4_group_desc *)(gdb_bh->b_data +
+                                                gdb_off * EXT4_DESC_SIZE(sb));
+@@ -1390,11 +1422,14 @@ static void ext4_update_super(struct super_block *sb,
+                  percpu_counter_read(&sbi->s_freeclusters_counter));
+       if (ext4_has_feature_flex_bg(sb) && sbi->s_log_groups_per_flex) {
+               ext4_group_t flex_group;
++              struct flex_groups *fg;
++
+               flex_group = ext4_flex_group(sbi, group_data[0].group);
++              fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group);
+               atomic64_add(EXT4_NUM_B2C(sbi, free_blocks),
+-                           &sbi->s_flex_groups[flex_group].free_clusters);
++                           &fg->free_clusters);
+               atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count,
+-                         &sbi->s_flex_groups[flex_group].free_inodes);
++                         &fg->free_inodes);
+       }
+ 
+       /*
+@@ -1489,7 +1524,8 @@ exit_journal:
+               for (; gdb_num <= gdb_num_end; gdb_num++) {
+                       struct buffer_head *gdb_bh;
+ 
+-                      gdb_bh = sbi->s_group_desc[gdb_num];
++                      gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
++                                                   gdb_num);
+                       if (old_gdb == gdb_bh->b_blocknr)
+                               continue;
+                       update_backups(sb, gdb_bh->b_blocknr, gdb_bh->b_data,
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index adf02b1509ca..f2e0220b00c3 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -794,6 +794,8 @@ static void ext4_put_super(struct super_block *sb)
+ {
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+       struct ext4_super_block *es = sbi->s_es;
++      struct buffer_head **group_desc;
++      struct flex_groups **flex_groups;
+       int aborted = 0;
+       int i, err;
+ 
+@@ -826,10 +828,18 @@ static void ext4_put_super(struct super_block *sb)
+       if (!(sb->s_flags & MS_RDONLY))
+               ext4_commit_super(sb, 1);
+ 
++      rcu_read_lock();
++      group_desc = rcu_dereference(sbi->s_group_desc);
+       for (i = 0; i < sbi->s_gdb_count; i++)
+-              brelse(sbi->s_group_desc[i]);
+-      kvfree(sbi->s_group_desc);
+-      kvfree(sbi->s_flex_groups);
++              brelse(group_desc[i]);
++      kvfree(group_desc);
++      flex_groups = rcu_dereference(sbi->s_flex_groups);
++      if (flex_groups) {
++              for (i = 0; i < sbi->s_flex_groups_allocated; i++)
++                      kvfree(flex_groups[i]);
++              kvfree(flex_groups);
++      }
++      rcu_read_unlock();
+       percpu_counter_destroy(&sbi->s_freeclusters_counter);
+       percpu_counter_destroy(&sbi->s_freeinodes_counter);
+       percpu_counter_destroy(&sbi->s_dirs_counter);
+@@ -1978,8 +1988,8 @@ done:
+ int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
+ {
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+-      struct flex_groups *new_groups;
+-      int size;
++      struct flex_groups **old_groups, **new_groups;
++      int size, i, j;
+ 
+       if (!sbi->s_log_groups_per_flex)
+               return 0;
+@@ -1988,22 +1998,37 @@ int ext4_alloc_flex_bg_array(struct super_block *sb, 
ext4_group_t ngroup)
+       if (size <= sbi->s_flex_groups_allocated)
+               return 0;
+ 
+-      size = roundup_pow_of_two(size * sizeof(struct flex_groups));
+-      new_groups = ext4_kvzalloc(size, GFP_KERNEL);
++      new_groups = ext4_kvzalloc(roundup_pow_of_two(size *
++                                 sizeof(*sbi->s_flex_groups)), GFP_KERNEL);
+       if (!new_groups) {
+-              ext4_msg(sb, KERN_ERR, "not enough memory for %d flex groups",
+-                       size / (int) sizeof(struct flex_groups));
++              ext4_msg(sb, KERN_ERR,
++                       "not enough memory for %d flex group pointers", size);
+               return -ENOMEM;
+       }
+-
+-      if (sbi->s_flex_groups) {
+-              memcpy(new_groups, sbi->s_flex_groups,
+-                     (sbi->s_flex_groups_allocated *
+-                      sizeof(struct flex_groups)));
+-              kvfree(sbi->s_flex_groups);
++      for (i = sbi->s_flex_groups_allocated; i < size; i++) {
++              new_groups[i] = ext4_kvzalloc(roundup_pow_of_two(
++                                            sizeof(struct flex_groups)),
++                                            GFP_KERNEL);
++              if (!new_groups[i]) {
++                      for (j = sbi->s_flex_groups_allocated; j < i; j++)
++                              kvfree(new_groups[j]);
++                      kvfree(new_groups);
++                      ext4_msg(sb, KERN_ERR,
++                               "not enough memory for %d flex groups", size);
++                      return -ENOMEM;
++              }
+       }
+-      sbi->s_flex_groups = new_groups;
+-      sbi->s_flex_groups_allocated = size / sizeof(struct flex_groups);
++      rcu_read_lock();
++      old_groups = rcu_dereference(sbi->s_flex_groups);
++      if (old_groups)
++              memcpy(new_groups, old_groups,
++                     (sbi->s_flex_groups_allocated *
++                      sizeof(struct flex_groups *)));
++      rcu_read_unlock();
++      rcu_assign_pointer(sbi->s_flex_groups, new_groups);
++      sbi->s_flex_groups_allocated = size;
++      if (old_groups)
++              ext4_kvfree_array_rcu(old_groups);
+       return 0;
+ }
+ 
+@@ -2011,6 +2036,7 @@ static int ext4_fill_flex_info(struct super_block *sb)
+ {
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+       struct ext4_group_desc *gdp = NULL;
++      struct flex_groups *fg;
+       ext4_group_t flex_group;
+       int i, err;
+ 
+@@ -2028,12 +2054,11 @@ static int ext4_fill_flex_info(struct super_block *sb)
+               gdp = ext4_get_group_desc(sb, i, NULL);
+ 
+               flex_group = ext4_flex_group(sbi, i);
+-              atomic_add(ext4_free_inodes_count(sb, gdp),
+-                         &sbi->s_flex_groups[flex_group].free_inodes);
++              fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group);
++              atomic_add(ext4_free_inodes_count(sb, gdp), &fg->free_inodes);
+               atomic64_add(ext4_free_group_clusters(sb, gdp),
+-                           &sbi->s_flex_groups[flex_group].free_clusters);
+-              atomic_add(ext4_used_dirs_count(sb, gdp),
+-                         &sbi->s_flex_groups[flex_group].used_dirs);
++                           &fg->free_clusters);
++              atomic_add(ext4_used_dirs_count(sb, gdp), &fg->used_dirs);
+       }
+ 
+       return 1;
+@@ -3236,9 +3261,10 @@ static void ext4_set_resv_clusters(struct super_block 
*sb)
+ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ {
+       char *orig_data = kstrdup(data, GFP_KERNEL);
+-      struct buffer_head *bh;
++      struct buffer_head *bh, **group_desc;
+       struct ext4_super_block *es = NULL;
+       struct ext4_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
++      struct flex_groups **flex_groups;
+       ext4_fsblk_t block;
+       ext4_fsblk_t sb_block = get_sb_block(&data);
+       ext4_fsblk_t logical_sb_block;
+@@ -3795,9 +3821,10 @@ static int ext4_fill_super(struct super_block *sb, void 
*data, int silent)
+                       goto failed_mount;
+               }
+       }
+-      sbi->s_group_desc = ext4_kvmalloc(db_count *
++      rcu_assign_pointer(sbi->s_group_desc,
++                         ext4_kvmalloc(db_count *
+                                         sizeof(struct buffer_head *),
+-                                        GFP_KERNEL);
++                                        GFP_KERNEL));
+       if (sbi->s_group_desc == NULL) {
+               ext4_msg(sb, KERN_ERR, "not enough memory");
+               ret = -ENOMEM;
+@@ -3807,14 +3834,19 @@ static int ext4_fill_super(struct super_block *sb, 
void *data, int silent)
+       bgl_lock_init(sbi->s_blockgroup_lock);
+ 
+       for (i = 0; i < db_count; i++) {
++              struct buffer_head *bh;
++
+               block = descriptor_loc(sb, logical_sb_block, i);
+-              sbi->s_group_desc[i] = sb_bread_unmovable(sb, block);
+-              if (!sbi->s_group_desc[i]) {
++              bh = sb_bread_unmovable(sb, block);
++              if (!bh) {
+                       ext4_msg(sb, KERN_ERR,
+                              "can't read group descriptor %d", i);
+                       db_count = i;
+                       goto failed_mount2;
+               }
++              rcu_read_lock();
++              rcu_dereference(sbi->s_group_desc)[i] = bh;
++              rcu_read_unlock();
+       }
+       sbi->s_gdb_count = db_count;
+       if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
+@@ -4149,8 +4181,14 @@ failed_mount7:
+       ext4_unregister_li_request(sb);
+ failed_mount6:
+       ext4_mb_release(sb);
+-      if (sbi->s_flex_groups)
+-              kvfree(sbi->s_flex_groups);
++      rcu_read_lock();
++      flex_groups = rcu_dereference(sbi->s_flex_groups);
++      if (flex_groups) {
++              for (i = 0; i < sbi->s_flex_groups_allocated; i++)
++                      kvfree(flex_groups[i]);
++              kvfree(flex_groups);
++      }
++      rcu_read_unlock();
+       percpu_counter_destroy(&sbi->s_freeclusters_counter);
+       percpu_counter_destroy(&sbi->s_freeinodes_counter);
+       percpu_counter_destroy(&sbi->s_dirs_counter);
+@@ -4177,9 +4215,12 @@ failed_mount3:
+       if (sbi->s_mmp_tsk)
+               kthread_stop(sbi->s_mmp_tsk);
+ failed_mount2:
++      rcu_read_lock();
++      group_desc = rcu_dereference(sbi->s_group_desc);
+       for (i = 0; i < db_count; i++)
+-              brelse(sbi->s_group_desc[i]);
+-      kvfree(sbi->s_group_desc);
++              brelse(group_desc[i]);
++      kvfree(group_desc);
++      rcu_read_unlock();
+ failed_mount:
+       if (sbi->s_chksum_driver)
+               crypto_free_shash(sbi->s_chksum_driver);
+diff --git a/fs/fat/inode.c b/fs/fat/inode.c
+index c81cfb79a339..5e87b9aa7ba6 100644
+--- a/fs/fat/inode.c
++++ b/fs/fat/inode.c
+@@ -653,6 +653,13 @@ static struct inode *fat_alloc_inode(struct super_block 
*sb)
+               return NULL;
+ 
+       init_rwsem(&ei->truncate_lock);
++      /* Zeroing to allow iput() even if partial initialized inode. */
++      ei->mmu_private = 0;
++      ei->i_start = 0;
++      ei->i_logstart = 0;
++      ei->i_attrs = 0;
++      ei->i_pos = 0;
++
+       return &ei->vfs_inode;
+ }
+ 
+@@ -1276,16 +1283,6 @@ out:
+       return 0;
+ }
+ 
+-static void fat_dummy_inode_init(struct inode *inode)
+-{
+-      /* Initialize this dummy inode to work as no-op. */
+-      MSDOS_I(inode)->mmu_private = 0;
+-      MSDOS_I(inode)->i_start = 0;
+-      MSDOS_I(inode)->i_logstart = 0;
+-      MSDOS_I(inode)->i_attrs = 0;
+-      MSDOS_I(inode)->i_pos = 0;
+-}
+-
+ static int fat_read_root(struct inode *inode)
+ {
+       struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
+@@ -1730,13 +1727,11 @@ int fat_fill_super(struct super_block *sb, void *data, 
int silent, int isvfat,
+       fat_inode = new_inode(sb);
+       if (!fat_inode)
+               goto out_fail;
+-      fat_dummy_inode_init(fat_inode);
+       sbi->fat_inode = fat_inode;
+ 
+       fsinfo_inode = new_inode(sb);
+       if (!fsinfo_inode)
+               goto out_fail;
+-      fat_dummy_inode_init(fsinfo_inode);
+       fsinfo_inode->i_ino = MSDOS_FSINFO_INO;
+       sbi->fsinfo_inode = fsinfo_inode;
+       insert_inode_hash(fsinfo_inode);
+diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
+index f5d2d2340b44..16891f5364af 100644
+--- a/fs/fuse/dev.c
++++ b/fs/fuse/dev.c
+@@ -2031,10 +2031,8 @@ static ssize_t fuse_dev_splice_write(struct 
pipe_inode_info *pipe,
+               rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 
1)].len;
+ 
+       ret = -EINVAL;
+-      if (rem < len) {
+-              pipe_unlock(pipe);
+-              goto out;
+-      }
++      if (rem < len)
++              goto out_free;
+ 
+       rem = len;
+       while (rem) {
+@@ -2052,7 +2050,9 @@ static ssize_t fuse_dev_splice_write(struct 
pipe_inode_info *pipe,
+                       pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
+                       pipe->nrbufs--;
+               } else {
+-                      ibuf->ops->get(pipe, ibuf);
++                      if (!pipe_buf_get(pipe, ibuf))
++                              goto out_free;
++
+                       *obuf = *ibuf;
+                       obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
+                       obuf->len = rem;
+@@ -2075,13 +2075,13 @@ static ssize_t fuse_dev_splice_write(struct 
pipe_inode_info *pipe,
+       ret = fuse_dev_do_write(fud, &cs, len);
+ 
+       pipe_lock(pipe);
++out_free:
+       for (idx = 0; idx < nbuf; idx++) {
+               struct pipe_buffer *buf = &bufs[idx];
+               buf->ops->release(pipe, buf);
+       }
+       pipe_unlock(pipe);
+ 
+-out:
+       kfree(bufs);
+       return ret;
+ }
+diff --git a/fs/namei.c b/fs/namei.c
+index 9f1aae507909..4a2b9371e00e 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -1358,7 +1358,7 @@ static int follow_dotdot_rcu(struct nameidata *nd)
+                       nd->path.dentry = parent;
+                       nd->seq = seq;
+                       if (unlikely(!path_connected(&nd->path)))
+-                              return -ENOENT;
++                              return -ECHILD;
+                       break;
+               } else {
+                       struct mount *mnt = real_mount(nd->path.mnt);
+diff --git a/fs/pipe.c b/fs/pipe.c
+index 1e7263bb837a..6534470a6c19 100644
+--- a/fs/pipe.c
++++ b/fs/pipe.c
+@@ -178,9 +178,9 @@ EXPORT_SYMBOL(generic_pipe_buf_steal);
+  *    in the tee() system call, when we duplicate the buffers in one
+  *    pipe into another.
+  */
+-void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer 
*buf)
++bool generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer 
*buf)
+ {
+-      page_cache_get(buf->page);
++      return try_get_page(buf->page);
+ }
+ EXPORT_SYMBOL(generic_pipe_buf_get);
+ 
+diff --git a/fs/splice.c b/fs/splice.c
+index 8398974e1538..57ccc583a172 100644
+--- a/fs/splice.c
++++ b/fs/splice.c
+@@ -1876,7 +1876,11 @@ retry:
+                        * Get a reference to this pipe buffer,
+                        * so we can copy the contents over.
+                        */
+-                      ibuf->ops->get(ipipe, ibuf);
++                      if (!pipe_buf_get(ipipe, ibuf)) {
++                              if (ret == 0)
++                                      ret = -EFAULT;
++                              break;
++                      }
+                       *obuf = *ibuf;
+ 
+                       /*
+@@ -1948,7 +1952,11 @@ static int link_pipe(struct pipe_inode_info *ipipe,
+                * Get a reference to this pipe buffer,
+                * so we can copy the contents over.
+                */
+-              ibuf->ops->get(ipipe, ibuf);
++              if (!pipe_buf_get(ipipe, ibuf)) {
++                      if (ret == 0)
++                              ret = -EFAULT;
++                      break;
++              }
+ 
+               obuf = opipe->bufs + nbuf;
+               *obuf = *ibuf;
+diff --git a/include/linux/bitops.h b/include/linux/bitops.h
+index 83edade218fa..ce2bb045b3fd 100644
+--- a/include/linux/bitops.h
++++ b/include/linux/bitops.h
+@@ -3,7 +3,8 @@
+ #include <asm/types.h>
+ #include <linux/bits.h>
+ 
+-#define BITS_TO_LONGS(nr)     DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
++#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
++#define BITS_TO_LONGS(nr)     DIV_ROUND_UP(nr, BITS_PER_TYPE(long))
+ 
+ extern unsigned int __sw_hweight8(unsigned int w);
+ extern unsigned int __sw_hweight16(unsigned int w);
+diff --git a/include/linux/hid.h b/include/linux/hid.h
+index 5f3131885136..2149f650982e 100644
+--- a/include/linux/hid.h
++++ b/include/linux/hid.h
+@@ -451,7 +451,7 @@ struct hid_report_enum {
+ };
+ 
+ #define HID_MIN_BUFFER_SIZE   64              /* make sure there is at least 
a packet size of space */
+-#define HID_MAX_BUFFER_SIZE   4096            /* 4kb */
++#define HID_MAX_BUFFER_SIZE   8192            /* 8kb */
+ #define HID_CONTROL_FIFO_SIZE 256             /* to init devices with >100 
reports */
+ #define HID_OUTPUT_FIFO_SIZE  64
+ 
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index 15f81b2b87ed..69fa3df9e712 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -488,6 +488,15 @@ static inline void get_huge_page_tail(struct page *page)
+ 
+ extern bool __get_page_tail(struct page *page);
+ 
++static inline int page_ref_count(struct page *page)
++{
++      return atomic_read(&page->_count);
++}
++
++/* 127: arbitrary random number, small enough to assemble well */
++#define page_ref_zero_or_close_to_overflow(page) \
++      ((unsigned int) atomic_read(&page->_count) + 127u <= 127u)
++
+ static inline void get_page(struct page *page)
+ {
+       if (unlikely(PageTail(page)))
+@@ -497,10 +506,22 @@ static inline void get_page(struct page *page)
+        * Getting a normal page or the head of a compound page
+        * requires to already have an elevated page->_count.
+        */
+-      VM_BUG_ON_PAGE(atomic_read(&page->_count) <= 0, page);
++      VM_BUG_ON_PAGE(page_ref_zero_or_close_to_overflow(page), page);
+       atomic_inc(&page->_count);
+ }
+ 
++static inline __must_check bool try_get_page(struct page *page)
++{
++      if (unlikely(PageTail(page)))
++              if (likely(__get_page_tail(page)))
++                      return true;
++
++      if (WARN_ON_ONCE(atomic_read(&page->_count) <= 0))
++              return false;
++      atomic_inc(&page->_count);
++      return true;
++}
++
+ static inline struct page *virt_to_head_page(const void *x)
+ {
+       struct page *page = virt_to_page(x);
+diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
+index 24f5470d3944..0b28b65c12fb 100644
+--- a/include/linux/pipe_fs_i.h
++++ b/include/linux/pipe_fs_i.h
+@@ -112,9 +112,22 @@ struct pipe_buf_operations {
+       /*
+        * Get a reference to the pipe buffer.
+        */
+-      void (*get)(struct pipe_inode_info *, struct pipe_buffer *);
++      bool (*get)(struct pipe_inode_info *, struct pipe_buffer *);
+ };
+ 
++/**
++ * pipe_buf_get - get a reference to a pipe_buffer
++ * @pipe:     the pipe that the buffer belongs to
++ * @buf:      the buffer to get a reference to
++ *
++ * Return: %true if the reference was successfully obtained.
++ */
++static inline __must_check bool pipe_buf_get(struct pipe_inode_info *pipe,
++                              struct pipe_buffer *buf)
++{
++      return buf->ops->get(pipe, buf);
++}
++
+ /* Differs from PIPE_BUF in that PIPE_SIZE is the length of the actual
+    memory allocation, whereas PIPE_BUF makes atomicity guarantees.  */
+ #define PIPE_SIZE             PAGE_SIZE
+@@ -137,7 +150,7 @@ struct pipe_inode_info *alloc_pipe_info(void);
+ void free_pipe_info(struct pipe_inode_info *);
+ 
+ /* Generic pipe buffer ops functions */
+-void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
++bool generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
+ int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
+ int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
+ void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);
+diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
+index 62a462413081..a5a1a1650668 100644
+--- a/include/net/flow_dissector.h
++++ b/include/net/flow_dissector.h
+@@ -4,6 +4,7 @@
+ #include <linux/types.h>
+ #include <linux/in6.h>
+ #include <linux/siphash.h>
++#include <linux/string.h>
+ #include <uapi/linux/if_ether.h>
+ 
+ /**
+@@ -185,4 +186,12 @@ static inline bool flow_keys_have_l4(struct flow_keys 
*keys)
+ 
+ u32 flow_hash_from_keys(struct flow_keys *keys);
+ 
++static inline void
++flow_dissector_init_keys(struct flow_dissector_key_control *key_control,
++                       struct flow_dissector_key_basic *key_basic)
++{
++      memset(key_control, 0, sizeof(*key_control));
++      memset(key_basic, 0, sizeof(*key_basic));
++}
++
+ #endif
+diff --git a/kernel/audit.c b/kernel/audit.c
+index bdf0cf463815..84c445db5fe1 100644
+--- a/kernel/audit.c
++++ b/kernel/audit.c
+@@ -753,13 +753,11 @@ static void audit_log_feature_change(int which, u32 
old_feature, u32 new_feature
+       audit_log_end(ab);
+ }
+ 
+-static int audit_set_feature(struct sk_buff *skb)
++static int audit_set_feature(struct audit_features *uaf)
+ {
+-      struct audit_features *uaf;
+       int i;
+ 
+       BUILD_BUG_ON(AUDIT_LAST_FEATURE + 1 > ARRAY_SIZE(audit_feature_names));
+-      uaf = nlmsg_data(nlmsg_hdr(skb));
+ 
+       /* if there is ever a version 2 we should handle that here */
+ 
+@@ -815,6 +813,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct 
nlmsghdr *nlh)
+ {
+       u32                     seq;
+       void                    *data;
++      int                     data_len;
+       int                     err;
+       struct audit_buffer     *ab;
+       u16                     msg_type = nlh->nlmsg_type;
+@@ -838,6 +837,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct 
nlmsghdr *nlh)
+       }
+       seq  = nlh->nlmsg_seq;
+       data = nlmsg_data(nlh);
++      data_len = nlmsg_len(nlh);
+ 
+       switch (msg_type) {
+       case AUDIT_GET: {
+@@ -859,7 +859,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct 
nlmsghdr *nlh)
+               struct audit_status     s;
+               memset(&s, 0, sizeof(s));
+               /* guard against past and future API changes */
+-              memcpy(&s, data, min_t(size_t, sizeof(s), nlmsg_len(nlh)));
++              memcpy(&s, data, min_t(size_t, sizeof(s), data_len));
+               if (s.mask & AUDIT_STATUS_ENABLED) {
+                       err = audit_set_enabled(s.enabled);
+                       if (err < 0)
+@@ -908,7 +908,9 @@ static int audit_receive_msg(struct sk_buff *skb, struct 
nlmsghdr *nlh)
+                       return err;
+               break;
+       case AUDIT_SET_FEATURE:
+-              err = audit_set_feature(skb);
++              if (data_len < sizeof(struct audit_features))
++                      return -EINVAL;
++              err = audit_set_feature(data);
+               if (err)
+                       return err;
+               break;
+@@ -920,6 +922,8 @@ static int audit_receive_msg(struct sk_buff *skb, struct 
nlmsghdr *nlh)
+ 
+               err = audit_filter_user(msg_type);
+               if (err == 1) { /* match or error */
++                      char *str = data;
++
+                       err = 0;
+                       if (msg_type == AUDIT_USER_TTY) {
+                               err = tty_audit_push_current();
+@@ -928,19 +932,17 @@ static int audit_receive_msg(struct sk_buff *skb, struct 
nlmsghdr *nlh)
+                       }
+                       mutex_unlock(&audit_cmd_mutex);
+                       audit_log_common_recv_msg(&ab, msg_type);
+-                      if (msg_type != AUDIT_USER_TTY)
++                      if (msg_type != AUDIT_USER_TTY) {
++                              /* ensure NULL termination */
++                              str[data_len - 1] = '\0';
+                               audit_log_format(ab, " msg='%.*s'",
+                                                AUDIT_MESSAGE_TEXT_MAX,
+-                                               (char *)data);
+-                      else {
+-                              int size;
+-
++                                               str);
++                      } else {
+                               audit_log_format(ab, " data=");
+-                              size = nlmsg_len(nlh);
+-                              if (size > 0 &&
+-                                  ((unsigned char *)data)[size - 1] == '\0')
+-                                      size--;
+-                              audit_log_n_untrustedstring(ab, data, size);
++                              if (data_len > 0 && str[data_len - 1] == '\0')
++                                      data_len--;
++                              audit_log_n_untrustedstring(ab, str, data_len);
+                       }
+                       audit_set_portid(ab, NETLINK_CB(skb).portid);
+                       audit_log_end(ab);
+@@ -949,7 +951,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct 
nlmsghdr *nlh)
+               break;
+       case AUDIT_ADD_RULE:
+       case AUDIT_DEL_RULE:
+-              if (nlmsg_len(nlh) < sizeof(struct audit_rule_data))
++              if (data_len < sizeof(struct audit_rule_data))
+                       return -EINVAL;
+               if (audit_enabled == AUDIT_LOCKED) {
+                       audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE);
+@@ -958,7 +960,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct 
nlmsghdr *nlh)
+                       return -EPERM;
+               }
+               err = audit_rule_change(msg_type, NETLINK_CB(skb).portid,
+-                                         seq, data, nlmsg_len(nlh));
++                                         seq, data, data_len);
+               break;
+       case AUDIT_LIST_RULES:
+               err = audit_list_rules_send(skb, seq);
+@@ -972,7 +974,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct 
nlmsghdr *nlh)
+       case AUDIT_MAKE_EQUIV: {
+               void *bufp = data;
+               u32 sizes[2];
+-              size_t msglen = nlmsg_len(nlh);
++              size_t msglen = data_len;
+               char *old, *new;
+ 
+               err = -EINVAL;
+@@ -1049,7 +1051,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct 
nlmsghdr *nlh)
+ 
+               memset(&s, 0, sizeof(s));
+               /* guard against past and future API changes */
+-              memcpy(&s, data, min_t(size_t, sizeof(s), nlmsg_len(nlh)));
++              memcpy(&s, data, min_t(size_t, sizeof(s), data_len));
+               /* check if new data is valid */
+               if ((s.enabled != 0 && s.enabled != 1) ||
+                   (s.log_passwd != 0 && s.log_passwd != 1))
+diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
+index cf7aa656b308..41a668a9d561 100644
+--- a/kernel/auditfilter.c
++++ b/kernel/auditfilter.c
+@@ -434,6 +434,7 @@ static struct audit_entry *audit_data_to_entry(struct 
audit_rule_data *data,
+       bufp = data->buf;
+       for (i = 0; i < data->field_count; i++) {
+               struct audit_field *f = &entry->rule.fields[i];
++              u32 f_val;
+ 
+               err = -EINVAL;
+ 
+@@ -442,12 +443,12 @@ static struct audit_entry *audit_data_to_entry(struct 
audit_rule_data *data,
+                       goto exit_free;
+ 
+               f->type = data->fields[i];
+-              f->val = data->values[i];
++              f_val = data->values[i];
+ 
+               /* Support legacy tests for a valid loginuid */
+-              if ((f->type == AUDIT_LOGINUID) && (f->val == AUDIT_UID_UNSET)) 
{
++              if ((f->type == AUDIT_LOGINUID) && (f_val == AUDIT_UID_UNSET)) {
+                       f->type = AUDIT_LOGINUID_SET;
+-                      f->val = 0;
++                      f_val = 0;
+                       entry->rule.pflags |= AUDIT_LOGINUID_LEGACY;
+               }
+ 
+@@ -463,7 +464,7 @@ static struct audit_entry *audit_data_to_entry(struct 
audit_rule_data *data,
+               case AUDIT_SUID:
+               case AUDIT_FSUID:
+               case AUDIT_OBJ_UID:
+-                      f->uid = make_kuid(current_user_ns(), f->val);
++                      f->uid = make_kuid(current_user_ns(), f_val);
+                       if (!uid_valid(f->uid))
+                               goto exit_free;
+                       break;
+@@ -472,11 +473,12 @@ static struct audit_entry *audit_data_to_entry(struct 
audit_rule_data *data,
+               case AUDIT_SGID:
+               case AUDIT_FSGID:
+               case AUDIT_OBJ_GID:
+-                      f->gid = make_kgid(current_user_ns(), f->val);
++                      f->gid = make_kgid(current_user_ns(), f_val);
+                       if (!gid_valid(f->gid))
+                               goto exit_free;
+                       break;
+               case AUDIT_ARCH:
++                      f->val = f_val;
+                       entry->rule.arch_f = f;
+                       break;
+               case AUDIT_SUBJ_USER:
+@@ -489,11 +491,13 @@ static struct audit_entry *audit_data_to_entry(struct 
audit_rule_data *data,
+               case AUDIT_OBJ_TYPE:
+               case AUDIT_OBJ_LEV_LOW:
+               case AUDIT_OBJ_LEV_HIGH:
+-                      str = audit_unpack_string(&bufp, &remain, f->val);
+-                      if (IS_ERR(str))
++                      str = audit_unpack_string(&bufp, &remain, f_val);
++                      if (IS_ERR(str)) {
++                              err = PTR_ERR(str);
+                               goto exit_free;
+-                      entry->rule.buflen += f->val;
+-
++                      }
++                      entry->rule.buflen += f_val;
++                      f->lsm_str = str;
+                       err = security_audit_rule_init(f->type, f->op, str,
+                                                      (void **)&f->lsm_rule);
+                       /* Keep currently invalid fields around in case they
+@@ -502,68 +506,71 @@ static struct audit_entry *audit_data_to_entry(struct 
audit_rule_data *data,
+                               pr_warn("audit rule for LSM \'%s\' is 
invalid\n",
+                                       str);
+                               err = 0;
+-                      }
+-                      if (err) {
+-                              kfree(str);
++                      } else if (err)
+                               goto exit_free;
+-                      } else
+-                              f->lsm_str = str;
+                       break;
+               case AUDIT_WATCH:
+-                      str = audit_unpack_string(&bufp, &remain, f->val);
+-                      if (IS_ERR(str))
++                      str = audit_unpack_string(&bufp, &remain, f_val);
++                      if (IS_ERR(str)) {
++                              err = PTR_ERR(str);
+                               goto exit_free;
+-                      entry->rule.buflen += f->val;
+-
+-                      err = audit_to_watch(&entry->rule, str, f->val, f->op);
++                      }
++                      err = audit_to_watch(&entry->rule, str, f_val, f->op);
+                       if (err) {
+                               kfree(str);
+                               goto exit_free;
+                       }
++                      entry->rule.buflen += f_val;
+                       break;
+               case AUDIT_DIR:
+-                      str = audit_unpack_string(&bufp, &remain, f->val);
+-                      if (IS_ERR(str))
++                      str = audit_unpack_string(&bufp, &remain, f_val);
++                      if (IS_ERR(str)) {
++                              err = PTR_ERR(str);
+                               goto exit_free;
+-                      entry->rule.buflen += f->val;
+-
++                      }
+                       err = audit_make_tree(&entry->rule, str, f->op);
+                       kfree(str);
+                       if (err)
+                               goto exit_free;
++                      entry->rule.buflen += f_val;
+                       break;
+               case AUDIT_INODE:
++                      f->val = f_val;
+                       err = audit_to_inode(&entry->rule, f);
+                       if (err)
+                               goto exit_free;
+                       break;
+               case AUDIT_FILTERKEY:
+-                      if (entry->rule.filterkey || f->val > AUDIT_MAX_KEY_LEN)
++                      if (entry->rule.filterkey || f_val > AUDIT_MAX_KEY_LEN)
+                               goto exit_free;
+-                      str = audit_unpack_string(&bufp, &remain, f->val);
+-                      if (IS_ERR(str))
++                      str = audit_unpack_string(&bufp, &remain, f_val);
++                      if (IS_ERR(str)) {
++                              err = PTR_ERR(str);
+                               goto exit_free;
+-                      entry->rule.buflen += f->val;
++                      }
++                      entry->rule.buflen += f_val;
+                       entry->rule.filterkey = str;
+                       break;
+               case AUDIT_EXE:
+-                      if (entry->rule.exe || f->val > PATH_MAX)
++                      if (entry->rule.exe || f_val > PATH_MAX)
+                               goto exit_free;
+-                      str = audit_unpack_string(&bufp, &remain, f->val);
++                      str = audit_unpack_string(&bufp, &remain, f_val);
+                       if (IS_ERR(str)) {
+                               err = PTR_ERR(str);
+                               goto exit_free;
+                       }
+-                      entry->rule.buflen += f->val;
+-
+-                      audit_mark = audit_alloc_mark(&entry->rule, str, 
f->val);
++                      audit_mark = audit_alloc_mark(&entry->rule, str, f_val);
+                       if (IS_ERR(audit_mark)) {
+                               kfree(str);
+                               err = PTR_ERR(audit_mark);
+                               goto exit_free;
+                       }
++                      entry->rule.buflen += f_val;
+                       entry->rule.exe = audit_mark;
+                       break;
++              default:
++                      f->val = f_val;
++                      break;
+               }
+       }
+ 
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 6176dc89b32c..06efd18bf3e3 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -5749,12 +5749,16 @@ static void buffer_pipe_buf_release(struct 
pipe_inode_info *pipe,
+       buf->private = 0;
+ }
+ 
+-static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
++static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
+                               struct pipe_buffer *buf)
+ {
+       struct buffer_ref *ref = (struct buffer_ref *)buf->private;
+ 
++      if (ref->ref > INT_MAX/2)
++              return false;
++
+       ref->ref++;
++      return true;
+ }
+ 
+ /* Pipe buffer operations for a buffer. */
+diff --git a/mm/gup.c b/mm/gup.c
+index 2cd3b31e3666..4c5857889e9d 100644
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -126,8 +126,12 @@ retry:
+               }
+       }
+ 
+-      if (flags & FOLL_GET)
+-              get_page_foll(page);
++      if (flags & FOLL_GET) {
++              if (unlikely(!try_get_page_foll(page))) {
++                      page = ERR_PTR(-ENOMEM);
++                      goto out;
++              }
++      }
+       if (flags & FOLL_TOUCH) {
+               if ((flags & FOLL_WRITE) &&
+                   !pte_dirty(pte) && !PageDirty(page))
+@@ -289,7 +293,10 @@ static int get_gate_page(struct mm_struct *mm, unsigned 
long address,
+                       goto unmap;
+               *page = pte_page(*pte);
+       }
+-      get_page(*page);
++      if (unlikely(!try_get_page(*page))) {
++              ret = -ENOMEM;
++              goto unmap;
++      }
+ out:
+       ret = 0;
+ unmap:
+@@ -1053,6 +1060,20 @@ struct page *get_dump_page(unsigned long addr)
+  */
+ #ifdef CONFIG_HAVE_GENERIC_RCU_GUP
+ 
++/*
++ * Return the compund head page with ref appropriately incremented,
++ * or NULL if that failed.
++ */
++static inline struct page *try_get_compound_head(struct page *page, int refs)
++{
++      struct page *head = compound_head(page);
++      if (WARN_ON_ONCE(atomic_read(&head->_count) < 0))
++              return NULL;
++      if (unlikely(!page_cache_add_speculative(head, refs)))
++              return NULL;
++      return head;
++}
++
+ #ifdef __HAVE_ARCH_PTE_SPECIAL
+ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
+                        int write, struct page **pages, int *nr)
+@@ -1083,6 +1104,9 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
+               VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
+               page = pte_page(pte);
+ 
++              if (WARN_ON_ONCE(page_ref_count(page) < 0))
++                      goto pte_unmap;
++
+               if (!page_cache_get_speculative(page))
+                       goto pte_unmap;
+ 
+@@ -1130,18 +1154,17 @@ static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
+               return 0;
+ 
+       refs = 0;
+-      head = pmd_page(orig);
+-      page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
++      page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
+       tail = page;
+       do {
+-              VM_BUG_ON_PAGE(compound_head(page) != head, page);
+               pages[*nr] = page;
+               (*nr)++;
+               page++;
+               refs++;
+       } while (addr += PAGE_SIZE, addr != end);
+ 
+-      if (!page_cache_add_speculative(head, refs)) {
++      head = try_get_compound_head(pmd_page(orig), refs);
++      if (!head) {
+               *nr -= refs;
+               return 0;
+       }
+@@ -1177,18 +1200,17 @@ static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
+               return 0;
+ 
+       refs = 0;
+-      head = pud_page(orig);
+-      page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
++      page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
+       tail = page;
+       do {
+-              VM_BUG_ON_PAGE(compound_head(page) != head, page);
+               pages[*nr] = page;
+               (*nr)++;
+               page++;
+               refs++;
+       } while (addr += PAGE_SIZE, addr != end);
+ 
+-      if (!page_cache_add_speculative(head, refs)) {
++      head = try_get_compound_head(pud_page(orig), refs);
++      if (!head) {
+               *nr -= refs;
+               return 0;
+       }
+@@ -1220,18 +1242,17 @@ static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
+               return 0;
+ 
+       refs = 0;
+-      head = pgd_page(orig);
+-      page = head + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT);
++      page = pgd_page(orig) + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT);
+       tail = page;
+       do {
+-              VM_BUG_ON_PAGE(compound_head(page) != head, page);
+               pages[*nr] = page;
+               (*nr)++;
+               page++;
+               refs++;
+       } while (addr += PAGE_SIZE, addr != end);
+ 
+-      if (!page_cache_add_speculative(head, refs)) {
++      head = try_get_compound_head(pgd_page(orig), refs);
++      if (!head) {
+               *nr -= refs;
+               return 0;
+       }
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index fd932e7a25dd..3a1501e85483 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -3886,6 +3886,7 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
+       unsigned long vaddr = *position;
+       unsigned long remainder = *nr_pages;
+       struct hstate *h = hstate_vma(vma);
++      int err = -EFAULT;
+ 
+       while (vaddr < vma->vm_end && remainder) {
+               pte_t *pte;
+@@ -3957,6 +3958,19 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ 
+               pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
+               page = pte_page(huge_ptep_get(pte));
++
++              /*
++               * Instead of doing 'try_get_page_foll()' below in the same_page
++               * loop, just check the count once here.
++               */
++              if (unlikely(page_count(page) <= 0)) {
++                      if (pages) {
++                              spin_unlock(ptl);
++                              remainder = 0;
++                              err = -ENOMEM;
++                              break;
++                      }
++              }
+ same_page:
+               if (pages) {
+                       pages[i] = mem_map_offset(page, pfn_offset);
+@@ -3983,7 +3997,7 @@ same_page:
+       *nr_pages = remainder;
+       *position = vaddr;
+ 
+-      return i ? i : -EFAULT;
++      return i ? i : err;
+ }
+ 
+ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
+diff --git a/mm/internal.h b/mm/internal.h
+index f63f4393d633..d83afc995a49 100644
+--- a/mm/internal.h
++++ b/mm/internal.h
+@@ -81,7 +81,8 @@ static inline void __get_page_tail_foll(struct page *page,
+        * speculative page access (like in
+        * page_cache_get_speculative()) on tail pages.
+        */
+-      VM_BUG_ON_PAGE(atomic_read(&compound_head(page)->_count) <= 0, page);
++      VM_BUG_ON_PAGE(page_ref_zero_or_close_to_overflow(compound_head(page)),
++                     page);
+       if (get_page_head)
+               atomic_inc(&compound_head(page)->_count);
+       get_huge_page_tail(page);
+@@ -106,11 +107,34 @@ static inline void get_page_foll(struct page *page)
+                * Getting a normal page or the head of a compound page
+                * requires to already have an elevated page->_count.
+                */
+-              VM_BUG_ON_PAGE(atomic_read(&page->_count) <= 0, page);
++              VM_BUG_ON_PAGE(page_ref_zero_or_close_to_overflow(page), page);
+               atomic_inc(&page->_count);
+       }
+ }
+ 
++static inline __must_check bool try_get_page_foll(struct page *page)
++{
++      if (unlikely(PageTail(page))) {
++              if (WARN_ON_ONCE(atomic_read(&compound_head(page)->_count) <= 0))
++                      return false;
++              /*
++               * This is safe only because
++               * __split_huge_page_refcount() can't run under
++               * get_page_foll() because we hold the proper PT lock.
++               */
++              __get_page_tail_foll(page, true);
++      } else {
++              /*
++               * Getting a normal page or the head of a compound page
++               * requires to already have an elevated page->_count.
++               */
++              if (WARN_ON_ONCE(atomic_read(&page->_count) <= 0))
++                      return false;
++              atomic_inc(&page->_count);
++      }
++      return true;
++}
++
+ extern unsigned long highest_memmap_pfn;
+ 
+ /*
+diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
+index 365de66436ac..2fd4aae8f285 100644
+--- a/net/core/fib_rules.c
++++ b/net/core/fib_rules.c
+@@ -570,7 +570,7 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
+ 
+       frh = nlmsg_data(nlh);
+       frh->family = ops->family;
+-      frh->table = rule->table;
++      frh->table = rule->table < 256 ? rule->table : RT_TABLE_COMPAT;
+       if (nla_put_u32(skb, FRA_TABLE, rule->table))
+               goto nla_put_failure;
+       if (nla_put_u32(skb, FRA_SUPPRESS_PREFIXLEN, rule->suppress_prefixlen))
+diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
+index c23e02a7ccb0..30eb8bdcdbda 100644
+--- a/net/ipv6/ip6_fib.c
++++ b/net/ipv6/ip6_fib.c
+@@ -780,8 +780,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
+                                       found++;
+                                       break;
+                               }
+-                              if (rt_can_ecmp)
+-                                      fallback_ins = fallback_ins ?: ins;
++                              fallback_ins = fallback_ins ?: ins;
+                               goto next_iter;
+                       }
+ 
+@@ -821,7 +820,9 @@ next_iter:
+       }
+ 
+       if (fallback_ins && !found) {
+-              /* No ECMP-able route found, replace first non-ECMP one */
++              /* No matching route with same ecmp-able-ness found, replace
++               * first matching route
++               */
+               ins = fallback_ins;
+               iter = *ins;
+               found++;
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index fffd2ad28942..63a7d31fa9f0 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -2953,6 +2953,7 @@ static int ip6_route_multipath_add(struct fib6_config *cfg)
+                */
+               cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
+                                                    NLM_F_REPLACE);
++              cfg->fc_nlinfo.nlh->nlmsg_flags |= NLM_F_CREATE;
+               nhn++;
+       }
+ 
+diff --git a/net/mac80211/util.c b/net/mac80211/util.c
+index 2214c77d4172..4301a92fc160 100644
+--- a/net/mac80211/util.c
++++ b/net/mac80211/util.c
+@@ -939,16 +939,22 @@ u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
+                               elem_parse_failed = true;
+                       break;
+               case WLAN_EID_VHT_OPERATION:
+-                      if (elen >= sizeof(struct ieee80211_vht_operation))
++                      if (elen >= sizeof(struct ieee80211_vht_operation)) {
+                               elems->vht_operation = (void *)pos;
+-                      else
+-                              elem_parse_failed = true;
++                              if (calc_crc)
++                                      crc = crc32_be(crc, pos - 2, elen + 2);
++                              break;
++                      }
++                      elem_parse_failed = true;
+                       break;
+               case WLAN_EID_OPMODE_NOTIF:
+-                      if (elen > 0)
++                      if (elen > 0) {
+                               elems->opmode_notif = pos;
+-                      else
+-                              elem_parse_failed = true;
++                              if (calc_crc)
++                                      crc = crc32_be(crc, pos - 2, elen + 2);
++                              break;
++                      }
++                      elem_parse_failed = true;
+                       break;
+               case WLAN_EID_MESH_ID:
+                       elems->mesh_id = pos;
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index bf292010760a..cc37a219e11e 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -1003,7 +1003,8 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
+       if (nlk->netlink_bind && groups) {
+               int group;
+ 
+-              for (group = 0; group < nlk->ngroups; group++) {
++              /* nl_groups is a u32, so cap the maximum groups we can bind */
++              for (group = 0; group < BITS_PER_TYPE(u32); group++) {
+                       if (!test_bit(group, &groups))
+                               continue;
+                       err = nlk->netlink_bind(net, group + 1);
+@@ -1022,7 +1023,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
+                       netlink_insert(sk, nladdr->nl_pid) :
+                       netlink_autobind(sock);
+               if (err) {
+-                      netlink_undo_bind(nlk->ngroups, groups, sk);
++                      netlink_undo_bind(BITS_PER_TYPE(u32), groups, sk);
+                       return err;
+               }
+       }
+diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
+index e5a58c82728a..5ab8205f988b 100644
+--- a/net/sched/cls_flower.c
++++ b/net/sched/cls_flower.c
+@@ -127,6 +127,7 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
+       struct fl_flow_key skb_key;
+       struct fl_flow_key skb_mkey;
+ 
++      flow_dissector_init_keys(&skb_key.control, &skb_key.basic);
+       fl_clear_masked_range(&skb_key, &head->mask);
+       skb_key.indev_ifindex = skb->skb_iif;
+       /* skb_flow_dissect() does not set n_proto in case an unknown protocol,
+diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
+index df9ac3746c1b..adaaaaad527d 100644
+--- a/net/sctp/sm_statefuns.c
++++ b/net/sctp/sm_statefuns.c
+@@ -177,6 +177,16 @@ sctp_chunk_length_valid(struct sctp_chunk *chunk,
+       return 1;
+ }
+ 
++/* Check for format error in an ABORT chunk */
++static inline bool sctp_err_chunk_valid(struct sctp_chunk *chunk)
++{
++      struct sctp_errhdr *err;
++
++      sctp_walk_errors(err, chunk->chunk_hdr);
++
++      return (void *)err == (void *)chunk->chunk_end;
++}
++
+ /**********************************************************
+  * These are the state functions for handling chunk events.
+  **********************************************************/
+@@ -2159,6 +2169,9 @@ sctp_disposition_t sctp_sf_shutdown_pending_abort(
+                   sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
+               return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
+ 
++      if (!sctp_err_chunk_valid(chunk))
++              return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
++
+       return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands);
+ }
+ 
+@@ -2201,6 +2214,9 @@ sctp_disposition_t sctp_sf_shutdown_sent_abort(struct net *net,
+                   sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
+               return sctp_sf_discard_chunk(net, ep, asoc, type, arg, 
commands);
+ 
++      if (!sctp_err_chunk_valid(chunk))
++              return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
++
+       /* Stop the T2-shutdown timer. */
+       sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
+                       SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
+@@ -2466,6 +2482,9 @@ sctp_disposition_t sctp_sf_do_9_1_abort(struct net *net,
+                   sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
+               return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
+ 
++      if (!sctp_err_chunk_valid(chunk))
++              return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
++
+       return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands);
+ }
+ 
+@@ -2482,15 +2501,9 @@ static sctp_disposition_t __sctp_sf_do_9_1_abort(struct net *net,
+ 
+       /* See if we have an error cause code in the chunk.  */
+       len = ntohs(chunk->chunk_hdr->length);
+-      if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr)) {
+-
+-              sctp_errhdr_t *err;
+-              sctp_walk_errors(err, chunk->chunk_hdr);
+-              if ((void *)err != (void *)chunk->chunk_end)
+-                      return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+ 
++      if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr))
+               error = ((sctp_errhdr_t *)chunk->skb->data)->cause;
+-      }
+ 
+       sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNRESET));
+       /* ASSOC_FAILED will DELETE_TCB. */
+diff --git a/net/wireless/ethtool.c b/net/wireless/ethtool.c
+index e9e91298c70d..3cedf2c2b60b 100644
+--- a/net/wireless/ethtool.c
++++ b/net/wireless/ethtool.c
+@@ -6,9 +6,13 @@
+ void cfg80211_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+ {
+       struct wireless_dev *wdev = dev->ieee80211_ptr;
++      struct device *pdev = wiphy_dev(wdev->wiphy);
+ 
+-      strlcpy(info->driver, wiphy_dev(wdev->wiphy)->driver->name,
+-              sizeof(info->driver));
++      if (pdev->driver)
++              strlcpy(info->driver, pdev->driver->name,
++                      sizeof(info->driver));
++      else
++              strlcpy(info->driver, "N/A", sizeof(info->driver));
+ 
+       strlcpy(info->version, init_utsname()->release, sizeof(info->version));
+ 
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index fd0bf278067e..4b30e91106d0 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -330,6 +330,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
+       [NL80211_ATTR_CONTROL_PORT_ETHERTYPE] = { .type = NLA_U16 },
+       [NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT] = { .type = NLA_FLAG },
+       [NL80211_ATTR_PRIVACY] = { .type = NLA_FLAG },
++      [NL80211_ATTR_STATUS_CODE] = { .type = NLA_U16 },
+       [NL80211_ATTR_CIPHER_SUITE_GROUP] = { .type = NLA_U32 },
+       [NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 },
+       [NL80211_ATTR_PID] = { .type = NLA_U32 },
+diff --git a/sound/soc/codecs/pcm512x.c b/sound/soc/codecs/pcm512x.c
+index 047c48953a20..1a90732e7621 100644
+--- a/sound/soc/codecs/pcm512x.c
++++ b/sound/soc/codecs/pcm512x.c
+@@ -1439,13 +1439,15 @@ int pcm512x_probe(struct device *dev, struct regmap *regmap)
+       }
+ 
+       pcm512x->sclk = devm_clk_get(dev, NULL);
+-      if (PTR_ERR(pcm512x->sclk) == -EPROBE_DEFER)
+-              return -EPROBE_DEFER;
++      if (PTR_ERR(pcm512x->sclk) == -EPROBE_DEFER) {
++              ret = -EPROBE_DEFER;
++              goto err;
++      }
+       if (!IS_ERR(pcm512x->sclk)) {
+               ret = clk_prepare_enable(pcm512x->sclk);
+               if (ret != 0) {
+                       dev_err(dev, "Failed to enable SCLK: %d\n", ret);
+-                      return ret;
++                      goto err;
+               }
+       }
+ 
+diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
+index 7e26d173da41..b245379b4dfc 100644
+--- a/sound/soc/soc-dapm.c
++++ b/sound/soc/soc-dapm.c
+@@ -4317,7 +4317,7 @@ static void soc_dapm_shutdown_dapm(struct snd_soc_dapm_context *dapm)
+                       continue;
+               if (w->power) {
+                       dapm_seq_insert(w, &down_list, false);
+-                      w->power = 0;
++                      w->new_power = 0;
+                       powerdown = 1;
+               }
+       }
+diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
+index 81bedd9bb922..7cffa98ec313 100644
+--- a/sound/soc/soc-pcm.c
++++ b/sound/soc/soc-pcm.c
+@@ -2866,16 +2866,16 @@ static ssize_t dpcm_show_state(struct snd_soc_pcm_runtime *fe,
+       ssize_t offset = 0;
+ 
+       /* FE state */
+-      offset += snprintf(buf + offset, size - offset,
++      offset += scnprintf(buf + offset, size - offset,
+                       "[%s - %s]\n", fe->dai_link->name,
+                       stream ? "Capture" : "Playback");
+ 
+-      offset += snprintf(buf + offset, size - offset, "State: %s\n",
++      offset += scnprintf(buf + offset, size - offset, "State: %s\n",
+                       dpcm_state_string(fe->dpcm[stream].state));
+ 
+       if ((fe->dpcm[stream].state >= SND_SOC_DPCM_STATE_HW_PARAMS) &&
+           (fe->dpcm[stream].state <= SND_SOC_DPCM_STATE_STOP))
+-              offset += snprintf(buf + offset, size - offset,
++              offset += scnprintf(buf + offset, size - offset,
+                               "Hardware Params: "
+                               "Format = %s, Channels = %d, Rate = %d\n",
+                               snd_pcm_format_name(params_format(params)),
+@@ -2883,10 +2883,10 @@ static ssize_t dpcm_show_state(struct snd_soc_pcm_runtime *fe,
+                               params_rate(params));
+ 
+       /* BEs state */
+-      offset += snprintf(buf + offset, size - offset, "Backends:\n");
++      offset += scnprintf(buf + offset, size - offset, "Backends:\n");
+ 
+       if (list_empty(&fe->dpcm[stream].be_clients)) {
+-              offset += snprintf(buf + offset, size - offset,
++              offset += scnprintf(buf + offset, size - offset,
+                               " No active DSP links\n");
+               goto out;
+       }
+@@ -2895,16 +2895,16 @@ static ssize_t dpcm_show_state(struct snd_soc_pcm_runtime *fe,
+               struct snd_soc_pcm_runtime *be = dpcm->be;
+               params = &dpcm->hw_params;
+ 
+-              offset += snprintf(buf + offset, size - offset,
++              offset += scnprintf(buf + offset, size - offset,
+                               "- %s\n", be->dai_link->name);
+ 
+-              offset += snprintf(buf + offset, size - offset,
++              offset += scnprintf(buf + offset, size - offset,
+                               "   State: %s\n",
+                               dpcm_state_string(be->dpcm[stream].state));
+ 
+               if ((be->dpcm[stream].state >= SND_SOC_DPCM_STATE_HW_PARAMS) &&
+                   (be->dpcm[stream].state <= SND_SOC_DPCM_STATE_STOP))
+-                      offset += snprintf(buf + offset, size - offset,
++                      offset += scnprintf(buf + offset, size - offset,
+                               "   Hardware Params: "
+                               "Format = %s, Channels = %d, Rate = %d\n",
+                               snd_pcm_format_name(params_format(params)),
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 08a954582e31..82f3a9d78cab 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -1865,12 +1865,12 @@ int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+       if (slots->generation != ghc->generation)
+               kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);
+ 
+-      if (unlikely(!ghc->memslot))
+-              return kvm_write_guest(kvm, ghc->gpa, data, len);
+-
+       if (kvm_is_error_hva(ghc->hva))
+               return -EFAULT;
+ 
++      if (unlikely(!ghc->memslot))
++              return kvm_write_guest(kvm, ghc->gpa, data, len);
++
+       r = __copy_to_user((void __user *)ghc->hva, data, len);
+       if (r)
+               return -EFAULT;
+@@ -1891,12 +1891,12 @@ int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+       if (slots->generation != ghc->generation)
+               kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);
+ 
+-      if (unlikely(!ghc->memslot))
+-              return kvm_read_guest(kvm, ghc->gpa, data, len);
+-
+       if (kvm_is_error_hva(ghc->hva))
+               return -EFAULT;
+ 
++      if (unlikely(!ghc->memslot))
++              return kvm_read_guest(kvm, ghc->gpa, data, len);
++
+       r = __copy_from_user(data, (void __user *)ghc->hva, len);
+       if (r)
+               return -EFAULT;

Reply via email to