diff --git a/Makefile b/Makefile
index 2534e51de1db..ce4248f558d1 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 17
-SUBLEVEL = 13
+SUBLEVEL = 14
 EXTRAVERSION =
 NAME = Merciless Moray
 
diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h
index 414dc7e7c950..041b77692bfa 100644
--- a/arch/x86/events/intel/uncore.h
+++ b/arch/x86/events/intel/uncore.h
@@ -23,7 +23,7 @@
 #define UNCORE_PCI_DEV_TYPE(data)      ((data >> 8) & 0xff)
 #define UNCORE_PCI_DEV_IDX(data)       (data & 0xff)
 #define UNCORE_EXTRA_PCI_DEV           0xff
-#define UNCORE_EXTRA_PCI_DEV_MAX       3
+#define UNCORE_EXTRA_PCI_DEV_MAX       4
 
 #define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff)
 
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index 77076a102e34..df2d69cb136a 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -1029,6 +1029,7 @@ void snbep_uncore_cpu_init(void)
 enum {
        SNBEP_PCI_QPI_PORT0_FILTER,
        SNBEP_PCI_QPI_PORT1_FILTER,
+       BDX_PCI_QPI_PORT2_FILTER,
        HSWEP_PCI_PCU_3,
 };
 
@@ -3286,15 +3287,18 @@ static const struct pci_device_id bdx_uncore_pci_ids[] = {
        },
        { /* QPI Port 0 filter  */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f86),
-               .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 0),
+               .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
+                                                  SNBEP_PCI_QPI_PORT0_FILTER),
        },
        { /* QPI Port 1 filter  */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f96),
-               .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 1),
+               .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
+                                                  SNBEP_PCI_QPI_PORT1_FILTER),
        },
        { /* QPI Port 2 filter  */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46),
-               .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 2),
+               .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
+                                                  BDX_PCI_QPI_PORT2_FILTER),
        },
        { /* PCU.3 (for Capability registers) */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fc0),
diff --git a/block/blk-core.c b/block/blk-core.c
index 47ab2d9d02d9..77938b512a71 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2174,11 +2174,12 @@ static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
        if (part->policy && op_is_write(bio_op(bio))) {
                char b[BDEVNAME_SIZE];
 
-               printk(KERN_ERR
+               WARN_ONCE(1,
                       "generic_make_request: Trying to write "
                        "to read-only block-device %s (partno %d)\n",
                        bio_devname(bio, b), part->partno);
-               return true;
+               /* Older lvm-tools actually trigger this */
+               return false;
        }
 
        return false;
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index d7267dd9c7bf..6fca5e64cffb 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -377,6 +377,7 @@ static int i2c_imx_dma_xfer(struct imx_i2c_struct *i2c_imx,
                goto err_desc;
        }
 
+       reinit_completion(&dma->cmd_complete);
        txdesc->callback = i2c_imx_dma_callback;
        txdesc->callback_param = i2c_imx;
        if (dma_submit_error(dmaengine_submit(txdesc))) {
@@ -631,7 +632,6 @@ static int i2c_imx_dma_write(struct imx_i2c_struct *i2c_imx,
         * The first byte must be transmitted by the CPU.
         */
        imx_i2c_write_reg(msgs->addr << 1, i2c_imx, IMX_I2C_I2DR);
-       reinit_completion(&i2c_imx->dma->cmd_complete);
        time_left = wait_for_completion_timeout(
                                &i2c_imx->dma->cmd_complete,
                                msecs_to_jiffies(DMA_TIMEOUT));
@@ -690,7 +690,6 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
        if (result)
                return result;
 
-       reinit_completion(&i2c_imx->dma->cmd_complete);
        time_left = wait_for_completion_timeout(
                                &i2c_imx->dma->cmd_complete,
                                msecs_to_jiffies(DMA_TIMEOUT));
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 89a4999fa631..c8731568f9c4 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -2141,6 +2141,7 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
                msleep(1000);
 
        qla24xx_disable_vp(vha);
+       qla2x00_wait_for_sess_deletion(vha);
 
        vha->flags.delete_progress = 1;
 
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 3c4c84ed0f0f..21ffbe694acd 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -213,6 +213,7 @@ void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *,
 int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *);
 int qla24xx_async_abort_cmd(srb_t *);
 int qla24xx_post_relogin_work(struct scsi_qla_host *vha);
+void qla2x00_wait_for_sess_deletion(scsi_qla_host_t *);
 
 /*
  * Global Functions in qla_mid.c source file.
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index cbfbab5d9a59..5ee8730d1d5c 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -3712,6 +3712,10 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
        return rval;
 
 done_free_sp:
+       spin_lock_irqsave(&vha->hw->vport_slock, flags);
+       list_del(&sp->elem);
+       spin_unlock_irqrestore(&vha->hw->vport_slock, flags);
+
        if (sp->u.iocb_cmd.u.ctarg.req) {
                dma_free_coherent(&vha->hw->pdev->dev,
                        sizeof(struct ct_sns_pkt),
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 0cb552268be3..26da2b286f90 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1518,11 +1518,10 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
 
        wait_for_completion(&tm_iocb->u.tmf.comp);
 
-       rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ?
-           QLA_SUCCESS : QLA_FUNCTION_FAILED;
+       rval = tm_iocb->u.tmf.data;
 
-       if ((rval != QLA_SUCCESS) || tm_iocb->u.tmf.data) {
-               ql_dbg(ql_dbg_taskm, vha, 0x8030,
+       if (rval != QLA_SUCCESS) {
+               ql_log(ql_log_warn, vha, 0x8030,
                    "TM IOCB failed (%x).\n", rval);
        }
 
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 37ae0f6d8ae5..59fd5a9dfeb8 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -222,6 +222,8 @@ qla2xxx_get_qpair_sp(struct qla_qpair *qpair, fc_port_t *fcport, gfp_t flag)
        sp->fcport = fcport;
        sp->iocbs = 1;
        sp->vha = qpair->vha;
+       INIT_LIST_HEAD(&sp->elem);
+
 done:
        if (!sp)
                QLA_QPAIR_MARK_NOT_BUSY(qpair);
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 68560a097ae1..bd5ba6acea7a 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -631,6 +631,9 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
        unsigned long   flags;
        fc_port_t       *fcport = NULL;
 
+       if (!vha->hw->flags.fw_started)
+               return;
+
        /* Setup to process RIO completion. */
        handle_cnt = 0;
        if (IS_CNA_CAPABLE(ha))
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index d8a36c13aeda..7a50eba9d496 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -4212,6 +4212,9 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
        mbx_cmd_t *mcp = &mc;
        struct qla_hw_data *ha = vha->hw;
 
+       if (!ha->flags.fw_started)
+               return QLA_SUCCESS;
+
        ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3,
            "Entered %s.\n", __func__);
 
@@ -4281,6 +4284,9 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
        mbx_cmd_t *mcp = &mc;
        struct qla_hw_data *ha = vha->hw;
 
+       if (!ha->flags.fw_started)
+               return QLA_SUCCESS;
+
        ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6,
            "Entered %s.\n", __func__);
 
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index f6f0a759a7c2..aa727d07b702 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -152,11 +152,18 @@ int
 qla24xx_disable_vp(scsi_qla_host_t *vha)
 {
        unsigned long flags;
-       int ret;
+       int ret = QLA_SUCCESS;
+       fc_port_t *fcport;
+
+       if (vha->hw->flags.fw_started)
+               ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
 
-       ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
        atomic_set(&vha->loop_state, LOOP_DOWN);
        atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+       list_for_each_entry(fcport, &vha->vp_fcports, list)
+               fcport->logout_on_delete = 0;
+
+       qla2x00_mark_all_devices_lost(vha, 0);
 
        /* Remove port id from vp target map */
        spin_lock_irqsave(&vha->hw->hardware_lock, flags);
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 2b0816dfe9bd..88bd730d16f3 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -303,6 +303,7 @@ static void qla2x00_free_device(scsi_qla_host_t *);
 static int qla2xxx_map_queues(struct Scsi_Host *shost);
 static void qla2x00_destroy_deferred_work(struct qla_hw_data *);
 
+
 struct scsi_host_template qla2xxx_driver_template = {
        .module                 = THIS_MODULE,
        .name                   = QLA2XXX_DRIVER_NAME,
@@ -1147,7 +1148,7 @@ static inline int test_fcport_count(scsi_qla_host_t *vha)
  * qla2x00_wait_for_sess_deletion can only be called from remove_one.
  * it has dependency on UNLOADING flag to stop device discovery
  */
-static void
+void
 qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha)
 {
        qla2x00_mark_all_devices_lost(vha, 0);
@@ -3603,6 +3604,8 @@ qla2x00_remove_one(struct pci_dev *pdev)
 
        base_vha = pci_get_drvdata(pdev);
        ha = base_vha->hw;
+       ql_log(ql_log_info, base_vha, 0xb079,
+           "Removing driver\n");
 
        /* Indicate device removal to prevent future board_disable and wait
         * until any pending board_disable has completed. */
@@ -3625,6 +3628,21 @@ qla2x00_remove_one(struct pci_dev *pdev)
        }
        qla2x00_wait_for_hba_ready(base_vha);
 
+       if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) {
+               if (ha->flags.fw_started)
+                       qla2x00_abort_isp_cleanup(base_vha);
+       } else if (!IS_QLAFX00(ha)) {
+               if (IS_QLA8031(ha)) {
+                       ql_dbg(ql_dbg_p3p, base_vha, 0xb07e,
+                           "Clearing fcoe driver presence.\n");
+                       if (qla83xx_clear_drv_presence(base_vha) != QLA_SUCCESS)
+                               ql_dbg(ql_dbg_p3p, base_vha, 0xb079,
+                                   "Error while clearing DRV-Presence.\n");
+               }
+
+               qla2x00_try_to_stop_firmware(base_vha);
+       }
+
        qla2x00_wait_for_sess_deletion(base_vha);
 
        /*
@@ -3648,14 +3666,6 @@ qla2x00_remove_one(struct pci_dev *pdev)
 
        qla2x00_delete_all_vps(ha, base_vha);
 
-       if (IS_QLA8031(ha)) {
-               ql_dbg(ql_dbg_p3p, base_vha, 0xb07e,
-                   "Clearing fcoe driver presence.\n");
-               if (qla83xx_clear_drv_presence(base_vha) != QLA_SUCCESS)
-                       ql_dbg(ql_dbg_p3p, base_vha, 0xb079,
-                           "Error while clearing DRV-Presence.\n");
-       }
-
        qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
 
        qla2x00_dfs_remove(base_vha);
@@ -3715,24 +3725,6 @@ qla2x00_free_device(scsi_qla_host_t *vha)
                qla2x00_stop_timer(vha);
 
        qla25xx_delete_queues(vha);
-
-       if (ha->flags.fce_enabled)
-               qla2x00_disable_fce_trace(vha, NULL, NULL);
-
-       if (ha->eft)
-               qla2x00_disable_eft_trace(vha);
-
-       if (IS_QLA25XX(ha) ||  IS_QLA2031(ha) || IS_QLA27XX(ha)) {
-               if (ha->flags.fw_started)
-                       qla2x00_abort_isp_cleanup(vha);
-       } else {
-               if (ha->flags.fw_started) {
-                       /* Stop currently executing firmware. */
-                       qla2x00_try_to_stop_firmware(vha);
-                       ha->flags.fw_started = 0;
-               }
-       }
-
        vha->flags.online = 0;
 
        /* turn-off interrupts on the card */
@@ -6022,8 +6014,9 @@ qla2x00_do_dpc(void *data)
                                set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
                }
 
-               if (test_and_clear_bit(ISP_ABORT_NEEDED,
-                                               &base_vha->dpc_flags)) {
+               if (test_and_clear_bit
+                   (ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
+                   !test_bit(UNLOADING, &base_vha->dpc_flags)) {
 
                        ql_dbg(ql_dbg_dpc, base_vha, 0x4007,
                            "ISP abort scheduled.\n");
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 04458eb19d38..4499c787165f 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -1880,6 +1880,9 @@ qla24xx_beacon_off(struct scsi_qla_host *vha)
        if (IS_P3P_TYPE(ha))
                return QLA_SUCCESS;
 
+       if (!ha->flags.fw_started)
+               return QLA_SUCCESS;
+
        ha->beacon_blink_led = 0;
 
        if (IS_QLA2031(ha) || IS_QLA27XX(ha))
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index e99b329002cf..47986c0912f0 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -4245,6 +4245,7 @@ int try_release_extent_mapping(struct extent_map_tree *map,
        struct extent_map *em;
        u64 start = page_offset(page);
        u64 end = start + PAGE_SIZE - 1;
+       struct btrfs_inode *btrfs_inode = BTRFS_I(page->mapping->host);
 
        if (gfpflags_allow_blocking(mask) &&
            page->mapping->host->i_size > SZ_16M) {
@@ -4267,6 +4268,8 @@ int try_release_extent_mapping(struct extent_map_tree *map,
                                            extent_map_end(em) - 1,
                                            EXTENT_LOCKED | EXTENT_WRITEBACK,
                                            0, NULL)) {
+                               set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+                                       &btrfs_inode->runtime_flags);
                                remove_extent_mapping(map, em);
                                /* once for the rb tree */
                                free_extent_map(em);
diff --git a/fs/jfs/jfs_dinode.h b/fs/jfs/jfs_dinode.h
index 395c4c0d0f06..1682a87c00b2 100644
--- a/fs/jfs/jfs_dinode.h
+++ b/fs/jfs/jfs_dinode.h
@@ -115,6 +115,13 @@ struct dinode {
                                        dxd_t _dxd;     /* 16: */
                                        union {
                                                __le32 _rdev;   /* 4: */
+                                               /*
+                                                * The fast symlink area
+                                                * is expected to overflow
+                                                * into _inlineea when
+                                                * needed (which will clear
+                                                * INLINEEA).
+                                                */
                                                u8 _fastsymlink[128];
                                        } _u;
                                        u8 _inlineea[128];
diff --git a/fs/jfs/jfs_incore.h b/fs/jfs/jfs_incore.h
index 1f26d1910409..9940a1e04cbf 100644
--- a/fs/jfs/jfs_incore.h
+++ b/fs/jfs/jfs_incore.h
@@ -87,6 +87,7 @@ struct jfs_inode_info {
                struct {
                        unchar _unused[16];     /* 16: */
                        dxd_t _dxd;             /* 16: */
+                       /* _inline may overflow into _inline_ea when needed */
                        unchar _inline[128];    /* 128: inline symlink */
                        /* _inline_ea may overlay the last part of
                         * file._xtroot if maxentry = XTROOTINITSLOT
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 1b9264fd54b6..f08571433aba 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -967,8 +967,7 @@ static int __init init_jfs_fs(void)
        jfs_inode_cachep =
            kmem_cache_create_usercopy("jfs_ip", sizeof(struct jfs_inode_info),
                        0, SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_ACCOUNT,
-                       offsetof(struct jfs_inode_info, i_inline),
-                       sizeof_field(struct jfs_inode_info, i_inline),
+                       offsetof(struct jfs_inode_info, i_inline), IDATASIZE,
                        init_once);
        if (jfs_inode_cachep == NULL)
                return -ENOMEM;
diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
index c60f3d32ee91..a6797986b625 100644
--- a/fs/jfs/xattr.c
+++ b/fs/jfs/xattr.c
@@ -491,15 +491,17 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
        if (size > PSIZE) {
                /*
                 * To keep the rest of the code simple.  Allocate a
-                * contiguous buffer to work with
+                * contiguous buffer to work with. Make the buffer large
+                * enough to make use of the whole extent.
                 */
-               ea_buf->xattr = kmalloc(size, GFP_KERNEL);
+               ea_buf->max_size = (size + sb->s_blocksize - 1) &
+                   ~(sb->s_blocksize - 1);
+
+               ea_buf->xattr = kmalloc(ea_buf->max_size, GFP_KERNEL);
                if (ea_buf->xattr == NULL)
                        return -ENOMEM;
 
                ea_buf->flag = EA_MALLOC;
-               ea_buf->max_size = (size + sb->s_blocksize - 1) &
-                   ~(sb->s_blocksize - 1);
 
                if (ea_size == 0)
                        return 0;
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
index 2135b8e67dcc..1035c2c97886 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.c
+++ b/fs/xfs/libxfs/xfs_attr_leaf.c
@@ -803,9 +803,8 @@ xfs_attr_shortform_to_leaf(
        ASSERT(blkno == 0);
        error = xfs_attr3_leaf_create(args, blkno, &bp);
        if (error) {
-               error = xfs_da_shrink_inode(args, 0, bp);
-               bp = NULL;
-               if (error)
+               /* xfs_attr3_leaf_create may not have instantiated a block */
+               if (bp && (xfs_da_shrink_inode(args, 0, bp) != 0))
                        goto out;
                xfs_idata_realloc(dp, size, XFS_ATTR_FORK);     /* try to put */
                memcpy(ifp->if_u1.if_data, tmpbuffer, size);    /* it back */
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 9a18f69f6e96..817899961f48 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -308,6 +308,46 @@ xfs_reinit_inode(
        return error;
 }
 
+/*
+ * If we are allocating a new inode, then check what was returned is
+ * actually a free, empty inode. If we are not allocating an inode,
+ * then check we didn't find a free inode.
+ *
+ * Returns:
+ *     0               if the inode free state matches the lookup context
+ *     -ENOENT         if the inode is free and we are not allocating
+ *     -EFSCORRUPTED   if there is any state mismatch at all
+ */
+static int
+xfs_iget_check_free_state(
+       struct xfs_inode        *ip,
+       int                     flags)
+{
+       if (flags & XFS_IGET_CREATE) {
+               /* should be a free inode */
+               if (VFS_I(ip)->i_mode != 0) {
+                       xfs_warn(ip->i_mount,
+"Corruption detected! Free inode 0x%llx not marked free! (mode 0x%x)",
+                               ip->i_ino, VFS_I(ip)->i_mode);
+                       return -EFSCORRUPTED;
+               }
+
+               if (ip->i_d.di_nblocks != 0) {
+                       xfs_warn(ip->i_mount,
+"Corruption detected! Free inode 0x%llx has blocks allocated!",
+                               ip->i_ino);
+                       return -EFSCORRUPTED;
+               }
+               return 0;
+       }
+
+       /* should be an allocated inode */
+       if (VFS_I(ip)->i_mode == 0)
+               return -ENOENT;
+
+       return 0;
+}
+
 /*
  * Check the validity of the inode we just found it the cache
  */
@@ -357,12 +397,12 @@ xfs_iget_cache_hit(
        }
 
        /*
-        * If lookup is racing with unlink return an error immediately.
+        * Check the inode free state is valid. This also detects lookup
+        * racing with unlinks.
         */
-       if (VFS_I(ip)->i_mode == 0 && !(flags & XFS_IGET_CREATE)) {
-               error = -ENOENT;
+       error = xfs_iget_check_free_state(ip, flags);
+       if (error)
                goto out_error;
-       }
 
        /*
         * If IRECLAIMABLE is set, we've torn down the VFS inode already.
@@ -485,29 +525,12 @@ xfs_iget_cache_miss(
 
 
        /*
-        * If we are allocating a new inode, then check what was returned is
-        * actually a free, empty inode. If we are not allocating an inode,
-        * the check we didn't find a free inode.
+        * Check the inode free state is valid. This also detects lookup
+        * racing with unlinks.
         */
-       if (flags & XFS_IGET_CREATE) {
-               if (VFS_I(ip)->i_mode != 0) {
-                       xfs_warn(mp,
-"Corruption detected! Free inode 0x%llx not marked free on disk",
-                               ino);
-                       error = -EFSCORRUPTED;
-                       goto out_destroy;
-               }
-               if (ip->i_d.di_nblocks != 0) {
-                       xfs_warn(mp,
-"Corruption detected! Free inode 0x%llx has blocks allocated!",
-                               ino);
-                       error = -EFSCORRUPTED;
-                       goto out_destroy;
-               }
-       } else if (VFS_I(ip)->i_mode == 0) {
-               error = -ENOENT;
+       error = xfs_iget_check_free_state(ip, flags);
+       if (error)
                goto out_destroy;
-       }
 
        /*
         * Preload the radix tree so we can insert safely under the
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index a0233edc0718..72341f7c5673 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -165,6 +165,7 @@ void ring_buffer_record_enable(struct ring_buffer *buffer);
 void ring_buffer_record_off(struct ring_buffer *buffer);
 void ring_buffer_record_on(struct ring_buffer *buffer);
 int ring_buffer_record_is_on(struct ring_buffer *buffer);
+int ring_buffer_record_is_set_on(struct ring_buffer *buffer);
 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);
 
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index facfecfc543c..48b70c368f73 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1067,6 +1067,13 @@ static int irq_setup_forced_threading(struct irqaction *new)
        if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
                return 0;
 
+       /*
+        * No further action required for interrupts which are requested as
+        * threaded interrupts already
+        */
+       if (new->handler == irq_default_primary_handler)
+               return 0;
+
        new->flags |= IRQF_ONESHOT;
 
        /*
@@ -1074,7 +1081,7 @@ static int irq_setup_forced_threading(struct irqaction *new)
         * thread handler. We force thread them as well by creating a
         * secondary action.
         */
-       if (new->handler != irq_default_primary_handler && new->thread_fn) {
+       if (new->handler && new->thread_fn) {
                /* Allocate the secondary action */
                new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
                if (!new->secondary)
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 8a040bcaa033..ce4fb0e12504 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -387,7 +387,7 @@ static inline void tick_irq_exit(void)
 
        /* Make sure that timer wheel updates are propagated */
        if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
-               if (!in_interrupt())
+               if (!in_irq())
                        tick_nohz_irq_exit();
        }
 #endif
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index da9455a6b42b..5b33e2f5c0ed 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -642,7 +642,7 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
 
 static inline bool local_timer_softirq_pending(void)
 {
-       return local_softirq_pending() & TIMER_SOFTIRQ;
+       return local_softirq_pending() & BIT(TIMER_SOFTIRQ);
 }
 
 static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index c9cb9767d49b..2bf2a6c7c18e 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -3226,6 +3226,22 @@ int ring_buffer_record_is_on(struct ring_buffer *buffer)
        return !atomic_read(&buffer->record_disabled);
 }
 
+/**
+ * ring_buffer_record_is_set_on - return true if the ring buffer is set writable
+ * @buffer: The ring buffer to see if write is set enabled
+ *
+ * Returns true if the ring buffer is set writable by ring_buffer_record_on().
+ * Note that this does NOT mean it is in a writable state.
+ *
+ * It may return true when the ring buffer has been disabled by
+ * ring_buffer_record_disable(), as that is a temporary disabling of
+ * the ring buffer.
+ */
+int ring_buffer_record_is_set_on(struct ring_buffer *buffer)
+{
+       return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF);
+}
+
 /**
  * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
  * @buffer: The ring buffer to stop writes to.
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 4e67d0020337..a583b6494b95 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1375,6 +1375,12 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 
        arch_spin_lock(&tr->max_lock);
 
+       /* Inherit the recordable setting from trace_buffer */
+       if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer))
+               ring_buffer_record_on(tr->max_buffer.buffer);
+       else
+               ring_buffer_record_off(tr->max_buffer.buffer);
+
        buf = tr->trace_buffer.buffer;
        tr->trace_buffer.buffer = tr->max_buffer.buffer;
        tr->max_buffer.buffer = buf;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index adc434752d67..13a203157dbe 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1013,8 +1013,8 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
 
        if (nlk->ngroups == 0)
                groups = 0;
-       else
-               groups &= (1ULL << nlk->ngroups) - 1;
+       else if (nlk->ngroups < 8*sizeof(groups))
+               groups &= (1UL << nlk->ngroups) - 1;
 
        bound = nlk->bound;
        if (bound) {

Reply via email to