diff --git a/Makefile b/Makefile
index 485afde0f1f1..7f561ef954f2 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 14
-SUBLEVEL = 97
+SUBLEVEL = 98
 EXTRAVERSION =
 NAME = Petit Gorille
 
diff --git a/arch/arm/mach-cns3xxx/pcie.c b/arch/arm/mach-cns3xxx/pcie.c
index 318394ed5c7a..5e11ad3164e0 100644
--- a/arch/arm/mach-cns3xxx/pcie.c
+++ b/arch/arm/mach-cns3xxx/pcie.c
@@ -83,7 +83,7 @@ static void __iomem *cns3xxx_pci_map_bus(struct pci_bus *bus,
        } else /* remote PCI bus */
                base = cnspci->cfg1_regs + ((busno & 0xf) << 20);
 
-       return base + (where & 0xffc) + (devfn << 12);
+       return base + where + (devfn << 12);
 }
 
 static int cns3xxx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index a028cc95afe1..bb444c693796 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -299,8 +299,10 @@ int swsusp_arch_suspend(void)
                dcache_clean_range(__idmap_text_start, __idmap_text_end);
 
                /* Clean kvm setup code to PoC? */
-               if (el2_reset_needed())
+               if (el2_reset_needed()) {
                        dcache_clean_range(__hyp_idmap_text_start, __hyp_idmap_text_end);
+                       dcache_clean_range(__hyp_text_start, __hyp_text_end);
+               }
 
                /* make the crash dump kernel image protected again */
                crash_post_resume();
diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
index e1261fbaa374..17f325ba831e 100644
--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -28,6 +28,8 @@
 #include <asm/virt.h>
 
        .text
+       .pushsection    .hyp.text, "ax"
+
        .align 11
 
 ENTRY(__hyp_stub_vectors)
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index 2bda224e8e71..ae7278286094 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -88,6 +88,7 @@ u64 __init kaslr_early_init(u64 dt_phys)
         * we end up running with module randomization disabled.
         */
        module_alloc_base = (u64)_etext - MODULES_VSIZE;
+       __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
 
        /*
         * Try to map the FDT early. If this fails, we simply bail,
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 3f463a61f8cf..fc5bbb2519fe 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1571,6 +1571,8 @@ static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
                return;
 
        mutex_lock(&gdp_mutex);
+       if (!kobject_has_children(glue_dir))
+               kobject_del(glue_dir);
        kobject_put(glue_dir);
        mutex_unlock(&gdp_mutex);
 }
diff --git a/drivers/gpio/gpio-altera-a10sr.c b/drivers/gpio/gpio-altera-a10sr.c
index 6b11f1314248..7f9e0304b510 100644
--- a/drivers/gpio/gpio-altera-a10sr.c
+++ b/drivers/gpio/gpio-altera-a10sr.c
@@ -66,8 +66,10 @@ static int altr_a10sr_gpio_direction_input(struct gpio_chip *gc,
 static int altr_a10sr_gpio_direction_output(struct gpio_chip *gc,
                                            unsigned int nr, int value)
 {
-       if (nr <= (ALTR_A10SR_OUT_VALID_RANGE_HI - ALTR_A10SR_LED_VALID_SHIFT))
+       if (nr <= (ALTR_A10SR_OUT_VALID_RANGE_HI - ALTR_A10SR_LED_VALID_SHIFT)) {
+               altr_a10sr_gpio_set(gc, nr, value);
                return 0;
+       }
        return -EINVAL;
 }
 
diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
index a4fd78b9c0e4..e94c34920241 100644
--- a/drivers/gpio/gpio-pcf857x.c
+++ b/drivers/gpio/gpio-pcf857x.c
@@ -84,6 +84,7 @@ MODULE_DEVICE_TABLE(of, pcf857x_of_table);
  */
 struct pcf857x {
        struct gpio_chip        chip;
+       struct irq_chip         irqchip;
        struct i2c_client       *client;
        struct mutex            lock;           /* protect 'out' */
        unsigned                out;            /* software latch */
@@ -252,18 +253,6 @@ static void pcf857x_irq_bus_sync_unlock(struct irq_data *data)
        mutex_unlock(&gpio->lock);
 }
 
-static struct irq_chip pcf857x_irq_chip = {
-       .name           = "pcf857x",
-       .irq_enable     = pcf857x_irq_enable,
-       .irq_disable    = pcf857x_irq_disable,
-       .irq_ack        = noop,
-       .irq_mask       = noop,
-       .irq_unmask     = noop,
-       .irq_set_wake   = pcf857x_irq_set_wake,
-       .irq_bus_lock           = pcf857x_irq_bus_lock,
-       .irq_bus_sync_unlock    = pcf857x_irq_bus_sync_unlock,
-};
-
 /*-------------------------------------------------------------------------*/
 
 static int pcf857x_probe(struct i2c_client *client,
@@ -376,8 +365,17 @@ static int pcf857x_probe(struct i2c_client *client,
 
        /* Enable irqchip if we have an interrupt */
        if (client->irq) {
+               gpio->irqchip.name = "pcf857x",
+               gpio->irqchip.irq_enable = pcf857x_irq_enable,
+               gpio->irqchip.irq_disable = pcf857x_irq_disable,
+               gpio->irqchip.irq_ack = noop,
+               gpio->irqchip.irq_mask = noop,
+               gpio->irqchip.irq_unmask = noop,
+               gpio->irqchip.irq_set_wake = pcf857x_irq_set_wake,
+               gpio->irqchip.irq_bus_lock = pcf857x_irq_bus_lock,
+               gpio->irqchip.irq_bus_sync_unlock = pcf857x_irq_bus_sync_unlock,
                status = gpiochip_irqchip_add_nested(&gpio->chip,
-                                                    &pcf857x_irq_chip,
+                                                    &gpio->irqchip,
                                                     0, handle_level_irq,
                                                     IRQ_TYPE_NONE);
                if (status) {
@@ -392,7 +390,7 @@ static int pcf857x_probe(struct i2c_client *client,
                if (status)
                        goto fail;
 
-               gpiochip_set_nested_irqchip(&gpio->chip, &pcf857x_irq_chip,
+               gpiochip_set_nested_irqchip(&gpio->chip, &gpio->irqchip,
                                            client->irq);
                gpio->irq_parent = client->irq;
        }
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index 9abc5a9c47a0..76861a8b5c1e 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -605,7 +605,7 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
                vmf = 1;
                break;
        case STATUS:
-               if (flags & (unsigned long)(VM_WRITE | VM_EXEC)) {
+               if (flags & VM_WRITE) {
                        ret = -EPERM;
                        goto done;
                }
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 34006354d2eb..802ba7b16e09 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -5210,7 +5210,7 @@ static void intel_iommu_put_resv_regions(struct device *dev,
        struct iommu_resv_region *entry, *next;
 
        list_for_each_entry_safe(entry, next, head, list) {
-               if (entry->type == IOMMU_RESV_RESERVED)
+               if (entry->type == IOMMU_RESV_MSI)
                        kfree(entry);
        }
 }
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 0d535b40cb3b..dcef761ab242 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -1942,12 +1942,14 @@ r5l_recovery_replay_one_stripe(struct r5conf *conf,
 }
 
 static struct stripe_head *
-r5c_recovery_alloc_stripe(struct r5conf *conf,
-                         sector_t stripe_sect)
+r5c_recovery_alloc_stripe(
+               struct r5conf *conf,
+               sector_t stripe_sect,
+               int noblock)
 {
        struct stripe_head *sh;
 
-       sh = raid5_get_active_stripe(conf, stripe_sect, 0, 1, 0);
+       sh = raid5_get_active_stripe(conf, stripe_sect, 0, noblock, 0);
        if (!sh)
                return NULL;  /* no more stripe available */
 
@@ -2157,7 +2159,7 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
                                                stripe_sect);
 
                if (!sh) {
-                       sh = r5c_recovery_alloc_stripe(conf, stripe_sect);
+                       sh = r5c_recovery_alloc_stripe(conf, stripe_sect, 1);
                        /*
                         * cannot get stripe from raid5_get_active_stripe
                         * try replay some stripes
@@ -2166,20 +2168,29 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
                                r5c_recovery_replay_stripes(
                                        cached_stripe_list, ctx);
                                sh = r5c_recovery_alloc_stripe(
-                                       conf, stripe_sect);
+                                       conf, stripe_sect, 1);
                        }
                        if (!sh) {
+                               int new_size = conf->min_nr_stripes * 2;
                                pr_debug("md/raid:%s: Increasing stripe cache size to %d to recovery data on journal.\n",
                                        mdname(mddev),
-                                       conf->min_nr_stripes * 2);
-                               raid5_set_cache_size(mddev,
-                                                    conf->min_nr_stripes * 2);
-                               sh = r5c_recovery_alloc_stripe(conf,
-                                                              stripe_sect);
+                                       new_size);
+                               ret = raid5_set_cache_size(mddev, new_size);
+                               if (conf->min_nr_stripes <= new_size / 2) {
+                                       pr_err("md/raid:%s: Cannot increase cache size, ret=%d, new_size=%d, min_nr_stripes=%d, max_nr_stripes=%d\n",
+                                               mdname(mddev),
+                                               ret,
+                                               new_size,
+                                               conf->min_nr_stripes,
+                                               conf->max_nr_stripes);
+                                       return -ENOMEM;
+                               }
+                               sh = r5c_recovery_alloc_stripe(
+                                       conf, stripe_sect, 0);
                        }
                        if (!sh) {
                                pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure. Recovery failed.\n",
-                                      mdname(mddev));
+                                       mdname(mddev));
                                return -ENOMEM;
                        }
                        list_add_tail(&sh->lru, cached_stripe_list);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index dbf51b4c21b3..7dbb74cd506a 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -6336,6 +6336,7 @@ raid5_show_stripe_cache_size(struct mddev *mddev, char *page)
 int
 raid5_set_cache_size(struct mddev *mddev, int size)
 {
+       int result = 0;
        struct r5conf *conf = mddev->private;
 
        if (size <= 16 || size > 32768)
@@ -6352,11 +6353,14 @@ raid5_set_cache_size(struct mddev *mddev, int size)
 
        mutex_lock(&conf->cache_size_mutex);
        while (size > conf->max_nr_stripes)
-               if (!grow_one_stripe(conf, GFP_KERNEL))
+               if (!grow_one_stripe(conf, GFP_KERNEL)) {
+                       conf->min_nr_stripes = conf->max_nr_stripes;
+                       result = -ENOMEM;
                        break;
+               }
        mutex_unlock(&conf->cache_size_mutex);
 
-       return 0;
+       return result;
 }
 EXPORT_SYMBOL(raid5_set_cache_size);
 
diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c
index 768972af8b85..0d3b7473bc21 100644
--- a/drivers/mmc/host/bcm2835.c
+++ b/drivers/mmc/host/bcm2835.c
@@ -1427,6 +1427,8 @@ static int bcm2835_probe(struct platform_device *pdev)
 
 err:
        dev_dbg(dev, "%s -> err %d\n", __func__, ret);
+       if (host->dma_chan_rxtx)
+               dma_release_channel(host->dma_chan_rxtx);
        mmc_free_host(mmc);
 
        return ret;
diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
index 0cfbdb3ab68a..cecffcbd3ca8 100644
--- a/drivers/mmc/host/sdhci-iproc.c
+++ b/drivers/mmc/host/sdhci-iproc.c
@@ -278,7 +278,10 @@ static int sdhci_iproc_probe(struct platform_device *pdev)
 
        iproc_host->data = iproc_data;
 
-       mmc_of_parse(host->mmc);
+       ret = mmc_of_parse(host->mmc);
+       if (ret)
+               goto err;
+
        sdhci_get_of_property(pdev);
 
        host->mmc->caps |= iproc_host->data->mmc_caps;
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index f77ba9fa257b..94df1ddc5dcb 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -1888,6 +1888,8 @@ static void ucc_geth_free_tx(struct ucc_geth_private *ugeth)
        u16 i, j;
        u8 __iomem *bd;
 
+       netdev_reset_queue(ugeth->ndev);
+
        ug_info = ugeth->ug_info;
        uf_info = &ug_info->uf_info;
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 16c09949afd5..7440c769b30f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -2048,9 +2048,11 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
 {
        struct mlx4_cmd_mailbox *mailbox;
        __be32 *outbox;
+       u64 qword_field;
        u32 dword_field;
-       int err;
+       u16 word_field;
        u8 byte_field;
+       int err;
        static const u8 a0_dmfs_query_hw_steering[] =  {
                [0] = MLX4_STEERING_DMFS_A0_DEFAULT,
                [1] = MLX4_STEERING_DMFS_A0_DYNAMIC,
@@ -2078,19 +2080,32 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
 
        /* QPC/EEC/CQC/EQC/RDMARC attributes */
 
-       MLX4_GET(param->qpc_base,      outbox, INIT_HCA_QPC_BASE_OFFSET);
-       MLX4_GET(param->log_num_qps,   outbox, INIT_HCA_LOG_QP_OFFSET);
-       MLX4_GET(param->srqc_base,     outbox, INIT_HCA_SRQC_BASE_OFFSET);
-       MLX4_GET(param->log_num_srqs,  outbox, INIT_HCA_LOG_SRQ_OFFSET);
-       MLX4_GET(param->cqc_base,      outbox, INIT_HCA_CQC_BASE_OFFSET);
-       MLX4_GET(param->log_num_cqs,   outbox, INIT_HCA_LOG_CQ_OFFSET);
-       MLX4_GET(param->altc_base,     outbox, INIT_HCA_ALTC_BASE_OFFSET);
-       MLX4_GET(param->auxc_base,     outbox, INIT_HCA_AUXC_BASE_OFFSET);
-       MLX4_GET(param->eqc_base,      outbox, INIT_HCA_EQC_BASE_OFFSET);
-       MLX4_GET(param->log_num_eqs,   outbox, INIT_HCA_LOG_EQ_OFFSET);
-       MLX4_GET(param->num_sys_eqs,   outbox, INIT_HCA_NUM_SYS_EQS_OFFSET);
-       MLX4_GET(param->rdmarc_base,   outbox, INIT_HCA_RDMARC_BASE_OFFSET);
-       MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET);
+       MLX4_GET(qword_field, outbox, INIT_HCA_QPC_BASE_OFFSET);
+       param->qpc_base = qword_field & ~((u64)0x1f);
+       MLX4_GET(byte_field, outbox, INIT_HCA_LOG_QP_OFFSET);
+       param->log_num_qps = byte_field & 0x1f;
+       MLX4_GET(qword_field, outbox, INIT_HCA_SRQC_BASE_OFFSET);
+       param->srqc_base = qword_field & ~((u64)0x1f);
+       MLX4_GET(byte_field, outbox, INIT_HCA_LOG_SRQ_OFFSET);
+       param->log_num_srqs = byte_field & 0x1f;
+       MLX4_GET(qword_field, outbox, INIT_HCA_CQC_BASE_OFFSET);
+       param->cqc_base = qword_field & ~((u64)0x1f);
+       MLX4_GET(byte_field, outbox, INIT_HCA_LOG_CQ_OFFSET);
+       param->log_num_cqs = byte_field & 0x1f;
+       MLX4_GET(qword_field, outbox, INIT_HCA_ALTC_BASE_OFFSET);
+       param->altc_base = qword_field;
+       MLX4_GET(qword_field, outbox, INIT_HCA_AUXC_BASE_OFFSET);
+       param->auxc_base = qword_field;
+       MLX4_GET(qword_field, outbox, INIT_HCA_EQC_BASE_OFFSET);
+       param->eqc_base = qword_field & ~((u64)0x1f);
+       MLX4_GET(byte_field, outbox, INIT_HCA_LOG_EQ_OFFSET);
+       param->log_num_eqs = byte_field & 0x1f;
+       MLX4_GET(word_field, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET);
+       param->num_sys_eqs = word_field & 0xfff;
+       MLX4_GET(qword_field, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
+       param->rdmarc_base = qword_field & ~((u64)0x1f);
+       MLX4_GET(byte_field, outbox, INIT_HCA_LOG_RD_OFFSET);
+       param->log_rd_per_qp = byte_field & 0x7;
 
        MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET);
        if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) {
@@ -2109,22 +2124,21 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
        /* steering attributes */
        if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
                MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET);
-               MLX4_GET(param->log_mc_entry_sz, outbox,
-                        INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
-               MLX4_GET(param->log_mc_table_sz, outbox,
-                        INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
-               MLX4_GET(byte_field, outbox,
-                        INIT_HCA_FS_A0_OFFSET);
+               MLX4_GET(byte_field, outbox, INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
+               param->log_mc_entry_sz = byte_field & 0x1f;
+               MLX4_GET(byte_field, outbox, INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
+               param->log_mc_table_sz = byte_field & 0x1f;
+               MLX4_GET(byte_field, outbox, INIT_HCA_FS_A0_OFFSET);
                param->dmfs_high_steer_mode =
                        a0_dmfs_query_hw_steering[(byte_field >> 6) & 3];
        } else {
                MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET);
-               MLX4_GET(param->log_mc_entry_sz, outbox,
-                        INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
-               MLX4_GET(param->log_mc_hash_sz,  outbox,
-                        INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
-               MLX4_GET(param->log_mc_table_sz, outbox,
-                        INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
+               MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
+               param->log_mc_entry_sz = byte_field & 0x1f;
+               MLX4_GET(byte_field,  outbox, INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
+               param->log_mc_hash_sz = byte_field & 0x1f;
+               MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
+               param->log_mc_table_sz = byte_field & 0x1f;
        }
 
        /* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
@@ -2148,15 +2162,18 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
        /* TPT attributes */
 
        MLX4_GET(param->dmpt_base,  outbox, INIT_HCA_DMPT_BASE_OFFSET);
-       MLX4_GET(param->mw_enabled, outbox, INIT_HCA_TPT_MW_OFFSET);
-       MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
+       MLX4_GET(byte_field, outbox, INIT_HCA_TPT_MW_OFFSET);
+       param->mw_enabled = byte_field >> 7;
+       MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
+       param->log_mpt_sz = byte_field & 0x3f;
        MLX4_GET(param->mtt_base,   outbox, INIT_HCA_MTT_BASE_OFFSET);
        MLX4_GET(param->cmpt_base,  outbox, INIT_HCA_CMPT_BASE_OFFSET);
 
        /* UAR attributes */
 
        MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
-       MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
+       MLX4_GET(byte_field, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
+       param->log_uar_sz = byte_field & 0xf;
 
        /* phv_check enable */
        MLX4_GET(byte_field, outbox, INIT_HCA_CACHELINE_SZ_OFFSET);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 1af9894abd95..2f93e6e9dc9e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1126,13 +1126,6 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
        int err = 0;
        u8 *smac_v;
 
-       if (vport->info.spoofchk && !is_valid_ether_addr(vport->info.mac)) {
-               mlx5_core_warn(esw->dev,
-                              "vport[%d] configure ingress rules failed, illegal mac with spoofchk\n",
-                              vport->vport);
-               return -EPERM;
-       }
-
        esw_vport_cleanup_ingress_rules(esw, vport);
 
        if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) {
@@ -1614,7 +1607,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
        int vport_num;
        int err;
 
-       if (!MLX5_ESWITCH_MANAGER(dev))
+       if (!MLX5_VPORT_MANAGER(dev))
                return 0;
 
        esw_info(dev,
@@ -1687,7 +1680,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
 
 void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
 {
-       if (!esw || !MLX5_ESWITCH_MANAGER(esw->dev))
+       if (!esw || !MLX5_VPORT_MANAGER(esw->dev))
                return;
 
        esw_info(esw->dev, "cleanup\n");
@@ -1734,13 +1727,10 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
        mutex_lock(&esw->state_lock);
        evport = &esw->vports[vport];
 
-       if (evport->info.spoofchk && !is_valid_ether_addr(mac)) {
+       if (evport->info.spoofchk && !is_valid_ether_addr(mac))
                mlx5_core_warn(esw->dev,
-                              "MAC invalidation is not allowed when spoofchk is on, vport(%d)\n",
+                              "Set invalid MAC while spoofchk is on, vport(%d)\n",
                               vport);
-               err = -EPERM;
-               goto unlock;
-       }
 
        err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac);
        if (err) {
@@ -1886,6 +1876,10 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
        evport = &esw->vports[vport];
        pschk = evport->info.spoofchk;
        evport->info.spoofchk = spoofchk;
+       if (pschk && !is_valid_ether_addr(evport->info.mac))
+               mlx5_core_warn(esw->dev,
+                              "Spoofchk in set while MAC is invalid, vport(%d)\n",
+                              evport->vport);
        if (evport->enabled && esw->mode == SRIOV_LEGACY)
                err = esw_vport_ingress_config(esw, evport);
        if (err)
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 58133c9f701b..2222ed63d055 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -95,12 +95,12 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval)
                        err = ipvlan_register_nf_hook(read_pnet(&port->pnet));
                        if (!err) {
                                mdev->l3mdev_ops = &ipvl_l3mdev_ops;
-                               mdev->priv_flags |= IFF_L3MDEV_MASTER;
+                               mdev->priv_flags |= IFF_L3MDEV_RX_HANDLER;
                        } else
                                goto fail;
                } else if (port->mode == IPVLAN_MODE_L3S) {
                        /* Old mode was L3S */
-                       mdev->priv_flags &= ~IFF_L3MDEV_MASTER;
+                       mdev->priv_flags &= ~IFF_L3MDEV_RX_HANDLER;
                        ipvlan_unregister_nf_hook(read_pnet(&port->pnet));
                        mdev->l3mdev_ops = NULL;
                }
@@ -172,7 +172,7 @@ static void ipvlan_port_destroy(struct net_device *dev)
 
        dev->priv_flags &= ~IFF_IPVLAN_MASTER;
        if (port->mode == IPVLAN_MODE_L3S) {
-               dev->priv_flags &= ~IFF_L3MDEV_MASTER;
+               dev->priv_flags &= ~IFF_L3MDEV_RX_HANDLER;
                ipvlan_unregister_nf_hook(dev_net(dev));
                dev->l3mdev_ops = NULL;
        }
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 215696f21d67..0b457c81c448 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1149,6 +1149,16 @@ static void free_old_xmit_skbs(struct send_queue *sq)
        u64_stats_update_end(&stats->tx_syncp);
 }
 
+static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
+{
+       if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
+               return false;
+       else if (q < vi->curr_queue_pairs)
+               return true;
+       else
+               return false;
+}
+
 static void virtnet_poll_cleantx(struct receive_queue *rq)
 {
        struct virtnet_info *vi = rq->vq->vdev->priv;
@@ -1156,7 +1166,7 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
        struct send_queue *sq = &vi->sq[index];
        struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);
 
-       if (!sq->napi.weight)
+       if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index))
                return;
 
        if (__netif_tx_trylock(txq)) {
@@ -1206,8 +1216,16 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
 {
        struct send_queue *sq = container_of(napi, struct send_queue, napi);
        struct virtnet_info *vi = sq->vq->vdev->priv;
-       struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq));
+       unsigned int index = vq2txq(sq->vq);
+       struct netdev_queue *txq;
 
+       if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
+               /* We don't need to enable cb for XDP */
+               napi_complete_done(napi, 0);
+               return 0;
+       }
+
+       txq = netdev_get_tx_queue(vi->dev, index);
        __netif_tx_lock(txq, raw_smp_processor_id());
        free_old_xmit_skbs(sq);
        __netif_tx_unlock(txq);
@@ -2006,14 +2024,17 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
        }
 
        /* Make sure NAPI is not using any XDP TX queues for RX. */
-       if (netif_running(dev))
-               for (i = 0; i < vi->max_queue_pairs; i++)
+       if (netif_running(dev)) {
+               for (i = 0; i < vi->max_queue_pairs; i++) {
                        napi_disable(&vi->rq[i].napi);
+                       virtnet_napi_tx_disable(&vi->sq[i].napi);
+               }
+       }
 
-       netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
        err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
        if (err)
                goto err;
+       netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
        vi->xdp_queue_pairs = xdp_qp;
 
        for (i = 0; i < vi->max_queue_pairs; i++) {
@@ -2027,15 +2048,23 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
                }
                if (old_prog)
                        bpf_prog_put(old_prog);
-               if (netif_running(dev))
+               if (netif_running(dev)) {
                        virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
+                       virtnet_napi_tx_enable(vi, vi->sq[i].vq,
+                                              &vi->sq[i].napi);
+               }
        }
 
        return 0;
 
 err:
-       for (i = 0; i < vi->max_queue_pairs; i++)
-               virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
+       if (netif_running(dev)) {
+               for (i = 0; i < vi->max_queue_pairs; i++) {
+                       virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
+                       virtnet_napi_tx_enable(vi, vi->sq[i].vq,
+                                              &vi->sq[i].napi);
+               }
+       }
        if (prog)
                bpf_prog_sub(prog, vi->max_queue_pairs - 1);
        return err;
@@ -2176,16 +2205,6 @@ static void free_receive_page_frags(struct virtnet_info *vi)
                        put_page(vi->rq[i].alloc_frag.page);
 }
 
-static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
-{
-       if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
-               return false;
-       else if (q < vi->curr_queue_pairs)
-               return true;
-       else
-               return false;
-}
-
 static void free_unused_bufs(struct virtnet_info *vi)
 {
        void *buf;
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index a6a33327f5e7..9c4b0d7f15c3 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -433,8 +433,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
        { KE_KEY, 0x30, { KEY_VOLUMEUP } },
        { KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
        { KE_KEY, 0x32, { KEY_MUTE } },
-       { KE_KEY, 0x33, { KEY_DISPLAYTOGGLE } }, /* LCD on */
-       { KE_KEY, 0x34, { KEY_DISPLAY_OFF } }, /* LCD off */
+       { KE_KEY, 0x35, { KEY_SCREENLOCK } },
        { KE_KEY, 0x40, { KEY_PREVIOUSSONG } },
        { KE_KEY, 0x41, { KEY_NEXTSONG } },
        { KE_KEY, 0x43, { KEY_STOPCD } }, /* Stop/Eject */
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 000b7bfa8cf0..48aa854c564a 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -51,6 +51,7 @@
 #include "cifs_unicode.h"
 #include "cifs_debug.h"
 #include "cifs_fs_sb.h"
+#include "dns_resolve.h"
 #include "ntlmssp.h"
 #include "nterr.h"
 #include "rfc1002pdu.h"
@@ -313,6 +314,53 @@ static void cifs_prune_tlinks(struct work_struct *work);
 static int cifs_setup_volume_info(struct smb_vol *volume_info, char *mount_data,
                                        const char *devname);
 
+/*
+ * Resolve hostname and set ip addr in tcp ses. Useful for hostnames that may
+ * get their ip addresses changed at some point.
+ *
+ * This should be called with server->srv_mutex held.
+ */
+#ifdef CONFIG_CIFS_DFS_UPCALL
+static int reconn_set_ipaddr(struct TCP_Server_Info *server)
+{
+       int rc;
+       int len;
+       char *unc, *ipaddr = NULL;
+
+       if (!server->hostname)
+               return -EINVAL;
+
+       len = strlen(server->hostname) + 3;
+
+       unc = kmalloc(len, GFP_KERNEL);
+       if (!unc) {
+               cifs_dbg(FYI, "%s: failed to create UNC path\n", __func__);
+               return -ENOMEM;
+       }
+       snprintf(unc, len, "\\\\%s", server->hostname);
+
+       rc = dns_resolve_server_name_to_ip(unc, &ipaddr);
+       kfree(unc);
+
+       if (rc < 0) {
+               cifs_dbg(FYI, "%s: failed to resolve server part of %s to IP: %d\n",
+                        __func__, server->hostname, rc);
+               return rc;
+       }
+
+       rc = cifs_convert_address((struct sockaddr *)&server->dstaddr, ipaddr,
+                                 strlen(ipaddr));
+       kfree(ipaddr);
+
+       return !rc ? -1 : 0;
+}
+#else
+static inline int reconn_set_ipaddr(struct TCP_Server_Info *server)
+{
+       return 0;
+}
+#endif
+
 /*
  * cifs tcp session reconnection
  *
@@ -408,6 +456,11 @@ cifs_reconnect(struct TCP_Server_Info *server)
                rc = generic_ip_connect(server);
                if (rc) {
                        cifs_dbg(FYI, "reconnect error %d\n", rc);
+                       rc = reconn_set_ipaddr(server);
+                       if (rc) {
+                               cifs_dbg(FYI, "%s: failed to resolve hostname: %d\n",
+                                        __func__, rc);
+                       }
                        mutex_unlock(&server->srv_mutex);
                        msleep(3000);
                } else {
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index c0f8087d9819..fd2d199dd413 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -3071,8 +3071,8 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
                    rsp->hdr.sync_hdr.Status == STATUS_NO_MORE_FILES) {
                        srch_inf->endOfSearch = true;
                        rc = 0;
-               }
-               cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
+               } else
+                       cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
                goto qdir_exit;
        }
 
diff --git a/fs/dcache.c b/fs/dcache.c
index 28b2e770bb69..9ac1290ae44f 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1183,15 +1183,11 @@ static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
  */
 void shrink_dcache_sb(struct super_block *sb)
 {
-       long freed;
-
        do {
                LIST_HEAD(dispose);
 
-               freed = list_lru_walk(&sb->s_dentry_lru,
+               list_lru_walk(&sb->s_dentry_lru,
                        dentry_lru_isolate_shrink, &dispose, 1024);
-
-               this_cpu_sub(nr_dentry_unused, freed);
                shrink_dentry_list(&dispose);
                cond_resched();
        } while (list_lru_count(&sb->s_dentry_lru) > 0);
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 914cb3d72ddf..b0eee90738ff 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -1695,9 +1695,9 @@ static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
                        goto next_iter;
                }
                if (ret == -E2BIG) {
-                       n += rbm->bii - initial_bii;
                        rbm->bii = 0;
                        rbm->offset = 0;
+                       n += (rbm->bii - initial_bii);
                        goto res_covered_end_of_rgrp;
                }
                return ret;
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 19e6ea89ad26..2d956a7d5378 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -618,11 +618,12 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
        nfs_set_page_writeback(page);
        WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags));
 
-       ret = 0;
+       ret = req->wb_context->error;
        /* If there is a fatal error that covers this write, just exit */
-       if (nfs_error_is_fatal_on_server(req->wb_context->error))
+       if (nfs_error_is_fatal_on_server(ret))
                goto out_launder;
 
+       ret = 0;
        if (!nfs_pageio_add_request(pgio, req)) {
                ret = pgio->pg_error;
                /*
@@ -632,9 +633,9 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
                        nfs_context_set_write_error(req->wb_context, ret);
                        if (nfs_error_is_fatal_on_server(ret))
                                goto out_launder;
-               }
+               } else
+                       ret = -EAGAIN;
                nfs_redirty_request(req);
-               ret = -EAGAIN;
        } else
                nfs_add_stats(page_file_mapping(page)->host,
                                NFSIOS_WRITEPAGES, 1);
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
index 2bc61e7543dd..506da82ff3f1 100644
--- a/fs/notify/fsnotify.c
+++ b/fs/notify/fsnotify.c
@@ -158,9 +158,9 @@ int __fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask
        parent = dget_parent(dentry);
        p_inode = parent->d_inode;
 
-       if (unlikely(!fsnotify_inode_watches_children(p_inode)))
+       if (unlikely(!fsnotify_inode_watches_children(p_inode))) {
                __fsnotify_update_child_dentry_flags(p_inode);
-       else if (p_inode->i_fsnotify_mask & mask) {
+       } else if (p_inode->i_fsnotify_mask & mask & ~FS_EVENT_ON_CHILD) {
                struct name_snapshot name;
 
                /* we are notifying a parent so come up with the new mask which
@@ -264,6 +264,10 @@ int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is,
        else
                mnt = NULL;
 
+       /* An event "on child" is not intended for a mount mark */
+       if (mask & FS_EVENT_ON_CHILD)
+               mnt = NULL;
+
        /*
         * Optimization: srcu_read_lock() has a memory barrier which can
         * be expensive.  It protects walking the *_fsnotify_marks lists.
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index e0a6205caa71..e232df1d9db2 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -117,6 +117,23 @@ extern void kobject_put(struct kobject *kobj);
 extern const void *kobject_namespace(struct kobject *kobj);
 extern char *kobject_get_path(struct kobject *kobj, gfp_t flag);
 
+/**
+ * kobject_has_children - Returns whether a kobject has children.
+ * @kobj: the object to test
+ *
+ * This will return whether a kobject has other kobjects as children.
+ *
+ * It does NOT account for the presence of attribute files, only sub
+ * directories. It also assumes there is no concurrent addition or
+ * removal of such children, and thus relies on external locking.
+ */
+static inline bool kobject_has_children(struct kobject *kobj)
+{
+       WARN_ON_ONCE(kref_read(&kobj->kref) == 0);
+
+       return kobj->sd && kobj->sd->dir.subdirs;
+}
+
 struct kobj_type {
        void (*release)(struct kobject *kobj);
        const struct sysfs_ops *sysfs_ops;
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index a516dbe5869f..40b830d55fe5 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1356,6 +1356,7 @@ struct net_device_ops {
  * @IFF_PHONY_HEADROOM: the headroom value is controlled by an external
  *     entity (i.e. the master device for bridged veth)
  * @IFF_MACSEC: device is a MACsec device
+ * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device
  */
 enum netdev_priv_flags {
        IFF_802_1Q_VLAN                 = 1<<0,
@@ -1386,6 +1387,7 @@ enum netdev_priv_flags {
        IFF_RXFH_CONFIGURED             = 1<<25,
        IFF_PHONY_HEADROOM              = 1<<26,
        IFF_MACSEC                      = 1<<27,
+       IFF_L3MDEV_RX_HANDLER           = 1<<28,
 };
 
 #define IFF_802_1Q_VLAN                        IFF_802_1Q_VLAN
@@ -1415,6 +1417,7 @@ enum netdev_priv_flags {
 #define IFF_TEAM                       IFF_TEAM
 #define IFF_RXFH_CONFIGURED            IFF_RXFH_CONFIGURED
 #define IFF_MACSEC                     IFF_MACSEC
+#define IFF_L3MDEV_RX_HANDLER          IFF_L3MDEV_RX_HANDLER
 
 /**
  *     struct net_device - The DEVICE structure.
@@ -4206,6 +4209,11 @@ static inline bool netif_supports_nofcs(struct net_device *dev)
        return dev->priv_flags & IFF_SUPP_NOFCS;
 }
 
+static inline bool netif_has_l3_rx_handler(const struct net_device *dev)
+{
+       return dev->priv_flags & IFF_L3MDEV_RX_HANDLER;
+}
+
 static inline bool netif_is_l3_master(const struct net_device *dev)
 {
        return dev->priv_flags & IFF_L3MDEV_MASTER;
diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h
index ec912d01126f..ecdc6542070f 100644
--- a/include/linux/sched/coredump.h
+++ b/include/linux/sched/coredump.h
@@ -71,6 +71,7 @@ static inline int get_dumpable(struct mm_struct *mm)
 #define MMF_HUGE_ZERO_PAGE     23      /* mm has ever used the global huge zero page */
 #define MMF_DISABLE_THP                24      /* disable THP for all VMAs */
 #define MMF_OOM_VICTIM         25      /* mm is the oom victim */
+#define MMF_OOM_REAP_QUEUED    26      /* mm was queued for oom_reaper */
 #define MMF_DISABLE_THP_MASK   (1 << MMF_DISABLE_THP)
 
 #define MMF_INIT_MASK          (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\
diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h
index 3832099289c5..128487658ff7 100644
--- a/include/net/l3mdev.h
+++ b/include/net/l3mdev.h
@@ -142,7 +142,8 @@ struct sk_buff *l3mdev_l3_rcv(struct sk_buff *skb, u16 proto)
 
        if (netif_is_l3_slave(skb->dev))
                master = netdev_master_upper_dev_get_rcu(skb->dev);
-       else if (netif_is_l3_master(skb->dev))
+       else if (netif_is_l3_master(skb->dev) ||
+                netif_has_l3_rx_handler(skb->dev))
                master = skb->dev;
 
        if (master && master->l3mdev_ops->l3mdev_l3_rcv)
diff --git a/kernel/exit.c b/kernel/exit.c
index e3a08761eb40..3aa01b74c1e3 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -557,12 +557,14 @@ static struct task_struct *find_alive_thread(struct task_struct *p)
        return NULL;
 }
 
-static struct task_struct *find_child_reaper(struct task_struct *father)
+static struct task_struct *find_child_reaper(struct task_struct *father,
+                                               struct list_head *dead)
        __releases(&tasklist_lock)
        __acquires(&tasklist_lock)
 {
        struct pid_namespace *pid_ns = task_active_pid_ns(father);
        struct task_struct *reaper = pid_ns->child_reaper;
+       struct task_struct *p, *n;
 
        if (likely(reaper != father))
                return reaper;
@@ -578,6 +580,12 @@ static struct task_struct *find_child_reaper(struct task_struct *father)
                panic("Attempted to kill init! exitcode=0x%08x\n",
                        father->signal->group_exit_code ?: father->exit_code);
        }
+
+       list_for_each_entry_safe(p, n, dead, ptrace_entry) {
+               list_del_init(&p->ptrace_entry);
+               release_task(p);
+       }
+
        zap_pid_ns_processes(pid_ns);
        write_lock_irq(&tasklist_lock);
 
@@ -667,7 +675,7 @@ static void forget_original_parent(struct task_struct *father,
                exit_ptrace(father, dead);
 
        /* Can drop and reacquire tasklist_lock */
-       reaper = find_child_reaper(father);
+       reaper = find_child_reaper(father, dead);
        if (list_empty(&father->children))
                return;
 
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 345e69d88b37..ef080fa682a6 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -339,7 +339,8 @@ static void kill_procs(struct list_head *to_kill, int forcekill, int trapno,
                        if (fail || tk->addr_valid == 0) {
                                pr_err("Memory failure: %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
                                       pfn, tk->tsk->comm, tk->tsk->pid);
-                               force_sig(SIGKILL, tk->tsk);
+                               do_send_sig_info(SIGKILL, SEND_SIG_PRIV,
+                                                tk->tsk, PIDTYPE_PID);
                        }
 
                        /*
diff --git a/mm/migrate.c b/mm/migrate.c
index cbb025239071..8c57cdd77ba5 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1106,10 +1106,13 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
         * If migration is successful, decrease refcount of the newpage
         * which will not free the page because new page owner increased
         * refcounter. As well, if it is LRU page, add the page to LRU
-        * list in here.
+        * list in here. Use the old state of the isolated source page to
+        * determine if we migrated a LRU page. newpage was already unlocked
+        * and possibly modified by its owner - don't rely on the page
+        * state.
         */
        if (rc == MIGRATEPAGE_SUCCESS) {
-               if (unlikely(__PageMovable(newpage)))
+               if (unlikely(!is_lru))
                        put_page(newpage);
                else
                        putback_lru_page(newpage);
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 58977f634ced..fe0aac2348e5 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -628,8 +628,8 @@ static void wake_oom_reaper(struct task_struct *tsk)
        if (!oom_reaper_th)
                return;
 
-       /* tsk is already queued? */
-       if (tsk == oom_reaper_list || tsk->oom_reaper_list)
+       /* mm is already queued? */
+       if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
                return;
 
        get_task_struct(tsk);
@@ -870,6 +870,13 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
         * still freeing memory.
         */
        read_lock(&tasklist_lock);
+
+       /*
+        * The task 'p' might have already exited before reaching here. The
+        * put_task_struct() will free task_struct 'p' while the loop still try
+        * to access the field of 'p', so, get an extra reference.
+        */
+       get_task_struct(p);
        for_each_thread(p, t) {
                list_for_each_entry(child, &t->children, sibling) {
                        unsigned int child_points;
@@ -889,6 +896,7 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
                        }
                }
        }
+       put_task_struct(p);
        read_unlock(&tasklist_lock);
 
        p = find_lock_task_mm(victim);
diff --git a/net/core/dev.c b/net/core/dev.c
index 4337450a5fdb..54ba5b5bc55c 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -7748,6 +7748,9 @@ int init_dummy_netdev(struct net_device *dev)
        set_bit(__LINK_STATE_PRESENT, &dev->state);
        set_bit(__LINK_STATE_START, &dev->state);
 
+       /* napi_busy_loop stats accounting wants this */
+       dev_net_set(dev, &init_net);
+
        /* Note : We dont allocate pcpu_refcnt for dummy devices,
         * because users of this 'device' dont need to change
         * its refcount.
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index f8bbd693c19c..d95b32af4a0e 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -425,6 +425,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
         * fragment.
         */
 
+       err = -EINVAL;
        /* Find out where to put this fragment.  */
        prev_tail = qp->q.fragments_tail;
        if (!prev_tail)
@@ -501,7 +502,6 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
 
 discard_qp:
        inet_frag_kill(&qp->q);
-       err = -EINVAL;
        __IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS);
 err:
        kfree_skb(skb);
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index b1ed9254a4b6..9552e0b08f45 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -370,6 +370,9 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
                                        err = -EINVAL;
                                        goto out_unlock;
                                }
+                       }
+
+                       if (sk->sk_bound_dev_if) {
                                dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if);
                                if (!dev) {
                                        err = -ENODEV;
diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
index cf9342bfe95a..de4c9826c1ce 100644
--- a/net/ipv6/seg6_iptunnel.c
+++ b/net/ipv6/seg6_iptunnel.c
@@ -126,6 +126,8 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
        } else {
                ip6_flow_hdr(hdr, 0, 0);
                hdr->hop_limit = ip6_dst_hoplimit(skb_dst(skb));
+
+               memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
        }
 
        hdr->nexthdr = NEXTHDR_ROUTING;
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 33ea389ee015..e494f04819e9 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -83,8 +83,7 @@
 #define L2TP_SLFLAG_S     0x40000000
 #define L2TP_SL_SEQ_MASK   0x00ffffff
 
-#define L2TP_HDR_SIZE_SEQ              10
-#define L2TP_HDR_SIZE_NOSEQ            6
+#define L2TP_HDR_SIZE_MAX              14
 
 /* Default trace flags */
 #define L2TP_DEFAULT_DEBUG_FLAGS       0
@@ -759,11 +758,9 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
                                 "%s: recv data ns=%u, session nr=%u\n",
                                 session->name, ns, session->nr);
                }
+               ptr += 4;
        }
 
-       /* Advance past L2-specific header, if present */
-       ptr += session->l2specific_len;
-
        if (L2TP_SKB_CB(skb)->has_seq) {
                /* Received a packet with sequence numbers. If we're the LNS,
                 * check if we sre sending sequence numbers and if not,
@@ -907,7 +904,7 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
        __skb_pull(skb, sizeof(struct udphdr));
 
        /* Short packet? */
-       if (!pskb_may_pull(skb, L2TP_HDR_SIZE_SEQ)) {
+       if (!pskb_may_pull(skb, L2TP_HDR_SIZE_MAX)) {
                l2tp_info(tunnel, L2TP_MSG_DATA,
                          "%s: recv short packet (len=%d)\n",
                          tunnel->name, skb->len);
@@ -986,6 +983,10 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
                goto error;
        }
 
+       if (tunnel->version == L2TP_HDR_VER_3 &&
+           l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
+               goto error;
+
        l2tp_recv_common(session, skb, ptr, optr, hdrflags, length, payload_hook);
        l2tp_session_dec_refcount(session);
 
@@ -1085,21 +1086,20 @@ static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf)
                memcpy(bufp, &session->cookie[0], session->cookie_len);
                bufp += session->cookie_len;
        }
-       if (session->l2specific_len) {
-               if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
-                       u32 l2h = 0;
-                       if (session->send_seq) {
-                               l2h = 0x40000000 | session->ns;
-                               session->ns++;
-                               session->ns &= 0xffffff;
-                               l2tp_dbg(session, L2TP_MSG_SEQ,
-                                        "%s: updated ns to %u\n",
-                                        session->name, session->ns);
-                       }
+       if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
+               u32 l2h = 0;
 
-                       *((__be32 *) bufp) = htonl(l2h);
+               if (session->send_seq) {
+                       l2h = 0x40000000 | session->ns;
+                       session->ns++;
+                       session->ns &= 0xffffff;
+                       l2tp_dbg(session, L2TP_MSG_SEQ,
+                                "%s: updated ns to %u\n",
+                                session->name, session->ns);
                }
-               bufp += session->l2specific_len;
+
+               *((__be32 *)bufp) = htonl(l2h);
+               bufp += 4;
        }
 
        return bufp - optr;
@@ -1765,7 +1765,7 @@ int l2tp_session_delete(struct l2tp_session *session)
 EXPORT_SYMBOL_GPL(l2tp_session_delete);
 
 /* We come here whenever a session's send_seq, cookie_len or
- * l2specific_len parameters are set.
+ * l2specific_type parameters are set.
  */
 void l2tp_session_set_header_len(struct l2tp_session *session, int version)
 {
@@ -1774,7 +1774,8 @@ void l2tp_session_set_header_len(struct l2tp_session *session, int version)
                if (session->send_seq)
                        session->hdr_len += 4;
        } else {
-               session->hdr_len = 4 + session->cookie_len + session->l2specific_len;
+               session->hdr_len = 4 + session->cookie_len;
+               session->hdr_len += l2tp_get_l2specific_len(session);
                if (session->tunnel->encap == L2TP_ENCAPTYPE_UDP)
                        session->hdr_len += 4;
        }
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 0a58c0754526..62598ee7b2e7 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -320,6 +320,37 @@ do {                                                                    \
 #define l2tp_session_dec_refcount(s) l2tp_session_dec_refcount_1(s)
 #endif
 
+static inline int l2tp_get_l2specific_len(struct l2tp_session *session)
+{
+       switch (session->l2specific_type) {
+       case L2TP_L2SPECTYPE_DEFAULT:
+               return 4;
+       case L2TP_L2SPECTYPE_NONE:
+       default:
+               return 0;
+       }
+}
+
+static inline int l2tp_v3_ensure_opt_in_linear(struct l2tp_session *session, struct sk_buff *skb,
+                                              unsigned char **ptr, unsigned char **optr)
+{
+       int opt_len = session->peer_cookie_len + l2tp_get_l2specific_len(session);
+
+       if (opt_len > 0) {
+               int off = *ptr - *optr;
+
+               if (!pskb_may_pull(skb, off + opt_len))
+                       return -1;
+
+               if (skb->data != *optr) {
+                       *optr = skb->data;
+                       *ptr = skb->data + off;
+               }
+       }
+
+       return 0;
+}
+
 #define l2tp_printk(ptr, type, func, fmt, ...)                         \
 do {                                                                   \
        if (((ptr)->debug) & (type))                                    \
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index e4280b6568b4..f7880becc165 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -165,6 +165,9 @@ static int l2tp_ip_recv(struct sk_buff *skb)
                print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
        }
 
+       if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
+               goto discard_sess;
+
        l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, tunnel->recv_payload_hook);
        l2tp_session_dec_refcount(session);
 
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 8bcaa975b432..3c77507601c7 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -178,6 +178,9 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
                print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
        }
 
+       if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
+               goto discard_sess;
+
        l2tp_recv_common(session, skb, ptr, optr, 0, skb->len,
                         tunnel->recv_payload_hook);
        l2tp_session_dec_refcount(session);
diff --git a/net/netrom/nr_timer.c b/net/netrom/nr_timer.c
index 94d05806a9a2..f0ecaec1ff3d 100644
--- a/net/netrom/nr_timer.c
+++ b/net/netrom/nr_timer.c
@@ -53,21 +53,21 @@ void nr_start_t1timer(struct sock *sk)
 {
        struct nr_sock *nr = nr_sk(sk);
 
-       mod_timer(&nr->t1timer, jiffies + nr->t1);
+       sk_reset_timer(sk, &nr->t1timer, jiffies + nr->t1);
 }
 
 void nr_start_t2timer(struct sock *sk)
 {
        struct nr_sock *nr = nr_sk(sk);
 
-       mod_timer(&nr->t2timer, jiffies + nr->t2);
+       sk_reset_timer(sk, &nr->t2timer, jiffies + nr->t2);
 }
 
 void nr_start_t4timer(struct sock *sk)
 {
        struct nr_sock *nr = nr_sk(sk);
 
-       mod_timer(&nr->t4timer, jiffies + nr->t4);
+       sk_reset_timer(sk, &nr->t4timer, jiffies + nr->t4);
 }
 
 void nr_start_idletimer(struct sock *sk)
@@ -75,37 +75,37 @@ void nr_start_idletimer(struct sock *sk)
        struct nr_sock *nr = nr_sk(sk);
 
        if (nr->idle > 0)
-               mod_timer(&nr->idletimer, jiffies + nr->idle);
+               sk_reset_timer(sk, &nr->idletimer, jiffies + nr->idle);
 }
 
 void nr_start_heartbeat(struct sock *sk)
 {
-       mod_timer(&sk->sk_timer, jiffies + 5 * HZ);
+       sk_reset_timer(sk, &sk->sk_timer, jiffies + 5 * HZ);
 }
 
 void nr_stop_t1timer(struct sock *sk)
 {
-       del_timer(&nr_sk(sk)->t1timer);
+       sk_stop_timer(sk, &nr_sk(sk)->t1timer);
 }
 
 void nr_stop_t2timer(struct sock *sk)
 {
-       del_timer(&nr_sk(sk)->t2timer);
+       sk_stop_timer(sk, &nr_sk(sk)->t2timer);
 }
 
 void nr_stop_t4timer(struct sock *sk)
 {
-       del_timer(&nr_sk(sk)->t4timer);
+       sk_stop_timer(sk, &nr_sk(sk)->t4timer);
 }
 
 void nr_stop_idletimer(struct sock *sk)
 {
-       del_timer(&nr_sk(sk)->idletimer);
+       sk_stop_timer(sk, &nr_sk(sk)->idletimer);
 }
 
 void nr_stop_heartbeat(struct sock *sk)
 {
-       del_timer(&sk->sk_timer);
+       sk_stop_timer(sk, &sk->sk_timer);
 }
 
 int nr_t1timer_running(struct sock *sk)
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index 452bbb38d943..2741abec7ee7 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -848,6 +848,7 @@ void rose_link_device_down(struct net_device *dev)
 
 /*
  *     Route a frame to an appropriate AX.25 connection.
+ *     A NULL ax25_cb indicates an internally generated frame.
  */
 int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
 {
@@ -865,6 +866,10 @@ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
 
        if (skb->len < ROSE_MIN_LEN)
                return res;
+
+       if (!ax25)
+               return rose_loopback_queue(skb, NULL);
+
        frametype = skb->data[2];
        lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);
        if (frametype == ROSE_CALL_REQUEST &&
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index 61273534ae10..71534bd4e77c 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -360,9 +360,9 @@ struct sctp_chunk *sctp_process_strreset_outreq(
        struct sctp_strreset_outreq *outreq = param.v;
        struct sctp_stream *stream = &asoc->stream;
        __u32 result = SCTP_STRRESET_DENIED;
-       __u16 i, nums, flags = 0;
        __be16 *str_p = NULL;
        __u32 request_seq;
+       __u16 i, nums;
 
        request_seq = ntohl(outreq->request_seq);
 
@@ -390,6 +390,15 @@ struct sctp_chunk *sctp_process_strreset_outreq(
        if (!(asoc->strreset_enable & SCTP_ENABLE_RESET_STREAM_REQ))
                goto out;
 
+       nums = (ntohs(param.p->length) - sizeof(*outreq)) / sizeof(__u16);
+       str_p = outreq->list_of_streams;
+       for (i = 0; i < nums; i++) {
+               if (ntohs(str_p[i]) >= stream->incnt) {
+                       result = SCTP_STRRESET_ERR_WRONG_SSN;
+                       goto out;
+               }
+       }
+
        if (asoc->strreset_chunk) {
                if (!sctp_chunk_lookup_strreset_param(
                                asoc, outreq->response_seq,
@@ -412,32 +421,19 @@ struct sctp_chunk *sctp_process_strreset_outreq(
                        sctp_chunk_put(asoc->strreset_chunk);
                        asoc->strreset_chunk = NULL;
                }
-
-               flags = SCTP_STREAM_RESET_INCOMING_SSN;
        }
 
-       nums = (ntohs(param.p->length) - sizeof(*outreq)) / 2;
-       if (nums) {
-               str_p = outreq->list_of_streams;
-               for (i = 0; i < nums; i++) {
-                       if (ntohs(str_p[i]) >= stream->incnt) {
-                               result = SCTP_STRRESET_ERR_WRONG_SSN;
-                               goto out;
-                       }
-               }
-
+       if (nums)
                for (i = 0; i < nums; i++)
                        stream->in[ntohs(str_p[i])].ssn = 0;
-       } else {
+       else
                for (i = 0; i < stream->incnt; i++)
                        stream->in[i].ssn = 0;
-       }
 
        result = SCTP_STRRESET_PERFORMED;
 
        *evp = sctp_ulpevent_make_stream_reset_event(asoc,
-               flags | SCTP_STREAM_RESET_OUTGOING_SSN, nums, str_p,
-               GFP_ATOMIC);
+               SCTP_STREAM_RESET_INCOMING_SSN, nums, str_p, GFP_ATOMIC);
 
 out:
        sctp_update_strreset_result(asoc, result);
@@ -507,9 +503,6 @@ struct sctp_chunk *sctp_process_strreset_inreq(
 
        result = SCTP_STRRESET_PERFORMED;
 
-       *evp = sctp_ulpevent_make_stream_reset_event(asoc,
-               SCTP_STREAM_RESET_INCOMING_SSN, nums, str_p, GFP_ATOMIC);
-
 out:
        sctp_update_strreset_result(asoc, result);
 err:
@@ -642,6 +635,16 @@ struct sctp_chunk *sctp_process_strreset_addstrm_out(
        if (!(asoc->strreset_enable & SCTP_ENABLE_CHANGE_ASSOC_REQ))
                goto out;
 
+       in = ntohs(addstrm->number_of_streams);
+       incnt = stream->incnt + in;
+       if (!in || incnt > SCTP_MAX_STREAM)
+               goto out;
+
+       streamin = krealloc(stream->in, incnt * sizeof(*streamin),
+                           GFP_ATOMIC);
+       if (!streamin)
+               goto out;
+
        if (asoc->strreset_chunk) {
                if (!sctp_chunk_lookup_strreset_param(
                        asoc, 0, SCTP_PARAM_RESET_ADD_IN_STREAMS)) {
@@ -665,16 +668,6 @@ struct sctp_chunk *sctp_process_strreset_addstrm_out(
                }
        }
 
-       in = ntohs(addstrm->number_of_streams);
-       incnt = stream->incnt + in;
-       if (!in || incnt > SCTP_MAX_STREAM)
-               goto out;
-
-       streamin = krealloc(stream->in, incnt * sizeof(*streamin),
-                           GFP_ATOMIC);
-       if (!streamin)
-               goto out;
-
        memset(streamin + stream->incnt, 0, in * sizeof(*streamin));
        stream->in = streamin;
        stream->incnt = incnt;
@@ -750,9 +743,6 @@ struct sctp_chunk *sctp_process_strreset_addstrm_in(
 
        result = SCTP_STRRESET_PERFORMED;
 
-       *evp = sctp_ulpevent_make_stream_change_event(asoc,
-               0, 0, ntohs(addstrm->number_of_streams), GFP_ATOMIC);
-
 out:
        sctp_update_strreset_result(asoc, result);
 err:
@@ -805,10 +795,10 @@ struct sctp_chunk *sctp_process_strreset_resp(
                                for (i = 0; i < stream->outcnt; i++)
                                        stream->out[i].ssn = 0;
                        }
-
-                       flags = SCTP_STREAM_RESET_OUTGOING_SSN;
                }
 
+               flags |= SCTP_STREAM_RESET_OUTGOING_SSN;
+
                for (i = 0; i < stream->outcnt; i++)
                        stream->out[i].state = SCTP_STREAM_OPEN;
 
@@ -826,6 +816,8 @@ struct sctp_chunk *sctp_process_strreset_resp(
                str_p = inreq->list_of_streams;
                nums = (ntohs(inreq->param_hdr.length) - sizeof(*inreq)) / 2;
 
+               flags |= SCTP_STREAM_RESET_INCOMING_SSN;
+
                *evp = sctp_ulpevent_make_stream_reset_event(asoc, flags,
                        nums, str_p, GFP_ATOMIC);
        } else if (req->type == SCTP_PARAM_RESET_TSN_REQUEST) {
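
The reordering in sctp_process_strreset_outreq() and sctp_process_strreset_addstrm_out() follows a validate-before-commit pattern: the stream list (or the requested stream count and the krealloc()) is checked up front, so a request that ends up denied never leaves the association state half modified. A generic sketch of the validation step, using a hypothetical helper name:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical helper, for illustration: reject the whole request if any
 * stream id in the (big-endian) list is outside the incoming stream range,
 * before any SSN is cleared. */
static bool strreset_list_valid(const __be16 *list, __u16 nums, __u16 incnt)
{
	__u16 i;

	for (i = 0; i < nums; i++)
		if (ntohs(list[i]) >= incnt)
			return false;	/* unknown incoming stream */
	return true;
}
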
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 1191d8925c44..972fd95f08ca 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -118,6 +118,7 @@ struct alc_spec {
        int codec_variant;      /* flag for other variants */
        unsigned int has_alc5505_dsp:1;
        unsigned int no_depop_delay:1;
+       unsigned int done_hp_init:1;
 
        /* for PLL fix */
        hda_nid_t pll_nid;
@@ -3213,6 +3214,48 @@ static void alc_default_shutup(struct hda_codec *codec)
        snd_hda_shutup_pins(codec);
 }
 
+static void alc294_hp_init(struct hda_codec *codec)
+{
+       struct alc_spec *spec = codec->spec;
+       hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+       int i, val;
+
+       if (!hp_pin)
+               return;
+
+       snd_hda_codec_write(codec, hp_pin, 0,
+                           AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+
+       msleep(100);
+
+       snd_hda_codec_write(codec, hp_pin, 0,
+                           AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+
+       alc_update_coef_idx(codec, 0x6f, 0x000f, 0);/* Set HP depop to manual mode */
+       alc_update_coefex_idx(codec, 0x58, 0x00, 0x8000, 0x8000); /* HP depop procedure start */
+
+       /* Wait for depop procedure finish  */
+       val = alc_read_coefex_idx(codec, 0x58, 0x01);
+       for (i = 0; i < 20 && val & 0x0080; i++) {
+               msleep(50);
+               val = alc_read_coefex_idx(codec, 0x58, 0x01);
+       }
+       /* Set HP depop to auto mode */
+       alc_update_coef_idx(codec, 0x6f, 0x000f, 0x000b);
+       msleep(50);
+}
+
+static void alc294_init(struct hda_codec *codec)
+{
+       struct alc_spec *spec = codec->spec;
+
+       if (!spec->done_hp_init) {
+               alc294_hp_init(codec);
+               spec->done_hp_init = true;
+       }
+       alc_default_init(codec);
+}
+
 static void alc5505_coef_set(struct hda_codec *codec, unsigned int index_reg,
                             unsigned int val)
 {
@@ -6981,37 +7024,6 @@ static void alc269_fill_coef(struct hda_codec *codec)
        alc_update_coef_idx(codec, 0x4, 0, 1<<11);
 }
 
-static void alc294_hp_init(struct hda_codec *codec)
-{
-       struct alc_spec *spec = codec->spec;
-       hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
-       int i, val;
-
-       if (!hp_pin)
-               return;
-
-       snd_hda_codec_write(codec, hp_pin, 0,
-                           AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
-
-       msleep(100);
-
-       snd_hda_codec_write(codec, hp_pin, 0,
-                           AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
-
-       alc_update_coef_idx(codec, 0x6f, 0x000f, 0);/* Set HP depop to manual mode */
-       alc_update_coefex_idx(codec, 0x58, 0x00, 0x8000, 0x8000); /* HP depop procedure start */
-
-       /* Wait for depop procedure finish  */
-       val = alc_read_coefex_idx(codec, 0x58, 0x01);
-       for (i = 0; i < 20 && val & 0x0080; i++) {
-               msleep(50);
-               val = alc_read_coefex_idx(codec, 0x58, 0x01);
-       }
-       /* Set HP depop to auto mode */
-       alc_update_coef_idx(codec, 0x6f, 0x000f, 0x000b);
-       msleep(50);
-}
-
 /*
  */
 static int patch_alc269(struct hda_codec *codec)
@@ -7148,7 +7160,7 @@ static int patch_alc269(struct hda_codec *codec)
                spec->codec_variant = ALC269_TYPE_ALC294;
                spec->gen.mixer_nid = 0; /* ALC2x4 does not have any loopback mixer path */
                alc_update_coef_idx(codec, 0x6b, 0x0018, (1<<4) | (1<<3)); /* UAJ MIC Vref control by verb */
-               alc294_hp_init(codec);
+               spec->init_hook = alc294_init;
                break;
        case 0x10ec0300:
                spec->codec_variant = ALC269_TYPE_ALC300;
@@ -7160,7 +7172,7 @@ static int patch_alc269(struct hda_codec *codec)
                spec->codec_variant = ALC269_TYPE_ALC700;
                spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */
                alc_update_coef_idx(codec, 0x4a, 1 << 15, 0); /* Combo jack auto trigger control */
-               alc294_hp_init(codec);
+               spec->init_hook = alc294_init;
                break;
 
        }
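
Moving the ALC294 and ALC700 entries from a direct alc294_hp_init() call to spec->init_hook matters because the hook is invoked on every codec initialization, including after resume, whereas the old call ran only at probe time; the new done_hp_init flag then keeps the slow headphone depop sequence from repeating. A rough sketch of the dispatch this is assumed to rely on; the function name and body below are illustrative, not quoted from the driver:

/* Rough sketch: the generic init path calls the per-variant hook each time
 * the codec is (re)initialized. */
static int alc_init_sketch(struct hda_codec *codec)
{
	struct alc_spec *spec = codec->spec;

	if (spec->init_hook)
		spec->init_hook(codec);	/* alc294_init() for ALC294/ALC700 */

	/* ...generic codec initialization continues here... */
	return 0;
}
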
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index 194759ec9e70..ba15baa2061b 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -1554,7 +1554,16 @@ TEST_F(TRACE_poke, getpid_runs_normally)
 #ifdef SYSCALL_NUM_RET_SHARE_REG
 # define EXPECT_SYSCALL_RETURN(val, action)    EXPECT_EQ(-1, action)
 #else
-# define EXPECT_SYSCALL_RETURN(val, action)    EXPECT_EQ(val, action)
+# define EXPECT_SYSCALL_RETURN(val, action)            \
+       do {                                            \
+               errno = 0;                              \
+               if (val < 0) {                          \
+                       EXPECT_EQ(-1, action);          \
+                       EXPECT_EQ(-(val), errno);       \
+               } else {                                \
+                       EXPECT_EQ(val, action);         \
+               }                                       \
+       } while (0)
 #endif
 
 /* Use PTRACE_GETREGS and PTRACE_SETREGS when available. This is useful for
@@ -1593,7 +1602,7 @@ int get_syscall(struct __test_metadata *_metadata, pid_t tracee)
 
 /* Architecture-specific syscall changing routine. */
 void change_syscall(struct __test_metadata *_metadata,
-                   pid_t tracee, int syscall)
+                   pid_t tracee, int syscall, int result)
 {
        int ret;
        ARCH_REGS regs;
@@ -1652,7 +1661,7 @@ void change_syscall(struct __test_metadata *_metadata,
 #ifdef SYSCALL_NUM_RET_SHARE_REG
                TH_LOG("Can't modify syscall return on this architecture");
 #else
-               regs.SYSCALL_RET = EPERM;
+               regs.SYSCALL_RET = result;
 #endif
 
 #ifdef HAVE_GETREGS
@@ -1680,14 +1689,19 @@ void tracer_syscall(struct __test_metadata *_metadata, pid_t tracee,
        case 0x1002:
                /* change getpid to getppid. */
                EXPECT_EQ(__NR_getpid, get_syscall(_metadata, tracee));
-               change_syscall(_metadata, tracee, __NR_getppid);
+               change_syscall(_metadata, tracee, __NR_getppid, 0);
                break;
        case 0x1003:
-               /* skip gettid. */
+               /* skip gettid with valid return code. */
                EXPECT_EQ(__NR_gettid, get_syscall(_metadata, tracee));
-               change_syscall(_metadata, tracee, -1);
+               change_syscall(_metadata, tracee, -1, 45000);
                break;
        case 0x1004:
+               /* skip openat with error. */
+               EXPECT_EQ(__NR_openat, get_syscall(_metadata, tracee));
+               change_syscall(_metadata, tracee, -1, -ESRCH);
+               break;
+       case 0x1005:
                /* do nothing (allow getppid) */
                EXPECT_EQ(__NR_getppid, get_syscall(_metadata, tracee));
                break;
@@ -1720,9 +1734,11 @@ void tracer_ptrace(struct __test_metadata *_metadata, pid_t tracee,
        nr = get_syscall(_metadata, tracee);
 
        if (nr == __NR_getpid)
-               change_syscall(_metadata, tracee, __NR_getppid);
+               change_syscall(_metadata, tracee, __NR_getppid, 0);
+       if (nr == __NR_gettid)
+               change_syscall(_metadata, tracee, -1, 45000);
        if (nr == __NR_openat)
-               change_syscall(_metadata, tracee, -1);
+               change_syscall(_metadata, tracee, -1, -ESRCH);
 }
 
 FIXTURE_DATA(TRACE_syscall) {
@@ -1739,8 +1755,10 @@ FIXTURE_SETUP(TRACE_syscall)
                BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1002),
                BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_gettid, 0, 1),
                BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1003),
-               BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1),
+               BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_openat, 0, 1),
                BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1004),
+               BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1),
+               BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1005),
                BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
        };
 
@@ -1788,15 +1806,26 @@ TEST_F(TRACE_syscall, ptrace_syscall_redirected)
        EXPECT_NE(self->mypid, syscall(__NR_getpid));
 }
 
-TEST_F(TRACE_syscall, ptrace_syscall_dropped)
+TEST_F(TRACE_syscall, ptrace_syscall_errno)
+{
+       /* Swap SECCOMP_RET_TRACE tracer for PTRACE_SYSCALL tracer. */
+       teardown_trace_fixture(_metadata, self->tracer);
+       self->tracer = setup_trace_fixture(_metadata, tracer_ptrace, NULL,
+                                          true);
+
+       /* Tracer should skip the open syscall, resulting in ESRCH. */
+       EXPECT_SYSCALL_RETURN(-ESRCH, syscall(__NR_openat));
+}
+
+TEST_F(TRACE_syscall, ptrace_syscall_faked)
 {
        /* Swap SECCOMP_RET_TRACE tracer for PTRACE_SYSCALL tracer. */
        teardown_trace_fixture(_metadata, self->tracer);
        self->tracer = setup_trace_fixture(_metadata, tracer_ptrace, NULL,
                                           true);
 
-       /* Tracer should skip the open syscall, resulting in EPERM. */
-       EXPECT_SYSCALL_RETURN(EPERM, syscall(__NR_openat));
+       /* Tracer should skip the gettid syscall, resulting fake pid. */
+       EXPECT_SYSCALL_RETURN(45000, syscall(__NR_gettid));
 }
 
 TEST_F(TRACE_syscall, syscall_allowed)
@@ -1829,7 +1858,21 @@ TEST_F(TRACE_syscall, syscall_redirected)
        EXPECT_NE(self->mypid, syscall(__NR_getpid));
 }
 
-TEST_F(TRACE_syscall, syscall_dropped)
+TEST_F(TRACE_syscall, syscall_errno)
+{
+       long ret;
+
+       ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
+       ASSERT_EQ(0, ret);
+
+       ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
+       ASSERT_EQ(0, ret);
+
+       /* openat has been skipped and an errno return. */
+       EXPECT_SYSCALL_RETURN(-ESRCH, syscall(__NR_openat));
+}
+
+TEST_F(TRACE_syscall, syscall_faked)
 {
        long ret;
 
@@ -1840,8 +1883,7 @@ TEST_F(TRACE_syscall, syscall_dropped)
        ASSERT_EQ(0, ret);
 
        /* gettid has been skipped and an altered return value stored. */
-       EXPECT_SYSCALL_RETURN(EPERM, syscall(__NR_gettid));
-       EXPECT_NE(self->mytid, syscall(__NR_gettid));
+       EXPECT_SYSCALL_RETURN(45000, syscall(__NR_gettid));
 }
 
 TEST_F(TRACE_syscall, skip_after_RET_TRACE)
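
The new test cases distinguish the two ways a tracer can skip a syscall: storing a negative value, which userspace sees as a return of -1 with errno set, and storing a positive value, which comes back directly as the syscall's result. A small userspace illustration of what the tests expect, assuming the PTRACE_SYSCALL tracer from the fixture is already attached:

#include <assert.h>
#include <errno.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Illustration only: mirrors the expectations of ptrace_syscall_faked and
 * ptrace_syscall_errno above; outside that tracer these asserts would fail. */
int main(void)
{
	long ret;

	ret = syscall(__NR_gettid);	/* tracer fakes the return value */
	assert(ret == 45000);

	errno = 0;
	ret = syscall(__NR_openat);	/* tracer injects -ESRCH */
	assert(ret == -1 && errno == ESRCH);

	return 0;
}
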
