commit:     149745ed61e0ffc43e55b6682710f9553c3ceb45
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Oct 27 13:19:39 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Oct 27 13:19:39 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=149745ed
Linux patch 4.1.12

 0000_README             |    4 +
 1011_linux-4.1.12.patch | 1494 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1498 insertions(+)

diff --git a/0000_README b/0000_README
index 18e95dd..8ed7605 100644
--- a/0000_README
+++ b/0000_README
@@ -87,6 +87,10 @@ Patch: 1010_linux-4.1.11.patch
 From: http://www.kernel.org
 Desc: Linux 4.1.11
 
+Patch: 1011_linux-4.1.12.patch
+From: http://www.kernel.org
+Desc: Linux 4.1.12
+
 Patch: 1500_XATTR_USER_PREFIX.patch
 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1011_linux-4.1.12.patch b/1011_linux-4.1.12.patch
new file mode 100644
index 0000000..07910df
--- /dev/null
+++ b/1011_linux-4.1.12.patch
@@ -0,0 +1,1494 @@
+diff --git a/Makefile b/Makefile
+index c7d877b1c248..2320f1911404 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 1
+-SUBLEVEL = 11
++SUBLEVEL = 12
+ EXTRAVERSION =
+ NAME = Series 4800
+ 
+diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
+index 81151663ef38..3258174e6152 100644
+--- a/arch/arm64/Makefile
++++ b/arch/arm64/Makefile
+@@ -31,7 +31,7 @@ endif
+ CHECKFLAGS += -D__aarch64__
+ 
+ ifeq ($(CONFIG_ARM64_ERRATUM_843419), y)
+-CFLAGS_MODULE += -mcmodel=large
++KBUILD_CFLAGS_MODULE += -mcmodel=large
+ endif
+ 
+ # Default value
+diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
+index 56283f8a675c..cf7319422768 100644
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -80,7 +80,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
+ #define PAGE_S2 __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
+ #define PAGE_S2_DEVICE __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)
+ 
+-#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
++#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
+ #define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
+ #define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
+ #define PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
+@@ -460,7 +460,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr)
+ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+ {
+ const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
+- PTE_PROT_NONE | PTE_WRITE | PTE_TYPE_MASK;
++ PTE_PROT_NONE | PTE_VALID | PTE_WRITE;
+ pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
+ return pte;
+ }
+diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
+index df81caab7383..f1e0e5522e3a 100644
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -2178,7 +2178,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
+ vc->runner = vcpu;
+ if (n_ceded == vc->n_runnable) {
+ kvmppc_vcore_blocked(vc);
+- } else if (should_resched()) {
++ } else if (need_resched()) {
+ vc->vcore_state = VCORE_PREEMPT;
+ /* Let something else run */
+ cond_resched_lock(&vc->lock);
+diff --git a/arch/sparc/crypto/aes_glue.c b/arch/sparc/crypto/aes_glue.c
+index 2e48eb8813ff..c90930de76ba 100644
+--- a/arch/sparc/crypto/aes_glue.c
++++ b/arch/sparc/crypto/aes_glue.c
+@@ -433,6 +433,7 @@ static struct crypto_alg algs[] = { {
+ .blkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
++ .ivsize = AES_BLOCK_SIZE,
+ .setkey = aes_set_key,
+ .encrypt = cbc_encrypt,
+ .decrypt = cbc_decrypt,
+@@ -452,6 +453,7 @@ static struct crypto_alg algs[] = { {
+ .blkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
++ .ivsize = AES_BLOCK_SIZE,
+ .setkey = aes_set_key,
+ .encrypt = ctr_crypt,
+ .decrypt = ctr_crypt,
+diff --git a/arch/sparc/crypto/camellia_glue.c b/arch/sparc/crypto/camellia_glue.c
+index 6bf2479a12fb..561a84d93cf6 100644
+--- a/arch/sparc/crypto/camellia_glue.c
++++ b/arch/sparc/crypto/camellia_glue.c
+@@ -274,6 +274,7 @@ static struct crypto_alg algs[] = { {
+ .blkcipher = {
+ .min_keysize = CAMELLIA_MIN_KEY_SIZE,
+ .max_keysize = CAMELLIA_MAX_KEY_SIZE,
++ .ivsize = CAMELLIA_BLOCK_SIZE,
+ .setkey = camellia_set_key,
+ .encrypt = cbc_encrypt,
+ .decrypt = cbc_decrypt,
+diff --git a/arch/sparc/crypto/des_glue.c b/arch/sparc/crypto/des_glue.c
+index dd6a34fa6e19..61af794aa2d3 100644
+--- a/arch/sparc/crypto/des_glue.c
++++ b/arch/sparc/crypto/des_glue.c
+@@ -429,6 +429,7 @@ static struct crypto_alg algs[] = { {
+ .blkcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
++ .ivsize = DES_BLOCK_SIZE,
+ .setkey = des_set_key,
+ .encrypt = cbc_encrypt,
+ .decrypt = cbc_decrypt,
+@@ -485,6 +486,7 @@ static struct crypto_alg algs[] = { {
+ .blkcipher = {
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
++ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .setkey = des3_ede_set_key,
+ .encrypt = cbc3_encrypt,
+ .decrypt = cbc3_decrypt,
+diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
+index 8f3271842533..67b6cd00a44f 100644
+--- a/arch/x86/include/asm/preempt.h
++++ b/arch/x86/include/asm/preempt.h
+@@ -90,9 +90,9 @@ static __always_inline bool __preempt_count_dec_and_test(void)
+ /*
+ * Returns true when we need to resched and can (barring IRQ state).
+ */
+-static __always_inline bool should_resched(void)
++static __always_inline bool should_resched(int preempt_offset)
+ {
+- return unlikely(!raw_cpu_read_4(__preempt_count));
++ return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
+ }
+ 
+ #ifdef CONFIG_PREEMPT
+diff --git a/crypto/ahash.c b/crypto/ahash.c
+index 8acb886032ae..9c1dc8d6106a 100644
+--- a/crypto/ahash.c
++++ b/crypto/ahash.c
+@@ -544,7 +544,8 @@ static int ahash_prepare_alg(struct ahash_alg *alg)
+ struct crypto_alg *base = &alg->halg.base;
+ 
+ if (alg->halg.digestsize > PAGE_SIZE / 8 ||
+- alg->halg.statesize > PAGE_SIZE / 8)
++ alg->halg.statesize > PAGE_SIZE / 8 ||
++ alg->halg.statesize == 0)
+ return -EINVAL;
+ 
+ base->cra_type = &crypto_ahash_type;
+diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
+index 010ce0b1f517..fe8f1e4b4c7c 100644
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -5174,7 +5174,6 @@ static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
+ out_err:
+ if (parent) {
+ rbd_dev_unparent(rbd_dev);
+- kfree(rbd_dev->header_name);
+ rbd_dev_destroy(parent);
+ } else {
+ rbd_put_client(rbdc);
+diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
+index 7f467fdc9107..2a2eb96caeda 100644
+--- a/drivers/gpu/drm/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
+@@ -2766,12 +2766,13 @@ static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs
+ if (msgs[num - 1].flags & I2C_M_RD)
+ reading = true;
+ 
+- if (!reading) {
++ if (!reading || (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)) {
+ DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
+ ret = -EIO;
+ goto out;
+ }
+ 
++ memset(&msg, 0, sizeof(msg));
+ msg.req_type = DP_REMOTE_I2C_READ;
+ msg.u.i2c_read.num_transactions = num - 1;
+ msg.u.i2c_read.port_number = port->port_num;
+diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
+index eb7e61078a5b..92586b0af3ab 100644
+--- a/drivers/gpu/drm/drm_sysfs.c
++++ b/drivers/gpu/drm/drm_sysfs.c
+@@ -235,18 +235,12 @@ static ssize_t dpms_show(struct device *device,
+ char *buf)
+ {
+ struct drm_connector *connector = to_drm_connector(device);
+- struct drm_device *dev = connector->dev;
+- uint64_t dpms_status;
+- int ret;
++ int dpms;
+ 
+- ret = drm_object_property_get_value(&connector->base,
+- dev->mode_config.dpms_property,
+- &dpms_status);
+- if (ret)
+- return 0;
++ dpms = READ_ONCE(connector->dpms);
+ 
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+- drm_get_dpms_name((int)dpms_status));
++ drm_get_dpms_name(dpms));
+ }
+ 
+ static ssize_t enabled_show(struct device *device,
+diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+index 6751553abe4a..567791b27d6d 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
++++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+@@ -178,8 +178,30 @@ nouveau_fbcon_sync(struct fb_info *info)
+ return 0;
+ }
+ 
++static int
++nouveau_fbcon_open(struct fb_info *info, int user)
++{
++ struct nouveau_fbdev *fbcon = info->par;
++ struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
++ int ret = pm_runtime_get_sync(drm->dev->dev);
++ if (ret < 0 && ret != -EACCES)
++ return ret;
++ return 0;
++}
++
++static int
++nouveau_fbcon_release(struct fb_info *info, int user)
++{
++ struct nouveau_fbdev *fbcon = info->par;
++ struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
++ pm_runtime_put(drm->dev->dev);
++ return 0;
++}
++
+ static struct fb_ops nouveau_fbcon_ops = {
+ .owner = THIS_MODULE,
++ .fb_open = nouveau_fbcon_open,
++ .fb_release = nouveau_fbcon_release,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_fillrect = nouveau_fbcon_fillrect,
+@@ -195,6 +217,8 @@ static struct fb_ops nouveau_fbcon_ops = {
+ 
+ static struct fb_ops nouveau_fbcon_sw_ops = {
+ .owner = THIS_MODULE,
++ .fb_open = nouveau_fbcon_open,
++ .fb_release = nouveau_fbcon_release,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_fillrect = cfb_fillrect,
+diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
+index d2e9e9efc159..6743174acdbc 100644
+--- a/drivers/gpu/drm/radeon/radeon_display.c
++++ b/drivers/gpu/drm/radeon/radeon_display.c
+@@ -1633,18 +1633,8 @@ int radeon_modeset_init(struct radeon_device *rdev)
+ radeon_fbdev_init(rdev);
+ drm_kms_helper_poll_init(rdev->ddev);
+ 
+- if (rdev->pm.dpm_enabled) {
+- /* do dpm late init */
+- ret = radeon_pm_late_init(rdev);
+- if (ret) {
+- rdev->pm.dpm_enabled = false;
+- DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
+- }
+- /* set the dpm state for PX since there won't be
+- * a modeset to call this.
+- */
+- radeon_pm_compute_clocks(rdev);
+- }
++ /* do pm late init */
++ ret = radeon_pm_late_init(rdev);
+ 
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
+index 257b10be5cda..42986130cc63 100644
+--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
++++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
+@@ -283,6 +283,7 @@ static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topol
+ radeon_connector->mst_encoder = radeon_dp_create_fake_mst_encoder(master);
+ 
+ drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
++ drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0);
+ drm_mode_connector_set_path_property(connector, pathprop);
+ drm_reinit_primary_mode_group(dev);
+ 
+diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
+index c1ba83a8dd8c..948c33105801 100644
+--- a/drivers/gpu/drm/radeon/radeon_pm.c
++++ b/drivers/gpu/drm/radeon/radeon_pm.c
+@@ -1331,14 +1331,6 @@ static int radeon_pm_init_old(struct radeon_device *rdev)
+ INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);
+ 
+ if (rdev->pm.num_power_states > 1) {
+- /* where's the best place to put these? */
+- ret = device_create_file(rdev->dev, &dev_attr_power_profile);
+- if (ret)
+- DRM_ERROR("failed to create device file for power profile\n");
+- ret = device_create_file(rdev->dev, &dev_attr_power_method);
+- if (ret)
+- DRM_ERROR("failed to create device file for power method\n");
+-
+ if (radeon_debugfs_pm_init(rdev)) {
+ DRM_ERROR("Failed to register debugfs file for PM!\n");
+ }
+@@ -1396,20 +1388,6 @@ static int radeon_pm_init_dpm(struct radeon_device *rdev)
+ goto dpm_failed;
+ rdev->pm.dpm_enabled = true;
+ 
+- ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state);
+- if (ret)
+- DRM_ERROR("failed to create device file for dpm state\n");
+- ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
+- if (ret)
+- DRM_ERROR("failed to create device file for dpm state\n");
+- /* XXX: these are noops for dpm but are here for backwards compat */
+- ret = device_create_file(rdev->dev, &dev_attr_power_profile);
+- if (ret)
+- DRM_ERROR("failed to create device file for power profile\n");
+- ret = device_create_file(rdev->dev, &dev_attr_power_method);
+- if (ret)
+- DRM_ERROR("failed to create device file for power method\n");
+-
+ if (radeon_debugfs_pm_init(rdev)) {
+ DRM_ERROR("Failed to register debugfs file for dpm!\n");
+ }
+@@ -1550,9 +1528,44 @@ int radeon_pm_late_init(struct radeon_device *rdev)
+ int ret = 0;
+ 
+ if (rdev->pm.pm_method == PM_METHOD_DPM) {
+- mutex_lock(&rdev->pm.mutex);
+- ret = radeon_dpm_late_enable(rdev);
+- mutex_unlock(&rdev->pm.mutex);
++ if (rdev->pm.dpm_enabled) {
++ ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state);
++ if (ret)
++ DRM_ERROR("failed to create device file for dpm state\n");
++ ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
++ if (ret)
++ DRM_ERROR("failed to create device file for dpm state\n");
++ /* XXX: these are noops for dpm but are here for backwards compat */
++ ret = device_create_file(rdev->dev, &dev_attr_power_profile);
++ if (ret)
++ DRM_ERROR("failed to create device file for power profile\n");
++ ret = device_create_file(rdev->dev, &dev_attr_power_method);
++ if (ret)
++ DRM_ERROR("failed to create device file for power method\n");
++
++ mutex_lock(&rdev->pm.mutex);
++ ret = radeon_dpm_late_enable(rdev);
++ mutex_unlock(&rdev->pm.mutex);
++ if (ret) {
++ rdev->pm.dpm_enabled = false;
++ DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
++ } else {
++ /* set the dpm state for PX since there won't be
++ * a modeset to call this.
++ */
++ radeon_pm_compute_clocks(rdev);
++ }
++ }
++ } else {
++ if (rdev->pm.num_power_states > 1) {
++ /* where's the best place to put these? */
++ ret = device_create_file(rdev->dev, &dev_attr_power_profile);
++ if (ret)
++ DRM_ERROR("failed to create device file for power profile\n");
++ ret = device_create_file(rdev->dev, &dev_attr_power_method);
++ if (ret)
++ DRM_ERROR("failed to create device file for power method\n");
++ }
+ }
+ return ret;
+ }
+diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
+index 0a80e4aabaed..3f7d4876937e 100644
+--- a/drivers/i2c/busses/i2c-designware-platdrv.c
++++ b/drivers/i2c/busses/i2c-designware-platdrv.c
+@@ -24,6 +24,7 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/delay.h>
++#include <linux/dmi.h>
+ #include <linux/i2c.h>
+ #include <linux/clk.h>
+ #include <linux/clk-provider.h>
+@@ -51,6 +52,22 @@ static u32 i2c_dw_get_clk_rate_khz(struct dw_i2c_dev *dev)
+ }
+ 
+ #ifdef CONFIG_ACPI
++/*
++ * The HCNT/LCNT information coming from ACPI should be the most accurate
++ * for given platform. However, some systems get it wrong. On such systems
++ * we get better results by calculating those based on the input clock.
++ */
++static const struct dmi_system_id dw_i2c_no_acpi_params[] = {
++ {
++ .ident = "Dell Inspiron 7348",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7348"),
++ },
++ },
++ { }
++};
++
+ static void dw_i2c_acpi_params(struct platform_device *pdev, char method[],
+ u16 *hcnt, u16 *lcnt, u32 *sda_hold)
+ {
+@@ -58,6 +75,9 @@ static void dw_i2c_acpi_params(struct platform_device *pdev, char method[],
+ acpi_handle handle = ACPI_HANDLE(&pdev->dev);
+ union acpi_object *obj;
+ 
++ if (dmi_check_system(dw_i2c_no_acpi_params))
++ return;
++
+ if (ACPI_FAILURE(acpi_evaluate_object(handle, method, NULL, &buf)))
+ return;
+ 
+@@ -253,12 +273,6 @@ static int dw_i2c_probe(struct platform_device *pdev)
+ adap->dev.parent = &pdev->dev;
+ adap->dev.of_node = pdev->dev.of_node;
+ 
+- r = i2c_add_numbered_adapter(adap);
+- if (r) {
+- dev_err(&pdev->dev, "failure adding adapter\n");
+- return r;
+- }
+-
+ if (dev->pm_runtime_disabled) {
+ pm_runtime_forbid(&pdev->dev);
+ } else {
+@@ -268,6 +282,13 @@ static int dw_i2c_probe(struct platform_device *pdev)
+ pm_runtime_enable(&pdev->dev);
+ }
+ 
++ r = i2c_add_numbered_adapter(adap);
++ if (r) {
++ dev_err(&pdev->dev, "failure adding adapter\n");
++ pm_runtime_disable(&pdev->dev);
++ return r;
++ }
++
+ return 0;
+ }
+ 
+diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
+index 5a84bea5b845..d9d022cdfff0 100644
+--- a/drivers/i2c/busses/i2c-rcar.c
++++ b/drivers/i2c/busses/i2c-rcar.c
+@@ -688,15 +688,16 @@ static int rcar_i2c_probe(struct platform_device *pdev)
+ return ret;
+ }
+ 
++ pm_runtime_enable(dev);
++ platform_set_drvdata(pdev, priv);
++
+ ret = i2c_add_numbered_adapter(adap);
+ if (ret < 0) {
+ dev_err(dev, "reg adap failed: %d\n", ret);
++ pm_runtime_disable(dev);
+ return ret;
+ }
+ 
+- pm_runtime_enable(dev);
+- platform_set_drvdata(pdev, priv);
+-
+ dev_info(dev, "probed\n");
+ 
+ return 0;
+ 
+diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
+index 297e9c9ac943..424794271703 100644
+--- a/drivers/i2c/busses/i2c-s3c2410.c
++++ b/drivers/i2c/busses/i2c-s3c2410.c
+@@ -1243,17 +1243,19 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
+ i2c->adap.nr = i2c->pdata->bus_num;
+ i2c->adap.dev.of_node = pdev->dev.of_node;
+ 
++ platform_set_drvdata(pdev, i2c);
++
++ pm_runtime_enable(&pdev->dev);
++
+ ret = i2c_add_numbered_adapter(&i2c->adap);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to add bus to i2c core\n");
++ pm_runtime_disable(&pdev->dev);
+ s3c24xx_i2c_deregister_cpufreq(i2c);
+ clk_unprepare(i2c->clk);
+ return ret;
+ }
+ 
+- platform_set_drvdata(pdev, i2c);
+-
+- pm_runtime_enable(&pdev->dev);
+ pm_runtime_enable(&i2c->adap.dev);
+ 
+ dev_info(&pdev->dev, "%s: S3C I2C adapter\n", dev_name(&i2c->adap.dev));
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index e22e6c892b8a..7073b22d4cb4 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -2959,7 +2959,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
+ metadata_low_callback,
+ pool);
+ if (r)
+- goto out_free_pt;
++ goto out_flags_changed;
+ 
+ pt->callbacks.congested_fn = pool_is_congested;
+ dm_table_add_target_callbacks(ti->table, &pt->callbacks);
+diff --git a/drivers/mfd/max77843.c b/drivers/mfd/max77843.c
+index a354ac677ec7..1074a0d68680 100644
+--- a/drivers/mfd/max77843.c
++++ b/drivers/mfd/max77843.c
+@@ -79,7 +79,7 @@ static int max77843_chg_init(struct max77843 *max77843)
+ if (!max77843->i2c_chg) {
+ dev_err(&max77843->i2c->dev,
+ "Cannot allocate I2C device for Charger\n");
+- return PTR_ERR(max77843->i2c_chg);
++ return -ENODEV;
+ }
+ i2c_set_clientdata(max77843->i2c_chg, max77843);
+ 
+diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h
+index 28df37420da9..ac02c675c59c 100644
+--- a/drivers/net/ethernet/ibm/emac/core.h
++++ b/drivers/net/ethernet/ibm/emac/core.h
+@@ -460,8 +460,8 @@ struct emac_ethtool_regs_subhdr {
+ u32 index;
+ };
+ 
+-#define EMAC_ETHTOOL_REGS_VER 0
+-#define EMAC4_ETHTOOL_REGS_VER 1
+-#define EMAC4SYNC_ETHTOOL_REGS_VER 2
++#define EMAC_ETHTOOL_REGS_VER 3
++#define EMAC4_ETHTOOL_REGS_VER 4
++#define EMAC4SYNC_ETHTOOL_REGS_VER 5
+ 
+ #endif /* __IBM_NEWEMAC_CORE_H */
+diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
+index b62a5e3a1c65..db2c3cdf2c40 100644
+--- a/drivers/net/ppp/pppoe.c
++++ b/drivers/net/ppp/pppoe.c
+@@ -313,7 +313,6 @@ static void pppoe_flush_dev(struct net_device *dev)
+ if (po->pppoe_dev == dev &&
+ sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) {
+ pppox_unbind_sock(sk);
+- sk->sk_state = PPPOX_ZOMBIE;
+ sk->sk_state_change(sk);
+ po->pppoe_dev = NULL;
+ dev_put(dev);
+diff --git a/drivers/pinctrl/freescale/pinctrl-imx25.c b/drivers/pinctrl/freescale/pinctrl-imx25.c
+index faf635654312..293ed4381cc0 100644
+--- a/drivers/pinctrl/freescale/pinctrl-imx25.c
++++ b/drivers/pinctrl/freescale/pinctrl-imx25.c
+@@ -26,7 +26,8 @@
+ #include "pinctrl-imx.h"
+ 
+ enum imx25_pads {
+- MX25_PAD_RESERVE0 = 1,
++ MX25_PAD_RESERVE0 = 0,
++ MX25_PAD_RESERVE1 = 1,
+ MX25_PAD_A10 = 2,
+ MX25_PAD_A13 = 3,
+ MX25_PAD_A14 = 4,
+@@ -169,6 +170,7 @@ enum imx25_pads {
+ /* Pad names for the pinmux subsystem */
+ static const struct pinctrl_pin_desc imx25_pinctrl_pads[] = {
+ IMX_PINCTRL_PIN(MX25_PAD_RESERVE0),
++ IMX_PINCTRL_PIN(MX25_PAD_RESERVE1),
+ IMX_PINCTRL_PIN(MX25_PAD_A10),
+ IMX_PINCTRL_PIN(MX25_PAD_A13),
+ IMX_PINCTRL_PIN(MX25_PAD_A14),
+diff --git a/drivers/xen/preempt.c b/drivers/xen/preempt.c
+index a1800c150839..08cb419eb4e6 100644
+--- a/drivers/xen/preempt.c
++++ b/drivers/xen/preempt.c
+@@ -31,7 +31,7 @@ EXPORT_SYMBOL_GPL(xen_in_preemptible_hcall);
+ asmlinkage __visible void xen_maybe_preempt_hcall(void)
+ {
+ if (unlikely(__this_cpu_read(xen_in_preemptible_hcall)
+- && should_resched())) {
++ && need_resched())) {
+ /*
+ * Clear flag as we may be rescheduled on a different
+ * cpu.
+diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
+index 614aaa1969bd..723470850b94 100644
+--- a/fs/btrfs/backref.c
++++ b/fs/btrfs/backref.c
+@@ -1786,7 +1786,6 @@ static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
+ int found = 0;
+ struct extent_buffer *eb;
+ struct btrfs_inode_extref *extref;
+- struct extent_buffer *leaf;
+ u32 item_size;
+ u32 cur_offset;
+ unsigned long ptr;
+@@ -1814,9 +1813,8 @@ static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
+ btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
+ btrfs_release_path(path);
+ 
+- leaf = path->nodes[0];
+- item_size = btrfs_item_size_nr(leaf, slot);
+- ptr = btrfs_item_ptr_offset(leaf, slot);
++ item_size = btrfs_item_size_nr(eb, slot);
++ ptr = btrfs_item_ptr_offset(eb, slot);
+ cur_offset = 0;
+ 
+ while (cur_offset < item_size) {
+@@ -1830,7 +1828,7 @@ static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
+ if (ret)
+ break;
+ 
+- cur_offset += btrfs_inode_extref_name_len(leaf, extref);
++ cur_offset += btrfs_inode_extref_name_len(eb, extref);
+ cur_offset += sizeof(*extref);
+ }
+ btrfs_tree_read_unlock_blocking(eb);
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 37d456a9a3b8..af3dd3c55ef1 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -4492,6 +4492,11 @@ locked:
+ bctl->flags |= BTRFS_BALANCE_TYPE_MASK;
+ }
+ 
++ if (bctl->flags & ~(BTRFS_BALANCE_ARGS_MASK | BTRFS_BALANCE_TYPE_MASK)) {
++ ret = -EINVAL;
++ goto out_bargs;
++ }
++
+ do_balance:
+ /*
+ * Ownership of bctl and mutually_exclusive_operation_running
+diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
+index ebc31331a837..e1cc5b45069a 100644
+--- a/fs/btrfs/volumes.h
++++ b/fs/btrfs/volumes.h
+@@ -372,6 +372,14 @@ struct map_lookup {
+ #define BTRFS_BALANCE_ARGS_VRANGE (1ULL << 4)
+ #define BTRFS_BALANCE_ARGS_LIMIT (1ULL << 5)
+ 
++#define BTRFS_BALANCE_ARGS_MASK \
++ (BTRFS_BALANCE_ARGS_PROFILES | \
++ BTRFS_BALANCE_ARGS_USAGE | \
++ BTRFS_BALANCE_ARGS_DEVID | \
++ BTRFS_BALANCE_ARGS_DRANGE | \
++ BTRFS_BALANCE_ARGS_VRANGE | \
++ BTRFS_BALANCE_ARGS_LIMIT)
++
+ /*
+ * Profile changing flags. When SOFT is set we won't relocate chunk if
+ * it already has the target profile (even though it may be
+diff --git a/fs/locks.c b/fs/locks.c
+index 653faabb07f4..d3d558ba4da7 100644
+--- a/fs/locks.c
++++ b/fs/locks.c
+@@ -862,12 +862,11 @@ static int posix_locks_deadlock(struct file_lock *caller_fl,
+ * whether or not a lock was successfully freed by testing the return
+ * value for -ENOENT.
+ */
+-static int flock_lock_file(struct file *filp, struct file_lock *request)
++static int flock_lock_inode(struct inode *inode, struct file_lock *request)
+ {
+ struct file_lock *new_fl = NULL;
+ struct file_lock *fl;
+ struct file_lock_context *ctx;
+- struct inode *inode = file_inode(filp);
+ int error = 0;
+ bool found = false;
+ LIST_HEAD(dispose);
+@@ -890,7 +889,7 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
+ goto find_conflict;
+ 
+ list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
+- if (filp != fl->fl_file)
++ if (request->fl_file != fl->fl_file)
+ continue;
+ if (request->fl_type == fl->fl_type)
+ goto out;
+@@ -1164,20 +1163,19 @@ int posix_lock_file(struct file *filp, struct file_lock *fl,
+ EXPORT_SYMBOL(posix_lock_file);
+ 
+ /**
+- * posix_lock_file_wait - Apply a POSIX-style lock to a file
+- * @filp: The file to apply the lock to
++ * posix_lock_inode_wait - Apply a POSIX-style lock to a file
++ * @inode: inode of file to which lock request should be applied
+ * @fl: The lock to be applied
+ *
+- * Add a POSIX style lock to a file.
+- * We merge adjacent & overlapping locks whenever possible.
+- * POSIX locks are sorted by owner task, then by starting address
++ * Variant of posix_lock_file_wait that does not take a filp, and so can be
++ * used after the filp has already been torn down.
+ */
+-int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
++int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl)
+ {
+ int error;
+ might_sleep ();
+ for (;;) {
+- error = posix_lock_file(filp, fl, NULL);
++ error = __posix_lock_file(inode, fl, NULL);
+ if (error != FILE_LOCK_DEFERRED)
+ break;
+ error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
+@@ -1189,7 +1187,7 @@ int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
+ }
+ return error;
+ }
+-EXPORT_SYMBOL(posix_lock_file_wait);
++EXPORT_SYMBOL(posix_lock_inode_wait);
+ 
+ /**
+ * locks_mandatory_locked - Check for an active lock
+@@ -1851,18 +1849,18 @@ int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
+ }
+ 
+ /**
+- * flock_lock_file_wait - Apply a FLOCK-style lock to a file
+- * @filp: The file to apply the lock to
++ * flock_lock_inode_wait - Apply a FLOCK-style lock to a file
++ * @inode: inode of the file to apply to
+ * @fl: The lock to be applied
+ *
+- * Add a FLOCK style lock to a file.
++ * Apply a FLOCK style lock request to an inode.
+ */
+-int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
++int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl)
+ {
+ int error;
+ might_sleep();
+ for (;;) {
+- error = flock_lock_file(filp, fl);
++ error = flock_lock_inode(inode, fl);
+ if (error != FILE_LOCK_DEFERRED)
+ break;
+ error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
+@@ -1874,8 +1872,7 @@ int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
+ }
+ return error;
+ }
+-
+-EXPORT_SYMBOL(flock_lock_file_wait);
++EXPORT_SYMBOL(flock_lock_inode_wait);
+ 
+ /**
+ * sys_flock: - flock() system call.
+@@ -2401,7 +2398,8 @@ locks_remove_flock(struct file *filp)
+ .fl_type = F_UNLCK,
+ .fl_end = OFFSET_MAX,
+ };
+- struct file_lock_context *flctx = file_inode(filp)->i_flctx;
++ struct inode *inode = file_inode(filp);
++ struct file_lock_context *flctx = inode->i_flctx;
+ 
+ if (list_empty(&flctx->flc_flock))
+ return;
+@@ -2409,7 +2407,7 @@ locks_remove_flock(struct file *filp)
+ if (filp->f_op->flock)
+ filp->f_op->flock(filp, F_SETLKW, &fl);
+ else
+- flock_lock_file(filp, &fl);
++ flock_lock_inode(inode, &fl);
+ 
+ if (fl.fl_ops && fl.fl_ops->fl_release_private)
+ fl.fl_ops->fl_release_private(&fl);
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index c245874d7e9d..8f393fcc313b 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -5367,15 +5367,15 @@ static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *
+ return err;
+ }
+ 
+-static int do_vfs_lock(struct file *file, struct file_lock *fl)
++static int do_vfs_lock(struct inode *inode, struct file_lock *fl)
+ {
+ int res = 0;
+ switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
+ case FL_POSIX:
+- res = posix_lock_file_wait(file, fl);
++ res = posix_lock_inode_wait(inode, fl);
+ break;
+ case FL_FLOCK:
+- res = flock_lock_file_wait(file, fl);
++ res = flock_lock_inode_wait(inode, fl);
+ break;
+ default:
+ BUG();
+@@ -5435,7 +5435,7 @@ static void nfs4_locku_done(struct rpc_task *task, void *data)
+ switch (task->tk_status) {
+ case 0:
+ renew_lease(calldata->server, calldata->timestamp);
+- do_vfs_lock(calldata->fl.fl_file, &calldata->fl);
++ do_vfs_lock(calldata->lsp->ls_state->inode, &calldata->fl);
+ if (nfs4_update_lock_stateid(calldata->lsp,
+ &calldata->res.stateid))
+ break;
+@@ -5543,7 +5543,7 @@ static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *
+ mutex_lock(&sp->so_delegreturn_mutex);
+ /* Exclude nfs4_reclaim_open_stateid() - note nesting! */
+ down_read(&nfsi->rwsem);
+- if (do_vfs_lock(request->fl_file, request) == -ENOENT) {
++ if (do_vfs_lock(inode, request) == -ENOENT) {
+ up_read(&nfsi->rwsem);
+ mutex_unlock(&sp->so_delegreturn_mutex);
+ goto out;
+@@ -5684,7 +5684,7 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata)
+ data->timestamp);
+ if (data->arg.new_lock) {
+ data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS);
+- if (do_vfs_lock(data->fl.fl_file, &data->fl) < 0) {
++ if (do_vfs_lock(lsp->ls_state->inode, &data->fl) < 0) {
+ rpc_restart_call_prepare(task);
+ break;
+ }
+@@ -5926,7 +5926,7 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock
+ if (status != 0)
+ goto out;
+ request->fl_flags |= FL_ACCESS;
+- status = do_vfs_lock(request->fl_file, request);
++ status = do_vfs_lock(state->inode, request);
+ if (status < 0)
+ goto out;
+ down_read(&nfsi->rwsem);
+@@ -5934,7 +5934,7 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock
+ /* Yes: cache locks! */
+ /* ...but avoid races with delegation recall... */
+ request->fl_flags = fl_flags & ~FL_SLEEP;
+- status = do_vfs_lock(request->fl_file, request);
++ status = do_vfs_lock(state->inode, request);
+ up_read(&nfsi->rwsem);
+ goto out;
+ }
+diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c
+index cdefaa331a07..c29d9421bd5e 100644
+--- a/fs/nfsd/blocklayout.c
++++ b/fs/nfsd/blocklayout.c
+@@ -56,14 +56,6 @@ nfsd4_block_proc_layoutget(struct inode *inode, const struct svc_fh *fhp,
+ u32 device_generation = 0;
+ int error;
+ 
+- /*
+- * We do not attempt to support I/O smaller than the fs block size,
+- * or not aligned to it.
+- */
+- if (args->lg_minlength < block_size) {
+- dprintk("pnfsd: I/O too small\n");
+- goto out_layoutunavailable;
+- }
+ if (seg->offset & (block_size - 1)) {
+ dprintk("pnfsd: I/O misaligned\n");
+ goto out_layoutunavailable;
+diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
+index eb6f9e6c3075..b6a53e8e526a 100644
+--- a/include/asm-generic/preempt.h
++++ b/include/asm-generic/preempt.h
+@@ -71,9 +71,10 @@ static __always_inline bool __preempt_count_dec_and_test(void)
+ /*
+ * Returns true when we need to resched and can (barring IRQ state).
+ */
+-static __always_inline bool should_resched(void)
++static __always_inline bool should_resched(int preempt_offset)
+ {
+- return unlikely(!preempt_count() && tif_need_resched());
++ return unlikely(preempt_count() == preempt_offset &&
++ tif_need_resched());
+ }
+ 
+ #ifdef CONFIG_PREEMPT
+diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
+index 86d0b25ed054..a89f505c856b 100644
+--- a/include/drm/drm_dp_mst_helper.h
++++ b/include/drm/drm_dp_mst_helper.h
+@@ -253,6 +253,7 @@ struct drm_dp_remote_dpcd_write {
+ u8 *bytes;
+ };
+ 
++#define DP_REMOTE_I2C_READ_MAX_TRANSACTIONS 4
+ struct drm_dp_remote_i2c_read {
+ u8 num_transactions;
+ u8 port_number;
+@@ -262,7 +263,7 @@ struct drm_dp_remote_i2c_read {
+ u8 *bytes;
+ u8 no_stop_bit;
+ u8 i2c_transaction_delay;
+- } transactions[4];
++ } transactions[DP_REMOTE_I2C_READ_MAX_TRANSACTIONS];
+ u8 read_i2c_device_id;
+ u8 num_bytes_read;
+ };
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index f93192333b37..fdc369fa69e8 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -1036,12 +1036,12 @@ extern void locks_remove_file(struct file *);
+ extern void locks_release_private(struct file_lock *);
+ extern void posix_test_lock(struct file *, struct file_lock *);
+ extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *);
+-extern int posix_lock_file_wait(struct file *, struct file_lock *);
++extern int posix_lock_inode_wait(struct inode *, struct file_lock *);
+ extern int posix_unblock_lock(struct file_lock *);
+ extern int vfs_test_lock(struct file *, struct file_lock *);
+ extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *);
+ extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl);
+-extern int flock_lock_file_wait(struct file *filp, struct file_lock *fl);
++extern int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl);
+ extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int type);
+ extern void lease_get_mtime(struct inode *, struct timespec *time);
+ extern int generic_setlease(struct file *, long, struct file_lock **, void **priv);
+@@ -1127,7 +1127,8 @@ static inline int posix_lock_file(struct file *filp, struct file_lock *fl,
+ return -ENOLCK;
+ }
+ 
+-static inline int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
++static inline int posix_lock_inode_wait(struct inode *inode,
++ struct file_lock *fl)
+ {
+ return -ENOLCK;
+ }
+@@ -1153,8 +1154,8 @@ static inline int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
+ return 0;
+ }
+ 
+-static inline int flock_lock_file_wait(struct file *filp,
+- struct file_lock *request)
++static inline int flock_lock_inode_wait(struct inode *inode,
++ struct file_lock *request)
+ {
+ return -ENOLCK;
+ }
+@@ -1192,6 +1193,20 @@ static inline void show_fd_locks(struct seq_file *f,
+ struct file *filp, struct files_struct *files) {}
+ #endif /* !CONFIG_FILE_LOCKING */
+ 
++static inline struct inode *file_inode(const struct file *f)
++{
++ return f->f_inode;
++}
++
++static inline int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
++{
++ return posix_lock_inode_wait(file_inode(filp), fl);
++}
++
++static inline int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
++{
++ return flock_lock_inode_wait(file_inode(filp), fl);
++}
+ 
+ struct fasync_struct {
+ spinlock_t fa_lock;
+@@ -1991,11 +2006,6 @@ extern void ihold(struct inode * inode);
+ extern void iput(struct inode *);
+ extern int generic_update_time(struct inode *, struct timespec *, int);
+ 
+-static inline struct inode *file_inode(const struct file *f)
+-{
+- return f->f_inode;
+-}
+-
+ /* /sys/fs */
+ extern struct kobject *fs_kobj;
+ 
+diff --git a/include/linux/preempt.h b/include/linux/preempt.h
+index de83b4eb1642..8cd6725c5758 100644
+--- a/include/linux/preempt.h
++++ b/include/linux/preempt.h
+@@ -20,7 +20,8 @@
+ #if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
+ extern void preempt_count_add(int val);
+ extern void preempt_count_sub(int val);
+-#define preempt_count_dec_and_test() ({ preempt_count_sub(1); should_resched(); })
++#define preempt_count_dec_and_test() \
++ ({ preempt_count_sub(1); should_resched(0); })
+ #else
+ #define preempt_count_add(val) __preempt_count_add(val)
+ #define preempt_count_sub(val) __preempt_count_sub(val)
+@@ -59,7 +60,7 @@ do { \
+ 
+ #define preempt_check_resched() \
+ do { \
+- if (should_resched()) \
++ if (should_resched(0)) \
+ __preempt_schedule(); \
+ } while (0)
+ 
+diff --git a/include/linux/preempt_mask.h b/include/linux/preempt_mask.h
+index dbeec4d4a3be..5cb25f17331a 100644
+--- a/include/linux/preempt_mask.h
++++ b/include/linux/preempt_mask.h
+@@ -71,13 +71,21 @@
+ */
+ #define in_nmi() (preempt_count() & NMI_MASK)
+ 
++/*
++ * The preempt_count offset after preempt_disable();
++ */
+ #if defined(CONFIG_PREEMPT_COUNT)
+-# define PREEMPT_CHECK_OFFSET 1
++# define PREEMPT_DISABLE_OFFSET PREEMPT_OFFSET
+ #else
+-# define PREEMPT_CHECK_OFFSET 0
++# define PREEMPT_DISABLE_OFFSET 0
+ #endif
+ 
+ /*
++ * The preempt_count offset after spin_lock()
++ */
++#define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET
++
++/*
+ * The preempt_count offset needed for things like:
+ *
+ * spin_lock_bh()
+@@ -90,7 +98,7 @@
+ *
+ * Work as expected.
+ */
+-#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_CHECK_OFFSET)
++#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_LOCK_OFFSET)
+ 
+ /*
+ * Are we running in atomic context? WARNING: this macro cannot
+@@ -106,7 +114,7 @@
+ * (used by the scheduler, *after* releasing the kernel lock)
+ */
+ #define in_atomic_preempt_off() \
+- ((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET)
++ ((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_DISABLE_OFFSET)
+ 
+ #ifdef CONFIG_PREEMPT_COUNT
+ # define preemptible() (preempt_count() == 0 && !irqs_disabled())
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 26a2e6122734..61f4f2d5c882 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -2834,12 +2834,6 @@ extern int _cond_resched(void);
+ 
+ extern int __cond_resched_lock(spinlock_t *lock);
+ 
+-#ifdef CONFIG_PREEMPT_COUNT
+-#define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET
+-#else
+-#define PREEMPT_LOCK_OFFSET 0
+-#endif
+-
+ #define cond_resched_lock(lock) ({ \
+ ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
+ __cond_resched_lock(lock); \
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index eb1c55b8255a..4307e20a4a4a 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -2588,6 +2588,9 @@ static inline void skb_postpull_rcsum(struct sk_buff *skb,
+ {
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
++ else if (skb->ip_summed == CHECKSUM_PARTIAL &&
++ skb_checksum_start_offset(skb) < 0)
++ skb->ip_summed = CHECKSUM_NONE;
+ }
+ 
+ unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
+diff --git a/include/net/af_unix.h b/include/net/af_unix.h
+index a175ba4a7adb..dfe4ddfbb43c 100644
+--- a/include/net/af_unix.h
++++ b/include/net/af_unix.h
+@@ -64,7 +64,11 @@ struct unix_sock {
+ #define UNIX_GC_MAYBE_CYCLE 1
+ struct socket_wq peer_wq;
+ };
+-#define unix_sk(__sk) ((struct unix_sock *)__sk)
++
++static inline struct unix_sock *unix_sk(struct sock *sk)
++{
++ return (struct unix_sock *)sk;
++}
+ 
+ #define peer_wait peer_wq.wait
+ 
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 3a4898ec8c67..ed01a012f8d5 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -826,6 +826,14 @@ static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *s
+ if (sk_rcvqueues_full(sk, limit))
+ return -ENOBUFS;
+ 
++ /*
++ * If the skb was allocated from pfmemalloc reserves, only
++ * allow SOCK_MEMALLOC sockets to use it as this socket is
++ * helping free memory
++ */
++ if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
++ return -ENOMEM;
++
+ __sk_add_backlog(sk, skb);
+ sk->sk_backlog.len += skb->truesize;
+ return 0;
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 8476206a1e19..4d870eb6086b 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -4232,7 +4232,7 @@ SYSCALL_DEFINE0(sched_yield)
+ 
+ int __sched _cond_resched(void)
+ {
+- if (should_resched()) {
++ if (should_resched(0)) {
+ preempt_schedule_common();
+ return 1;
+ }
+@@ -4250,7 +4250,7 @@ EXPORT_SYMBOL(_cond_resched);
+ */
+ int __cond_resched_lock(spinlock_t *lock)
+ {
+- int resched = should_resched();
++ int resched = should_resched(PREEMPT_LOCK_OFFSET);
+ int ret = 0;
+ 
+ lockdep_assert_held(lock);
+@@ -4272,7 +4272,7 @@ int __sched __cond_resched_softirq(void)
+ {
+ BUG_ON(!in_softirq());
+ 
+- if (should_resched()) {
++ if (should_resched(SOFTIRQ_DISABLE_OFFSET)) {
+ local_bh_enable();
+ preempt_schedule_common();
+ local_bh_disable();
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 586ad91300b0..5c01664c26e2 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -1451,13 +1451,13 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
+ timer_stats_timer_set_start_info(&dwork->timer);
+ 
+ dwork->wq = wq;
++ /* timer isn't guaranteed to run in this cpu, record earlier */
++ if (cpu == WORK_CPU_UNBOUND)
++ cpu = raw_smp_processor_id();
+ dwork->cpu = cpu;
+ timer->expires = jiffies + delay;
+ 
+- if (unlikely(cpu != WORK_CPU_UNBOUND))
+- add_timer_on(timer, cpu);
+- else
+- add_timer(timer);
++ add_timer_on(timer, cpu);
+ }
+ 
+ /**
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index a04225d372ba..68dea90334cb 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -3677,6 +3677,7 @@ static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
+ ret = page_counter_memparse(args, "-1", &threshold);
+ if (ret)
+ return ret;
++ threshold <<= PAGE_SHIFT;
+ 
+ mutex_lock(&memcg->thresholds_lock);
+ 
+diff --git a/net/core/ethtool.c b/net/core/ethtool.c
+index 1d00b8922902..4a6824767f3d 100644
+--- a/net/core/ethtool.c
++++ b/net/core/ethtool.c
+@@ -1273,7 +1273,7 @@ static int ethtool_get_strings(struct net_device *dev, void __user *useraddr)
+ 
+ gstrings.len = ret;
+ 
+- data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER);
++ data = kcalloc(gstrings.len, ETH_GSTRING_LEN, GFP_USER);
+ if (!data)
+ return -ENOMEM;
+ 
+diff --git a/net/core/filter.c b/net/core/filter.c
+index bf831a85c315..0fa2613b5e35 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -1526,9 +1526,13 @@ int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
+ goto out;
+ 
+ /* We're copying the filter that has been originally attached,
+- * so no conversion/decode needed anymore.
++ * so no conversion/decode needed anymore. eBPF programs that
++ * have no original program cannot be dumped through this.
+ */
++ ret = -EACCES;
+ fprog = filter->prog->orig_prog;
++ if (!fprog)
++ goto out;
+ 
+ ret = fprog->len;
+ if (!len)
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index a2e4e47b2839..075d2e78c87e 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -2976,11 +2976,12 @@ EXPORT_SYMBOL(skb_append_datato_frags);
+ */
+ unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
+ {
++ unsigned char *data = skb->data;
++
+ BUG_ON(len > skb->len);
+- skb->len -= len;
+- BUG_ON(skb->len < skb->data_len);
+- skb_postpull_rcsum(skb, skb->data, len);
+- return skb->data += len;
++ __skb_pull(skb, len);
++ skb_postpull_rcsum(skb, data, len);
++ return skb->data;
+ }
+ EXPORT_SYMBOL_GPL(skb_pull_rcsum);
+ 
+diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
+index e664706b350c..4d2bc8c6694f 100644
+--- a/net/ipv4/inet_connection_sock.c
++++ b/net/ipv4/inet_connection_sock.c
+@@ -568,21 +568,22 @@ EXPORT_SYMBOL(inet_rtx_syn_ack);
+ static bool reqsk_queue_unlink(struct request_sock_queue *queue,
+ struct request_sock *req)
+ {
+- struct listen_sock *lopt = queue->listen_opt;
+ struct request_sock **prev;
++ struct listen_sock *lopt;
+ bool found = false;
+ 
+ spin_lock(&queue->syn_wait_lock);
+-
+- for (prev = &lopt->syn_table[req->rsk_hash]; *prev != NULL;
+- prev = &(*prev)->dl_next) {
+- if (*prev == req) {
+- *prev = req->dl_next;
+- found = true;
+- break;
++ lopt = queue->listen_opt;
++ if (lopt) {
++ for (prev = &lopt->syn_table[req->rsk_hash]; *prev != NULL;
++ prev = &(*prev)->dl_next) {
++ if (*prev == req) {
++ *prev = req->dl_next;
++ found = true;
++ break;
++ }
+ }
+ }
+-
+ spin_unlock(&queue->syn_wait_lock);
+ if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer))
+ reqsk_put(req);
+@@ -676,20 +677,20 @@ void reqsk_queue_hash_req(struct request_sock_queue *queue,
+ req->num_timeout = 0;
+ req->sk = NULL;
+ 
++ setup_timer(&req->rsk_timer, reqsk_timer_handler, (unsigned long)req);
++ mod_timer_pinned(&req->rsk_timer, jiffies + timeout);
++ req->rsk_hash = hash;
++
+ /* before letting lookups find us, make sure all req fields
+ * are committed to memory and refcnt initialized.
+ */
+ smp_wmb();
+ atomic_set(&req->rsk_refcnt, 2);
+- setup_timer(&req->rsk_timer, reqsk_timer_handler, (unsigned long)req);
+- req->rsk_hash = hash;
+ 
+ spin_lock(&queue->syn_wait_lock);
+ req->dl_next = lopt->syn_table[hash];
+ lopt->syn_table[hash] = req;
+ spin_unlock(&queue->syn_wait_lock);
+-
+- mod_timer_pinned(&req->rsk_timer, jiffies + timeout);
+ }
+ EXPORT_SYMBOL(reqsk_queue_hash_req);
+ 
+diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
+index a29a504492af..e3db498f0233 100644
+--- a/net/l2tp/l2tp_core.c
++++ b/net/l2tp/l2tp_core.c
+@@ -1319,7 +1319,7 @@ static void l2tp_tunnel_del_work(struct work_struct *work)
+ tunnel = container_of(work, struct l2tp_tunnel, del_work);
+ sk = l2tp_tunnel_sock_lookup(tunnel);
+ if (!sk)
+- return;
++ goto out;
+ 
+ sock = sk->sk_socket;
+ 
+@@ -1340,6 +1340,8 @@ static void l2tp_tunnel_del_work(struct work_struct *work)
+ }
+ 
+ l2tp_tunnel_sock_put(sk);
++out:
++ l2tp_tunnel_dec_refcount(tunnel);
+ }
+ 
+ /* Create a socket for the tunnel, if one isn't set up by
+@@ -1639,8 +1641,13 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
+ */
+ int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
+ {
++ l2tp_tunnel_inc_refcount(tunnel);
+ l2tp_tunnel_closeall(tunnel);
+- return (false == queue_work(l2tp_wq, &tunnel->del_work));
++ if (false == queue_work(l2tp_wq, &tunnel->del_work)) {
++ l2tp_tunnel_dec_refcount(tunnel);
++ return 1;
++ }
++ return 0;
+ }
+ EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
+ 
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 980121e75d2e..d139c43ac6e5 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -2683,6 +2683,7 @@ static int netlink_dump(struct sock *sk)
+ struct sk_buff *skb = NULL;
+ struct nlmsghdr *nlh;
+ int len, err = -ENOBUFS;
++ int alloc_min_size;
+ int alloc_size;
+ 
+ mutex_lock(nlk->cb_mutex);
+@@ -2691,9 +2692,6 @@ static int netlink_dump(struct sock *sk)
+ goto errout_skb;
+ }
+ 
+- cb = &nlk->cb;
+- alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
+-
+ if (!netlink_rx_is_mmaped(sk) &&
+ atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
+ goto errout_skb;
+@@ -2703,23 +2701,35 @@ static int netlink_dump(struct sock *sk)
+ * to reduce number of system calls on dump operations, if user
+ * ever provided a big enough buffer.
+ */
+- if (alloc_size < nlk->max_recvmsg_len) {
+- skb = netlink_alloc_skb(sk,
+- nlk->max_recvmsg_len,
+- nlk->portid,
++ cb = &nlk->cb;
++ alloc_min_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
++
++ if (alloc_min_size < nlk->max_recvmsg_len) {
++ alloc_size = nlk->max_recvmsg_len;
++ skb = netlink_alloc_skb(sk, alloc_size, nlk->portid,
+ GFP_KERNEL |
+ __GFP_NOWARN |
+ __GFP_NORETRY);
+- /* available room should be exact amount to avoid MSG_TRUNC */
+- if (skb)
+- skb_reserve(skb, skb_tailroom(skb) -
+- nlk->max_recvmsg_len);
+ }
+- if (!skb)
++ if (!skb) {
++ alloc_size = alloc_min_size;
+ skb = netlink_alloc_skb(sk, alloc_size, nlk->portid,
+ GFP_KERNEL);
++ }
+ if (!skb)
+ goto errout_skb;
++
++ /* Trim skb to allocated size. User is expected to provide buffer as
++ * large as max(min_dump_alloc, 16KiB (mac_recvmsg_len capped at
++ * netlink_recvmsg())). dump will pack as many smaller messages as
++ * could fit within the allocated skb. skb is typically allocated
++ * with larger space than required (could be as much as near 2x the
++ * requested size with align to next power of 2 approach). Allowing
++ * dump to use the excess space makes it difficult for a user to have a
++ * reasonable static buffer based on the expected largest dump of a
++ * single netdev. The outcome is MSG_TRUNC error.
++ */
++ skb_reserve(skb, skb_tailroom(skb) - alloc_size);
+ netlink_skb_set_owner_r(skb, sk);
+ 
+ len = cb->dump(skb, cb);
+diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
+index aa349514e4cb..eed562295c78 100644
+--- a/net/openvswitch/flow_table.c
++++ b/net/openvswitch/flow_table.c
+@@ -92,7 +92,8 @@ struct sw_flow *ovs_flow_alloc(void)
+ 
+ /* Initialize the default stat node. */
+ stats = kmem_cache_alloc_node(flow_stats_cache,
+- GFP_KERNEL | __GFP_ZERO, 0);
++ GFP_KERNEL | __GFP_ZERO,
++ node_online(0) ? 0 : NUMA_NO_NODE);
+ if (!stats)
+ goto err;
+ 
+diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
+index 3f63ceac8e01..844dd85426dc 100644
+--- a/net/sched/act_mirred.c
++++ b/net/sched/act_mirred.c
+@@ -166,6 +166,7 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
+ 
+ skb2->skb_iif = skb->dev->ifindex;
+ skb2->dev = dev;
++ skb_sender_cpu_clear(skb2);
+ err = dev_queue_xmit(skb2);
+ 
+ out:
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+index f9f13a32ddb8..2873b8d65608 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+@@ -146,7 +146,8 @@ int rdma_read_chunk_lcl(struct svcxprt_rdma *xprt,
+ ctxt->read_hdr = head;
+ pages_needed =
+ min_t(int, pages_needed, rdma_read_max_sge(xprt, pages_needed));
+- read = min_t(int, pages_needed << PAGE_SHIFT, rs_length);
++ read = min_t(int, (pages_needed << PAGE_SHIFT) - *page_offset,
++ rs_length);
+ 
+ for (pno = 0; pno < pages_needed; pno++) {
+ int len = min_t(int, rs_length, PAGE_SIZE - pg_off);
+@@ -245,7 +246,8 @@ int rdma_read_chunk_frmr(struct svcxprt_rdma *xprt,
+ ctxt->direction = DMA_FROM_DEVICE;
+ ctxt->frmr = frmr;
+ pages_needed = min_t(int, pages_needed, xprt->sc_frmr_pg_list_len);
+- read = min_t(int, pages_needed << PAGE_SHIFT, rs_length);
++ read = min_t(int, (pages_needed << PAGE_SHIFT) - *page_offset,
++ rs_length);
+ 
+ frmr->kva = page_address(rqstp->rq_arg.pages[pg_no]);
+ frmr->direction = DMA_FROM_DEVICE;
+diff --git a/net/tipc/msg.h b/net/tipc/msg.h
+index e1d3595e2ee9..4cbb0fbad046 100644
+--- a/net/tipc/msg.h
++++ b/net/tipc/msg.h
+@@ -353,7 +353,7 @@ static inline void msg_set_seqno(struct tipc_msg *m, u32 n)
+ static inline u32 msg_importance(struct tipc_msg *m)
+ {
+ if (unlikely(msg_user(m) == MSG_FRAGMENTER))
+- return msg_bits(m, 5, 13, 0x7);
++ return msg_bits(m, 9, 0, 0x7);
+ if (likely(msg_isdata(m) && !msg_errcode(m)))
+ return msg_user(m);
+ return TIPC_SYSTEM_IMPORTANCE;
+@@ -362,7 +362,7 @@ static inline u32 msg_importance(struct tipc_msg *m)
+ static inline void msg_set_importance(struct tipc_msg *m, u32 i)
+ {
+ if (unlikely(msg_user(m) == MSG_FRAGMENTER))
+- msg_set_bits(m, 5, 13, 0x7, i);
++ msg_set_bits(m, 9, 0, 0x7, i);
+ else if (likely(i < TIPC_SYSTEM_IMPORTANCE))
+ msg_set_user(m, i);
+ else
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 06430598cf51..76e66695621c 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -1938,6 +1938,11 @@ static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
+ goto out;
+ }
+ 
++ if (flags & MSG_PEEK)
++ skip = sk_peek_offset(sk, flags);
++ else
++ skip = 0;
++
+ do {
+ int chunk;
+ struct sk_buff *skb, *last;
+@@ -1984,7 +1989,6 @@ again:
+ break;
+ }
+ 
+- skip = sk_peek_offset(sk, flags);
+ while (skip >= unix_skb_len(skb)) {
+ skip -= unix_skb_len(skb);
+ last = skb;
+@@ -2048,6 +2052,16 @@ again:
+ 
+ sk_peek_offset_fwd(sk, chunk);
+ 
++ if (UNIXCB(skb).fp)
++ break;
++
++ skip = 0;
++ last = skb;
++ unix_state_lock(sk);
++ skb = skb_peek_next(skb, &sk->sk_receive_queue);
++ if (skb)
++ goto again;
++ unix_state_unlock(sk);
+ break;
+ }
+ } while (size);