commit:     b2dc6d58c360a6763403f7fd947011a6e225ddf5
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Dec 2 16:21:25 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Dec 2 16:21:25 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=b2dc6d58
Linux patch 4.4.36 0000_README | 4 + 1035_linux-4.4.36.patch | 914 ++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 918 insertions(+) diff --git a/0000_README b/0000_README index 28c74ce..58c7374 100644 --- a/0000_README +++ b/0000_README @@ -183,6 +183,10 @@ Patch: 1034_linux-4.4.35.patch From: http://www.kernel.org Desc: Linux 4.4.35 +Patch: 1035_linux-4.4.36.patch +From: http://www.kernel.org +Desc: Linux 4.4.36 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1035_linux-4.4.36.patch b/1035_linux-4.4.36.patch new file mode 100644 index 0000000..0db6e38 --- /dev/null +++ b/1035_linux-4.4.36.patch @@ -0,0 +1,914 @@ +diff --git a/Makefile b/Makefile +index f88830af1533..705eb9e38fce 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 4 +-SUBLEVEL = 35 ++SUBLEVEL = 36 + EXTRAVERSION = + NAME = Blurry Fish Butt + +diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c +index cda6dbbe9842..fd5979f28ada 100644 +--- a/arch/parisc/kernel/cache.c ++++ b/arch/parisc/kernel/cache.c +@@ -351,6 +351,7 @@ void __init parisc_setup_cache_timing(void) + { + unsigned long rangetime, alltime; + unsigned long size, start; ++ unsigned long threshold; + + alltime = mfctl(16); + flush_data_cache(); +@@ -364,17 +365,12 @@ void __init parisc_setup_cache_timing(void) + printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n", + alltime, size, rangetime); + +- /* Racy, but if we see an intermediate value, it's ok too... */ +- parisc_cache_flush_threshold = size * alltime / rangetime; +- +- parisc_cache_flush_threshold = L1_CACHE_ALIGN(parisc_cache_flush_threshold); +- if (!parisc_cache_flush_threshold) +- parisc_cache_flush_threshold = FLUSH_THRESHOLD; +- +- if (parisc_cache_flush_threshold > cache_info.dc_size) +- parisc_cache_flush_threshold = cache_info.dc_size; +- +- printk(KERN_INFO "Setting cache flush threshold to %lu kB\n", ++ threshold = L1_CACHE_ALIGN(size * alltime / rangetime); ++ if (threshold > cache_info.dc_size) ++ threshold = cache_info.dc_size; ++ if (threshold) ++ parisc_cache_flush_threshold = threshold; ++ printk(KERN_INFO "Cache flush threshold set to %lu KiB\n", + parisc_cache_flush_threshold/1024); + + /* calculate TLB flush threshold */ +@@ -383,7 +379,7 @@ void __init parisc_setup_cache_timing(void) + flush_tlb_all(); + alltime = mfctl(16) - alltime; + +- size = PAGE_SIZE; ++ size = 0; + start = (unsigned long) _text; + rangetime = mfctl(16); + while (start < (unsigned long) _end) { +@@ -396,13 +392,10 @@ void __init parisc_setup_cache_timing(void) + printk(KERN_DEBUG "Whole TLB flush %lu cycles, flushing %lu bytes %lu cycles\n", + alltime, size, rangetime); + +- parisc_tlb_flush_threshold = size * alltime / rangetime; +- parisc_tlb_flush_threshold *= num_online_cpus(); +- parisc_tlb_flush_threshold = PAGE_ALIGN(parisc_tlb_flush_threshold); +- if (!parisc_tlb_flush_threshold) +- parisc_tlb_flush_threshold = FLUSH_TLB_THRESHOLD; +- +- printk(KERN_INFO "Setting TLB flush threshold to %lu kB\n", ++ threshold = PAGE_ALIGN(num_online_cpus() * size * alltime / rangetime); ++ if (threshold) ++ parisc_tlb_flush_threshold = threshold; ++ printk(KERN_INFO "TLB flush threshold set to %lu KiB\n", + parisc_tlb_flush_threshold/1024); + } + +diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S +index b743a80eaba0..675521919229 100644 +--- a/arch/parisc/kernel/pacache.S ++++ 
b/arch/parisc/kernel/pacache.S +@@ -96,7 +96,7 @@ fitmanyloop: /* Loop if LOOP >= 2 */ + + fitmanymiddle: /* Loop if LOOP >= 2 */ + addib,COND(>) -1, %r31, fitmanymiddle /* Adjusted inner loop decr */ +- pitlbe 0(%sr1, %r28) ++ pitlbe %r0(%sr1, %r28) + pitlbe,m %arg1(%sr1, %r28) /* Last pitlbe and addr adjust */ + addib,COND(>) -1, %r29, fitmanymiddle /* Middle loop decr */ + copy %arg3, %r31 /* Re-init inner loop count */ +@@ -139,7 +139,7 @@ fdtmanyloop: /* Loop if LOOP >= 2 */ + + fdtmanymiddle: /* Loop if LOOP >= 2 */ + addib,COND(>) -1, %r31, fdtmanymiddle /* Adjusted inner loop decr */ +- pdtlbe 0(%sr1, %r28) ++ pdtlbe %r0(%sr1, %r28) + pdtlbe,m %arg1(%sr1, %r28) /* Last pdtlbe and addr adjust */ + addib,COND(>) -1, %r29, fdtmanymiddle /* Middle loop decr */ + copy %arg3, %r31 /* Re-init inner loop count */ +@@ -620,12 +620,12 @@ ENTRY(copy_user_page_asm) + /* Purge any old translations */ + + #ifdef CONFIG_PA20 +- pdtlb,l 0(%r28) +- pdtlb,l 0(%r29) ++ pdtlb,l %r0(%r28) ++ pdtlb,l %r0(%r29) + #else + tlb_lock %r20,%r21,%r22 +- pdtlb 0(%r28) +- pdtlb 0(%r29) ++ pdtlb %r0(%r28) ++ pdtlb %r0(%r29) + tlb_unlock %r20,%r21,%r22 + #endif + +@@ -768,10 +768,10 @@ ENTRY(clear_user_page_asm) + /* Purge any old translation */ + + #ifdef CONFIG_PA20 +- pdtlb,l 0(%r28) ++ pdtlb,l %r0(%r28) + #else + tlb_lock %r20,%r21,%r22 +- pdtlb 0(%r28) ++ pdtlb %r0(%r28) + tlb_unlock %r20,%r21,%r22 + #endif + +@@ -852,10 +852,10 @@ ENTRY(flush_dcache_page_asm) + /* Purge any old translation */ + + #ifdef CONFIG_PA20 +- pdtlb,l 0(%r28) ++ pdtlb,l %r0(%r28) + #else + tlb_lock %r20,%r21,%r22 +- pdtlb 0(%r28) ++ pdtlb %r0(%r28) + tlb_unlock %r20,%r21,%r22 + #endif + +@@ -892,10 +892,10 @@ ENTRY(flush_dcache_page_asm) + sync + + #ifdef CONFIG_PA20 +- pdtlb,l 0(%r25) ++ pdtlb,l %r0(%r25) + #else + tlb_lock %r20,%r21,%r22 +- pdtlb 0(%r25) ++ pdtlb %r0(%r25) + tlb_unlock %r20,%r21,%r22 + #endif + +@@ -925,13 +925,18 @@ ENTRY(flush_icache_page_asm) + depwi 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */ + #endif + +- /* Purge any old translation */ ++ /* Purge any old translation. Note that the FIC instruction ++ * may use either the instruction or data TLB. Given that we ++ * have a flat address space, it's not clear which TLB will be ++ * used. So, we purge both entries. 
*/ + + #ifdef CONFIG_PA20 ++ pdtlb,l %r0(%r28) + pitlb,l %r0(%sr4,%r28) + #else + tlb_lock %r20,%r21,%r22 +- pitlb (%sr4,%r28) ++ pdtlb %r0(%r28) ++ pitlb %r0(%sr4,%r28) + tlb_unlock %r20,%r21,%r22 + #endif + +@@ -970,10 +975,12 @@ ENTRY(flush_icache_page_asm) + sync + + #ifdef CONFIG_PA20 ++ pdtlb,l %r0(%r28) + pitlb,l %r0(%sr4,%r25) + #else + tlb_lock %r20,%r21,%r22 +- pitlb (%sr4,%r25) ++ pdtlb %r0(%r28) ++ pitlb %r0(%sr4,%r25) + tlb_unlock %r20,%r21,%r22 + #endif + +diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c +index b9402c9b3454..af0d7fae7aa7 100644 +--- a/arch/parisc/kernel/pci-dma.c ++++ b/arch/parisc/kernel/pci-dma.c +@@ -95,8 +95,8 @@ static inline int map_pte_uncached(pte_t * pte, + + if (!pte_none(*pte)) + printk(KERN_ERR "map_pte_uncached: page already exists\n"); +- set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC)); + purge_tlb_start(flags); ++ set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC)); + pdtlb_kernel(orig_vaddr); + purge_tlb_end(flags); + vaddr += PAGE_SIZE; +diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c +index 81d6f6391944..2e66a887788e 100644 +--- a/arch/parisc/kernel/setup.c ++++ b/arch/parisc/kernel/setup.c +@@ -334,6 +334,10 @@ static int __init parisc_init(void) + /* tell PDC we're Linux. Nevermind failure. */ + pdc_stable_write(0x40, &osid, sizeof(osid)); + ++ /* start with known state */ ++ flush_cache_all_local(); ++ flush_tlb_all_local(NULL); ++ + processor_init(); + #ifdef CONFIG_SMP + pr_info("CPU(s): %d out of %d %s at %d.%06d MHz online\n", +diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c +index 178989e6d3e3..ea960d660917 100644 +--- a/arch/tile/kernel/time.c ++++ b/arch/tile/kernel/time.c +@@ -218,8 +218,8 @@ void do_timer_interrupt(struct pt_regs *regs, int fault_num) + */ + unsigned long long sched_clock(void) + { +- return clocksource_cyc2ns(get_cycles(), +- sched_clock_mult, SCHED_CLOCK_SHIFT); ++ return mult_frac(get_cycles(), ++ sched_clock_mult, 1ULL << SCHED_CLOCK_SHIFT); + } + + int setup_profiling_timer(unsigned int multiplier) +diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c +index 5fa652c16a50..f49e98062ea5 100644 +--- a/arch/x86/kvm/emulate.c ++++ b/arch/x86/kvm/emulate.c +@@ -2093,16 +2093,10 @@ static int em_iret(struct x86_emulate_ctxt *ctxt) + static int em_jmp_far(struct x86_emulate_ctxt *ctxt) + { + int rc; +- unsigned short sel, old_sel; +- struct desc_struct old_desc, new_desc; +- const struct x86_emulate_ops *ops = ctxt->ops; ++ unsigned short sel; ++ struct desc_struct new_desc; + u8 cpl = ctxt->ops->cpl(ctxt); + +- /* Assignment of RIP may only fail in 64-bit mode */ +- if (ctxt->mode == X86EMUL_MODE_PROT64) +- ops->get_segment(ctxt, &old_sel, &old_desc, NULL, +- VCPU_SREG_CS); +- + memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2); + + rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, +@@ -2112,12 +2106,10 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt) + return rc; + + rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc); +- if (rc != X86EMUL_CONTINUE) { +- WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64); +- /* assigning eip failed; restore the old cs */ +- ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS); +- return rc; +- } ++ /* Error handling is not implemented. 
*/ ++ if (rc != X86EMUL_CONTINUE) ++ return X86EMUL_UNHANDLEABLE; ++ + return rc; + } + +@@ -2177,14 +2169,8 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt) + { + int rc; + unsigned long eip, cs; +- u16 old_cs; + int cpl = ctxt->ops->cpl(ctxt); +- struct desc_struct old_desc, new_desc; +- const struct x86_emulate_ops *ops = ctxt->ops; +- +- if (ctxt->mode == X86EMUL_MODE_PROT64) +- ops->get_segment(ctxt, &old_cs, &old_desc, NULL, +- VCPU_SREG_CS); ++ struct desc_struct new_desc; + + rc = emulate_pop(ctxt, &eip, ctxt->op_bytes); + if (rc != X86EMUL_CONTINUE) +@@ -2201,10 +2187,10 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt) + if (rc != X86EMUL_CONTINUE) + return rc; + rc = assign_eip_far(ctxt, eip, &new_desc); +- if (rc != X86EMUL_CONTINUE) { +- WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64); +- ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS); +- } ++ /* Error handling is not implemented. */ ++ if (rc != X86EMUL_CONTINUE) ++ return X86EMUL_UNHANDLEABLE; ++ + return rc; + } + +diff --git a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c +index 84b96d319909..d09544e826f6 100644 +--- a/arch/x86/kvm/irq_comm.c ++++ b/arch/x86/kvm/irq_comm.c +@@ -38,6 +38,15 @@ static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e, + bool line_status) + { + struct kvm_pic *pic = pic_irqchip(kvm); ++ ++ /* ++ * XXX: rejecting pic routes when pic isn't in use would be better, ++ * but the default routing table is installed while kvm->arch.vpic is ++ * NULL and KVM_CREATE_IRQCHIP can race with KVM_IRQ_LINE. ++ */ ++ if (!pic) ++ return -1; ++ + return kvm_pic_set_irq(pic, e->irqchip.pin, irq_source_id, level); + } + +@@ -46,6 +55,10 @@ static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e, + bool line_status) + { + struct kvm_ioapic *ioapic = kvm->arch.vioapic; ++ ++ if (!ioapic) ++ return -1; ++ + return kvm_ioapic_set_irq(ioapic, e->irqchip.pin, irq_source_id, level, + line_status); + } +diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c +index 79bab6fd76bb..6755d4768f59 100644 +--- a/drivers/gpu/drm/radeon/atombios_crtc.c ++++ b/drivers/gpu/drm/radeon/atombios_crtc.c +@@ -275,6 +275,8 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) + atombios_enable_crtc_memreq(crtc, ATOM_ENABLE); + atombios_blank_crtc(crtc, ATOM_DISABLE); + drm_vblank_post_modeset(dev, radeon_crtc->crtc_id); ++ /* Make sure vblank interrupt is still enabled if needed */ ++ radeon_irq_set(rdev); + radeon_crtc_load_lut(crtc); + break; + case DRM_MODE_DPMS_STANDBY: +diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +index 678b4386540d..89f22bdde298 100644 +--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c ++++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +@@ -331,6 +331,8 @@ static void radeon_crtc_dpms(struct drm_crtc *crtc, int mode) + WREG32_P(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl, ~(mask | crtc_ext_cntl)); + } + drm_vblank_post_modeset(dev, radeon_crtc->crtc_id); ++ /* Make sure vblank interrupt is still enabled if needed */ ++ radeon_irq_set(rdev); + radeon_crtc_load_lut(crtc); + break; + case DRM_MODE_DPMS_STANDBY: +diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c +index 565bb2c140ed..e913a930ac80 100644 +--- a/drivers/iommu/dmar.c ++++ b/drivers/iommu/dmar.c +@@ -326,7 +326,9 @@ static int dmar_pci_bus_notifier(struct notifier_block *nb, + struct pci_dev *pdev = to_pci_dev(data); + struct dmar_pci_notify_info *info; + +- /* Only care about add/remove events for physical 
functions */ ++ /* Only care about add/remove events for physical functions. ++ * For VFs we actually do the lookup based on the corresponding ++ * PF in device_to_iommu() anyway. */ + if (pdev->is_virtfn) + return NOTIFY_DONE; + if (action != BUS_NOTIFY_ADD_DEVICE && +diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c +index 5baa830ce49f..59e9abd3345e 100644 +--- a/drivers/iommu/intel-iommu.c ++++ b/drivers/iommu/intel-iommu.c +@@ -885,7 +885,13 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf + return NULL; + + if (dev_is_pci(dev)) { ++ struct pci_dev *pf_pdev; ++ + pdev = to_pci_dev(dev); ++ /* VFs aren't listed in scope tables; we need to look up ++ * the PF instead to find the IOMMU. */ ++ pf_pdev = pci_physfn(pdev); ++ dev = &pf_pdev->dev; + segment = pci_domain_nr(pdev->bus); + } else if (has_acpi_companion(dev)) + dev = &ACPI_COMPANION(dev)->dev; +@@ -898,6 +904,13 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf + for_each_active_dev_scope(drhd->devices, + drhd->devices_cnt, i, tmp) { + if (tmp == dev) { ++ /* For a VF use its original BDF# not that of the PF ++ * which we used for the IOMMU lookup. Strictly speaking ++ * we could do this for all PCI devices; we only need to ++ * get the BDF# from the scope table for ACPI matches. */ ++ if (pdev->is_virtfn) ++ goto got_pdev; ++ + *bus = drhd->devices[i].bus; + *devfn = drhd->devices[i].devfn; + goto out; +diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c +index d9939fa9b588..f929879ecae6 100644 +--- a/drivers/iommu/intel-svm.c ++++ b/drivers/iommu/intel-svm.c +@@ -39,10 +39,18 @@ int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu) + struct page *pages; + int order; + +- order = ecap_pss(iommu->ecap) + 7 - PAGE_SHIFT; +- if (order < 0) +- order = 0; +- ++ /* Start at 2 because it's defined as 2^(1+PSS) */ ++ iommu->pasid_max = 2 << ecap_pss(iommu->ecap); ++ ++ /* Eventually I'm promised we will get a multi-level PASID table ++ * and it won't have to be physically contiguous. Until then, ++ * limit the size because 8MiB contiguous allocations can be hard ++ * to come by. The limit of 0x20000, which is 1MiB for each of ++ * the PASID and PASID-state tables, is somewhat arbitrary. */ ++ if (iommu->pasid_max > 0x20000) ++ iommu->pasid_max = 0x20000; ++ ++ order = get_order(sizeof(struct pasid_entry) * iommu->pasid_max); + pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order); + if (!pages) { + pr_warn("IOMMU: %s: Failed to allocate PASID table\n", +@@ -53,6 +61,8 @@ int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu) + pr_info("%s: Allocated order %d PASID table.\n", iommu->name, order); + + if (ecap_dis(iommu->ecap)) { ++ /* Just making it explicit... 
*/ ++ BUILD_BUG_ON(sizeof(struct pasid_entry) != sizeof(struct pasid_state_entry)); + pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order); + if (pages) + iommu->pasid_state_table = page_address(pages); +@@ -68,11 +78,7 @@ int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu) + + int intel_svm_free_pasid_tables(struct intel_iommu *iommu) + { +- int order; +- +- order = ecap_pss(iommu->ecap) + 7 - PAGE_SHIFT; +- if (order < 0) +- order = 0; ++ int order = get_order(sizeof(struct pasid_entry) * iommu->pasid_max); + + if (iommu->pasid_table) { + free_pages((unsigned long)iommu->pasid_table, order); +@@ -371,8 +377,8 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ + } + svm->iommu = iommu; + +- if (pasid_max > 2 << ecap_pss(iommu->ecap)) +- pasid_max = 2 << ecap_pss(iommu->ecap); ++ if (pasid_max > iommu->pasid_max) ++ pasid_max = iommu->pasid_max; + + /* Do not use PASID 0 in caching mode (virtualised IOMMU) */ + ret = idr_alloc(&iommu->pasid_idr, svm, +diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c +index a77643954523..e59838231703 100644 +--- a/drivers/misc/mei/bus.c ++++ b/drivers/misc/mei/bus.c +@@ -144,7 +144,7 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length) + mutex_lock(&bus->device_lock); + + if (!mei_cl_is_connected(cl)) { +- rets = -EBUSY; ++ rets = -ENODEV; + goto out; + } + } +diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h +index 4e8069866c85..a2661381ddfc 100644 +--- a/drivers/misc/mei/hw-me-regs.h ++++ b/drivers/misc/mei/hw-me-regs.h +@@ -66,9 +66,6 @@ + #ifndef _MEI_HW_MEI_REGS_H_ + #define _MEI_HW_MEI_REGS_H_ + +-#define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */ +-#define MEI_DEV_ID_KBP_2 0xA2BB /* Kaby Point 2 */ +- + /* + * MEI device IDs + */ +@@ -124,6 +121,10 @@ + #define MEI_DEV_ID_SPT_2 0x9D3B /* Sunrise Point 2 */ + #define MEI_DEV_ID_SPT_H 0xA13A /* Sunrise Point H */ + #define MEI_DEV_ID_SPT_H_2 0xA13B /* Sunrise Point H 2 */ ++ ++#define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */ ++#define MEI_DEV_ID_KBP_2 0xA2BB /* Kaby Point 2 */ ++ + /* + * MEI HW Section + */ +diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c +index 25b1997a62cb..36333750c512 100644 +--- a/drivers/misc/mei/hw-me.c ++++ b/drivers/misc/mei/hw-me.c +@@ -1258,8 +1258,14 @@ static bool mei_me_fw_type_nm(struct pci_dev *pdev) + static bool mei_me_fw_type_sps(struct pci_dev *pdev) + { + u32 reg; +- /* Read ME FW Status check for SPS Firmware */ +- pci_read_config_dword(pdev, PCI_CFG_HFS_1, ®); ++ unsigned int devfn; ++ ++ /* ++ * Read ME FW Status register to check for SPS Firmware ++ * The SPS FW is only signaled in pci function 0 ++ */ ++ devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0); ++ pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_1, ®); + /* if bits [19:16] = 15, running SPS Firmware */ + return (reg & 0xf0000) == 0xf0000; + } +diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c +index 80f9afcb1382..4ef189a7a2fb 100644 +--- a/drivers/misc/mei/main.c ++++ b/drivers/misc/mei/main.c +@@ -207,7 +207,7 @@ static ssize_t mei_read(struct file *file, char __user *ubuf, + + mutex_lock(&dev->device_lock); + if (!mei_cl_is_connected(cl)) { +- rets = -EBUSY; ++ rets = -ENODEV; + goto out; + } + } +diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c +index 0af3d7d30419..01e20384ac44 100644 +--- a/drivers/misc/mei/pci-me.c ++++ b/drivers/misc/mei/pci-me.c +@@ -84,8 +84,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = { + + {MEI_PCI_DEVICE(MEI_DEV_ID_SPT, 
mei_me_pch8_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, mei_me_pch8_cfg)}, +- {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, mei_me_pch8_cfg)}, +- {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, mei_me_pch8_cfg)}, ++ {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, mei_me_pch8_sps_cfg)}, ++ {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, mei_me_pch8_sps_cfg)}, + + {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, mei_me_pch8_cfg)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, mei_me_pch8_cfg)}, +diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c +index 2d867c5bfd9f..8cead04f26d6 100644 +--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c ++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c +@@ -3706,6 +3706,11 @@ _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc, + } + } + ++static inline bool ata_12_16_cmd(struct scsi_cmnd *scmd) ++{ ++ return (scmd->cmnd[0] == ATA_12 || scmd->cmnd[0] == ATA_16); ++} ++ + /** + * _scsih_flush_running_cmds - completing outstanding commands. + * @ioc: per adapter object +@@ -3727,6 +3732,9 @@ _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc) + if (!scmd) + continue; + count++; ++ if (ata_12_16_cmd(scmd)) ++ scsi_internal_device_unblock(scmd->device, ++ SDEV_RUNNING); + mpt3sas_base_free_smid(ioc, smid); + scsi_dma_unmap(scmd); + if (ioc->pci_error_recovery) +@@ -3831,8 +3839,6 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status) + SAM_STAT_CHECK_CONDITION; + } + +- +- + /** + * scsih_qcmd - main scsi request entry point + * @scmd: pointer to scsi command object +@@ -3859,6 +3865,13 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) + if (ioc->logging_level & MPT_DEBUG_SCSI) + scsi_print_command(scmd); + ++ /* ++ * Lock the device for any subsequent command until command is ++ * done. ++ */ ++ if (ata_12_16_cmd(scmd)) ++ scsi_internal_device_block(scmd->device); ++ + sas_device_priv_data = scmd->device->hostdata; + if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { + scmd->result = DID_NO_CONNECT << 16; +@@ -4431,6 +4444,9 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) + if (scmd == NULL) + return 1; + ++ if (ata_12_16_cmd(scmd)) ++ scsi_internal_device_unblock(scmd->device, SDEV_RUNNING); ++ + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + + if (mpi_reply == NULL) { +diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c +index 965d0e240dcb..ba4a2a1eb3ff 100644 +--- a/drivers/usb/chipidea/core.c ++++ b/drivers/usb/chipidea/core.c +@@ -926,6 +926,7 @@ static int ci_hdrc_probe(struct platform_device *pdev) + if (!ci) + return -ENOMEM; + ++ spin_lock_init(&ci->lock); + ci->dev = dev; + ci->platdata = dev_get_platdata(dev); + ci->imx28_write_fix = !!(ci->platdata->flags & +diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c +index 68fc5fce4cc5..d8a045fc1fdb 100644 +--- a/drivers/usb/chipidea/udc.c ++++ b/drivers/usb/chipidea/udc.c +@@ -1884,8 +1884,6 @@ static int udc_start(struct ci_hdrc *ci) + struct usb_otg_caps *otg_caps = &ci->platdata->ci_otg_caps; + int retval = 0; + +- spin_lock_init(&ci->lock); +- + ci->gadget.ops = &usb_gadget_ops; + ci->gadget.speed = USB_SPEED_UNKNOWN; + ci->gadget.max_speed = USB_SPEED_HIGH; +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c +index 976195e748a3..fe7452f0f38a 100644 +--- a/drivers/usb/serial/cp210x.c ++++ b/drivers/usb/serial/cp210x.c +@@ -130,6 +130,7 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */ + { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet 
Innovation Ingeni ZigBee USB Device */ + { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */ ++ { USB_DEVICE(0x10C4, 0x8962) }, /* Brim Brothers charging dock */ + { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */ + { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */ + { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */ +diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c +index 494167fe6a2c..d3d6ec455151 100644 +--- a/drivers/usb/serial/ftdi_sio.c ++++ b/drivers/usb/serial/ftdi_sio.c +@@ -1012,6 +1012,8 @@ static const struct usb_device_id id_table_combined[] = { + { USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) }, + { USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) }, + { USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) }, ++ { USB_DEVICE(TI_VID, TI_CC3200_LAUNCHPAD_PID), ++ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, + { } /* Terminating entry */ + }; + +diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h +index 21011c0a4c64..48ee04c94a75 100644 +--- a/drivers/usb/serial/ftdi_sio_ids.h ++++ b/drivers/usb/serial/ftdi_sio_ids.h +@@ -596,6 +596,12 @@ + #define STK541_PID 0x2109 /* Zigbee Controller */ + + /* ++ * Texas Instruments ++ */ ++#define TI_VID 0x0451 ++#define TI_CC3200_LAUNCHPAD_PID 0xC32A /* SimpleLink Wi-Fi CC3200 LaunchPad */ ++ ++/* + * Blackfin gnICE JTAG + * http://docs.blackfin.uclinux.org/doku.php?id=hw:jtag:gnice + */ +diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c +index 5e67f63b2e46..02f86dd1a340 100644 +--- a/drivers/usb/storage/transport.c ++++ b/drivers/usb/storage/transport.c +@@ -919,10 +919,15 @@ int usb_stor_CB_transport(struct scsi_cmnd *srb, struct us_data *us) + + /* COMMAND STAGE */ + /* let's send the command via the control pipe */ ++ /* ++ * Command is sometime (f.e. after scsi_eh_prep_cmnd) on the stack. ++ * Stack may be vmallocated. So no DMA for us. Make a copy. 
++ */ ++ memcpy(us->iobuf, srb->cmnd, srb->cmd_len); + result = usb_stor_ctrl_transfer(us, us->send_ctrl_pipe, + US_CBI_ADSC, + USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0, +- us->ifnum, srb->cmnd, srb->cmd_len); ++ us->ifnum, us->iobuf, srb->cmd_len); + + /* check the return code for the command */ + usb_stor_dbg(us, "Call to usb_stor_ctrl_transfer() returned %d\n", +diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c +index 52a28311e2a4..48efe62e1302 100644 +--- a/fs/nfs/callback.c ++++ b/fs/nfs/callback.c +@@ -261,7 +261,7 @@ static int nfs_callback_up_net(int minorversion, struct svc_serv *serv, + } + + ret = -EPROTONOSUPPORT; +- if (minorversion == 0) ++ if (!IS_ENABLED(CONFIG_NFS_V4_1) || minorversion == 0) + ret = nfs4_callback_up_net(serv, net); + else if (xprt->ops->bc_up) + ret = xprt->ops->bc_up(serv, net); +diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h +index 2d9b650047a5..d49e26c6cdc7 100644 +--- a/include/linux/intel-iommu.h ++++ b/include/linux/intel-iommu.h +@@ -429,6 +429,7 @@ struct intel_iommu { + struct page_req_dsc *prq; + unsigned char prq_name[16]; /* Name for PRQ interrupt */ + struct idr pasid_idr; ++ u32 pasid_max; + #endif + struct q_inval *qi; /* Queued invalidation info */ + u32 *iommu_state; /* Store iommu states between suspend and resume.*/ +diff --git a/lib/mpi/mpi-pow.c b/lib/mpi/mpi-pow.c +index 5464c8744ea9..e24388a863a7 100644 +--- a/lib/mpi/mpi-pow.c ++++ b/lib/mpi/mpi-pow.c +@@ -64,8 +64,13 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod) + if (!esize) { + /* Exponent is zero, result is 1 mod MOD, i.e., 1 or 0 + * depending on if MOD equals 1. */ +- rp[0] = 1; + res->nlimbs = (msize == 1 && mod->d[0] == 1) ? 0 : 1; ++ if (res->nlimbs) { ++ if (mpi_resize(res, 1) < 0) ++ goto enomem; ++ rp = res->d; ++ rp[0] = 1; ++ } + res->sign = 0; + goto leave; + } +diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c +index 9aba9e93c0a2..ee9082792530 100644 +--- a/net/core/flow_dissector.c ++++ b/net/core/flow_dissector.c +@@ -949,4 +949,4 @@ static int __init init_default_flow_dissectors(void) + return 0; + } + +-late_initcall_sync(init_default_flow_dissectors); ++core_initcall(init_default_flow_dissectors); +diff --git a/net/wireless/core.h b/net/wireless/core.h +index a618b4b86fa4..47a967fed8ff 100644 +--- a/net/wireless/core.h ++++ b/net/wireless/core.h +@@ -72,6 +72,7 @@ struct cfg80211_registered_device { + struct list_head bss_list; + struct rb_root bss_tree; + u32 bss_generation; ++ u32 bss_entries; + struct cfg80211_scan_request *scan_req; /* protected by RTNL */ + struct sk_buff *scan_msg; + struct cfg80211_sched_scan_request __rcu *sched_scan_req; +diff --git a/net/wireless/scan.c b/net/wireless/scan.c +index 14d5369eb778..8dde12a11725 100644 +--- a/net/wireless/scan.c ++++ b/net/wireless/scan.c +@@ -56,6 +56,19 @@ + * also linked into the probe response struct. + */ + ++/* ++ * Limit the number of BSS entries stored in mac80211. Each one is ++ * a bit over 4k at most, so this limits to roughly 4-5M of memory. ++ * If somebody wants to really attack this though, they'd likely ++ * use small beacons, and only one type of frame, limiting each of ++ * the entries to a much smaller size (in order to generate more ++ * entries in total, so overhead is bigger.) 
++ */ ++static int bss_entries_limit = 1000; ++module_param(bss_entries_limit, int, 0644); ++MODULE_PARM_DESC(bss_entries_limit, ++ "limit to number of scan BSS entries (per wiphy, default 1000)"); ++ + #define IEEE80211_SCAN_RESULT_EXPIRE (30 * HZ) + + static void bss_free(struct cfg80211_internal_bss *bss) +@@ -136,6 +149,10 @@ static bool __cfg80211_unlink_bss(struct cfg80211_registered_device *rdev, + + list_del_init(&bss->list); + rb_erase(&bss->rbn, &rdev->bss_tree); ++ rdev->bss_entries--; ++ WARN_ONCE((rdev->bss_entries == 0) ^ list_empty(&rdev->bss_list), ++ "rdev bss entries[%d]/list[empty:%d] corruption\n", ++ rdev->bss_entries, list_empty(&rdev->bss_list)); + bss_ref_put(rdev, bss); + return true; + } +@@ -162,6 +179,40 @@ static void __cfg80211_bss_expire(struct cfg80211_registered_device *rdev, + rdev->bss_generation++; + } + ++static bool cfg80211_bss_expire_oldest(struct cfg80211_registered_device *rdev) ++{ ++ struct cfg80211_internal_bss *bss, *oldest = NULL; ++ bool ret; ++ ++ lockdep_assert_held(&rdev->bss_lock); ++ ++ list_for_each_entry(bss, &rdev->bss_list, list) { ++ if (atomic_read(&bss->hold)) ++ continue; ++ ++ if (!list_empty(&bss->hidden_list) && ++ !bss->pub.hidden_beacon_bss) ++ continue; ++ ++ if (oldest && time_before(oldest->ts, bss->ts)) ++ continue; ++ oldest = bss; ++ } ++ ++ if (WARN_ON(!oldest)) ++ return false; ++ ++ /* ++ * The callers make sure to increase rdev->bss_generation if anything ++ * gets removed (and a new entry added), so there's no need to also do ++ * it here. ++ */ ++ ++ ret = __cfg80211_unlink_bss(rdev, oldest); ++ WARN_ON(!ret); ++ return ret; ++} ++ + void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, + bool send_message) + { +@@ -687,6 +738,7 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev, + const u8 *ie; + int i, ssidlen; + u8 fold = 0; ++ u32 n_entries = 0; + + ies = rcu_access_pointer(new->pub.beacon_ies); + if (WARN_ON(!ies)) +@@ -710,6 +762,12 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev, + /* This is the bad part ... 
*/ + + list_for_each_entry(bss, &rdev->bss_list, list) { ++ /* ++ * we're iterating all the entries anyway, so take the ++ * opportunity to validate the list length accounting ++ */ ++ n_entries++; ++ + if (!ether_addr_equal(bss->pub.bssid, new->pub.bssid)) + continue; + if (bss->pub.channel != new->pub.channel) +@@ -738,6 +796,10 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev, + new->pub.beacon_ies); + } + ++ WARN_ONCE(n_entries != rdev->bss_entries, ++ "rdev bss entries[%d]/list[len:%d] corruption\n", ++ rdev->bss_entries, n_entries); ++ + return true; + } + +@@ -890,7 +952,14 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev, + } + } + ++ if (rdev->bss_entries >= bss_entries_limit && ++ !cfg80211_bss_expire_oldest(rdev)) { ++ kfree(new); ++ goto drop; ++ } ++ + list_add_tail(&new->list, &rdev->bss_list); ++ rdev->bss_entries++; + rb_insert_bss(rdev, new); + found = new; + } +diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c +index dc0027b28b04..53426a6ee6dc 100644 +--- a/security/apparmor/domain.c ++++ b/security/apparmor/domain.c +@@ -623,8 +623,8 @@ int aa_change_hat(const char *hats[], int count, u64 token, bool permtest) + /* released below */ + cred = get_current_cred(); + cxt = cred_cxt(cred); +- profile = aa_cred_profile(cred); +- previous_profile = cxt->previous; ++ profile = aa_get_newest_profile(aa_cred_profile(cred)); ++ previous_profile = aa_get_newest_profile(cxt->previous); + + if (unconfined(profile)) { + info = "unconfined"; +@@ -720,6 +720,8 @@ audit: + out: + aa_put_profile(hat); + kfree(name); ++ aa_put_profile(profile); ++ aa_put_profile(previous_profile); + put_cred(cred); + + return error;
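
The tile sched_clock() hunk above replaces clocksource_cyc2ns(), which computes ((u64)cycles * mult) >> shift and can wrap once the raw cycle counter grows large, with mult_frac(), which splits the operation into quotient and remainder parts. A minimal user-space sketch of that split multiplication, using illustrative constants rather than the real tile values:

#include <stdint.h>
#include <stdio.h>

/* Same shape as the kernel's mult_frac(x, numer, denom): split the
 * multiply so no intermediate product needs more than 64 bits, as long
 * as (x % denom) * numer still fits. */
static uint64_t mult_frac_u64(uint64_t x, uint64_t numer, uint64_t denom)
{
	uint64_t quot = x / denom;
	uint64_t rem  = x % denom;

	return quot * numer + rem * numer / denom;
}

int main(void)
{
	/* Illustrative values only, not the tile constants. */
	uint64_t cycles = 1ULL << 60;
	uint64_t mult   = 1000;
	uint64_t shift  = 10;

	uint64_t naive = (cycles * mult) >> shift;  /* wraps: 2^60 * 1000 > 2^64 */
	uint64_t safe  = mult_frac_u64(cycles, mult, 1ULL << shift);

	printf("naive = %llu\nsafe  = %llu\n",
	       (unsigned long long)naive, (unsigned long long)safe);
	return 0;
}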
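
The intel-svm hunks size the PASID tables from iommu->pasid_max instead of deriving an allocation order directly from ecap_pss. A worked example of the new arithmetic, assuming an 8-byte pasid_entry and 4 KiB pages (typical values, not read from any particular machine), which lines up with the patch comment's "1MiB for each of the PASID and PASID-state tables":

#include <stdio.h>

int main(void)
{
	unsigned long pasid_max  = 0x20000;                /* clamp applied by the patch          */
	unsigned long entry_size = 8;                      /* sizeof(struct pasid_entry), assumed */
	unsigned long bytes      = pasid_max * entry_size; /* 0x100000 = 1 MiB                    */

	/* get_order(): smallest order such that (PAGE_SIZE << order) >= bytes. */
	int order = 0;
	while ((4096UL << order) < bytes)
		order++;

	printf("table = %lu bytes, order = %d\n", bytes, order); /* 1048576 bytes, order 8 */
	return 0;
}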
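
The cfg80211 changes cap stored scan results at bss_entries_limit (1000 per wiphy by default) and, once the cap is reached, free the oldest entry that is not held before adding a new one; if nothing can be expired, the new entry is dropped. A standalone sketch of that policy over a plain array, ignoring the kernel's list/rbtree bookkeeping and the extra hidden-SSID check:

#include <stdbool.h>

#define LIMIT 4

struct bss {
	unsigned long ts;  /* last time this BSS was seen       */
	bool held;         /* referenced elsewhere, must not go */
	bool used;
};

static struct bss store[LIMIT];

/* Drop the oldest unheld entry; return false if every entry is held,
 * in which case the caller refuses the new insertion instead. */
static bool expire_oldest(void)
{
	int oldest = -1;

	for (int i = 0; i < LIMIT; i++) {
		if (!store[i].used || store[i].held)
			continue;
		if (oldest < 0 || store[i].ts < store[oldest].ts)
			oldest = i;
	}
	if (oldest < 0)
		return false;
	store[oldest].used = false;
	return true;
}

static bool insert_bss(unsigned long ts)
{
	int slot = -1;

	for (int i = 0; i < LIMIT; i++)
		if (!store[i].used)
			slot = i;

	if (slot < 0) {
		if (!expire_oldest())
			return false;  /* everything held: drop the new entry */
		for (int i = 0; i < LIMIT; i++)
			if (!store[i].used)
				slot = i;
	}
	store[slot] = (struct bss){ .ts = ts, .used = true };
	return true;
}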