diff --git a/Makefile b/Makefile
index e5cefec..ff695f0 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 13
 SUBLEVEL = 11
-EXTRAVERSION = -ckt28
+EXTRAVERSION = -ckt29
 NAME = King of Alienated Frog Porn
 
 # *DOCUMENTATION*
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index c99b108..749e88f 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -56,6 +56,14 @@ endif
 
 comma = ,
 
+#
+# The Scalar Replacement of Aggregates (SRA) optimization pass in GCC 4.9 and
+# later may result in code being generated that handles signed short and signed
+# char struct members incorrectly. So disable it.
+# (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65932)
+#
+KBUILD_CFLAGS  += $(call cc-option,-fno-ipa-sra)
+
 # This selects which instruction set is used.
 # Note that GCC does not numerically define an architecture version
 # macro, but instead defines a whole series of macros which makes
diff --git a/arch/arm/boot/dts/omap5-uevm.dts b/arch/arm/boot/dts/omap5-uevm.dts
index 002fa70..1186a50 100644
--- a/arch/arm/boot/dts/omap5-uevm.dts
+++ b/arch/arm/boot/dts/omap5-uevm.dts
@@ -111,8 +111,8 @@
 
        i2c5_pins: pinmux_i2c5_pins {
                pinctrl-single,pins = <
-                       0x184 (PIN_INPUT | MUX_MODE0)           /* i2c5_scl */
-                       0x186 (PIN_INPUT | MUX_MODE0)           /* i2c5_sda */
+                       0x186 (PIN_INPUT | MUX_MODE0)           /* i2c5_scl */
+                       0x188 (PIN_INPUT | MUX_MODE0)           /* i2c5_sda */
                >;
        };
 
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 04d6388..1e5a4fd 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -353,12 +353,17 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
                 */
                thumb = handler & 1;
 
-#if __LINUX_ARM_ARCH__ >= 7
+#if __LINUX_ARM_ARCH__ >= 6
                /*
-                * Clear the If-Then Thumb-2 execution state
-                * ARM spec requires this to be all 000s in ARM mode
-                * Snapdragon S4/Krait misbehaves on a Thumb=>ARM
-                * signal transition without this.
+                * Clear the If-Then Thumb-2 execution state.  ARM spec
+                * requires this to be all 000s in ARM mode.  Snapdragon
+                * S4/Krait misbehaves on a Thumb=>ARM signal transition
+                * without this.
+                *
+                * We must do this whenever we are running on a Thumb-2
+                * capable CPU, which includes ARMv6T2.  However, we elect
+                * to do this whenever we're on an ARMv6 or later CPU for
+                * simplicity.
                 */
                cpsr &= ~PSR_IT_MASK;
 #endif
diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S
index b6f6137..2ec794d 100644
--- a/arch/arm/kvm/interrupts_head.S
+++ b/arch/arm/kvm/interrupts_head.S
@@ -494,8 +494,7 @@ vcpu        .req    r0              @ vcpu pointer always in r0
 
        mrc     p15, 0, r2, c14, c3, 1  @ CNTV_CTL
        str     r2, [vcpu, #VCPU_TIMER_CNTV_CTL]
-       bic     r2, #1                  @ Clear ENABLE
-       mcr     p15, 0, r2, c14, c3, 1  @ CNTV_CTL
+
        isb
 
        mrrc    p15, 3, r2, r3, c14     @ CNTV_CVAL
@@ -508,6 +507,9 @@ vcpu        .req    r0              @ vcpu pointer always in r0
        mcrr    p15, 4, r2, r2, c14     @ CNTVOFF
 
 1:
+       mov     r2, #0                  @ Clear ENABLE
+       mcr     p15, 0, r2, c14, c3, 1  @ CNTV_CTL
+
 #endif
        @ Allow physical timer/counter access for the host
        mrc     p15, 4, r2, c14, c1, 0  @ CNTHCTL
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 239635b..a29f98a 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -129,6 +129,22 @@ menu "Bus support"
 config ARM_AMBA
        bool
 
+config ARM64_ERRATUM_843419
+       bool "Cortex-A53: 843419: A load or store might access an incorrect address"
+       depends on MODULES
+       default y
+       help
+         This option builds kernel modules using the large memory model in
+         order to avoid the use of the ADRP instruction, which can cause
+         a subsequent memory access to use an incorrect address on Cortex-A53
+         parts up to r0p4.
+
+         Note that the kernel itself must be linked with a version of ld
+         which fixes potentially affected ADRP instructions through the
+         use of veneers.
+
+         If unsure, say Y.
+
 endmenu
 
 menu "Kernel Features"
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 2fceb71..0ab1a34 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -34,6 +34,10 @@ comma = ,
 
 CHECKFLAGS     += -D__aarch64__
 
+ifeq ($(CONFIG_ARM64_ERRATUM_843419), y)
+CFLAGS_MODULE  += -mcmodel=large
+endif
+
 # Default value
 head-y         := arch/arm64/kernel/head.o
 
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index c68cca5..0ddb13a 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -203,6 +203,11 @@ CPU_LE(    movk    x0, #0x30d0, lsl #16    )       // Clear EE and E0E on LE systems
        msr     hstr_el2, xzr                   // Disable CP15 traps to EL2
 #endif
 
+       /* EL2 debug */
+       mrs     x0, pmcr_el0                    // Disable debug access traps
+       ubfx    x0, x0, #11, #5                 // to EL2 and allow access to
+       msr     mdcr_el2, x0                    // all PMU counters from EL1
+
        /* Stage-2 translation */
        msr     vttbr_el2, xzr
 
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index e2ad0d8..373c208 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -393,12 +393,14 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
                        ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
                                             INSN_IMM_ADR);
                        break;
+#ifndef CONFIG_ARM64_ERRATUM_843419
                case R_AARCH64_ADR_PREL_PG_HI21_NC:
                        overflow_check = false;
                case R_AARCH64_ADR_PREL_PG_HI21:
                        ovf = reloc_insn_imm(RELOC_OP_PAGE, loc, val, 12, 21,
                                             INSN_IMM_ADR);
                        break;
+#endif
                case R_AARCH64_ADD_ABS_LO12_NC:
                case R_AARCH64_LDST8_ABS_LO12_NC:
                        overflow_check = false;
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index 7ed72dc..db6c2eb 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -204,14 +204,32 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
 
 /*
  * VFP save/restore code.
+ *
+ * We have to be careful with endianness, since the fpsimd context-switch
+ * code operates on 128-bit (Q) register values whereas the compat ABI
+ * uses an array of 64-bit (D) registers. Consequently, we need to swap
+ * the two halves of each Q register when running on a big-endian CPU.
  */
+union __fpsimd_vreg {
+       __uint128_t     raw;
+       struct {
+#ifdef __AARCH64EB__
+               u64     hi;
+               u64     lo;
+#else
+               u64     lo;
+               u64     hi;
+#endif
+       };
+};
+
 static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame)
 {
        struct fpsimd_state *fpsimd = &current->thread.fpsimd_state;
        compat_ulong_t magic = VFP_MAGIC;
        compat_ulong_t size = VFP_STORAGE_SIZE;
        compat_ulong_t fpscr, fpexc;
-       int err = 0;
+       int i, err = 0;
 
        /*
         * Save the hardware registers to the fpsimd_state structure.
@@ -227,10 +245,15 @@ static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame)
        /*
         * Now copy the FP registers. Since the registers are packed,
         * we can copy the prefix we want (V0-V15) as it is.
-        * FIXME: Won't work if big endian.
         */
-       err |= __copy_to_user(&frame->ufp.fpregs, fpsimd->vregs,
-                             sizeof(frame->ufp.fpregs));
+       for (i = 0; i < ARRAY_SIZE(frame->ufp.fpregs); i += 2) {
+               union __fpsimd_vreg vreg = {
+                       .raw = fpsimd->vregs[i >> 1],
+               };
+
+               __put_user_error(vreg.lo, &frame->ufp.fpregs[i], err);
+               __put_user_error(vreg.hi, &frame->ufp.fpregs[i + 1], err);
+       }
 
        /* Create an AArch32 fpscr from the fpsr and the fpcr. */
        fpscr = (fpsimd->fpsr & VFP_FPSCR_STAT_MASK) |
@@ -255,7 +278,7 @@ static int compat_restore_vfp_context(struct compat_vfp_sigframe __user *frame)
        compat_ulong_t magic = VFP_MAGIC;
        compat_ulong_t size = VFP_STORAGE_SIZE;
        compat_ulong_t fpscr;
-       int err = 0;
+       int i, err = 0;
 
        __get_user_error(magic, &frame->magic, err);
        __get_user_error(size, &frame->size, err);
@@ -265,12 +288,14 @@ static int compat_restore_vfp_context(struct compat_vfp_sigframe __user *frame)
        if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
                return -EINVAL;
 
-       /*
-        * Copy the FP registers into the start of the fpsimd_state.
-        * FIXME: Won't work if big endian.
-        */
-       err |= __copy_from_user(fpsimd.vregs, frame->ufp.fpregs,
-                               sizeof(frame->ufp.fpregs));
+       /* Copy the FP registers into the start of the fpsimd_state. */
+       for (i = 0; i < ARRAY_SIZE(frame->ufp.fpregs); i += 2) {
+               union __fpsimd_vreg vreg;
+
+               __get_user_error(vreg.lo, &frame->ufp.fpregs[i], err);
+               __get_user_error(vreg.hi, &frame->ufp.fpregs[i + 1], err);
+               fpsimd.vregs[i >> 1] = vreg.raw;
+       }
 
        /* Extract the fpsr and the fpcr from the fpscr */
        __get_user_error(fpscr, &frame->ufp.fpscr, err);
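A rough stand-alone illustration of the union trick used in the hunks above (a user-space sketch, not part of the patch; the byte-order macros, anonymous struct and 128-bit type are GCC/Clang extensions assumed to be available): the member that lands on the low-order half of the 128-bit value depends on endianness, so naming the halves lets the copy loop stay endian-agnostic.

    #include <stdint.h>
    #include <stdio.h>

    /* Same idea as __fpsimd_vreg: view one 128-bit Q-register value as two
     * 64-bit D-register halves, with the member order chosen so that "lo"
     * and "hi" always name the numeric halves regardless of byte order. */
    union vreg {
            unsigned __int128 raw;                  /* compiler extension */
            struct {
    #if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
                    uint64_t hi;
                    uint64_t lo;
    #else
                    uint64_t lo;
                    uint64_t hi;
    #endif
            };
    };

    int main(void)
    {
            union vreg v;

            v.raw = ((unsigned __int128)0x1122334455667788ULL << 64) |
                    0x99aabbccddeeff00ULL;

            /* Prints lo=99aabbccddeeff00 hi=1122334455667788 on either byte order. */
            printf("lo=%016llx hi=%016llx\n",
                   (unsigned long long)v.lo, (unsigned long long)v.hi);
            return 0;
    }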
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 3b47c36..a1621ff 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -485,8 +485,6 @@ CPU_BE(     rev     w5, w5 )
        mrs     x3, cntv_ctl_el0
        and     x3, x3, #3
        str     w3, [x0, #VCPU_TIMER_CNTV_CTL]
-       bic     x3, x3, #1              // Clear Enable
-       msr     cntv_ctl_el0, x3
 
        isb
 
@@ -494,6 +492,9 @@ CPU_BE(     rev     w5, w5 )
        str     x3, [x0, #VCPU_TIMER_CNTV_CVAL]
 
 1:
+       // Disable the virtual timer
+       msr     cntv_ctl_el0, xzr
+
        // Allow physical timer/counter access for the host
        mrs     x2, cnthctl_el2
        orr     x2, x2, #3
diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/hugepage-hash64.c
index 7d86c86..9836e80 100644
--- a/arch/powerpc/mm/hugepage-hash64.c
+++ b/arch/powerpc/mm/hugepage-hash64.c
@@ -136,7 +136,6 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
        BUG_ON(index >= 4096);
 
        vpn = hpt_vpn(ea, vsid, ssize);
-       hash = hpt_hash(vpn, shift, ssize);
        hpte_slot_array = get_hpte_slot_array(pmdp);
        if (psize == MMU_PAGE_4K) {
                /*
@@ -151,6 +150,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
        valid = hpte_valid(hpte_slot_array, index);
        if (valid) {
                /* update the hpte bits */
+               hash = hpt_hash(vpn, shift, ssize);
                hidx =  hpte_hash_index(hpte_slot_array, index);
                if (hidx & _PTEIDX_SECONDARY)
                        hash = ~hash;
@@ -176,6 +176,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
        if (!valid) {
                unsigned long hpte_group;
 
+               hash = hpt_hash(vpn, shift, ssize);
                /* insert new entry */
                pa = pmd_pfn(__pmd(old_pmd)) << PAGE_SHIFT;
                new_pmd |= _PAGE_HASHPTE;
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index 5bd801b..2f32872 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -108,6 +108,7 @@ static void pnv_teardown_msi_irqs(struct pci_dev *pdev)
        struct pci_controller *hose = pci_bus_to_host(pdev->bus);
        struct pnv_phb *phb = hose->private_data;
        struct msi_desc *entry;
+       irq_hw_number_t hwirq;
 
        if (WARN_ON(!phb))
                return;
@@ -115,10 +116,10 @@ static void pnv_teardown_msi_irqs(struct pci_dev *pdev)
        list_for_each_entry(entry, &pdev->msi_list, list) {
                if (entry->irq == NO_IRQ)
                        continue;
+               hwirq = virq_to_hw(entry->irq);
                irq_set_msi_desc(entry->irq, NULL);
-               msi_bitmap_free_hwirqs(&phb->msi_bmp,
-                       virq_to_hw(entry->irq) - phb->msi_base, 1);
                irq_dispose_mapping(entry->irq);
+               msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, 1);
        }
 }
 #endif /* CONFIG_PCI_MSI */
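The same ordering fix recurs in the fsl_msi, mpic_pasemi_msi, mpic_u3msi and ppc4xx_msi hunks below: the hardware IRQ number is read while the virq mapping still exists, and the bitmap free is moved after irq_dispose_mapping(). A toy user-space sketch of that ordering (stand-in stubs only, not the powerpc API):

    #include <stdio.h>

    #define NR_VIRQS 8

    /* Toy virq -> hwirq table standing in for the kernel's IRQ mapping. */
    static long hwirq_of[NR_VIRQS] = { [3] = 42 };  /* virq 3 maps to hwirq 42 */

    static long virq_to_hw(int virq)          { return hwirq_of[virq]; }
    static void irq_dispose_mapping(int virq) { hwirq_of[virq] = -1; }
    static void msi_bitmap_free_hwirqs(long hwirq)
    {
            printf("freeing hwirq %ld\n", hwirq);
    }

    int main(void)
    {
            int virq = 3;

            /* Capture the hwirq while the mapping is still valid ... */
            long hwirq = virq_to_hw(virq);

            /* ... tear down the virq mapping first ... */
            irq_dispose_mapping(virq);

            /* ... and only then return the hwirq to the allocator. */
            msi_bitmap_free_hwirqs(hwirq);    /* frees 42, not a stale value */
            return 0;
    }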
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index 77efbae..4a9b367 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -121,15 +121,16 @@ static void fsl_teardown_msi_irqs(struct pci_dev *pdev)
 {
        struct msi_desc *entry;
        struct fsl_msi *msi_data;
+       irq_hw_number_t hwirq;
 
        list_for_each_entry(entry, &pdev->msi_list, list) {
                if (entry->irq == NO_IRQ)
                        continue;
+               hwirq = virq_to_hw(entry->irq);
                msi_data = irq_get_chip_data(entry->irq);
                irq_set_msi_desc(entry->irq, NULL);
-               msi_bitmap_free_hwirqs(&msi_data->bitmap,
-                                      virq_to_hw(entry->irq), 1);
                irq_dispose_mapping(entry->irq);
+               msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
        }
 
        return;
diff --git a/arch/powerpc/sysdev/mpic_pasemi_msi.c b/arch/powerpc/sysdev/mpic_pasemi_msi.c
index 38e6238..e873616 100644
--- a/arch/powerpc/sysdev/mpic_pasemi_msi.c
+++ b/arch/powerpc/sysdev/mpic_pasemi_msi.c
@@ -74,6 +74,7 @@ static int pasemi_msi_check_device(struct pci_dev *pdev, int nvec, int type)
 static void pasemi_msi_teardown_msi_irqs(struct pci_dev *pdev)
 {
        struct msi_desc *entry;
+       irq_hw_number_t hwirq;
 
        pr_debug("pasemi_msi_teardown_msi_irqs, pdev %p\n", pdev);
 
@@ -81,10 +82,10 @@ static void pasemi_msi_teardown_msi_irqs(struct pci_dev *pdev)
                if (entry->irq == NO_IRQ)
                        continue;
 
+               hwirq = virq_to_hw(entry->irq);
                irq_set_msi_desc(entry->irq, NULL);
-               msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap,
-                                      virq_to_hw(entry->irq), ALLOC_CHUNK);
                irq_dispose_mapping(entry->irq);
+               msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, hwirq, ALLOC_CHUNK);
        }
 
        return;
diff --git a/arch/powerpc/sysdev/mpic_u3msi.c b/arch/powerpc/sysdev/mpic_u3msi.c
index 9a7aa0e..dfc3486 100644
--- a/arch/powerpc/sysdev/mpic_u3msi.c
+++ b/arch/powerpc/sysdev/mpic_u3msi.c
@@ -124,15 +124,16 @@ static int u3msi_msi_check_device(struct pci_dev *pdev, int nvec, int type)
 static void u3msi_teardown_msi_irqs(struct pci_dev *pdev)
 {
        struct msi_desc *entry;
+       irq_hw_number_t hwirq;
 
         list_for_each_entry(entry, &pdev->msi_list, list) {
                if (entry->irq == NO_IRQ)
                        continue;
 
+               hwirq = virq_to_hw(entry->irq);
                irq_set_msi_desc(entry->irq, NULL);
-               msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap,
-                                      virq_to_hw(entry->irq), 1);
                irq_dispose_mapping(entry->irq);
+               msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, hwirq, 1);
        }
 
        return;
diff --git a/arch/powerpc/sysdev/ppc4xx_msi.c b/arch/powerpc/sysdev/ppc4xx_msi.c
index 43948da..c3e6512 100644
--- a/arch/powerpc/sysdev/ppc4xx_msi.c
+++ b/arch/powerpc/sysdev/ppc4xx_msi.c
@@ -121,16 +121,17 @@ void ppc4xx_teardown_msi_irqs(struct pci_dev *dev)
 {
        struct msi_desc *entry;
        struct ppc4xx_msi *msi_data = &ppc4xx_msi;
+       irq_hw_number_t hwirq;
 
        dev_dbg(&dev->dev, "PCIE-MSI: tearing down msi irqs\n");
 
        list_for_each_entry(entry, &dev->msi_list, list) {
                if (entry->irq == NO_IRQ)
                        continue;
+               hwirq = virq_to_hw(entry->irq);
                irq_set_msi_desc(entry->irq, NULL);
-               msi_bitmap_free_hwirqs(&msi_data->bitmap,
-                               virq_to_hw(entry->irq), 1);
                irq_dispose_mapping(entry->irq);
+               msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
        }
 }
 
diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
index 59cea18..87c2dcf 100644
--- a/arch/x86/include/uapi/asm/msr-index.h
+++ b/arch/x86/include/uapi/asm/msr-index.h
@@ -232,6 +232,7 @@
 /* C1E active bits in int pending message */
 #define K8_INTP_C1E_ACTIVE_MASK                0x18000000
 #define MSR_K8_TSEG_ADDR               0xc0010112
+#define MSR_K8_TSEG_MASK               0xc0010113
 #define K8_MTRRFIXRANGE_DRAM_ENABLE    0x00040000 /* MtrrFixDramEn bit    */
 #define K8_MTRRFIXRANGE_DRAM_MODIFY    0x00080000 /* MtrrFixDramModEn bit */
 #define K8_MTRR_RDMEM_WRMEM_MASK       0x18181818 /* Mask: RdMem|WrMem    */
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index f9e7786..0530b6d 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -352,6 +352,13 @@ static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
        apic_write(APIC_LVTT, lvtt_value);
 
        if (lvtt_value & APIC_LVT_TIMER_TSCDEADLINE) {
+               /*
+                * See Intel SDM: TSC-Deadline Mode chapter. In xAPIC mode,
+                * writing to the APIC LVTT and TSC_DEADLINE MSR isn't serialized.
+                * According to Intel, MFENCE can do the serialization here.
+                */
+               asm volatile("mfence" : : : "memory");
+
                printk_once(KERN_DEBUG "TSC deadline timer enabled\n");
                return;
        }
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 8ca354c..95e7a29 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -1709,7 +1709,18 @@ END(error_exit)
 /* Runs on exception stack */
 ENTRY(nmi)
        INTR_FRAME
+       /*
+        * Fix up the exception frame if we're on Xen.
+        * PARAVIRT_ADJUST_EXCEPTION_FRAME is guaranteed to push at most
+        * one value to the stack on native, so it may clobber the rdx
+        * scratch slot, but it won't clobber any of the important
+        * slots past it.
+        *
+        * Xen is a different story, because the Xen frame itself overlaps
+        * the "NMI executing" variable.
+        */
        PARAVIRT_ADJUST_EXCEPTION_FRAME
+
        /*
         * We allow breakpoints in NMIs. If a breakpoint occurs, then
         * the iretq it performs will take us out of NMI context.
@@ -1761,9 +1772,12 @@ ENTRY(nmi)
         * we don't want to enable interrupts, because then we'll end
         * up in an awkward situation in which IRQs are on but NMIs
         * are off.
+        *
+        * We also must not push anything to the stack before switching
+        * stacks lest we corrupt the "NMI executing" variable.
         */
 
-       SWAPGS
+       SWAPGS_UNSAFE_STACK
        cld
        movq    %rsp, %rdx
        movq    PER_CPU_VAR(kernel_stack), %rsp
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 1b10af8..45c2045 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -40,10 +40,18 @@
 #include <asm/timer.h>
 #include <asm/special_insns.h>
 
-/* nop stub */
-void _paravirt_nop(void)
-{
-}
+/*
+ * nop stub, which must not clobber anything *including the stack* to
+ * avoid confusing the entry prologues.
+ */
+extern void _paravirt_nop(void);
+asm (".pushsection .entry.text, \"ax\"\n"
+     ".global _paravirt_nop\n"
+     "_paravirt_nop:\n\t"
+     "ret\n\t"
+     ".size _paravirt_nop, . - _paravirt_nop\n\t"
+     ".type _paravirt_nop, @function\n\t"
+     ".popsection");
 
 /* identity function, which can be inlined */
 u32 _paravirt_ident_32(u32 x)
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index d809c06..b0fb64f 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -20,6 +20,7 @@
 #include <asm/hypervisor.h>
 #include <asm/nmi.h>
 #include <asm/x86_init.h>
+#include <asm/geode.h>
 
 unsigned int __read_mostly cpu_khz;    /* TSC clocks / usec, not used here */
 EXPORT_SYMBOL(cpu_khz);
@@ -812,15 +813,17 @@ EXPORT_SYMBOL_GPL(mark_tsc_unstable);
 
 static void __init check_system_tsc_reliable(void)
 {
-#ifdef CONFIG_MGEODE_LX
-       /* RTSC counts during suspend */
+#if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC)
+       if (is_geode_lx()) {
+               /* RTSC counts during suspend */
 #define RTSC_SUSP 0x100
-       unsigned long res_low, res_high;
+               unsigned long res_low, res_high;
 
-       rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
-       /* Geode_LX - the OLPC CPU has a very reliable TSC */
-       if (res_low & RTSC_SUSP)
-               tsc_clocksource_reliable = 1;
+               rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
+               /* Geode_LX - the OLPC CPU has a very reliable TSC */
+               if (res_low & RTSC_SUSP)
+                       tsc_clocksource_reliable = 1;
+       }
 #endif
        if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
                tsc_clocksource_reliable = 1;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index d1a0937..8f83ea4 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2377,6 +2377,8 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
        case MSR_IA32_LASTINTFROMIP:
        case MSR_IA32_LASTINTTOIP:
        case MSR_K8_SYSCFG:
+       case MSR_K8_TSEG_ADDR:
+       case MSR_K8_TSEG_MASK:
        case MSR_K7_HWCR:
        case MSR_VM_HSAVE_PA:
        case MSR_K7_EVNTSEL0:
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index be728c8..68b8eac 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -809,13 +809,15 @@ static enum drm_connector_status qxl_conn_detect(
                drm_connector_to_qxl_output(connector);
        struct drm_device *ddev = connector->dev;
        struct qxl_device *qdev = ddev->dev_private;
-       int connected;
+       bool connected = false;
 
        /* The first monitor is always connected */
-       connected = (output->index == 0) ||
-                   (qdev->client_monitors_config &&
-                    qdev->client_monitors_config->count > output->index &&
-                    qxl_head_enabled(&qdev->client_monitors_config->heads[output->index]));
+       if (!qdev->client_monitors_config) {
+               if (output->index == 0)
+                       connected = true;
+       } else
+               connected = qdev->client_monitors_config->count > output->index &&
+                    qxl_head_enabled(&qdev->client_monitors_config->heads[output->index]);
 
        DRM_DEBUG("#%d connected: %d\n", output->index, connected);
        if (!connected)
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index 96ff69e..e3f9909 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -350,6 +350,10 @@ static const u16 NCT6775_REG_TEMP_CRIT[ARRAY_SIZE(nct6775_temp_label) - 1]
 
 /* NCT6776 specific data */
 
+/* STEP_UP_TIME and STEP_DOWN_TIME regs are swapped for all chips but NCT6775 */
+#define NCT6776_REG_FAN_STEP_UP_TIME NCT6775_REG_FAN_STEP_DOWN_TIME
+#define NCT6776_REG_FAN_STEP_DOWN_TIME NCT6775_REG_FAN_STEP_UP_TIME
+
 static const s8 NCT6776_ALARM_BITS[] = {
        0, 1, 2, 3, 8, 21, 20, 16,      /* in0.. in7 */
        17, -1, -1, -1, -1, -1, -1,     /* in8..in14 */
@@ -3476,8 +3480,8 @@ static int nct6775_probe(struct platform_device *pdev)
                data->REG_FAN_PULSES = NCT6776_REG_FAN_PULSES;
                data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT;
                data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME;
-               data->REG_FAN_TIME[1] = NCT6775_REG_FAN_STEP_UP_TIME;
-               data->REG_FAN_TIME[2] = NCT6775_REG_FAN_STEP_DOWN_TIME;
+               data->REG_FAN_TIME[1] = NCT6776_REG_FAN_STEP_UP_TIME;
+               data->REG_FAN_TIME[2] = NCT6776_REG_FAN_STEP_DOWN_TIME;
                data->REG_TOLERANCE_H = NCT6776_REG_TOLERANCE_H;
                data->REG_PWM[0] = NCT6775_REG_PWM;
                data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT;
@@ -3548,8 +3552,8 @@ static int nct6775_probe(struct platform_device *pdev)
                data->REG_FAN_PULSES = NCT6779_REG_FAN_PULSES;
                data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT;
                data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME;
-               data->REG_FAN_TIME[1] = NCT6775_REG_FAN_STEP_UP_TIME;
-               data->REG_FAN_TIME[2] = NCT6775_REG_FAN_STEP_DOWN_TIME;
+               data->REG_FAN_TIME[1] = NCT6776_REG_FAN_STEP_UP_TIME;
+               data->REG_FAN_TIME[2] = NCT6776_REG_FAN_STEP_DOWN_TIME;
                data->REG_TOLERANCE_H = NCT6776_REG_TOLERANCE_H;
                data->REG_PWM[0] = NCT6775_REG_PWM;
                data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT;
@@ -3624,8 +3628,8 @@ static int nct6775_probe(struct platform_device *pdev)
                data->REG_FAN_PULSES = NCT6779_REG_FAN_PULSES;
                data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT;
                data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME;
-               data->REG_FAN_TIME[1] = NCT6775_REG_FAN_STEP_UP_TIME;
-               data->REG_FAN_TIME[2] = NCT6775_REG_FAN_STEP_DOWN_TIME;
+               data->REG_FAN_TIME[1] = NCT6776_REG_FAN_STEP_UP_TIME;
+               data->REG_FAN_TIME[2] = NCT6776_REG_FAN_STEP_DOWN_TIME;
                data->REG_TOLERANCE_H = NCT6776_REG_TOLERANCE_H;
                data->REG_PWM[0] = NCT6775_REG_PWM;
                data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT;
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index bd470fd..b4b7a9d 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -2592,9 +2592,16 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
 static int
 isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
 {
-       int ret;
+       struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
+       int ret = 0;
 
        switch (state) {
+       case ISTATE_REMOVE:
+               spin_lock_bh(&conn->cmd_lock);
+               list_del_init(&cmd->i_conn_node);
+               spin_unlock_bh(&conn->cmd_lock);
+               isert_put_cmd(isert_cmd, true);
+               break;
        case ISTATE_SEND_NOPIN_WANT_RESPONSE:
                ret = isert_put_nopin(cmd, conn, false);
                break;
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 1d4da74..3b7b7b2 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -779,7 +779,7 @@ int usbnet_stop (struct net_device *net)
 {
        struct usbnet           *dev = netdev_priv(net);
        struct driver_info      *info = dev->driver_info;
-       int                     retval, pm;
+       int                     retval, pm, mpn;
 
        clear_bit(EVENT_DEV_OPEN, &dev->flags);
        netif_stop_queue (net);
@@ -810,6 +810,8 @@ int usbnet_stop (struct net_device *net)
 
        usbnet_purge_paused_rxq(dev);
 
+       mpn = !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags);
+
        /* deferred work (task, timer, softirq) must also stop.
         * can't flush_scheduled_work() until we drop rtnl (later),
         * else workers could deadlock; so make workers a NOP.
@@ -820,8 +822,7 @@ int usbnet_stop (struct net_device *net)
        if (!pm)
                usb_autopm_put_interface(dev->intf);
 
-       if (info->manage_power &&
-           !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags))
+       if (info->manage_power && mpn)
                info->manage_power(dev, 0);
        else
                usb_autopm_put_interface(dev->intf);
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 045876f..1086fd8 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2167,10 +2167,6 @@ static void vxlan_setup(struct net_device *dev)
 
        eth_hw_addr_random(dev);
        ether_setup(dev);
-       if (vxlan->default_dst.remote_ip.sa.sa_family == AF_INET6)
-               dev->needed_headroom = ETH_HLEN + VXLAN6_HEADROOM;
-       else
-               dev->needed_headroom = ETH_HLEN + VXLAN_HEADROOM;
 
        dev->netdev_ops = &vxlan_netdev_ops;
        dev->destructor = free_netdev;
@@ -2547,8 +2543,12 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
 
                dev->needed_headroom = lowerdev->hard_header_len +
                                       (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
-       } else if (use_ipv6)
+       } else if (use_ipv6) {
                vxlan->flags |= VXLAN_F_IPV6;
+               dev->needed_headroom = ETH_HLEN + VXLAN6_HEADROOM;
+       } else {
+               dev->needed_headroom = ETH_HLEN + VXLAN_HEADROOM;
+       }
 
        if (data[IFLA_VXLAN_TOS])
                vxlan->tos  = nla_get_u8(data[IFLA_VXLAN_TOS]);
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 6bc9b12..da26bc8 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -362,7 +362,8 @@ static const struct pci_vpd_ops pci_vpd_pci22_ops = {
 static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
                               void *arg)
 {
-       struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
+       struct pci_dev *tdev = pci_get_slot(dev->bus,
+                                           PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
        ssize_t ret;
 
        if (!tdev)
@@ -376,7 +377,8 @@ static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
 static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count,
                                const void *arg)
 {
-       struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
+       struct pci_dev *tdev = pci_get_slot(dev->bus,
+                                           PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
        ssize_t ret;
 
        if (!tdev)
@@ -393,22 +395,6 @@ static const struct pci_vpd_ops pci_vpd_f0_ops = {
        .release = pci_vpd_pci22_release,
 };
 
-static int pci_vpd_f0_dev_check(struct pci_dev *dev)
-{
-       struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
-       int ret = 0;
-
-       if (!tdev)
-               return -ENODEV;
-       if (!tdev->vpd || !tdev->multifunction ||
-           dev->class != tdev->class || dev->vendor != tdev->vendor ||
-           dev->device != tdev->device)
-               ret = -ENODEV;
-
-       pci_dev_put(tdev);
-       return ret;
-}
-
 int pci_vpd_pci22_init(struct pci_dev *dev)
 {
        struct pci_vpd_pci22 *vpd;
@@ -417,12 +403,7 @@ int pci_vpd_pci22_init(struct pci_dev *dev)
        cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
        if (!cap)
                return -ENODEV;
-       if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0) {
-               int ret = pci_vpd_f0_dev_check(dev);
 
-               if (ret)
-                       return ret;
-       }
        vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC);
        if (!vpd)
                return -ENOMEM;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 7e6cdb2d..8ecdb4f 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1878,11 +1878,27 @@ static void quirk_netmos(struct pci_dev *dev)
 DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID,
                         PCI_CLASS_COMMUNICATION_SERIAL, 8, quirk_netmos);
 
+/*
+ * Quirk non-zero PCI functions to route VPD access through function 0 for
+ * devices that share VPD resources between functions.  The functions are
+ * expected to be identical devices.
+ */
 static void quirk_f0_vpd_link(struct pci_dev *dev)
 {
-       if (!dev->multifunction || !PCI_FUNC(dev->devfn))
+       struct pci_dev *f0;
+
+       if (!PCI_FUNC(dev->devfn))
                return;
-       dev->dev_flags |= PCI_DEV_FLAGS_VPD_REF_F0;
+
+       f0 = pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
+       if (!f0)
+               return;
+
+       if (f0->vpd && dev->class == f0->class &&
+           dev->vendor == f0->vendor && dev->device == f0->device)
+               dev->dev_flags |= PCI_DEV_FLAGS_VPD_REF_F0;
+
+       pci_dev_put(f0);
 }
 DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
                              PCI_CLASS_NETWORK_ETHERNET, 8, quirk_f0_vpd_link);
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 34a7ace..10db6ad 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -562,6 +562,10 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
        if (!(sccr1_reg & SSCR1_TIE))
                mask &= ~SSSR_TFS;
 
+       /* Ignore RX timeout interrupt if it is disabled */
+       if (!(sccr1_reg & SSCR1_TINTE))
+               mask &= ~SSSR_TINT;
+
        if (!(status & mask))
                return IRQ_NONE;
 
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 8cdd422..e2772a8 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1234,8 +1234,7 @@ static struct class spi_master_class = {
  *
  * The caller is responsible for assigning the bus number and initializing
  * the master's methods before calling spi_register_master(); and (after errors
- * adding the device) calling spi_master_put() and kfree() to prevent a memory
- * leak.
+ * adding the device) calling spi_master_put() to prevent a memory leak.
  */
 struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
 {
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 6524383..85756bd 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -114,7 +114,7 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
                                cfgno, inum, asnum, ep->desc.bEndpointAddress);
                ep->ss_ep_comp.bmAttributes = 16;
        } else if (usb_endpoint_xfer_isoc(&ep->desc) &&
-                       desc->bmAttributes > 2) {
+                  USB_SS_MULT(desc->bmAttributes) > 3) {
                dev_warn(ddev, "Isoc endpoint has Mult of %d in "
                                "config %d interface %d altsetting %d ep %d: "
                                "setting to 3\n", desc->bmAttributes + 1,
@@ -123,7 +123,8 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
        }
 
        if (usb_endpoint_xfer_isoc(&ep->desc))
-               max_tx = (desc->bMaxBurst + 1) * (desc->bmAttributes + 1) *
+               max_tx = (desc->bMaxBurst + 1) *
+                       (USB_SS_MULT(desc->bmAttributes)) *
                        usb_endpoint_maxp(&ep->desc);
        else if (usb_endpoint_xfer_int(&ep->desc))
                max_tx = usb_endpoint_maxp(&ep->desc) *
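For the two config.c hunks above, the point is that bmAttributes only encodes the isochronous Mult in its low two bits, so both the sanity check and the bandwidth estimate should decode the field rather than use the raw byte. A small illustration, assuming USB_SS_MULT(p) expands to 1 + ((p) & 0x3) as in the USB ch9 header; the 0x82 descriptor value is purely hypothetical:

    #include <stdio.h>

    /* Assumed definition of the macro used by the patch. */
    #define USB_SS_MULT(p)  (1 + ((p) & 0x3))

    int main(void)
    {
            unsigned int bmAttributes = 0x82; /* Mult field = 2, plus a stray high bit */
            unsigned int bMaxBurst = 1;       /* 2 packets per burst */
            unsigned int maxp = 1024;         /* max packet size in bytes */

            /* Old math treated the whole byte as the multiplier: 0x83 "bursts". */
            unsigned int old_max_tx = (bMaxBurst + 1) * (bmAttributes + 1) * maxp;

            /* New math decodes the 2-bit Mult field: 3 bursts per interval. */
            unsigned int new_max_tx = (bMaxBurst + 1) * USB_SS_MULT(bmAttributes) * maxp;

            printf("old=%u new=%u\n", old_max_tx, new_max_tx); /* old=268288 new=6144 */
            return 0;
    }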
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 146923a..51ea3a8 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1402,10 +1402,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
         * use Event Data TRBs, and we don't chain in a link TRB on short
         * transfers, we're basically dividing by 1.
         *
-        * xHCI 1.0 specification indicates that the Average TRB Length should
-        * be set to 8 for control endpoints.
+        * xHCI 1.0 and 1.1 specification indicates that the Average TRB Length
+        * should be set to 8 for control endpoints.
         */
-       if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version == 0x100)
+       if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version >= 0x100)
                ep_ctx->tx_info |= cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(8));
        else
                ep_ctx->tx_info |=
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 52547c0..33c0029 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -332,6 +332,15 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
        ret = xhci_handshake(xhci, &xhci->op_regs->cmd_ring,
                        CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
        if (ret < 0) {
+               /* we are about to kill xhci, give it one more chance */
+               xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
+                             &xhci->op_regs->cmd_ring);
+               udelay(1000);
+               ret = xhci_handshake(xhci, &xhci->op_regs->cmd_ring,
+                                    CMD_RING_RUNNING, 0, 3 * 1000 * 1000);
+               if (ret == 0)
+                       return 0;
+
                xhci_err(xhci, "Stopped the command ring failed, "
                                "maybe the host is dead\n");
                xhci->xhc_state |= XHCI_STATE_DYING;
@@ -3584,8 +3593,8 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
        if (start_cycle == 0)
                field |= 0x1;
 
-       /* xHCI 1.0 6.4.1.2.1: Transfer Type field */
-       if (xhci->hci_version == 0x100) {
+       /* xHCI 1.0/1.1 6.4.1.2.1: Transfer Type field */
+       if (xhci->hci_version >= 0x100) {
                if (urb->transfer_buffer_length > 0) {
                        if (setup->bRequestType & USB_DIR_IN)
                                field |= TRB_TX_TYPE(TRB_DATA_IN);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 1fc236c..159b826 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -143,7 +143,8 @@ static int xhci_start(struct xhci_hcd *xhci)
                                "waited %u microseconds.\n",
                                XHCI_MAX_HALT_USEC);
        if (!ret)
-               xhci->xhc_state &= ~XHCI_STATE_HALTED;
+               xhci->xhc_state &= ~(XHCI_STATE_HALTED | XHCI_STATE_DYING);
+
        return ret;
 }
 
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index e2cc293..2188cb8 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -278,6 +278,10 @@ static void option_instat_callback(struct urb *urb);
 #define ZTE_PRODUCT_MF622                      0x0001
 #define ZTE_PRODUCT_MF628                      0x0015
 #define ZTE_PRODUCT_MF626                      0x0031
+#define ZTE_PRODUCT_ZM8620_X                   0x0396
+#define ZTE_PRODUCT_ME3620_MBIM                        0x0426
+#define ZTE_PRODUCT_ME3620_X                   0x1432
+#define ZTE_PRODUCT_ME3620_L                   0x1433
 #define ZTE_PRODUCT_AC2726                     0xfff1
 #define ZTE_PRODUCT_CDMA_TECH                  0xfffe
 #define ZTE_PRODUCT_AC8710T                    0xffff
@@ -551,6 +555,18 @@ static const struct option_blacklist_info zte_mc2716_z_blacklist = {
        .sendsetup = BIT(1) | BIT(2) | BIT(3),
 };
 
+static const struct option_blacklist_info zte_me3620_mbim_blacklist = {
+       .reserved = BIT(2) | BIT(3) | BIT(4),
+};
+
+static const struct option_blacklist_info zte_me3620_xl_blacklist = {
+       .reserved = BIT(3) | BIT(4) | BIT(5),
+};
+
+static const struct option_blacklist_info zte_zm8620_x_blacklist = {
+       .reserved = BIT(3) | BIT(4) | BIT(5),
+};
+
 static const struct option_blacklist_info huawei_cdc12_blacklist = {
        .reserved = BIT(1) | BIT(2),
 };
@@ -1588,6 +1604,14 @@ static const struct usb_device_id option_ids[] = {
         .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff),
         .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist },
+       { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_L),
+        .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist },
+       { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_MBIM),
+        .driver_info = (kernel_ulong_t)&zte_me3620_mbim_blacklist },
+       { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_X),
+        .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist },
+       { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ZM8620_X),
+        .driver_info = (kernel_ulong_t)&zte_zm8620_x_blacklist },
        { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) },
        { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) },
        { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) },
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 809bbe1..158f116 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2631,7 +2631,8 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
                              bio_end_io_t end_io_func,
                              int mirror_num,
                              unsigned long prev_bio_flags,
-                             unsigned long bio_flags)
+                             unsigned long bio_flags,
+                             bool force_bio_submit)
 {
        int ret = 0;
        struct bio *bio;
@@ -2649,6 +2650,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
                        contig = bio_end_sector(bio) == sector;
 
                if (prev_bio_flags != bio_flags || !contig ||
+                   force_bio_submit ||
                    merge_bio(rw, tree, page, offset, page_size, bio, bio_flags) ||
                    bio_add_page(bio, page, page_size, offset) < page_size) {
                        ret = submit_one_bio(rw, bio, mirror_num,
@@ -2740,7 +2742,8 @@ static int __do_readpage(struct extent_io_tree *tree,
                         get_extent_t *get_extent,
                         struct extent_map **em_cached,
                         struct bio **bio, int mirror_num,
-                        unsigned long *bio_flags, int rw)
+                        unsigned long *bio_flags, int rw,
+                        u64 *prev_em_start)
 {
        struct inode *inode = page->mapping->host;
        u64 start = page_offset(page);
@@ -2788,6 +2791,7 @@ static int __do_readpage(struct extent_io_tree *tree,
        }
        while (cur <= end) {
                unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
+               bool force_bio_submit = false;
 
                if (cur >= last_byte) {
                        char *userpage;
@@ -2838,6 +2842,49 @@ static int __do_readpage(struct extent_io_tree *tree,
                block_start = em->block_start;
                if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
                        block_start = EXTENT_MAP_HOLE;
+
+               /*
+                * If we have a file range that points to a compressed extent
+                * and it's followed by a consecutive file range that points to
+                * the same compressed extent (possibly with a different
+                * offset and/or length, so it either points to the whole extent
+                * or only part of it), we must make sure we do not submit a
+                * single bio to populate the pages for the 2 ranges because
+                * this makes the compressed extent read zero out the pages
+                * belonging to the 2nd range. Imagine the following scenario:
+                *
+                *  File layout
+                *  [0 - 8K]                     [8K - 24K]
+                *    |                               |
+                *    |                               |
+                * points to extent X,         points to extent X,
+                * offset 4K, length of 8K     offset 0, length 16K
+                *
+                * [extent X, compressed length = 4K uncompressed length = 16K]
+                *
+                * If the bio to read the compressed extent covers both ranges,
+                * it will decompress extent X into the pages belonging to the
+                * first range and then it will stop, zeroing out the remaining
+                * pages that belong to the other range that points to extent X.
+                * So here we make sure we submit 2 bios, one for the first
+                * range and another one for the third range. Both will target
+                * the same physical extent from disk, but we can't currently
+                * make the compressed bio endio callback populate the pages
+                * for both ranges because each compressed bio is tightly
+                * coupled with a single extent map, and each range can have
+                * an extent map with a different offset value relative to the
+                * uncompressed data of our extent and different lengths. This
+                * is a corner case so we prioritize correctness over
+                * non-optimal behavior (submitting 2 bios for the same extent).
+                */
+               if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) &&
+                   prev_em_start && *prev_em_start != (u64)-1 &&
+                   *prev_em_start != em->orig_start)
+                       force_bio_submit = true;
+
+               if (prev_em_start)
+                       *prev_em_start = em->orig_start;
+
                free_extent_map(em);
                em = NULL;
 
@@ -2887,7 +2934,8 @@ static int __do_readpage(struct extent_io_tree *tree,
                                         bdev, bio, pnr,
                                         end_bio_extent_readpage, mirror_num,
                                         *bio_flags,
-                                        this_bio_flag);
+                                        this_bio_flag,
+                                        force_bio_submit);
                if (!ret) {
                        nr++;
                        *bio_flags = this_bio_flag;
@@ -2914,7 +2962,8 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
                                             get_extent_t *get_extent,
                                             struct extent_map **em_cached,
                                             struct bio **bio, int mirror_num,
-                                            unsigned long *bio_flags, int rw)
+                                            unsigned long *bio_flags, int rw,
+                                            u64 *prev_em_start)
 {
        struct inode *inode;
        struct btrfs_ordered_extent *ordered;
@@ -2934,7 +2983,7 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
 
        for (index = 0; index < nr_pages; index++) {
                __do_readpage(tree, pages[index], get_extent, em_cached, bio,
-                             mirror_num, bio_flags, rw);
+                             mirror_num, bio_flags, rw, prev_em_start);
                page_cache_release(pages[index]);
        }
 }
@@ -2944,7 +2993,8 @@ static void __extent_readpages(struct extent_io_tree *tree,
                               int nr_pages, get_extent_t *get_extent,
                               struct extent_map **em_cached,
                               struct bio **bio, int mirror_num,
-                              unsigned long *bio_flags, int rw)
+                              unsigned long *bio_flags, int rw,
+                              u64 *prev_em_start)
 {
        u64 start = 0;
        u64 end = 0;
@@ -2965,7 +3015,7 @@ static void __extent_readpages(struct extent_io_tree *tree,
                                                  index - first_index, start,
                                                  end, get_extent, em_cached,
                                                  bio, mirror_num, bio_flags,
-                                                 rw);
+                                                 rw, prev_em_start);
                        start = page_start;
                        end = start + PAGE_CACHE_SIZE - 1;
                        first_index = index;
@@ -2976,7 +3026,8 @@ static void __extent_readpages(struct extent_io_tree *tree,
                __do_contiguous_readpages(tree, &pages[first_index],
                                          index - first_index, start,
                                          end, get_extent, em_cached, bio,
-                                         mirror_num, bio_flags, rw);
+                                         mirror_num, bio_flags, rw,
+                                         prev_em_start);
 }
 
 static int __extent_read_full_page(struct extent_io_tree *tree,
@@ -3002,7 +3053,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
        }
 
        ret = __do_readpage(tree, page, get_extent, NULL, bio, mirror_num,
-                           bio_flags, rw);
+                           bio_flags, rw, NULL);
        return ret;
 }
 
@@ -3028,7 +3079,7 @@ int extent_read_full_page_nolock(struct extent_io_tree *tree, struct page *page,
        int ret;
 
        ret = __do_readpage(tree, page, get_extent, NULL, &bio, mirror_num,
-                                     &bio_flags, READ);
+                           &bio_flags, READ, NULL);
        if (bio)
                ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
        return ret;
@@ -3297,7 +3348,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
                                                 sector, iosize, pg_offset,
                                                 bdev, &epd->bio, max_nr,
                                                 end_bio_extent_writepage,
-                                                0, 0, 0);
+                                                0, 0, 0, false);
                        if (ret)
                                SetPageError(page);
                }
@@ -3468,7 +3519,7 @@ static int write_one_eb(struct extent_buffer *eb,
                ret = submit_extent_page(rw, eb->tree, p, offset >> 9,
                                         PAGE_CACHE_SIZE, 0, bdev, &epd->bio,
                                         -1, end_bio_extent_buffer_writepage,
-                                        0, epd->bio_flags, bio_flags);
+                                        0, epd->bio_flags, bio_flags, false);
                epd->bio_flags = bio_flags;
                if (ret) {
                        set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
@@ -3870,6 +3921,7 @@ int extent_readpages(struct extent_io_tree *tree,
        struct page *page;
        struct extent_map *em_cached = NULL;
        int nr = 0;
+       u64 prev_em_start = (u64)-1;
 
        for (page_idx = 0; page_idx < nr_pages; page_idx++) {
                page = list_entry(pages->prev, struct page, lru);
@@ -3886,12 +3938,12 @@ int extent_readpages(struct extent_io_tree *tree,
                if (nr < ARRAY_SIZE(pagepool))
                        continue;
                __extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
-                                  &bio, 0, &bio_flags, READ);
+                                  &bio, 0, &bio_flags, READ, &prev_em_start);
                nr = 0;
        }
        if (nr)
                __extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
-                                  &bio, 0, &bio_flags, READ);
+                                  &bio, 0, &bio_flags, READ, &prev_em_start);
 
        if (em_cached)
                free_extent_map(em_cached);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index c87b439..160471f 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -4496,7 +4496,8 @@ void btrfs_evict_inode(struct inode *inode)
                goto no_delete;
        }
        /* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */
-       btrfs_wait_ordered_range(inode, 0, (u64)-1);
+       if (!special_file(inode->i_mode))
+               btrfs_wait_ordered_range(inode, 0, (u64)-1);
 
        if (root->fs_info->log_root_recovering) {
                BUG_ON(test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index 4934347..3299778 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -441,6 +441,48 @@ find_domain_name(struct cifs_ses *ses, const struct nls_table *nls_cp)
        return 0;
 }
 
+/* Server has provided av pairs/target info in the type 2 challenge
+ * packet and we have plucked it and stored within smb session.
+ * We parse that blob here to find the server given timestamp
+ * as part of ntlmv2 authentication (or local current time as
+ * default in case of failure)
+ */
+static __le64
+find_timestamp(struct cifs_ses *ses)
+{
+       unsigned int attrsize;
+       unsigned int type;
+       unsigned int onesize = sizeof(struct ntlmssp2_name);
+       unsigned char *blobptr;
+       unsigned char *blobend;
+       struct ntlmssp2_name *attrptr;
+
+       if (!ses->auth_key.len || !ses->auth_key.response)
+               return 0;
+
+       blobptr = ses->auth_key.response;
+       blobend = blobptr + ses->auth_key.len;
+
+       while (blobptr + onesize < blobend) {
+               attrptr = (struct ntlmssp2_name *) blobptr;
+               type = le16_to_cpu(attrptr->type);
+               if (type == NTLMSSP_AV_EOL)
+                       break;
+               blobptr += 2; /* advance attr type */
+               attrsize = le16_to_cpu(attrptr->length);
+               blobptr += 2; /* advance attr size */
+               if (blobptr + attrsize > blobend)
+                       break;
+               if (type == NTLMSSP_AV_TIMESTAMP) {
+                       if (attrsize == sizeof(u64))
+                               return *((__le64 *)blobptr);
+               }
+               blobptr += attrsize; /* advance attr value */
+       }
+
+       return cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
+}
+
 static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
                            const struct nls_table *nls_cp)
 {
@@ -637,6 +679,7 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
        struct ntlmv2_resp *ntlmv2;
        char ntlmv2_hash[16];
        unsigned char *tiblob = NULL; /* target info blob */
+       __le64 rsp_timestamp;
 
        if (ses->server->negflavor == CIFS_NEGFLAVOR_EXTENDED) {
                if (!ses->domainName) {
@@ -655,6 +698,12 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
                }
        }
 
+       /* Must be within 5 minutes of the server (or in range +/-2h
+        * in case of Mac OS X), so simply carry over server timestamp
+        * (as Windows 7 does)
+        */
+       rsp_timestamp = find_timestamp(ses);
+
        baselen = CIFS_SESS_KEY_SIZE + sizeof(struct ntlmv2_resp);
        tilen = ses->auth_key.len;
        tiblob = ses->auth_key.response;
@@ -671,8 +720,8 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
                        (ses->auth_key.response + CIFS_SESS_KEY_SIZE);
        ntlmv2->blob_signature = cpu_to_le32(0x00000101);
        ntlmv2->reserved = 0;
-       /* Must be within 5 minutes of the server */
-       ntlmv2->time = cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
+       ntlmv2->time = rsp_timestamp;
+
        get_random_bytes(&ntlmv2->client_chal, sizeof(ntlmv2->client_chal));
        ntlmv2->reserved2 = 0;
 
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index dfc9564..31df9bc 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -67,6 +67,12 @@ static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file,
                goto out_drop_write;
        }
 
+       if (src_file.file->f_op->unlocked_ioctl != cifs_ioctl) {
+               rc = -EBADF;
+               cifs_dbg(VFS, "src file seems to be from a different filesystem type\n");
+               goto out_fput;
+       }
+
        if ((!src_file.file->private_data) || (!dst_file->private_data)) {
                rc = -EBADF;
                cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 30f3eb5..6aeb1de 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -49,9 +49,13 @@ change_conf(struct TCP_Server_Info *server)
                break;
        default:
                server->echoes = true;
-               server->oplocks = true;
+               if (enable_oplocks) {
+                       server->oplocks = true;
+                       server->oplock_credits = 1;
+               } else
+                       server->oplocks = false;
+
                server->echo_credits = 1;
-               server->oplock_credits = 1;
        }
        server->credits -= server->echo_credits + server->oplock_credits;
        return 0;
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index f06c9a8a..06d80b5 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2345,7 +2345,7 @@ unsigned int datagram_poll(struct file *file, struct socket *sock,
 int skb_copy_datagram_iovec(const struct sk_buff *from, int offset,
                            struct iovec *to, int size);
 int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, int hlen,
-                                    struct iovec *iov);
+                                    struct iovec *iov, int len);
 int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
                                 const struct iovec *from, int from_offset,
                                 int len);
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 13bc7da..3437762 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -796,6 +796,7 @@ EXPORT_SYMBOL(__skb_checksum_complete);
  *     @skb: skbuff
  *     @hlen: hardware length
  *     @iov: io vector
+ *     @len: amount of data to copy from skb to iov
  *
  *     Caller _must_ check that skb will fit to this iovec.
  *
@@ -805,11 +806,14 @@ EXPORT_SYMBOL(__skb_checksum_complete);
  *                        can be modified!
  */
 int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
-                                    int hlen, struct iovec *iov)
+                                    int hlen, struct iovec *iov, int len)
 {
        __wsum csum;
        int chunk = skb->len - hlen;
 
+       if (chunk > len)
+               chunk = len;
+
        if (!chunk)
                return 0;
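
The extra len argument introduced here lets the checksum-and-copy path clamp the copy to the size of the caller's iovec rather than trusting skb->len alone, so an oversized datagram can no longer overrun the destination buffer on the csum-copy path. A trivial standalone sketch of the clamp, with invented names, purely to illustrate its effect:

/* Illustrative only: cap the number of bytes copied out of a packet to
 * what the receive buffer can hold, as the new 'len' argument allows.
 * All names here are invented.
 */
#include <stdio.h>
#include <string.h>

static size_t copy_clamped(char *dst, size_t dstlen,
			   const char *payload, size_t chunk)
{
	if (chunk > dstlen)
		chunk = dstlen;        /* the clamp added by the patch */
	memcpy(dst, payload, chunk);
	return chunk;
}

int main(void)
{
	const char packet[] = "this datagram is longer than the receive buffer";
	char userbuf[16];
	size_t copied = copy_clamped(userbuf, sizeof(userbuf),
				     packet, strlen(packet));

	printf("copied %zu of %zu payload bytes\n", copied, strlen(packet));
	return 0;
}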
 
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 185c341..aeedc3a 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -621,15 +621,17 @@ static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
 {
        int idx = 0;
        struct fib_rule *rule;
+       int err = 0;
 
        rcu_read_lock();
        list_for_each_entry_rcu(rule, &ops->rules_list, list) {
                if (idx < cb->args[1])
                        goto skip;
 
-               if (fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid,
-                                    cb->nlh->nlmsg_seq, RTM_NEWRULE,
-                                    NLM_F_MULTI, ops) < 0)
+               err = fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid,
+                                      cb->nlh->nlmsg_seq, RTM_NEWRULE,
+                                      NLM_F_MULTI, ops);
+               if (err)
                        break;
 skip:
                idx++;
@@ -638,7 +640,7 @@ skip:
        cb->args[1] = idx;
        rules_ops_put(ops);
 
-       return skb->len;
+       return err;
 }
 
 static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
@@ -654,7 +656,9 @@ static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
                if (ops == NULL)
                        return -EAFNOSUPPORT;
 
-               return dump_rules(skb, cb, ops);
+               dump_rules(skb, cb, ops);
+
+               return skb->len;
        }
 
        rcu_read_lock();
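
For context: a netlink dump callback is expected to write as many records as fit into the current skb, remember where it stopped in cb->args[], and return the number of bytes it produced; the core keeps calling it until it returns 0 (or a negative error). The hunks above make dump_rules() report the fill error to its caller while fib_nl_dumprule() itself still returns skb->len. The standalone sketch below shows the resumable-dump pattern with invented names and sizes, as an illustration only:

/* Illustrative only: the resumable-dump pattern used by netlink dump
 * callbacks.  The callback fills the buffer, records its position, and
 * returns the bytes produced; 0 means the dump is complete.
 */
#include <stdio.h>
#include <string.h>

#define NRECORDS 10
#define RECLEN   16            /* each dumped record is 16 bytes */
#define BUFLEN   48            /* buffer holds only 3 records per pass */

struct cb_state { int next; }; /* stand-in for netlink_callback::args */

static int dump(char *buf, size_t buflen, struct cb_state *cb)
{
	size_t used = 0;

	while (cb->next < NRECORDS) {
		if (used + RECLEN > buflen)
			break;                  /* buffer full: resume later */
		snprintf(buf + used, RECLEN, "record %02d", cb->next);
		used += RECLEN;
		cb->next++;
	}
	return (int)used;                       /* 0 ends the dump */
}

int main(void)
{
	char buf[BUFLEN];
	struct cb_state cb = { 0 };
	int len, pass = 0;

	while ((len = dump(buf, sizeof(buf), &cb)) > 0)
		printf("pass %d delivered %d bytes (up to record %d)\n",
		       ++pass, len, cb.next - 1);
	return 0;
}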
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 2cc1313..bd28b91 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4930,7 +4930,7 @@ static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen)
                err = skb_copy_datagram_iovec(skb, hlen, tp->ucopy.iov, chunk);
        else
                err = skb_copy_and_csum_datagram_iovec(skb, hlen,
-                                                      tp->ucopy.iov);
+                                                      tp->ucopy.iov, chunk);
 
        if (!err) {
                tp->ucopy.len -= chunk;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 4e9921b..f70f0b6 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1268,7 +1268,7 @@ try_again:
        else {
                err = skb_copy_and_csum_datagram_iovec(skb,
                                                       sizeof(struct udphdr),
-                                                      msg->msg_iov);
+                                                      msg->msg_iov, copied);
 
                if (err == -EINVAL)
                        goto csum_copy_err;
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 8737400..821d8df 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -552,7 +552,7 @@ static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
 
        if (it->cache == &mrt->mfc6_unres_queue)
                spin_unlock_bh(&mfc_unres_lock);
-       else if (it->cache == mrt->mfc6_cache_array)
+       else if (it->cache == &mrt->mfc6_cache_array[it->ct])
                read_unlock(&mrt_lock);
 }
 
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index b6bb87e..c0a65ae 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -488,7 +488,7 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
                        goto csum_copy_err;
                err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
        } else {
-               err = skb_copy_and_csum_datagram_iovec(skb, 0, msg->msg_iov);
+               err = skb_copy_and_csum_datagram_iovec(skb, 0, msg->msg_iov, copied);
                if (err == -EINVAL)
                        goto csum_copy_err;
        }
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index fbe234e..618b94e 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -428,7 +428,8 @@ try_again:
                err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
                                              msg->msg_iov, copied);
        else {
-               err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov);
+               err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr),
+                                                      msg->msg_iov, copied);
                if (err == -EINVAL)
                        goto csum_copy_err;
        }
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index da0c1f4..5efb6e6 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -588,6 +588,13 @@ struct nft_xt {
 
 static struct nft_expr_type nft_match_type;
 
+static bool nft_match_cmp(const struct xt_match *match,
+                         const char *name, u32 rev, u32 family)
+{
+       return strcmp(match->name, name) == 0 && match->revision == rev &&
+              (match->family == NFPROTO_UNSPEC || match->family == family);
+}
+
 static const struct nft_expr_ops *
 nft_match_select_ops(const struct nft_ctx *ctx,
                     const struct nlattr * const tb[])
@@ -595,7 +602,7 @@ nft_match_select_ops(const struct nft_ctx *ctx,
        struct nft_xt *nft_match;
        struct xt_match *match;
        char *mt_name;
-       __u32 rev, family;
+       u32 rev, family;
 
        if (tb[NFTA_MATCH_NAME] == NULL ||
            tb[NFTA_MATCH_REV] == NULL ||
@@ -610,8 +617,7 @@ nft_match_select_ops(const struct nft_ctx *ctx,
        list_for_each_entry(nft_match, &nft_match_list, head) {
                struct xt_match *match = nft_match->ops.data;
 
-               if (strcmp(match->name, mt_name) == 0 &&
-                   match->revision == rev && match->family == family)
+               if (nft_match_cmp(match, mt_name, rev, family))
                        return &nft_match->ops;
        }
 
@@ -659,6 +665,13 @@ static LIST_HEAD(nft_target_list);
 
 static struct nft_expr_type nft_target_type;
 
+static bool nft_target_cmp(const struct xt_target *tg,
+                          const char *name, u32 rev, u32 family)
+{
+       return strcmp(tg->name, name) == 0 && tg->revision == rev &&
+              (tg->family == NFPROTO_UNSPEC || tg->family == family);
+}
+
 static const struct nft_expr_ops *
 nft_target_select_ops(const struct nft_ctx *ctx,
                      const struct nlattr * const tb[])
@@ -666,7 +679,7 @@ nft_target_select_ops(const struct nft_ctx *ctx,
        struct nft_xt *nft_target;
        struct xt_target *target;
        char *tg_name;
-       __u32 rev, family;
+       u32 rev, family;
 
        if (tb[NFTA_TARGET_NAME] == NULL ||
            tb[NFTA_TARGET_REV] == NULL ||
@@ -681,8 +694,7 @@ nft_target_select_ops(const struct nft_ctx *ctx,
        list_for_each_entry(nft_target, &nft_match_list, head) {
                struct xt_target *target = nft_target->ops.data;
 
-               if (strcmp(target->name, tg_name) == 0 &&
-                   target->revision == rev && target->family == family)
+               if (nft_target_cmp(target, tg_name, rev, family))
                        return &nft_target->ops;
        }
 
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index de0f385..c644d7f 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -115,6 +115,24 @@ static inline struct hlist_head *nl_portid_hashfn(struct nl_portid_hash *hash, u
        return &hash->table[jhash_1word(portid, hash->rnd) & hash->mask];
 }
 
+static struct sk_buff *netlink_to_full_skb(const struct sk_buff *skb,
+                                          gfp_t gfp_mask)
+{
+       unsigned int len = skb_end_offset(skb);
+       struct sk_buff *new;
+
+       new = alloc_skb(len, gfp_mask);
+       if (new == NULL)
+               return NULL;
+
+       NETLINK_CB(new).portid = NETLINK_CB(skb).portid;
+       NETLINK_CB(new).dst_group = NETLINK_CB(skb).dst_group;
+       NETLINK_CB(new).creds = NETLINK_CB(skb).creds;
+
+       memcpy(skb_put(new, len), skb->data, len);
+       return new;
+}
+
 int netlink_add_tap(struct netlink_tap *nt)
 {
        if (unlikely(nt->dev->type != ARPHRD_NETLINK))
@@ -200,7 +218,11 @@ static int __netlink_deliver_tap_skb(struct sk_buff *skb,
        int ret = -ENOMEM;
 
        dev_hold(dev);
-       nskb = skb_clone(skb, GFP_ATOMIC);
+
+       if (netlink_skb_is_mmaped(skb) || is_vmalloc_addr(skb->head))
+               nskb = netlink_to_full_skb(skb, GFP_ATOMIC);
+       else
+               nskb = skb_clone(skb, GFP_ATOMIC);
        if (nskb) {
                nskb->dev = dev;
                nskb->protocol = htons((u16) sk->sk_protocol);
@@ -263,11 +285,6 @@ static void netlink_rcv_wake(struct sock *sk)
 }
 
 #ifdef CONFIG_NETLINK_MMAP
-static bool netlink_skb_is_mmaped(const struct sk_buff *skb)
-{
-       return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED;
-}
-
 static bool netlink_rx_is_mmaped(struct sock *sk)
 {
        return nlk_sk(sk)->rx_ring.pg_vec != NULL;
@@ -819,7 +836,6 @@ static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb)
 }
 
 #else /* CONFIG_NETLINK_MMAP */
-#define netlink_skb_is_mmaped(skb)     false
 #define netlink_rx_is_mmaped(sk)       false
 #define netlink_tx_is_mmaped(sk)       false
 #define netlink_mmap                   sock_no_mmap
diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h
index acbd774..dcc89c7 100644
--- a/net/netlink/af_netlink.h
+++ b/net/netlink/af_netlink.h
@@ -65,6 +65,15 @@ struct nl_portid_hash {
        u32                     rnd;
 };
 
+static inline bool netlink_skb_is_mmaped(const struct sk_buff *skb)
+{
+#ifdef CONFIG_NETLINK_MMAP
+       return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED;
+#else
+       return false;
+#endif /* CONFIG_NETLINK_MMAP */
+}
+
 struct netlink_table {
        struct nl_portid_hash   hash;
        struct hlist_head       mc_list;
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index d26706f..f85000b 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -789,7 +789,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
                if (IS_ERR(acts))
                        goto error;
 
-               ovs_flow_mask_key(&masked_key, &key, &mask);
+               ovs_flow_mask_key(&masked_key, &key, true, &mask);
                error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS],
                                             &masked_key, 0, &acts);
                if (error) {
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index dd83154..20063bd 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -57,18 +57,21 @@ static u16 range_n_bytes(const struct sw_flow_key_range *range)
 }
 
 void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
-                      const struct sw_flow_mask *mask)
+                      bool full, const struct sw_flow_mask *mask)
 {
-       const long *m = (long *)((u8 *)&mask->key + mask->range.start);
-       const long *s = (long *)((u8 *)src + mask->range.start);
-       long *d = (long *)((u8 *)dst + mask->range.start);
+       int start = full ? 0 : mask->range.start;
+       int len = full ? sizeof *dst : range_n_bytes(&mask->range);
+       const long *m = (const long *)((const u8 *)&mask->key + start);
+       const long *s = (const long *)((const u8 *)src + start);
+       long *d = (long *)((u8 *)dst + start);
        int i;
 
-       /* The memory outside of the 'mask->range' are not set since
-        * further operations on 'dst' only uses contents within
-        * 'mask->range'.
+       /* If 'full' is true then all of 'dst' is fully initialized. Otherwise,
+        * if 'full' is false the memory outside of the 'mask->range' is left
+        * uninitialized. This can be used as an optimization when further
+        * operations on 'dst' only use contents within 'mask->range'.
         */
-       for (i = 0; i < range_n_bytes(&mask->range); i += sizeof(long))
+       for (i = 0; i < len; i += sizeof(long))
                *d++ = *s++ & *m++;
 }
 
@@ -417,7 +420,7 @@ static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
        u32 hash;
        struct sw_flow_key masked_key;
 
-       ovs_flow_mask_key(&masked_key, unmasked, mask);
+       ovs_flow_mask_key(&masked_key, unmasked, false, mask);
        hash = flow_hash(&masked_key, key_start, key_end);
        head = find_bucket(ti, hash);
        hlist_for_each_entry_rcu(flow, head, hash_node[ti->node_ver]) {
diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h
index 59c15f7..86d07ff 100644
--- a/net/openvswitch/flow_table.h
+++ b/net/openvswitch/flow_table.h
@@ -78,5 +78,5 @@ bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
                               struct sw_flow_match *match);
 
 void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
-                      const struct sw_flow_mask *mask);
+                      bool full, const struct sw_flow_mask *mask);
 #endif /* flow_table.h */
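
The new full flag chooses between masking the whole key (every byte of dst is written) and masking only the bytes inside mask->range, which is cheaper but leaves the bytes outside the range uninitialized and is therefore only safe when later code never reads them. A standalone sketch of the two modes follows; it works byte-at-a-time for clarity (the kernel code above works a long at a time), and the types and sizes are invented:

/* Illustrative only: apply a byte mask to a key either over its full size
 * or over a sub-range, mirroring the 'full' parameter added above.
 */
#include <stdio.h>
#include <string.h>

#define KEY_SIZE 16

struct demo_mask {
	unsigned char key[KEY_SIZE];
	int start, end;              /* byte range the mask covers */
};

static void mask_key(unsigned char *dst, const unsigned char *src,
		     int full, const struct demo_mask *mask)
{
	int start = full ? 0 : mask->start;
	int end   = full ? KEY_SIZE : mask->end;

	for (int i = start; i < end; i++)
		dst[i] = src[i] & mask->key[i];
	/* When full == 0, dst[0..start) and dst[end..KEY_SIZE) are untouched. */
}

int main(void)
{
	unsigned char src[KEY_SIZE], dst[KEY_SIZE];
	struct demo_mask mask = { .start = 4, .end = 12 };

	memset(src, 0xff, sizeof(src));
	memset(mask.key, 0x0f, sizeof(mask.key));

	memset(dst, 0xaa, sizeof(dst));          /* poison to show what is written */
	mask_key(dst, src, 0, &mask);            /* range-only: bytes 0-3 and 12-15 stay 0xaa */
	printf("range: dst[0]=%02x dst[4]=%02x\n", dst[0], dst[4]);

	memset(dst, 0xaa, sizeof(dst));
	mask_key(dst, src, 1, &mask);            /* full: every byte defined */
	printf("full:  dst[0]=%02x dst[4]=%02x\n", dst[0], dst[4]);
	return 0;
}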
diff --git a/net/rxrpc/ar-recvmsg.c b/net/rxrpc/ar-recvmsg.c
index 5cc2da5..c67f5d3 100644
--- a/net/rxrpc/ar-recvmsg.c
+++ b/net/rxrpc/ar-recvmsg.c
@@ -185,7 +185,8 @@ int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock,
                                                      msg->msg_iov, copy);
                } else {
                        ret = skb_copy_and_csum_datagram_iovec(skb, offset,
-                                                              msg->msg_iov);
+                                                              msg->msg_iov,
+                                                              copy);
                        if (ret == -EINVAL)
                                goto csum_copy_error;
                }
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index aa4ea94..cf5863c3 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -703,7 +703,7 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
         * outstanding data and rely on the retransmission limit be reached
         * to shutdown the association.
         */
-       if (t->asoc->state != SCTP_STATE_SHUTDOWN_PENDING)
+       if (t->asoc->state < SCTP_STATE_SHUTDOWN_PENDING)
                t->asoc->overall_error_count = 0;
 
        /* Clear the hb_sent flag to signal that we had a good
diff --git a/security/keys/gc.c b/security/keys/gc.c
index 009d937..4a78033 100644
--- a/security/keys/gc.c
+++ b/security/keys/gc.c
@@ -143,6 +143,12 @@ static noinline void key_gc_unused_keys(struct list_head *keys)
                kdebug("- %u", key->serial);
                key_check(key);
 
+               /* Throw away the key data if the key is instantiated */
+               if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags) &&
+                   !test_bit(KEY_FLAG_NEGATIVE, &key->flags) &&
+                   key->type->destroy)
+                       key->type->destroy(key);
+
                security_key_free(key);
 
                /* deal with the user's key tracking and quota */
@@ -157,10 +163,6 @@ static noinline void key_gc_unused_keys(struct list_head *keys)
                if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
                        atomic_dec(&key->user->nikeys);
 
-               /* now throw away the key memory */
-               if (key->type->destroy)
-                       key->type->destroy(key);
-
                key_user_put(key->user);
 
                kfree(key->description);
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index 3814119..9ea60a7 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -457,6 +457,9 @@ static struct key *construct_key_and_link(struct keyring_search_context *ctx,
 
        kenter("");
 
+       if (ctx->index_key.type == &key_type_keyring)
+               return ERR_PTR(-EPERM);
+       
        user = key_user_lookup(current_fsuid());
        if (!user)
                return ERR_PTR(-ENOMEM);
diff --git a/sound/arm/Kconfig b/sound/arm/Kconfig
index 885683a..e040621 100644
--- a/sound/arm/Kconfig
+++ b/sound/arm/Kconfig
@@ -9,6 +9,14 @@ menuconfig SND_ARM
          Drivers that are implemented on ASoC can be found in
          "ALSA for SoC audio support" section.
 
+config SND_PXA2XX_LIB
+       tristate
+       select SND_AC97_CODEC if SND_PXA2XX_LIB_AC97
+       select SND_DMAENGINE_PCM
+
+config SND_PXA2XX_LIB_AC97
+       bool
+
 if SND_ARM
 
 config SND_ARMAACI
@@ -21,13 +29,6 @@ config SND_PXA2XX_PCM
        tristate
        select SND_PCM
 
-config SND_PXA2XX_LIB
-       tristate
-       select SND_AC97_CODEC if SND_PXA2XX_LIB_AC97
-
-config SND_PXA2XX_LIB_AC97
-       bool
-
 config SND_PXA2XX_AC97
        tristate "AC97 driver for the Intel PXA2xx chip"
        depends on ARCH_PXA
diff --git a/sound/soc/pxa/Kconfig b/sound/soc/pxa/Kconfig
index 4db74a0..dbaba4f 100644
--- a/sound/soc/pxa/Kconfig
+++ b/sound/soc/pxa/Kconfig
@@ -1,7 +1,6 @@
 config SND_PXA2XX_SOC
        tristate "SoC Audio for the Intel PXA2xx chip"
        depends on ARCH_PXA
-       select SND_ARM
        select SND_PXA2XX_LIB
        help
          Say Y or M if you want to add support for codecs attached to
@@ -24,7 +23,6 @@ config SND_PXA2XX_AC97
 config SND_PXA2XX_SOC_AC97
        tristate
        select AC97_BUS
-       select SND_ARM
        select SND_PXA2XX_LIB_AC97
        select SND_SOC_AC97_BUS
 
diff --git a/sound/soc/pxa/pxa2xx-ac97.c b/sound/soc/pxa/pxa2xx-ac97.c
index ae956e3..593e320 100644
--- a/sound/soc/pxa/pxa2xx-ac97.c
+++ b/sound/soc/pxa/pxa2xx-ac97.c
@@ -49,7 +49,7 @@ static struct snd_ac97_bus_ops pxa2xx_ac97_ops = {
        .reset  = pxa2xx_ac97_cold_reset,
 };
 
-static unsigned long pxa2xx_ac97_pcm_stereo_in_req = 12;
+static unsigned long pxa2xx_ac97_pcm_stereo_in_req = 11;
 static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_stereo_in = {
        .addr           = __PREG(PCDR),
        .addr_width     = DMA_SLAVE_BUSWIDTH_4_BYTES,
@@ -57,7 +57,7 @@ static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_stereo_in = {
        .filter_data    = &pxa2xx_ac97_pcm_stereo_in_req,
 };
 
-static unsigned long pxa2xx_ac97_pcm_stereo_out_req = 11;
+static unsigned long pxa2xx_ac97_pcm_stereo_out_req = 12;
 static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_stereo_out = {
        .addr           = __PREG(PCDR),
        .addr_width     = DMA_SLAVE_BUSWIDTH_4_BYTES,
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 1cd0357..1e07b18 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -1719,7 +1719,7 @@ static int process_nrcpus(struct perf_file_section *section __maybe_unused,
        if (ph->needs_swap)
                nr = bswap_32(nr);
 
-       ph->env.nr_cpus_online = nr;
+       ph->env.nr_cpus_avail = nr;
 
        ret = readn(fd, &nr, sizeof(nr));
        if (ret != sizeof(nr))
@@ -1728,7 +1728,7 @@ static int process_nrcpus(struct perf_file_section *section __maybe_unused,
        if (ph->needs_swap)
                nr = bswap_32(nr);
 
-       ph->env.nr_cpus_avail = nr;
+       ph->env.nr_cpus_online = nr;
        return 0;
 }
 
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index db50f4c..634048e 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2807,10 +2807,25 @@ static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
 static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1,
                                  const struct kvm_io_range *r2)
 {
-       if (r1->addr < r2->addr)
+       gpa_t addr1 = r1->addr;
+       gpa_t addr2 = r2->addr;
+
+       if (addr1 < addr2)
                return -1;
-       if (r1->addr + r1->len > r2->addr + r2->len)
+
+       /* If r2->len == 0, match the exact address.  If r2->len != 0,
+        * accept any overlapping write.  Any order is acceptable for
+        * overlapping ranges, because kvm_io_bus_get_first_dev ensures
+        * we process all of them.
+        */
+       if (r2->len) {
+               addr1 += r1->len;
+               addr2 += r2->len;
+       }
+
+       if (addr1 > addr2)
                return 1;
+
        return 0;
 }
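
With the rewritten comparator, a zero-length r2 is matched on its address alone, while a non-zero-length r2 also brings the range ends into the comparison; as the added comment notes, any consistent order is fine for overlapping ranges because kvm_io_bus_get_first_dev walks all of them. The standalone sketch below re-declares the types locally and uses invented example ranges, just to show how the three cases compare:

/* Illustrative only: the ordering implemented by the new kvm_io_bus_cmp().
 * gpa_t and the range struct are re-declared locally so the sketch builds
 * on its own; the example values are invented.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gpa_t;

struct io_range { gpa_t addr; uint32_t len; };

static int range_cmp(const struct io_range *r1, const struct io_range *r2)
{
	gpa_t addr1 = r1->addr;
	gpa_t addr2 = r2->addr;

	if (addr1 < addr2)
		return -1;

	/* A zero-length r2 is compared on address alone; otherwise the
	 * range ends are compared as well, as in the hunk above.
	 */
	if (r2->len) {
		addr1 += r1->len;
		addr2 += r2->len;
	}

	if (addr1 > addr2)
		return 1;

	return 0;
}

int main(void)
{
	struct io_range access   = { 0x1000, 4 };   /* a 4-byte write at 0x1000 */
	struct io_range zero_len = { 0x1000, 0 };   /* zero-length registration */
	struct io_range device   = { 0x1000, 8 };   /* 8-byte device range */

	printf("access vs zero_len: %d\n", range_cmp(&access, &zero_len)); /* 0 */
	printf("access vs device:   %d\n", range_cmp(&access, &device));   /* 0 */
	printf("device vs access:   %d\n", range_cmp(&device, &access));   /* 1 */
	return 0;
}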
 