diff --git a/Documentation/ramoops.txt b/Documentation/ramoops.txt
index 69b3cac4749d..5d8675615e59 100644
--- a/Documentation/ramoops.txt
+++ b/Documentation/ramoops.txt
@@ -14,11 +14,19 @@ survive after a restart.
 
 1. Ramoops concepts
 
-Ramoops uses a predefined memory area to store the dump. The start and size of
-the memory area are set using two variables:
+Ramoops uses a predefined memory area to store the dump. The start, size,
+and type of the memory area are set using three variables:
   * "mem_address" for the start
   * "mem_size" for the size. The memory size will be rounded down to a
   power of two.
+  * "mem_type" to specify the memory type (default is pgprot_writecombine).
+
+Typically the default value of mem_type=0 should be used as that sets the pstore
+mapping to pgprot_writecombine. Setting mem_type=1 attempts to use
+pgprot_noncached, which only works on some platforms. This is because pstore
+depends on atomic operations. At least on ARM, pgprot_noncached causes the
+memory to be mapped strongly ordered, and atomic operations on strongly ordered
+memory are implementation defined, and won't work on many ARMs such as omaps.
 
 The memory area is divided into "record_size" chunks (also rounded down to
 power of two) and each oops/panic writes a "record_size" chunk of
@@ -55,6 +63,7 @@ Setting the ramoops parameters can be done in 2 different manners:
 static struct ramoops_platform_data ramoops_data = {
         .mem_size               = <...>,
         .mem_address            = <...>,
+        .mem_type               = <...>,
         .record_size            = <...>,
         .dump_oops              = <...>,
         .ecc                    = <...>,
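
The struct above is the platform-data way of configuring ramoops; the module-parameter way mentioned in the same document gains the same knob. As a rough, illustrative sketch (the address and sizes below are placeholders, and the parameter names assume the standard ramoops module parameters named after the variables above):

  ramoops.mem_address=0x88000000 ramoops.mem_size=0x100000 \
  ramoops.record_size=0x4000 ramoops.mem_type=0

Passing mem_type=1 instead would request pgprot_noncached, which, per the note above, is only safe on platforms whose atomic operations work on such mappings.
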
diff --git a/Makefile b/Makefile
index a2e572bfff7d..7aff64ee4fb6 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 3
 PATCHLEVEL = 14
-SUBLEVEL = 28
+SUBLEVEL = 29
 EXTRAVERSION =
 NAME = Remembering Coco
 
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 767f0e376f4d..f60aeee2fc9e 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -458,7 +458,7 @@
                };
 
                wdt2: wdt@4ae14000 {
-                       compatible = "ti,omap4-wdt";
+                       compatible = "ti,omap3-wdt";
                        reg = <0x4ae14000 0x80>;
                        interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
                        ti,hwmods = "wd_timer2";
diff --git a/arch/arm/boot/dts/s3c6410-mini6410.dts b/arch/arm/boot/dts/s3c6410-mini6410.dts
index 57e00f9bce99..a25debb50401 100644
--- a/arch/arm/boot/dts/s3c6410-mini6410.dts
+++ b/arch/arm/boot/dts/s3c6410-mini6410.dts
@@ -198,10 +198,6 @@
        status = "okay";
 };
 
-&pwm {
-       status = "okay";
-};
-
 &pinctrl0 {
        gpio_leds: gpio-leds {
                samsung,pins = "gpk-4", "gpk-5", "gpk-6", "gpk-7";
diff --git a/arch/arm/boot/dts/s3c64xx.dtsi b/arch/arm/boot/dts/s3c64xx.dtsi
index 4e3be4d3493d..4f1eff3420f6 100644
--- a/arch/arm/boot/dts/s3c64xx.dtsi
+++ b/arch/arm/boot/dts/s3c64xx.dtsi
@@ -168,7 +168,6 @@
                        clocks = <&clocks PCLK_PWM>;
                        samsung,pwm-outputs = <0>, <1>;
                        #pwm-cells = <3>;
-                       status = "disabled";
                };
 
                pinctrl0: pinctrl@7f008000 {
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index ee6982976d66..cf4823bc111d 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -235,6 +235,7 @@ CONFIG_SND_SOC_TEGRA_MAX98090=y
 CONFIG_USB=y
 CONFIG_USB_XHCI_HCD=y
 CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_EXYNOS=y
 CONFIG_USB_EHCI_TEGRA=y
 CONFIG_USB_EHCI_HCD_PLATFORM=y
 CONFIG_USB_ISP1760_HCD=y
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 1e8b030dbefd..aab70f657cd2 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -1021,6 +1021,15 @@ static int c_show(struct seq_file *m, void *v)
                seq_printf(m, "model name\t: %s rev %d (%s)\n",
                           cpu_name, cpuid & 15, elf_platform);
 
+#if defined(CONFIG_SMP)
+               seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
+                          per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
+                          (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
+#else
+               seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
+                          loops_per_jiffy / (500000/HZ),
+                          (loops_per_jiffy / (5000/HZ)) % 100);
+#endif
                /* dump out the processor features */
                seq_puts(m, "Features\t: ");
 
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index b7b4c86e338b..8cd3724714fe 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -388,8 +388,17 @@ asmlinkage void secondary_start_kernel(void)
 
 void __init smp_cpus_done(unsigned int max_cpus)
 {
-       printk(KERN_INFO "SMP: Total of %d processors activated.\n",
-              num_online_cpus());
+       int cpu;
+       unsigned long bogosum = 0;
+
+       for_each_online_cpu(cpu)
+               bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;
+
+       printk(KERN_INFO "SMP: Total of %d processors activated "
+              "(%lu.%02lu BogoMIPS).\n",
+              num_online_cpus(),
+              bogosum / (500000/HZ),
+              (bogosum / (5000/HZ)) % 100);
 
        hyp_mode_check();
 }
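
As a quick sanity check on the arithmetic used in both hunks above (the value is made up for illustration): with HZ=100 and loops_per_jiffy=4980736, the integer part is 4980736 / (500000/100) = 4980736 / 5000 = 996, and the two decimals are (4980736 / (5000/100)) % 100 = 99614 % 100 = 14, i.e. "996.14" BogoMIPS; smp_cpus_done() simply sums the per-CPU loops_per_jiffy values into bogosum before applying the same formula.
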
diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
index 4e9d58148ca7..c295c10f9217 100644
--- a/arch/arm/mach-mvebu/coherency.c
+++ b/arch/arm/mach-mvebu/coherency.c
@@ -125,6 +125,29 @@ int __init coherency_init(void)
 {
        struct device_node *np;
 
+       /*
+        * The coherency fabric is needed:
+        * - For coherency between processors on Armada XP, so only
+        *   when SMP is enabled.
+        * - For coherency between the processor and I/O devices, but
+        *   this coherency requires many pre-requisites (write
+        *   allocate cache policy, shareable pages, SMP bit set) that
+        *   are only meant in SMP situations.
+        *
+        * Note that this means that on Armada 370, there is currently
+        * no way to use hardware I/O coherency, because even when
+        * CONFIG_SMP is enabled, is_smp() returns false due to the
+        * Armada 370 being a single-core processor. To lift this
+        * limitation, we would have to find a way to make the cache
+        * policy set to write-allocate (on all Armada SoCs), and to
+        * set the shareable attribute in page tables (on all Armada
+        * SoCs except the Armada 370). Unfortunately, such decisions
+        * are taken very early in the kernel boot process, at a point
+        * where we don't know yet on which SoC we are running.
+        */
+       if (!is_smp())
+               return 0;
+
        np = of_find_matching_node(NULL, of_coherency_table);
        if (np) {
                struct resource res;
@@ -151,6 +174,9 @@ static int __init coherency_late_init(void)
 {
        struct device_node *np;
 
+       if (!is_smp())
+               return 0;
+
        np = of_find_matching_node(NULL, of_coherency_table);
        if (np) {
                bus_register_notifier(&platform_bus_type,
diff --git a/arch/arm/mach-omap2/pm44xx.c b/arch/arm/mach-omap2/pm44xx.c
index eefb30cfcabd..2b9cff960af2 100644
--- a/arch/arm/mach-omap2/pm44xx.c
+++ b/arch/arm/mach-omap2/pm44xx.c
@@ -148,26 +148,6 @@ static inline int omap4_init_static_deps(void)
        struct clockdomain *ducati_clkdm, *l3_2_clkdm;
        int ret = 0;
 
-       if (omap_rev() == OMAP4430_REV_ES1_0) {
-               WARN(1, "Power Management not supported on OMAP4430 ES1.0\n");
-               return -ENODEV;
-       }
-
-       pr_err("Power Management for TI OMAP4.\n");
-       /*
-        * OMAP4 chip PM currently works only with certain (newer)
-        * versions of bootloaders. This is due to missing code in the
-        * kernel to properly reset and initialize some devices.
-        * http://www.spinics.net/lists/arm-kernel/msg218641.html
-        */
-       pr_warn("OMAP4 PM: u-boot >= v2012.07 is required for full PM support\n");
-
-       ret = pwrdm_for_each(pwrdms_setup, NULL);
-       if (ret) {
-               pr_err("Failed to setup powerdomains\n");
-               return ret;
-       }
-
        /*
         * The dynamic dependency between MPUSS -> MEMIF and
         * MPUSS -> L4_PER/L3_* and DUCATI -> L3_* doesn't work as
@@ -231,6 +211,15 @@ int __init omap4_pm_init(void)
 
        pr_info("Power Management for TI OMAP4+ devices.\n");
 
+       /*
+        * OMAP4 chip PM currently works only with certain (newer)
+        * versions of bootloaders. This is due to missing code in the
+        * kernel to properly reset and initialize some devices.
+        * http://www.spinics.net/lists/arm-kernel/msg218641.html
+        */
+       if (cpu_is_omap44xx())
+               pr_warn("OMAP4 PM: u-boot >= v2012.07 is required for full PM support\n");
+
        ret = pwrdm_for_each(pwrdms_setup, NULL);
        if (ret) {
                pr_err("Failed to setup powerdomains.\n");
diff --git a/arch/arm64/include/asm/suspend.h b/arch/arm64/include/asm/suspend.h
index e9c149c042e0..456d67c1f0fa 100644
--- a/arch/arm64/include/asm/suspend.h
+++ b/arch/arm64/include/asm/suspend.h
@@ -21,6 +21,7 @@ struct sleep_save_sp {
        phys_addr_t save_ptr_stash_phys;
 };
 
+extern int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long));
 extern void cpu_resume(void);
 extern int cpu_suspend(unsigned long);
 
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index b1925729c692..ede186cdd452 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -49,28 +49,39 @@
        orr     \dst, \dst, \mask               // dst|=(aff3>>rs3)
        .endm
 /*
- * Save CPU state for a suspend.  This saves callee registers, and allocates
- * space on the kernel stack to save the CPU specific registers + some
- * other data for resume.
+ * Save CPU state for a suspend and execute the suspend finisher.
+ * On success it will return 0 through cpu_resume - ie through a CPU
+ * soft/hard reboot from the reset vector.
+ * On failure it returns the suspend finisher return value or force
+ * -EOPNOTSUPP if the finisher erroneously returns 0 (the suspend finisher
+ * is not allowed to return, if it does this must be considered failure).
+ * It saves callee registers, and allocates space on the kernel stack
+ * to save the CPU specific registers + some other data for resume.
  *
  *  x0 = suspend finisher argument
+ *  x1 = suspend finisher function pointer
  */
-ENTRY(__cpu_suspend)
+ENTRY(__cpu_suspend_enter)
        stp     x29, lr, [sp, #-96]!
        stp     x19, x20, [sp,#16]
        stp     x21, x22, [sp,#32]
        stp     x23, x24, [sp,#48]
        stp     x25, x26, [sp,#64]
        stp     x27, x28, [sp,#80]
+       /*
+        * Stash suspend finisher and its argument in x20 and x19
+        */
+       mov     x19, x0
+       mov     x20, x1
        mov     x2, sp
        sub     sp, sp, #CPU_SUSPEND_SZ // allocate cpu_suspend_ctx
-       mov     x1, sp
+       mov     x0, sp
        /*
-        * x1 now points to struct cpu_suspend_ctx allocated on the stack
+        * x0 now points to struct cpu_suspend_ctx allocated on the stack
         */
-       str     x2, [x1, #CPU_CTX_SP]
-       ldr     x2, =sleep_save_sp
-       ldr     x2, [x2, #SLEEP_SAVE_SP_VIRT]
+       str     x2, [x0, #CPU_CTX_SP]
+       ldr     x1, =sleep_save_sp
+       ldr     x1, [x1, #SLEEP_SAVE_SP_VIRT]
 #ifdef CONFIG_SMP
        mrs     x7, mpidr_el1
        ldr     x9, =mpidr_hash
@@ -82,11 +93,21 @@ ENTRY(__cpu_suspend)
        ldp     w3, w4, [x9, #MPIDR_HASH_SHIFTS]
        ldp     w5, w6, [x9, #(MPIDR_HASH_SHIFTS + 8)]
        compute_mpidr_hash x8, x3, x4, x5, x6, x7, x10
-       add     x2, x2, x8, lsl #3
+       add     x1, x1, x8, lsl #3
 #endif
-       bl      __cpu_suspend_finisher
+       bl      __cpu_suspend_save
+       /*
+        * Grab suspend finisher in x20 and its argument in x19
+        */
+       mov     x0, x19
+       mov     x1, x20
+       /*
+        * We are ready for power down, fire off the suspend finisher
+        * in x1, with argument in x0
+        */
+       blr     x1
         /*
-        * Never gets here, unless suspend fails.
+        * Never gets here, unless suspend finisher fails.
         * Successful cpu_suspend should return from cpu_resume, returning
         * through this code path is considered an error
         * If the return value is set to 0 force x0 = -EOPNOTSUPP
@@ -103,7 +124,7 @@ ENTRY(__cpu_suspend)
        ldp     x27, x28, [sp, #80]
        ldp     x29, lr, [sp], #96
        ret
-ENDPROC(__cpu_suspend)
+ENDPROC(__cpu_suspend_enter)
        .ltorg
 
 /*
@@ -126,14 +147,12 @@ cpu_resume_after_mmu:
        ret
 ENDPROC(cpu_resume_after_mmu)
 
-       .data
 ENTRY(cpu_resume)
        bl      el2_setup               // if in EL2 drop to EL1 cleanly
 #ifdef CONFIG_SMP
        mrs     x1, mpidr_el1
-       adr     x4, mpidr_hash_ptr
-       ldr     x5, [x4]
-       add     x8, x4, x5              // x8 = struct mpidr_hash phys address
+       adrp    x8, mpidr_hash
+       add x8, x8, #:lo12:mpidr_hash // x8 = struct mpidr_hash phys address
         /* retrieve mpidr_hash members to compute the hash */
        ldr     x2, [x8, #MPIDR_HASH_MASK]
        ldp     w3, w4, [x8, #MPIDR_HASH_SHIFTS]
@@ -143,14 +162,15 @@ ENTRY(cpu_resume)
 #else
        mov     x7, xzr
 #endif
-       adr     x0, sleep_save_sp
+       adrp    x0, sleep_save_sp
+       add     x0, x0, #:lo12:sleep_save_sp
        ldr     x0, [x0, #SLEEP_SAVE_SP_PHYS]
        ldr     x0, [x0, x7, lsl #3]
        /* load sp from context */
        ldr     x2, [x0, #CPU_CTX_SP]
-       adr     x1, sleep_idmap_phys
+       adrp    x1, sleep_idmap_phys
        /* load physical address of identity map page table in x1 */
-       ldr     x1, [x1]
+       ldr     x1, [x1, #:lo12:sleep_idmap_phys]
        mov     sp, x2
        /*
         * cpu_do_resume expects x0 to contain context physical address
@@ -159,26 +179,3 @@ ENTRY(cpu_resume)
        bl      cpu_do_resume           // PC relative jump, MMU off
        b       cpu_resume_mmu          // Resume MMU, never returns
 ENDPROC(cpu_resume)
-
-       .align 3
-mpidr_hash_ptr:
-       /*
-        * offset of mpidr_hash symbol from current location
-        * used to obtain run-time mpidr_hash address with MMU off
-         */
-       .quad   mpidr_hash - .
-/*
- * physical address of identity mapped page tables
- */
-       .type   sleep_idmap_phys, #object
-ENTRY(sleep_idmap_phys)
-       .quad   0
-/*
- * struct sleep_save_sp {
- *     phys_addr_t *save_ptr_stash;
- *     phys_addr_t save_ptr_stash_phys;
- * };
- */
-       .type   sleep_save_sp, #object
-ENTRY(sleep_save_sp)
-       .space  SLEEP_SAVE_SP_SZ        // struct sleep_save_sp
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index 1fa9ce4afd8f..2d6b6065fe7f 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -5,26 +5,24 @@
 #include <asm/debug-monitors.h>
 #include <asm/pgtable.h>
 #include <asm/memory.h>
+#include <asm/mmu_context.h>
 #include <asm/smp_plat.h>
 #include <asm/suspend.h>
 #include <asm/tlbflush.h>
 
-extern int __cpu_suspend(unsigned long);
+extern int __cpu_suspend_enter(unsigned long arg, int (*fn)(unsigned long));
 /*
- * This is called by __cpu_suspend() to save the state, and do whatever
+ * This is called by __cpu_suspend_enter() to save the state, and do whatever
  * flushing is required to ensure that when the CPU goes to sleep we have
  * the necessary data available when the caches are not searched.
  *
- * @arg: Argument to pass to suspend operations
- * @ptr: CPU context virtual address
- * @save_ptr: address of the location where the context physical address
- *            must be saved
+ * ptr: CPU context virtual address
+ * save_ptr: address of the location where the context physical address
+ *           must be saved
  */
-int __cpu_suspend_finisher(unsigned long arg, struct cpu_suspend_ctx *ptr,
-                          phys_addr_t *save_ptr)
+void notrace __cpu_suspend_save(struct cpu_suspend_ctx *ptr,
+                               phys_addr_t *save_ptr)
 {
-       int cpu = smp_processor_id();
-
        *save_ptr = virt_to_phys(ptr);
 
        cpu_do_suspend(ptr);
@@ -35,8 +33,6 @@ int __cpu_suspend_finisher(unsigned long arg, struct cpu_suspend_ctx *ptr,
         */
        __flush_dcache_area(ptr, sizeof(*ptr));
        __flush_dcache_area(save_ptr, sizeof(*save_ptr));
-
-       return cpu_ops[cpu]->cpu_suspend(arg);
 }
 
 /*
@@ -56,15 +52,15 @@ void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
 }
 
 /**
- * cpu_suspend
+ * cpu_suspend() - function to enter a low-power state
+ * @arg: argument to pass to CPU suspend operations
  *
- * @arg: argument to pass to the finisher function
+ * Return: 0 on success, -EOPNOTSUPP if CPU suspend hook not initialized, CPU
+ * operations back-end error code otherwise.
  */
 int cpu_suspend(unsigned long arg)
 {
-       struct mm_struct *mm = current->active_mm;
-       int ret, cpu = smp_processor_id();
-       unsigned long flags;
+       int cpu = smp_processor_id();
 
        /*
         * If cpu_ops have not been registered or suspend
@@ -72,6 +68,21 @@ int cpu_suspend(unsigned long arg)
         */
        if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_suspend)
                return -EOPNOTSUPP;
+       return cpu_ops[cpu]->cpu_suspend(arg);
+}
+
+/*
+ * __cpu_suspend
+ *
+ * arg: argument to pass to the finisher function
+ * fn: finisher function pointer
+ *
+ */
+int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
+{
+       struct mm_struct *mm = current->active_mm;
+       int ret;
+       unsigned long flags;
 
        /*
         * From this point debug exceptions are disabled to prevent
@@ -86,16 +97,27 @@ int cpu_suspend(unsigned long arg)
         * page tables, so that the thread address space is properly
         * set-up on function return.
         */
-       ret = __cpu_suspend(arg);
+       ret = __cpu_suspend_enter(arg, fn);
        if (ret == 0) {
-               cpu_switch_mm(mm->pgd, mm);
+               /*
+                * We are resuming from reset with TTBR0_EL1 set to the
+                * idmap to enable the MMU; restore the active_mm mappings in
+                * TTBR0_EL1 unless the active_mm == &init_mm, in which case
+                * the thread entered __cpu_suspend with TTBR0_EL1 set to
+                * reserved TTBR0 page tables and should be restored as such.
+                */
+               if (mm == &init_mm)
+                       cpu_set_reserved_ttbr0();
+               else
+                       cpu_switch_mm(mm->pgd, mm);
+
                flush_tlb_all();
 
                /*
                 * Restore per-cpu offset before any kernel
                 * subsystem relying on it has a chance to run.
                 */
-               set_my_cpu_offset(per_cpu_offset(cpu));
+               set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
 
                /*
                 * Restore HW breakpoint registers to sane values
@@ -116,10 +138,10 @@ int cpu_suspend(unsigned long arg)
        return ret;
 }
 
-extern struct sleep_save_sp sleep_save_sp;
-extern phys_addr_t sleep_idmap_phys;
+struct sleep_save_sp sleep_save_sp;
+phys_addr_t sleep_idmap_phys;
 
-static int cpu_suspend_init(void)
+static int __init cpu_suspend_init(void)
 {
        void *ctx_ptr;
 
diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c
index 27c93f41166f..fc0927a162ff 100644
--- a/arch/powerpc/kernel/mce_power.c
+++ b/arch/powerpc/kernel/mce_power.c
@@ -78,7 +78,7 @@ static long mce_handle_derror(uint64_t dsisr, uint64_t slb_error_bits)
        }
        if (dsisr & P7_DSISR_MC_TLB_MULTIHIT_MFTLB) {
                if (cur_cpu_spec && cur_cpu_spec->flush_tlb)
-                       cur_cpu_spec->flush_tlb(TLBIEL_INVAL_PAGE);
+                       cur_cpu_spec->flush_tlb(TLBIEL_INVAL_SET);
                /* reset error bits */
                dsisr &= ~P7_DSISR_MC_TLB_MULTIHIT_MFTLB;
        }
@@ -109,7 +109,7 @@ static long mce_handle_common_ierror(uint64_t srr1)
                break;
        case P7_SRR1_MC_IFETCH_TLB_MULTIHIT:
                if (cur_cpu_spec && cur_cpu_spec->flush_tlb) {
-                       cur_cpu_spec->flush_tlb(TLBIEL_INVAL_PAGE);
+                       cur_cpu_spec->flush_tlb(TLBIEL_INVAL_SET);
                        handled = 1;
                }
                break;
diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c
index 75702e207b29..f7089fcfaa5d 100644
--- a/arch/powerpc/kernel/udbg_16550.c
+++ b/arch/powerpc/kernel/udbg_16550.c
@@ -69,8 +69,12 @@ static void udbg_uart_putc(char c)
 
 static int udbg_uart_getc_poll(void)
 {
-       if (!udbg_uart_in || !(udbg_uart_in(UART_LSR) & LSR_DR))
+       if (!udbg_uart_in)
+               return -1;
+
+       if (!(udbg_uart_in(UART_LSR) & LSR_DR))
                return udbg_uart_in(UART_RBR);
+
        return -1;
 }
 
diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
index 2a46ca720afc..2874be9aef0a 100644
--- a/arch/x86/include/asm/vsyscall.h
+++ b/arch/x86/include/asm/vsyscall.h
@@ -34,7 +34,7 @@ static inline unsigned int __getcpu(void)
                native_read_tscp(&p);
        } else {
                /* Load per CPU data from GDT */
-               asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
+               asm volatile ("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
        }
 
        return p;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 047f540cf3f7..2f9858894d5a 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -2886,6 +2886,17 @@ static struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
        return uncore_pmu_to_box(uncore_event_to_pmu(event), smp_processor_id());
 }
 
+/*
+ * Using uncore_pmu_event_init pmu event_init callback
+ * as a detection point for uncore events.
+ */
+static int uncore_pmu_event_init(struct perf_event *event);
+
+static bool is_uncore_event(struct perf_event *event)
+{
+       return event->pmu->event_init == uncore_pmu_event_init;
+}
+
 static int
 uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, bool dogrp)
 {
@@ -2900,13 +2911,18 @@ uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, b
                return -EINVAL;
 
        n = box->n_events;
-       box->event_list[n] = leader;
-       n++;
+
+       if (is_uncore_event(leader)) {
+               box->event_list[n] = leader;
+               n++;
+       }
+
        if (!dogrp)
                return n;
 
        list_for_each_entry(event, &leader->sibling_list, group_entry) {
-               if (event->state <= PERF_EVENT_STATE_OFF)
+               if (!is_uncore_event(event) ||
+                   event->state <= PERF_EVENT_STATE_OFF)
                        continue;
 
                if (n >= max_count)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 49088b8a3ee3..dcae8fa2bf04 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4384,7 +4384,7 @@ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm)
         * zap all shadow pages.
         */
        if (unlikely(kvm_current_mmio_generation(kvm) == 0)) {
-               printk_ratelimited(KERN_INFO "kvm: zapping shadow pages for mmio generation wraparound\n");
+               printk_ratelimited(KERN_DEBUG "kvm: zapping shadow pages for mmio generation wraparound\n");
                kvm_mmu_invalidate_zap_all_pages(kvm);
        }
 }
diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
index 431e87544411..ab6ba35a9357 100644
--- a/arch/x86/vdso/vma.c
+++ b/arch/x86/vdso/vma.c
@@ -117,30 +117,45 @@ subsys_initcall(init_vdso);
 
 struct linux_binprm;
 
-/* Put the vdso above the (randomized) stack with another randomized offset.
-   This way there is no hole in the middle of address space.
-   To save memory make sure it is still in the same PTE as the stack top.
-   This doesn't give that many random bits */
+/*
+ * Put the vdso above the (randomized) stack with another randomized
+ * offset.  This way there is no hole in the middle of address space.
+ * To save memory make sure it is still in the same PTE as the stack
+ * top.  This doesn't give that many random bits.
+ *
+ * Note that this algorithm is imperfect: the distribution of the vdso
+ * start address within a PMD is biased toward the end.
+ *
+ * Only used for the 64-bit and x32 vdsos.
+ */
 static unsigned long vdso_addr(unsigned long start, unsigned len)
 {
        unsigned long addr, end;
        unsigned offset;
-       end = (start + PMD_SIZE - 1) & PMD_MASK;
+
+       /*
+        * Round up the start address.  It can start out unaligned as a result
+        * of stack start randomization.
+        */
+       start = PAGE_ALIGN(start);
+
+       /* Round the lowest possible end address up to a PMD boundary. */
+       end = (start + len + PMD_SIZE - 1) & PMD_MASK;
        if (end >= TASK_SIZE_MAX)
                end = TASK_SIZE_MAX;
        end -= len;
-       /* This loses some more bits than a modulo, but is cheaper */
-       offset = get_random_int() & (PTRS_PER_PTE - 1);
-       addr = start + (offset << PAGE_SHIFT);
-       if (addr >= end)
-               addr = end;
+
+       if (end > start) {
+               offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
+               addr = start + (offset << PAGE_SHIFT);
+       } else {
+               addr = start;
+       }
 
        /*
-        * page-align it here so that get_unmapped_area doesn't
-        * align it wrongfully again to the next page. addr can come in 4K
-        * unaligned here as a result of stack start randomization.
+        * Forcibly align the final address in case we have a hardware
+        * issue that requires alignment for performance reasons.
         */
-       addr = PAGE_ALIGN(addr);
        addr = align_vdso_addr(addr);
 
        return addr;
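
The rewritten placement logic is easier to read outside of diff context. Below is a minimal standalone sketch of the same calculation (illustration only: PAGE_SHIFT and PMD_SIZE are hard-coded for x86-64, rand() stands in for get_random_int(), and the final align_vdso_addr() step is left out):

#include <stdlib.h>

#define PAGE_SHIFT    12
#define PAGE_SIZE     (1UL << PAGE_SHIFT)
#define PMD_SIZE      (1UL << 21)                 /* 2 MiB on x86-64 */
#define PMD_MASK      (~(PMD_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/* Pick a page-aligned vdso base somewhere in [start, end]. */
unsigned long vdso_addr_sketch(unsigned long start, unsigned long len,
                               unsigned long task_size_max)
{
        unsigned long addr, end, offset;

        /* The stack top can be unaligned, so round it up first. */
        start = PAGE_ALIGN(start);

        /* Round the lowest possible end address up to a PMD boundary. */
        end = (start + len + PMD_SIZE - 1) & PMD_MASK;
        if (end >= task_size_max)
                end = task_size_max;
        end -= len;

        if (end > start) {
                /* Roughly uniform choice among the candidate pages. */
                offset = rand() % (((end - start) >> PAGE_SHIFT) + 1);
                addr = start + (offset << PAGE_SHIFT);
        } else {
                addr = start;
        }
        return addr;
}
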
diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c
index f8721278601c..78d38352bf8d 100644
--- a/block/blk-mq-cpumap.c
+++ b/block/blk-mq-cpumap.c
@@ -95,7 +95,7 @@ unsigned int *blk_mq_make_queue_map(struct blk_mq_reg *reg)
        unsigned int *map;
 
        /* If cpus are offline, map them to first hctx */
-       map = kzalloc_node(sizeof(*map) * num_possible_cpus(), GFP_KERNEL,
+       map = kzalloc_node(sizeof(*map) * nr_cpu_ids, GFP_KERNEL,
                                reg->numa_node);
        if (!map)
                return NULL;
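
The nr_cpu_ids change matters whenever the possible-CPU mask is sparse (illustrative case, not from the patch): if only CPUs 0 and 2 are possible, num_possible_cpus() is 2 while valid CPU numbers run up to nr_cpu_ids - 1 = 2, so a per-CPU map sized by num_possible_cpus() gets indexed one element past its end for CPU 2.
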
diff --git a/block/genhd.c b/block/genhd.c
index e6723bd4d7a1..a8d586a729bb 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1070,9 +1070,16 @@ int disk_expand_part_tbl(struct gendisk *disk, int partno)
        struct disk_part_tbl *old_ptbl = disk->part_tbl;
        struct disk_part_tbl *new_ptbl;
        int len = old_ptbl ? old_ptbl->len : 0;
-       int target = partno + 1;
+       int i, target;
        size_t size;
-       int i;
+
+       /*
+        * check for int overflow, since we can get here from blkpg_ioctl()
+        * with a user passed 'partno'.
+        */
+       target = partno + 1;
+       if (target < 0)
+               return -EINVAL;
 
        /* disk_max_parts() is zero during initialization, ignore if so */
        if (disk_max_parts(disk) && target > disk_max_parts(disk))
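
To see what the new check guards against (illustrative value, not from the patch): blkpg_ioctl() hands this function a user-controlled int partno, so partno = INT_MAX makes target = partno + 1 wrap to a negative number, and without the target < 0 test the later table-size calculation would be performed with a negative element count.
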
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index c14a00d3dca6..19f650556e2e 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -257,7 +257,7 @@ int acpi_bus_init_power(struct acpi_device *device)
 
        device->power.state = ACPI_STATE_UNKNOWN;
        if (!acpi_device_is_present(device))
-               return 0;
+               return -ENXIO;
 
        result = acpi_device_get_power(device, &state);
        if (result)
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 493a342efa44..666beea3bf1c 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -865,7 +865,7 @@ static void acpi_free_power_resources_lists(struct acpi_device *device)
        if (device->wakeup.flags.valid)
                acpi_power_resources_list_free(&device->wakeup.resources);
 
-       if (!device->flags.power_manageable)
+       if (!device->power.flags.power_resources)
                return;
 
        for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) {
@@ -1554,10 +1554,8 @@ static void acpi_bus_get_power_flags(struct acpi_device *device)
                        device->power.flags.power_resources)
                device->power.states[ACPI_STATE_D3_COLD].flags.os_accessible = 1;
 
-       if (acpi_bus_init_power(device)) {
-               acpi_free_power_resources_lists(device);
+       if (acpi_bus_init_power(device))
                device->flags.power_manageable = 0;
-       }
 }
 
 static void acpi_bus_get_flags(struct acpi_device *device)
@@ -2043,13 +2041,18 @@ static void acpi_bus_attach(struct acpi_device *device)
        /* Skip devices that are not present. */
        if (!acpi_device_is_present(device)) {
                device->flags.visited = false;
+               device->flags.power_manageable = 0;
                return;
        }
        if (device->handler)
                goto ok;
 
        if (!device->flags.initialized) {
-               acpi_bus_update_power(device, NULL);
+               device->flags.power_manageable =
+                       device->power.states[ACPI_STATE_D0].flags.valid;
+               if (acpi_bus_init_power(device))
+                       device->flags.power_manageable = 0;
+
                device->flags.initialized = true;
        }
        device->flags.visited = false;
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 59dc8086e4fa..45d0fa78981c 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -254,13 +254,15 @@ static ssize_t store_drivers_probe(struct bus_type *bus,
                                   const char *buf, size_t count)
 {
        struct device *dev;
+       int err = -EINVAL;
 
        dev = bus_find_device_by_name(bus, NULL, buf);
        if (!dev)
                return -ENODEV;
-       if (bus_rescan_devices_helper(dev, NULL) != 0)
-               return -EINVAL;
-       return count;
+       if (bus_rescan_devices_helper(dev, NULL) == 0)
+               err = count;
+       put_device(dev);
+       return err;
 }
 
 static struct device *next_device(struct klist_iter *i)
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 7cd42ea30d6d..d92c7d9b959a 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1743,6 +1743,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_I405X) },
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2) },
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 91bc66b4b151..4850da34d3ef 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -511,6 +511,7 @@
 #define USB_DEVICE_ID_KYE_GPEN_560     0x5003
 #define USB_DEVICE_ID_KYE_EASYPEN_I405X        0x5010
 #define USB_DEVICE_ID_KYE_MOUSEPEN_I608X       0x5011
+#define USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2     0x501a
 #define USB_DEVICE_ID_KYE_EASYPEN_M610X        0x5013
 
 #define USB_VENDOR_ID_LABTEC           0x1020
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index a713e6211419..4b87bb164f30 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -312,6 +312,9 @@ static const struct hid_device_id hid_battery_quirks[] = {
                               USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI),
          HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
+                              USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO),
+         HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
+       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
                USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI),
          HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
        {}
diff --git a/drivers/hid/hid-kye.c b/drivers/hid/hid-kye.c
index b92bf01a1ae8..158fcf577fae 100644
--- a/drivers/hid/hid-kye.c
+++ b/drivers/hid/hid-kye.c
@@ -323,6 +323,7 @@ static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc,
                }
                break;
        case USB_DEVICE_ID_KYE_MOUSEPEN_I608X:
+       case USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2:
                if (*rsize == MOUSEPEN_I608X_RDESC_ORIG_SIZE) {
                        rdesc = mousepen_i608x_rdesc_fixed;
                        *rsize = sizeof(mousepen_i608x_rdesc_fixed);
@@ -415,6 +416,7 @@ static int kye_probe(struct hid_device *hdev, const struct hid_device_id *id)
        switch (id->product) {
        case USB_DEVICE_ID_KYE_EASYPEN_I405X:
        case USB_DEVICE_ID_KYE_MOUSEPEN_I608X:
+       case USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2:
        case USB_DEVICE_ID_KYE_EASYPEN_M610X:
                ret = kye_tablet_enable(hdev);
                if (ret) {
@@ -446,6 +448,8 @@ static const struct hid_device_id kye_devices[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE,
                                USB_DEVICE_ID_KYE_MOUSEPEN_I608X) },
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE,
+                               USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_KYE,
                                USB_DEVICE_ID_KYE_EASYPEN_M610X) },
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE,
                                USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) },
diff --git a/drivers/hid/hid-roccat-pyra.c b/drivers/hid/hid-roccat-pyra.c
index 1a07e07d99a0..47d7e74231e5 100644
--- a/drivers/hid/hid-roccat-pyra.c
+++ b/drivers/hid/hid-roccat-pyra.c
@@ -35,6 +35,8 @@ static struct class *pyra_class;
 static void profile_activated(struct pyra_device *pyra,
                unsigned int new_profile)
 {
+       if (new_profile >= ARRAY_SIZE(pyra->profile_settings))
+               return;
        pyra->actual_profile = new_profile;
        pyra->actual_cpi = pyra->profile_settings[pyra->actual_profile].y_cpi;
 }
@@ -257,9 +259,11 @@ static ssize_t pyra_sysfs_write_settings(struct file *fp,
        if (off != 0 || count != PYRA_SIZE_SETTINGS)
                return -EINVAL;
 
-       mutex_lock(&pyra->pyra_lock);
-
        settings = (struct pyra_settings const *)buf;
+       if (settings->startup_profile >= ARRAY_SIZE(pyra->profile_settings))
+               return -EINVAL;
+
+       mutex_lock(&pyra->pyra_lock);
 
        retval = pyra_set_settings(usb_dev, settings);
        if (retval) {
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index 42eebd14de1f..6e5d8fe0ce8f 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -136,6 +136,7 @@ struct i2c_hid {
                                                   * descriptor. */
        unsigned int            bufsize;        /* i2c buffer size */
        char                    *inbuf;         /* Input buffer */
+       char                    *rawbuf;        /* Raw Input buffer */
        char                    *cmdbuf;        /* Command buffer */
        char                    *argsbuf;       /* Command arguments buffer */
 
@@ -355,7 +356,7 @@ static int i2c_hid_hwreset(struct i2c_client *client)
 static void i2c_hid_get_input(struct i2c_hid *ihid)
 {
        int ret, ret_size;
-       int size = le16_to_cpu(ihid->hdesc.wMaxInputLength);
+       int size = ihid->bufsize;
 
        ret = i2c_master_recv(ihid->client, ihid->inbuf, size);
        if (ret != size) {
@@ -482,9 +483,11 @@ static void i2c_hid_find_max_report(struct hid_device *hid, unsigned int type,
 static void i2c_hid_free_buffers(struct i2c_hid *ihid)
 {
        kfree(ihid->inbuf);
+       kfree(ihid->rawbuf);
        kfree(ihid->argsbuf);
        kfree(ihid->cmdbuf);
        ihid->inbuf = NULL;
+       ihid->rawbuf = NULL;
        ihid->cmdbuf = NULL;
        ihid->argsbuf = NULL;
        ihid->bufsize = 0;
@@ -500,10 +503,11 @@ static int i2c_hid_alloc_buffers(struct i2c_hid *ihid, size_t report_size)
                       report_size; /* report */
 
        ihid->inbuf = kzalloc(report_size, GFP_KERNEL);
+       ihid->rawbuf = kzalloc(report_size, GFP_KERNEL);
        ihid->argsbuf = kzalloc(args_len, GFP_KERNEL);
        ihid->cmdbuf = kzalloc(sizeof(union command) + args_len, GFP_KERNEL);
 
-       if (!ihid->inbuf || !ihid->argsbuf || !ihid->cmdbuf) {
+       if (!ihid->inbuf || !ihid->rawbuf || !ihid->argsbuf || !ihid->cmdbuf) {
                i2c_hid_free_buffers(ihid);
                return -ENOMEM;
        }
@@ -530,12 +534,12 @@ static int i2c_hid_get_raw_report(struct hid_device *hid,
 
        ret = i2c_hid_get_report(client,
                        report_type == HID_FEATURE_REPORT ? 0x03 : 0x01,
-                       report_number, ihid->inbuf, ask_count);
+                       report_number, ihid->rawbuf, ask_count);
 
        if (ret < 0)
                return ret;
 
-       ret_count = ihid->inbuf[0] | (ihid->inbuf[1] << 8);
+       ret_count = ihid->rawbuf[0] | (ihid->rawbuf[1] << 8);
 
        if (ret_count <= 2)
                return 0;
@@ -544,7 +548,7 @@ static int i2c_hid_get_raw_report(struct hid_device *hid,
 
        /* The query buffer contains the size, dropping it in the reply */
        count = min(count, ret_count - 2);
-       memcpy(buf, ihid->inbuf + 2, count);
+       memcpy(buf, ihid->rawbuf + 2, count);
 
        return count;
 }
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index deb364306636..473c0c43af52 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -116,6 +116,7 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_SIGMA_MICRO, USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X, HID_QUIRK_MULTI_INPUT },
+       { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS1, HID_QUIRK_NO_INIT_REPORTS },
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index fa920469bf10..505fe29c75b0 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -202,9 +202,16 @@ static void vmbus_process_rescind_offer(struct work_struct *work)
        unsigned long flags;
        struct vmbus_channel *primary_channel;
        struct vmbus_channel_relid_released msg;
+       struct device *dev;
+
+       if (channel->device_obj) {
+               dev = get_device(&channel->device_obj->device);
+               if (dev) {
+                       vmbus_device_unregister(channel->device_obj);
+                       put_device(dev);
+               }
+       }
 
-       if (channel->device_obj)
-               vmbus_device_unregister(channel->device_obj);
        memset(&msg, 0, sizeof(struct vmbus_channel_relid_released));
        msg.child_relid = channel->offermsg.child_relid;
        msg.header.msgtype = CHANNELMSG_RELID_RELEASED;
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 40f6b47c28f6..8855ecbc36be 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1768,7 +1768,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
        struct dma_pte *first_pte = NULL, *pte = NULL;
        phys_addr_t uninitialized_var(pteval);
        int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
-       unsigned long sg_res;
+       unsigned long sg_res = 0;
        unsigned int largepage_lvl = 0;
        unsigned long lvl_pages = 0;
 
@@ -1779,10 +1779,8 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 
        prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
 
-       if (sg)
-               sg_res = 0;
-       else {
-               sg_res = nr_pages + 1;
+       if (!sg) {
+               sg_res = nr_pages;
                pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
        }
 
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
index 6b1a6ef9f1a8..0c3a64708409 100644
--- a/drivers/misc/genwqe/card_utils.c
+++ b/drivers/misc/genwqe/card_utils.c
@@ -490,6 +490,8 @@ int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr,
                                 m->nr_pages,
                                 1,             /* write by caller */
                                 m->page_list); /* ptrs to pages */
+       if (rc < 0)
+               goto fail_get_user_pages;
 
        /* assumption: get_user_pages can be killed by signals. */
        if (rc < m->nr_pages) {
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 9ddef4763541..7e0176321aff 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1343,6 +1343,8 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
 
        sdhci_runtime_pm_get(host);
 
+       present = mmc_gpio_get_cd(host->mmc);
+
        spin_lock_irqsave(&host->lock, flags);
 
        WARN_ON(host->mrq != NULL);
@@ -1371,7 +1373,6 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
         *     zero: cd-gpio is used, and card is removed
         *     one: cd-gpio is used, and card is present
         */
-       present = mmc_gpio_get_cd(host->mmc);
        if (present < 0) {
                /* If polling, assume that the card is always present. */
                if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
@@ -2082,15 +2083,18 @@ static void sdhci_card_event(struct mmc_host *mmc)
 {
        struct sdhci_host *host = mmc_priv(mmc);
        unsigned long flags;
+       int present;
 
        /* First check if client has provided their own card event */
        if (host->ops->card_event)
                host->ops->card_event(host);
 
+       present = sdhci_do_get_cd(host);
+
        spin_lock_irqsave(&host->lock, flags);
 
        /* Check host->mrq first in case we are runtime suspended */
-       if (host->mrq && !sdhci_do_get_cd(host)) {
+       if (host->mrq && !present) {
                pr_err("%s: Card removed during transfer!\n",
                        mmc_hostname(host->mmc));
                pr_err("%s: Resetting controller.\n",
diff --git a/drivers/mtd/tests/torturetest.c b/drivers/mtd/tests/torturetest.c
index eeab96973cf0..b55bc52a1340 100644
--- a/drivers/mtd/tests/torturetest.c
+++ b/drivers/mtd/tests/torturetest.c
@@ -264,7 +264,9 @@ static int __init tort_init(void)
                int i;
                void *patt;
 
-               mtdtest_erase_good_eraseblocks(mtd, bad_ebs, eb, ebcnt);
+               err = mtdtest_erase_good_eraseblocks(mtd, bad_ebs, eb, ebcnt);
+               if (err)
+                       goto out;
 
                /* Check if the eraseblocks contain only 0xFF bytes */
                if (check) {
diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
index ec2c2dc1c1ca..2a1b6e037e1a 100644
--- a/drivers/mtd/ubi/upd.c
+++ b/drivers/mtd/ubi/upd.c
@@ -133,6 +133,10 @@ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
        ubi_assert(!vol->updating && !vol->changing_leb);
        vol->updating = 1;
 
+       vol->upd_buf = vmalloc(ubi->leb_size);
+       if (!vol->upd_buf)
+               return -ENOMEM;
+
        err = set_update_marker(ubi, vol);
        if (err)
                return err;
@@ -152,14 +156,12 @@ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
                err = clear_update_marker(ubi, vol, 0);
                if (err)
                        return err;
+
+               vfree(vol->upd_buf);
                vol->updating = 0;
                return 0;
        }
 
-       vol->upd_buf = vmalloc(ubi->leb_size);
-       if (!vol->upd_buf)
-               return -ENOMEM;
-
        vol->upd_ebs = div_u64(bytes + vol->usable_leb_size - 1,
                               vol->usable_leb_size);
        vol->upd_bytes = bytes;
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 02317c1c0238..68b924ec222e 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1205,7 +1205,6 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 
        err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
        if (err) {
-               kmem_cache_free(ubi_wl_entry_slab, e1);
                if (e2)
                        kmem_cache_free(ubi_wl_entry_slab, e2);
                goto out_ro;
@@ -1219,10 +1218,8 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
                dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
                       e2->pnum, vol_id, lnum);
                err = do_sync_erase(ubi, e2, vol_id, lnum, 0);
-               if (err) {
-                       kmem_cache_free(ubi_wl_entry_slab, e2);
+               if (err)
                        goto out_ro;
-               }
        }
 
        dbg_wl("done");
@@ -1258,10 +1255,9 @@ out_not_moved:
 
        ubi_free_vid_hdr(ubi, vid_hdr);
        err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
-       if (err) {
-               kmem_cache_free(ubi_wl_entry_slab, e2);
+       if (err)
                goto out_ro;
-       }
+
        mutex_unlock(&ubi->move_mutex);
        return 0;
 
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 0b7a4c3b01a2..03e7f0cbda8c 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -734,7 +734,7 @@ static int peak_usb_create_dev(struct peak_usb_adapter *peak_usb_adapter,
        dev->cmd_buf = kmalloc(PCAN_USB_MAX_CMD_LEN, GFP_KERNEL);
        if (!dev->cmd_buf) {
                err = -ENOMEM;
-               goto lbl_set_intf_data;
+               goto lbl_free_candev;
        }
 
        dev->udev = usb_dev;
@@ -773,7 +773,7 @@ static int peak_usb_create_dev(struct peak_usb_adapter *peak_usb_adapter,
        err = register_candev(netdev);
        if (err) {
                dev_err(&intf->dev, "couldn't register CAN device: %d\n", err);
-               goto lbl_free_cmd_buf;
+               goto lbl_restore_intf_data;
        }
 
        if (dev->prev_siblings)
@@ -786,14 +786,14 @@ static int peak_usb_create_dev(struct peak_usb_adapter *peak_usb_adapter,
        if (dev->adapter->dev_init) {
                err = dev->adapter->dev_init(dev);
                if (err)
-                       goto lbl_free_cmd_buf;
+                       goto lbl_unregister_candev;
        }
 
        /* set bus off */
        if (dev->adapter->dev_set_bus) {
                err = dev->adapter->dev_set_bus(dev, 0);
                if (err)
-                       goto lbl_free_cmd_buf;
+                       goto lbl_unregister_candev;
        }
 
        /* get device number early */
@@ -805,11 +805,14 @@ static int peak_usb_create_dev(struct peak_usb_adapter *peak_usb_adapter,
 
        return 0;
 
-lbl_free_cmd_buf:
-       kfree(dev->cmd_buf);
+lbl_unregister_candev:
+       unregister_candev(netdev);
 
-lbl_set_intf_data:
+lbl_restore_intf_data:
        usb_set_intfdata(intf, dev->prev_siblings);
+       kfree(dev->cmd_buf);
+
+lbl_free_candev:
        free_candev(netdev);
 
        return err;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index 263dd921edc4..f7f796a2c50b 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -333,8 +333,6 @@ static int pcan_usb_pro_send_req(struct peak_usb_device *dev, int req_id,
        if (!(dev->state & PCAN_USB_STATE_CONNECTED))
                return 0;
 
-       memset(req_addr, '\0', req_size);
-
        req_type = USB_TYPE_VENDOR | USB_RECIP_OTHER;
 
        switch (req_id) {
@@ -345,6 +343,7 @@ static int pcan_usb_pro_send_req(struct peak_usb_device *dev, int req_id,
        default:
                p = usb_rcvctrlpipe(dev->udev, 0);
                req_type |= USB_DIR_IN;
+               memset(req_addr, '\0', req_size);
                break;
        }
 
diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c
index 0583c69d26db..ddaad712c59a 100644
--- a/drivers/net/wireless/ath/ath5k/qcu.c
+++ b/drivers/net/wireless/ath/ath5k/qcu.c
@@ -225,13 +225,7 @@ ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
        } else {
                switch (queue_type) {
                case AR5K_TX_QUEUE_DATA:
-                       for (queue = AR5K_TX_QUEUE_ID_DATA_MIN;
-                               ah->ah_txq[queue].tqi_type !=
-                               AR5K_TX_QUEUE_INACTIVE; queue++) {
-
-                               if (queue > AR5K_TX_QUEUE_ID_DATA_MAX)
-                                       return -EINVAL;
-                       }
+                       queue = queue_info->tqi_subtype;
                        break;
                case AR5K_TX_QUEUE_UAPSD:
                        queue = AR5K_TX_QUEUE_ID_UAPSD;
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 0acd4b5a4892..32ae0a47fed0 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -216,8 +216,8 @@
 #define AH_WOW_BEACON_MISS             BIT(3)
 
 enum ath_hw_txq_subtype {
-       ATH_TXQ_AC_BE = 0,
-       ATH_TXQ_AC_BK = 1,
+       ATH_TXQ_AC_BK = 0,
+       ATH_TXQ_AC_BE = 1,
        ATH_TXQ_AC_VI = 2,
        ATH_TXQ_AC_VO = 3,
 };
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 5f727588ca27..8f93ed373fb5 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -311,14 +311,7 @@ int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
                q = ATH9K_NUM_TX_QUEUES - 3;
                break;
        case ATH9K_TX_QUEUE_DATA:
-               for (q = 0; q < ATH9K_NUM_TX_QUEUES; q++)
-                       if (ah->txq[q].tqi_type ==
-                           ATH9K_TX_QUEUE_INACTIVE)
-                               break;
-               if (q == ATH9K_NUM_TX_QUEUES) {
-                       ath_err(common, "No available TX queue\n");
-                       return -1;
-               }
+               q = qinfo->tqi_subtype;
                break;
        default:
                ath_err(common, "Invalid TX queue type: %u\n", type);
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
index d8948aa9c2d2..60dc387cc554 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
@@ -1394,7 +1394,7 @@ enum iwl_sf_scenario {
 #define SF_NUM_TIMEOUT_TYPES 2         /* Aging timer and Idle timer */
 
 /* smart FIFO default values */
-#define SF_W_MARK_SISO 4096
+#define SF_W_MARK_SISO 6144
 #define SF_W_MARK_MIMO2 8192
 #define SF_W_MARK_MIMO3 6144
 #define SF_W_MARK_LEGACY 4096
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 34dff3a09b98..5b428db6a150 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -214,14 +214,17 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
                res->flags |= IORESOURCE_SIZEALIGN;
                if (res->flags & IORESOURCE_IO) {
                        l &= PCI_BASE_ADDRESS_IO_MASK;
+                       sz &= PCI_BASE_ADDRESS_IO_MASK;
                        mask = PCI_BASE_ADDRESS_IO_MASK & (u32) IO_SPACE_LIMIT;
                } else {
                        l &= PCI_BASE_ADDRESS_MEM_MASK;
+                       sz &= PCI_BASE_ADDRESS_MEM_MASK;
                        mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
                }
        } else {
                res->flags |= (l & IORESOURCE_ROM_ENABLE);
                l &= PCI_ROM_ADDRESS_MASK;
+               sz &= PCI_ROM_ADDRESS_MASK;
                mask = (u32)PCI_ROM_ADDRESS_MASK;
        }
 
diff --git a/drivers/rtc/rtc-isl12057.c b/drivers/rtc/rtc-isl12057.c
index 7854a656628f..110eab817a4f 100644
--- a/drivers/rtc/rtc-isl12057.c
+++ b/drivers/rtc/rtc-isl12057.c
@@ -89,7 +89,7 @@ static void isl12057_rtc_regs_to_tm(struct rtc_time *tm, u8 *regs)
        tm->tm_min = bcd2bin(regs[ISL12057_REG_RTC_MN]);
 
        if (regs[ISL12057_REG_RTC_HR] & ISL12057_REG_RTC_HR_MIL) { /* AM/PM */
-               tm->tm_hour = bcd2bin(regs[ISL12057_REG_RTC_HR] & 0x0f);
+               tm->tm_hour = bcd2bin(regs[ISL12057_REG_RTC_HR] & 0x1f);
                if (regs[ISL12057_REG_RTC_HR] & ISL12057_REG_RTC_HR_PM)
                        tm->tm_hour += 12;
        } else {                                            /* 24 hour mode */
@@ -98,7 +98,7 @@ static void isl12057_rtc_regs_to_tm(struct rtc_time *tm, u8 *regs)
 
        tm->tm_mday = bcd2bin(regs[ISL12057_REG_RTC_DT]);
        tm->tm_wday = bcd2bin(regs[ISL12057_REG_RTC_DW]) - 1; /* starts at 1 */
-       tm->tm_mon  = bcd2bin(regs[ISL12057_REG_RTC_MO]) - 1; /* starts at 1 */
+       tm->tm_mon  = bcd2bin(regs[ISL12057_REG_RTC_MO] & 0x1f) - 1; /* ditto */
        tm->tm_year = bcd2bin(regs[ISL12057_REG_RTC_YR]) + 100;
 }
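
A worked example of the hour fix (hypothetical register value, assuming the DS1337-style layout implied by this code, where the low five bits hold the BCD hour in 12-hour mode): for 11 PM the register carries 0x11 in those bits plus the mode/PM flags, so the old 0x0f mask yielded bcd2bin(0x01) = 1 and a bogus 13:00 after the +12 adjustment, while the new 0x1f mask yields bcd2bin(0x11) = 11 and the correct 23:00. The month register gets the same treatment because its upper bits are not part of the BCD month value.
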
 
diff --git a/drivers/rtc/rtc-sirfsoc.c b/drivers/rtc/rtc-sirfsoc.c
index 3eb3642ae299..d2b1ab3162c2 100644
--- a/drivers/rtc/rtc-sirfsoc.c
+++ b/drivers/rtc/rtc-sirfsoc.c
@@ -290,14 +290,6 @@ static int sirfsoc_rtc_probe(struct platform_device *pdev)
        rtc_div = ((32768 / RTC_HZ) / 2) - 1;
        sirfsoc_rtc_iobrg_writel(rtc_div, rtcdrv->rtc_base + RTC_DIV);
 
-       rtcdrv->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
-                       &sirfsoc_rtc_ops, THIS_MODULE);
-       if (IS_ERR(rtcdrv->rtc)) {
-               err = PTR_ERR(rtcdrv->rtc);
-               dev_err(&pdev->dev, "can't register RTC device\n");
-               return err;
-       }
-
        /* 0x3 -> RTC_CLK */
        sirfsoc_rtc_iobrg_writel(SIRFSOC_RTC_CLK,
                        rtcdrv->rtc_base + RTC_CLOCK_SWITCH);
@@ -312,6 +304,14 @@ static int sirfsoc_rtc_probe(struct platform_device *pdev)
        rtcdrv->overflow_rtc =
                sirfsoc_rtc_iobrg_readl(rtcdrv->rtc_base + RTC_SW_VALUE);
 
+       rtcdrv->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
+                       &sirfsoc_rtc_ops, THIS_MODULE);
+       if (IS_ERR(rtcdrv->rtc)) {
+               err = PTR_ERR(rtcdrv->rtc);
+               dev_err(&pdev->dev, "can't register RTC device\n");
+               return err;
+       }
+
        rtcdrv->irq = platform_get_irq(pdev, 0);
        err = devm_request_irq(
                        &pdev->dev,
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index 119f7af94537..4dcb2929c01f 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -362,18 +362,28 @@ static int fsl_spi_bufs(struct spi_device *spi, struct spi_transfer *t,
 static void fsl_spi_do_one_msg(struct spi_message *m)
 {
        struct spi_device *spi = m->spi;
-       struct spi_transfer *t;
+       struct spi_transfer *t, *first;
        unsigned int cs_change;
        const int nsecs = 50;
        int status;
 
-       cs_change = 1;
-       status = 0;
+       /* Don't allow changes if CS is active */
+       first = list_first_entry(&m->transfers, struct spi_transfer,
+                       transfer_list);
        list_for_each_entry(t, &m->transfers, transfer_list) {
-               if (t->bits_per_word || t->speed_hz) {
-                       /* Don't allow changes if CS is active */
+               if ((first->bits_per_word != t->bits_per_word) ||
+                       (first->speed_hz != t->speed_hz)) {
                        status = -EINVAL;
+                       dev_err(&spi->dev,
+                               "bits_per_word/speed_hz should be same for the same SPI transfer\n");
+                       return;
+               }
+       }
 
+       cs_change = 1;
+       status = -EINVAL;
+       list_for_each_entry(t, &m->transfers, transfer_list) {
+               if (t->bits_per_word || t->speed_hz) {
                        if (cs_change)
                                status = fsl_spi_setup_transfer(spi, t);
                        if (status < 0)
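The rework above splits message handling into a validation pass and an execution pass, so a mismatched transfer is rejected before the controller has been reconfigured or the chip select asserted. A userspace sketch of the same two-pass shape (plain arrays instead of the kernel's spi_message and list_for_each_entry machinery):

/*
 * Validate-then-execute: refuse the whole message up front if any
 * transfer disagrees with the first one, so nothing is sent halfway.
 */
#include <stdio.h>

struct xfer {
	unsigned bits_per_word;
	unsigned speed_hz;
};

static int do_one_msg(const struct xfer *t, int n)
{
	int i;

	/* Pass 1: every transfer must match the first. */
	for (i = 1; i < n; i++) {
		if (t[i].bits_per_word != t[0].bits_per_word ||
		    t[i].speed_hz != t[0].speed_hz) {
			fprintf(stderr, "transfer %d differs, rejecting message\n", i);
			return -1;	/* nothing has been sent yet */
		}
	}

	/* Pass 2: only now touch the (stubbed) hardware. */
	for (i = 0; i < n; i++)
		printf("sending transfer %d (%u bits @ %u Hz)\n",
		       i, t[i].bits_per_word, t[i].speed_hz);
	return 0;
}

int main(void)
{
	struct xfer good[] = { {8, 1000000}, {8, 1000000} };
	struct xfer bad[]  = { {8, 1000000}, {16, 1000000} };

	do_one_msg(good, 2);
	do_one_msg(bad, 2);
	return 0;
}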
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 28ac3f3b7ec3..d46b4ccec8cd 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -321,7 +321,8 @@ static void n_tty_check_unthrottle(struct tty_struct *tty)
 
 static inline void put_tty_queue(unsigned char c, struct n_tty_data *ldata)
 {
-       *read_buf_addr(ldata, ldata->read_head++) = c;
+       *read_buf_addr(ldata, ldata->read_head) = c;
+       ldata->read_head++;
 }
 
 /**
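The put_tty_queue() change above restores the usual single-producer ring-buffer ordering: store the byte first, then advance the head index that a lockless reader checks. With the post-increment folded into the store expression, the compiler is free to bump the index before the byte lands. A minimal sketch of that rule (illustrative struct, no barriers shown; a production version would also publish the index with something like smp_store_release()):

#include <stddef.h>

#define BUF_SIZE 4096			/* power of two, like the tty read buffer */

struct ring {
	unsigned char buf[BUF_SIZE];
	size_t head;			/* written only by the producer */
	size_t tail;			/* written only by the consumer */
};

static void put_char(struct ring *r, unsigned char c)
{
	r->buf[r->head & (BUF_SIZE - 1)] = c;	/* 1: store the data */
	r->head++;				/* 2: then publish the index */
}

static int get_char(struct ring *r, unsigned char *c)
{
	if (r->tail == r->head)
		return 0;			/* empty */
	*c = r->buf[r->tail & (BUF_SIZE - 1)];
	r->tail++;
	return 1;
}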
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index 9cd706df3b33..7d3a3f5cb5ba 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -544,11 +544,15 @@ static void s3c24xx_serial_pm(struct uart_port *port, unsigned int level,
                              unsigned int old)
 {
        struct s3c24xx_uart_port *ourport = to_ourport(port);
+       int timeout = 10000;
 
        ourport->pm_level = level;
 
        switch (level) {
        case 3:
+               while (--timeout && !s3c24xx_serial_txempty_nofifo(port))
+                       udelay(100);
+
                if (!IS_ERR(ourport->baudclk))
                        clk_disable_unprepare(ourport->baudclk);
 
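The pm hunk above adds a bounded drain of the transmitter before the baud clock is gated. A small sketch of that bounded busy-wait pattern, with the register poll and delay stubbed out and all names illustrative:

#include <stdio.h>

/* stand-in for the driver's TX-empty check; pretend hardware here */
static int tx_empty(void)
{
	static int remaining = 3;	/* pretend three polls are needed */
	return remaining-- <= 0;
}

static void udelay_stub(int usecs)
{
	(void)usecs;			/* real code would spin for 'usecs' */
}

static void drain_tx_before_clock_gate(void)
{
	int timeout = 10000;

	/* wait for the FIFO to empty, but never hang forever */
	while (--timeout && !tx_empty())
		udelay_stub(100);

	if (!timeout)
		printf("gave up waiting, gating clock anyway\n");
	else
		printf("TX drained, safe to gate clock\n");
}

int main(void)
{
	drain_tx_before_clock_gate();
	return 0;
}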
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 331f06a91cc3..d7049c393a33 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1169,10 +1169,11 @@ next_desc:
        } else {
                control_interface = usb_ifnum_to_if(usb_dev, union_header->bMasterInterface0);
                data_interface = usb_ifnum_to_if(usb_dev, (data_interface_num = union_header->bSlaveInterface0));
-               if (!control_interface || !data_interface) {
-                       dev_dbg(&intf->dev, "no interfaces\n");
-                       return -ENODEV;
-               }
+       }
+
+       if (!control_interface || !data_interface) {
+               dev_dbg(&intf->dev, "no interfaces\n");
+               return -ENODEV;
        }
 
        if (data_interface_num != call_interface_num)
@@ -1448,6 +1449,7 @@ alloc_fail8:
                                &dev_attr_wCountryCodes);
                device_remove_file(&acm->control->dev,
                                &dev_attr_iCountryCodeRelDate);
+               kfree(acm->country_codes);
        }
        device_remove_file(&acm->control->dev, &dev_attr_bmCapabilities);
 alloc_fail7:
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index ebd8f218a788..9df5d6ec7eec 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -96,8 +96,6 @@ static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
        dma_addr_t dma = (dma_addr_t)pfn << PAGE_SHIFT;
        phys_addr_t paddr = dma;
 
-       BUG_ON(paddr != dma); /* truncation has occurred, should never happen */
-
        paddr |= baddr & ~PAGE_MASK;
 
        return paddr;
@@ -447,11 +445,11 @@ static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
 
        BUG_ON(dir == DMA_NONE);
 
-       xen_dma_unmap_page(hwdev, paddr, size, dir, attrs);
+       xen_dma_unmap_page(hwdev, dev_addr, size, dir, attrs);
 
        /* NOTE: We use dev_addr here, not paddr! */
        if (is_xen_swiotlb_buffer(dev_addr)) {
-               swiotlb_tbl_unmap_single(hwdev, paddr, size, dir);
+               swiotlb_tbl_unmap_single(hwdev, dev_addr, size, dir);
                return;
        }
 
@@ -495,14 +493,14 @@ xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
        BUG_ON(dir == DMA_NONE);
 
        if (target == SYNC_FOR_CPU)
-               xen_dma_sync_single_for_cpu(hwdev, paddr, size, dir);
+               xen_dma_sync_single_for_cpu(hwdev, dev_addr, size, dir);
 
        /* NOTE: We use dev_addr here, not paddr! */
        if (is_xen_swiotlb_buffer(dev_addr))
                swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);
 
        if (target == SYNC_FOR_DEVICE)
-               xen_dma_sync_single_for_cpu(hwdev, paddr, size, dir);
+               xen_dma_sync_single_for_device(hwdev, dev_addr, size, dir);
 
        if (dir != DMA_FROM_DEVICE)
                return;
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 451b00c86f6c..12e35566d2fc 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -1854,6 +1854,14 @@ int btrfs_delayed_delete_inode_ref(struct inode *inode)
 {
        struct btrfs_delayed_node *delayed_node;
 
+       /*
+        * we don't do delayed inode updates during log recovery because it
+        * leads to enospc problems.  This means we also can't do
+        * delayed inode refs
+        */
+       if (BTRFS_I(inode)->root->fs_info->log_root_recovering)
+               return -EAGAIN;
+
        delayed_node = btrfs_get_or_create_delayed_node(inode);
        if (IS_ERR(delayed_node))
                return PTR_ERR(delayed_node);
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index b53278c9fd97..94a85ee5b990 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -676,7 +676,7 @@ static int ceph_writepages_start(struct address_space *mapping,
        int rc = 0;
        unsigned wsize = 1 << inode->i_blkbits;
        struct ceph_osd_request *req = NULL;
-       int do_sync;
+       int do_sync = 0;
        u64 truncate_size, snap_size;
        u32 truncate_seq;
 
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index f4f050a69a48..339c41216d14 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -1461,15 +1461,18 @@ static void discard_cap_releases(struct ceph_mds_client *mdsc,
 
        dout("discard_cap_releases mds%d\n", session->s_mds);
 
-       /* zero out the in-progress message */
-       msg = list_first_entry(&session->s_cap_releases,
-                              struct ceph_msg, list_head);
-       head = msg->front.iov_base;
-       num = le32_to_cpu(head->num);
-       dout("discard_cap_releases mds%d %p %u\n", session->s_mds, msg, num);
-       head->num = cpu_to_le32(0);
-       msg->front.iov_len = sizeof(*head);
-       session->s_num_cap_releases += num;
+       if (!list_empty(&session->s_cap_releases)) {
+               /* zero out the in-progress message */
+               msg = list_first_entry(&session->s_cap_releases,
+                                       struct ceph_msg, list_head);
+               head = msg->front.iov_base;
+               num = le32_to_cpu(head->num);
+               dout("discard_cap_releases mds%d %p %u\n",
+                    session->s_mds, msg, num);
+               head->num = cpu_to_le32(0);
+               msg->front.iov_len = sizeof(*head);
+               session->s_num_cap_releases += num;
+       }
 
        /* requeue completed messages */
        while (!list_empty(&session->s_cap_releases_done)) {
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index a16315957ef3..23a51f08e3b5 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -476,12 +476,28 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
         * write_inode()
         */
        spin_lock(&inode->i_lock);
-       /* Clear I_DIRTY_PAGES if we've written out all dirty pages */
-       if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
-               inode->i_state &= ~I_DIRTY_PAGES;
+
        dirty = inode->i_state & I_DIRTY;
-       inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
+       inode->i_state &= ~I_DIRTY;
+
+       /*
+        * Paired with smp_mb() in __mark_inode_dirty().  This allows
+        * __mark_inode_dirty() to test i_state without grabbing i_lock -
+        * either they see the I_DIRTY bits cleared or we see the dirtied
+        * inode.
+        *
+        * I_DIRTY_PAGES is always cleared together above even if @mapping
+        * still has dirty pages.  The flag is reinstated after smp_mb() if
+        * necessary.  This guarantees that either __mark_inode_dirty()
+        * sees clear I_DIRTY_PAGES or we see PAGECACHE_TAG_DIRTY.
+        */
+       smp_mb();
+
+       if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
+               inode->i_state |= I_DIRTY_PAGES;
+
        spin_unlock(&inode->i_lock);
+
        /* Don't write the inode if only I_DIRTY_PAGES was set */
        if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
                int err = write_inode(inode, wbc);
@@ -1145,12 +1161,11 @@ void __mark_inode_dirty(struct inode *inode, int flags)
        }
 
        /*
-        * make sure that changes are seen by all cpus before we test i_state
-        * -- mikulas
+        * Paired with smp_mb() in __writeback_single_inode() for the
+        * following lockless i_state test.  See there for details.
         */
        smp_mb();
 
-       /* avoid the locking if we can */
        if ((inode->i_state & flags) == flags)
                return;
 
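The comments added above describe a store/barrier/load pairing. A userspace analogue, with C11 atomics standing in for the kernel's smp_mb() and flag names that are purely illustrative: each side publishes its own update, issues a full barrier, then checks the other side's state, so at least one of the two observes the other's update.

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool i_dirty;		/* stands in for I_DIRTY in i_state */
static atomic_bool mapping_tagged;	/* stands in for PAGECACHE_TAG_DIRTY */

/* writeback side: clear the flag, full barrier, re-check the mapping */
void writeback_side(void)
{
	atomic_store_explicit(&i_dirty, false, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* pairs with the fence below */
	if (atomic_load_explicit(&mapping_tagged, memory_order_relaxed))
		atomic_store_explicit(&i_dirty, true, memory_order_relaxed);
}

/* dirtying side: tag the mapping, full barrier, test i_dirty locklessly */
bool dirtying_side(void)
{
	atomic_store_explicit(&mapping_tagged, true, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* pairs with the fence above */
	if (atomic_load_explicit(&i_dirty, memory_order_relaxed))
		return false;		/* flag already set: nothing more to do */
	return true;			/* caller would take the lock and set it */
}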
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 34d2a1f2f400..daa53da1d286 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -1209,15 +1209,14 @@ static int copy_cred(struct svc_cred *target, struct svc_cred *source)
        return 0;
 }
 
-static long long
+static int
 compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
 {
-       long long res;
-
-       res = o1->len - o2->len;
-       if (res)
-               return res;
-       return (long long)memcmp(o1->data, o2->data, o1->len);
+       if (o1->len < o2->len)
+               return -1;
+       if (o1->len > o2->len)
+               return 1;
+       return memcmp(o1->data, o2->data, o1->len);
 }
 
 static int same_name(const char *n1, const char *n2)
@@ -1401,7 +1400,7 @@ add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
 static struct nfs4_client *
 find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
 {
-       long long cmp;
+       int cmp;
        struct rb_node *node = root->rb_node;
        struct nfs4_client *clp;
 
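The compare_blob() rewrite above avoids an unsigned-subtraction pitfall: the lengths are unsigned, so "o1->len - o2->len" wraps instead of going negative, and widening the wrapped value to long long afterwards cannot restore the sign. A standalone demonstration with plain structs rather than the nfsd types:

#include <stdio.h>
#include <string.h>

struct blob {
	unsigned int len;
	const char *data;
};

/* broken: the subtraction happens in unsigned int, then gets widened */
static long long compare_broken(const struct blob *a, const struct blob *b)
{
	long long res = a->len - b->len;	/* 3 - 5 becomes 4294967294 */
	if (res)
		return res;
	return memcmp(a->data, b->data, a->len);
}

/* fixed: explicit three-way comparison, as in the hunk above */
static int compare_fixed(const struct blob *a, const struct blob *b)
{
	if (a->len < b->len)
		return -1;
	if (a->len > b->len)
		return 1;
	return memcmp(a->data, b->data, a->len);
}

int main(void)
{
	struct blob shortb = { 3, "abc" };
	struct blob longb  = { 5, "abcde" };

	printf("broken: %lld (expected negative)\n", compare_broken(&shortb, &longb));
	printf("fixed:  %d\n", compare_fixed(&shortb, &longb));
	return 0;
}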
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 86573350350e..dd1afa38f2ae 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -1809,6 +1809,9 @@ static __be32 nfsd4_encode_components_esc(char sep, char *components,
                }
                else
                        end++;
+               if (found_esc)
+                       end = next;
+
                str = end;
        }
        *pp = p;
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index 1e0bbae06ee7..09480c53fd74 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -49,6 +49,8 @@ struct nilfs_iget_args {
        int for_gc;
 };
 
+static int nilfs_iget_test(struct inode *inode, void *opaque);
+
 void nilfs_inode_add_blocks(struct inode *inode, int n)
 {
        struct nilfs_root *root = NILFS_I(inode)->i_root;
@@ -347,6 +349,17 @@ const struct address_space_operations nilfs_aops = {
        .is_partially_uptodate  = block_is_partially_uptodate,
 };
 
+static int nilfs_insert_inode_locked(struct inode *inode,
+                                    struct nilfs_root *root,
+                                    unsigned long ino)
+{
+       struct nilfs_iget_args args = {
+               .ino = ino, .root = root, .cno = 0, .for_gc = 0
+       };
+
+       return insert_inode_locked4(inode, ino, nilfs_iget_test, &args);
+}
+
 struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
 {
        struct super_block *sb = dir->i_sb;
@@ -382,7 +395,7 @@ struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
        if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
                err = nilfs_bmap_read(ii->i_bmap, NULL);
                if (err < 0)
-                       goto failed_bmap;
+                       goto failed_after_creation;
 
                set_bit(NILFS_I_BMAP, &ii->i_state);
                /* No lock is needed; iget() ensures it. */
@@ -398,21 +411,24 @@ struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
        spin_lock(&nilfs->ns_next_gen_lock);
        inode->i_generation = nilfs->ns_next_generation++;
        spin_unlock(&nilfs->ns_next_gen_lock);
-       insert_inode_hash(inode);
+       if (nilfs_insert_inode_locked(inode, root, ino) < 0) {
+               err = -EIO;
+               goto failed_after_creation;
+       }
 
        err = nilfs_init_acl(inode, dir);
        if (unlikely(err))
-               goto failed_acl; /* never occur. When supporting
+               goto failed_after_creation; /* never occur. When supporting
                                    nilfs_init_acl(), proper cancellation of
                                    above jobs should be considered */
 
        return inode;
 
- failed_acl:
- failed_bmap:
+ failed_after_creation:
        clear_nlink(inode);
+       unlock_new_inode(inode);
        iput(inode);  /* raw_inode will be deleted through
-                        generic_delete_inode() */
+                        nilfs_evict_inode() */
        goto failed;
 
  failed_ifile_create_inode:
@@ -460,8 +476,8 @@ int nilfs_read_inode_common(struct inode *inode,
        inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
        inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
        inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
-       if (inode->i_nlink == 0 && inode->i_mode == 0)
-               return -EINVAL; /* this inode is deleted */
+       if (inode->i_nlink == 0)
+               return -ESTALE; /* this inode is deleted */
 
        inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
        ii->i_flags = le32_to_cpu(raw_inode->i_flags);
diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
index 9de78f08989e..0f84b257932c 100644
--- a/fs/nilfs2/namei.c
+++ b/fs/nilfs2/namei.c
@@ -51,9 +51,11 @@ static inline int nilfs_add_nondir(struct dentry *dentry, struct inode *inode)
        int err = nilfs_add_link(dentry, inode);
        if (!err) {
                d_instantiate(dentry, inode);
+               unlock_new_inode(inode);
                return 0;
        }
        inode_dec_link_count(inode);
+       unlock_new_inode(inode);
        iput(inode);
        return err;
 }
@@ -182,6 +184,7 @@ out:
 out_fail:
        drop_nlink(inode);
        nilfs_mark_inode_dirty(inode);
+       unlock_new_inode(inode);
        iput(inode);
        goto out;
 }
@@ -201,11 +204,15 @@ static int nilfs_link(struct dentry *old_dentry, struct inode *dir,
        inode_inc_link_count(inode);
        ihold(inode);
 
-       err = nilfs_add_nondir(dentry, inode);
-       if (!err)
+       err = nilfs_add_link(dentry, inode);
+       if (!err) {
+               d_instantiate(dentry, inode);
                err = nilfs_transaction_commit(dir->i_sb);
-       else
+       } else {
+               inode_dec_link_count(inode);
+               iput(inode);
                nilfs_transaction_abort(dir->i_sb);
+       }
 
        return err;
 }
@@ -243,6 +250,7 @@ static int nilfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 
        nilfs_mark_inode_dirty(inode);
        d_instantiate(dentry, inode);
+       unlock_new_inode(inode);
 out:
        if (!err)
                err = nilfs_transaction_commit(dir->i_sb);
@@ -255,6 +263,7 @@ out_fail:
        drop_nlink(inode);
        drop_nlink(inode);
        nilfs_mark_inode_dirty(inode);
+       unlock_new_inode(inode);
        iput(inode);
 out_dir:
        drop_nlink(dir);
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index aeb44e879c51..bb6ee06118ca 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -899,7 +899,7 @@ void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages)
        }
 }
 
-static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc)
+static void ocfs2_unlock_pages(struct ocfs2_write_ctxt *wc)
 {
        int i;
 
@@ -920,7 +920,11 @@ static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc)
                page_cache_release(wc->w_target_page);
        }
        ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages);
+}
 
+static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc)
+{
+       ocfs2_unlock_pages(wc);
        brelse(wc->w_di_bh);
        kfree(wc);
 }
@@ -2045,11 +2049,19 @@ out_write_size:
        di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
        ocfs2_journal_dirty(handle, wc->w_di_bh);
 
+       /* Unlock pages before dealloc since it needs to acquire the
+        * j_trans_barrier lock, or it will cause a deadlock: the journal
+        * commit thread holds this lock and will ask for the page lock
+        * when flushing the data. Put it here to preserve the unlock order.
+        */
+       ocfs2_unlock_pages(wc);
+
        ocfs2_commit_trans(osb, handle);
 
        ocfs2_run_deallocs(osb, &wc->w_dealloc);
 
-       ocfs2_free_write_ctxt(wc);
+       brelse(wc->w_di_bh);
+       kfree(wc);
 
        return copied;
 }
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index feed025fe064..b2427623dd6c 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -94,6 +94,14 @@ static int ocfs2_create_symlink_data(struct ocfs2_super *osb,
                                     struct inode *inode,
                                     const char *symname);
 
+static int ocfs2_double_lock(struct ocfs2_super *osb,
+                            struct buffer_head **bh1,
+                            struct inode *inode1,
+                            struct buffer_head **bh2,
+                            struct inode *inode2,
+                            int rename);
+
+static void ocfs2_double_unlock(struct inode *inode1, struct inode *inode2);
 /* An orphan dir name is an 8 byte value, printed as a hex string */
 #define OCFS2_ORPHAN_NAMELEN ((int)(2 * sizeof(u64)))
 
@@ -656,8 +664,10 @@ static int ocfs2_link(struct dentry *old_dentry,
 {
        handle_t *handle;
        struct inode *inode = old_dentry->d_inode;
+       struct inode *old_dir = old_dentry->d_parent->d_inode;
        int err;
        struct buffer_head *fe_bh = NULL;
+       struct buffer_head *old_dir_bh = NULL;
        struct buffer_head *parent_fe_bh = NULL;
        struct ocfs2_dinode *fe = NULL;
        struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
@@ -674,19 +684,33 @@ static int ocfs2_link(struct dentry *old_dentry,
 
        dquot_initialize(dir);
 
-       err = ocfs2_inode_lock_nested(dir, &parent_fe_bh, 1, OI_LS_PARENT);
+       err = ocfs2_double_lock(osb, &old_dir_bh, old_dir,
+                       &parent_fe_bh, dir, 0);
        if (err < 0) {
                if (err != -ENOENT)
                        mlog_errno(err);
                return err;
        }
 
+       /* make sure both dirs have bhs
+        * get an extra ref on old_dir_bh if old==new */
+       if (!parent_fe_bh) {
+               if (old_dir_bh) {
+                       parent_fe_bh = old_dir_bh;
+                       get_bh(parent_fe_bh);
+               } else {
+                       mlog(ML_ERROR, "%s: no old_dir_bh!\n", osb->uuid_str);
+                       err = -EIO;
+                       goto out;
+               }
+       }
+
        if (!dir->i_nlink) {
                err = -ENOENT;
                goto out;
        }
 
-       err = ocfs2_lookup_ino_from_name(dir, old_dentry->d_name.name,
+       err = ocfs2_lookup_ino_from_name(old_dir, old_dentry->d_name.name,
                        old_dentry->d_name.len, &old_de_ino);
        if (err) {
                err = -ENOENT;
@@ -779,10 +803,11 @@ out_unlock_inode:
        ocfs2_inode_unlock(inode, 1);
 
 out:
-       ocfs2_inode_unlock(dir, 1);
+       ocfs2_double_unlock(old_dir, dir);
 
        brelse(fe_bh);
        brelse(parent_fe_bh);
+       brelse(old_dir_bh);
 
        ocfs2_free_dir_lookup_result(&lookup);
 
@@ -991,14 +1016,15 @@ leave:
 }
 
 /*
- * The only place this should be used is rename!
+ * The only place this should be used is rename and link!
  * if they have the same id, then the 1st one is the only one locked.
  */
 static int ocfs2_double_lock(struct ocfs2_super *osb,
                             struct buffer_head **bh1,
                             struct inode *inode1,
                             struct buffer_head **bh2,
-                            struct inode *inode2)
+                            struct inode *inode2,
+                            int rename)
 {
        int status;
        struct ocfs2_inode_info *oi1 = OCFS2_I(inode1);
@@ -1028,7 +1054,7 @@ static int ocfs2_double_lock(struct ocfs2_super *osb,
                }
                /* lock id2 */
                status = ocfs2_inode_lock_nested(inode2, bh2, 1,
-                                                OI_LS_RENAME1);
+                               rename == 1 ? OI_LS_RENAME1 : OI_LS_PARENT);
                if (status < 0) {
                        if (status != -ENOENT)
                                mlog_errno(status);
@@ -1037,7 +1063,8 @@ static int ocfs2_double_lock(struct ocfs2_super *osb,
        }
 
        /* lock id1 */
-       status = ocfs2_inode_lock_nested(inode1, bh1, 1, OI_LS_RENAME2);
+       status = ocfs2_inode_lock_nested(inode1, bh1, 1,
+                       rename == 1 ?  OI_LS_RENAME2 : OI_LS_PARENT);
        if (status < 0) {
                /*
                 * An error return must mean that no cluster locks
@@ -1137,7 +1164,7 @@ static int ocfs2_rename(struct inode *old_dir,
 
        /* if old and new are the same, this'll just do one lock. */
        status = ocfs2_double_lock(osb, &old_dir_bh, old_dir,
-                                  &new_dir_bh, new_dir);
+                                  &new_dir_bh, new_dir, 1);
        if (status < 0) {
                mlog_errno(status);
                goto bail;
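ocfs2_double_lock(), now shared by rename and link, follows the usual two-lock discipline: take only one lock when both inodes are the same, otherwise take the pair in a fixed global order so concurrent callers cannot deadlock. A generic sketch of that discipline, using pthread mutexes and an inode-number ordering rather than the real cluster locks and subclasses:

#include <pthread.h>

struct dir {
	unsigned long ino;		/* stable identity used for ordering */
	pthread_mutex_t lock;
};

void double_lock(struct dir *a, struct dir *b)
{
	if (a == b) {			/* same directory: take one lock only */
		pthread_mutex_lock(&a->lock);
		return;
	}
	if (a->ino > b->ino) {		/* normalise to a fixed global order */
		struct dir *tmp = a;
		a = b;
		b = tmp;
	}
	pthread_mutex_lock(&a->lock);
	pthread_mutex_lock(&b->lock);
}

void double_unlock(struct dir *a, struct dir *b)
{
	pthread_mutex_unlock(&a->lock);
	if (a != b)
		pthread_mutex_unlock(&b->lock);
}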
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
index fa8cef2cca3a..e7d95f959333 100644
--- a/fs/pstore/ram.c
+++ b/fs/pstore/ram.c
@@ -61,6 +61,11 @@ module_param(mem_size, ulong, 0400);
 MODULE_PARM_DESC(mem_size,
                "size of reserved RAM used to store oops/panic logs");
 
+static unsigned int mem_type;
+module_param(mem_type, uint, 0600);
+MODULE_PARM_DESC(mem_type,
+               "set to 1 to try to use unbuffered memory (default 0)");
+
 static int dump_oops = 1;
 module_param(dump_oops, int, 0600);
 MODULE_PARM_DESC(dump_oops,
@@ -79,6 +84,7 @@ struct ramoops_context {
        struct persistent_ram_zone *fprz;
        phys_addr_t phys_addr;
        unsigned long size;
+       unsigned int memtype;
        size_t record_size;
        size_t console_size;
        size_t ftrace_size;
@@ -353,7 +359,8 @@ static int ramoops_init_przs(struct device *dev, struct ramoops_context *cxt,
                size_t sz = cxt->record_size;
 
                cxt->przs[i] = persistent_ram_new(*paddr, sz, 0,
-                                                 &cxt->ecc_info);
+                                                 &cxt->ecc_info,
+                                                 cxt->memtype);
                if (IS_ERR(cxt->przs[i])) {
                        err = PTR_ERR(cxt->przs[i]);
                        dev_err(dev, "failed to request mem region (0x%zx@0x%llx): %d\n",
@@ -383,7 +390,7 @@ static int ramoops_init_prz(struct device *dev, struct ramoops_context *cxt,
                return -ENOMEM;
        }
 
-       *prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info);
+       *prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info, cxt->memtype);
        if (IS_ERR(*prz)) {
                int err = PTR_ERR(*prz);
 
@@ -431,6 +438,7 @@ static int ramoops_probe(struct platform_device *pdev)
        cxt->dump_read_cnt = 0;
        cxt->size = pdata->mem_size;
        cxt->phys_addr = pdata->mem_address;
+       cxt->memtype = pdata->mem_type;
        cxt->record_size = pdata->record_size;
        cxt->console_size = pdata->console_size;
        cxt->ftrace_size = pdata->ftrace_size;
@@ -561,6 +569,7 @@ static void ramoops_register_dummy(void)
 
        dummy_data->mem_size = mem_size;
        dummy_data->mem_address = mem_address;
+       dummy_data->mem_type = 0;
        dummy_data->record_size = record_size;
        dummy_data->console_size = ramoops_console_size;
        dummy_data->ftrace_size = ramoops_ftrace_size;
diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
index de272d426763..bda61a759b68 100644
--- a/fs/pstore/ram_core.c
+++ b/fs/pstore/ram_core.c
@@ -380,7 +380,8 @@ void persistent_ram_zap(struct persistent_ram_zone *prz)
        persistent_ram_update_header_ecc(prz);
 }
 
-static void *persistent_ram_vmap(phys_addr_t start, size_t size)
+static void *persistent_ram_vmap(phys_addr_t start, size_t size,
+               unsigned int memtype)
 {
        struct page **pages;
        phys_addr_t page_start;
@@ -392,7 +393,10 @@ static void *persistent_ram_vmap(phys_addr_t start, size_t size)
        page_start = start - offset_in_page(start);
        page_count = DIV_ROUND_UP(size + offset_in_page(start), PAGE_SIZE);
 
-       prot = pgprot_noncached(PAGE_KERNEL);
+       if (memtype)
+               prot = pgprot_noncached(PAGE_KERNEL);
+       else
+               prot = pgprot_writecombine(PAGE_KERNEL);
 
        pages = kmalloc(sizeof(struct page *) * page_count, GFP_KERNEL);
        if (!pages) {
@@ -411,8 +415,11 @@ static void *persistent_ram_vmap(phys_addr_t start, size_t size)
        return vaddr;
 }
 
-static void *persistent_ram_iomap(phys_addr_t start, size_t size)
+static void *persistent_ram_iomap(phys_addr_t start, size_t size,
+               unsigned int memtype)
 {
+       void *va;
+
        if (!request_mem_region(start, size, "persistent_ram")) {
                pr_err("request mem region (0x%llx@0x%llx) failed\n",
                        (unsigned long long)size, (unsigned long long)start);
@@ -422,19 +429,24 @@ static void *persistent_ram_iomap(phys_addr_t start, size_t size)
        buffer_start_add = buffer_start_add_locked;
        buffer_size_add = buffer_size_add_locked;
 
-       return ioremap(start, size);
+       if (memtype)
+               va = ioremap(start, size);
+       else
+               va = ioremap_wc(start, size);
+
+       return va;
 }
 
 static int persistent_ram_buffer_map(phys_addr_t start, phys_addr_t size,
-               struct persistent_ram_zone *prz)
+               struct persistent_ram_zone *prz, int memtype)
 {
        prz->paddr = start;
        prz->size = size;
 
        if (pfn_valid(start >> PAGE_SHIFT))
-               prz->vaddr = persistent_ram_vmap(start, size);
+               prz->vaddr = persistent_ram_vmap(start, size, memtype);
        else
-               prz->vaddr = persistent_ram_iomap(start, size);
+               prz->vaddr = persistent_ram_iomap(start, size, memtype);
 
        if (!prz->vaddr) {
                pr_err("%s: Failed to map 0x%llx pages at 0x%llx\n", __func__,
@@ -502,7 +514,8 @@ void persistent_ram_free(struct persistent_ram_zone *prz)
 }
 
 struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
-                       u32 sig, struct persistent_ram_ecc_info *ecc_info)
+                       u32 sig, struct persistent_ram_ecc_info *ecc_info,
+                       unsigned int memtype)
 {
        struct persistent_ram_zone *prz;
        int ret = -ENOMEM;
@@ -513,7 +526,7 @@ struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
                goto err;
        }
 
-       ret = persistent_ram_buffer_map(start, size, prz);
+       ret = persistent_ram_buffer_map(start, size, prz, memtype);
        if (ret)
                goto err;
 
diff --git a/include/linux/mm.h b/include/linux/mm.h
index d5039daf1e1c..46b8ab56b9db 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1866,7 +1866,7 @@ extern int expand_downwards(struct vm_area_struct *vma,
 #if VM_GROWSUP
 extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
 #else
-  #define expand_upwards(vma, address) do { } while (0)
+  #define expand_upwards(vma, address) (0)
 #endif
 
 /* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h
index 9974975d40db..4af3fdc85b01 100644
--- a/include/linux/pstore_ram.h
+++ b/include/linux/pstore_ram.h
@@ -53,7 +53,8 @@ struct persistent_ram_zone {
 };
 
 struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
-                       u32 sig, struct persistent_ram_ecc_info *ecc_info);
+                       u32 sig, struct persistent_ram_ecc_info *ecc_info,
+                       unsigned int memtype);
 void persistent_ram_free(struct persistent_ram_zone *prz);
 void persistent_ram_zap(struct persistent_ram_zone *prz);
 
@@ -76,6 +77,7 @@ ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz,
 struct ramoops_platform_data {
        unsigned long   mem_size;
        unsigned long   mem_address;
+       unsigned int    mem_type;
        unsigned long   record_size;
        unsigned long   console_size;
        unsigned long   ftrace_size;
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 67e1bbf83695..dc7bb01f580f 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -100,7 +100,7 @@ static inline long __trace_sched_switch_state(struct task_struct *p)
        /*
         * For all intents and purposes a preempted task is a running task.
         */
-       if (task_preempt_count(p) & PREEMPT_ACTIVE)
+       if (preempt_count() & PREEMPT_ACTIVE)
                state = TASK_RUNNING | TASK_STATE_MAX;
 #endif
 
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 4bbb27adf23d..69cffb46db17 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7240,11 +7240,11 @@ SYSCALL_DEFINE5(perf_event_open,
 
        if (move_group) {
                synchronize_rcu();
-               perf_install_in_context(ctx, group_leader, event->cpu);
+               perf_install_in_context(ctx, group_leader, group_leader->cpu);
                get_ctx(ctx);
                list_for_each_entry(sibling, &group_leader->sibling_list,
                                    group_entry) {
-                       perf_install_in_context(ctx, sibling, event->cpu);
+                       perf_install_in_context(ctx, sibling, sibling->cpu);
                        get_ctx(ctx);
                }
        }
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 37dac98c0749..8d3c5ddfdfdd 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -550,24 +550,7 @@ void init_dl_task_timer(struct sched_dl_entity *dl_se)
 static
 int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se)
 {
-       int dmiss = dl_time_before(dl_se->deadline, rq_clock(rq));
-       int rorun = dl_se->runtime <= 0;
-
-       if (!rorun && !dmiss)
-               return 0;
-
-       /*
-        * If we are beyond our current deadline and we are still
-        * executing, then we have already used some of the runtime of
-        * the next instance. Thus, if we do not account that, we are
-        * stealing bandwidth from the system at each deadline miss!
-        */
-       if (dmiss) {
-               dl_se->runtime = rorun ? dl_se->runtime : 0;
-               dl_se->runtime -= rq_clock(rq) - dl_se->deadline;
-       }
-
-       return 1;
+       return (dl_se->runtime <= 0);
 }
 
 extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
@@ -806,10 +789,10 @@ enqueue_dl_entity(struct sched_dl_entity *dl_se,
         * parameters of the task might need updating. Otherwise,
         * we want a replenishment of its runtime.
         */
-       if (!dl_se->dl_new && flags & ENQUEUE_REPLENISH)
-               replenish_dl_entity(dl_se, pi_se);
-       else
+       if (dl_se->dl_new || flags & ENQUEUE_WAKEUP)
                update_dl_entity(dl_se, pi_se);
+       else if (flags & ENQUEUE_REPLENISH)
+               replenish_dl_entity(dl_se, pi_se);
 
        __enqueue_dl_entity(dl_se);
 }
diff --git a/mm/memory.c b/mm/memory.c
index 48d7365ba4e4..924429e5ef4d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3204,7 +3204,7 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo
                if (prev && prev->vm_end == address)
                        return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
 
-               expand_downwards(vma, address - PAGE_SIZE);
+               return expand_downwards(vma, address - PAGE_SIZE);
        }
        if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
                struct vm_area_struct *next = vma->vm_next;
@@ -3213,7 +3213,7 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo
                if (next && next->vm_start == address + PAGE_SIZE)
                        return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
 
-               expand_upwards(vma, address + PAGE_SIZE);
+               return expand_upwards(vma, address + PAGE_SIZE);
        }
        return 0;
 }
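The two mm/memory.c hunks above now return the result of expand_downwards()/expand_upwards(), which is why the !VM_GROWSUP stub in include/linux/mm.h had to become the expression (0) rather than a do/while statement. A tiny standalone illustration with dummy names:

#include <stdio.h>

#define FEATURE_ENABLED 0

#if FEATURE_ENABLED
int do_feature(int arg);
#else
/* statement-style stub: fine as "do_feature(x);", but cannot be returned */
/* #define do_feature(arg) do { } while (0) */
/* expression-style stub: usable anywhere an int value is expected */
#define do_feature(arg) (0)
#endif

static int caller(int arg)
{
	/* with the do/while form this return would not compile */
	return do_feature(arg);
}

int main(void)
{
	printf("%d\n", caller(42));
	return 0;
}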
diff --git a/mm/mmap.c b/mm/mmap.c
index b91ac800d7b7..085bcd890ad2 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2058,14 +2058,17 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
 {
        struct mm_struct *mm = vma->vm_mm;
        struct rlimit *rlim = current->signal->rlim;
-       unsigned long new_start;
+       unsigned long new_start, actual_size;
 
        /* address space limit tests */
        if (!may_expand_vm(mm, grow))
                return -ENOMEM;
 
        /* Stack limit test */
-       if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
+       actual_size = size;
+       if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
+               actual_size -= PAGE_SIZE;
+       if (actual_size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
                return -ENOMEM;
 
        /* mlock limit tests */
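Worked example of the check above, with illustrative numbers: a growable stack VMA carries one guard page, so the raw VMA size overstates the usable stack by PAGE_SIZE. Comparing the raw size against RLIMIT_STACK therefore makes the last usable page appear over the limit; subtracting the guard page first lets a limit of exactly 8 MiB provide 8 MiB of stack.

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define RLIM_STACK	(8UL << 20)		/* 8 MiB stack limit */

static int stack_growth_ok(unsigned long vma_size, int has_guard_page)
{
	unsigned long actual_size = vma_size;

	if (vma_size && has_guard_page)
		actual_size -= PAGE_SIZE;	/* don't charge the guard page */

	return actual_size <= RLIM_STACK;
}

int main(void)
{
	unsigned long vma_size = RLIM_STACK + PAGE_SIZE;	/* 8 MiB stack + guard */

	printf("old check (guard page charged): %s\n",
	       vma_size <= RLIM_STACK ? "ok" : "-ENOMEM");
	printf("new check (guard page excluded): %s\n",
	       stack_growth_ok(vma_size, 1) ? "ok" : "-ENOMEM");
	return 0;
}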
diff --git a/mm/vmscan.c b/mm/vmscan.c
index deb139e6b8ed..be6a689a71a6 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2860,18 +2860,20 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
                return false;
 
        /*
-        * There is a potential race between when kswapd checks its watermarks
-        * and a process gets throttled. There is also a potential race if
-        * processes get throttled, kswapd wakes, a large process exits therby
-        * balancing the zones that causes kswapd to miss a wakeup. If kswapd
-        * is going to sleep, no process should be sleeping on pfmemalloc_wait
-        * so wake them now if necessary. If necessary, processes will wake
-        * kswapd and get throttled again
+        * The throttled processes are normally woken up in balance_pgdat() as
+        * soon as pfmemalloc_watermark_ok() is true. But there is a potential
+        * race between when kswapd checks the watermarks and a process gets
+        * throttled. There is also a potential race if processes get
+        * throttled, kswapd wakes, a large process exits thereby balancing the
+        * zones, which causes kswapd to exit balance_pgdat() before reaching
+        * the wake up checks. If kswapd is going to sleep, no process should
+        * be sleeping on pfmemalloc_wait, so wake them now if necessary. If
+        * the wake up is premature, processes will wake kswapd and get
+        * throttled again. The difference from wake ups in balance_pgdat() is
+        * that here we are under prepare_to_wait().
         */
-       if (waitqueue_active(&pgdat->pfmemalloc_wait)) {
-               wake_up(&pgdat->pfmemalloc_wait);
-               return false;
-       }
+       if (waitqueue_active(&pgdat->pfmemalloc_wait))
+               wake_up_all(&pgdat->pfmemalloc_wait);
 
        return pgdat_balanced(pgdat, order, classzone_idx);
 }
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index da058da413e7..2438cc351952 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -1753,7 +1753,7 @@ sub dump_struct($$) {
        # strip kmemcheck_bitfield_{begin,end}.*;
        $members =~ s/kmemcheck_bitfield_.*?;//gos;
        # strip attributes
-       $members =~ s/__aligned\s*\(.+\)//gos;
+       $members =~ s/__aligned\s*\([^;]*\)//gos;
 
        create_parameterlist($members, ';', $file);
        check_sections($file, $declaration_name, "struct", $sectcheck, $struct_actual, $nested);
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index dafcf82139e2..f6e5c4ed03ed 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -338,8 +338,10 @@ int snd_hda_get_sub_nodes(struct hda_codec *codec, hda_nid_t nid,
        unsigned int parm;
 
        parm = snd_hda_param_read(codec, nid, AC_PAR_NODE_COUNT);
-       if (parm == -1)
+       if (parm == -1) {
+               *start_id = 0;
                return 0;
+       }
        *start_id = (parm >> 16) & 0x7fff;
        return (int)(parm & 0x7fff);
 }
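The hda_codec hunk above makes sure *start_id is always written, even on the early-return path. A tiny illustration of the hazard, with a dummy function rather than the HDA API: a caller that only checks the returned count would otherwise read an uninitialized output parameter.

#include <stdio.h>

/* returns the number of nodes; writes the first node id into *start_id */
static int get_sub_nodes(int parm, int *start_id)
{
	if (parm == -1) {
		*start_id = 0;		/* the fix: always define the output */
		return 0;
	}
	*start_id = (parm >> 16) & 0x7fff;
	return parm & 0x7fff;
}

int main(void)
{
	int start_id;
	int count = get_sub_nodes(-1, &start_id);

	printf("count=%d start_id=%d\n", count, start_id);
	return 0;
}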
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 15270a2e71cc..12f28d7e0fdc 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -593,9 +593,9 @@ static void stac_store_hints(struct hda_codec *codec)
                        spec->gpio_mask;
        }
        if (get_int_hint(codec, "gpio_dir", &spec->gpio_dir))
-               spec->gpio_mask &= spec->gpio_mask;
-       if (get_int_hint(codec, "gpio_data", &spec->gpio_data))
                spec->gpio_dir &= spec->gpio_mask;
+       if (get_int_hint(codec, "gpio_data", &spec->gpio_data))
+               spec->gpio_data &= spec->gpio_mask;
        if (get_int_hint(codec, "eapd_mask", &spec->eapd_mask))
                spec->eapd_mask &= spec->gpio_mask;
        if (get_int_hint(codec, "gpio_mute", &spec->gpio_mute))
diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
index ddfb0fddd030..9dd260f9dd06 100644
--- a/sound/soc/codecs/max98090.c
+++ b/sound/soc/codecs/max98090.c
@@ -1378,8 +1378,8 @@ static const struct snd_soc_dapm_route max98090_dapm_routes[] = {
        {"STENL Mux", "Sidetone Left", "DMICL"},
        {"STENR Mux", "Sidetone Right", "ADCR"},
        {"STENR Mux", "Sidetone Right", "DMICR"},
-       {"DACL", "NULL", "STENL Mux"},
-       {"DACR", "NULL", "STENL Mux"},
+       {"DACL", NULL, "STENL Mux"},
+       {"DACR", NULL, "STENL Mux"},
 
        {"AIFINL", NULL, "SHDN"},
        {"AIFINR", NULL, "SHDN"},
diff --git a/sound/soc/codecs/sigmadsp.c b/sound/soc/codecs/sigmadsp.c
index 4068f2491232..bb3878c9625f 100644
--- a/sound/soc/codecs/sigmadsp.c
+++ b/sound/soc/codecs/sigmadsp.c
@@ -176,6 +176,13 @@ static int _process_sigma_firmware(struct device *dev,
                goto done;
        }
 
+       if (ssfw_head->version != 1) {
+               dev_err(dev,
+                       "Failed to load firmware: Invalid version %d. Supported firmware versions: 1\n",
+                       ssfw_head->version);
+               goto done;
+       }
+
        crc = crc32(0, fw->data + sizeof(*ssfw_head),
                        fw->size - sizeof(*ssfw_head));
        pr_debug("%s: crc=%x\n", __func__, crc);
diff --git a/sound/soc/dwc/designware_i2s.c b/sound/soc/dwc/designware_i2s.c
index 25c31f1655f6..2f6357578616 100644
--- a/sound/soc/dwc/designware_i2s.c
+++ b/sound/soc/dwc/designware_i2s.c
@@ -263,6 +263,19 @@ static void dw_i2s_shutdown(struct snd_pcm_substream *substream,
        snd_soc_dai_set_dma_data(dai, substream, NULL);
 }
 
+static int dw_i2s_prepare(struct snd_pcm_substream *substream,
+                         struct snd_soc_dai *dai)
+{
+       struct dw_i2s_dev *dev = snd_soc_dai_get_drvdata(dai);
+
+       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+               i2s_write_reg(dev->i2s_base, TXFFR, 1);
+       else
+               i2s_write_reg(dev->i2s_base, RXFFR, 1);
+
+       return 0;
+}
+
 static int dw_i2s_trigger(struct snd_pcm_substream *substream,
                int cmd, struct snd_soc_dai *dai)
 {
@@ -294,6 +307,7 @@ static struct snd_soc_dai_ops dw_i2s_dai_ops = {
        .startup        = dw_i2s_startup,
        .shutdown       = dw_i2s_shutdown,
        .hw_params      = dw_i2s_hw_params,
+       .prepare        = dw_i2s_prepare,
        .trigger        = dw_i2s_trigger,
 };
 
diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
index d1d72ff50347..621bc9ebb55e 100644
--- a/sound/usb/mixer_maps.c
+++ b/sound/usb/mixer_maps.c
@@ -328,8 +328,11 @@ static struct usbmix_name_map gamecom780_map[] = {
        {}
 };
 
-static const struct usbmix_name_map kef_x300a_map[] = {
-       { 10, NULL }, /* firmware locks up (?) when we try to access this FU */
+/* some (all?) SCMS USB3318 devices are affected by a firmware lock up
+ * when anything attempts to access FU 10 (control)
+ */
+static const struct usbmix_name_map scms_usb3318_map[] = {
+       { 10, NULL },
        { 0 }
 };
 
@@ -425,8 +428,14 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
                .map = ebox44_map,
        },
        {
+               /* KEF X300A */
                .id = USB_ID(0x27ac, 0x1000),
-               .map = kef_x300a_map,
+               .map = scms_usb3318_map,
+       },
+       {
+               /* Arcam rPAC */
+               .id = USB_ID(0x25c4, 0x0003),
+               .map = scms_usb3318_map,
        },
        { 0 } /* terminator */
 };
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index a59743fa3ef7..0001c9aa8b71 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -36,6 +36,7 @@ struct events_stats {
        u32 nr_invalid_chains;
        u32 nr_unknown_id;
        u32 nr_unprocessable_samples;
+       u32 nr_unordered_events;
 };
 
 enum hist_column {
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 5da6ce74c676..c1f20e91f968 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -638,8 +638,7 @@ int perf_session_queue_event(struct perf_session *s, union perf_event *event,
                return -ETIME;
 
        if (timestamp < s->ordered_samples.last_flush) {
-               printf("Warning: Timestamp below last timeslice flush\n");
-               return -EINVAL;
+               s->stats.nr_unordered_events++;
        }
 
        if (!list_empty(sc)) {
@@ -1135,6 +1134,8 @@ static void perf_session__warn_about_errors(const struct perf_session *session,
                            "Do you have a KVM guest running and not using 
'perf kvm'?\n",
                            session->stats.nr_unprocessable_samples);
        }
+       if (session->stats.nr_unordered_events != 0)
+               ui__warning("%u out of order events recorded.\n", session->stats.nr_unordered_events);
 }
 
 volatile int session_done;