diff --git a/Makefile b/Makefile
index 41fe3014b712..ccf2602f664d 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 9
-SUBLEVEL = 137
+SUBLEVEL = 138
 EXTRAVERSION =
 NAME = Roaring Lionus
 
diff --git a/arch/alpha/include/asm/termios.h b/arch/alpha/include/asm/termios.h
index 7fde0f88da88..51ed90be770a 100644
--- a/arch/alpha/include/asm/termios.h
+++ b/arch/alpha/include/asm/termios.h
@@ -72,9 +72,15 @@
 })
 
 #define user_termios_to_kernel_termios(k, u) \
-       copy_from_user(k, u, sizeof(struct termios))
+       copy_from_user(k, u, sizeof(struct termios2))
 
 #define kernel_termios_to_user_termios(u, k) \
+       copy_to_user(u, k, sizeof(struct termios2))
+
+#define user_termios_to_kernel_termios_1(k, u) \
+       copy_from_user(k, u, sizeof(struct termios))
+
+#define kernel_termios_to_user_termios_1(u, k) \
        copy_to_user(u, k, sizeof(struct termios))
 
 #endif /* _ALPHA_TERMIOS_H */
diff --git a/arch/alpha/include/uapi/asm/ioctls.h b/arch/alpha/include/uapi/asm/ioctls.h
index f30c94ae1bdb..7ee8ab577e11 100644
--- a/arch/alpha/include/uapi/asm/ioctls.h
+++ b/arch/alpha/include/uapi/asm/ioctls.h
@@ -31,6 +31,11 @@
 #define TCXONC         _IO('t', 30)
 #define TCFLSH         _IO('t', 31)
 
+#define TCGETS2                _IOR('T', 42, struct termios2)
+#define TCSETS2                _IOW('T', 43, struct termios2)
+#define TCSETSW2       _IOW('T', 44, struct termios2)
+#define TCSETSF2       _IOW('T', 45, struct termios2)
+
 #define TIOCSWINSZ     _IOW('t', 103, struct winsize)
 #define TIOCGWINSZ     _IOR('t', 104, struct winsize)
 #define        TIOCSTART       _IO('t', 110)           /* start output, like ^Q */
diff --git a/arch/alpha/include/uapi/asm/termbits.h b/arch/alpha/include/uapi/asm/termbits.h
index 879dd3589921..483c7ec2a879 100644
--- a/arch/alpha/include/uapi/asm/termbits.h
+++ b/arch/alpha/include/uapi/asm/termbits.h
@@ -25,6 +25,19 @@ struct termios {
        speed_t c_ospeed;               /* output speed */
 };
 
+/* Alpha has identical termios and termios2 */
+
+struct termios2 {
+       tcflag_t c_iflag;               /* input mode flags */
+       tcflag_t c_oflag;               /* output mode flags */
+       tcflag_t c_cflag;               /* control mode flags */
+       tcflag_t c_lflag;               /* local mode flags */
+       cc_t c_cc[NCCS];                /* control characters */
+       cc_t c_line;                    /* line discipline (== c_cc[19]) */
+       speed_t c_ispeed;               /* input speed */
+       speed_t c_ospeed;               /* output speed */
+};
+
 /* Alpha has matching termios and ktermios */
 
 struct ktermios {
@@ -147,6 +160,7 @@ struct ktermios {
 #define B3000000  00034
 #define B3500000  00035
 #define B4000000  00036
+#define BOTHER    00037
 
 #define CSIZE  00001400
 #define   CS5  00000000
@@ -164,6 +178,9 @@ struct ktermios {
 #define CMSPAR   010000000000          /* mark or space (stick) parity */
 #define CRTSCTS          020000000000          /* flow control */
 
+#define CIBAUD 07600000
+#define IBSHIFT        16
+
 /* c_lflag bits */
 #define ISIG   0x00000080
 #define ICANON 0x00000100
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
index 8ec4dbbb50b0..47c3fb8d4313 100644
--- a/arch/arm/configs/imx_v6_v7_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -361,6 +361,7 @@ CONFIG_ZISOFS=y
 CONFIG_UDF_FS=m
 CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=y
+CONFIG_TMPFS_POSIX_ACL=y
 CONFIG_JFFS2_FS=y
 CONFIG_UBIFS_FS=y
 CONFIG_NFS_FS=y
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 20436972537f..a670c70f4def 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -1092,8 +1092,6 @@ static void cpu_init_hyp_mode(void *dummy)
 
        __cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);
        __cpu_init_stage2();
-
-       kvm_arm_init_debug();
 }
 
 static void cpu_hyp_reinit(void)
@@ -1108,6 +1106,8 @@ static void cpu_hyp_reinit(void)
                if (__hyp_get_vectors() == hyp_default_vectors)
                        cpu_init_hyp_mode(NULL);
        }
+
+       kvm_arm_init_debug();
 }
 
 static void cpu_hyp_reset(void)
diff --git a/arch/mips/include/asm/mach-loongson64/irq.h b/arch/mips/include/asm/mach-loongson64/irq.h
index d18c45c7c394..19ff9ce46c02 100644
--- a/arch/mips/include/asm/mach-loongson64/irq.h
+++ b/arch/mips/include/asm/mach-loongson64/irq.h
@@ -9,7 +9,7 @@
 #define MIPS_CPU_IRQ_BASE 56
 
 #define LOONGSON_UART_IRQ   (MIPS_CPU_IRQ_BASE + 2) /* UART */
-#define LOONGSON_HT1_IRQ    (MIPS_CPU_IRQ_BASE + 3) /* HT1 */
+#define LOONGSON_BRIDGE_IRQ (MIPS_CPU_IRQ_BASE + 3) /* CASCADE */
 #define LOONGSON_TIMER_IRQ  (MIPS_CPU_IRQ_BASE + 7) /* CPU Timer */
 
 #define LOONGSON_HT1_CFG_BASE          loongson_sysconf.ht_control_base
diff --git a/arch/mips/kernel/crash.c b/arch/mips/kernel/crash.c
index 1723b1762297..e757f36cea6f 100644
--- a/arch/mips/kernel/crash.c
+++ b/arch/mips/kernel/crash.c
@@ -34,6 +34,9 @@ static void crash_shutdown_secondary(void *passed_regs)
        if (!cpu_online(cpu))
                return;
 
+       /* We won't be sent IPIs any more. */
+       set_cpu_online(cpu, false);
+
        local_irq_disable();
        if (!cpumask_test_cpu(cpu, &cpus_in_crash))
                crash_save_cpu(regs, cpu);
diff --git a/arch/mips/kernel/machine_kexec.c b/arch/mips/kernel/machine_kexec.c
index 59725204105c..32b567e88b02 100644
--- a/arch/mips/kernel/machine_kexec.c
+++ b/arch/mips/kernel/machine_kexec.c
@@ -96,6 +96,9 @@ machine_kexec(struct kimage *image)
                        *ptr = (unsigned long) phys_to_virt(*ptr);
        }
 
+       /* Mark offline BEFORE disabling local irq. */
+       set_cpu_online(smp_processor_id(), false);
+
        /*
         * we do not want to be bothered.
         */
diff --git a/arch/mips/loongson64/loongson-3/irq.c b/arch/mips/loongson64/loongson-3/irq.c
index 8e7649088353..027f53e3bc81 100644
--- a/arch/mips/loongson64/loongson-3/irq.c
+++ b/arch/mips/loongson64/loongson-3/irq.c
@@ -44,51 +44,8 @@ void mach_irq_dispatch(unsigned int pending)
        }
 }
 
-static struct irqaction cascade_irqaction = {
-       .handler = no_action,
-       .flags = IRQF_NO_SUSPEND,
-       .name = "cascade",
-};
-
-static inline void mask_loongson_irq(struct irq_data *d)
-{
-       clear_c0_status(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
-       irq_disable_hazard();
-
-       /* Workaround: UART IRQ may deliver to any core */
-       if (d->irq == LOONGSON_UART_IRQ) {
-               int cpu = smp_processor_id();
-               int node_id = cpu_logical_map(cpu) / loongson_sysconf.cores_per_node;
-               int core_id = cpu_logical_map(cpu) % loongson_sysconf.cores_per_node;
-               u64 intenclr_addr = smp_group[node_id] |
-                       (u64)(&LOONGSON_INT_ROUTER_INTENCLR);
-               u64 introuter_lpc_addr = smp_group[node_id] |
-                       (u64)(&LOONGSON_INT_ROUTER_LPC);
-
-               *(volatile u32 *)intenclr_addr = 1 << 10;
-               *(volatile u8 *)introuter_lpc_addr = 0x10 + (1<<core_id);
-       }
-}
-
-static inline void unmask_loongson_irq(struct irq_data *d)
-{
-       /* Workaround: UART IRQ may deliver to any core */
-       if (d->irq == LOONGSON_UART_IRQ) {
-               int cpu = smp_processor_id();
-               int node_id = cpu_logical_map(cpu) / loongson_sysconf.cores_per_node;
-               int core_id = cpu_logical_map(cpu) % loongson_sysconf.cores_per_node;
-               u64 intenset_addr = smp_group[node_id] |
-                       (u64)(&LOONGSON_INT_ROUTER_INTENSET);
-               u64 introuter_lpc_addr = smp_group[node_id] |
-                       (u64)(&LOONGSON_INT_ROUTER_LPC);
-
-               *(volatile u32 *)intenset_addr = 1 << 10;
-               *(volatile u8 *)introuter_lpc_addr = 0x10 + (1<<core_id);
-       }
-
-       set_c0_status(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
-       irq_enable_hazard();
-}
+static inline void mask_loongson_irq(struct irq_data *d) { }
+static inline void unmask_loongson_irq(struct irq_data *d) { }
 
  /* For MIPS IRQs which shared by all cores */
 static struct irq_chip loongson_irq_chip = {
@@ -126,12 +83,11 @@ void __init mach_init_irq(void)
        mips_cpu_irq_init();
        init_i8259_irqs();
        irq_set_chip_and_handler(LOONGSON_UART_IRQ,
-                       &loongson_irq_chip, handle_level_irq);
-
-       /* setup HT1 irq */
-       setup_irq(LOONGSON_HT1_IRQ, &cascade_irqaction);
+                       &loongson_irq_chip, handle_percpu_irq);
+       irq_set_chip_and_handler(LOONGSON_BRIDGE_IRQ,
+                       &loongson_irq_chip, handle_percpu_irq);
 
-       set_c0_status(STATUSF_IP2 | STATUSF_IP6);
+       set_c0_status(STATUSF_IP2 | STATUSF_IP3 | STATUSF_IP6);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/mips/pci/pci-legacy.c b/arch/mips/pci/pci-legacy.c
index 014649be158d..2d6886f09ba3 100644
--- a/arch/mips/pci/pci-legacy.c
+++ b/arch/mips/pci/pci-legacy.c
@@ -116,8 +116,12 @@ static void pcibios_scanbus(struct pci_controller *hose)
        if (pci_has_flag(PCI_PROBE_ONLY)) {
                pci_bus_claim_resources(bus);
        } else {
+               struct pci_bus *child;
+
                pci_bus_size_bridges(bus);
                pci_bus_assign_resources(bus);
+               list_for_each_entry(child, &bus->children, node)
+                       pcie_bus_configure_settings(child);
        }
        pci_bus_add_devices(bus);
 }
diff --git a/arch/parisc/kernel/hpmc.S b/arch/parisc/kernel/hpmc.S
index 0fbd0a0e1cda..e88f4e7f39f3 100644
--- a/arch/parisc/kernel/hpmc.S
+++ b/arch/parisc/kernel/hpmc.S
@@ -83,7 +83,8 @@ END(hpmc_pim_data)
        .text
 
        .import intr_save, code
-ENTRY_CFI(os_hpmc)
+       .align 16
+ENTRY(os_hpmc)
 .os_hpmc:
 
        /*
@@ -299,11 +300,14 @@ os_hpmc_6:
 
        b .
        nop
-ENDPROC_CFI(os_hpmc)
+       .align 16       /* make function length multiple of 16 bytes */
 .os_hpmc_end:
 
 
        __INITRODATA
-       .export os_hpmc_size
+.globl os_hpmc_size
+       .align 4
+       .type   os_hpmc_size, @object
+       .size   os_hpmc_size, 4
 os_hpmc_size:
        .word .os_hpmc_end-.os_hpmc
diff --git a/arch/powerpc/boot/crt0.S b/arch/powerpc/boot/crt0.S
index 12866ccb5694..5c2199857aa8 100644
--- a/arch/powerpc/boot/crt0.S
+++ b/arch/powerpc/boot/crt0.S
@@ -47,8 +47,10 @@ p_end:               .long   _end
 p_pstack:      .long   _platform_stack_top
 #endif
 
-       .weak   _zimage_start
        .globl  _zimage_start
+       /* Clang appears to require the .weak directive to be after the symbol
+        * is defined. See https://bugs.llvm.org/show_bug.cgi?id=38921  */
+       .weak   _zimage_start
 _zimage_start:
        .globl  _zimage_start_lib
 _zimage_start_lib:
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index e5bfbf62827a..8336b9016ca9 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -169,6 +169,11 @@ static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len)
        int n = 0, l = 0;
        char buffer[128];
 
+       if (!pdn) {
+               pr_warn("EEH: Note: No error log for absent device.\n");
+               return 0;
+       }
+
        n += scnprintf(buf+n, len-n, "%04x:%02x:%02x.%01x\n",
                       edev->phb->global_number, pdn->busno,
                       PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn));
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index 0b50019505a5..79cf21be8f6e 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -481,6 +481,9 @@ static void setup_page_sizes(void)
                for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
                        struct mmu_psize_def *def = &mmu_psize_defs[psize];
 
+                       if (!def->shift)
+                               continue;
+
                        if (tlb1ps & (1U << (def->shift - 10))) {
                                def->flags |= MMU_PAGE_SIZE_DIRECT;
 
diff --git a/arch/xtensa/boot/Makefile b/arch/xtensa/boot/Makefile
index ca20a892021b..6c6877d628ef 100644
--- a/arch/xtensa/boot/Makefile
+++ b/arch/xtensa/boot/Makefile
@@ -31,7 +31,7 @@ $(bootdir-y): $(addprefix $(obj)/,$(subdir-y)) \
              $(addprefix $(obj)/,$(host-progs))
        $(Q)$(MAKE) $(build)=$(obj)/$@ $(MAKECMDGOALS)
 
-OBJCOPYFLAGS = --strip-all -R .comment -R .note.gnu.build-id -O binary
+OBJCOPYFLAGS = --strip-all -R .comment -R .notes -O binary
 
 vmlinux.bin: vmlinux FORCE
        $(call if_changed,objcopy)
diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h
index b42d68bfe3cf..521c1e789e6e 100644
--- a/arch/xtensa/include/asm/processor.h
+++ b/arch/xtensa/include/asm/processor.h
@@ -24,7 +24,11 @@
 # error Linux requires the Xtensa Windowed Registers Option.
 #endif
 
-#define ARCH_SLAB_MINALIGN     XCHAL_DATA_WIDTH
+/* Xtensa ABI requires stack alignment to be at least 16 */
+
+#define STACK_ALIGN (XCHAL_DATA_WIDTH > 16 ? XCHAL_DATA_WIDTH : 16)
+
+#define ARCH_SLAB_MINALIGN STACK_ALIGN
 
 /*
  * User space process size: 1 GB.
diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
index 23ce62e60435..27c8e07ace43 100644
--- a/arch/xtensa/kernel/head.S
+++ b/arch/xtensa/kernel/head.S
@@ -88,9 +88,12 @@ _SetupMMU:
        initialize_mmu
 #if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
        rsr     a2, excsave1
-       movi    a3, 0x08000000
+       movi    a3, XCHAL_KSEG_PADDR
+       bltu    a2, a3, 1f
+       sub     a2, a2, a3
+       movi    a3, XCHAL_KSEG_SIZE
        bgeu    a2, a3, 1f
-       movi    a3, 0xd0000000
+       movi    a3, XCHAL_KSEG_CACHED_VADDR
        add     a2, a2, a3
        wsr     a2, excsave1
 1:
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index 31411fc82662..e8358ea0a9f9 100644
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -109,6 +109,7 @@ SECTIONS
   .fixup   : { *(.fixup) }
 
   EXCEPTION_TABLE(16)
+  NOTES
   /* Data section */
 
   _sdata = .;
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 987e8f503522..ff4280800cd0 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2435,7 +2435,7 @@ static int cdrom_ioctl_select_disc(struct cdrom_device_info *cdi,
                return -ENOSYS;
 
        if (arg != CDSL_CURRENT && arg != CDSL_NONE) {
-               if ((int)arg >= cdi->capacity)
+               if (arg >= cdi->capacity)
                        return -EINVAL;
        }
 
diff --git a/drivers/clk/at91/clk-pll.c b/drivers/clk/at91/clk-pll.c
index 2bb2551c6245..a17a428fa706 100644
--- a/drivers/clk/at91/clk-pll.c
+++ b/drivers/clk/at91/clk-pll.c
@@ -133,6 +133,9 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hw,
 {
        struct clk_pll *pll = to_clk_pll(hw);
 
+       if (!pll->div || !pll->mul)
+               return 0;
+
        return (parent_rate / pll->div) * (pll->mul + 1);
 }
 
diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
index fbaa84a33c46..14071a57c926 100644
--- a/drivers/clk/clk-s2mps11.c
+++ b/drivers/clk/clk-s2mps11.c
@@ -245,6 +245,36 @@ static const struct platform_device_id s2mps11_clk_id[] = {
 };
 MODULE_DEVICE_TABLE(platform, s2mps11_clk_id);
 
+#ifdef CONFIG_OF
+/*
+ * Device is instantiated through parent MFD device and device matching is done
+ * through platform_device_id.
+ *
+ * However if device's DT node contains proper clock compatible and driver is
+ * built as a module, then the *module* matching will be done trough DT aliases.
+ * This requires of_device_id table.  In the same time this will not change the
+ * actual *device* matching so do not add .of_match_table.
+ */
+static const struct of_device_id s2mps11_dt_match[] = {
+       {
+               .compatible = "samsung,s2mps11-clk",
+               .data = (void *)S2MPS11X,
+       }, {
+               .compatible = "samsung,s2mps13-clk",
+               .data = (void *)S2MPS13X,
+       }, {
+               .compatible = "samsung,s2mps14-clk",
+               .data = (void *)S2MPS14X,
+       }, {
+               .compatible = "samsung,s5m8767-clk",
+               .data = (void *)S5M8767X,
+       }, {
+               /* Sentinel */
+       },
+};
+MODULE_DEVICE_TABLE(of, s2mps11_dt_match);
+#endif
+
 static struct platform_driver s2mps11_clk_driver = {
        .driver = {
                .name  = "s2mps11-clk",
diff --git a/drivers/clk/hisilicon/reset.c b/drivers/clk/hisilicon/reset.c
index 2a5015c736ce..43e82fa64422 100644
--- a/drivers/clk/hisilicon/reset.c
+++ b/drivers/clk/hisilicon/reset.c
@@ -109,9 +109,8 @@ struct hisi_reset_controller *hisi_reset_init(struct platform_device *pdev)
                return NULL;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       rstc->membase = devm_ioremap(&pdev->dev,
-                               res->start, resource_size(res));
-       if (!rstc->membase)
+       rstc->membase = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(rstc->membase))
                return NULL;
 
        spin_lock_init(&rstc->lock);
diff --git a/drivers/clk/rockchip/clk-ddr.c b/drivers/clk/rockchip/clk-ddr.c
index e8075359366b..ebce5260068b 100644
--- a/drivers/clk/rockchip/clk-ddr.c
+++ b/drivers/clk/rockchip/clk-ddr.c
@@ -80,16 +80,12 @@ static long rockchip_ddrclk_sip_round_rate(struct clk_hw *hw,
 static u8 rockchip_ddrclk_get_parent(struct clk_hw *hw)
 {
        struct rockchip_ddrclk *ddrclk = to_rockchip_ddrclk_hw(hw);
-       int num_parents = clk_hw_get_num_parents(hw);
        u32 val;
 
        val = clk_readl(ddrclk->reg_base +
                        ddrclk->mux_offset) >> ddrclk->mux_shift;
        val &= GENMASK(ddrclk->mux_width - 1, 0);
 
-       if (val >= num_parents)
-               return -EINVAL;
-
        return val;
 }
 
diff --git a/drivers/clocksource/i8253.c b/drivers/clocksource/i8253.c
index 0efd36e483ab..a2b545fdee81 100644
--- a/drivers/clocksource/i8253.c
+++ b/drivers/clocksource/i8253.c
@@ -19,6 +19,13 @@
 DEFINE_RAW_SPINLOCK(i8253_lock);
 EXPORT_SYMBOL(i8253_lock);
 
+/*
+ * Handle PIT quirk in pit_shutdown() where zeroing the counter register
+ * restarts the PIT, negating the shutdown. On platforms with the quirk,
+ * platform specific code can set this to false.
+ */
+bool i8253_clear_counter_on_shutdown __ro_after_init = true;
+
 #ifdef CONFIG_CLKSRC_I8253
 /*
  * Since the PIT overflows every tick, its not very useful
@@ -108,8 +115,11 @@ static int pit_shutdown(struct clock_event_device *evt)
        raw_spin_lock(&i8253_lock);
 
        outb_p(0x30, PIT_MODE);
-       outb_p(0, PIT_CH0);
-       outb_p(0, PIT_CH0);
+
+       if (i8253_clear_counter_on_shutdown) {
+               outb_p(0, PIT_CH0);
+               outb_p(0, PIT_CH0);
+       }
 
        raw_spin_unlock(&i8253_lock);
        return 0;
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index db7890cb254e..b59441d109a5 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1230,6 +1230,9 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_
        mutex_lock(&mgr->lock);
        mstb = mgr->mst_primary;
 
+       if (!mstb)
+               goto out;
+
        for (i = 0; i < lct - 1; i++) {
                int shift = (i % 2) ? 0 : 4;
                int port_num = (rad[i / 2] >> shift) & 0xf;
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 6c70a5bfd7d8..840522867436 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -76,6 +76,9 @@ static const struct {
 /* HDMI N/CTS table */
 #define TMDS_297M 297000
 #define TMDS_296M 296703
+#define TMDS_594M 594000
+#define TMDS_593M 593407
+
 static const struct {
        int sample_rate;
        int clock;
@@ -96,6 +99,20 @@ static const struct {
        { 176400, TMDS_297M, 18816, 247500 },
        { 192000, TMDS_296M, 23296, 281250 },
        { 192000, TMDS_297M, 20480, 247500 },
+       { 44100, TMDS_593M, 8918, 937500 },
+       { 44100, TMDS_594M, 9408, 990000 },
+       { 48000, TMDS_593M, 5824, 562500 },
+       { 48000, TMDS_594M, 6144, 594000 },
+       { 32000, TMDS_593M, 5824, 843750 },
+       { 32000, TMDS_594M, 3072, 445500 },
+       { 88200, TMDS_593M, 17836, 937500 },
+       { 88200, TMDS_594M, 18816, 990000 },
+       { 96000, TMDS_593M, 11648, 562500 },
+       { 96000, TMDS_594M, 12288, 594000 },
+       { 176400, TMDS_593M, 35672, 937500 },
+       { 176400, TMDS_594M, 37632, 990000 },
+       { 192000, TMDS_593M, 23296, 562500 },
+       { 192000, TMDS_594M, 24576, 594000 },
 };
 
 /* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 67db1577ee49..fd11be6b23b9 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -368,7 +368,8 @@ static u64 execlists_update_context(struct drm_i915_gem_request *rq)
 
        reg_state[CTX_RING_TAIL+1] = intel_ring_offset(rq->ring, rq->tail);
 
-       /* True 32b PPGTT with dynamic page allocation: update PDP
+       /*
+        * True 32b PPGTT with dynamic page allocation: update PDP
         * registers and point the unallocated PDPs to scratch page.
         * PML4 is allocated during ppgtt init, so this is not needed
         * in 48-bit mode.
@@ -376,6 +377,17 @@ static u64 execlists_update_context(struct drm_i915_gem_request *rq)
        if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
                execlists_update_context_pdps(ppgtt, reg_state);
 
+       /*
+        * Make sure the context image is complete before we submit it to HW.
+        *
+        * Ostensibly, writes (including the WCB) should be flushed prior to
+        * an uncached write such as our mmio register access, the empirical
+        * evidence (esp. on Braswell) suggests that the WC write into memory
+        * may not be visible to the HW prior to the completion of the UC
+        * register write and that we may begin execution from the context
+        * before its image is complete leading to invalid PD chasing.
+        */
+       wmb();
        return ce->lrc_desc;
 }
 
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index 7def04049498..6a0b25e0823f 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -273,6 +273,17 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
        }
 
        txn->last_pat->next_pa = 0;
+       /* ensure that the written descriptors are visible to DMM */
+       wmb();
+
+       /*
+        * NOTE: the wmb() above should be enough, but there seems to be a bug
+        * in OMAP's memory barrier implementation, which in some rare cases may
+        * cause the writes not to be observable after wmb().
+        */
+
+       /* read back to ensure the data is in RAM */
+       readl(&txn->last_pat->next_pa);
 
        /* write to PAT_DESCR to clear out any pending transaction */
        dmm_write(dmm, 0x0, reg[PAT_DESCR][engine->id]);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 8c8cbe837e61..f2033ab36f37 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -478,6 +478,11 @@ static int rockchip_drm_platform_remove(struct platform_device *pdev)
        return 0;
 }
 
+static void rockchip_drm_platform_shutdown(struct platform_device *pdev)
+{
+       rockchip_drm_platform_remove(pdev);
+}
+
 static const struct of_device_id rockchip_drm_dt_ids[] = {
        { .compatible = "rockchip,display-subsystem", },
        { /* sentinel */ },
@@ -487,6 +492,7 @@ MODULE_DEVICE_TABLE(of, rockchip_drm_dt_ids);
 static struct platform_driver rockchip_drm_platform_driver = {
        .probe = rockchip_drm_platform_probe,
        .remove = rockchip_drm_platform_remove,
+       .shutdown = rockchip_drm_platform_shutdown,
        .driver = {
                .name = "rockchip-drm",
                .of_match_table = rockchip_drm_dt_ids,
diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
index 5d9c2b03d83a..e83f8111a5fb 100644
--- a/drivers/media/i2c/tvp5150.c
+++ b/drivers/media/i2c/tvp5150.c
@@ -897,9 +897,6 @@ static int tvp5150_set_selection(struct v4l2_subdev *sd,
 
        /* tvp5150 has some special limits */
        rect.left = clamp(rect.left, 0, TVP5150_MAX_CROP_LEFT);
-       rect.width = clamp_t(unsigned int, rect.width,
-                            TVP5150_H_MAX - TVP5150_MAX_CROP_LEFT - rect.left,
-                            TVP5150_H_MAX - rect.left);
        rect.top = clamp(rect.top, 0, TVP5150_MAX_CROP_TOP);
 
        /* Calculate height based on current standard */
@@ -913,9 +910,16 @@ static int tvp5150_set_selection(struct v4l2_subdev *sd,
        else
                hmax = TVP5150_V_MAX_OTHERS;
 
-       rect.height = clamp_t(unsigned int, rect.height,
+       /*
+        * alignments:
+        *  - width = 2 due to UYVY colorspace
+        *  - height, image = no special alignment
+        */
+       v4l_bound_align_image(&rect.width,
+                             TVP5150_H_MAX - TVP5150_MAX_CROP_LEFT - rect.left,
+                             TVP5150_H_MAX - rect.left, 1, &rect.height,
                              hmax - TVP5150_MAX_CROP_TOP - rect.top,
-                             hmax - rect.top);
+                             hmax - rect.top, 0, 0);
 
        tvp5150_write(sd, TVP5150_VERT_BLANKING_START, rect.top);
        tvp5150_write(sd, TVP5150_VERT_BLANKING_STOP,
diff --git a/drivers/media/pci/cx23885/altera-ci.c b/drivers/media/pci/cx23885/altera-ci.c
index aaf4e46ff3e9..a0c1ff97f905 100644
--- a/drivers/media/pci/cx23885/altera-ci.c
+++ b/drivers/media/pci/cx23885/altera-ci.c
@@ -660,6 +660,10 @@ static int altera_hw_filt_init(struct altera_ci_config *config, int hw_filt_nr)
                }
 
                temp_int = append_internal(inter);
+               if (!temp_int) {
+                       ret = -ENOMEM;
+                       goto err;
+               }
                inter->filts_used = 1;
                inter->dev = config->dev;
                inter->fpga_rw = config->fpga_rw;
@@ -694,6 +698,7 @@ static int altera_hw_filt_init(struct altera_ci_config *config, int hw_filt_nr)
                     __func__, ret);
 
        kfree(pid_filt);
+       kfree(inter);
 
        return ret;
 }
@@ -728,6 +733,10 @@ int altera_ci_init(struct altera_ci_config *config, int ci_nr)
                }
 
                temp_int = append_internal(inter);
+               if (!temp_int) {
+                       ret = -ENOMEM;
+                       goto err;
+               }
                inter->cis_used = 1;
                inter->dev = config->dev;
                inter->fpga_rw = config->fpga_rw;
@@ -796,6 +805,7 @@ int altera_ci_init(struct altera_ci_config *config, int ci_nr)
        ci_dbg_print("%s: Cannot initialize CI: Error %d.\n", __func__, ret);
 
        kfree(state);
+       kfree(inter);
 
        return ret;
 }
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 58329d2dacd1..75c816a5dded 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -196,7 +196,7 @@ comment "Disk-On-Chip Device Drivers"
 config MTD_DOCG3
        tristate "M-Systems Disk-On-Chip G3"
        select BCH
-       select BCH_CONST_PARAMS
+       select BCH_CONST_PARAMS if !MTD_NAND_BCH
        select BITREVERSE
        ---help---
          This provides an MTD device driver for the M-Systems DiskOnChip
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index 31f61a744d66..9473d12ce239 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -541,8 +541,8 @@ bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string)
                for (i = 0; i < BNAD_ETHTOOL_STATS_NUM; i++) {
                        BUG_ON(!(strlen(bnad_net_stats_strings[i]) <
                                   ETH_GSTRING_LEN));
-                       memcpy(string, bnad_net_stats_strings[i],
-                              ETH_GSTRING_LEN);
+                       strncpy(string, bnad_net_stats_strings[i],
+                               ETH_GSTRING_LEN);
                        string += ETH_GSTRING_LEN;
                }
                bmap = bna_tx_rid_mask(&bnad->bna);
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index e84574b1eae7..2a81f6d72140 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -1826,11 +1826,12 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
        int i;
-       char *p = NULL;
        const struct e1000_stats *stat = e1000_gstrings_stats;
 
        e1000_update_stats(adapter);
-       for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
+       for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++, stat++) {
+               char *p;
+
                switch (stat->type) {
                case NETDEV_STATS:
                        p = (char *)netdev + stat->stat_offset;
@@ -1841,15 +1842,13 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
                default:
                        WARN_ONCE(1, "Invalid E1000 stat type: %u index %d\n",
                                  stat->type, i);
-                       break;
+                       continue;
                }
 
                if (stat->sizeof_stat == sizeof(u64))
                        data[i] = *(u64 *)p;
                else
                        data[i] = *(u32 *)p;
-
-               stat++;
        }
 /* BUG_ON(i != E1000_STATS_LEN); */
 }
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index dd112aa5cebb..39a09e18c1b7 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -521,8 +521,6 @@ void e1000_down(struct e1000_adapter *adapter)
        struct net_device *netdev = adapter->netdev;
        u32 rctl, tctl;
 
-       netif_carrier_off(netdev);
-
        /* disable receives in the hardware */
        rctl = er32(RCTL);
        ew32(RCTL, rctl & ~E1000_RCTL_EN);
@@ -538,6 +536,15 @@ void e1000_down(struct e1000_adapter *adapter)
        E1000_WRITE_FLUSH();
        msleep(10);
 
+       /* Set the carrier off after transmits have been disabled in the
+        * hardware, to avoid race conditions with e1000_watchdog() (which
+        * may be running concurrently to us, checking for the carrier
+        * bit to decide whether it should enable transmits again). Such
+        * a race condition would result into transmission being disabled
+        * in the hardware until the next IFF_DOWN+IFF_UP cycle.
+        */
+       netif_carrier_off(netdev);
+
        napi_disable(&adapter->napi);
 
        e1000_irq_disable(adapter);
diff --git a/drivers/of/of_numa.c b/drivers/of/of_numa.c
index 2db1f7a04baf..0b29ee9ee8c3 100644
--- a/drivers/of/of_numa.c
+++ b/drivers/of/of_numa.c
@@ -126,9 +126,14 @@ static int __init of_numa_parse_distance_map_v1(struct device_node *map)
                distance = of_read_number(matrix, 1);
                matrix++;
 
+               if ((nodea == nodeb && distance != LOCAL_DISTANCE) ||
+                   (nodea != nodeb && distance <= LOCAL_DISTANCE)) {
+                       pr_err("Invalid distance[node%d -> node%d] = %d\n",
+                              nodea, nodeb, distance);
+                       return -EINVAL;
+               }
+
                numa_set_distance(nodea, nodeb, distance);
-               pr_debug("distance[node%d -> node%d] = %d\n",
-                        nodea, nodeb, distance);
 
                /* Set default distance of node B->A same as A->B */
                if (nodeb > nodea)
diff --git a/drivers/rtc/hctosys.c b/drivers/rtc/hctosys.c
index e79f2a181ad2..b9ec4a16db1f 100644
--- a/drivers/rtc/hctosys.c
+++ b/drivers/rtc/hctosys.c
@@ -50,8 +50,10 @@ static int __init rtc_hctosys(void)
        tv64.tv_sec = rtc_tm_to_time64(&tm);
 
 #if BITS_PER_LONG == 32
-       if (tv64.tv_sec > INT_MAX)
+       if (tv64.tv_sec > INT_MAX) {
+               err = -ERANGE;
                goto err_read;
+       }
 #endif
 
        err = do_settimeofday64(&tv64);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index b6d9e3104b89..2e3a70a6b300 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -4894,7 +4894,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
                                         * The next call disables the board
                                         * completely.
                                         */
-                                       ha->isp_ops->reset_adapter(vha);
+                                       qla2x00_abort_isp_cleanup(vha);
                                        vha->flags.online = 0;
                                        clear_bit(ISP_ABORT_RETRY,
                                            &vha->dpc_flags);
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index a1b01d66c9ab..bf29ad454118 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -3580,10 +3580,7 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
        mcp->mb[0] = MBC_PORT_PARAMS;
        mcp->mb[1] = loop_id;
        mcp->mb[2] = BIT_0;
-       if (IS_CNA_CAPABLE(vha->hw))
-               mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0);
-       else
-               mcp->mb[3] = port_speed & (BIT_2|BIT_1|BIT_0);
+       mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0);
        mcp->mb[9] = vha->vp_idx;
        mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
        mcp->in_mb = MBX_3|MBX_1|MBX_0;
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 793395451982..ea6b62cece88 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -661,7 +661,7 @@ static void sc16is7xx_handle_tx(struct uart_port *port)
                uart_write_wakeup(port);
 }
 
-static void sc16is7xx_port_irq(struct sc16is7xx_port *s, int portno)
+static bool sc16is7xx_port_irq(struct sc16is7xx_port *s, int portno)
 {
        struct uart_port *port = &s->p[portno].port;
 
@@ -670,7 +670,7 @@ static void sc16is7xx_port_irq(struct sc16is7xx_port *s, int portno)
 
                iir = sc16is7xx_port_read(port, SC16IS7XX_IIR_REG);
                if (iir & SC16IS7XX_IIR_NO_INT_BIT)
-                       break;
+                       return false;
 
                iir &= SC16IS7XX_IIR_ID_MASK;
 
@@ -692,16 +692,23 @@ static void sc16is7xx_port_irq(struct sc16is7xx_port *s, int portno)
                                            port->line, iir);
                        break;
                }
-       } while (1);
+       } while (0);
+       return true;
 }
 
 static void sc16is7xx_ist(struct kthread_work *ws)
 {
        struct sc16is7xx_port *s = to_sc16is7xx_port(ws, irq_work);
-       int i;
 
-       for (i = 0; i < s->devtype->nr_uart; ++i)
-               sc16is7xx_port_irq(s, i);
+       while (1) {
+               bool keep_polling = false;
+               int i;
+
+               for (i = 0; i < s->devtype->nr_uart; ++i)
+                       keep_polling |= sc16is7xx_port_irq(s, i);
+               if (!keep_polling)
+                       break;
+       }
 }
 
 static irqreturn_t sc16is7xx_irq(int irq, void *dev_id)
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index e6429d419b80..e6c4321d695c 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -354,7 +354,7 @@ struct tty_driver *tty_find_polling_driver(char *name, int *line)
        mutex_lock(&tty_mutex);
        /* Search through the tty devices to look for a match */
        list_for_each_entry(p, &tty_drivers, tty_drivers) {
-               if (strncmp(name, p->name, len) != 0)
+               if (!len || strncmp(name, p->name, len) != 0)
                        continue;
                stp = str;
                if (*stp == ',')
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
index bf36ac9aee41..11bb9a5c700d 100644
--- a/drivers/tty/tty_ioctl.c
+++ b/drivers/tty/tty_ioctl.c
@@ -325,7 +325,7 @@ speed_t tty_termios_baud_rate(struct ktermios *termios)
                else
                        cbaud += 15;
        }
-       return baud_table[cbaud];
+       return cbaud >= n_baud_table ? 0 : baud_table[cbaud];
 }
 EXPORT_SYMBOL(tty_termios_baud_rate);
 
@@ -361,7 +361,7 @@ speed_t tty_termios_input_baud_rate(struct ktermios *termios)
                else
                        cbaud += 15;
        }
-       return baud_table[cbaud];
+       return cbaud >= n_baud_table ? 0 : baud_table[cbaud];
 #else
        return tty_termios_baud_rate(termios);
 #endif
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 9e36632b6f0e..17f9ad6fdfa5 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -999,7 +999,8 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
                                prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
                        }
                        /*
-                        * Set prot_iter to data_iter, and advance past any
+                        * Set prot_iter to data_iter and truncate it to
+                        * prot_bytes, and advance data_iter past any
                         * preceeding prot_bytes that may be present.
                         *
                         * Also fix up the exp_data_len to reflect only the
@@ -1008,6 +1009,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
                        if (prot_bytes) {
                                exp_data_len -= prot_bytes;
                                prot_iter = data_iter;
+                               iov_iter_truncate(&prot_iter, prot_bytes);
                                iov_iter_advance(&data_iter, prot_bytes);
                        }
                        tag = vhost64_to_cpu(vq, v_req_pi.tag);
diff --git a/drivers/video/fbdev/aty/mach64_accel.c b/drivers/video/fbdev/aty/mach64_accel.c
index 182bd680141f..e9dfe0e40b8b 100644
--- a/drivers/video/fbdev/aty/mach64_accel.c
+++ b/drivers/video/fbdev/aty/mach64_accel.c
@@ -126,7 +126,7 @@ void aty_init_engine(struct atyfb_par *par, struct fb_info *info)
 
        /* set host attributes */
        wait_for_fifo(13, par);
-       aty_st_le32(HOST_CNTL, 0, par);
+       aty_st_le32(HOST_CNTL, HOST_BYTE_ALIGN, par);
 
        /* set pattern attributes */
        aty_st_le32(PAT_REG0, 0, par);
@@ -232,7 +232,8 @@ void atyfb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
                rotation = rotation24bpp(dx, direction);
        }
 
-       wait_for_fifo(4, par);
+       wait_for_fifo(5, par);
+       aty_st_le32(DP_PIX_WIDTH, par->crtc.dp_pix_width, par);
        aty_st_le32(DP_SRC, FRGD_SRC_BLIT, par);
        aty_st_le32(SRC_Y_X, (sx << 16) | sy, par);
        aty_st_le32(SRC_HEIGHT1_WIDTH1, (width << 16) | area->height, par);
@@ -268,7 +269,8 @@ void atyfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
                rotation = rotation24bpp(dx, DST_X_LEFT_TO_RIGHT);
        }
 
-       wait_for_fifo(3, par);
+       wait_for_fifo(4, par);
+       aty_st_le32(DP_PIX_WIDTH, par->crtc.dp_pix_width, par);
        aty_st_le32(DP_FRGD_CLR, color, par);
        aty_st_le32(DP_SRC,
                    BKGD_SRC_BKGD_CLR | FRGD_SRC_FRGD_CLR | MONO_SRC_ONE,
@@ -283,7 +285,7 @@ void atyfb_imageblit(struct fb_info *info, const struct fb_image *image)
 {
        struct atyfb_par *par = (struct atyfb_par *) info->par;
        u32 src_bytes, dx = image->dx, dy = image->dy, width = image->width;
-       u32 pix_width_save, pix_width, host_cntl, rotation = 0, src, mix;
+       u32 pix_width, rotation = 0, src, mix;
 
        if (par->asleep)
                return;
@@ -295,8 +297,7 @@ void atyfb_imageblit(struct fb_info *info, const struct fb_image *image)
                return;
        }
 
-       pix_width = pix_width_save = aty_ld_le32(DP_PIX_WIDTH, par);
-       host_cntl = aty_ld_le32(HOST_CNTL, par) | HOST_BYTE_ALIGN;
+       pix_width = par->crtc.dp_pix_width;
 
        switch (image->depth) {
        case 1:
@@ -344,7 +345,7 @@ void atyfb_imageblit(struct fb_info *info, const struct fb_image *image)
                 * since Rage 3D IIc we have DP_HOST_TRIPLE_EN bit
                 * this hwaccelerated triple has an issue with not aligned data
                 */
-               if (M64_HAS(HW_TRIPLE) && image->width % 8 == 0)
+               if (image->depth == 1 && M64_HAS(HW_TRIPLE) && image->width % 8 == 0)
                        pix_width |= DP_HOST_TRIPLE_EN;
        }
 
@@ -369,19 +370,18 @@ void atyfb_imageblit(struct fb_info *info, const struct fb_image *image)
                mix = FRGD_MIX_D_XOR_S | BKGD_MIX_D;
        }
 
-       wait_for_fifo(6, par);
-       aty_st_le32(DP_WRITE_MASK, 0xFFFFFFFF, par);
+       wait_for_fifo(5, par);
        aty_st_le32(DP_PIX_WIDTH, pix_width, par);
        aty_st_le32(DP_MIX, mix, par);
        aty_st_le32(DP_SRC, src, par);
-       aty_st_le32(HOST_CNTL, host_cntl, par);
+       aty_st_le32(HOST_CNTL, HOST_BYTE_ALIGN, par);
        aty_st_le32(DST_CNTL, DST_Y_TOP_TO_BOTTOM | DST_X_LEFT_TO_RIGHT | rotation, par);
 
        draw_rect(dx, dy, width, image->height, par);
        src_bytes = (((image->width * image->depth) + 7) / 8) * image->height;
 
        /* manual triple each pixel */
-       if (info->var.bits_per_pixel == 24 && !(pix_width & DP_HOST_TRIPLE_EN)) {
+       if (image->depth == 1 && info->var.bits_per_pixel == 24 && !(pix_width & DP_HOST_TRIPLE_EN)) {
                int inbit, outbit, mult24, byte_id_in_dword, width;
                u8 *pbitmapin = (u8*)image->data, *pbitmapout;
                u32 hostdword;
@@ -414,7 +414,7 @@ void atyfb_imageblit(struct fb_info *info, const struct fb_image *image)
                                }
                        }
                        wait_for_fifo(1, par);
-                       aty_st_le32(HOST_DATA0, hostdword, par);
+                       aty_st_le32(HOST_DATA0, le32_to_cpu(hostdword), par);
                }
        } else {
                u32 *pbitmap, dwords = (src_bytes + 3) / 4;
@@ -423,8 +423,4 @@ void atyfb_imageblit(struct fb_info *info, const struct fb_image *image)
                        aty_st_le32(HOST_DATA0, get_unaligned_le32(pbitmap), par);
                }
        }
-
-       /* restore pix_width */
-       wait_for_fifo(1, par);
-       aty_st_le32(DP_PIX_WIDTH, pix_width_save, par);
 }
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index d7b78d531e63..398a3eddb2df 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -204,6 +204,14 @@ static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
                        break;
                if (schedule_timeout_interruptible(P9_LOCK_TIMEOUT) != 0)
                        break;
+               /*
+                * p9_client_lock_dotl overwrites flock.client_id with the
+                * server message, free and reuse the client name
+                */
+               if (flock.client_id != fid->clnt->name) {
+                       kfree(flock.client_id);
+                       flock.client_id = fid->clnt->name;
+               }
        }
 
        /* map 9p status to VFS status */
@@ -235,6 +243,8 @@ static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
                locks_lock_file_wait(filp, fl);
                fl->fl_type = fl_type;
        }
+       if (flock.client_id != fid->clnt->name)
+               kfree(flock.client_id);
 out:
        return res;
 }
@@ -269,7 +279,7 @@ static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)
 
        res = p9_client_getlock_dotl(fid, &glock);
        if (res < 0)
-               return res;
+               goto out;
        /* map 9p lock type to os lock type */
        switch (glock.type) {
        case P9_LOCK_TYPE_RDLCK:
@@ -290,7 +300,9 @@ static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)
                        fl->fl_end = glock.start + glock.length - 1;
                fl->fl_pid = glock.proc_id;
        }
-       kfree(glock.client_id);
+out:
+       if (glock.client_id != fid->clnt->name)
+               kfree(glock.client_id);
        return res;
 }
 
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 17e143d91fa9..1b1a9e35e082 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1548,12 +1548,11 @@ static noinline int run_delalloc_nocow(struct inode *inode,
        }
        btrfs_release_path(path);
 
-       if (cur_offset <= end && cow_start == (u64)-1) {
+       if (cur_offset <= end && cow_start == (u64)-1)
                cow_start = cur_offset;
-               cur_offset = end;
-       }
 
        if (cow_start != (u64)-1) {
+               cur_offset = end;
                ret = cow_file_range(inode, locked_page, cow_start, end, end,
                                     page_started, nr_written, 1, NULL);
                if (ret)
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index cbf512b64597..96ad2778405b 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -3911,9 +3911,17 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
                goto out_unlock;
        if (len == 0)
                olen = len = src->i_size - off;
-       /* if we extend to eof, continue to block boundary */
-       if (off + len == src->i_size)
+       /*
+        * If we extend to eof, continue to block boundary if and only if the
+        * destination end offset matches the destination file's size, otherwise
+        * we would be corrupting data by placing the eof block into the middle
+        * of a file.
+        */
+       if (off + len == src->i_size) {
+               if (!IS_ALIGNED(len, bs) && destoff + len < inode->i_size)
+                       goto out_unlock;
                len = ALIGN(src->i_size, bs) - off;
+       }
 
        if (len == 0) {
                ret = 0;
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 1f754336f801..30d9d9e7057d 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -1077,8 +1077,12 @@ static struct dentry *splice_dentry(struct dentry *dn, struct inode *in)
        if (IS_ERR(realdn)) {
                pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
                       PTR_ERR(realdn), dn, in, ceph_vinop(in));
-               dput(dn);
-               dn = realdn; /* note realdn contains the error */
+               dn = realdn;
+               /*
+                * Caller should release 'dn' in the case of error.
+                * If 'req->r_dentry' is passed to this function,
+                * caller should leave 'req->r_dentry' untouched.
+                */
                goto out;
        } else if (realdn) {
                dout("dn %p (%d) spliced with %p (%d) "
diff --git a/fs/configfs/symlink.c b/fs/configfs/symlink.c
index 314b4edac72b..fea6db1ee065 100644
--- a/fs/configfs/symlink.c
+++ b/fs/configfs/symlink.c
@@ -64,7 +64,7 @@ static void fill_item_path(struct config_item * item, char * buffer, int length)
 
                /* back up enough to print this bus id with '/' */
                length -= cur;
-               strncpy(buffer + length,config_item_name(p),cur);
+               memcpy(buffer + length, config_item_name(p), cur);
                *(buffer + --length) = '/';
        }
 }
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index bc727c393a89..3c3757ee11f0 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -124,6 +124,7 @@ static struct buffer_head *__ext4_read_dirblock(struct inode *inode,
        if (!is_dx_block && type == INDEX) {
                ext4_error_inode(inode, func, line, block,
                       "directory leaf block found instead of index block");
+               brelse(bh);
                return ERR_PTR(-EFSCORRUPTED);
        }
        if (!ext4_has_metadata_csum(inode->i_sb) ||
@@ -2842,7 +2843,9 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode)
                        list_del_init(&EXT4_I(inode)->i_orphan);
                        mutex_unlock(&sbi->s_orphan_lock);
                }
-       }
+       } else
+               brelse(iloc.bh);
+
        jbd_debug(4, "superblock will point to %lu\n", inode->i_ino);
        jbd_debug(4, "orphan inode %lu will point to %d\n",
                        inode->i_ino, NEXT_ORPHAN(inode));
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 1da301ee78ce..9be605c63ae1 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -442,16 +442,18 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
 
                BUFFER_TRACE(bh, "get_write_access");
                err = ext4_journal_get_write_access(handle, bh);
-               if (err)
+               if (err) {
+                       brelse(bh);
                        return err;
+               }
                ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n", block,
                           block - start, count2);
                ext4_set_bits(bh->b_data, block - start, count2);
 
                err = ext4_handle_dirty_metadata(handle, NULL, bh);
+               brelse(bh);
                if (unlikely(err))
                        return err;
-               brelse(bh);
        }
 
        return 0;
@@ -588,7 +590,6 @@ static int setup_new_flex_group_blocks(struct super_block *sb,
                bh = bclean(handle, sb, block);
                if (IS_ERR(bh)) {
                        err = PTR_ERR(bh);
-                       bh = NULL;
                        goto out;
                }
                overhead = ext4_group_overhead_blocks(sb, group);
@@ -600,9 +601,9 @@ static int setup_new_flex_group_blocks(struct super_block *sb,
                ext4_mark_bitmap_end(group_data[i].blocks_count,
                                     sb->s_blocksize * 8, bh->b_data);
                err = ext4_handle_dirty_metadata(handle, NULL, bh);
+               brelse(bh);
                if (err)
                        goto out;
-               brelse(bh);
 
 handle_ib:
                if (bg_flags[i] & EXT4_BG_INODE_UNINIT)
@@ -617,18 +618,16 @@ static int setup_new_flex_group_blocks(struct super_block *sb,
                bh = bclean(handle, sb, block);
                if (IS_ERR(bh)) {
                        err = PTR_ERR(bh);
-                       bh = NULL;
                        goto out;
                }
 
                ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb),
                                     sb->s_blocksize * 8, bh->b_data);
                err = ext4_handle_dirty_metadata(handle, NULL, bh);
+               brelse(bh);
                if (err)
                        goto out;
-               brelse(bh);
        }
-       bh = NULL;
 
        /* Mark group tables in block bitmap */
        for (j = 0; j < GROUP_TABLE_COUNT; j++) {
@@ -659,7 +658,6 @@ static int setup_new_flex_group_blocks(struct super_block *sb,
        }
 
 out:
-       brelse(bh);
        err2 = ext4_journal_stop(handle);
        if (err2 && !err)
                err = err2;
@@ -846,6 +844,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
        err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
        if (unlikely(err)) {
                ext4_std_error(sb, err);
+               iloc.bh = NULL;
                goto exit_inode;
        }
        brelse(dind);
@@ -897,6 +896,7 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
                                     sizeof(struct buffer_head *),
                                     GFP_NOFS);
        if (!n_group_desc) {
+               brelse(gdb_bh);
                err = -ENOMEM;
                ext4_warning(sb, "not enough memory for %lu groups",
                             gdb_num + 1);
@@ -912,8 +912,6 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
        kvfree(o_group_desc);
        BUFFER_TRACE(gdb_bh, "get_write_access");
        err = ext4_journal_get_write_access(handle, gdb_bh);
-       if (unlikely(err))
-               brelse(gdb_bh);
        return err;
 }
 
@@ -1095,8 +1093,10 @@ static void update_backups(struct super_block *sb, sector_t blk_off, char *data,
                           backup_block, backup_block -
                           ext4_group_first_block_no(sb, group));
                BUFFER_TRACE(bh, "get_write_access");
-               if ((err = ext4_journal_get_write_access(handle, bh)))
+               if ((err = ext4_journal_get_write_access(handle, bh))) {
+                       brelse(bh);
                        break;
+               }
                lock_buffer(bh);
                memcpy(bh->b_data, data, size);
                if (rest)
@@ -1991,7 +1991,7 @@ int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
 
        err = ext4_alloc_flex_bg_array(sb, n_group + 1);
        if (err)
-               return err;
+               goto out;
 
        err = ext4_mb_alloc_groupinfo(sb, n_group + 1);
        if (err)
@@ -2027,6 +2027,10 @@ int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
                n_blocks_count_retry = 0;
                free_flex_gd(flex_gd);
                flex_gd = NULL;
+               if (resize_inode) {
+                       iput(resize_inode);
+                       resize_inode = NULL;
+               }
                goto retry;
        }
 
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index f88d4804c3a8..75177eb498ed 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -3897,6 +3897,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
        sbi->s_groups_count = blocks_count;
        sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
                        (EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
+       if (((u64)sbi->s_groups_count * sbi->s_inodes_per_group) !=
+           le32_to_cpu(es->s_inodes_count)) {
+               ext4_msg(sb, KERN_ERR, "inodes count not valid: %u vs %llu",
+                        le32_to_cpu(es->s_inodes_count),
+                        ((u64)sbi->s_groups_count * sbi->s_inodes_per_group));
+               ret = -EINVAL;
+               goto failed_mount;
+       }
        db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
                   EXT4_DESC_PER_BLOCK(sb);
        if (ext4_has_feature_meta_bg(sb)) {
@@ -3916,14 +3924,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                ret = -ENOMEM;
                goto failed_mount;
        }
-       if (((u64)sbi->s_groups_count * sbi->s_inodes_per_group) !=
-           le32_to_cpu(es->s_inodes_count)) {
-               ext4_msg(sb, KERN_ERR, "inodes count not valid: %u vs %llu",
-                        le32_to_cpu(es->s_inodes_count),
-                        ((u64)sbi->s_groups_count * sbi->s_inodes_per_group));
-               ret = -EINVAL;
-               goto failed_mount;
-       }
 
        bgl_lock_init(sbi->s_blockgroup_lock);
 
@@ -4305,6 +4305,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
        percpu_counter_destroy(&sbi->s_freeinodes_counter);
        percpu_counter_destroy(&sbi->s_dirs_counter);
        percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
+       percpu_free_rwsem(&sbi->s_journal_flag_rwsem);
 failed_mount5:
        ext4_ext_release(sb);
        ext4_release_system_zone(sb);
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 38385bcb9148..22f765069655 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -1221,6 +1221,8 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
                        error = ext4_xattr_block_set(handle, inode, &i, &bs);
                } else if (error == -ENOSPC) {
                        if (EXT4_I(inode)->i_file_acl && !bs.s.base) {
+                               brelse(bs.bh);
+                               bs.bh = NULL;
                                error = ext4_xattr_block_find(inode, &i, &bs);
                                if (error)
                                        goto cleanup;
@@ -1391,6 +1393,8 @@ static int ext4_xattr_move_to_block(handle_t *handle, struct inode *inode,
        kfree(buffer);
        if (is)
                brelse(is->iloc.bh);
+       if (bs)
+               brelse(bs->bh);
        kfree(is);
        kfree(bs);
 
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index c94bab6103f5..b4253181b5d4 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -383,12 +383,19 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
        if (test_bit(FR_BACKGROUND, &req->flags)) {
                spin_lock(&fc->lock);
                clear_bit(FR_BACKGROUND, &req->flags);
-               if (fc->num_background == fc->max_background)
+               if (fc->num_background == fc->max_background) {
                        fc->blocked = 0;
-
-               /* Wake up next waiter, if any */
-               if (!fc->blocked && waitqueue_active(&fc->blocked_waitq))
                        wake_up(&fc->blocked_waitq);
+               } else if (!fc->blocked) {
+                       /*
+                        * Wake up next waiter, if any.  It's okay to use
+                        * waitqueue_active(), as we've already synced up
+                        * fc->blocked with waiters with the wake_up() call
+                        * above.
+                        */
+                       if (waitqueue_active(&fc->blocked_waitq))
+                               wake_up(&fc->blocked_waitq);
+               }
 
                if (fc->num_background == fc->congestion_threshold &&
                    fc->connected && fc->bdi_initialized) {
@@ -1303,12 +1310,14 @@ static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
                goto out_end;
        }
        list_move_tail(&req->list, &fpq->processing);
-       spin_unlock(&fpq->lock);
+       __fuse_get_request(req);
        set_bit(FR_SENT, &req->flags);
+       spin_unlock(&fpq->lock);
        /* matches barrier in request_wait_answer() */
        smp_mb__after_atomic();
        if (test_bit(FR_INTERRUPTED, &req->flags))
                queue_interrupt(fiq, req);
+       fuse_put_request(fc, req);
 
        return reqsize;
 
@@ -1706,8 +1715,10 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
        req->in.args[1].size = total_len;
 
        err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique);
-       if (err)
+       if (err) {
                fuse_retrieve_end(fc, req);
+               fuse_put_request(fc, req);
+       }
 
        return err;
 }
@@ -1866,16 +1877,20 @@ static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
 
        /* Is it an interrupt reply? */
        if (req->intr_unique == oh.unique) {
+               __fuse_get_request(req);
                spin_unlock(&fpq->lock);
 
                err = -EINVAL;
-               if (nbytes != sizeof(struct fuse_out_header))
+               if (nbytes != sizeof(struct fuse_out_header)) {
+                       fuse_put_request(fc, req);
                        goto err_finish;
+               }
 
                if (oh.error == -ENOSYS)
                        fc->no_interrupt = 1;
                else if (oh.error == -EAGAIN)
                        queue_interrupt(&fc->iq, req);
+               fuse_put_request(fc, req);
 
                fuse_copy_finish(cs);
                return nbytes;
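
The three fuse/dev.c hunks above share one pattern: pin the request with an
extra reference while the per-queue spinlock is still held, drop the lock, do
the remaining work, then put the reference. A minimal user-space sketch of
that pattern follows; the names and the pthread/stdatomic primitives are
illustrative, not fuse internals.

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct req {
	atomic_int refs;	/* owner holds the initial reference */
	/* ... request payload ... */
};

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

static void req_get(struct req *r)
{
	atomic_fetch_add(&r->refs, 1);
}

static void req_put(struct req *r)
{
	if (atomic_fetch_sub(&r->refs, 1) == 1)
		free(r);	/* last reference gone, safe to free */
}

static void handle(struct req *r)
{
	pthread_mutex_lock(&queue_lock);
	req_get(r);		/* pin r before dropping the lock */
	pthread_mutex_unlock(&queue_lock);

	/* r cannot be freed here even if another thread completes it */

	req_put(r);		/* may free r if we held the last reference */
}

Without the extra reference taken before unlocking, a concurrent completion
path could drop the last reference and free the request while it is still
being inspected, which is the use-after-free the fuse changes close.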
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 4408abf6675b..1cd46e667e3d 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -2900,10 +2900,12 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
        }
 
        if (io->async) {
+               bool blocking = io->blocking;
+
                fuse_aio_complete(io, ret < 0 ? ret : 0, -1);
 
                /* we have a non-extending, async request, so return */
-               if (!io->blocking)
+               if (!blocking)
                        return -EIOCBQUEUED;
 
                wait_for_completion(&wait);
diff --git a/fs/namespace.c b/fs/namespace.c
index 0a9e766b4087..41f906a6f5d9 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1599,8 +1599,13 @@ static int do_umount(struct mount *mnt, int flags)
 
        namespace_lock();
        lock_mount_hash();
-       event++;
 
+       /* Recheck MNT_LOCKED with the locks held */
+       retval = -EINVAL;
+       if (mnt->mnt.mnt_flags & MNT_LOCKED)
+               goto out;
+
+       event++;
        if (flags & MNT_DETACH) {
                if (!list_empty(&mnt->mnt_list))
                        umount_tree(mnt, UMOUNT_PROPAGATE);
@@ -1614,6 +1619,7 @@ static int do_umount(struct mount *mnt, int flags)
                        retval = 0;
                }
        }
+out:
        unlock_mount_hash();
        namespace_unlock();
        return retval;
@@ -1704,7 +1710,7 @@ SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
                goto dput_and_out;
        if (!check_mnt(mnt))
                goto dput_and_out;
-       if (mnt->mnt.mnt_flags & MNT_LOCKED)
+       if (mnt->mnt.mnt_flags & MNT_LOCKED) /* Check optimistically */
                goto dput_and_out;
        retval = -EPERM;
        if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN))
@@ -1782,8 +1788,14 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
                for (s = r; s; s = next_mnt(s, r)) {
                        if (!(flag & CL_COPY_UNBINDABLE) &&
                            IS_MNT_UNBINDABLE(s)) {
-                               s = skip_mnt_tree(s);
-                               continue;
+                               if (s->mnt.mnt_flags & MNT_LOCKED) {
+                                       /* Both unbindable and locked. */
+                                       q = ERR_PTR(-EPERM);
+                                       goto out;
+                               } else {
+                                       s = skip_mnt_tree(s);
+                                       continue;
+                               }
                        }
                        if (!(flag & CL_COPY_MNT_NS_FILE) &&
                            is_mnt_ns_file(s->mnt.mnt_root)) {
@@ -1836,7 +1848,7 @@ void drop_collected_mounts(struct vfsmount *mnt)
 {
        namespace_lock();
        lock_mount_hash();
-       umount_tree(real_mount(mnt), UMOUNT_SYNC);
+       umount_tree(real_mount(mnt), 0);
        unlock_mount_hash();
        namespace_unlock();
 }
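
The MNT_LOCKED handling above is the usual optimistic-check-then-recheck
shape: test the flag cheaply without the namespace locks for an early error
return, then test it again once the locks are held, because it may have been
set in between. A small stand-alone sketch of that shape, with made-up names
and a plain mutex standing in for the namespace/mount-hash locks:

#include <errno.h>
#include <pthread.h>

struct mnt { int locked; };			/* illustrative, not struct mount */

static pthread_mutex_t mount_lock = PTHREAD_MUTEX_INITIALIZER;

static int try_umount(struct mnt *m)
{
	/* optimistic check: cheap early exit, but not authoritative */
	if (m->locked)
		return -EINVAL;

	pthread_mutex_lock(&mount_lock);
	/* recheck with the lock held: the flag may have changed meanwhile */
	if (m->locked) {
		pthread_mutex_unlock(&mount_lock);
		return -EINVAL;
	}
	/* ... actually detach the mount ... */
	pthread_mutex_unlock(&mount_lock);
	return 0;
}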
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index e9495516527d..66985a6a7047 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -1016,6 +1016,9 @@ nfsd4_verify_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 {
        __be32 status;
 
+       if (!cstate->save_fh.fh_dentry)
+               return nfserr_nofilehandle;
+
        status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->save_fh,
                                            src_stateid, RD_STATE, src, NULL);
        if (status) {
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index 3ecb9f337b7d..20e610419501 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -1896,8 +1896,7 @@ static int ocfs2_dir_foreach_blk_el(struct inode *inode,
                                /* On error, skip the f_pos to the
                                   next block. */
                                ctx->pos = (ctx->pos | (sb->s_blocksize - 1)) + 1;
-                               brelse(bh);
-                               continue;
+                               break;
                        }
                        if (le64_to_cpu(de->inode)) {
                                unsigned char d_type = DT_UNKNOWN;
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index 1816c5e26581..a8a574897d3c 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -77,7 +77,13 @@ struct ceph_options {
 
 #define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024)
 #define CEPH_MSG_MAX_MIDDLE_LEN        (16*1024*1024)
-#define CEPH_MSG_MAX_DATA_LEN  (16*1024*1024)
+
+/*
+ * Handle the largest possible rbd object in one message.
+ * There is no limit on the size of cephfs objects, but it has to obey
+ * rsize and wsize mount options anyway.
+ */
+#define CEPH_MSG_MAX_DATA_LEN  (32*1024*1024)
 
 #define CEPH_AUTH_NAME_DEFAULT   "guest"
 
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 48c76d612d40..b699d59d0f4f 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -109,6 +109,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
                        unsigned long addr, unsigned long sz);
 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr);
 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
+void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
+                               unsigned long *start, unsigned long *end);
 struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
                              int write);
 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
@@ -131,6 +133,18 @@ static inline unsigned long hugetlb_total_pages(void)
        return 0;
 }
 
+static inline int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr,
+                                               pte_t *ptep)
+{
+       return 0;
+}
+
+static inline void adjust_range_if_pmd_sharing_possible(
+                               struct vm_area_struct *vma,
+                               unsigned long *start, unsigned long *end)
+{
+}
+
 #define follow_hugetlb_page(m,v,p,vs,a,b,i,w)  ({ BUG(); 0; })
 #define follow_huge_addr(mm, addr, write)      ERR_PTR(-EINVAL)
 #define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; })
diff --git a/include/linux/i8253.h b/include/linux/i8253.h
index e6bb36a97519..8336b2f6f834 100644
--- a/include/linux/i8253.h
+++ b/include/linux/i8253.h
@@ -21,6 +21,7 @@
 #define PIT_LATCH      ((PIT_TICK_RATE + HZ/2) / HZ)
 
 extern raw_spinlock_t i8253_lock;
+extern bool i8253_clear_counter_on_shutdown;
 extern struct clock_event_device i8253_clockevent;
 extern void clockevent_i8253_init(bool oneshot);
 
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 493d07931ea5..11a5a46ce72b 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2187,6 +2187,12 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
        return vma;
 }
 
+static inline bool range_in_vma(struct vm_area_struct *vma,
+                               unsigned long start, unsigned long end)
+{
+       return (vma && vma->vm_start <= start && end <= vma->vm_end);
+}
+
 #ifdef CONFIG_MMU
 pgprot_t vm_get_page_prot(unsigned long vm_flags);
 void vma_set_page_prot(struct vm_area_struct *vma);
diff --git a/lib/ubsan.c b/lib/ubsan.c
index 50d1d5c25deb..60e108c5c173 100644
--- a/lib/ubsan.c
+++ b/lib/ubsan.c
@@ -451,8 +451,7 @@ void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data,
 EXPORT_SYMBOL(__ubsan_handle_shift_out_of_bounds);
 
 
-void __noreturn
-__ubsan_handle_builtin_unreachable(struct unreachable_data *data)
+void __ubsan_handle_builtin_unreachable(struct unreachable_data *data)
 {
        unsigned long flags;
 
diff --git a/mm/gup.c b/mm/gup.c
index be4ccddac26f..d71da7216c6e 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1122,8 +1122,6 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
        int locked = 0;
        long ret = 0;
 
-       VM_BUG_ON(start & ~PAGE_MASK);
-       VM_BUG_ON(len != PAGE_ALIGN(len));
        end = start + len;
 
        for (nstart = start; nstart < end; nstart = nend) {
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 9c566e4b06ce..5e3a4db36310 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3220,7 +3220,7 @@ static int is_hugetlb_entry_hwpoisoned(pte_t pte)
 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                            struct vm_area_struct *vma)
 {
-       pte_t *src_pte, *dst_pte, entry;
+       pte_t *src_pte, *dst_pte, entry, dst_entry;
        struct page *ptepage;
        unsigned long addr;
        int cow;
@@ -3248,15 +3248,30 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                        break;
                }
 
-               /* If the pagetables are shared don't copy or take references */
-               if (dst_pte == src_pte)
+               /*
+                * If the pagetables are shared don't copy or take references.
+                * dst_pte == src_pte is the common case of src/dest sharing.
+                *
+                * However, src could have 'unshared' and dst shares with
+                * another vma.  If dst_pte !none, this implies sharing.
+                * Check here before taking page table lock, and once again
+                * after taking the lock below.
+                */
+               dst_entry = huge_ptep_get(dst_pte);
+               if ((dst_pte == src_pte) || !huge_pte_none(dst_entry))
                        continue;
 
                dst_ptl = huge_pte_lock(h, dst, dst_pte);
                src_ptl = huge_pte_lockptr(h, src, src_pte);
                spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
                entry = huge_ptep_get(src_pte);
-               if (huge_pte_none(entry)) { /* skip none entry */
+               dst_entry = huge_ptep_get(dst_pte);
+               if (huge_pte_none(entry) || !huge_pte_none(dst_entry)) {
+                       /*
+                        * Skip if src entry none.  Also, skip in the
+                        * unlikely case dst entry !none as this implies
+                        * sharing with another vma.
+                        */
                        ;
                } else if (unlikely(is_hugetlb_entry_migration(entry) ||
                                    is_hugetlb_entry_hwpoisoned(entry))) {
@@ -4318,12 +4333,40 @@ static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
        /*
         * check on proper vm_flags and page table alignment
         */
-       if (vma->vm_flags & VM_MAYSHARE &&
-           vma->vm_start <= base && end <= vma->vm_end)
+       if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end))
                return true;
        return false;
 }
 
+/*
+ * Determine if start,end range within vma could be mapped by shared pmd.
+ * If yes, adjust start and end to cover range associated with possible
+ * shared pmd mappings.
+ */
+void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
+                               unsigned long *start, unsigned long *end)
+{
+       unsigned long check_addr = *start;
+
+       if (!(vma->vm_flags & VM_MAYSHARE))
+               return;
+
+       for (check_addr = *start; check_addr < *end; check_addr += PUD_SIZE) {
+               unsigned long a_start = check_addr & PUD_MASK;
+               unsigned long a_end = a_start + PUD_SIZE;
+
+               /*
+                * If sharing is possible, adjust start/end if necessary.
+                */
+               if (range_in_vma(vma, a_start, a_end)) {
+                       if (a_start < *start)
+                               *start = a_start;
+                       if (a_end > *end)
+                               *end = a_end;
+               }
+       }
+}
+
 /*
  * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
  * and returns the corresponding pte. While this is not necessary for the
@@ -4420,6 +4463,11 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
 {
        return 0;
 }
+
+void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
+                               unsigned long *start, unsigned long *end)
+{
+}
 #define want_pmd_share()       (0)
 #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
 
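
The new range_in_vma()/adjust_range_if_pmd_sharing_possible() pair above only
ever widens a range: it walks it in PUD_SIZE steps and, whenever a whole
PUD-aligned window fits inside the vma, extends start/end to cover that
window. Below is a compilable stand-alone rendering of the same arithmetic;
the PUD constants and the cut-down vma struct are illustrative, and the
VM_MAYSHARE check from the kernel version is omitted.

#include <stdio.h>

#define PUD_SIZE	(1UL << 30)	/* illustrative: 1 GiB, as on x86-64 */
#define PUD_MASK	(~(PUD_SIZE - 1))

struct vma { unsigned long vm_start, vm_end; };

static int range_in_vma(const struct vma *vma, unsigned long start,
			unsigned long end)
{
	return vma && vma->vm_start <= start && end <= vma->vm_end;
}

static void adjust_range(const struct vma *vma, unsigned long *start,
			 unsigned long *end)
{
	unsigned long addr;

	for (addr = *start; addr < *end; addr += PUD_SIZE) {
		unsigned long a_start = addr & PUD_MASK;
		unsigned long a_end = a_start + PUD_SIZE;

		/* widen only when the whole aligned window lies in the vma */
		if (range_in_vma(vma, a_start, a_end)) {
			if (a_start < *start)
				*start = a_start;
			if (a_end > *end)
				*end = a_end;
		}
	}
}

int main(void)
{
	struct vma v = { 0, 4 * PUD_SIZE };
	unsigned long s = PUD_SIZE + 4096, e = PUD_SIZE + 8192;

	adjust_range(&v, &s, &e);
	printf("%#lx-%#lx\n", s, e);	/* rounded out to one full PUD window */
	return 0;
}

A widened range is what makes the rmap.c change further down switch from the
single-page notifier to mmu_notifier_invalidate_range_start/end over
spmd_start/spmd_end.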
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 69c4a0c92ebb..e21d9b44247b 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2027,8 +2027,36 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
                nmask = policy_nodemask(gfp, pol);
                if (!nmask || node_isset(hpage_node, *nmask)) {
                        mpol_cond_put(pol);
-                       page = __alloc_pages_node(hpage_node,
-                                               gfp | __GFP_THISNODE, order);
+                       /*
+                        * We cannot invoke reclaim if __GFP_THISNODE
+                        * is set. Invoking reclaim with
+                        * __GFP_THISNODE set, would cause THP
+                        * allocations to trigger heavy swapping
+                        * despite there may be tons of free memory
+                        * (including potentially plenty of THP
+                        * already available in the buddy) on all the
+                        * other NUMA nodes.
+                        *
+                        * At most we could invoke compaction when
+                        * __GFP_THISNODE is set (but we would need to
+                        * refrain from invoking reclaim even if
+                        * compaction returned COMPACT_SKIPPED because
+                        * there wasn't enough memory to succeed
+                        * compaction). For now just avoid
+                        * __GFP_THISNODE instead of limiting the
+                        * allocation path to a strict and single
+                        * compaction invocation.
+                        *
+                        * Supposedly if direct reclaim was enabled by
+                        * the caller, the app prefers THP regardless
+                        * of the node it comes from so this would be
+                        * more desirable behavior than only
+                        * providing THP originated from the local
+                        * node in such case.
+                        */
+                       if (!(gfp & __GFP_DIRECT_RECLAIM))
+                               gfp |= __GFP_THISNODE;
+                       page = __alloc_pages_node(hpage_node, gfp, order);
                        goto out;
                }
        }
diff --git a/mm/mmap.c b/mm/mmap.c
index aa97074a4a99..283755645d17 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2876,21 +2876,15 @@ static inline void verify_mm_writelocked(struct mm_struct *mm)
  *  anonymous maps.  eventually we may be able to do some
  *  brk-specific accounting here.
  */
-static int do_brk(unsigned long addr, unsigned long request)
+static int do_brk(unsigned long addr, unsigned long len)
 {
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma, *prev;
-       unsigned long flags, len;
+       unsigned long flags;
        struct rb_node **rb_link, *rb_parent;
        pgoff_t pgoff = addr >> PAGE_SHIFT;
        int error;
 
-       len = PAGE_ALIGN(request);
-       if (len < request)
-               return -ENOMEM;
-       if (!len)
-               return 0;
-
        flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
 
        error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
@@ -2959,12 +2953,19 @@ static int do_brk(unsigned long addr, unsigned long request)
        return 0;
 }
 
-int vm_brk(unsigned long addr, unsigned long len)
+int vm_brk(unsigned long addr, unsigned long request)
 {
        struct mm_struct *mm = current->mm;
+       unsigned long len;
        int ret;
        bool populate;
 
+       len = PAGE_ALIGN(request);
+       if (len < request)
+               return -ENOMEM;
+       if (!len)
+               return 0;
+
        if (down_write_killable(&mm->mmap_sem))
                return -EINTR;
 
diff --git a/mm/rmap.c b/mm/rmap.c
index 94488b0362f8..a7276d8c96f3 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1476,6 +1476,9 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
        pte_t pteval;
        spinlock_t *ptl;
        int ret = SWAP_AGAIN;
+       unsigned long sh_address;
+       bool pmd_sharing_possible = false;
+       unsigned long spmd_start, spmd_end;
        struct rmap_private *rp = arg;
        enum ttu_flags flags = rp->flags;
 
@@ -1491,6 +1494,32 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                        goto out;
        }
 
+       /*
+        * Only use the range_start/end mmu notifiers if huge pmd sharing
+        * is possible.  In the normal case, mmu_notifier_invalidate_page
+        * is sufficient as we only unmap a page.  However, if we unshare
+        * a pmd, we will unmap a PUD_SIZE range.
+        */
+       if (PageHuge(page)) {
+               spmd_start = address;
+               spmd_end = spmd_start + vma_mmu_pagesize(vma);
+
+               /*
+                * Check if pmd sharing is possible.  If possible, we could
+                * unmap a PUD_SIZE range.  spmd_start/spmd_end will be
+                * modified if sharing is possible.
+                */
+               adjust_range_if_pmd_sharing_possible(vma, &spmd_start,
+                                                               &spmd_end);
+               if (spmd_end - spmd_start != vma_mmu_pagesize(vma)) {
+                       sh_address = address;
+
+                       pmd_sharing_possible = true;
+                       mmu_notifier_invalidate_range_start(vma->vm_mm,
+                                                       spmd_start, spmd_end);
+               }
+       }
+
        pte = page_check_address(page, mm, address, &ptl,
                                 PageTransCompound(page));
        if (!pte)
@@ -1524,6 +1553,30 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                }
        }
 
+       /*
+        * Call huge_pmd_unshare to potentially unshare a huge pmd.  Pass
+        * sh_address as it will be modified if unsharing is successful.
+        */
+       if (PageHuge(page) && huge_pmd_unshare(mm, &sh_address, pte)) {
+               /*
+                * huge_pmd_unshare unmapped an entire PMD page.  There is
+                * no way of knowing exactly which PMDs may be cached for
+                * this mm, so flush them all.  spmd_start/spmd_end cover
+                * this PUD_SIZE range.
+                */
+               flush_cache_range(vma, spmd_start, spmd_end);
+               flush_tlb_range(vma, spmd_start, spmd_end);
+
+               /*
+                * The ref count of the PMD page was dropped which is part
+                * of the way map counting is done for shared PMDs.  When
+                * there is no other sharing, huge_pmd_unshare returns false
+                * and we will unmap the actual page and drop map count
+                * to zero.
+                */
+               goto out_unmap;
+       }
+
        /* Nuke the page table entry. */
        flush_cache_page(vma, address, page_to_pfn(page));
        if (should_defer_flush(mm, flags)) {
@@ -1621,6 +1674,9 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
        if (ret != SWAP_FAIL && ret != SWAP_MLOCK && !(flags & TTU_MUNLOCK))
                mmu_notifier_invalidate_page(mm, address);
 out:
+       if (pmd_sharing_possible)
+               mmu_notifier_invalidate_range_end(vma->vm_mm,
+                                                       spmd_start, spmd_end);
        return ret;
 }
 
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
index 16d287565987..145f80518064 100644
--- a/net/9p/protocol.c
+++ b/net/9p/protocol.c
@@ -46,10 +46,15 @@ p9pdu_writef(struct p9_fcall *pdu, int proto_version, const char *fmt, ...);
 void p9stat_free(struct p9_wstat *stbuf)
 {
        kfree(stbuf->name);
+       stbuf->name = NULL;
        kfree(stbuf->uid);
+       stbuf->uid = NULL;
        kfree(stbuf->gid);
+       stbuf->gid = NULL;
        kfree(stbuf->muid);
+       stbuf->muid = NULL;
        kfree(stbuf->extension);
+       stbuf->extension = NULL;
 }
 EXPORT_SYMBOL(p9stat_free);
 
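The p9stat_free() change is the standard free-and-NULL idiom: once each
pointer is cleared right after being freed, a second call of the cleanup
function on the same structure only passes NULL to the allocator, which is
defined to be a no-op, instead of double-freeing. A user-space sketch with
illustrative names:

#include <stdlib.h>

struct wstat_strings {
	char *name;
	char *uid;
};

static void wstat_strings_free(struct wstat_strings *s)
{
	free(s->name);
	s->name = NULL;		/* a later call frees NULL: harmless no-op */
	free(s->uid);
	s->uid = NULL;
}

Because free(NULL) does nothing, repeated cleanup of the same object becomes
safe, which is exactly the double-free the 9p change guards against.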
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index db3586ba1211..19b3f4fbea52 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -918,19 +918,22 @@ static unsigned int early_drop_list(struct net *net,
        return drops;
 }
 
-static noinline int early_drop(struct net *net, unsigned int _hash)
+static noinline int early_drop(struct net *net, unsigned int hash)
 {
-       unsigned int i;
+       unsigned int i, bucket;
 
        for (i = 0; i < NF_CT_EVICTION_RANGE; i++) {
                struct hlist_nulls_head *ct_hash;
-               unsigned int hash, hsize, drops;
+               unsigned int hsize, drops;
 
                rcu_read_lock();
                nf_conntrack_get_ht(&ct_hash, &hsize);
-               hash = reciprocal_scale(_hash++, hsize);
+               if (!i)
+                       bucket = reciprocal_scale(hash, hsize);
+               else
+                       bucket = (bucket + 1) % hsize;
 
-               drops = early_drop_list(net, &ct_hash[hash]);
+               drops = early_drop_list(net, &ct_hash[bucket]);
                rcu_read_unlock();
 
                if (drops) {
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 7f1071e103ca..1b38fc486351 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -639,11 +639,10 @@ void xdr_truncate_encode(struct xdr_stream *xdr, size_t len)
                WARN_ON_ONCE(xdr->iov);
                return;
        }
-       if (fraglen) {
+       if (fraglen)
                xdr->end = head->iov_base + head->iov_len;
-               xdr->page_ptr--;
-       }
        /* (otherwise assume xdr->end is already set) */
+       xdr->page_ptr--;
        head->iov_len = len;
        buf->len = len;
        xdr->p = head->iov_base + head->iov_len;
diff --git a/tools/testing/selftests/powerpc/tm/tm-tmspr.c b/tools/testing/selftests/powerpc/tm/tm-tmspr.c
index 2bda81c7bf23..df1d7d4b1c89 100644
--- a/tools/testing/selftests/powerpc/tm/tm-tmspr.c
+++ b/tools/testing/selftests/powerpc/tm/tm-tmspr.c
@@ -98,7 +98,7 @@ void texasr(void *in)
 
 int test_tmspr()
 {
-       pthread_t       thread;
+       pthread_t       *thread;
        int             thread_num;
        unsigned long   i;
 
@@ -107,21 +107,28 @@ int test_tmspr()
        /* To cause some context switching */
        thread_num = 10 * sysconf(_SC_NPROCESSORS_ONLN);
 
+       thread = malloc(thread_num * sizeof(pthread_t));
+       if (thread == NULL)
+               return EXIT_FAILURE;
+
        /* Test TFIAR and TFHAR */
-       for (i = 0 ; i < thread_num ; i += 2){
-               if (pthread_create(&thread, NULL, (void*)tfiar_tfhar, (void *)i))
+       for (i = 0; i < thread_num; i += 2) {
+               if (pthread_create(&thread[i], NULL, (void *)tfiar_tfhar,
+                                  (void *)i))
                        return EXIT_FAILURE;
        }
-       if (pthread_join(thread, NULL) != 0)
-               return EXIT_FAILURE;
-
        /* Test TEXASR */
-       for (i = 0 ; i < thread_num ; i++){
-               if (pthread_create(&thread, NULL, (void*)texasr, (void *)i))
+       for (i = 1; i < thread_num; i += 2) {
+               if (pthread_create(&thread[i], NULL, (void *)texasr, (void *)i))
                        return EXIT_FAILURE;
        }
-       if (pthread_join(thread, NULL) != 0)
-               return EXIT_FAILURE;
+
+       for (i = 0; i < thread_num; i++) {
+               if (pthread_join(thread[i], NULL) != 0)
+                       return EXIT_FAILURE;
+       }
+
+       free(thread);
 
        if (passed)
                return 0;
