diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index b2bdea1953e6..9abe55280cf6 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -3324,6 +3324,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
        spia_pedr=
        spia_peddr=
 
+       stack_guard_gap=        [MM]
+                       override the default stack gap protection. The value
+                       is in page units and it defines how many pages prior
+                       to (for stacks growing down) resp. after (for stacks
+                       growing up) the main stack are reserved for no other
+                       mapping. Default value is 256 pages.
+
        stacktrace      [FTRACE]
                        Enabled the stack tracer on boot up.
 
diff --git a/Makefile b/Makefile
index 8ad497f954d7..e44d3f45b72a 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 3
 PATCHLEVEL = 18
-SUBLEVEL = 57
+SUBLEVEL = 58
 EXTRAVERSION =
 NAME = Diseased Newt
 
diff --git a/arch/arc/mm/mmap.c b/arch/arc/mm/mmap.c
index 2e06d56e987b..cf4ae6958240 100644
--- a/arch/arc/mm/mmap.c
+++ b/arch/arc/mm/mmap.c
@@ -64,7 +64,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
-                   (!vma || addr + len <= vma->vm_start))
+                   (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }
 
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 5e85ed371364..8f9d1cf505dd 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -89,7 +89,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
-                   (!vma || addr + len <= vma->vm_start))
+                   (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }
 
@@ -140,7 +140,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                        addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
-                               (!vma || addr + len <= vma->vm_start))
+                               (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }
 
diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
index 836f14707a62..efa59f1f8022 100644
--- a/arch/frv/mm/elf-fdpic.c
+++ b/arch/frv/mm/elf-fdpic.c
@@ -74,7 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
                addr = PAGE_ALIGN(addr);
                vma = find_vma(current->mm, addr);
                if (TASK_SIZE - len >= addr &&
-                   (!vma || addr + len <= vma->vm_start))
+                   (!vma || addr + len <= vm_start_gap(vma)))
                        goto success;
        }
 
diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
index f1baadd56e82..9be924f08f34 100644
--- a/arch/mips/mm/mmap.c
+++ b/arch/mips/mm/mmap.c
@@ -92,7 +92,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
 
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
-                   (!vma || addr + len <= vma->vm_start))
+                   (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }
 
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
index 5aba01ac457f..4dda73c44fee 100644
--- a/arch/parisc/kernel/sys_parisc.c
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -88,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
 {
        struct mm_struct *mm = current->mm;
-       struct vm_area_struct *vma;
+       struct vm_area_struct *vma, *prev;
        unsigned long task_size = TASK_SIZE;
        int do_color_align, last_mmap;
        struct vm_unmapped_area_info info;
@@ -115,9 +115,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
                else
                        addr = PAGE_ALIGN(addr);
 
-               vma = find_vma(mm, addr);
+               vma = find_vma_prev(mm, addr, &prev);
                if (task_size - len >= addr &&
-                   (!vma || addr + len <= vma->vm_start))
+                   (!vma || addr + len <= vm_start_gap(vma)) &&
+                   (!prev || addr >= vm_end_gap(prev)))
                        goto found_addr;
        }
 
@@ -141,7 +142,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                          const unsigned long len, const unsigned long pgoff,
                          const unsigned long flags)
 {
-       struct vm_area_struct *vma;
+       struct vm_area_struct *vma, *prev;
        struct mm_struct *mm = current->mm;
        unsigned long addr = addr0;
        int do_color_align, last_mmap;
@@ -175,9 +176,11 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                        addr = COLOR_ALIGN(addr, last_mmap, pgoff);
                else
                        addr = PAGE_ALIGN(addr);
-               vma = find_vma(mm, addr);
+
+               vma = find_vma_prev(mm, addr, &prev);
                if (TASK_SIZE - len >= addr &&
-                   (!vma || addr + len <= vma->vm_start))
+                   (!vma || addr + len <= vm_start_gap(vma)) &&
+                   (!prev || addr >= vm_end_gap(prev)))
                        goto found_addr;
        }
 
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index ded0ea1afde4..4c1462611576 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -105,7 +105,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
        if ((mm->task_size - len) < addr)
                return 0;
        vma = find_vma(mm, addr);
-       return (!vma || (addr + len) <= vma->vm_start);
+       return (!vma || (addr + len) <= vm_start_gap(vma));
 }
 
 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index b1593c2f751a..9dcb55f8afc0 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -378,7 +378,7 @@ void __init vmem_map_init(void)
        ro_end = (unsigned long)&_eshared & PAGE_MASK;
        for_each_memblock(memory, reg) {
                start = reg->base;
-               end = reg->base + reg->size - 1;
+               end = reg->base + reg->size;
                if (start >= ro_end || end <= ro_start)
                        vmem_add_mem(start, end - start, 0);
                else if (start >= ro_start && end <= ro_end)
diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
index 6777177807c2..7df7d5944188 100644
--- a/arch/sh/mm/mmap.c
+++ b/arch/sh/mm/mmap.c
@@ -63,7 +63,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
-                   (!vma || addr + len <= vma->vm_start))
+                   (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }
 
@@ -113,7 +113,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
-                   (!vma || addr + len <= vma->vm_start))
+                   (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }
 
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index c690c8e16a96..7f0f7c01b297 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -118,7 +118,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
 
                vma = find_vma(mm, addr);
                if (task_size - len >= addr &&
-                   (!vma || addr + len <= vma->vm_start))
+                   (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }
 
@@ -181,7 +181,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 
                vma = find_vma(mm, addr);
                if (task_size - len >= addr &&
-                   (!vma || addr + len <= vma->vm_start))
+                   (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }
 
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index 981a769b9558..21fc668a19a8 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -85,7 +85,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
 
 void bad_trap(struct pt_regs *regs, long lvl)
 {
-       char buffer[32];
+       char buffer[36];
        siginfo_t info;
 
        if (notify_die(DIE_TRAP, "bad trap", regs,
@@ -116,7 +116,7 @@ void bad_trap(struct pt_regs *regs, long lvl)
 
 void bad_trap_tl1(struct pt_regs *regs, long lvl)
 {
-       char buffer[32];
+       char buffer[36];
        
        if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
                       0, lvl, SIGTRAP) == NOTIFY_STOP)
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index 4242eab12e10..2b6fae615fb1 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -115,7 +115,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                addr = ALIGN(addr, HPAGE_SIZE);
                vma = find_vma(mm, addr);
                if (task_size - len >= addr &&
-                   (!vma || addr + len <= vma->vm_start))
+                   (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }
        if (mm->get_unmapped_area == arch_get_unmapped_area)
diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
index 8a00c7b7b862..52deac203eac 100644
--- a/arch/tile/mm/hugetlbpage.c
+++ b/arch/tile/mm/hugetlbpage.c
@@ -237,7 +237,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                addr = ALIGN(addr, huge_page_size(h));
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
-                   (!vma || addr + len <= vma->vm_start))
+                   (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }
        if (current->mm->get_unmapped_area == arch_get_unmapped_area)
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index 30277e27431a..d050393d3be2 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -127,7 +127,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (end - len >= addr &&
-                   (!vma || addr + len <= vma->vm_start))
+                   (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }
 
@@ -166,7 +166,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
-                               (!vma || addr + len <= vma->vm_start))
+                               (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }
 
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index 9161f764121e..c504866dc807 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -144,7 +144,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                addr = ALIGN(addr, huge_page_size(h));
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
-                   (!vma || addr + len <= vma->vm_start))
+                   (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }
        if (mm->get_unmapped_area == arch_get_unmapped_area)
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 47b6436e41c2..3686a1db25b2 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -100,5 +100,6 @@ void __init initmem_init(void)
        printk(KERN_DEBUG "High memory starts at vaddr %08lx\n",
                        (ulong) pfn_to_kaddr(highstart_pfn));
 
+       __vmalloc_start_set = true;
        setup_bootmem_allocator();
 }
diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c
index 5d3f7a119ed1..1ff0b92eeae7 100644
--- a/arch/xtensa/kernel/syscall.c
+++ b/arch/xtensa/kernel/syscall.c
@@ -86,7 +86,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
                /* At this point:  (!vmm || addr < vmm->vm_end). */
                if (TASK_SIZE - len < addr)
                        return -ENOMEM;
-               if (!vmm || addr + len <= vmm->vm_start)
+               if (!vmm || addr + len <= vm_start_gap(vmm))
                        return addr;
                addr = vmm->vm_end;
                if (flags & MAP_SHARED)
diff --git a/block/partitions/msdos.c b/block/partitions/msdos.c
index 93e7c1b32edd..5610cd537da7 100644
--- a/block/partitions/msdos.c
+++ b/block/partitions/msdos.c
@@ -300,6 +300,8 @@ static void parse_bsd(struct parsed_partitions *state,
                        continue;
                bsd_start = le32_to_cpu(p->p_offset);
                bsd_size = le32_to_cpu(p->p_size);
+               if (memcmp(flavour, "bsd\0", 4) == 0)
+                       bsd_start += offset;
                if (offset == bsd_start && size == bsd_size)
                        /* full parent partition, we have it already */
                        continue;
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 25a70d06c5bf..55836a538a68 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -204,8 +204,8 @@ static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf,
        int ret;
        ret = sscanf(buf, "%u", &input);
 
-       /* cannot be lower than 11 otherwise freq will not fall */
-       if (ret != 1 || input < 11 || input > 100 ||
+       /* cannot be lower than 1 otherwise freq will not fall */
+       if (ret != 1 || input < 1 || input > 100 ||
                        input >= cs_tuners->up_threshold)
                return -EINVAL;
 
diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c
index f056767d95a5..4158d7e431b4 100644
--- a/drivers/iio/proximity/as3935.c
+++ b/drivers/iio/proximity/as3935.c
@@ -256,8 +256,6 @@ static irqreturn_t as3935_interrupt_handler(int irq, void *private)
 
 static void calibrate_as3935(struct as3935_state *st)
 {
-       mutex_lock(&st->lock);
-
        /* mask disturber interrupt bit */
        as3935_write(st, AS3935_INT, BIT(5));
 
@@ -267,8 +265,6 @@ static void calibrate_as3935(struct as3935_state *st)
 
        mdelay(2);
        as3935_write(st, AS3935_TUNE_CAP, (st->tune_cap / TUNE_CAP_DIV));
-
-       mutex_unlock(&st->lock);
 }
 
 #ifdef CONFIG_PM_SLEEP
@@ -305,6 +301,8 @@ static int as3935_resume(struct spi_device *spi)
        val &= ~AS3935_AFE_PWR_BIT;
        ret = as3935_write(st, AS3935_AFE_GAIN, val);
 
+       calibrate_as3935(st);
+
 err_resume:
        mutex_unlock(&st->lock);
 
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c b/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c
index 9515f3a68f8f..122815e1cb65 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c
@@ -123,15 +123,10 @@ int pvr2_eeprom_analyze(struct pvr2_hdw *hdw)
        memset(&tvdata,0,sizeof(tvdata));
 
        eeprom = pvr2_eeprom_fetch(hdw);
-       if (!eeprom) return -EINVAL;
-
-       {
-               struct i2c_client fake_client;
-               /* Newer version expects a useless client interface */
-               fake_client.addr = hdw->eeprom_addr;
-               fake_client.adapter = &hdw->i2c_adap;
-               tveeprom_hauppauge_analog(&fake_client,&tvdata,eeprom);
-       }
+       if (!eeprom)
+               return -EINVAL;
+
+       tveeprom_hauppauge_analog(NULL, &tvdata, eeprom);
 
        trace_eeprom("eeprom assumed v4l tveeprom module");
        trace_eeprom("eeprom direct call results:");
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 1c2cc6fee351..d5c300150cf4 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -1120,7 +1120,7 @@ EXPORT_SYMBOL_GPL(vb2_create_bufs);
  */
 void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no)
 {
-       if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv)
+       if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv)
                return NULL;
 
        return call_ptr_memop(vb, vaddr, vb->planes[plane_no].mem_priv);
diff --git a/drivers/mfd/omap-usb-tll.c b/drivers/mfd/omap-usb-tll.c
index 0f8cd6bbe914..1224921217f2 100644
--- a/drivers/mfd/omap-usb-tll.c
+++ b/drivers/mfd/omap-usb-tll.c
@@ -376,8 +376,8 @@ int omap_tll_init(struct usbhs_omap_platform_data *pdata)
                                 * and use SDR Mode
                                 */
                                reg &= ~(OMAP_TLL_CHANNEL_CONF_UTMIAUTOIDLE
-                                       | OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF
                                        | OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE);
+                               reg |= OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF;
                        } else if (pdata->port_mode[i] ==
                                        OMAP_EHCI_PORT_MODE_HSIC) {
                                /*
diff --git a/drivers/misc/c2port/c2port-duramar2150.c b/drivers/misc/c2port/c2port-duramar2150.c
index 5484301d57d9..3dc61ea7dc64 100644
--- a/drivers/misc/c2port/c2port-duramar2150.c
+++ b/drivers/misc/c2port/c2port-duramar2150.c
@@ -129,8 +129,8 @@ static int __init duramar2150_c2port_init(void)
 
        duramar2150_c2port_dev = c2port_device_register("uc",
                                        &duramar2150_c2port_ops, NULL);
-       if (!duramar2150_c2port_dev) {
-               ret = -ENODEV;
+       if (IS_ERR(duramar2150_c2port_dev)) {
+               ret = PTR_ERR(duramar2150_c2port_dev);
                goto free_region;
        }
 
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 09c23a5ba1d4..991aff587f63 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -246,6 +246,8 @@ static int gs_cmd_reset(struct gs_usb *gsusb, struct gs_can *gsdev)
                             sizeof(*dm),
                             1000);
 
+       kfree(dm);
+
        return rc;
 }
 
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 28dbbdc393eb..d4e610b94dd8 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -100,6 +100,14 @@
 /* BUFFER_ALIGN(adr) calculates the number of bytes to the next alignment. */
 #define BUFFER_ALIGN(adr) ((ALIGNMENT - ((u32) adr)) % ALIGNMENT)
 
+#ifdef __BIG_ENDIAN
+#define xemaclite_readl                ioread32be
+#define xemaclite_writel       iowrite32be
+#else
+#define xemaclite_readl                ioread32
+#define xemaclite_writel       iowrite32
+#endif
+
 /**
  * struct net_local - Our private per device data
  * @ndev:              instance of the network device
@@ -158,15 +166,15 @@ static void xemaclite_enable_interrupts(struct net_local *drvdata)
        u32 reg_data;
 
        /* Enable the Tx interrupts for the first Buffer */
-       reg_data = __raw_readl(drvdata->base_addr + XEL_TSR_OFFSET);
-       __raw_writel(reg_data | XEL_TSR_XMIT_IE_MASK,
-                    drvdata->base_addr + XEL_TSR_OFFSET);
+       reg_data = xemaclite_readl(drvdata->base_addr + XEL_TSR_OFFSET);
+       xemaclite_writel(reg_data | XEL_TSR_XMIT_IE_MASK,
+                        drvdata->base_addr + XEL_TSR_OFFSET);
 
        /* Enable the Rx interrupts for the first buffer */
-       __raw_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET);
+       xemaclite_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET);
 
        /* Enable the Global Interrupt Enable */
-       __raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
+       xemaclite_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
 }
 
 /**
@@ -181,17 +189,17 @@ static void xemaclite_disable_interrupts(struct net_local *drvdata)
        u32 reg_data;
 
        /* Disable the Global Interrupt Enable */
-       __raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
+       xemaclite_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
 
        /* Disable the Tx interrupts for the first buffer */
-       reg_data = __raw_readl(drvdata->base_addr + XEL_TSR_OFFSET);
-       __raw_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK),
-                    drvdata->base_addr + XEL_TSR_OFFSET);
+       reg_data = xemaclite_readl(drvdata->base_addr + XEL_TSR_OFFSET);
+       xemaclite_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK),
+                        drvdata->base_addr + XEL_TSR_OFFSET);
 
        /* Disable the Rx interrupts for the first buffer */
-       reg_data = __raw_readl(drvdata->base_addr + XEL_RSR_OFFSET);
-       __raw_writel(reg_data & (~XEL_RSR_RECV_IE_MASK),
-                    drvdata->base_addr + XEL_RSR_OFFSET);
+       reg_data = xemaclite_readl(drvdata->base_addr + XEL_RSR_OFFSET);
+       xemaclite_writel(reg_data & (~XEL_RSR_RECV_IE_MASK),
+                        drvdata->base_addr + XEL_RSR_OFFSET);
 }
 
 /**
@@ -323,7 +331,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
                byte_count = ETH_FRAME_LEN;
 
        /* Check if the expected buffer is available */
-       reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
+       reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
        if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
             XEL_TSR_XMIT_ACTIVE_MASK)) == 0) {
 
@@ -336,7 +344,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
 
                addr = (void __iomem __force *)((u32 __force)addr ^
                                                 XEL_BUFFER_OFFSET);
-               reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
+               reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
 
                if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
                     XEL_TSR_XMIT_ACTIVE_MASK)) != 0)
@@ -347,16 +355,16 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
        /* Write the frame to the buffer */
        xemaclite_aligned_write(data, (u32 __force *) addr, byte_count);
 
-       __raw_writel((byte_count & XEL_TPLR_LENGTH_MASK),
-                    addr + XEL_TPLR_OFFSET);
+       xemaclite_writel((byte_count & XEL_TPLR_LENGTH_MASK),
+                        addr + XEL_TPLR_OFFSET);
 
        /* Update the Tx Status Register to indicate that there is a
         * frame to send. Set the XEL_TSR_XMIT_ACTIVE_MASK flag which
         * is used by the interrupt handler to check whether a frame
         * has been transmitted */
-       reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
+       reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
        reg_data |= (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK);
-       __raw_writel(reg_data, addr + XEL_TSR_OFFSET);
+       xemaclite_writel(reg_data, addr + XEL_TSR_OFFSET);
 
        return 0;
 }
@@ -371,7 +379,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
  *
  * Return:     Total number of bytes received
  */
-static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
+static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
 {
        void __iomem *addr;
        u16 length, proto_type;
@@ -381,7 +389,7 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
        addr = (drvdata->base_addr + drvdata->next_rx_buf_to_use);
 
        /* Verify which buffer has valid data */
-       reg_data = __raw_readl(addr + XEL_RSR_OFFSET);
+       reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET);
 
        if ((reg_data & XEL_RSR_RECV_DONE_MASK) == XEL_RSR_RECV_DONE_MASK) {
                if (drvdata->rx_ping_pong != 0)
@@ -398,27 +406,28 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
                        return 0;       /* No data was available */
 
                /* Verify that buffer has valid data */
-               reg_data = __raw_readl(addr + XEL_RSR_OFFSET);
+               reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET);
                if ((reg_data & XEL_RSR_RECV_DONE_MASK) !=
                     XEL_RSR_RECV_DONE_MASK)
                        return 0;       /* No data was available */
        }
 
        /* Get the protocol type of the ethernet frame that arrived */
-       proto_type = ((ntohl(__raw_readl(addr + XEL_HEADER_OFFSET +
+       proto_type = ((ntohl(xemaclite_readl(addr + XEL_HEADER_OFFSET +
                        XEL_RXBUFF_OFFSET)) >> XEL_HEADER_SHIFT) &
                        XEL_RPLR_LENGTH_MASK);
 
        /* Check if received ethernet frame is a raw ethernet frame
         * or an IP packet or an ARP packet */
-       if (proto_type > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
+       if (proto_type > ETH_DATA_LEN) {
 
                if (proto_type == ETH_P_IP) {
-                       length = ((ntohl(__raw_readl(addr +
+                       length = ((ntohl(xemaclite_readl(addr +
                                        XEL_HEADER_IP_LENGTH_OFFSET +
                                        XEL_RXBUFF_OFFSET)) >>
                                        XEL_HEADER_SHIFT) &
                                        XEL_RPLR_LENGTH_MASK);
+                       length = min_t(u16, length, ETH_DATA_LEN);
                        length += ETH_HLEN + ETH_FCS_LEN;
 
                } else if (proto_type == ETH_P_ARP)
@@ -431,14 +440,17 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
                /* Use the length in the frame, plus the header and trailer */
                length = proto_type + ETH_HLEN + ETH_FCS_LEN;
 
+       if (WARN_ON(length > maxlen))
+               length = maxlen;
+
        /* Read from the EmacLite device */
        xemaclite_aligned_read((u32 __force *) (addr + XEL_RXBUFF_OFFSET),
                                data, length);
 
        /* Acknowledge the frame */
-       reg_data = __raw_readl(addr + XEL_RSR_OFFSET);
+       reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET);
        reg_data &= ~XEL_RSR_RECV_DONE_MASK;
-       __raw_writel(reg_data, addr + XEL_RSR_OFFSET);
+       xemaclite_writel(reg_data, addr + XEL_RSR_OFFSET);
 
        return length;
 }
@@ -465,14 +477,14 @@ static void xemaclite_update_address(struct net_local *drvdata,
 
        xemaclite_aligned_write(address_ptr, (u32 __force *) addr, ETH_ALEN);
 
-       __raw_writel(ETH_ALEN, addr + XEL_TPLR_OFFSET);
+       xemaclite_writel(ETH_ALEN, addr + XEL_TPLR_OFFSET);
 
        /* Update the MAC address in the EmacLite */
-       reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
-       __raw_writel(reg_data | XEL_TSR_PROG_MAC_ADDR, addr + XEL_TSR_OFFSET);
+       reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
+       xemaclite_writel(reg_data | XEL_TSR_PROG_MAC_ADDR, addr + XEL_TSR_OFFSET);
 
        /* Wait for EmacLite to finish with the MAC address update */
-       while ((__raw_readl(addr + XEL_TSR_OFFSET) &
+       while ((xemaclite_readl(addr + XEL_TSR_OFFSET) &
                XEL_TSR_PROG_MAC_ADDR) != 0)
                ;
 }
@@ -605,7 +617,7 @@ static void xemaclite_rx_handler(struct net_device *dev)
 
        skb_reserve(skb, 2);
 
-       len = xemaclite_recv_data(lp, (u8 *) skb->data);
+       len = xemaclite_recv_data(lp, (u8 *) skb->data, len);
 
        if (!len) {
                dev->stats.rx_errors++;
@@ -642,32 +654,32 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
        u32 tx_status;
 
        /* Check if there is Rx Data available */
-       if ((__raw_readl(base_addr + XEL_RSR_OFFSET) &
+       if ((xemaclite_readl(base_addr + XEL_RSR_OFFSET) &
                         XEL_RSR_RECV_DONE_MASK) ||
-           (__raw_readl(base_addr + XEL_BUFFER_OFFSET + XEL_RSR_OFFSET)
+           (xemaclite_readl(base_addr + XEL_BUFFER_OFFSET + XEL_RSR_OFFSET)
                         & XEL_RSR_RECV_DONE_MASK))
 
                xemaclite_rx_handler(dev);
 
        /* Check if the Transmission for the first buffer is completed */
-       tx_status = __raw_readl(base_addr + XEL_TSR_OFFSET);
+       tx_status = xemaclite_readl(base_addr + XEL_TSR_OFFSET);
        if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
                (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
 
                tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
-               __raw_writel(tx_status, base_addr + XEL_TSR_OFFSET);
+               xemaclite_writel(tx_status, base_addr + XEL_TSR_OFFSET);
 
                tx_complete = true;
        }
 
        /* Check if the Transmission for the second buffer is completed */
-       tx_status = __raw_readl(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
+       tx_status = xemaclite_readl(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
        if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
                (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
 
                tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
-               __raw_writel(tx_status, base_addr + XEL_BUFFER_OFFSET +
-                            XEL_TSR_OFFSET);
+               xemaclite_writel(tx_status, base_addr + XEL_BUFFER_OFFSET +
+                                XEL_TSR_OFFSET);
 
                tx_complete = true;
        }
@@ -700,7 +712,7 @@ static int xemaclite_mdio_wait(struct net_local *lp)
        /* wait for the MDIO interface to not be busy or timeout
           after some time.
        */
-       while (__raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) &
+       while (xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) &
                        XEL_MDIOCTRL_MDIOSTS_MASK) {
                if (time_before_eq(end, jiffies)) {
                        WARN_ON(1);
@@ -736,17 +748,17 @@ static int xemaclite_mdio_read(struct mii_bus *bus, int phy_id, int reg)
         * MDIO Address register. Set the Status bit in the MDIO Control
         * register to start a MDIO read transaction.
         */
-       ctrl_reg = __raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
-       __raw_writel(XEL_MDIOADDR_OP_MASK |
-                    ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg),
-                    lp->base_addr + XEL_MDIOADDR_OFFSET);
-       __raw_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
-                    lp->base_addr + XEL_MDIOCTRL_OFFSET);
+       ctrl_reg = xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
+       xemaclite_writel(XEL_MDIOADDR_OP_MASK |
+                        ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg),
+                        lp->base_addr + XEL_MDIOADDR_OFFSET);
+       xemaclite_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
+                        lp->base_addr + XEL_MDIOCTRL_OFFSET);
 
        if (xemaclite_mdio_wait(lp))
                return -ETIMEDOUT;
 
-       rc = __raw_readl(lp->base_addr + XEL_MDIORD_OFFSET);
+       rc = xemaclite_readl(lp->base_addr + XEL_MDIORD_OFFSET);
 
        dev_dbg(&lp->ndev->dev,
                "xemaclite_mdio_read(phy_id=%i, reg=%x) == %x\n",
@@ -783,13 +795,13 @@ static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg,
         * Data register. Finally, set the Status bit in the MDIO Control
         * register to start a MDIO write transaction.
         */
-       ctrl_reg = __raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
-       __raw_writel(~XEL_MDIOADDR_OP_MASK &
-                    ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg),
-                    lp->base_addr + XEL_MDIOADDR_OFFSET);
-       __raw_writel(val, lp->base_addr + XEL_MDIOWR_OFFSET);
-       __raw_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
-                    lp->base_addr + XEL_MDIOCTRL_OFFSET);
+       ctrl_reg = xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
+       xemaclite_writel(~XEL_MDIOADDR_OP_MASK &
+                        ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg),
+                        lp->base_addr + XEL_MDIOADDR_OFFSET);
+       xemaclite_writel(val, lp->base_addr + XEL_MDIOWR_OFFSET);
+       xemaclite_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
+                        lp->base_addr + XEL_MDIOCTRL_OFFSET);
 
        return 0;
 }
@@ -834,8 +846,8 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
        /* Enable the MDIO bus by asserting the enable bit in MDIO Control
         * register.
         */
-       __raw_writel(XEL_MDIOCTRL_MDIOEN_MASK,
-                    lp->base_addr + XEL_MDIOCTRL_OFFSET);
+       xemaclite_writel(XEL_MDIOCTRL_MDIOEN_MASK,
+                        lp->base_addr + XEL_MDIOCTRL_OFFSET);
 
        bus = mdiobus_alloc();
        if (!bus) {
@@ -1138,8 +1150,8 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
                dev_warn(dev, "No MAC address found\n");
 
        /* Clear the Tx CSR's in case this is a restart */
-       __raw_writel(0, lp->base_addr + XEL_TSR_OFFSET);
-       __raw_writel(0, lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
+       xemaclite_writel(0, lp->base_addr + XEL_TSR_OFFSET);
+       xemaclite_writel(0, lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
 
        /* Set the MAC address in the EmacLite device */
        xemaclite_update_address(lp, ndev->dev_addr);
diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c
index 9224e029ef2b..70c233f2ea19 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ap.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ap.c
@@ -873,7 +873,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf,  int len)
                return _FAIL;
 
 
-       if (len > MAX_IE_SZ)
+       if (len < 0 || len > MAX_IE_SZ)
                return _FAIL;
 
        pbss_network->IELength = len;
diff --git a/drivers/tty/serial/efm32-uart.c b/drivers/tty/serial/efm32-uart.c
index 55d9c00112cc..871860810334 100644
--- a/drivers/tty/serial/efm32-uart.c
+++ b/drivers/tty/serial/efm32-uart.c
@@ -27,6 +27,7 @@
 #define UARTn_FRAME            0x04
 #define UARTn_FRAME_DATABITS__MASK     0x000f
 #define UARTn_FRAME_DATABITS(n)                ((n) - 3)
+#define UARTn_FRAME_PARITY__MASK       0x0300
 #define UARTn_FRAME_PARITY_NONE                0x0000
 #define UARTn_FRAME_PARITY_EVEN                0x0200
 #define UARTn_FRAME_PARITY_ODD         0x0300
@@ -572,12 +573,16 @@ static void efm32_uart_console_get_options(struct efm32_uart_port *efm_port,
                        16 * (4 + (clkdiv >> 6)));
 
        frame = efm32_uart_read32(efm_port, UARTn_FRAME);
-       if (frame & UARTn_FRAME_PARITY_ODD)
+       switch (frame & UARTn_FRAME_PARITY__MASK) {
+       case UARTn_FRAME_PARITY_ODD:
                *parity = 'o';
-       else if (frame & UARTn_FRAME_PARITY_EVEN)
+               break;
+       case UARTn_FRAME_PARITY_EVEN:
                *parity = 'e';
-       else
+               break;
+       default:
                *parity = 'n';
+       }
 
        *bits = (frame & UARTn_FRAME_DATABITS__MASK) -
                        UARTn_FRAME_DATABITS(4) + 4;
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 122bd73c48e8..58a444f243cc 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -2461,6 +2461,7 @@ struct usb_hcd *usb_create_shared_hcd(const struct hc_driver *driver,
                hcd->bandwidth_mutex = kmalloc(sizeof(*hcd->bandwidth_mutex),
                                GFP_KERNEL);
                if (!hcd->bandwidth_mutex) {
+                       kfree(hcd->address0_mutex);
                        kfree(hcd);
                        dev_dbg(dev, "hcd bandwidth mutex alloc failed\n");
                        return NULL;
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 8de05cf013ed..d2c941b03302 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1346,7 +1346,13 @@ static int hub_configure(struct usb_hub *hub,
        if (ret < 0) {
                message = "can't read hub descriptor";
                goto fail;
-       } else if (hub->descriptor->bNbrPorts > USB_MAXCHILDREN) {
+       }
+
+       maxchild = USB_MAXCHILDREN;
+       if (hub_is_superspeed(hdev))
+               maxchild = min_t(unsigned, maxchild, USB_SS_MAXPORTS);
+
+       if (hub->descriptor->bNbrPorts > maxchild) {
                message = "hub has too many ports!";
                ret = -ENODEV;
                goto fail;
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index 53c747fed9d7..a2bf16b6b930 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -1935,7 +1935,7 @@ ss_hub_descriptor(struct usb_hub_descriptor *desc)
        desc->wHubCharacteristics = cpu_to_le16(0x0001);
        desc->bNbrPorts = 1;
        desc->u.ss.bHubHdrDecLat = 0x04; /* Worst case: 0.4 micro sec*/
-       desc->u.ss.DeviceRemovable = 0xffff;
+       desc->u.ss.DeviceRemovable = 0;
 }
 
 static inline void hub_descriptor(struct usb_hub_descriptor *desc)
@@ -1945,8 +1945,8 @@ static inline void hub_descriptor(struct usb_hub_descriptor *desc)
        desc->bDescLength = 9;
        desc->wHubCharacteristics = cpu_to_le16(0x0001);
        desc->bNbrPorts = 1;
-       desc->u.hs.DeviceRemovable[0] = 0xff;
-       desc->u.hs.DeviceRemovable[1] = 0xff;
+       desc->u.hs.DeviceRemovable[0] = 0;
+       desc->u.hs.DeviceRemovable[1] = 0xff;   /* PortPwrCtrlMask */
 }
 
 static int dummy_hub_control(
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index f130bb2f7bbe..a9f12e382f2a 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -1269,7 +1269,7 @@ static void set_td_timer(struct r8a66597 *r8a66597, struct r8a66597_td *td)
                        time = 30;
                        break;
                default:
-                       time = 300;
+                       time = 50;
                        break;
                }
 
@@ -1785,6 +1785,7 @@ static void r8a66597_td_timer(unsigned long _r8a66597)
                pipe = td->pipe;
                pipe_stop(r8a66597, pipe);
 
+               /* Select a different address or endpoint */
                new_td = td;
                do {
                        list_move_tail(&new_td->queue,
@@ -1794,7 +1795,8 @@ static void r8a66597_td_timer(unsigned long _r8a66597)
                                new_td = td;
                                break;
                        }
-               } while (td != new_td && td->address == new_td->address);
+               } while (td != new_td && td->address == new_td->address &&
+                       td->pipe->info.epnum == new_td->pipe->info.epnum);
 
                start_transfer(r8a66597, new_td);
 
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 575582e807d3..6bc448701a9c 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -185,6 +185,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
        if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
                        pdev->device == 0x1042)
                xhci->quirks |= XHCI_BROKEN_STREAMS;
+       if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
+                       pdev->device == 0x1142)
+               xhci->quirks |= XHCI_TRUST_TX_LENGTH;
 
        if (xhci->quirks & XHCI_RESET_ON_RESUME)
                xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index f78d13ba7d19..32c46a5c489e 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -400,6 +400,9 @@ cifs_reconnect(struct TCP_Server_Info *server)
                mutex_unlock(&server->srv_mutex);
        } while (server->tcpStatus == CifsNeedReconnect);
 
+       if (server->tcpStatus == CifsNeedNegotiate)
+               mod_delayed_work(cifsiod_wq, &server->echo, 0);
+
        return rc;
 }
 
@@ -409,18 +412,27 @@ cifs_echo_request(struct work_struct *work)
        int rc;
        struct TCP_Server_Info *server = container_of(work,
                                        struct TCP_Server_Info, echo.work);
+       unsigned long echo_interval;
+
+       /*
+        * If we need to renegotiate, set echo interval to zero to
+        * immediately call echo service where we can renegotiate.
+        */
+       if (server->tcpStatus == CifsNeedNegotiate)
+               echo_interval = 0;
+       else
+               echo_interval = SMB_ECHO_INTERVAL;
 
        /*
-        * We cannot send an echo if it is disabled or until the
-        * NEGOTIATE_PROTOCOL request is done, which is indicated by
-        * server->ops->need_neg() == true. Also, no need to ping if
-        * we got a response recently.
+        * We cannot send an echo if it is disabled.
+        * Also, no need to ping if we got a response recently.
         */
 
        if (server->tcpStatus == CifsNeedReconnect ||
-           server->tcpStatus == CifsExiting || server->tcpStatus == CifsNew ||
+           server->tcpStatus == CifsExiting ||
+           server->tcpStatus == CifsNew ||
            (server->ops->can_echo && !server->ops->can_echo(server)) ||
-           time_before(jiffies, server->lstrp + SMB_ECHO_INTERVAL - HZ))
+           time_before(jiffies, server->lstrp + echo_interval - HZ))
                goto requeue_echo;
 
        rc = server->ops->echo ? server->ops->echo(server) : -ENOSYS;
diff --git a/fs/configfs/symlink.c b/fs/configfs/symlink.c
index cc9f2546ea4a..52fc4e25a7cb 100644
--- a/fs/configfs/symlink.c
+++ b/fs/configfs/symlink.c
@@ -83,14 +83,13 @@ static int create_link(struct config_item *parent_item,
        ret = -ENOMEM;
        sl = kmalloc(sizeof(struct configfs_symlink), GFP_KERNEL);
        if (sl) {
-               sl->sl_target = config_item_get(item);
                spin_lock(&configfs_dirent_lock);
                if (target_sd->s_type & CONFIGFS_USET_DROPPING) {
                        spin_unlock(&configfs_dirent_lock);
-                       config_item_put(item);
                        kfree(sl);
                        return -ENOENT;
                }
+               sl->sl_target = config_item_get(item);
                list_add(&sl->sl_list, &target_sd->s_links);
                spin_unlock(&configfs_dirent_lock);
                ret = configfs_create_link(sl, parent_item->ci_dentry,
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 1e2872b25343..148c4e9cf22f 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -171,7 +171,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                addr = ALIGN(addr, huge_page_size(h));
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
-                   (!vma || addr + len <= vma->vm_start))
+                   (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }
 
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index af33fb77196f..aea2d0bf7174 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -284,11 +284,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
 
        /* We don't show the stack guard page in /proc/maps */
        start = vma->vm_start;
-       if (stack_guard_page_start(vma, start))
-               start += PAGE_SIZE;
        end = vma->vm_end;
-       if (stack_guard_page_end(vma, end))
-               end -= PAGE_SIZE;
 
        seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
        seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
diff --git a/include/linux/log2.h b/include/linux/log2.h
index f38fae23bdac..c373295f359f 100644
--- a/include/linux/log2.h
+++ b/include/linux/log2.h
@@ -194,6 +194,17 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
  *  ... and so on.
  */
 
-#define order_base_2(n) ilog2(roundup_pow_of_two(n))
+static inline __attribute_const__
+int __order_base_2(unsigned long n)
+{
+       return n > 1 ? ilog2(n - 1) + 1 : 0;
+}
 
+#define order_base_2(n)                                \
+(                                              \
+       __builtin_constant_p(n) ? (             \
+               ((n) == 0 || (n) == 1) ? 0 :    \
+               ilog2((n) - 1) + 1) :           \
+       __order_base_2(n)                       \
+)
 #endif /* _LINUX_LOG2_H */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index db853dee1ec5..54ad2e45bc6b 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1242,34 +1242,6 @@ int set_page_dirty_lock(struct page *page);
 int clear_page_dirty_for_io(struct page *page);
 int get_cmdline(struct task_struct *task, char *buffer, int buflen);
 
-/* Is the vma a continuation of the stack vma above it? */
-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
-{
-       return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
-}
-
-static inline int stack_guard_page_start(struct vm_area_struct *vma,
-                                            unsigned long addr)
-{
-       return (vma->vm_flags & VM_GROWSDOWN) &&
-               (vma->vm_start == addr) &&
-               !vma_growsdown(vma->vm_prev, addr);
-}
-
-/* Is the vma a continuation of the stack vma below it? */
-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
-{
-       return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
-}
-
-static inline int stack_guard_page_end(struct vm_area_struct *vma,
-                                          unsigned long addr)
-{
-       return (vma->vm_flags & VM_GROWSUP) &&
-               (vma->vm_end == addr) &&
-               !vma_growsup(vma->vm_next, addr);
-}
-
 extern struct task_struct *task_of_stack(struct task_struct *task,
                                struct vm_area_struct *vma, bool in_group);
 
@@ -1930,6 +1902,7 @@ void page_cache_async_readahead(struct address_space *mapping,
 
 unsigned long max_sane_readahead(unsigned long nr);
 
+extern unsigned long stack_guard_gap;
 /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
 extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
 
@@ -1958,6 +1931,30 @@ static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * m
        return vma;
 }
 
+static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
+{
+       unsigned long vm_start = vma->vm_start;
+
+       if (vma->vm_flags & VM_GROWSDOWN) {
+               vm_start -= stack_guard_gap;
+               if (vm_start > vma->vm_start)
+                       vm_start = 0;
+       }
+       return vm_start;
+}
+
+static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
+{
+       unsigned long vm_end = vma->vm_end;
+
+       if (vma->vm_flags & VM_GROWSUP) {
+               vm_end += stack_guard_gap;
+               if (vm_end < vma->vm_end)
+                       vm_end = -PAGE_SIZE;
+       }
+       return vm_end;
+}
+
 static inline unsigned long vma_pages(struct vm_area_struct *vma)
 {
        return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
diff --git a/include/uapi/linux/usb/ch11.h b/include/uapi/linux/usb/ch11.h
index 331499d597fa..9ce10d4a0245 100644
--- a/include/uapi/linux/usb/ch11.h
+++ b/include/uapi/linux/usb/ch11.h
@@ -22,6 +22,9 @@
  */
 #define USB_MAXCHILDREN                31
 
+/* See USB 3.1 spec Table 10-5 */
+#define USB_SS_MAXPORTS                15
+
 /*
  * Hub request types
  */
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 0a9104b4608b..e7ef539c56d9 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1156,8 +1156,10 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
                        ret = __irq_set_trigger(desc, irq,
                                        new->flags & IRQF_TRIGGER_MASK);
 
-                       if (ret)
+                       if (ret) {
+                               irq_release_resources(desc);
                                goto out_mask;
+                       }
                }
 
                desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index a7077d3ae52f..3d7fb75354b7 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -614,6 +614,14 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
 
        /* start the timer */
        timr->it.alarm.interval = timespec_to_ktime(new_setting->it_interval);
+
+       /*
+        * Rate limit to the tick as a hot fix to prevent DOS. Will be
+        * mopped up later.
+        */
+       if (ktime_to_ns(timr->it.alarm.interval) < TICK_NSEC)
+               timr->it.alarm.interval = ktime_set(0, TICK_NSEC);
+
        exp = timespec_to_ktime(new_setting->it_value);
        /* Convert (if necessary) to absolute time */
        if (flags != TIMER_ABSTIME) {
diff --git a/mm/gup.c b/mm/gup.c
index 3cec4df06e6b..ce1630bf0b95 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -275,11 +275,6 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
        unsigned int fault_flags = 0;
        int ret;
 
-       /* For mlock, just skip the stack guard page. */
-       if ((*flags & FOLL_MLOCK) &&
-                       (stack_guard_page_start(vma, address) ||
-                        stack_guard_page_end(vma, address + PAGE_SIZE)))
-               return -ENOENT;
        if (*flags & FOLL_WRITE)
                fault_flags |= FAULT_FLAG_WRITE;
        if (nonblocking)
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 185099e55cfb..e14e75754154 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1189,7 +1189,10 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
         * page_remove_rmap() in try_to_unmap_one(). So to determine page status
         * correctly, we save a copy of the page flags at this time.
         */
-       page_flags = p->flags;
+       if (PageHuge(p))
+               page_flags = hpage->flags;
+       else
+               page_flags = p->flags;
 
        /*
         * unpoison always clear PG_hwpoison inside page lock
diff --git a/mm/memory.c b/mm/memory.c
index 6ca26c332712..0c4f5e36b155 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2580,40 +2580,6 @@ out_release:
 }
 
 /*
- * This is like a special single-page "expand_{down|up}wards()",
- * except we must first make sure that 'address{-|+}PAGE_SIZE'
- * doesn't hit another vma.
- */
-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
-{
-       address &= PAGE_MASK;
-       if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
-               struct vm_area_struct *prev = vma->vm_prev;
-
-               /*
-                * Is there a mapping abutting this one below?
-                *
-                * That's only ok if it's the same stack mapping
-                * that has gotten split..
-                */
-               if (prev && prev->vm_end == address)
-                       return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
-
-               return expand_downwards(vma, address - PAGE_SIZE);
-       }
-       if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
-               struct vm_area_struct *next = vma->vm_next;
-
-               /* As VM_GROWSDOWN but s/below/above/ */
-               if (next && next->vm_start == address + PAGE_SIZE)
-                       return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
-
-               return expand_upwards(vma, address + PAGE_SIZE);
-       }
-       return 0;
-}
-
-/*
  * We enter with non-exclusive mmap_sem (to exclude vma changes,
  * but allow concurrent faults), and pte mapped but not yet locked.
  * We return with mmap_sem still held, but pte unmapped and unlocked.
@@ -2633,10 +2599,6 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
        if (vma->vm_flags & VM_SHARED)
                return VM_FAULT_SIGBUS;
 
-       /* Check if we need to add a guard page to the stack */
-       if (check_stack_guard_page(vma, address) < 0)
-               return VM_FAULT_SIGSEGV;
-
        /* Use the zero-page for reads */
        if (!(flags & FAULT_FLAG_WRITE)) {
                entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
diff --git a/mm/mmap.c b/mm/mmap.c
index f03267136fb7..f975ec90f710 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -290,6 +290,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
        unsigned long retval;
        unsigned long newbrk, oldbrk;
        struct mm_struct *mm = current->mm;
+       struct vm_area_struct *next;
        unsigned long min_brk;
        bool populate;
 
@@ -334,7 +335,8 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
        }
 
        /* Check against existing mmap mappings. */
-       if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
+       next = find_vma(mm, oldbrk);
+       if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
                goto out;
 
        /* Ok, looks good - let it rip. */
@@ -357,10 +359,22 @@ out:
 
 static long vma_compute_subtree_gap(struct vm_area_struct *vma)
 {
-       unsigned long max, subtree_gap;
-       max = vma->vm_start;
-       if (vma->vm_prev)
-               max -= vma->vm_prev->vm_end;
+       unsigned long max, prev_end, subtree_gap;
+
+       /*
+        * Note: in the rare case of a VM_GROWSDOWN above a VM_GROWSUP, we
+        * allow two stack_guard_gaps between them here, and when choosing
+        * an unmapped area; whereas when expanding we only require one.
+        * That's a little inconsistent, but keeps the code here simpler.
+        */
+       max = vm_start_gap(vma);
+       if (vma->vm_prev) {
+               prev_end = vm_end_gap(vma->vm_prev);
+               if (max > prev_end)
+                       max -= prev_end;
+               else
+                       max = 0;
+       }
        if (vma->vm_rb.rb_left) {
                subtree_gap = rb_entry(vma->vm_rb.rb_left,
                                struct vm_area_struct, vm_rb)->rb_subtree_gap;
@@ -453,7 +467,7 @@ static void validate_mm(struct mm_struct *mm)
                        anon_vma_unlock_read(anon_vma);
                }
 
-               highest_address = vma->vm_end;
+               highest_address = vm_end_gap(vma);
                vma = vma->vm_next;
                i++;
        }
@@ -622,7 +636,7 @@ void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
        if (vma->vm_next)
                vma_gap_update(vma->vm_next);
        else
-               mm->highest_vm_end = vma->vm_end;
+               mm->highest_vm_end = vm_end_gap(vma);
 
        /*
         * vma->vm_prev wasn't known when we followed the rbtree to find the
@@ -874,7 +888,7 @@ again:                      remove_next = 1 + (end > next->vm_end);
                        vma_gap_update(vma);
                if (end_changed) {
                        if (!next)
-                               mm->highest_vm_end = end;
+                               mm->highest_vm_end = vm_end_gap(vma);
                        else if (!adjust_next)
                                vma_gap_update(next);
                }
@@ -917,7 +931,7 @@ again:                      remove_next = 1 + (end > next->vm_end);
                else if (next)
                        vma_gap_update(next);
                else
-                       mm->highest_vm_end = end;
+                       VM_WARN_ON(mm->highest_vm_end != vm_end_gap(vma));
        }
        if (insert && file)
                uprobe_mmap(insert);
@@ -1740,7 +1754,7 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
 
        while (true) {
                /* Visit left subtree if it looks promising */
-               gap_end = vma->vm_start;
+               gap_end = vm_start_gap(vma);
                if (gap_end >= low_limit && vma->vm_rb.rb_left) {
                        struct vm_area_struct *left =
                                rb_entry(vma->vm_rb.rb_left,
@@ -1751,12 +1765,13 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
                        }
                }
 
-               gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
+               gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
 check_current:
                /* Check if current node has a suitable gap */
                if (gap_start > high_limit)
                        return -ENOMEM;
-               if (gap_end >= low_limit && gap_end - gap_start >= length)
+               if (gap_end >= low_limit &&
+                   gap_end > gap_start && gap_end - gap_start >= length)
                        goto found;
 
                /* Visit right subtree if it looks promising */
@@ -1778,8 +1793,8 @@ check_current:
                        vma = rb_entry(rb_parent(prev),
                                       struct vm_area_struct, vm_rb);
                        if (prev == vma->vm_rb.rb_left) {
-                               gap_start = vma->vm_prev->vm_end;
-                               gap_end = vma->vm_start;
+                               gap_start = vm_end_gap(vma->vm_prev);
+                               gap_end = vm_start_gap(vma);
                                goto check_current;
                        }
                }
@@ -1843,7 +1858,7 @@ unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
 
        while (true) {
                /* Visit right subtree if it looks promising */
-               gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
+               gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
                if (gap_start <= high_limit && vma->vm_rb.rb_right) {
                        struct vm_area_struct *right =
                                rb_entry(vma->vm_rb.rb_right,
@@ -1856,10 +1871,11 @@ unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
 
 check_current:
                /* Check if current node has a suitable gap */
-               gap_end = vma->vm_start;
+               gap_end = vm_start_gap(vma);
                if (gap_end < low_limit)
                        return -ENOMEM;
-               if (gap_start <= high_limit && gap_end - gap_start >= length)
+               if (gap_start <= high_limit &&
+                   gap_end > gap_start && gap_end - gap_start >= length)
                        goto found;
 
                /* Visit left subtree if it looks promising */
@@ -1882,7 +1898,7 @@ check_current:
                                       struct vm_area_struct, vm_rb);
                        if (prev == vma->vm_rb.rb_right) {
                                gap_start = vma->vm_prev ?
-                                       vma->vm_prev->vm_end : 0;
+                                       vm_end_gap(vma->vm_prev) : 0;
                                goto check_current;
                        }
                }
@@ -1920,7 +1936,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
 {
        struct mm_struct *mm = current->mm;
-       struct vm_area_struct *vma;
+       struct vm_area_struct *vma, *prev;
        struct vm_unmapped_area_info info;
 
        if (len > TASK_SIZE - mmap_min_addr)
@@ -1931,9 +1947,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 
        if (addr) {
                addr = PAGE_ALIGN(addr);
-               vma = find_vma(mm, addr);
+               vma = find_vma_prev(mm, addr, &prev);
                if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
-                   (!vma || addr + len <= vma->vm_start))
+                   (!vma || addr + len <= vm_start_gap(vma)) &&
+                   (!prev || addr >= vm_end_gap(prev)))
                        return addr;
        }
 
@@ -1956,7 +1973,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                          const unsigned long len, const unsigned long pgoff,
                          const unsigned long flags)
 {
-       struct vm_area_struct *vma;
+       struct vm_area_struct *vma, *prev;
        struct mm_struct *mm = current->mm;
        unsigned long addr = addr0;
        struct vm_unmapped_area_info info;
@@ -1971,9 +1988,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
        /* requesting a specific address */
        if (addr) {
                addr = PAGE_ALIGN(addr);
-               vma = find_vma(mm, addr);
+               vma = find_vma_prev(mm, addr, &prev);
                if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
-                               (!vma || addr + len <= vma->vm_start))
+                               (!vma || addr + len <= vm_start_gap(vma)) &&
+                               (!prev || addr >= vm_end_gap(prev)))
                        return addr;
        }
 
@@ -2099,21 +2117,19 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
  * update accounting. This is shared with both the
  * grow-up and grow-down cases.
  */
-static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow)
+static int acct_stack_growth(struct vm_area_struct *vma,
+                            unsigned long size, unsigned long grow)
 {
        struct mm_struct *mm = vma->vm_mm;
        struct rlimit *rlim = current->signal->rlim;
-       unsigned long new_start, actual_size;
+       unsigned long new_start;
 
        /* address space limit tests */
        if (!may_expand_vm(mm, grow))
                return -ENOMEM;
 
        /* Stack limit test */
-       actual_size = size;
-       if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
-               actual_size -= PAGE_SIZE;
-       if (actual_size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
+       if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
                return -ENOMEM;
 
        /* mlock limit tests */
@@ -2154,16 +2170,32 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
  */
 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 {
+       struct vm_area_struct *next;
+       unsigned long gap_addr;
        int error = 0;
 
        if (!(vma->vm_flags & VM_GROWSUP))
                return -EFAULT;
 
-       /* Guard against wrapping around to address 0. */
-       if (address < PAGE_ALIGN(address+4))
-               address = PAGE_ALIGN(address+4);
-       else
+       /* Guard against exceeding limits of the address space. */
+       address &= PAGE_MASK;
+       if (address >= TASK_SIZE)
                return -ENOMEM;
+       address += PAGE_SIZE;
+
+       /* Enforce stack_guard_gap */
+       gap_addr = address + stack_guard_gap;
+
+       /* Guard against overflow */
+       if (gap_addr < address || gap_addr > TASK_SIZE)
+               gap_addr = TASK_SIZE;
+
+       next = vma->vm_next;
+       if (next && next->vm_start < gap_addr) {
+               if (!(next->vm_flags & VM_GROWSUP))
+                       return -ENOMEM;
+               /* Check that both stack segments have the same anon_vma? */
+       }
 
        /* We must make sure the anon_vma is allocated. */
        if (unlikely(anon_vma_prepare(vma)))
@@ -2205,7 +2237,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
                                if (vma->vm_next)
                                        vma_gap_update(vma->vm_next);
                                else
-                                       vma->vm_mm->highest_vm_end = address;
+                                       vma->vm_mm->highest_vm_end = vm_end_gap(vma);
                                spin_unlock(&vma->vm_mm->page_table_lock);
 
                                perf_event_mmap(vma);
@@ -2225,6 +2257,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 int expand_downwards(struct vm_area_struct *vma,
                                   unsigned long address)
 {
+       struct vm_area_struct *prev;
+       unsigned long gap_addr;
        int error;
 
        address &= PAGE_MASK;
@@ -2232,6 +2266,17 @@ int expand_downwards(struct vm_area_struct *vma,
        if (error)
                return error;
 
+       /* Enforce stack_guard_gap */
+       gap_addr = address - stack_guard_gap;
+       if (gap_addr > address)
+               return -ENOMEM;
+       prev = vma->vm_prev;
+       if (prev && prev->vm_end > gap_addr) {
+               if (!(prev->vm_flags & VM_GROWSDOWN))
+                       return -ENOMEM;
+               /* Check that both stack segments have the same anon_vma? */
+       }
+
        /* We must make sure the anon_vma is allocated. */
        if (unlikely(anon_vma_prepare(vma)))
                return -ENOMEM;
@@ -2283,28 +2328,25 @@ int expand_downwards(struct vm_area_struct *vma,
        return error;
 }
 
-/*
- * Note how expand_stack() refuses to expand the stack all the way to
- * abut the next virtual mapping, *unless* that mapping itself is also
- * a stack mapping. We want to leave room for a guard page, after all
- * (the guard page itself is not added here, that is done by the
- * actual page faulting logic)
- *
- * This matches the behavior of the guard page logic (see mm/memory.c:
- * check_stack_guard_page()), which only allows the guard page to be
- * removed under these circumstances.
- */
+/* enforced gap between the expanding stack and other mappings. */
+unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;
+
+static int __init cmdline_parse_stack_guard_gap(char *p)
+{
+       unsigned long val;
+       char *endptr;
+
+       val = simple_strtoul(p, &endptr, 10);
+       if (!*endptr)
+               stack_guard_gap = val << PAGE_SHIFT;
+
+       return 0;
+}
+__setup("stack_guard_gap=", cmdline_parse_stack_guard_gap);
+
 #ifdef CONFIG_STACK_GROWSUP
 int expand_stack(struct vm_area_struct *vma, unsigned long address)
 {
-       struct vm_area_struct *next;
-
-       address &= PAGE_MASK;
-       next = vma->vm_next;
-       if (next && next->vm_start == address + PAGE_SIZE) {
-               if (!(next->vm_flags & VM_GROWSUP))
-                       return -ENOMEM;
-       }
        return expand_upwards(vma, address);
 }
 
@@ -2326,14 +2368,6 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
 #else
 int expand_stack(struct vm_area_struct *vma, unsigned long address)
 {
-       struct vm_area_struct *prev;
-
-       address &= PAGE_MASK;
-       prev = vma->vm_prev;
-       if (prev && prev->vm_end == address) {
-               if (!(prev->vm_flags & VM_GROWSDOWN))
-                       return -ENOMEM;
-       }
        return expand_downwards(vma, address);
 }
 
@@ -2429,7 +2463,7 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
                vma->vm_prev = prev;
                vma_gap_update(vma);
        } else
-               mm->highest_vm_end = prev ? prev->vm_end : 0;
+               mm->highest_vm_end = prev ? vm_end_gap(prev) : 0;
        tail_vma->vm_next = NULL;
 
        /* Kill the cache */
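
All of the mm/mmap.c hunks above lean on the vm_start_gap()/vm_end_gap()
helpers that this patch introduces in include/linux/mm.h (not shown in this
excerpt). As a rough sketch of the semantics those hunks assume, rather than
the exact definitions from that hunk: a stack VMA is treated as if it extended
by stack_guard_gap on the side it grows toward, clamped so the arithmetic
cannot wrap. This is also why vma_compute_subtree_gap() now clamps the gap to
zero, since the widened ranges of neighbouring stacks may overlap.

    static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
    {
            unsigned long vm_start = vma->vm_start;

            /* a stack growing down reserves stack_guard_gap below itself */
            if (vma->vm_flags & VM_GROWSDOWN) {
                    vm_start -= stack_guard_gap;
                    if (vm_start > vma->vm_start)   /* underflow */
                            vm_start = 0;
            }
            return vm_start;
    }

    static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
    {
            unsigned long vm_end = vma->vm_end;

            /* a stack growing up reserves stack_guard_gap above itself */
            if (vma->vm_flags & VM_GROWSUP) {
                    vm_end += stack_guard_gap;
                    if (vm_end < vma->vm_end)       /* overflow */
                            vm_end = -PAGE_SIZE;
            }
            return vm_end;
    }

With the default of 256 pages this comes to 1 MiB on 4 KiB-page systems
(256 << 12); cmdline_parse_stack_guard_gap() above simply replaces the 256
with the value given on the command line, still counted in pages.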
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index 5331c2bd85a2..29fd07ba57bd 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -368,6 +368,9 @@ static int swap_cgroup_prepare(int type)
                if (!page)
                        goto not_enough_page;
                ctrl->map[idx] = page;
+
+               if (!(idx % SWAP_CLUSTER_MAX))
+                       cond_resched();
        }
        return 0;
 not_enough_page:
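
The page_cgroup hunk is the usual fix for a long preallocation loop:
swap_cgroup_prepare() now calls cond_resched() every SWAP_CLUSTER_MAX pages,
presumably so that preparing a very large swap area cannot hold the CPU long
enough to trigger soft-lockup warnings. The same pattern in isolation
(prealloc_map, BATCH and the error handling are stand-ins, not names from the
patch):

    #define BATCH 32        /* stand-in for SWAP_CLUSTER_MAX */

    /* allocate 'length' pages into table[], yielding every BATCH iterations */
    static int prealloc_map(struct page **table, unsigned long length)
    {
            unsigned long idx;

            for (idx = 0; idx < length; idx++) {
                    struct page *page = alloc_page(GFP_KERNEL);

                    if (!page)
                            return -ENOMEM;         /* caller frees table[0..idx) */
                    table[idx] = page;

                    if (!(idx % BATCH))
                            cond_resched();         /* let other tasks run */
            }
            return 0;
    }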
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 9e3b0b66a4f3..ee7a8983b173 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -76,18 +76,22 @@ static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int a
                }
        }
 
-       addr_type = ipv6_addr_type(&usin->sin6_addr);
-
-       if (addr_type == IPV6_ADDR_ANY) {
+       if (ipv6_addr_any(&usin->sin6_addr)) {
                /*
                 *      connect to self
                 */
-               usin->sin6_addr.s6_addr[15] = 0x01;
+               if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
+                       ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
+                                              &usin->sin6_addr);
+               else
+                       usin->sin6_addr = in6addr_loopback;
        }
 
+       addr_type = ipv6_addr_type(&usin->sin6_addr);
+
        daddr = &usin->sin6_addr;
 
-       if (addr_type == IPV6_ADDR_MAPPED) {
+       if (addr_type & IPV6_ADDR_MAPPED) {
                struct sockaddr_in sin;
 
                if (__ipv6_only_sock(sk)) {
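
The datagram change (mirrored below for TCP connect and UDP sendmsg) adjusts
the BSD "connect to the any address means loopback" convention: if the
socket's local address is IPv4-mapped, the destination becomes
::ffff:127.0.0.1 instead of ::1, presumably so that a flow that is already
IPv4-mapped is not pointed at an IPv6 loopback it cannot use. A small
userspace sketch of that selection, using the glibc macros rather than the
kernel's ipv6_addr_* helpers:

    #include <netinet/in.h>
    #include <string.h>

    /* pick the loopback destination the way the patched connect paths do */
    static void pick_loopback(const struct in6_addr *local, struct in6_addr *dst)
    {
            if (IN6_IS_ADDR_V4MAPPED(local)) {
                    /* build ::ffff:127.0.0.1 */
                    memset(dst, 0, sizeof(*dst));
                    dst->s6_addr[10] = 0xff;
                    dst->s6_addr[11] = 0xff;
                    dst->s6_addr[12] = 127;
                    dst->s6_addr[15] = 1;
            } else {
                    *dst = in6addr_loopback;        /* ::1 */
            }
    }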
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 49a589a4454b..34b26db32c74 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -960,6 +960,9 @@ static int ip6_dst_lookup_tail(struct sock *sk,
                }
        }
 #endif
+       if (ipv6_addr_v4mapped(&fl6->saddr) &&
+           !(ipv6_addr_v4mapped(&fl6->daddr) || ipv6_addr_any(&fl6->daddr)))
+               return -EAFNOSUPPORT;
 
        return 0;
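
The ip6_dst_lookup_tail() hunk above is the enforcement side of the same
series: a flow whose source is IPv4-mapped but whose destination is a real
IPv6 address (and not the unspecified address) is refused with -EAFNOSUPPORT,
since such a mixed pair cannot legitimately appear on the wire. The predicate,
restated as a userspace sketch with the glibc macros:

    #include <netinet/in.h>
    #include <stdbool.h>

    /* true if an IPv4-mapped source is paired with a genuinely IPv6
     * destination, the case the patched kernel now rejects */
    static bool mixed_af_flow(const struct in6_addr *saddr,
                              const struct in6_addr *daddr)
    {
            return IN6_IS_ADDR_V4MAPPED(saddr) &&
                   !(IN6_IS_ADDR_V4MAPPED(daddr) ||
                     IN6_IS_ADDR_UNSPECIFIED(daddr));
    }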
 
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 0ade453839c4..19fe7b789a72 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -164,8 +164,13 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
         *      connect() to INADDR_ANY means loopback (BSD'ism).
         */
 
-       if (ipv6_addr_any(&usin->sin6_addr))
-               usin->sin6_addr.s6_addr[15] = 0x1;
+       if (ipv6_addr_any(&usin->sin6_addr)) {
+               if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
+                       ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
+                                              &usin->sin6_addr);
+               else
+                       usin->sin6_addr = in6addr_loopback;
+       }
 
        addr_type = ipv6_addr_type(&usin->sin6_addr);
 
@@ -204,7 +209,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
         *      TCP over IPv4
         */
 
-       if (addr_type == IPV6_ADDR_MAPPED) {
+       if (addr_type & IPV6_ADDR_MAPPED) {
                u32 exthdrlen = icsk->icsk_ext_hdr_len;
                struct sockaddr_in sin;
 
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 143e6c758b18..a774afa4ce52 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1107,6 +1107,10 @@ int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk,
                        if (addr_len < SIN6_LEN_RFC2133)
                                return -EINVAL;
                        daddr = &sin6->sin6_addr;
+                       if (ipv6_addr_any(daddr) &&
+                           ipv6_addr_v4mapped(&np->saddr))
+                               ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
+                                                      daddr);
                        break;
                case AF_INET:
                        goto do_udp_sendmsg;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index bb6f6ef07566..ea3b13987521 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1319,12 +1319,16 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
         */
        if (!(sta->local->hw.flags & IEEE80211_HW_AP_LINK_PS) &&
            !ieee80211_has_morefrags(hdr->frame_control) &&
+           !ieee80211_is_back_req(hdr->frame_control) &&
            !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
            (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
             rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
-           /* PM bit is only checked in frames where it isn't reserved,
+           /*
+            * PM bit is only checked in frames where it isn't reserved,
             * in AP mode it's reserved in non-bufferable management frames
             * (cf. IEEE 802.11-2012 8.2.4.1.7 Power Management field)
+            * BAR frames should be ignored as specified in
+            * IEEE 802.11-2012 10.2.1.2.
             */
            (!ieee80211_is_mgmt(hdr->frame_control) ||
             ieee80211_is_bufferable_mmpdu(hdr->frame_control))) {
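
The rx.c hunk stops a BlockAckReq (BAR) control frame from being treated as a
power-management indication, matching the new comment's reference to IEEE
802.11-2012 10.2.1.2. As a rough userspace restatement of what the added
!ieee80211_is_back_req() test checks (the constants below are assumed from the
802.11 frame-control layout, type = Control, subtype = BlockAckReq; the
in-kernel helper operates on a little-endian __le16):

    #include <stdbool.h>
    #include <stdint.h>

    #define FCTL_FTYPE      0x000c          /* frame type, bits 2-3 */
    #define FCTL_STYPE      0x00f0          /* frame subtype, bits 4-7 */
    #define FTYPE_CTL       0x0004          /* control frame */
    #define STYPE_BACK_REQ  0x0080          /* BlockAckReq (BAR) */

    /* fc taken in host byte order for this sketch */
    static bool is_back_req(uint16_t fc)
    {
            return (fc & (FCTL_FTYPE | FCTL_STYPE)) ==
                   (FTYPE_CTL | STYPE_BACK_REQ);
    }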
