commit:     6170daa192bd78ce9c1c618fdcf4acb5e0bb0c8c
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Aug  2 14:41:21 2014 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Aug  2 14:41:21 2014 +0000
URL:        http://git.overlays.gentoo.org/gitweb/?p=proj/linux-patches.git;a=commit;h=6170daa1

Linux patch 3.4.101

---
 0000_README              |   4 +
 1100_linux-3.4.101.patch | 349 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 353 insertions(+)

diff --git a/0000_README b/0000_README
index af7f3f3..0f65113 100644
--- a/0000_README
+++ b/0000_README
@@ -439,6 +439,10 @@ Patch:  1099_linux-3.4.100.patch
 From:   http://www.kernel.org
 Desc:   Linux 3.4.100
 
+Patch:  1100_linux-3.4.101.patch
+From:   http://www.kernel.org
+Desc:   Linux 3.4.101
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1100_linux-3.4.101.patch b/1100_linux-3.4.101.patch
new file mode 100644
index 0000000..54832f4
--- /dev/null
+++ b/1100_linux-3.4.101.patch
@@ -0,0 +1,349 @@
+diff --git a/Makefile b/Makefile
+index d6c64eb82525..a22bcb567348 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 4
+-SUBLEVEL = 100
++SUBLEVEL = 101
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+ 
+@@ -592,6 +592,8 @@ KBUILD_CFLAGS      += -fomit-frame-pointer
+ endif
+ endif
+ 
++KBUILD_CFLAGS   += $(call cc-option, -fno-var-tracking-assignments)
++
+ ifdef CONFIG_DEBUG_INFO
+ KBUILD_CFLAGS += -g
+ KBUILD_AFLAGS += -gdwarf-2
+diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
+index 02f300fbf070..e0a8707dd137 100644
+--- a/arch/s390/kernel/ptrace.c
++++ b/arch/s390/kernel/ptrace.c
+@@ -292,7 +292,9 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
+                * psw and gprs are stored on the stack
+                */
+               if (addr == (addr_t) &dummy->regs.psw.mask &&
+-                  ((data & ~PSW_MASK_USER) != psw_user_bits ||
++                  (((data^psw_user_bits) & ~PSW_MASK_USER) ||
++                   (((data^psw_user_bits) & PSW_MASK_ASC) &&
++                    ((data|psw_user_bits) & PSW_MASK_ASC) == PSW_MASK_ASC) ||
+                    ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))))
+                       /* Invalid psw mask. */
+                       return -EINVAL;
+@@ -595,7 +597,10 @@ static int __poke_user_compat(struct task_struct *child,
+                */
+               if (addr == (addr_t) &dummy32->regs.psw.mask) {
+                       /* Build a 64 bit psw mask from 31 bit mask. */
+-                      if ((tmp & ~PSW32_MASK_USER) != psw32_user_bits)
++                      if (((tmp^psw32_user_bits) & ~PSW32_MASK_USER) ||
++                          (((tmp^psw32_user_bits) & PSW32_MASK_ASC) &&
++                           ((tmp|psw32_user_bits) & PSW32_MASK_ASC)
++                           == PSW32_MASK_ASC))
+                               /* Invalid psw mask. */
+                               return -EINVAL;
+                       regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
+diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
+index e1e7f9c831da..e36c5cf38fde 100644
+--- a/arch/x86/kernel/entry_32.S
++++ b/arch/x86/kernel/entry_32.S
+@@ -428,8 +428,8 @@ sysenter_do_call:
+       cmpl $(NR_syscalls), %eax
+       jae sysenter_badsys
+       call *sys_call_table(,%eax,4)
+-      movl %eax,PT_EAX(%esp)
+ sysenter_after_call:
++      movl %eax,PT_EAX(%esp)
+       LOCKDEP_SYS_EXIT
+       DISABLE_INTERRUPTS(CLBR_ANY)
+       TRACE_IRQS_OFF
+@@ -510,6 +510,7 @@ ENTRY(system_call)
+       jae syscall_badsys
+ syscall_call:
+       call *sys_call_table(,%eax,4)
++syscall_after_call:
+       movl %eax,PT_EAX(%esp)          # store the return value
+ syscall_exit:
+       LOCKDEP_SYS_EXIT
+@@ -678,12 +679,12 @@ syscall_fault:
+ END(syscall_fault)
+ 
+ syscall_badsys:
+-      movl $-ENOSYS,PT_EAX(%esp)
+-      jmp syscall_exit
++      movl $-ENOSYS,%eax
++      jmp syscall_after_call
+ END(syscall_badsys)
+ 
+ sysenter_badsys:
+-      movl $-ENOSYS,PT_EAX(%esp)
++      movl $-ENOSYS,%eax
+       jmp sysenter_after_call
+ END(syscall_badsys)
+       CFI_ENDPROC
+diff --git a/block/blk-tag.c b/block/blk-tag.c
+index 4af6f5cc1167..f606487bba56 100644
+--- a/block/blk-tag.c
++++ b/block/blk-tag.c
+@@ -27,18 +27,15 @@ struct request *blk_queue_find_tag(struct request_queue *q, int tag)
+ EXPORT_SYMBOL(blk_queue_find_tag);
+ 
+ /**
+- * __blk_free_tags - release a given set of tag maintenance info
++ * blk_free_tags - release a given set of tag maintenance info
+  * @bqt:      the tag map to free
+  *
+- * Tries to free the specified @bqt.  Returns true if it was
+- * actually freed and false if there are still references using it
++ * Drop the reference count on @bqt and frees it when the last reference
++ * is dropped.
+  */
+-static int __blk_free_tags(struct blk_queue_tag *bqt)
++void blk_free_tags(struct blk_queue_tag *bqt)
+ {
+-      int retval;
+-
+-      retval = atomic_dec_and_test(&bqt->refcnt);
+-      if (retval) {
++      if (atomic_dec_and_test(&bqt->refcnt)) {
+               BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) <
+                                                       bqt->max_depth);
+ 
+@@ -50,9 +47,8 @@ static int __blk_free_tags(struct blk_queue_tag *bqt)
+ 
+               kfree(bqt);
+       }
+-
+-      return retval;
+ }
++EXPORT_SYMBOL(blk_free_tags);
+ 
+ /**
+  * __blk_queue_free_tags - release tag maintenance info
+@@ -69,28 +65,13 @@ void __blk_queue_free_tags(struct request_queue *q)
+       if (!bqt)
+               return;
+ 
+-      __blk_free_tags(bqt);
++      blk_free_tags(bqt);
+ 
+       q->queue_tags = NULL;
+       queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
+ }
+ 
+ /**
+- * blk_free_tags - release a given set of tag maintenance info
+- * @bqt:      the tag map to free
+- *
+- * For externally managed @bqt frees the map.  Callers of this
+- * function must guarantee to have released all the queues that
+- * might have been using this tag map.
+- */
+-void blk_free_tags(struct blk_queue_tag *bqt)
+-{
+-      if (unlikely(!__blk_free_tags(bqt)))
+-              BUG();
+-}
+-EXPORT_SYMBOL(blk_free_tags);
+-
+-/**
+  * blk_queue_free_tags - release tag maintenance info
+  * @q:  the request queue for the device
+  *
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 0e87baf8fcc2..0a450eb517e0 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -446,6 +446,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ 
+       /* Promise */
+       { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci },   /* PDC42819 */
+      { PCI_VDEVICE(PROMISE, 0x3781), board_ahci },   /* FastTrak TX8660 ahci-mode */
+ 
+       /* Asmedia */
+       { PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci },   /* ASM1060 */
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 6e67fdebdada..6b922365d5e9 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4693,6 +4693,10 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
+  *    ata_qc_new - Request an available ATA command, for queueing
+  *    @ap: target port
+  *
++ *    Some ATA host controllers may implement a queue depth which is less
++ *    than ATA_MAX_QUEUE. So we shouldn't allocate a tag which is beyond
++ *    the hardware limitation.
++ *
+  *    LOCKING:
+  *    None.
+  */
+@@ -4700,14 +4704,15 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
+ static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
+ {
+       struct ata_queued_cmd *qc = NULL;
++      unsigned int max_queue = ap->host->n_tags;
+       unsigned int i, tag;
+ 
+       /* no command while frozen */
+       if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
+               return NULL;
+ 
+-      for (i = 0; i < ATA_MAX_QUEUE; i++) {
+-              tag = (i + ap->last_tag + 1) % ATA_MAX_QUEUE;
++      for (i = 0, tag = ap->last_tag + 1; i < max_queue; i++, tag++) {
++              tag = tag < max_queue ? tag : 0;
+ 
+               /* the last tag is reserved for internal command. */
+               if (tag == ATA_TAG_INTERNAL)
+@@ -5959,6 +5964,7 @@ void ata_host_init(struct ata_host *host, struct device *dev,
+ {
+       spin_lock_init(&host->lock);
+       mutex_init(&host->eh_mutex);
++      host->n_tags = ATA_MAX_QUEUE - 1;
+       host->dev = dev;
+       host->flags = flags;
+       host->ops = ops;
+@@ -6041,6 +6047,8 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
+ {
+       int i, rc;
+ 
++      host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE - 1);
++
+       /* host must have been started */
+       if (!(host->flags & ATA_HOST_STARTED)) {
+               dev_err(host->dev, "BUG: trying to register unstarted host\n");
+diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
+index f04c0961f993..e5206fc76562 100644
+--- a/fs/ceph/snap.c
++++ b/fs/ceph/snap.c
+@@ -331,7 +331,7 @@ static int build_snap_context(struct ceph_snap_realm *realm)
+ 
+       /* alloc new snap context */
+       err = -ENOMEM;
+-      if (num > (ULONG_MAX - sizeof(*snapc)) / sizeof(u64))
++      if (num > (SIZE_MAX - sizeof(*snapc)) / sizeof(u64))
+               goto fail;
+       snapc = kzalloc(sizeof(*snapc) + num*sizeof(u64), GFP_NOFS);
+       if (!snapc)
+diff --git a/include/drm/drm_mem_util.h b/include/drm/drm_mem_util.h
+index 6bd325fedc87..19a240446fca 100644
+--- a/include/drm/drm_mem_util.h
++++ b/include/drm/drm_mem_util.h
+@@ -31,7 +31,7 @@
+ 
+ static __inline__ void *drm_calloc_large(size_t nmemb, size_t size)
+ {
+-      if (size != 0 && nmemb > ULONG_MAX / size)
++      if (size != 0 && nmemb > SIZE_MAX / size)
+               return NULL;
+ 
+       if (size * nmemb <= PAGE_SIZE)
+@@ -44,7 +44,7 @@ static __inline__ void *drm_calloc_large(size_t nmemb, size_t size)
+ /* Modeled after cairo's malloc_ab, it's like calloc but without the zeroing. */
+ static __inline__ void *drm_malloc_ab(size_t nmemb, size_t size)
+ {
+-      if (size != 0 && nmemb > ULONG_MAX / size)
++      if (size != 0 && nmemb > SIZE_MAX / size)
+               return NULL;
+ 
+       if (size * nmemb <= PAGE_SIZE)
+diff --git a/include/linux/kernel.h b/include/linux/kernel.h
+index 645231c373c8..b795ee5cd208 100644
+--- a/include/linux/kernel.h
++++ b/include/linux/kernel.h
+@@ -35,6 +35,7 @@
+ #define LLONG_MAX     ((long long)(~0ULL>>1))
+ #define LLONG_MIN     (-LLONG_MAX - 1)
+ #define ULLONG_MAX    (~0ULL)
++#define SIZE_MAX      (~(size_t)0)
+ 
+ #define STACK_MAGIC   0xdeadbeef
+ 
+diff --git a/include/linux/libata.h b/include/linux/libata.h
+index 50d7cb1ee947..dd16deb27dd8 100644
+--- a/include/linux/libata.h
++++ b/include/linux/libata.h
+@@ -539,6 +539,7 @@ struct ata_host {
+       struct device           *dev;
+       void __iomem * const    *iomap;
+       unsigned int            n_ports;
++      unsigned int            n_tags;                 /* nr of NCQ tags */
+       void                    *private_data;
+       struct ata_port_operations *ops;
+       unsigned long           flags;
+diff --git a/include/linux/slab.h b/include/linux/slab.h
+index a595dce6b0c7..67d5d94b783a 100644
+--- a/include/linux/slab.h
++++ b/include/linux/slab.h
+@@ -242,7 +242,7 @@ size_t ksize(const void *);
+  */
+ static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
+ {
+-      if (size != 0 && n > ULONG_MAX / size)
++      if (size != 0 && n > SIZE_MAX / size)
+               return NULL;
+       return __kmalloc(n * size, flags);
+ }
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 0d4e0ad97a04..efd682099a0a 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -2348,6 +2348,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
+               } else {
+                       if (cow)
+                               huge_ptep_set_wrprotect(src, addr, src_pte);
++                      entry = huge_ptep_get(src_pte);
+                       ptepage = pte_page(entry);
+                       get_page(ptepage);
+                       page_dup_rmap(ptepage);
+diff --git a/mm/kmemleak.c b/mm/kmemleak.c
+index 45eb6217bf38..ad6ee88a3d48 100644
+--- a/mm/kmemleak.c
++++ b/mm/kmemleak.c
+@@ -750,7 +750,9 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
+       }
+ 
+       spin_lock_irqsave(&object->lock, flags);
+-      if (ptr + size > object->pointer + object->size) {
++      if (size == SIZE_MAX) {
++              size = object->pointer + object->size - ptr;
++      } else if (ptr + size > object->pointer + object->size) {
+               kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
+               dump_object_info(object);
+               kmem_cache_free(scan_area_cache, area);
+diff --git a/mm/vmalloc.c b/mm/vmalloc.c
+index 1196c7728ede..ad9d90064a4b 100644
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -349,6 +349,12 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
+       if (unlikely(!va))
+               return ERR_PTR(-ENOMEM);
+ 
++      /*
++       * Only scan the relevant parts containing pointers to other objects
++       * to avoid false negatives.
++       */
++      kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK);
++
+ retry:
+       spin_lock(&vmap_area_lock);
+       /*
+@@ -1669,11 +1675,11 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
+       insert_vmalloc_vmlist(area);
+ 
+       /*
+-       * A ref_count = 3 is needed because the vm_struct and vmap_area
+-       * structures allocated in the __get_vm_area_node() function contain
+-       * references to the virtual address of the vmalloc'ed block.
++       * A ref_count = 2 is needed because vm_struct allocated in
++       * __get_vm_area_node() contains a reference to the virtual address of
++       * the vmalloc'ed block.
+        */
+-      kmemleak_alloc(addr, real_size, 3, gfp_mask);
++      kmemleak_alloc(addr, real_size, 2, gfp_mask);
+ 
+       return addr;
+ 

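For reference, the libata hunk above replaces the fixed modulo-ATA_MAX_QUEUE tag search in ata_qc_new() with a round-robin search bounded by the host's advertised queue depth (host->n_tags). The standalone C sketch below mirrors that wrap-and-skip loop outside the kernel; the find_free_tag() helper and the small test harness are hypothetical, only the loop structure is taken from the patch.

/*
 * Standalone sketch (not kernel code) of the bounded round-robin tag
 * search introduced in ata_qc_new(): tags wrap at max_queue instead of
 * ATA_MAX_QUEUE, and the tag reserved for internal commands is skipped.
 */
#include <stdio.h>

#define ATA_MAX_QUEUE    32
#define ATA_TAG_INTERNAL (ATA_MAX_QUEUE - 1)

static int find_free_tag(const int *tag_in_use, unsigned int max_queue,
                         unsigned int last_tag)
{
        unsigned int i, tag;

        for (i = 0, tag = last_tag + 1; i < max_queue; i++, tag++) {
                tag = tag < max_queue ? tag : 0;   /* wrap at the hardware limit */

                if (tag == ATA_TAG_INTERNAL)       /* reserved for internal commands */
                        continue;
                if (!tag_in_use[tag])
                        return (int)tag;
        }
        return -1;                                 /* queue full */
}

int main(void)
{
        int in_use[ATA_MAX_QUEUE] = { [0] = 1, [1] = 1 };

        /* Host advertises a queue depth of 4: only tags 0..3 are eligible. */
        printf("next tag: %d\n", find_free_tag(in_use, 4, 1)); /* prints 2 */
        return 0;
}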