diff --git a/Makefile b/Makefile
index 5adfadf..810572b 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 3
 PATCHLEVEL = 4
-SUBLEVEL = 51
+SUBLEVEL = 52
 EXTRAVERSION =
 NAME = Saber-toothed Squirrel
 
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 42dec04..0a5e8a5 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -305,9 +305,7 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
 }
 
 #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
-static inline void flush_kernel_dcache_page(struct page *page)
-{
-}
+extern void flush_kernel_dcache_page(struct page *);
 
 #define flush_dcache_mmap_lock(mapping) \
        spin_lock_irq(&(mapping)->tree_lock)
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 40ca11e..8f0d285 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -299,6 +299,39 @@ void flush_dcache_page(struct page *page)
 EXPORT_SYMBOL(flush_dcache_page);
 
 /*
+ * Ensure cache coherency for the kernel mapping of this page. We can
+ * assume that the page is pinned via kmap.
+ *
+ * If the page only exists in the page cache and there are no user
+ * space mappings, this is a no-op since the page was already marked
+ * dirty at creation.  Otherwise, we need to flush the dirty kernel
+ * cache lines directly.
+ */
+void flush_kernel_dcache_page(struct page *page)
+{
+       if (cache_is_vivt() || cache_is_vipt_aliasing()) {
+               struct address_space *mapping;
+
+               mapping = page_mapping(page);
+
+               if (!mapping || mapping_mapped(mapping)) {
+                       void *addr;
+
+                       addr = page_address(page);
+                       /*
+                        * kmap_atomic() doesn't set the page virtual
+                        * address for highmem pages, and
+                        * kunmap_atomic() takes care of cache
+                        * flushing already.
+                        */
+                       if (!IS_ENABLED(CONFIG_HIGHMEM) || addr)
+                               __cpuc_flush_dcache_area(addr, PAGE_SIZE);
+               }
+       }
+}
+EXPORT_SYMBOL(flush_kernel_dcache_page);
+
+/*
  * Flush an anonymous page so that users of get_user_pages()
  * can safely access the data.  The expected sequence is:
  *
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index d51225f..eb5293a 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -57,6 +57,12 @@ void flush_dcache_page(struct page *page)
 }
 EXPORT_SYMBOL(flush_dcache_page);
 
+void flush_kernel_dcache_page(struct page *page)
+{
+       __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
+}
+EXPORT_SYMBOL(flush_kernel_dcache_page);
+
 void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                       unsigned long uaddr, void *dst, const void *src,
                       unsigned long len)
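
For context: the reason flush_kernel_dcache_page() must now do real work is that a driver writing a pinned page through its kernel mapping dirties cache lines that an aliasing user-space mapping will not observe. A minimal usage sketch, assuming a hypothetical driver helper (kmap_atomic(), memcpy() and the flush call are the real APIs; the helper itself is illustrative):

        static void copy_buf_to_page(struct page *page, const void *buf,
                                     size_t len)
        {
                void *dst = kmap_atomic(page);

                memcpy(dst, buf, len);          /* dirties kernel-mapping lines */
                flush_kernel_dcache_page(page); /* no longer a stub on ARM */
                kunmap_atomic(dst);
        }
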
diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c
index 147614e..6a8a382 100644
--- a/drivers/net/wan/dlci.c
+++ b/drivers/net/wan/dlci.c
@@ -384,21 +384,37 @@ static int dlci_del(struct dlci_add *dlci)
        struct frad_local       *flp;
        struct net_device       *master, *slave;
        int                     err;
+       bool                    found = false;
+
+       rtnl_lock();
 
        /* validate slave device */
        master = __dev_get_by_name(&init_net, dlci->devname);
-       if (!master)
-               return -ENODEV;
+       if (!master) {
+               err = -ENODEV;
+               goto out;
+       }
+
+       list_for_each_entry(dlp, &dlci_devs, list) {
+               if (dlp->master == master) {
+                       found = true;
+                       break;
+               }
+       }
+       if (!found) {
+               err = -ENODEV;
+               goto out;
+       }
 
        if (netif_running(master)) {
-               return -EBUSY;
+               err = -EBUSY;
+               goto out;
        }
 
        dlp = netdev_priv(master);
        slave = dlp->slave;
        flp = netdev_priv(slave);
 
-       rtnl_lock();
        err = (*flp->deassoc)(slave, master);
        if (!err) {
                list_del(&dlp->list);
@@ -407,8 +423,8 @@ static int dlci_del(struct dlci_add *dlci)
 
                dev_put(slave);
        }
+out:
        rtnl_unlock();
-
        return err;
 }
 
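
The dlci_del() change is a lock-widening fix: the device was previously looked up and validated before rtnl_lock() was taken, so it could be unregistered between the check and the use. A user-space analogue of the corrected pattern, as a hedged sketch (all names below are illustrative, not from the driver):

        #include <errno.h>
        #include <pthread.h>

        static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

        struct dev;                                   /* opaque for the sketch */
        struct dev *table_lookup(const char *name);   /* assumed helpers */
        int dev_teardown(struct dev *d);

        int delete_by_name(const char *name)
        {
                struct dev *d;
                int err;

                pthread_mutex_lock(&table_lock);      /* lock BEFORE the lookup */
                d = table_lookup(name);
                if (!d) {
                        err = -ENODEV;
                        goto out;
                }
                err = dev_teardown(d);                /* still under the lock */
        out:
                pthread_mutex_unlock(&table_lock);
                return err;
        }
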
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 7d47514..2e99f79 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -1034,22 +1034,39 @@ static unsigned int dma_handle_tx(struct eg20t_port *priv)
 static void pch_uart_err_ir(struct eg20t_port *priv, unsigned int lsr)
 {
        u8 fcr = ioread8(priv->membase + UART_FCR);
+       struct uart_port *port = &priv->port;
+       struct tty_struct *tty = tty_port_tty_get(&port->state->port);
+       char   *error_msg[5] = {};
+       int    i = 0;
 
        /* Reset FIFO */
        fcr |= UART_FCR_CLEAR_RCVR;
        iowrite8(fcr, priv->membase + UART_FCR);
 
        if (lsr & PCH_UART_LSR_ERR)
-               dev_err(&priv->pdev->dev, "Error data in FIFO\n");
+               error_msg[i++] = "Error data in FIFO\n";
 
-       if (lsr & UART_LSR_FE)
-               dev_err(&priv->pdev->dev, "Framing Error\n");
+       if (lsr & UART_LSR_FE) {
+               port->icount.frame++;
+               error_msg[i++] = "  Framing Error\n";
+       }
 
-       if (lsr & UART_LSR_PE)
-               dev_err(&priv->pdev->dev, "Parity Error\n");
+       if (lsr & UART_LSR_PE) {
+               port->icount.parity++;
+               error_msg[i++] = "  Parity Error\n";
+       }
 
-       if (lsr & UART_LSR_OE)
-               dev_err(&priv->pdev->dev, "Overrun Error\n");
+       if (lsr & UART_LSR_OE) {
+               port->icount.overrun++;
+               error_msg[i++] = "  Overrun Error\n";
+       }
+
+       if (tty == NULL) {
+               for (i = 0; error_msg[i] != NULL; i++)
+                       dev_err(&priv->pdev->dev, error_msg[i]);
+       } else {
+               tty_kref_put(tty);
+       }
 }
 
 static irqreturn_t pch_uart_interrupt(int irq, void *dev_id)
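
The pch_uart rework avoids calling dev_err() synchronously from the error interrupt: when the port is itself the console, printing would recurse into the UART and deadlock. Messages are therefore gathered first and only emitted when no tty is attached. A compact sketch of that gather-then-gate pattern (the status bits and log helper are hypothetical):

        #define ERR_FRAMING 0x1                 /* hypothetical status bits */
        #define ERR_PARITY  0x2

        void log_error(const char *msg);        /* assumed logging helper */

        static void report_uart_errors(unsigned int status, int console_attached)
        {
                const char *msgs[4];
                int i, n = 0;

                /* Gather first; no logging inside the interrupt path. */
                if (status & ERR_FRAMING)
                        msgs[n++] = "framing error";
                if (status & ERR_PARITY)
                        msgs[n++] = "parity error";

                /* Emit only where it cannot recurse into this device. */
                if (!console_attached)
                        for (i = 0; i < n; i++)
                                log_error(msgs[i]);
        }
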
diff --git a/fs/exec.c b/fs/exec.c
index 2b7f5ff..0ea0b4c 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1163,13 +1163,6 @@ void setup_new_exec(struct linux_binprm * bprm)
                        set_dumpable(current->mm, suid_dumpable);
        }
 
-       /*
-        * Flush performance counters when crossing a
-        * security domain:
-        */
-       if (!get_dumpable(current->mm))
-               perf_event_exit_task(current);
-
        /* An exec changes our domain. We are no longer part of the thread
           group */
 
@@ -1233,6 +1226,15 @@ void install_exec_creds(struct linux_binprm *bprm)
 
        commit_creds(bprm->cred);
        bprm->cred = NULL;
+
+       /*
+        * Disable monitoring for regular users
+        * when executing setuid binaries. Must
+        * wait until new credentials are committed
+        * by commit_creds() above
+        */
+       if (get_dumpable(current->mm) != SUID_DUMP_USER)
+               perf_event_exit_task(current);
        /*
         * cred_guard_mutex must be held at least to this point to prevent
         * ptrace_attach() from altering our determination of the task's
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index 8640a12..25c472b 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -357,31 +357,50 @@ static unsigned int vfs_dent_type(uint8_t type)
 static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
 {
        int err, over = 0;
+       loff_t pos = file->f_pos;
        struct qstr nm;
        union ubifs_key key;
        struct ubifs_dent_node *dent;
        struct inode *dir = file->f_path.dentry->d_inode;
        struct ubifs_info *c = dir->i_sb->s_fs_info;
 
-       dbg_gen("dir ino %lu, f_pos %#llx", dir->i_ino, file->f_pos);
+       dbg_gen("dir ino %lu, f_pos %#llx", dir->i_ino, pos);
 
-       if (file->f_pos > UBIFS_S_KEY_HASH_MASK || file->f_pos == 2)
+       if (pos > UBIFS_S_KEY_HASH_MASK || pos == 2)
                /*
                 * The directory was seek'ed to a senseless position or there
                 * are no more entries.
                 */
                return 0;
 
+       if (file->f_version == 0) {
+               /*
+                * The file was seek'ed, which means that @file->private_data
+                * is now invalid. This may also be just the first
+                * 'ubifs_readdir()' invocation, in which case
+                * @file->private_data is NULL, and the below code is
+                * basically a no-op.
+                */
+               kfree(file->private_data);
+               file->private_data = NULL;
+       }
+
+       /*
+        * 'generic_file_llseek()' unconditionally sets @file->f_version to
+        * zero, and we use this for detecting whether the file was seek'ed.
+        */
+       file->f_version = 1;
+
        /* File positions 0 and 1 correspond to "." and ".." */
-       if (file->f_pos == 0) {
+       if (pos == 0) {
                ubifs_assert(!file->private_data);
                over = filldir(dirent, ".", 1, 0, dir->i_ino, DT_DIR);
                if (over)
                        return 0;
-               file->f_pos = 1;
+               file->f_pos = pos = 1;
        }
 
-       if (file->f_pos == 1) {
+       if (pos == 1) {
                ubifs_assert(!file->private_data);
                over = filldir(dirent, "..", 2, 1,
                               parent_ino(file->f_path.dentry), DT_DIR);
@@ -397,7 +416,7 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
                        goto out;
                }
 
-               file->f_pos = key_hash_flash(c, &dent->key);
+               file->f_pos = pos = key_hash_flash(c, &dent->key);
                file->private_data = dent;
        }
 
@@ -405,17 +424,16 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
        if (!dent) {
                /*
                 * The directory was seek'ed to and is now readdir'ed.
-                * Find the entry corresponding to @file->f_pos or the
-                * closest one.
+                * Find the entry corresponding to @pos or the closest one.
                 */
-               dent_key_init_hash(c, &key, dir->i_ino, file->f_pos);
+               dent_key_init_hash(c, &key, dir->i_ino, pos);
                nm.name = NULL;
                dent = ubifs_tnc_next_ent(c, &key, &nm);
                if (IS_ERR(dent)) {
                        err = PTR_ERR(dent);
                        goto out;
                }
-               file->f_pos = key_hash_flash(c, &dent->key);
+               file->f_pos = pos = key_hash_flash(c, &dent->key);
                file->private_data = dent;
        }
 
@@ -427,7 +445,7 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
                             ubifs_inode(dir)->creat_sqnum);
 
                nm.len = le16_to_cpu(dent->nlen);
-               over = filldir(dirent, dent->name, nm.len, file->f_pos,
+               over = filldir(dirent, dent->name, nm.len, pos,
                               le64_to_cpu(dent->inum),
                               vfs_dent_type(dent->type));
                if (over)
@@ -443,9 +461,17 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
                }
 
                kfree(file->private_data);
-               file->f_pos = key_hash_flash(c, &dent->key);
+               file->f_pos = pos = key_hash_flash(c, &dent->key);
                file->private_data = dent;
                cond_resched();
+
+               if (file->f_version == 0)
+                       /*
+                        * The file was seek'ed meanwhile, let's return and start
+                        * reading direntries from the new position on the next
+                        * invocation.
+                        */
+                       return 0;
        }
 
 out:
@@ -456,15 +482,13 @@ out:
 
        kfree(file->private_data);
        file->private_data = NULL;
+       /* 2 is a special value indicating that there are no more direntries */
        file->f_pos = 2;
        return 0;
 }
 
-/* If a directory is seeked, we have to free saved readdir() state */
 static loff_t ubifs_dir_llseek(struct file *file, loff_t offset, int origin)
 {
-       kfree(file->private_data);
-       file->private_data = NULL;
        return generic_file_llseek(file, offset, origin);
 }
 
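
The UBIFS fix builds a small protocol on top of @file->f_version: generic_file_llseek() unconditionally zeroes it, ubifs_readdir() sets it to 1 on entry, and a zero observed mid-walk therefore means a seek raced in and the cursor cached in @file->private_data is stale. A miniature model of that protocol (an illustrative sketch, not the UBIFS code; the helpers are hypothetical):

        struct dir_file {
                unsigned long f_version;        /* zeroed by every llseek */
                void *cursor;                   /* cached readdir position */
        };

        void free_cursor(void *c);              /* assumed helpers */
        int emit_next_entry(struct dir_file *f);

        static void dir_llseek(struct dir_file *f)
        {
                f->f_version = 0;               /* mirrors generic_file_llseek() */
        }

        static void dir_readdir(struct dir_file *f)
        {
                if (f->f_version == 0) {        /* a seek invalidated the cursor */
                        free_cursor(f->cursor);
                        f->cursor = NULL;
                }
                f->f_version = 1;

                while (emit_next_entry(f)) {
                        if (f->f_version == 0)  /* concurrent seek: stop cleanly */
                                return;
                }
        }
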
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 8e9a069..89fb74e 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -950,8 +950,7 @@ struct perf_event {
        /* mmap bits */
        struct mutex                    mmap_mutex;
        atomic_t                        mmap_count;
-       int                             mmap_locked;
-       struct user_struct              *mmap_user;
+
        struct ring_buffer              *rb;
        struct list_head                rb_entry;
 
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 839a24f..7ceb270 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -193,9 +193,6 @@ static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
 static void update_context_time(struct perf_event_context *ctx);
 static u64 perf_event_time(struct perf_event *event);
 
-static void ring_buffer_attach(struct perf_event *event,
-                              struct ring_buffer *rb);
-
 void __weak perf_event_print_debug(void)       { }
 
 extern __weak const char *perf_pmu_name(void)
@@ -2849,6 +2846,7 @@ static void free_event_rcu(struct rcu_head *head)
 }
 
 static void ring_buffer_put(struct ring_buffer *rb);
+static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb);
 
 static void free_event(struct perf_event *event)
 {
@@ -2873,15 +2871,30 @@ static void free_event(struct perf_event *event)
                if (has_branch_stack(event)) {
                        static_key_slow_dec_deferred(&perf_sched_events);
                        /* is system-wide event */
-                       if (!(event->attach_state & PERF_ATTACH_TASK))
+                       if (!(event->attach_state & PERF_ATTACH_TASK)) {
                                atomic_dec(&per_cpu(perf_branch_stack_events,
                                                    event->cpu));
+                       }
                }
        }
 
        if (event->rb) {
-               ring_buffer_put(event->rb);
-               event->rb = NULL;
+               struct ring_buffer *rb;
+
+               /*
+                * Can happen when we close an event with re-directed output.
+                *
+                * Since we have a 0 refcount, perf_mmap_close() will skip
+                * over us; possibly making our ring_buffer_put() the last.
+                */
+               mutex_lock(&event->mmap_mutex);
+               rb = event->rb;
+               if (rb) {
+                       rcu_assign_pointer(event->rb, NULL);
+                       ring_buffer_detach(event, rb);
+                       ring_buffer_put(rb); /* could be last */
+               }
+               mutex_unlock(&event->mmap_mutex);
        }
 
        if (is_cgroup_event(event))
@@ -3119,30 +3132,13 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
        unsigned int events = POLL_HUP;
 
        /*
-        * Race between perf_event_set_output() and perf_poll(): perf_poll()
-        * grabs the rb reference but perf_event_set_output() overrides it.
-        * Here is the timeline for two threads T1, T2:
-        * t0: T1, rb = rcu_dereference(event->rb)
-        * t1: T2, old_rb = event->rb
-        * t2: T2, event->rb = new rb
-        * t3: T2, ring_buffer_detach(old_rb)
-        * t4: T1, ring_buffer_attach(rb1)
-        * t5: T1, poll_wait(event->waitq)
-        *
-        * To avoid this problem, we grab mmap_mutex in perf_poll()
-        * thereby ensuring that the assignment of the new ring buffer
-        * and the detachment of the old buffer appear atomic to perf_poll()
+        * Pin the event->rb by taking event->mmap_mutex; otherwise
+        * perf_event_set_output() can swizzle our rb and make us miss wakeups.
         */
        mutex_lock(&event->mmap_mutex);
-
-       rcu_read_lock();
-       rb = rcu_dereference(event->rb);
-       if (rb) {
-               ring_buffer_attach(event, rb);
+       rb = event->rb;
+       if (rb)
                events = atomic_xchg(&rb->poll, 0);
-       }
-       rcu_read_unlock();
-
        mutex_unlock(&event->mmap_mutex);
 
        poll_wait(file, &event->waitq, wait);
@@ -3459,16 +3455,12 @@ static void ring_buffer_attach(struct perf_event *event,
                return;
 
        spin_lock_irqsave(&rb->event_lock, flags);
-       if (!list_empty(&event->rb_entry))
-               goto unlock;
-
-       list_add(&event->rb_entry, &rb->event_list);
-unlock:
+       if (list_empty(&event->rb_entry))
+               list_add(&event->rb_entry, &rb->event_list);
        spin_unlock_irqrestore(&rb->event_lock, flags);
 }
 
-static void ring_buffer_detach(struct perf_event *event,
-                              struct ring_buffer *rb)
+static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb)
 {
        unsigned long flags;
 
@@ -3487,13 +3479,10 @@ static void ring_buffer_wakeup(struct perf_event *event)
 
        rcu_read_lock();
        rb = rcu_dereference(event->rb);
-       if (!rb)
-               goto unlock;
-
-       list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
-               wake_up_all(&event->waitq);
-
-unlock:
+       if (rb) {
+               list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
+                       wake_up_all(&event->waitq);
+       }
        rcu_read_unlock();
 }
 
@@ -3522,18 +3511,10 @@ static struct ring_buffer *ring_buffer_get(struct perf_event *event)
 
 static void ring_buffer_put(struct ring_buffer *rb)
 {
-       struct perf_event *event, *n;
-       unsigned long flags;
-
        if (!atomic_dec_and_test(&rb->refcount))
                return;
 
-       spin_lock_irqsave(&rb->event_lock, flags);
-       list_for_each_entry_safe(event, n, &rb->event_list, rb_entry) {
-               list_del_init(&event->rb_entry);
-               wake_up_all(&event->waitq);
-       }
-       spin_unlock_irqrestore(&rb->event_lock, flags);
+       WARN_ON_ONCE(!list_empty(&rb->event_list));
 
        call_rcu(&rb->rcu_head, rb_free_rcu);
 }
@@ -3543,26 +3524,100 @@ static void perf_mmap_open(struct vm_area_struct *vma)
        struct perf_event *event = vma->vm_file->private_data;
 
        atomic_inc(&event->mmap_count);
+       atomic_inc(&event->rb->mmap_count);
 }
 
+/*
+ * A buffer can be mmap()ed multiple times; either directly through the same
+ * event, or through other events by use of perf_event_set_output().
+ *
+ * In order to undo the VM accounting done by perf_mmap() we need to destroy
+ * the buffer here, where we still have a VM context. This means we need
+ * to detach all events redirecting to us.
+ */
 static void perf_mmap_close(struct vm_area_struct *vma)
 {
        struct perf_event *event = vma->vm_file->private_data;
 
-       if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
-               unsigned long size = perf_data_size(event->rb);
-               struct user_struct *user = event->mmap_user;
-               struct ring_buffer *rb = event->rb;
+       struct ring_buffer *rb = event->rb;
+       struct user_struct *mmap_user = rb->mmap_user;
+       int mmap_locked = rb->mmap_locked;
+       unsigned long size = perf_data_size(rb);
+
+       atomic_dec(&rb->mmap_count);
+
+       if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
+               return;
+
+       /* Detach current event from the buffer. */
+       rcu_assign_pointer(event->rb, NULL);
+       ring_buffer_detach(event, rb);
+       mutex_unlock(&event->mmap_mutex);
+
+       /* If there's still other mmap()s of this buffer, we're done. */
+       if (atomic_read(&rb->mmap_count)) {
+               ring_buffer_put(rb); /* can't be last */
+               return;
+       }
+
+       /*
+        * No other mmap()s, detach from all other events that might redirect
+        * into the now unreachable buffer. Somewhat complicated by the
+        * fact that rb::event_lock otherwise nests inside mmap_mutex.
+        */
+again:
+       rcu_read_lock();
+       list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
+               if (!atomic_long_inc_not_zero(&event->refcount)) {
+                       /*
+                        * This event is en-route to free_event() which will
+                        * detach it and remove it from the list.
+                        */
+                       continue;
+               }
+               rcu_read_unlock();
 
-               atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
-               vma->vm_mm->pinned_vm -= event->mmap_locked;
-               rcu_assign_pointer(event->rb, NULL);
-               ring_buffer_detach(event, rb);
+               mutex_lock(&event->mmap_mutex);
+               /*
+                * Check we didn't race with perf_event_set_output() which can
+                * swizzle the rb from under us while we were waiting to
+                * acquire mmap_mutex.
+                *
+                * If we find a different rb; ignore this event, a next
+                * iteration will no longer find it on the list. We have to
+                * still restart the iteration to make sure we're not now
+                * iterating the wrong list.
+                */
+               if (event->rb == rb) {
+                       rcu_assign_pointer(event->rb, NULL);
+                       ring_buffer_detach(event, rb);
+                       ring_buffer_put(rb); /* can't be last, we still have one */
+               }
                mutex_unlock(&event->mmap_mutex);
+               put_event(event);
 
-               ring_buffer_put(rb);
-               free_uid(user);
+               /*
+                * Restart the iteration; either we're on the wrong list or
+                * destroyed its integrity by doing a deletion.
+                */
+               goto again;
        }
+       rcu_read_unlock();
+
+       /*
+        * It could be there's still a few 0-ref events on the list; they'll
+        * get cleaned up by free_event() -- they'll also still have their
+        * ref on the rb and will free it whenever they are done with it.
+        *
+        * Aside from that, this buffer is 'fully' detached and unmapped,
+        * undo the VM accounting.
+        */
+
+       atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm);
+       vma->vm_mm->pinned_vm -= mmap_locked;
+       free_uid(mmap_user);
+
+       ring_buffer_put(rb); /* could be last */
 }
 
 static const struct vm_operations_struct perf_mmap_vmops = {
@@ -3612,12 +3667,24 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
                return -EINVAL;
 
        WARN_ON_ONCE(event->ctx->parent_ctx);
+again:
        mutex_lock(&event->mmap_mutex);
        if (event->rb) {
-               if (event->rb->nr_pages == nr_pages)
-                       atomic_inc(&event->rb->refcount);
-               else
+               if (event->rb->nr_pages != nr_pages) {
                        ret = -EINVAL;
+                       goto unlock;
+               }
+
+               if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
+                       /*
+                        * Raced against perf_mmap_close() through
+                        * perf_event_set_output(). Try again, hope for better
+                        * luck.
+                        */
+                       mutex_unlock(&event->mmap_mutex);
+                       goto again;
+               }
+
                goto unlock;
        }
 
@@ -3658,12 +3725,16 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
                ret = -ENOMEM;
                goto unlock;
        }
-       rcu_assign_pointer(event->rb, rb);
+
+       atomic_set(&rb->mmap_count, 1);
+       rb->mmap_locked = extra;
+       rb->mmap_user = get_current_user();
 
        atomic_long_add(user_extra, &user->locked_vm);
-       event->mmap_locked = extra;
-       event->mmap_user = get_current_user();
-       vma->vm_mm->pinned_vm += event->mmap_locked;
+       vma->vm_mm->pinned_vm += extra;
+
+       ring_buffer_attach(event, rb);
+       rcu_assign_pointer(event->rb, rb);
 
        perf_event_update_userpage(event);
 
@@ -3672,7 +3743,11 @@ unlock:
                atomic_inc(&event->mmap_count);
        mutex_unlock(&event->mmap_mutex);
 
-       vma->vm_flags |= VM_RESERVED;
+       /*
+        * Since pinned accounting is per vm we cannot allow fork() to copy our
+        * vma.
+        */
+       vma->vm_flags |= VM_DONTCOPY | VM_RESERVED;
        vma->vm_ops = &perf_mmap_vmops;
 
        return ret;
@@ -6161,6 +6236,8 @@ set:
        if (atomic_read(&event->mmap_count))
                goto unlock;
 
+       old_rb = event->rb;
+
        if (output_event) {
                /* get the rb we want to redirect to */
                rb = ring_buffer_get(output_event);
@@ -6168,16 +6245,28 @@ set:
                        goto unlock;
        }
 
-       old_rb = event->rb;
-       rcu_assign_pointer(event->rb, rb);
        if (old_rb)
                ring_buffer_detach(event, old_rb);
+
+       if (rb)
+               ring_buffer_attach(event, rb);
+
+       rcu_assign_pointer(event->rb, rb);
+
+       if (old_rb) {
+               ring_buffer_put(old_rb);
+               /*
+                * Since we detached before setting the new rb (so that we
+                * could attach the new rb), we could have missed a wakeup.
+                * Provide it now.
+                */
+               wake_up_all(&event->waitq);
+       }
+
        ret = 0;
 unlock:
        mutex_unlock(&event->mmap_mutex);
 
-       if (old_rb)
-               ring_buffer_put(old_rb);
 out:
        return ret;
 }
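
Central to the perf_mmap_close() rewrite above is the "inc-not-zero" idiom: an event found on the buffer's RCU-protected event_list may already be on its way to free_event(), so the walker may only use it after atomically raising a still-positive refcount. A minimal stand-alone C11 model of that primitive (atomic_long_inc_not_zero() is the kernel's real helper; the rest of this sketch is illustrative):

        #include <stdatomic.h>
        #include <stdbool.h>

        struct obj {
                atomic_long refcount;   /* 0 means destruction is in flight */
        };

        /* Take a reference only while the object is still alive; never
         * resurrect a zero count the way a plain increment would. */
        static bool obj_tryget(struct obj *o)
        {
                long v = atomic_load(&o->refcount);

                while (v != 0)
                        if (atomic_compare_exchange_weak(&o->refcount, &v, v + 1))
                                return true;
                return false;
        }

On failure the walker simply gives up: a zero count means the object must be left for its destructor, which is exactly what the "en-route to free_event()" comment in the hunk describes.
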
diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
index bb38c4d..fc8bfcf 100644
--- a/kernel/events/hw_breakpoint.c
+++ b/kernel/events/hw_breakpoint.c
@@ -147,7 +147,7 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
                return;
        }
 
-       for_each_online_cpu(cpu) {
+       for_each_possible_cpu(cpu) {
                unsigned int nr;
 
                nr = per_cpu(nr_cpu_bp_pinned[type], cpu);
@@ -233,7 +233,7 @@ toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
        if (cpu >= 0) {
                toggle_bp_task_slot(bp, cpu, enable, type, weight);
        } else {
-               for_each_online_cpu(cpu)
+               for_each_possible_cpu(cpu)
                        toggle_bp_task_slot(bp, cpu, enable, type, weight);
        }
 
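
The hw_breakpoint change encodes a simple invariant: slot constraints must hold on every CPU a task could ever run on, and a task carrying a breakpoint can migrate to a CPU that was offline when the slots were accounted. Roughly, the corrected aggregation looks like this (a sketch of the loop's intent, not the full fetch_bp_busy_slots() logic):

        static unsigned int max_bp_pinned(enum bp_type_idx type)
        {
                unsigned int cpu, max = 0;

                for_each_possible_cpu(cpu) {    /* was: for_each_online_cpu() */
                        unsigned int nr = per_cpu(nr_cpu_bp_pinned[type], cpu);

                        if (nr > max)
                                max = nr;
                }
                return max;
        }
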
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index b0b107f..b400e64 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -30,6 +30,10 @@ struct ring_buffer {
        spinlock_t                      event_lock;
        struct list_head                event_list;
 
+       atomic_t                        mmap_count;
+       unsigned long                   mmap_locked;
+       struct user_struct              *mmap_user;
+
        struct perf_event_mmap_page     *user_page;
        void                            *data_pages[0];
 };
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index fa07aed..932420d 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -1880,6 +1880,9 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
        BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
                        conn, code, ident, dlen);
 
+       if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
+               return NULL;
+
        len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
        count = min_t(unsigned int, conn->mtu, len);
 
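
The new l2cap guard exists because count is clamped to conn->mtu while the L2CAP and command headers are written unconditionally afterwards: an MTU smaller than the two headers would leave the first skb too small for the data placed into it. A hedged sketch of the invariant the check establishes (the constants are the real L2CAP ones; the helper is hypothetical):

        static bool l2cap_cmd_fits(unsigned int mtu, unsigned int dlen)
        {
                unsigned int hdrs = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;

                if (mtu < hdrs)         /* the case the patch now rejects */
                        return false;
                /* min(mtu, hdrs + dlen) is then always >= hdrs, so the
                 * first fragment can hold both headers. */
                return true;
        }
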