On Mon, Jan 19, 2009 at 03:20:31PM -0800, Roland McGrath wrote:
> Thanks for working on this, Ananth.  (Btw, it's "embed.")
> 
> I think it would be less disruptive (and materially no different)
> to leave utrace_flags as it is.  That field is the one (and only)
> that is used in hot paths (or used anywhere outside utrace.c).
> It might in future get moved around to stay in a cache-hot part
> of task_struct, for example.
> 
> The long comment above struct utrace is really all about implementation
> details inside utrace.c and I don't think you should move that commentary
> to the header file.  Instead, put a comment saying that the contents of
> struct utrace and their use is entirely private to kernel/utrace.c and it
> is only defined in the header to make its size known for struct task_struct
> layout (and init_task.h).
> 
> I committed some cosmetic changes that will make for a little less flutter
> in your patch.

Here is V2 of the patch. Tested and works fine. The same two tests as
before fail on powerpc; all tests pass on x86, though there are some
occurrences of the ptrace.c WARN_ON.

Roland,
I've tried to tweak the comments appropriately. Please feel free to
modify them as you see fit.
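
In short, the core change is that struct utrace is now embedded in
task_struct instead of being allocated on first attach and freed via
RCU. From the sched.h hunk below:

	/* before: allocated on first attach, freed via call_rcu() */
	struct utrace *utrace;

	/* after: embedded in task_struct, initialized at fork */
	struct utrace utrace;

With the struct always present for the task's lifetime, the
rcu_dereference()/NULL checks on target->utrace go away, which is
where most of the deletions come from.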

Signed-off-by: Ananth N Mavinakayanahalli <ana...@in.ibm.com>

---
 include/linux/sched.h     |    3 
 include/linux/tracehook.h |   16 --
 include/linux/utrace.h    |   48 ++++--
 kernel/ptrace.c           |   11 +
 kernel/utrace.c           |  331 +++++++++++-----------------------------------
 5 files changed, 126 insertions(+), 283 deletions(-)

Index: utrace-20jan/include/linux/sched.h
===================================================================
--- utrace-20jan.orig/include/linux/sched.h
+++ utrace-20jan/include/linux/sched.h
@@ -88,6 +88,7 @@ struct sched_param {
 #include <linux/kobject.h>
 #include <linux/latencytop.h>
 #include <linux/cred.h>
+#include <linux/utrace.h>
 
 #include <asm/processor.h>
 
@@ -1267,7 +1268,7 @@ struct task_struct {
        seccomp_t seccomp;
 
 #ifdef CONFIG_UTRACE
-       struct utrace *utrace;
+       struct utrace utrace;
        unsigned long utrace_flags;
 #endif
 
Index: utrace-20jan/include/linux/utrace.h
===================================================================
--- utrace-20jan.orig/include/linux/utrace.h
+++ utrace-20jan/include/linux/utrace.h
@@ -33,13 +33,37 @@
 #include <linux/list.h>
 #include <linux/kref.h>
 #include <linux/signal.h>
-#include <linux/sched.h>
+#include <linux/pid.h>
 
 struct linux_binprm;
+struct linux_binfmt;
 struct pt_regs;
-struct utrace;
+struct task_struct;
 struct user_regset;
 struct user_regset_view;
+struct seq_file;
+
+/*
+ * Per-thread structure embedded in task_struct as task_struct.utrace.
+ *
+ * The structure and its contents are private to kernel/utrace.c; it is
+ * defined here only so its size is known for struct task_struct layout.
+ */
+struct utrace {
+       struct task_struct *cloning;
+       struct list_head attached, attaching;
+       spinlock_t lock;
+
+       struct utrace_attached_engine *reporting;
+
+       unsigned int stopped:1;
+       unsigned int report:1;
+       unsigned int interrupt:1;
+       unsigned int signal_handler:1;
+       unsigned int vfork_stop:1; /* need utrace_stop() before vfork wait */
+       unsigned int death:1;   /* in utrace_report_death() now */
+       unsigned int reap:1;    /* release_task() has run */
+};
 
 /*
  * Event bits passed to utrace_set_events().
@@ -133,7 +157,7 @@ static inline struct utrace *task_utrace
 {
        return NULL;
 }
-static inline void utrace_init_task(struct task_struct *child)
+static inline void utrace_init_task(struct task_struct *task)
 {
 }
 
@@ -144,22 +168,10 @@ static inline void task_utrace_proc_stat
 
 #else  /* CONFIG_UTRACE */
 
-static inline unsigned long task_utrace_flags(struct task_struct *task)
-{
-       return task->utrace_flags;
-}
-
-static inline struct utrace *task_utrace_struct(struct task_struct *task)
-{
-       return task->utrace;
-}
-
-static inline void utrace_init_task(struct task_struct *child)
-{
-       child->utrace_flags = 0;
-       child->utrace = NULL;
-}
+#define task_utrace_flags(task)                ((task)->utrace_flags)
+#define task_utrace_struct(task)       (&(task)->utrace)
 
+void utrace_init_task(struct task_struct *task);
 void task_utrace_proc_status(struct seq_file *m, struct task_struct *p);
 
 /**
Index: utrace-20jan/kernel/utrace.c
===================================================================
--- utrace-20jan.orig/kernel/utrace.c
+++ utrace-20jan/kernel/utrace.c
@@ -10,103 +10,56 @@
  * Red Hat Author: Roland McGrath.
  */
 
-#include <linux/utrace.h>
+#include <linux/sched.h>
 #include <linux/tracehook.h>
 #include <linux/regset.h>
 #include <asm/syscall.h>
 #include <linux/ptrace.h>
 #include <linux/err.h>
-#include <linux/sched.h>
 #include <linux/freezer.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/seq_file.h>
+#include <linux/utrace.h>
 
 
 /*
- * Per-thread structure task_struct.utrace points to.
+ * struct utrace, defined in utrace.h, is private to this file.  It's
+ * defined there just so struct task_struct knows its size.
  *
- * The task itself never has to worry about this going away after
- * some event is found set in task_struct.utrace_flags.
- * Once created, this pointer is changed only when the task is quiescent
- * (TASK_TRACED or TASK_STOPPED with the siglock held, or dead).
- *
- * For other parties, the pointer to this is protected by RCU and
- * task_lock.  Since call_rcu is never used while the thread is alive and
- * using this struct utrace, we can overlay the RCU data structure used
- * only for a dead struct with some local state used only for a live utrace
- * on an active thread.
- *
- * The two lists @attached and @attaching work together for smooth
- * asynchronous attaching with low overhead.  Modifying either list
- * requires @lock.  The @attaching list can be modified any time while
- * holding @lock.  New engines being attached always go on this list.
- *
- * The @attached list is what the task itself uses for its reporting
- * loops.  When the task itself is not quiescent, it can use the
- * @attached list without taking any lock.  Noone may modify the list
- * when the task is not quiescent.  When it is quiescent, that means
- * that it won't run again without taking @lock itself before using
- * the list.
+ * The two lists @utrace->attached and @utrace->attaching work together
+ * for smooth asynchronous attaching with low overhead.  Modifying
+ * either list requires @utrace->lock.  The @utrace->attaching list
+ * can be modified any time while holding @utrace->lock.  New engines
+ * being attached always go on this list.
+ *
+ * The @utrace->attached list is what the task itself uses for its
+ * reporting loops.  When the task itself is not quiescent, it can
+ * use the @utrace->attached list without taking any lock.  No one
+ * may modify the list when the task is not quiescent.  When it is
+ * quiescent, that means that it won't run again without taking
+ * @utrace->lock itself before using the list.
  *
  * At each place where we know the task is quiescent (or it's current),
- * while holding @lock, we call splice_attaching(), below.  This moves
- * the @attaching list members on to the end of the @attached list.
- * Since this happens at the start of any reporting pass, any new
- * engines attached asynchronously go on the stable @attached list
- * in time to have their callbacks seen.
- */
-struct utrace {
-       union {
-               struct rcu_head dead;
-               struct {
-                       struct task_struct *cloning;
-               } live;
-       } u;
-
-       struct list_head attached, attaching;
-       spinlock_t lock;
-
-       struct utrace_attached_engine *reporting;
-
-       unsigned int stopped:1;
-       unsigned int report:1;
-       unsigned int interrupt:1;
-       unsigned int signal_handler:1;
-       unsigned int vfork_stop:1; /* need utrace_stop() before vfork wait */
-       unsigned int death:1;   /* in utrace_report_death() now */
-       unsigned int reap:1;    /* release_task() has run */
-};
+ * while holding @utrace->lock, we call splice_attaching(), below.
+ * This moves the @utrace->attaching list members on to the end of
+ * the @utrace->attached list.  Since this happens at the start of
+ * any reporting pass, any new engines attached asynchronously go
+ * on the stable @utrace->attached list in time to have their
+ * callbacks seen.
+ */
 
-static struct kmem_cache *utrace_cachep;
 static struct kmem_cache *utrace_engine_cachep;
 static const struct utrace_engine_ops utrace_detached_ops; /* forward decl */
 
 static int __init utrace_init(void)
 {
-       utrace_cachep = KMEM_CACHE(utrace, SLAB_PANIC);
        utrace_engine_cachep = KMEM_CACHE(utrace_attached_engine, SLAB_PANIC);
        return 0;
 }
 module_init(utrace_init);
 
-static void utrace_free(struct rcu_head *rhead)
-{
-       struct utrace *utrace = container_of(rhead, struct utrace, u.dead);
-       kmem_cache_free(utrace_cachep, utrace);
-}
-
-/*
- * Called with utrace locked.  Clean it up and free it via RCU.
- */
-static void rcu_utrace_free(struct utrace *utrace)
-       __releases(utrace->lock)
-{
-       spin_unlock(&utrace->lock);
-       call_rcu(&utrace->u.dead, utrace_free);
-}
-
 /*
  * This is called with @utrace->lock held when the task is safely
  * quiescent, i.e. it won't consult utrace->attached without the lock.
@@ -172,8 +125,12 @@ static inline bool exclude_utrace(struct
 /*
  * Initialize the struct, initially zero'd.
  */
-static inline void init_utrace_struct(struct utrace *utrace)
+void utrace_init_task(struct task_struct *task)
 {
+       struct utrace *utrace = task_utrace_struct(task);
+
+       task->utrace_flags = 0;
+       utrace->cloning = NULL;
        INIT_LIST_HEAD(&utrace->attached);
        INIT_LIST_HEAD(&utrace->attaching);
        spin_lock_init(&utrace->lock);
@@ -181,8 +138,6 @@ static inline void init_utrace_struct(st
 
 /*
  * Called without locks.
- * Allocate target->utrace and install engine in it.  If we lose a race in
- * setting it up, return -EAGAIN.  This function mediates startup races.
  * The creating parent task has priority, and other callers will delay here
  * to let its call succeed and take the new utrace lock first.
  */
@@ -199,8 +154,8 @@ static int utrace_first_engine(struct ta
         * report_clone hook has had a chance to run.
         */
        if (target->flags & PF_STARTING) {
-               utrace = current->utrace;
-               if (!utrace || utrace->u.live.cloning != target) {
+               utrace = task_utrace_struct(current);
+               if (utrace->cloning != target) {
                        yield();
                        if (signal_pending(current))
                                return -ERESTARTNOINTR;
@@ -208,11 +163,7 @@ static int utrace_first_engine(struct ta
                }
        }
 
-       utrace = kmem_cache_zalloc(utrace_cachep, GFP_KERNEL);
-       if (unlikely(!utrace))
-               return -ENOMEM;
-       init_utrace_struct(utrace);
-
+       utrace = task_utrace_struct(target);
        list_add(&engine->entry, &utrace->attached);
 
        ret = -EAGAIN;
@@ -220,9 +171,7 @@ static int utrace_first_engine(struct ta
        task_lock(target);
        if (exclude_utrace(target)) {
                ret = -EBUSY;
-       } else if (likely(!target->utrace)) {
-               rcu_assign_pointer(target->utrace, utrace);
-
+       } else {
                /*
                 * The task_lock protects us against another thread doing
                 * the same thing.  We might still be racing against
@@ -240,30 +189,20 @@ static int utrace_first_engine(struct ta
                        spin_unlock(&utrace->lock);
                        return 0;
                }
-
-               /*
-                * The target has already been through release_task.
-                * Our caller will restart and notice it's too late now.
-                */
-               target->utrace = NULL;
        }
 
        /*
-        * Another engine attached first, so there is a struct already.
-        * A null return says to restart looking for the existing one.
+        * Another engine attached first.
+        * Restart looking for the existing one.
         */
        task_unlock(target);
        spin_unlock(&utrace->lock);
-       kmem_cache_free(utrace_cachep, utrace);
 
        return ret;
 }
 
 /*
- * Called with rcu_read_lock() held.
- * Lock utrace and verify that it's still installed in target->utrace.
- * If not, return -EAGAIN.
- * Then enqueue engine, or maybe don't if UTRACE_ATTACH_EXCLUSIVE.
+ * Enqueue engine, or maybe don't if UTRACE_ATTACH_EXCLUSIVE.
  */
 static int utrace_second_engine(struct task_struct *target,
                                struct utrace *utrace,
@@ -276,13 +215,7 @@ static int utrace_second_engine(struct t
 
        spin_lock(&utrace->lock);
 
-       if (unlikely(rcu_dereference(target->utrace) != utrace)) {
-               /*
-                * We lost a race with other CPUs doing a sequence
-                * of detach and attach before we got in.
-                */
-               ret = -EAGAIN;
-       } else if ((flags & UTRACE_ATTACH_EXCLUSIVE) &&
+       if ((flags & UTRACE_ATTACH_EXCLUSIVE) &&
                   unlikely(matching_engine(utrace, flags, ops, data))) {
                ret = -EEXIST;
        } else {
@@ -344,18 +277,15 @@ struct utrace_attached_engine *utrace_at
 {
        struct utrace *utrace;
        struct utrace_attached_engine *engine;
-       int ret;
+       int ret = 0;
 
 restart:
-       rcu_read_lock();
-       utrace = rcu_dereference(target->utrace);
-       smp_rmb();
+       utrace = task_utrace_struct(target);
        if (unlikely(target->exit_state == EXIT_DEAD)) {
                /*
                 * The target has already been reaped.
                 * Check this first; a race with reaping may lead to restart.
                 */
-               rcu_read_unlock();
                if (!(flags & UTRACE_ATTACH_CREATE))
                        return ERR_PTR(-ENOENT);
                return ERR_PTR(-ESRCH);
@@ -363,19 +293,14 @@ restart:
 
        if (!(flags & UTRACE_ATTACH_CREATE)) {
                engine = NULL;
-               if (utrace) {
-                       spin_lock(&utrace->lock);
-                       engine = matching_engine(utrace, flags, ops, data);
-                       if (engine)
-                               utrace_engine_get(engine);
-                       spin_unlock(&utrace->lock);
-               }
-               rcu_read_unlock();
+               spin_lock(&utrace->lock);
+               engine = matching_engine(utrace, flags, ops, data);
+               if (engine)
+                       utrace_engine_get(engine);
+               spin_unlock(&utrace->lock);
                return engine ?: ERR_PTR(-ENOENT);
        }
 
-       rcu_read_unlock();
-
        if (unlikely(!ops) || unlikely(ops == &utrace_detached_ops))
                return ERR_PTR(-EINVAL);
 
@@ -398,15 +323,12 @@ restart:
        engine->ops = ops;
        engine->data = data;
 
-       rcu_read_lock();
-       utrace = rcu_dereference(target->utrace);
-       if (!utrace) {
-               rcu_read_unlock();
+       if (!ret && list_empty(&utrace->attached)) {
+               /* No engine attached yet; set this one up as the first */
                ret = utrace_first_engine(target, engine);
        } else {
                ret = utrace_second_engine(target, utrace, engine,
                                           flags, ops, data);
-               rcu_read_unlock();
        }
 
        if (unlikely(ret)) {
@@ -555,28 +477,23 @@ static bool utrace_stop(struct task_stru
        try_to_freeze();
 
        killed = false;
-       rcu_read_lock();
-       utrace = rcu_dereference(task->utrace);
-       if (utrace) {
+       /*
+        * utrace_wakeup() clears @utrace->stopped before waking us up.
+        * We're officially awake if it's clear.
+        */
+       spin_lock(&utrace->lock);
+       if (unlikely(utrace->stopped)) {
                /*
-                * utrace_wakeup() clears @utrace->stopped before waking us up.
-                * We're officially awake if it's clear.
+                * If we're here with it still set, it must have been
+                * signal_wake_up() instead, waking us up for a SIGKILL.
                 */
-               spin_lock(&utrace->lock);
-               if (unlikely(utrace->stopped)) {
-                       /*
-                        * If we're here with it still set, it must have been
-                        * signal_wake_up() instead, waking us up for a SIGKILL.
-                        */
-                       spin_lock_irq(&task->sighand->siglock);
-                       WARN_ON(!sigismember(&task->pending.signal, SIGKILL));
-                       spin_unlock_irq(&task->sighand->siglock);
-                       utrace->stopped = 0;
-                       killed = true;
-               }
-               spin_unlock(&utrace->lock);
+               spin_lock_irq(&task->sighand->siglock);
+               WARN_ON(!sigismember(&task->pending.signal, SIGKILL));
+               spin_unlock_irq(&task->sighand->siglock);
+               utrace->stopped = 0;
+               killed = true;
        }
-       rcu_read_unlock();
+       spin_unlock(&utrace->lock);
 
        /*
         * While we were in TASK_TRACED, complete_signal() considered
@@ -613,6 +530,7 @@ static struct utrace *get_utrace_lock(st
        __acquires(utrace->lock)
 {
        struct utrace *utrace;
+       int ret = 0;
 
        /*
         * You must hold a ref to be making a call.  A call from within
@@ -644,33 +562,34 @@ static struct utrace *get_utrace_lock(st
                return attached ? ERR_PTR(-ESRCH) : ERR_PTR(-ERESTARTSYS);
        }
 
-       utrace = rcu_dereference(target->utrace);
+       utrace = task_utrace_struct(target);
        smp_rmb();
-       if (unlikely(!utrace) || unlikely(target->exit_state == EXIT_DEAD)) {
+       if (unlikely(target->exit_state == EXIT_DEAD)) {
                /*
                 * If all engines detached already, utrace is clear.
                 * Otherwise, we're called after utrace_release_task might
                 * have started.  A call to this engine's report_reap
                 * callback might already be in progress.
                 */
-               utrace = ERR_PTR(-ESRCH);
+               ret = -ESRCH;
        } else {
                spin_lock(&utrace->lock);
-               if (unlikely(rcu_dereference(target->utrace) != utrace) ||
-                   unlikely(!engine->ops) ||
+               if (unlikely(!engine->ops) ||
                    unlikely(engine->ops == &utrace_detached_ops)) {
                        /*
                         * By the time we got the utrace lock,
                         * it had been reaped or detached already.
                         */
                        spin_unlock(&utrace->lock);
-                       utrace = ERR_PTR(-ESRCH);
+                       ret = -ESRCH;
                        if (!attached && engine->ops == &utrace_detached_ops)
-                               utrace = ERR_PTR(-ERESTARTSYS);
+                               ret = -ERESTARTSYS;
                }
        }
        rcu_read_unlock();
 
+       if (ret)
+               return ERR_PTR(ret);
        return utrace;
 }
 
@@ -690,7 +609,7 @@ static void put_detached_list(struct lis
 
 /*
  * Called with utrace->lock held.
- * Notify and clean up all engines, then free utrace.
+ * Notify and clean up all engines.
  */
 static void utrace_reap(struct task_struct *target, struct utrace *utrace)
        __releases(utrace->lock)
@@ -726,33 +645,23 @@ restart:
                goto restart;
        }
 
-       rcu_utrace_free(utrace); /* Releases the lock.  */
-
+       spin_unlock(&utrace->lock);
        put_detached_list(&detached);
 }
 
 #define DEATH_EVENTS (UTRACE_EVENT(DEATH) | UTRACE_EVENT(QUIESCE))
 
 /*
- * Called by release_task.  After this, target->utrace must be cleared.
+ * Called by release_task.
  */
 void utrace_release_task(struct task_struct *target)
 {
-       struct utrace *utrace;
-
-       task_lock(target);
-       utrace = rcu_dereference(target->utrace);
-       rcu_assign_pointer(target->utrace, NULL);
-       task_unlock(target);
-
-       if (unlikely(!utrace))
-               return;
+       struct utrace *utrace = task_utrace_struct(target);
 
        spin_lock(&utrace->lock);
        /*
-        * If the list is empty, utrace is already on its way to be freed.
         * We raced with detach and we won the task_lock race but lost the
-        * utrace->lock race.  All we have to do is let RCU run.
+        * utrace->lock race.
         */
        if (likely(!list_empty(&utrace->attached))) {
                utrace->reap = 1;
@@ -1066,25 +975,8 @@ static void utrace_reset(struct task_str
        /*
         * If any engines are left, we're done.
         */
-       if (flags) {
-               spin_unlock(&utrace->lock);
-       } else {
-               /*
-                * No more engines, clear out the utrace.  Here we can race
-                * with utrace_release_task().  If it gets task_lock()
-                * first, then it cleans up this struct for us.
-                */
-
-               task_lock(task);
-               if (unlikely(task->utrace != utrace)) {
-                       task_unlock(task);
-                       spin_unlock(&utrace->lock);
-               } else {
-                       rcu_assign_pointer(task->utrace, NULL);
-                       task_unlock(task);
-                       rcu_utrace_free(utrace);
-               }
-
+       spin_unlock(&utrace->lock);
+       if (!flags) {
                if (action)
                        *action = UTRACE_RESUME;
        }
@@ -1699,18 +1591,18 @@ void utrace_report_clone(unsigned long c
 
        /*
         * We don't use the REPORT() macro here, because we need
-        * to clear utrace->u.live.cloning before finish_report().
+        * to clear utrace->cloning before finish_report().
         * After finish_report(), utrace can be a stale pointer
         * in cases when report.action is still UTRACE_RESUME.
         */
        start_report(utrace);
-       utrace->u.live.cloning = child;
+       utrace->cloning = child;
 
        REPORT_CALLBACKS(task, utrace, &report,
                         UTRACE_EVENT(CLONE), report_clone,
                         report.action, engine, task, clone_flags, child);
 
-       utrace->u.live.cloning = NULL;
+       utrace->cloning = NULL;
        finish_report(&report, task, utrace);
 
        /*
@@ -1766,25 +1658,13 @@ void utrace_report_jctl(int notify, int 
         * was finished, we might be here with utrace already
         * removed or in the middle of being removed.
         *
-        * RCU makes it safe to get the utrace->lock even if it's
-        * being freed.  Once we have that lock, either an external
-        * detach has finished and this struct has been freed, or
-        * else we know we are excluding any other detach attempt.
-        *
         * If we are indeed attached, then make sure we are no
         * longer considered stopped while we run callbacks.
         */
-       rcu_read_lock();
-       utrace = rcu_dereference(task->utrace);
-       if (unlikely(!utrace)) {
-               rcu_read_unlock();
-               return;
-       }
        spin_lock(&utrace->lock);
        utrace->stopped = 0;
        utrace->report = 0;
        spin_unlock(&utrace->lock);
-       rcu_read_unlock();
 
        REPORT(task, utrace, &report, UTRACE_EVENT(JCTL),
               report_jctl, was_stopped ? CLD_STOPPED : CLD_CONTINUED, what);
@@ -1987,7 +1867,7 @@ void utrace_resume(struct task_struct *t
  */
 bool utrace_interrupt_pending(void)
 {
-       return current->utrace->interrupt;
+       return current->utrace.interrupt;
 }
 
 /*
@@ -2028,7 +1908,7 @@ int utrace_get_signal(struct task_struct
        __releases(task->sighand->siglock)
        __acquires(task->sighand->siglock)
 {
-       struct utrace *utrace;
+       struct utrace *utrace = task_utrace_struct(task);
        struct k_sigaction *ka;
        INIT_REPORT(report);
        struct utrace_attached_engine *engine, *next;
@@ -2037,44 +1917,13 @@ int utrace_get_signal(struct task_struct
        u32 ret;
        int signr;
 
-       /*
-        * We could have been considered quiescent while we were in
-        * TASK_STOPPED, and detached asynchronously.  If we woke up
-        * and checked task->utrace_flags before that was finished,
-        * we might be here with utrace already removed or in the
-        * middle of being removed.
-        */
-       rcu_read_lock();
-       utrace = rcu_dereference(task->utrace);
-       if (unlikely(!utrace)) {
-               rcu_read_unlock();
-               return 0;
-       }
-
        if (utrace->interrupt || utrace->report || utrace->signal_handler) {
                /*
                 * We've been asked for an explicit report before we
                 * even check for pending signals.
                 */
-
                spin_unlock_irq(&task->sighand->siglock);
-
-               /*
-                * RCU makes it safe to get the utrace->lock even if
-                * it's being freed.  Once we have that lock, either an
-                * external detach has finished and this struct has been
-                * freed, or else we know we are excluding any other
-                * detach attempt.
-                */
                spin_lock(&utrace->lock);
-               rcu_read_unlock();
-
-               if (unlikely(task->utrace != utrace)) {
-                       spin_unlock(&utrace->lock);
-                       cond_resched();
-                       return -1;
-               }
-
                splice_attaching(utrace);
 
                if (unlikely(!utrace->interrupt) && unlikely(!utrace->report))
@@ -2122,7 +1971,6 @@ int utrace_get_signal(struct task_struct
                 * If noone is interested in intercepting signals,
                 * let the caller just dequeue them normally.
                 */
-               rcu_read_unlock();
                return 0;
        } else {
                if (unlikely(utrace->stopped)) {
@@ -2141,17 +1989,9 @@ int utrace_get_signal(struct task_struct
                         */
                        spin_unlock_irq(&task->sighand->siglock);
                        spin_lock(&utrace->lock);
-                       rcu_read_unlock();
-                       if (unlikely(task->utrace != utrace)) {
-                               spin_unlock(&utrace->lock);
-                               cond_resched();
-                               return -1;
-                       }
                        utrace->stopped = 0;
                        spin_unlock(&utrace->lock);
                        spin_lock_irq(&task->sighand->siglock);
-               } else {
-                       rcu_read_unlock();
                }
 
                /*
@@ -2542,11 +2382,7 @@ struct task_struct *utrace_tracer_task(s
        struct utrace_attached_engine *engine;
        const struct utrace_engine_ops *ops;
        struct task_struct *tracer = NULL;
-       struct utrace *utrace;
-
-       utrace = rcu_dereference(target->utrace);
-       if (!utrace)
-               return NULL;
+       struct utrace *utrace = task_utrace_struct(target);
 
        list_for_each_safe(pos, next, &utrace->attached) {
                engine = list_entry(pos, struct utrace_attached_engine,
@@ -2587,9 +2423,8 @@ int utrace_unsafe_exec(struct task_struc
  */
 void task_utrace_proc_status(struct seq_file *m, struct task_struct *p)
 {
-       struct utrace *utrace = rcu_dereference(p->utrace);
-       if (likely(!utrace))
-               return;
+       struct utrace *utrace = task_utrace_struct(p);
+
        seq_printf(m, "Utrace: %lx%s%s%s\n",
                   p->utrace_flags,
                   utrace->stopped ? " (stopped)" : "",
Index: utrace-20jan/include/linux/tracehook.h
===================================================================
--- utrace-20jan.orig/include/linux/tracehook.h
+++ utrace-20jan/include/linux/tracehook.h
@@ -370,8 +370,7 @@ static inline void tracehook_report_vfor
 static inline void tracehook_prepare_release_task(struct task_struct *task)
 {
        smp_mb();
-       if (task_utrace_struct(task) != NULL)
-               utrace_release_task(task);
+       utrace_release_task(task);
 }
 
 /**
@@ -385,21 +384,8 @@ static inline void tracehook_prepare_rel
  */
 static inline void tracehook_finish_release_task(struct task_struct *task)
 {
-       int bad = 0;
        ptrace_release_task(task);
        BUG_ON(task->exit_state != EXIT_DEAD);
-       if (unlikely(task_utrace_struct(task) != NULL)) {
-               /*
-                * In a race condition, utrace_attach() will temporarily set
-                * it, but then check @task->exit_state and clear it.  It does
-                * all this under task_lock(), so we take the lock to check
-                * that there is really a bug and not just that known race.
-                */
-               task_lock(task);
-               bad = unlikely(task_utrace_struct(task) != NULL);
-               task_unlock(task);
-       }
-       BUG_ON(bad);
 }
 
 /**
Index: utrace-20jan/kernel/ptrace.c
===================================================================
--- utrace-20jan.orig/kernel/ptrace.c
+++ utrace-20jan/kernel/ptrace.c
@@ -778,7 +778,16 @@ static inline bool exclude_ptrace(struct
  */
 static inline bool exclude_ptrace(struct task_struct *task)
 {
-       return unlikely(!!task_utrace_struct(task));
+       struct utrace *utrace = task_utrace_struct(task);
+
+       spin_lock(&utrace->lock);
+       if (list_empty(&utrace->attached) && list_empty(&utrace->attaching)) {
+               spin_unlock(&utrace->lock);
+               return false;
+       }
+
+       spin_unlock(&utrace->lock);
+       return true;
 }
 #endif
 
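
A note for reviewers: the attached/attaching scheme that the comment in
utrace.c describes still reduces to a single locked splice once the task
is known to be quiescent. A minimal sketch of what splice_attaching()
amounts to (illustrative only, not part of this patch; the real helper
lives in kernel/utrace.c):

	/*
	 * Called with @utrace->lock held and the task quiescent: move
	 * engines that attached asynchronously onto the end of the
	 * stable @attached list so the next reporting pass sees them.
	 */
	static void splice_attaching(struct utrace *utrace)
	{
		list_splice_tail_init(&utrace->attaching, &utrace->attached);
	}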
