Temporarily revert the following patches to keep utrace/utrace-ptrace working:

        40ae717d1e78d982bd469b2013a4cbf4ec1ca434
        ptrace: fix signal->wait_chldexit usage in 
task_clear_group_stop_trapping()

        321fb561971ba0f10ce18c0f8a4b9fbfc7cef4b9
        ptrace: ptrace_check_attach() should not do s/STOPPED/TRACED/

        ee77f075921730b2b465880f9fd4367003bdab39
        signal: Turn SIGNAL_STOP_DEQUEUED into GROUP_STOP_DEQUEUED

        780006eac2fe7f4d2582da16a096e5a44c4767ff
        signal: do_signal_stop: Remove the unneeded 
task_clear_group_stop_pending()

        244056f9dbbc6dc4126a301c745fa3dd67d8af3c
        job control: Don't send duplicate job control stop notification while 
ptraced

        ceb6bd67f9b9db765e1c29405f26e8460391badd
        job control: Notify the real parent of job control events regardless of 
ptrace

        62bcf9d992ecc19ea4f37ff57ee0b3333e3e843e
        job control: Job control stop notifications should always go to the 
real parent

        75b95953a56969a990e6ce154b260be83818fe71
        job control: Add @for_ptrace to do_notify_parent_cldstop()

        45cb24a1da53beb70f09efccc0373f6a47a9efe0
        job control: Allow access to job control events through ptracees

        9b84cca2564b9a5b2d064fb44d2a55a5b44473a0
        job control: Fix ptracer wait(2) hang and explain notask_error clearing

        408a37de6c95832a4880a88a742f89f0cc554d06
        job control: Don't set group_stop exit_code if re-entering job control 
stop

        0e9f0a4abfd80f8adca624538d479d95159b16d8
        ptrace: Always put ptracee into appropriate execution state

        e3bd058f62896ec7a2c605ed62a0a811e9147947
        ptrace: Collapse ptrace_untrace() into __ptrace_unlink()

        d79fdd6d96f46fabb779d86332e3677c6f5c2a4f
        ptrace: Clean transitions between TASK_STOPPED and TRACED

        5224fa3660ad3881d2f2ad726d22614117963f10
        ptrace: Make do_signal_stop() use ptrace_stop() if the task is being 
ptraced

        0ae8ce1c8c5b9007ce6bfc83ec2aa0dfce5bbed3
        ptrace: Participate in group stop from ptrace_stop() iff the task is 
trapping for group stop

        39efa3ef3a376a4e53de2f82fc91182459d34200
        signal: Use GROUP_STOP_PENDING to stop once for a single group stop

        e5c1902e9260a0075ea52cb5ef627a8d9aaede89
        signal: Fix premature completion of group stop when interfered by ptrace

        fe1bc6a0954611b806f9e158eb0817cf8ba21660
        ptrace: Add @why to ptrace_stop()

        edf2ed153bcae52de70db00a98b0e81a5668e563
        ptrace: Kill tracehook_notify_jctl()

This obviously reverts some user-visible fixes, but the fixed problems
are very old and minor, and they were never reported. In the long term
we need another solution.

Signed-off-by: Oleg Nesterov <o...@redhat.com>
---
 fs/exec.c                 |    1 -
 include/linux/sched.h     |   17 +--
 include/linux/tracehook.h |   27 ++++
 kernel/exit.c             |   77 ++---------
 kernel/ptrace.c           |  116 +++++-----------
 kernel/signal.c           |  339 +++++++++------------------------------------
 6 files changed, 148 insertions(+), 429 deletions(-)

diff --git a/fs/exec.c b/fs/exec.c
index 6075a1e..82b5379 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1769,7 +1769,6 @@ static int zap_process(struct task_struct *start, int 
exit_code)
 
        t = start;
        do {
-               task_clear_group_stop_pending(t);
                if (t != current && t->mm) {
                        sigaddset(&t->pending.signal, SIGKILL);
                        signal_wake_up(t, 1);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index a837b20..6c42e24 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -663,8 +663,9 @@ struct signal_struct {
  * Bits in flags field of signal_struct.
  */
 #define SIGNAL_STOP_STOPPED    0x00000001 /* job control stop in effect */
-#define SIGNAL_STOP_CONTINUED  0x00000002 /* SIGCONT since WCONTINUED reap */
-#define SIGNAL_GROUP_EXIT      0x00000004 /* group exit in progress */
+#define SIGNAL_STOP_DEQUEUED   0x00000002 /* stop signal dequeued */
+#define SIGNAL_STOP_CONTINUED  0x00000004 /* SIGCONT since WCONTINUED reap */
+#define SIGNAL_GROUP_EXIT      0x00000008 /* group exit in progress */
 /*
  * Pending notifications to parent.
  */
@@ -1283,7 +1284,6 @@ struct task_struct {
        int exit_state;
        int exit_code, exit_signal;
        int pdeath_signal;  /*  The signal sent when the parent dies  */
-       unsigned int group_stop;        /* GROUP_STOP_*, siglock protected */
        /* ??? */
        unsigned int personality;
        unsigned did_exec:1;
@@ -1803,17 +1803,6 @@ extern void thread_group_times(struct task_struct *p, 
cputime_t *ut, cputime_t *
 #define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
 #define used_math() tsk_used_math(current)
 
-/*
- * task->group_stop flags
- */
-#define GROUP_STOP_SIGMASK     0xffff    /* signr of the last group stop */
-#define GROUP_STOP_PENDING     (1 << 16) /* task should stop for group stop */
-#define GROUP_STOP_CONSUME     (1 << 17) /* consume group stop count */
-#define GROUP_STOP_TRAPPING    (1 << 18) /* switching from STOPPED to TRACED */
-#define GROUP_STOP_DEQUEUED    (1 << 19) /* stop signal dequeued */
-
-extern void task_clear_group_stop_pending(struct task_struct *task);
-
 #ifdef CONFIG_PREEMPT_RCU
 
 #define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
index e95f523..ebcfa4e 100644
--- a/include/linux/tracehook.h
+++ b/include/linux/tracehook.h
@@ -469,6 +469,33 @@ static inline int tracehook_get_signal(struct task_struct 
*task,
 }
 
 /**
+ * tracehook_notify_jctl - report about job control stop/continue
+ * @notify:            zero, %CLD_STOPPED or %CLD_CONTINUED
+ * @why:               %CLD_STOPPED or %CLD_CONTINUED
+ *
+ * This is called when we might call do_notify_parent_cldstop().
+ *
+ * @notify is zero if we would not ordinarily send a %SIGCHLD,
+ * or is the %CLD_STOPPED or %CLD_CONTINUED .si_code for %SIGCHLD.
+ *
+ * @why is %CLD_STOPPED when about to stop for job control;
+ * we are already in %TASK_STOPPED state, about to call schedule().
+ * It might also be that we have just exited (check %PF_EXITING),
+ * but need to report that a group-wide stop is complete.
+ *
+ * @why is %CLD_CONTINUED when waking up after job control stop and
+ * ready to make a delayed @notify report.
+ *
+ * Return the %CLD_* value for %SIGCHLD, or zero to generate no signal.
+ *
+ * Called with the siglock held.
+ */
+static inline int tracehook_notify_jctl(int notify, int why)
+{
+       return notify ?: (current->ptrace & PT_PTRACED) ? why : 0;
+}
+
+/**
  * tracehook_finish_jctl - report about return from job control stop
  *
  * This is called by do_signal_stop() after wakeup.
diff --git a/kernel/exit.c b/kernel/exit.c
index f2b321b..a86fca4 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1556,80 +1556,31 @@ static int wait_consider_task(struct wait_opts *wo, int 
ptrace,
        if (p->exit_state == EXIT_DEAD)
                return 0;
 
-       /* slay zombie? */
-       if (p->exit_state == EXIT_ZOMBIE) {
+       if (likely(!ptrace) && unlikely(task_ptrace(p))) {
                /*
-                * A zombie ptracee is only visible to its ptracer.
-                * Notification and reaping will be cascaded to the real
-                * parent when the ptracer detaches.
-                */
-               if (likely(!ptrace) && unlikely(task_ptrace(p))) {
-                       /* it will become visible, clear notask_error */
-                       wo->notask_error = 0;
-                       return 0;
-               }
-
-               /* we don't reap group leaders with subthreads */
-               if (!delay_group_leader(p))
-                       return wait_task_zombie(wo, p);
-
-               /*
-                * Allow access to stopped/continued state via zombie by
-                * falling through.  Clearing of notask_error is complex.
-                *
-                * When !@ptrace:
-                *
-                * If WEXITED is set, notask_error should naturally be
-                * cleared.  If not, subset of WSTOPPED|WCONTINUED is set,
-                * so, if there are live subthreads, there are events to
-                * wait for.  If all subthreads are dead, it's still safe
-                * to clear - this function will be called again in finite
-                * amount time once all the subthreads are released and
-                * will then return without clearing.
-                *
-                * When @ptrace:
-                *
-                * Stopped state is per-task and thus can't change once the
-                * target task dies.  Only continued and exited can happen.
-                * Clear notask_error if WCONTINUED | WEXITED.
-                */
-               if (likely(!ptrace) || (wo->wo_flags & (WCONTINUED | WEXITED)))
-                       wo->notask_error = 0;
-       } else {
-               /*
-                * If @p is ptraced by a task in its real parent's group,
-                * hide group stop/continued state when looking at @p as
-                * the real parent; otherwise, a single stop can be
-                * reported twice as group and ptrace stops.
-                *
-                * If a ptracer wants to distinguish the two events for its
-                * own children, it should create a separate process which
-                * takes the role of real parent.
-                */
-               if (likely(!ptrace) && task_ptrace(p) &&
-                   same_thread_group(p->parent, p->real_parent))
-                       return 0;
-
-               /*
-                * @p is alive and it's gonna stop, continue or exit, so
-                * there always is something to wait for.
+                * This child is hidden by ptrace.
+                * We aren't allowed to see it now, but eventually we will.
                 */
                wo->notask_error = 0;
+               return 0;
        }
 
        /*
-        * Wait for stopped.  Depending on @ptrace, different stopped state
-        * is used and the two don't interact with each other.
+        * We don't reap group leaders with subthreads.
         */
+       if (p->exit_state == EXIT_ZOMBIE && !delay_group_leader(p))
+               return wait_task_zombie(wo, p);
+
+       /*
+        * It's stopped or running now, so it might
+        * later continue, exit, or stop again.
+        */
+       wo->notask_error = 0;
+
        ret = wait_task_stopped(wo, ptrace, p);
        if (ret)
                return ret;
 
-       /*
-        * Wait for continued.  There's only one continued state and the
-        * ptracer can consume it which can confuse the real parent.  Don't
-        * use WCONTINUED from ptracer.  You don't need or want it.
-        */
        return wait_task_continued(wo, p);
 }
 
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 2df1157..5cb3003 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -38,33 +38,35 @@ void __ptrace_link(struct task_struct *child, struct 
task_struct *new_parent)
        child->parent = new_parent;
 }
 
-/**
- * __ptrace_unlink - unlink ptracee and restore its execution state
- * @child: ptracee to be unlinked
- *
- * Remove @child from the ptrace list, move it back to the original parent,
- * and restore the execution state so that it conforms to the group stop
- * state.
- *
- * Unlinking can happen via two paths - explicit PTRACE_DETACH or ptracer
- * exiting.  For PTRACE_DETACH, unless the ptracee has been killed between
- * ptrace_check_attach() and here, it's guaranteed to be in TASK_TRACED.
- * If the ptracer is exiting, the ptracee can be in any state.
- *
- * After detach, the ptracee should be in a state which conforms to the
- * group stop.  If the group is stopped or in the process of stopping, the
- * ptracee should be put into TASK_STOPPED; otherwise, it should be woken
- * up from TASK_TRACED.
- *
- * If the ptracee is in TASK_TRACED and needs to be moved to TASK_STOPPED,
- * it goes through TRACED -> RUNNING -> STOPPED transition which is similar
- * to but in the opposite direction of what happens while attaching to a
- * stopped task.  However, in this direction, the intermediate RUNNING
- * state is not hidden even from the current ptracer and if it immediately
- * re-attaches and performs a WNOHANG wait(2), it may fail.
+/*
+ * Turn a tracing stop into a normal stop now, since with no tracer there
+ * would be no way to wake it up with SIGCONT or SIGKILL.  If there was a
+ * signal sent that would resume the child, but didn't because it was in
+ * TASK_TRACED, resume it now.
+ * Requires that irqs be disabled.
+ */
+static void ptrace_untrace(struct task_struct *child)
+{
+       spin_lock(&child->sighand->siglock);
+       if (task_is_traced(child)) {
+               /*
+                * If the group stop is completed or in progress,
+                * this thread was already counted as stopped.
+                */
+               if (child->signal->flags & SIGNAL_STOP_STOPPED ||
+                   child->signal->group_stop_count)
+                       __set_task_state(child, TASK_STOPPED);
+               else
+                       signal_wake_up(child, 1);
+       }
+       spin_unlock(&child->sighand->siglock);
+}
+
+/*
+ * unptrace a task: move it back to its original parent and
+ * remove it from the ptrace list.
  *
- * CONTEXT:
- * write_lock_irq(tasklist_lock)
+ * Must be called with the tasklist lock write-held.
  */
 void __ptrace_unlink(struct task_struct *child)
 {
@@ -74,27 +76,8 @@ void __ptrace_unlink(struct task_struct *child)
        child->parent = child->real_parent;
        list_del_init(&child->ptrace_entry);
 
-       spin_lock(&child->sighand->siglock);
-
-       /*
-        * Reinstate GROUP_STOP_PENDING if group stop is in effect and
-        * @child isn't dead.
-        */
-       if (!(child->flags & PF_EXITING) &&
-           (child->signal->flags & SIGNAL_STOP_STOPPED ||
-            child->signal->group_stop_count))
-               child->group_stop |= GROUP_STOP_PENDING;
-
-       /*
-        * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
-        * @child in the butt.  Note that @resume should be used iff @child
-        * is in TASK_TRACED; otherwise, we might unduly disrupt
-        * TASK_KILLABLE sleeps.
-        */
-       if (child->group_stop & GROUP_STOP_PENDING || task_is_traced(child))
-               signal_wake_up(child, task_is_traced(child));
-
-       spin_unlock(&child->sighand->siglock);
+       if (task_is_traced(child))
+               ptrace_untrace(child);
 }
 
 /*
@@ -113,14 +96,16 @@ int ptrace_check_attach(struct task_struct *child, int 
kill)
         */
        read_lock(&tasklist_lock);
        if ((child->ptrace & PT_PTRACED) && child->parent == current) {
+               ret = 0;
                /*
                 * child->sighand can't be NULL, release_task()
                 * does ptrace_unlink() before __exit_signal().
                 */
                spin_lock_irq(&child->sighand->siglock);
-               WARN_ON_ONCE(task_is_stopped(child));
-               if (task_is_traced(child) || kill)
-                       ret = 0;
+               if (task_is_stopped(child))
+                       child->state = TASK_TRACED;
+               else if (!task_is_traced(child) && !kill)
+                       ret = -ESRCH;
                spin_unlock_irq(&child->sighand->siglock);
        }
        read_unlock(&tasklist_lock);
@@ -184,7 +169,6 @@ bool ptrace_may_access(struct task_struct *task, unsigned 
int mode)
 
 static int ptrace_attach(struct task_struct *task)
 {
-       bool wait_trap = false;
        int retval;
 
        audit_ptrace(task);
@@ -224,42 +208,12 @@ static int ptrace_attach(struct task_struct *task)
        __ptrace_link(task, current);
        send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);
 
-       spin_lock(&task->sighand->siglock);
-
-       /*
-        * If the task is already STOPPED, set GROUP_STOP_PENDING and
-        * TRAPPING, and kick it so that it transits to TRACED.  TRAPPING
-        * will be cleared if the child completes the transition or any
-        * event which clears the group stop states happens.  We'll wait
-        * for the transition to complete before returning from this
-        * function.
-        *
-        * This hides STOPPED -> RUNNING -> TRACED transition from the
-        * attaching thread but a different thread in the same group can
-        * still observe the transient RUNNING state.  IOW, if another
-        * thread's WNOHANG wait(2) on the stopped tracee races against
-        * ATTACH, the wait(2) may fail due to the transient RUNNING.
-        *
-        * The following task_is_stopped() test is safe as both transitions
-        * in and out of STOPPED are protected by siglock.
-        */
-       if (task_is_stopped(task)) {
-               task->group_stop |= GROUP_STOP_PENDING | GROUP_STOP_TRAPPING;
-               signal_wake_up(task, 1);
-               wait_trap = true;
-       }
-
-       spin_unlock(&task->sighand->siglock);
-
        retval = 0;
 unlock_tasklist:
        write_unlock_irq(&tasklist_lock);
 unlock_creds:
        mutex_unlock(&task->signal->cred_guard_mutex);
 out:
-       if (wait_trap)
-               wait_event(current->signal->wait_chldexit,
-                          !(task->group_stop & GROUP_STOP_TRAPPING));
        return retval;
 }
 
diff --git a/kernel/signal.c b/kernel/signal.c
index ff76786..23a31b6 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -124,7 +124,7 @@ static inline int has_pending_signals(sigset_t *signal, 
sigset_t *blocked)
 
 static int recalc_sigpending_tsk(struct task_struct *t)
 {
-       if ((t->group_stop & GROUP_STOP_PENDING) ||
+       if (t->signal->group_stop_count > 0 ||
            PENDING(&t->pending, &t->blocked) ||
            PENDING(&t->signal->shared_pending, &t->blocked)) {
                set_tsk_thread_flag(t, TIF_SIGPENDING);
@@ -223,83 +223,6 @@ static inline void print_dropped_signal(int sig)
                                current->comm, current->pid, sig);
 }
 
-/**
- * task_clear_group_stop_trapping - clear group stop trapping bit
- * @task: target task
- *
- * If GROUP_STOP_TRAPPING is set, a ptracer is waiting for us.  Clear it
- * and wake up the ptracer.  Note that we don't need any further locking.
- * @task->siglock guarantees that @task->parent points to the ptracer.
- *
- * CONTEXT:
- * Must be called with @task->sighand->siglock held.
- */
-static void task_clear_group_stop_trapping(struct task_struct *task)
-{
-       if (unlikely(task->group_stop & GROUP_STOP_TRAPPING)) {
-               task->group_stop &= ~GROUP_STOP_TRAPPING;
-               __wake_up_sync_key(&task->parent->signal->wait_chldexit,
-                                  TASK_UNINTERRUPTIBLE, 1, task);
-       }
-}
-
-/**
- * task_clear_group_stop_pending - clear pending group stop
- * @task: target task
- *
- * Clear group stop states for @task.
- *
- * CONTEXT:
- * Must be called with @task->sighand->siglock held.
- */
-void task_clear_group_stop_pending(struct task_struct *task)
-{
-       task->group_stop &= ~(GROUP_STOP_PENDING | GROUP_STOP_CONSUME |
-                             GROUP_STOP_DEQUEUED);
-}
-
-/**
- * task_participate_group_stop - participate in a group stop
- * @task: task participating in a group stop
- *
- * @task has GROUP_STOP_PENDING set and is participating in a group stop.
- * Group stop states are cleared and the group stop count is consumed if
- * %GROUP_STOP_CONSUME was set.  If the consumption completes the group
- * stop, the appropriate %SIGNAL_* flags are set.
- *
- * CONTEXT:
- * Must be called with @task->sighand->siglock held.
- *
- * RETURNS:
- * %true if group stop completion should be notified to the parent, %false
- * otherwise.
- */
-static bool task_participate_group_stop(struct task_struct *task)
-{
-       struct signal_struct *sig = task->signal;
-       bool consume = task->group_stop & GROUP_STOP_CONSUME;
-
-       WARN_ON_ONCE(!(task->group_stop & GROUP_STOP_PENDING));
-
-       task_clear_group_stop_pending(task);
-
-       if (!consume)
-               return false;
-
-       if (!WARN_ON_ONCE(sig->group_stop_count == 0))
-               sig->group_stop_count--;
-
-       /*
-        * Tell the caller to notify completion iff we are entering into a
-        * fresh group stop.  Read comment in do_signal_stop() for details.
-        */
-       if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
-               sig->flags = SIGNAL_STOP_STOPPED;
-               return true;
-       }
-       return false;
-}
-
 /*
  * allocate a new signal queue record
  * - this may be called without locks if and only if t == current, otherwise an
@@ -604,7 +527,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, 
siginfo_t *info)
                 * is to alert stop-signal processing code when another
                 * processor has come along and cleared the flag.
                 */
-               current->group_stop |= GROUP_STOP_DEQUEUED;
+               tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
        }
        if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
                /*
@@ -809,7 +732,6 @@ static int prepare_signal(int sig, struct task_struct *p, 
int from_ancestor_ns)
                rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
                t = p;
                do {
-                       task_clear_group_stop_pending(t);
                        rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
                        wake_up_state(t, __TASK_STOPPED);
                } while_each_thread(p, t);
@@ -837,6 +759,13 @@ static int prepare_signal(int sig, struct task_struct *p, 
int from_ancestor_ns)
                        signal->flags = why | SIGNAL_STOP_CONTINUED;
                        signal->group_stop_count = 0;
                        signal->group_exit_code = 0;
+               } else {
+                       /*
+                        * We are not stopped, but there could be a stop
+                        * signal in the middle of being processed after
+                        * being removed from the queue.  Clear that too.
+                        */
+                       signal->flags &= ~SIGNAL_STOP_DEQUEUED;
                }
        }
 
@@ -925,7 +854,6 @@ static void complete_signal(int sig, struct task_struct *p, 
int group)
                        signal->group_stop_count = 0;
                        t = p;
                        do {
-                               task_clear_group_stop_pending(t);
                                sigaddset(&t->pending.signal, SIGKILL);
                                signal_wake_up(t, 1);
                        } while_each_thread(p, t);
@@ -1160,7 +1088,6 @@ int zap_other_threads(struct task_struct *p)
        p->signal->group_stop_count = 0;
 
        while_each_thread(p, t) {
-               task_clear_group_stop_pending(t);
                count++;
 
                /* Don't bother with already dead threads */
@@ -1588,30 +1515,16 @@ int do_notify_parent(struct task_struct *tsk, int sig)
        return ret;
 }
 
-/**
- * do_notify_parent_cldstop - notify parent of stopped/continued state change
- * @tsk: task reporting the state change
- * @for_ptracer: the notification is for ptracer
- * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
- *
- * Notify @tsk's parent that the stopped/continued state has changed.  If
- * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
- * If %true, @tsk reports to @tsk->parent which should be the ptracer.
- *
- * CONTEXT:
- * Must be called with tasklist_lock at least read locked.
- */
-static void do_notify_parent_cldstop(struct task_struct *tsk,
-                                    bool for_ptracer, int why)
+static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
 {
        struct siginfo info;
        unsigned long flags;
        struct task_struct *parent;
        struct sighand_struct *sighand;
 
-       if (for_ptracer) {
+       if (task_ptrace(tsk))
                parent = tsk->parent;
-       } else {
+       else {
                tsk = tsk->group_leader;
                parent = tsk->real_parent;
        }
@@ -1687,15 +1600,6 @@ static int sigkill_pending(struct task_struct *tsk)
 }
 
 /*
- * Test whether the target task of the usual cldstop notification - the
- * real_parent of @child - is in the same group as the ptracer.
- */
-static bool real_parent_is_ptracer(struct task_struct *child)
-{
-       return same_thread_group(child->parent, child->real_parent);
-}
-
-/*
  * This must be called with current->sighand->siglock held.
  *
  * This should be the path for all ptrace stops.
@@ -1706,12 +1610,10 @@ static bool real_parent_is_ptracer(struct task_struct 
*child)
  * If we actually decide not to stop at all because the tracer
  * is gone, we keep current->exit_code unless clear_code.
  */
-static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t 
*info)
+static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
        __releases(&current->sighand->siglock)
        __acquires(&current->sighand->siglock)
 {
-       bool gstop_done = false;
-
        if (arch_ptrace_stop_needed(exit_code, info)) {
                /*
                 * The arch code has something special to do before a
@@ -1732,49 +1634,21 @@ static void ptrace_stop(int exit_code, int why, int 
clear_code, siginfo_t *info)
        }
 
        /*
-        * If @why is CLD_STOPPED, we're trapping to participate in a group
-        * stop.  Do the bookkeeping.  Note that if SIGCONT was delievered
-        * while siglock was released for the arch hook, PENDING could be
-        * clear now.  We act as if SIGCONT is received after TASK_TRACED
-        * is entered - ignore it.
+        * If there is a group stop in progress,
+        * we must participate in the bookkeeping.
         */
-       if (why == CLD_STOPPED && (current->group_stop & GROUP_STOP_PENDING))
-               gstop_done = task_participate_group_stop(current);
+       if (current->signal->group_stop_count > 0)
+               --current->signal->group_stop_count;
 
        current->last_siginfo = info;
        current->exit_code = exit_code;
 
-       /*
-        * TRACED should be visible before TRAPPING is cleared; otherwise,
-        * the tracer might fail do_wait().
-        */
-       set_current_state(TASK_TRACED);
-
-       /*
-        * We're committing to trapping.  Clearing GROUP_STOP_TRAPPING and
-        * transition to TASK_TRACED should be atomic with respect to
-        * siglock.  This hsould be done after the arch hook as siglock is
-        * released and regrabbed across it.
-        */
-       task_clear_group_stop_trapping(current);
-
+       /* Let the debugger run.  */
+       __set_current_state(TASK_TRACED);
        spin_unlock_irq(&current->sighand->siglock);
        read_lock(&tasklist_lock);
        if (may_ptrace_stop()) {
-               /*
-                * Notify parents of the stop.
-                *
-                * While ptraced, there are two parents - the ptracer and
-                * the real_parent of the group_leader.  The ptracer should
-                * know about every stop while the real parent is only
-                * interested in the completion of group stop.  The states
-                * for the two don't interact with each other.  Notify
-                * separately unless they're gonna be duplicates.
-                */
-               do_notify_parent_cldstop(current, true, why);
-               if (gstop_done && !real_parent_is_ptracer(current))
-                       do_notify_parent_cldstop(current, false, why);
-
+               do_notify_parent_cldstop(current, CLD_TRAPPED);
                /*
                 * Don't want to allow preemption here, because
                 * sys_ptrace() needs this task to be inactive.
@@ -1789,16 +1663,7 @@ static void ptrace_stop(int exit_code, int why, int 
clear_code, siginfo_t *info)
                /*
                 * By the time we got the lock, our tracer went away.
                 * Don't drop the lock yet, another tracer may come.
-                *
-                * If @gstop_done, the ptracer went away between group stop
-                * completion and here.  During detach, it would have set
-                * GROUP_STOP_PENDING on us and we'll re-enter TASK_STOPPED
-                * in do_signal_stop() on return, so notifying the real
-                * parent of the group stop completion is enough.
                 */
-               if (gstop_done)
-                       do_notify_parent_cldstop(current, false, why);
-
                __set_current_state(TASK_RUNNING);
                if (clear_code)
                        current->exit_code = 0;
@@ -1842,7 +1707,7 @@ void ptrace_notify(int exit_code)
 
        /* Let the debugger run.  */
        spin_lock_irq(&current->sighand->siglock);
-       ptrace_stop(exit_code, CLD_TRAPPED, 1, &info);
+       ptrace_stop(exit_code, 1, &info);
        spin_unlock_irq(&current->sighand->siglock);
 }
 
@@ -1855,115 +1720,64 @@ void ptrace_notify(int exit_code)
 static int do_signal_stop(int signr)
 {
        struct signal_struct *sig = current->signal;
+       int notify;
 
-       if (!(current->group_stop & GROUP_STOP_PENDING)) {
-               unsigned int gstop = GROUP_STOP_PENDING | GROUP_STOP_CONSUME;
+       if (!sig->group_stop_count) {
                struct task_struct *t;
 
-               /* signr will be recorded in task->group_stop for retries */
-               WARN_ON_ONCE(signr & ~GROUP_STOP_SIGMASK);
-
-               if (!likely(current->group_stop & GROUP_STOP_DEQUEUED) ||
+               if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
                    unlikely(signal_group_exit(sig)))
                        return 0;
                /*
-                * There is no group stop already in progress.  We must
-                * initiate one now.
-                *
-                * While ptraced, a task may be resumed while group stop is
-                * still in effect and then receive a stop signal and
-                * initiate another group stop.  This deviates from the
-                * usual behavior as two consecutive stop signals can't
-                * cause two group stops when !ptraced.  That is why we
-                * also check !task_is_stopped(t) below.
-                *
-                * The condition can be distinguished by testing whether
-                * SIGNAL_STOP_STOPPED is already set.  Don't generate
-                * group_exit_code in such case.
-                *
-                * This is not necessary for SIGNAL_STOP_CONTINUED because
-                * an intervening stop signal is required to cause two
-                * continued events regardless of ptrace.
+                * There is no group stop already in progress.
+                * We must initiate one now.
                 */
-               if (!(sig->flags & SIGNAL_STOP_STOPPED))
-                       sig->group_exit_code = signr;
-               else
-                       WARN_ON_ONCE(!task_ptrace(current));
+               sig->group_exit_code = signr;
 
-               current->group_stop &= ~GROUP_STOP_SIGMASK;
-               current->group_stop |= signr | gstop;
                sig->group_stop_count = 1;
-               for (t = next_thread(current); t != current;
-                    t = next_thread(t)) {
-                       t->group_stop &= ~GROUP_STOP_SIGMASK;
+               for (t = next_thread(current); t != current; t = next_thread(t))
                        /*
                         * Setting state to TASK_STOPPED for a group
                         * stop is always done with the siglock held,
                         * so this check has no races.
                         */
-                       if (!(t->flags & PF_EXITING) && !task_is_stopped(t)) {
-                               t->group_stop |= signr | gstop;
+                       if (!(t->flags & PF_EXITING) &&
+                           !task_is_stopped_or_traced(t)) {
                                sig->group_stop_count++;
                                signal_wake_up(t, 0);
                        }
-               }
        }
-retry:
-       if (likely(!task_ptrace(current))) {
-               int notify = 0;
-
-               /*
-                * If there are no other threads in the group, or if there
-                * is a group stop in progress and we are the last to stop,
-                * report to the parent.
-                */
-               if (task_participate_group_stop(current))
-                       notify = CLD_STOPPED;
-
-               __set_current_state(TASK_STOPPED);
-               spin_unlock_irq(&current->sighand->siglock);
-
-               /*
-                * Notify the parent of the group stop completion.  Because
-                * we're not holding either the siglock or tasklist_lock
-                * here, ptracer may attach inbetween; however, this is for
-                * group stop and should always be delivered to the real
-                * parent of the group leader.  The new ptracer will get
-                * its notification when this task transitions into
-                * TASK_TRACED.
-                */
-               if (notify) {
-                       read_lock(&tasklist_lock);
-                       do_notify_parent_cldstop(current, false, notify);
-                       read_unlock(&tasklist_lock);
-               }
-
-               /* Now we don't run again until woken by SIGCONT or SIGKILL */
-               schedule();
-
-               spin_lock_irq(&current->sighand->siglock);
-       } else {
-               ptrace_stop(current->group_stop & GROUP_STOP_SIGMASK,
-                           CLD_STOPPED, 0, NULL);
-               current->exit_code = 0;
-       }
-
        /*
-        * GROUP_STOP_PENDING could be set if another group stop has
-        * started since being woken up or ptrace wants us to transit
-        * between TASK_STOPPED and TRACED.  Retry group stop.
+        * If there are no other threads in the group, or if there is
+        * a group stop in progress and we are the last to stop, report
+        * to the parent.  When ptraced, every thread reports itself.
+        */
+       notify = sig->group_stop_count == 1 ? CLD_STOPPED : 0;
+       notify = tracehook_notify_jctl(notify, CLD_STOPPED);
+       /*
+        * tracehook_notify_jctl() can drop and reacquire siglock, so
+        * we keep ->group_stop_count != 0 before the call. If SIGCONT
+        * or SIGKILL comes in between ->group_stop_count == 0.
         */
-       if (current->group_stop & GROUP_STOP_PENDING) {
-               WARN_ON_ONCE(!(current->group_stop & GROUP_STOP_SIGMASK));
-               goto retry;
+       if (sig->group_stop_count) {
+               if (!--sig->group_stop_count)
+                       sig->flags = SIGNAL_STOP_STOPPED;
+               current->exit_code = sig->group_exit_code;
+               __set_current_state(TASK_STOPPED);
        }
+       spin_unlock_irq(&current->sighand->siglock);
 
-       /* PTRACE_ATTACH might have raced with task killing, clear trapping */
-       task_clear_group_stop_trapping(current);
+       if (notify) {
+               read_lock(&tasklist_lock);
+               do_notify_parent_cldstop(current, notify);
+               read_unlock(&tasklist_lock);
+       }
 
-       spin_unlock_irq(&current->sighand->siglock);
+       /* Now we don't run again until woken by SIGCONT or SIGKILL */
+       schedule();
 
        tracehook_finish_jctl();
+       current->exit_code = 0;
 
        return 1;
 }
@@ -1977,7 +1791,7 @@ static int ptrace_signal(int signr, siginfo_t *info,
        ptrace_signal_deliver(regs, cookie);
 
        /* Let the debugger run.  */
-       ptrace_stop(signr, CLD_TRAPPED, 0, info);
+       ptrace_stop(signr, 0, info);
 
        /* We're back.  Did the debugger cancel the sig?  */
        signr = current->exit_code;
@@ -2032,7 +1846,6 @@ relock:
         * the CLD_ si_code into SIGNAL_CLD_MASK bits.
         */
        if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
-               struct task_struct *leader;
                int why;
 
                if (signal->flags & SIGNAL_CLD_CONTINUED)
@@ -2042,26 +1855,14 @@ relock:
 
                signal->flags &= ~SIGNAL_CLD_MASK;
 
+               why = tracehook_notify_jctl(why, CLD_CONTINUED);
                spin_unlock_irq(&sighand->siglock);
 
-               /*
-                * Notify the parent that we're continuing.  This event is
-                * always per-process and doesn't make whole lot of sense
-                * for ptracers, who shouldn't consume the state via
-                * wait(2) either, but, for backward compatibility, notify
-                * the ptracer of the group leader too unless it's gonna be
-                * a duplicate.
-                */
-               read_lock(&tasklist_lock);
-
-               do_notify_parent_cldstop(current, false, why);
-
-               leader = current->group_leader;
-               if (task_ptrace(leader) && !real_parent_is_ptracer(leader))
-                       do_notify_parent_cldstop(leader, true, why);
-
-               read_unlock(&tasklist_lock);
-
+               if (why) {
+                       read_lock(&tasklist_lock);
+                       do_notify_parent_cldstop(current->group_leader, why);
+                       read_unlock(&tasklist_lock);
+               }
                goto relock;
        }
 
@@ -2078,8 +1879,8 @@ relock:
                if (unlikely(signr != 0))
                        ka = return_ka;
                else {
-                       if (unlikely(current->group_stop &
-                                    GROUP_STOP_PENDING) && do_signal_stop(0))
+                       if (unlikely(signal->group_stop_count > 0) &&
+                           do_signal_stop(0))
                                goto relock;
 
                        signr = dequeue_signal(current, &current->blocked,
@@ -2253,19 +2054,17 @@ void exit_signals(struct task_struct *tsk)
        signotset(&unblocked);
        retarget_shared_pending(tsk, &unblocked);
 
-       if (unlikely(tsk->group_stop & GROUP_STOP_PENDING) &&
-           task_participate_group_stop(tsk))
-               group_stop = CLD_STOPPED;
+       if (unlikely(tsk->signal->group_stop_count) &&
+                       !--tsk->signal->group_stop_count) {
+               tsk->signal->flags = SIGNAL_STOP_STOPPED;
+               group_stop = tracehook_notify_jctl(CLD_STOPPED, CLD_STOPPED);
+       }
 out:
        spin_unlock_irq(&tsk->sighand->siglock);
 
-       /*
-        * If group stop has completed, deliver the notification.  This
-        * should always go to the real parent of the group leader.
-        */
        if (unlikely(group_stop)) {
                read_lock(&tasklist_lock);
-               do_notify_parent_cldstop(tsk, false, group_stop);
+               do_notify_parent_cldstop(tsk, group_stop);
                read_unlock(&tasklist_lock);
        }
 }
-- 
1.5.5.1


Reply via email to