Same as commit 53da1d9456fe7f87a920a78fdbdcf1225d197cb7.

The tracee can be preempted by the tracer right after read_unlock(tasklist);
in this case the tracer can spin in wait_task_inactive().
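
For illustration, a rough sketch of the tracer-side path that ends up
spinning (patterned after the mainline ptrace_check_attach() code, not
verbatim from this tree; "child" and "ret" are just the usual names there):

        /*
         * tracer side, before e.g. PTRACE_GETREGS:
         *
         * wait_task_inactive() spins until the tracee is really off the
         * CPU.  If the tracee was preempted right after
         * read_unlock(&tasklist_lock), the tracer burns cycles here until
         * the tracee runs again and finally sleeps in TASK_TRACED.
         */
        if (!wait_task_inactive(child, TASK_TRACED))
                ret = -ESRCH;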

I still can't understand why this patch makes such a noticeable
difference. The test-case does something like

        for (i = 0; i < 70; i++) {
                ptrace(PTRACE_SINGLESTEP, pid, NULL, NULL);
                waitpid(pid, &status, 0);
                ptrace(PTRACE_GETREGS, pid, NULL, &regs);
        }

without this patch it sometimes takes nearly 0.3 seconds.
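
For reference, a fuller (untested) sketch of such a test-case; the
busy-looping child, the iteration count and the x86 user_regs_struct are
assumptions for illustration, not the original reproducer:

        #include <signal.h>
        #include <sys/ptrace.h>
        #include <sys/types.h>
        #include <sys/user.h>
        #include <sys/wait.h>
        #include <unistd.h>

        int main(void)
        {
                struct user_regs_struct regs;
                int status, i;
                pid_t pid = fork();

                if (pid == 0) {
                        /* child: ask to be traced, stop, then spin */
                        ptrace(PTRACE_TRACEME, 0, NULL, NULL);
                        raise(SIGSTOP);
                        for (;;)
                                ;
                }

                waitpid(pid, &status, 0);       /* initial SIGSTOP */

                for (i = 0; i < 70; i++) {
                        ptrace(PTRACE_SINGLESTEP, pid, NULL, NULL);
                        waitpid(pid, &status, 0);
                        ptrace(PTRACE_GETREGS, pid, NULL, &regs);
                }

                kill(pid, SIGKILL);
                return 0;
        }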

---

 kernel/ptrace.c |    6 ++++++
 1 file changed, 6 insertions(+)

--- PU/kernel/ptrace.c~57_NOTIFY_SYNC_WAKEUP    2009-10-02 00:16:57.000000000 +0200
+++ PU/kernel/ptrace.c  2009-10-02 23:19:41.000000000 +0200
@@ -833,8 +833,14 @@ static void do_ptrace_notify_stop(struct
 
        // XXX: !!!!!!!! UNSAFE when called by tracer !!!!!!!!!!!!!
        read_lock(&tasklist_lock);
+       /*
+        * Don't want to allow preemption here, because
+        * sys_ptrace() needs this task to be inactive.
+        */
+       preempt_disable();
        do_notify_parent_cldstop(tracee, CLD_TRAPPED);
        read_unlock(&tasklist_lock);
+       preempt_enable_no_resched();
 }
 
 void ptrace_notify_stop(struct task_struct *tracee)
