No functional changes, preparation for utrace-ptrace.
Introduce kernel/ptrace.h and move the code which can be shared
with the new implementation into this new header.
Signed-off-by: Oleg Nesterov o...@redhat.com
---
 kernel/ptrace-common.h |  278 ++++++++++++++++++++++++++++++++++++++++++++++++
 kernel/ptrace.c        |  280 +-----------------------------------------------
 2 files changed, 279 insertions(+), 279 deletions(-)
--- /dev/null 2009-10-25 19:46:00.608018007 +0100
+++ V1/kernel/ptrace-common.h 2009-10-26 02:20:44.0 +0100
@@ -0,0 +1,278 @@
+int __ptrace_may_access(struct task_struct *task, unsigned int mode)
+{
+ const struct cred *cred = current_cred(), *tcred;
+
+ /* May we inspect the given task?
+* This check is used both for attaching with ptrace
+* and for allowing access to sensitive information in /proc.
+*
+* ptrace_attach denies several cases that /proc allows
+* because setting up the necessary parent/child relationship
+* or halting the specified task is impossible.
+*/
+ int dumpable = 0;
+ /* Don't let security modules deny introspection */
+ if (task == current)
+ return 0;
+ rcu_read_lock();
+ tcred = __task_cred(task);
+ if ((cred-uid != tcred-euid ||
+cred-uid != tcred-suid ||
+cred-uid != tcred-uid ||
+cred-gid != tcred-egid ||
+cred-gid != tcred-sgid ||
+cred-gid != tcred-gid)
+ !capable(CAP_SYS_PTRACE)) {
+ rcu_read_unlock();
+ return -EPERM;
+ }
+ rcu_read_unlock();
+ smp_rmb();
+ if (task-mm)
+ dumpable = get_dumpable(task-mm);
+ if (!dumpable !capable(CAP_SYS_PTRACE))
+ return -EPERM;
+
+ return security_ptrace_access_check(task, mode);
+}
+
+bool ptrace_may_access(struct task_struct *task, unsigned int mode)
+{
+ int err;
+ task_lock(task);
+ err = __ptrace_may_access(task, mode);
+ task_unlock(task);
+ return !err;
+}
+
+/*
+ * Called with irqs disabled, returns true if childs should reap themselves.
+ */
+static int ignoring_children(struct sighand_struct *sigh)
+{
+ int ret;
+ spin_lock(sigh-siglock);
+ ret = (sigh-action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
+ (sigh-action[SIGCHLD-1].sa.sa_flags SA_NOCLDWAIT);
+ spin_unlock(sigh-siglock);
+ return ret;
+}
+
+/*
+ * Called with tasklist_lock held for writing.
+ * Unlink a traced task, and clean it up if it was a traced zombie.
+ * Return true if it needs to be reaped with release_task().
+ * (We can't call release_task() here because we already hold tasklist_lock.)
+ *
+ * If it's a zombie, our attachedness prevented normal parent notification
+ * or self-reaping. Do notification now if it would have happened earlier.
+ * If it should reap itself, return true.
+ *
+ * If it's our own child, there is no notification to do. But if our normal
+ * children self-reap, then this child was prevented by ptrace and we must
+ * reap it now, in that case we must also wake up sub-threads sleeping in
+ * do_wait().
+ */
+static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
+{
+ __ptrace_unlink(p);
+
+ if (p-exit_state == EXIT_ZOMBIE) {
+ if (!task_detached(p) thread_group_empty(p)) {
+ if (!same_thread_group(p-real_parent, tracer))
+ do_notify_parent(p, p-exit_signal);
+ else if (ignoring_children(tracer-sighand)) {
+ __wake_up_parent(p, tracer);
+ p-exit_signal = -1;
+ }
+ }
+ if (task_detached(p)) {
+ /* Mark it as in the process of being reaped. */
+ p-exit_state = EXIT_DEAD;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user
*dst, int len)
+{
+ int copied = 0;
+
+ while (len 0) {
+ char buf[128];
+ int this_len, retval;
+
+ this_len = (len sizeof(buf)) ? sizeof(buf) : len;
+ retval = access_process_vm(tsk, src, buf, this_len, 0);
+ if (!retval) {
+ if (copied)
+ break;
+ return -EIO;
+ }
+ if (copy_to_user(dst, buf, retval))
+ return -EFAULT;
+ copied += retval;
+ src += retval;
+ dst += retval;
+ len -= retval;
+ }
+ return copied;
+}
+
+int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long
dst, int len)
+{
+ int copied = 0;
+
+ while (len 0) {
+