On 06/28, Linus Torvalds wrote:
>
> Then try_get_task_stack(tsk) becomes
>
>      void *try_get_task_stack(struct task_struct *tsk)
>      {
>           void *stack = tsk->stack;
>           if (!atomic_inc_not_zero(&tsk->stackref))
>                stack = NULL;
>           return stack;
>      }

Yes, and then we can trivially fix the users of to_live_kthread().
So I'll wait for this change and send the simple fix on top of it.
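
To be concrete, the "simple fix" I have in mind is roughly the sketch
below (uncompiled; it assumes your try_get_task_stack() above plus a
put_task_stack() counterpart which drops ->stackref and frees the stack
once it hits zero):

     static struct kthread *to_live_kthread(struct task_struct *k)
     {
          struct completion *vfork = READ_ONCE(k->vfork_done);

          /* pin the stack so "struct kthread" can't go away under us */
          if (likely(vfork) && try_get_task_stack(k))
               return __to_kthread(vfork);
          return NULL;
     }

and the callers of to_live_kthread() simply do put_task_stack(k) once
they are done with the result.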

Otherwise I'll send another ugly hack (see below).
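
Just to show how the hack below is supposed to be used later (the patch
itself only sets and frees the pointer), the lookup side could then be
something like this illustrative helper (name/placement not final):

     static inline struct kthread *to_kthread(struct task_struct *k)
     {
          WARN_ON(!(k->flags & PF_KTHREAD));
          return (__force void *)k->set_child_tid;
     }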

Oleg.
---

diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index e691b6a..7667bc62 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -37,6 +37,7 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
        __k;                                                               \
 })
 
+void set_kthread_struct(void *kthread);
 void kthread_bind(struct task_struct *k, unsigned int cpu);
 void kthread_bind_mask(struct task_struct *k, const struct cpumask *mask);
 int kthread_stop(struct task_struct *k);
diff --git a/kernel/fork.c b/kernel/fork.c
index 97122f9..8643248 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -326,6 +326,23 @@ void release_task_stack(struct task_struct *tsk)
 #endif
 }
 
+void set_kthread_struct(void *kthread)
+{
+       /*
+        * This is the ugly but simple hack we will hopefully remove soon.
+        * We abuse ->set_child_tid to avoid adding a new member and because
+        * it can't be wrongly copied by copy_process(). We also rely on the
+        * fact that the caller can't exec, so PF_KTHREAD can't be cleared.
+        */
+       current->set_child_tid = (__force void __user *)kthread;
+}
+
+static void free_kthread_struct(struct task_struct *tsk)
+{
+       if (tsk->flags & PF_KTHREAD)
+               kfree((__force void *)tsk->set_child_tid); /* can be NULL */
+}
+
 void free_task(struct task_struct *tsk)
 {
 #ifndef CONFIG_THREAD_INFO_IN_TASK
@@ -345,6 +362,7 @@ void free_task(struct task_struct *tsk)
        ftrace_graph_exit_task(tsk);
        put_seccomp_filter(tsk);
        arch_release_task_struct(tsk);
+       free_kthread_struct(tsk);
        free_task_struct(tsk);
 }
 EXPORT_SYMBOL(free_task);
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 9ff173d..3a4921f 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -181,14 +181,11 @@ static int kthread(void *_create)
        int (*threadfn)(void *data) = create->threadfn;
        void *data = create->data;
        struct completion *done;
-       struct kthread self;
+       struct kthread *self;
        int ret;
 
-       self.flags = 0;
-       self.data = data;
-       init_completion(&self.exited);
-       init_completion(&self.parked);
-       current->vfork_done = &self.exited;
+       self = kmalloc(sizeof(*self), GFP_KERNEL);
+       set_kthread_struct(self);
 
        /* If user was SIGKILLed, I release the structure. */
        done = xchg(&create->done, NULL);
@@ -196,6 +193,19 @@ static int kthread(void *_create)
                kfree(create);
                do_exit(-EINTR);
        }
+
+       if (!self) {
+               create->result = ERR_PTR(-ENOMEM);
+               complete(done);
+               do_exit(-ENOMEM);
+       }
+
+       self->flags = 0;
+       self->data = data;
+       init_completion(&self->exited);
+       init_completion(&self->parked);
+       current->vfork_done = &self->exited;
+
        /* OK, tell user we're spawned, wait for stop or wakeup */
        __set_current_state(TASK_UNINTERRUPTIBLE);
        create->result = current;
@@ -203,12 +213,10 @@ static int kthread(void *_create)
        schedule();
 
        ret = -EINTR;
-
-       if (!test_bit(KTHREAD_SHOULD_STOP, &self.flags)) {
-               __kthread_parkme(&self);
+       if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
+               __kthread_parkme(self);
                ret = threadfn(data);
        }
-       /* we can't just return, we must preserve "self" on stack */
        do_exit(ret);
 }
 
