Add a new cgroup subsystem callback can_fork, which determines whether
a fork is accepted or rejected by cgroup policy.

In addition, add a cancel_fork callback so that if an error occurs later
in the forking process, any state modified by can_fork can be reverted.
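
As an illustration, here is a minimal sketch of how a subsystem might
implement this pair of callbacks, assuming a hypothetical counter-style
policy with a per-css count and limit (struct counter and the
css_counter() container_of() wrapper are inventions for this example,
not part of this patch):

    static int counter_can_fork(struct cgroup_subsys_state *css,
                                struct task_struct *task)
    {
            struct counter *c = css_counter(css);

            /* Charge the new task against this css's limit. */
            if (atomic_inc_return(&c->count) > c->limit) {
                    atomic_dec(&c->count);
                    return -EAGAIN;
            }
            return 0;
    }

    static void counter_cancel_fork(struct cgroup_subsys_state *css,
                                    struct task_struct *task)
    {
            /* A later step of the fork failed; undo the charge. */
            atomic_dec(&css_counter(css)->count);
    }

A subsystem opts in by setting .can_fork and .cancel_fork in its
struct cgroup_subsys.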

In order to ensure that the fork charges the right hierarchy, save the
"current" css_set before calling ss->can_fork and compare it with the
"current" css_set that gets committed to the task *proper* in post_fork.
If they do not match, revert can_fork's charging of the wrong hierarchy
and forcefully reapply the charge to the right hierarchy using the
reapply_fork callback. Since a change to the "current" css_set during
copy_process indicates that an organisation operation took place, it is
acceptable to break the cgroup policy in this case.
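
Continuing the hypothetical counter sketch from above, reapply_fork
would move the charge from the css that can_fork charged over to the
css the task actually ended up in:

    static void counter_reapply_fork(struct cgroup_subsys_state *css,
                                     struct cgroup_subsys_state *old_css,
                                     struct task_struct *task)
    {
            /*
             * The task migrated between can_fork() and post_fork(), so
             * move the charge to where the task was committed. The new
             * charge is applied unconditionally, since organisation
             * operations are allowed to breach the policy.
             */
            atomic_dec(&css_counter(old_css)->count);
            atomic_inc(&css_counter(css)->count);
    }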

In order for a subsystem to know that a task is being migrated from one
cgroup hierarchy to another, add a detach callback, which is run after
the migration has been confirmed but before the old_cset's refcount is
dropped.
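
In the same hypothetical counter subsystem, detach is the point where a
charge would follow the task out of the old hierarchy:

    static void counter_detach(struct cgroup_subsys_state *old_css,
                               struct task_struct *task)
    {
            /* The task has left old_css's cgroup; drop its charge. */
            atomic_dec(&css_counter(old_css)->count);
    }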

This is in preparation for implementing the pids cgroup subsystem.

Signed-off-by: Aleksa Sarai <cyp...@cyphar.com>
---
 include/linux/cgroup.h |  42 ++++++++++++-
 kernel/cgroup.c        | 163 ++++++++++++++++++++++++++++++++++++++++++++++---
 kernel/fork.c          |  38 ++++++++++--
 3 files changed, 227 insertions(+), 16 deletions(-)

diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index b9cb94c..278df0f 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -25,14 +25,25 @@
 
 #ifdef CONFIG_CGROUPS
 
+struct cgroup_fork_state;
 struct cgroup_root;
 struct cgroup_subsys;
 struct cgroup;
 
+extern struct cgroup_fork_state *cgroup_cfs_alloc(void);
+extern void cgroup_cfs_free(struct cgroup_fork_state *cfs);
+extern void cgroup_cfs_get(struct cgroup_fork_state *cfs);
+extern void cgroup_cfs_put(struct cgroup_fork_state *cfs);
+
 extern int cgroup_init_early(void);
 extern int cgroup_init(void);
 extern void cgroup_fork(struct task_struct *p);
-extern void cgroup_post_fork(struct task_struct *p);
+extern int cgroup_can_fork(struct task_struct *p,
+                          struct cgroup_fork_state *cfs);
+extern void cgroup_cancel_fork(struct task_struct *p,
+                              struct cgroup_fork_state *cfs);
+extern void cgroup_post_fork(struct task_struct *p,
+                            struct cgroup_fork_state *old_cfs);
 extern void cgroup_exit(struct task_struct *p);
 extern int cgroupstats_build(struct cgroupstats *stats,
                                struct dentry *dentry);
@@ -649,6 +660,15 @@ struct cgroup_subsys {
                              struct cgroup_taskset *tset);
        void (*attach)(struct cgroup_subsys_state *css,
                       struct cgroup_taskset *tset);
+       void (*detach)(struct cgroup_subsys_state *old_css,
+                      struct task_struct *task);
+       int (*can_fork)(struct cgroup_subsys_state *css,
+                       struct task_struct *task);
+       void (*cancel_fork)(struct cgroup_subsys_state *css,
+                           struct task_struct *task);
+       void (*reapply_fork)(struct cgroup_subsys_state *css,
+                            struct cgroup_subsys_state *old_css,
+                            struct task_struct *task);
        void (*fork)(struct task_struct *task);
        void (*exit)(struct cgroup_subsys_state *css,
                     struct cgroup_subsys_state *old_css,
@@ -943,12 +963,30 @@ struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
 
 #else /* !CONFIG_CGROUPS */
 
+struct cgroup_fork_state;
 struct cgroup_subsys_state;
 
+static inline struct cgroup_fork_state *cgroup_cfs_alloc(void)
+{
+       return NULL;
+}
+static inline void cgroup_cfs_free(struct cgroup_fork_state *cfs) {}
+static inline void cgroup_cfs_get(struct cgroup_fork_state *cfs) {}
+static inline void cgroup_cfs_put(struct cgroup_fork_state *cfs) {}
+
 static inline int cgroup_init_early(void) { return 0; }
 static inline int cgroup_init(void) { return 0; }
 static inline void cgroup_fork(struct task_struct *p) {}
-static inline void cgroup_post_fork(struct task_struct *p) {}
+static inline int cgroup_can_fork(struct task_struct *p,
+                                 struct cgroup_fork_state *cfs)
+{
+       return 0;
+}
+static inline void cgroup_cancel_fork(struct task_struct *p,
+                                     struct cgroup_fork_state *cfs) {}
+static inline void cgroup_post_fork(struct task_struct *p,
+                                   struct cgroup_fork_state *old_cfs) {}
+
 static inline void cgroup_exit(struct task_struct *p) {}
 
 static inline int cgroupstats_build(struct cgroupstats *stats,
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index abd491f..5853d61 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -176,14 +176,19 @@ static DEFINE_IDR(cgroup_hierarchy_idr);
 static u64 css_serial_nr_next = 1;
 
 /*
- * These bitmask flags indicate whether tasks in the fork and exit paths should
- * check for fork/exit handlers to call. This avoids us having to do
- * extra work in the fork/exit path if none of the subsystems need to
- * be called.
+ * These bitmask flags indicate whether tasks in the fork and exit paths
+ * should check for fork/exit handlers to call. This avoids us having to do
+ * extra work in the fork/exit path if a subsystem doesn't need to be
+ * called.
  */
 static int need_fork_callback __read_mostly;
 static int need_exit_callback __read_mostly;
 
+/* Ditto for the can_fork/cancel_fork/reapply_fork callbacks. */
+static int need_canfork_callback __read_mostly;
+static int need_cancelfork_callback __read_mostly;
+static int need_reapplyfork_callback __read_mostly;
+
 static struct cftype cgroup_dfl_base_files[];
 static struct cftype cgroup_legacy_base_files[];
 
@@ -412,7 +417,7 @@ static int notify_on_release(const struct cgroup *cgrp)
             (((ss) = cgroup_subsys[ssid]) || true); (ssid)++)
 
 /**
- * for_each_subsys_which - filter for_each_subsys with a bitmask
+ * for_each_subsys_which - filter for_each_subsys with a subsys bitmask
  * @ss_mask: the bitmask
  * @ss: the iteration cursor
  * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end
@@ -2054,6 +2059,8 @@ static void cgroup_task_migrate(struct cgroup *old_cgrp,
                                struct css_set *new_cset)
 {
        struct css_set *old_cset;
+       struct cgroup_subsys_state *css;
+       int i;
 
        lockdep_assert_held(&cgroup_mutex);
        lockdep_assert_held(&css_set_rwsem);
@@ -2078,6 +2085,18 @@ static void cgroup_task_migrate(struct cgroup *old_cgrp,
        list_move_tail(&tsk->cg_list, &new_cset->mg_tasks);
 
        /*
+        * We detach from the old_cset subsystems here. We must do this
+        * before we drop the refcount for old_cset, in order to make sure
+        * that nobody frees it underneath us.
+        */
+       for_each_e_css(css, i, old_cgrp) {
+               struct cgroup_subsys_state *old_css = old_cset->subsys[i];
+
+               if (old_css->ss->detach)
+                       old_css->ss->detach(old_css, tsk);
+       }
+
+       /*
         * We just gained a reference on old_cset by taking it from the
         * task. As trading it for new_cset is protected by cgroup_mutex,
         * we're safe to drop it here; it will be freed under RCU.
@@ -2321,9 +2340,10 @@ static int cgroup_migrate(struct cgroup *cgrp, struct task_struct *leader,
         */
        tset.csets = &tset.dst_csets;
 
-       for_each_e_css(css, i, cgrp)
+       for_each_e_css(css, i, cgrp) {
                if (css->ss->attach)
                        css->ss->attach(css, &tset);
+       }
 
        ret = 0;
        goto out_release_tset;
@@ -4935,6 +4955,9 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss, bool early)
 
        need_fork_callback |= (bool) ss->fork << ss->id;
        need_exit_callback |= (bool) ss->exit << ss->id;
+       need_canfork_callback |= (bool) ss->can_fork << ss->id;
+       need_cancelfork_callback |= (bool) ss->cancel_fork << ss->id;
+       need_reapplyfork_callback |= (bool) ss->reapply_fork << ss->id;
 
        /* At system boot, before all subsystems have been
         * registered, no tasks have been forked, so we don't
@@ -5173,6 +5196,62 @@ static const struct file_operations proc_cgroupstats_operations = {
        .release = single_release,
 };
 
+struct cgroup_fork_state {
+       struct css_set *cset;
+};
+
+/**
+ * cgroup_cfs_alloc - allocates an empty cgroup_fork_state
+ */
+struct cgroup_fork_state *cgroup_cfs_alloc(void)
+{
+       struct cgroup_fork_state *cfs;
+
+       cfs = kzalloc(sizeof(struct cgroup_fork_state), GFP_KERNEL);
+       if (!cfs)
+               return ERR_PTR(-ENOMEM);
+
+       return cfs;
+}
+
+/**
+ * cgroup_cfs_get - save the current css_set in @cfs and bump its refcount
+ * @cfs: the fork state to update
+ *
+ * Make sure to run a corresponding cgroup_cfs_put() after using the cfs and
+ * before calling cgroup_cfs_get() on it again. This must be called with
+ * rcu_read_lock() held by the caller.
+ */
+void cgroup_cfs_get(struct cgroup_fork_state *cfs)
+{
+       WARN_ON_ONCE(!rcu_read_lock_held());
+       WARN_ON_ONCE(cfs->cset);
+
+       cfs->cset = task_css_set(current);
+       get_css_set(cfs->cset);
+}
+
+/**
+ * cgroup_cfs_put - drop the css_set reference taken by cgroup_cfs_get()
+ * @cfs: the fork state to update
+ */
+void cgroup_cfs_put(struct cgroup_fork_state *cfs)
+{
+       WARN_ON_ONCE(!cfs->cset);
+
+       put_css_set(cfs->cset);
+       cfs->cset = NULL;
+}
+
+/**
+ * cgroup_cfs_free - frees a cgroup_fork_state pointer
+ * @cfs: the pointer to free
+ */
+void cgroup_cfs_free(struct cgroup_fork_state *cfs)
+{
+       kfree(cfs);
+}
+
 /**
  * cgroup_fork - initialize cgroup related fields during copy_process()
  * @child: pointer to task_struct of forking parent process.
@@ -5188,6 +5267,54 @@ void cgroup_fork(struct task_struct *child)
 }
 
 /**
+ * cgroup_can_fork - called on a new task before the process is exposed.
+ * @child: the task in question.
+ * @cfs: the fork state holding the css_set to charge.
+ *
+ * This calls the subsystem can_fork() callbacks. If the can_fork() callback
+ * returns an error, the fork aborts with that error code. This allows a
+ * cgroup subsystem to conditionally allow or deny new forks.
+ */
+int cgroup_can_fork(struct task_struct *child, struct cgroup_fork_state *cfs)
+{
+       struct cgroup_subsys *ss;
+       int i, j, retval;
+
+       for_each_subsys_which(need_canfork_callback, ss, i) {
+               retval = ss->can_fork(cfs->cset->subsys[i], child);
+               if (retval)
+                       goto out_revert;
+       }
+
+       return 0;
+
+out_revert:
+       for_each_subsys_which(need_cancelfork_callback, ss, j) {
+               if (j >= i)
+                       break;
+
+               ss->cancel_fork(cfs->cset->subsys[j], child);
+       }
+
+       return retval;
+}
+
+/**
+ * cgroup_cancel_fork - called if a fork failed after cgroup_can_fork()
+ * @child: the task in question
+ * @cfs: the fork state used by the earlier cgroup_can_fork()
+ *
+ * This calls the cancel_fork() callbacks if a fork failed *after*
+ * cgroup_can_fork() succeeded.
+ */
+void cgroup_cancel_fork(struct task_struct *child, struct cgroup_fork_state *cfs)
+{
+       struct cgroup_subsys *ss;
+       int i;
+
+       for_each_subsys_which(need_cancelfork_callback, ss, i)
+               ss->cancel_fork(cfs->cset->subsys[i], child);
+}
+
+/**
  * cgroup_post_fork - called on a new task after adding it to the task list
  * @child: the task in question
  *
@@ -5197,9 +5324,10 @@ void cgroup_fork(struct task_struct *child)
  * cgroup_task_iter_start() - to guarantee that the new task ends up on its
  * list.
  */
-void cgroup_post_fork(struct task_struct *child)
+void cgroup_post_fork(struct task_struct *child, struct cgroup_fork_state *old_cfs)
 {
        struct cgroup_subsys *ss;
+       struct css_set *cset;
        int i;
 
        /*
@@ -5223,9 +5351,8 @@ void cgroup_post_fork(struct task_struct *child)
         * in the init_css_set before cg_links is enabled and there's no
         * operation which transfers all tasks out of init_css_set.
         */
+       cset = old_cfs->cset;
        if (use_task_css_set_links) {
-               struct css_set *cset;
-
                down_write(&css_set_rwsem);
                cset = task_css_set(current);
                if (list_empty(&child->cg_list)) {
@@ -5237,6 +5364,24 @@ void cgroup_post_fork(struct task_struct *child)
        }
 
        /*
+        * Deal with tasks that were migrated mid-fork. If the css_set
+        * changed between can_fork() and post_fork(), an organisation
+        * operation has occurred and we need to revert/reapply the
+        * can_fork() charge.
+        */
+       for_each_subsys_which(need_reapplyfork_callback, ss, i) {
+               struct cgroup_subsys_state *css = cset->subsys[i];
+               struct cgroup_subsys_state *old_css = old_cfs->cset->subsys[i];
+
+               /*
+                * We only reapply for subsystems whose
+                * association changed in the interim.
+                */
+               if (old_css != css)
+                       ss->reapply_fork(css, old_css, child);
+       }
+
+       /*
         * Call ss->fork().  This must happen after @child is linked on
         * css_set; otherwise, @child might change state between ->fork()
         * and addition to css_set.
diff --git a/kernel/fork.c b/kernel/fork.c
index cf65139..170ae32 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1196,6 +1196,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 {
        int retval;
        struct task_struct *p;
+       struct cgroup_fork_state *cfs;
 
        if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
                return ERR_PTR(-EINVAL);
@@ -1322,12 +1323,17 @@ static struct task_struct *copy_process(unsigned long clone_flags,
        if (clone_flags & CLONE_THREAD)
                threadgroup_change_begin(current);
        cgroup_fork(p);
+       cfs = cgroup_cfs_alloc();
+       if (IS_ERR(cfs)) {
+               retval = PTR_ERR(cfs);
+               goto bad_fork_cleanup_threadgroup_lock;
+       }
 #ifdef CONFIG_NUMA
        p->mempolicy = mpol_dup(p->mempolicy);
        if (IS_ERR(p->mempolicy)) {
                retval = PTR_ERR(p->mempolicy);
                p->mempolicy = NULL;
-               goto bad_fork_cleanup_threadgroup_lock;
+               goto bad_fork_cfs_free;
        }
 #endif
 #ifdef CONFIG_CPUSETS
@@ -1468,6 +1474,21 @@ static struct task_struct *copy_process(unsigned long clone_flags,
        INIT_LIST_HEAD(&p->thread_group);
        p->task_works = NULL;
 
+
+       /*
+        * Ensure that the cgroup subsystem policies allow the new process to be
+        * forked. If this fork races with an organisation operation, then
+        * this may not charge the correct css_set. This is fixed during
+        * cgroup_post_fork() (when the css_set has been updated) by undoing
+        * this operation and forcefully charging the correct css_set.
+        */
+       rcu_read_lock();
+       cgroup_cfs_get(cfs);
+       rcu_read_unlock();
+       retval = cgroup_can_fork(p, cfs);
+       if (retval)
+               goto bad_fork_put_cfs;
+
        /*
         * Make it visible to the rest of the system, but dont wake it up yet.
         * Need tasklist lock for parent etc handling!
@@ -1504,7 +1525,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
                spin_unlock(&current->sighand->siglock);
                write_unlock_irq(&tasklist_lock);
                retval = -ERESTARTNOINTR;
-               goto bad_fork_free_pid;
+               goto bad_fork_cancel_cgroup;
        }
 
        if (likely(p->pid)) {
@@ -1546,7 +1567,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
        write_unlock_irq(&tasklist_lock);
 
        proc_fork_connector(p);
-       cgroup_post_fork(p);
+       cgroup_post_fork(p, cfs);
+       cgroup_cfs_put(cfs);
+       cgroup_cfs_free(cfs);
        if (clone_flags & CLONE_THREAD)
                threadgroup_change_end(current);
        perf_event_fork(p);
@@ -1556,7 +1579,10 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 
        return p;
 
-bad_fork_free_pid:
+bad_fork_cancel_cgroup:
+       cgroup_cancel_fork(p, cfs);
+bad_fork_put_cfs:
+       cgroup_cfs_put(cfs);
        if (pid != &init_struct_pid)
                free_pid(pid);
 bad_fork_cleanup_io:
@@ -1585,8 +1611,10 @@ bad_fork_cleanup_perf:
 bad_fork_cleanup_policy:
 #ifdef CONFIG_NUMA
        mpol_put(p->mempolicy);
-bad_fork_cleanup_threadgroup_lock:
+bad_fork_cfs_free:
 #endif
+       cgroup_cfs_free(cfs);
+bad_fork_cleanup_threadgroup_lock:
        if (clone_flags & CLONE_THREAD)
                threadgroup_change_end(current);
        delayacct_tsk_free(p);
-- 
2.3.4
