Implement 've_cleanup_per_cgroot_data', which is called in two
resource-release cases:

1. During the container stop procedure, the container's init process goes
through its exit routines and calls ve_exit_ns. ve_exit_ns derives the
ve from its argument and can call ve_cleanup_per_cgroot_data with that
ve.
2. When a cgroup mount point is destroyed, 'cgroup_drop_root' is called
from the cgroup unmount implementation. The same code also runs in
cgroup_mount when an error occurs and cleanup is needed. In both cases
cgroup_drop_root calls 've_cleanup_per_cgroot_data'. All of these
codepaths know the cgroup and pass it as an argument (both call sites
are sketched after this list).
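
For illustration, the two invocation forms; both call sites appear
verbatim in the diff below:

	/* 1. ve_exit_ns: the ve is known, clean up all cgroup roots */
	ve_cleanup_per_cgroot_data(ve, NULL);

	/* 2. cgroup_drop_root: only the cgroup root is known */
	ve_cleanup_per_cgroot_data(NULL, &root->top_cgroup);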

've_cleanup_per_cgroot_data' releases the per-cgroup-root resources
stored in the ve. It expects at least one of its two arguments (ve or
cgroup) to be non-NULL and uses them to determine which kind of cleanup
is needed. If cgroup is NULL, it cleans up the values for all cgroup
roots the ve possesses. If cgroup is non-NULL, it cleans up only the
values for that particular cgroup.
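
The argument handling boils down to the following sketch (the full
implementation is in the ve.c hunk below):

	void ve_cleanup_per_cgroot_data(struct ve_struct *ve, struct cgroup *cgrp)
	{
		/* at least one of the two arguments must be set */
		BUG_ON(!ve && !cgrp);
		/* if the caller only knows the cgroup, derive its owning ve */
		if (!ve)
			ve = cgroup_get_ve_owner(cgrp);
		/*
		 * Walk ve->per_cgroot_list under per_cgroot_list_lock:
		 * free every entry if cgrp is NULL, otherwise free only
		 * the entry whose cgroot matches cgrp.
		 */
	}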

Signed-off-by: Valeriy Vdovin <[email protected]>
---
 include/linux/ve.h |  2 ++
 kernel/cgroup.c    |  1 +
 kernel/ve/ve.c     | 30 +++++++++++++++++++++---------
 3 files changed, 24 insertions(+), 9 deletions(-)

diff --git a/include/linux/ve.h b/include/linux/ve.h
index 5bf275f..2dcd7bb 100644
--- a/include/linux/ve.h
+++ b/include/linux/ve.h
@@ -220,6 +220,8 @@ int ve_set_release_agent_path(struct cgroup *cgroot,
 
 const char *ve_get_release_agent_path(struct cgroup *cgrp_root);
 
+void ve_cleanup_per_cgroot_data(struct ve_struct *ve, struct cgroup *cgrp);
+
 extern struct ve_struct *get_ve(struct ve_struct *ve);
 extern void put_ve(struct ve_struct *ve);
 
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 989f73e..3305032 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1591,6 +1591,7 @@ static void cgroup_drop_root(struct cgroupfs_root *root)
 {
        if (!root)
                return;
+       ve_cleanup_per_cgroot_data(NULL, &root->top_cgroup);
 
        BUG_ON(!root->hierarchy_id);
        spin_lock(&hierarchy_id_lock);
diff --git a/kernel/ve/ve.c b/kernel/ve/ve.c
index 8d78270..db26cbd4 100644
--- a/kernel/ve/ve.c
+++ b/kernel/ve/ve.c
@@ -745,21 +745,33 @@ err_list:
        return err;
 }
 
-static void ve_per_cgroot_free(struct ve_struct *ve)
+static inline void per_cgroot_data_free(struct per_cgroot_data *data)
+{
+       struct cgroup_rcu_string *release_agent = data->release_agent_path;
+
+       RCU_INIT_POINTER(data->release_agent_path, NULL);
+       if (release_agent)
+               kfree_rcu(release_agent, rcu_head);
+       kfree(data);
+}
+
+void ve_cleanup_per_cgroot_data(struct ve_struct *ve, struct cgroup *cgrp)
 {
        struct per_cgroot_data *data, *saved;
-       struct cgroup_rcu_string *release_agent;
 
+       BUG_ON(!ve && !cgrp);
+       rcu_read_lock();
+       if (!ve)
+               ve = cgroup_get_ve_owner(cgrp);
        raw_spin_lock(&ve->per_cgroot_list_lock);
        list_for_each_entry_safe(data, saved, &ve->per_cgroot_list, list) {
-               release_agent = data->release_agent_path;
-               RCU_INIT_POINTER(data->release_agent_path, NULL);
-               if (release_agent)
-                       kfree_rcu(release_agent, rcu_head);
-               list_del_init(&data->list);
-               kfree(data);
+               if (!cgrp || data->cgroot == cgrp) {
+                       list_del_init(&data->list);
+                       per_cgroot_data_free(data);
+               }
        }
        raw_spin_unlock(&ve->per_cgroot_list_lock);
+       rcu_read_unlock();
 }
 
 void ve_stop_ns(struct pid_namespace *pid_ns)
@@ -812,7 +824,7 @@ void ve_exit_ns(struct pid_namespace *pid_ns)
 
        ve_workqueue_stop(ve);
 
-       ve_per_cgroot_free(ve);
+       ve_cleanup_per_cgroot_data(ve, NULL);
 
        /*
         * At this point all userspace tasks in container are dead.
-- 
1.8.3.1
