The commit is pushed to "branch-rh7-3.10.0-1127.18.2.vz7.163.x-ovz" and will 
appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-1127.18.2.el7
------>
commit c7a536d7451a248cdafcd3e4ad15ac1bd8e08c11
Author: Valeriy Vdovin <[email protected]>
Date:   Thu Aug 6 08:31:57 2020 +0300

    ve/cgroup: private per-cgroup-root data container
    
    Since each ve is internally attached to a particular css_set via
    its init_task, it is useful to have a container for data that is
    private to each root cgroup in the ve's css_set, i.e. common to
    each cgroup subsystem hierarchy rooted at the ve's virtual root.
    
    Signed-off-by: Valeriy Vdovin <[email protected]>
    Reviewed-by: Kirill Tkhai <[email protected]>
---
 include/linux/ve.h |  7 ++++++
 kernel/ve/ve.c     | 73 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 80 insertions(+)

diff --git a/include/linux/ve.h b/include/linux/ve.h
index 4dbd216..65413d5 100644
--- a/include/linux/ve.h
+++ b/include/linux/ve.h
@@ -137,6 +137,13 @@ struct ve_struct {
        struct work_struct      release_agent_work;
 
        /*
+        * List of per-cgroup-root data, one entry for each
+        * root cgroup in this ve's css_set.
+        */
+       struct list_head        per_cgroot_list;
+       struct raw_spinlock     per_cgroot_list_lock;
+
+       /*
         * All tasks, that belong to this ve, live
         * in cgroups, that are children to cgroups
         * that form this css_set.
diff --git a/kernel/ve/ve.c b/kernel/ve/ve.c
index 9e6bb8b..f564dca 100644
--- a/kernel/ve/ve.c
+++ b/kernel/ve/ve.c
@@ -45,6 +45,14 @@
 #include <linux/vziptable_defs.h>
 #include <net/rtnetlink.h>
 
+struct per_cgroot_data {
+       struct list_head list;
+       /*
+        * the root cgroup this data is private to
+        */
+       struct cgroup *cgroot;
+};
+
 extern struct kmapset_set sysfs_ve_perms_set;
 
 static struct kmem_cache *ve_cachep;
@@ -92,6 +100,9 @@ struct ve_struct ve0 = {
        .release_list           = LIST_HEAD_INIT(ve0.release_list),
        .release_agent_work     = __WORK_INITIALIZER(ve0.release_agent_work,
                                        cgroup_release_agent),
+       .per_cgroot_list        = LIST_HEAD_INIT(ve0.per_cgroot_list),
+       .per_cgroot_list_lock   = __RAW_SPIN_LOCK_UNLOCKED(
+                                       ve0.per_cgroot_list_lock),
 };
 EXPORT_SYMBOL(ve0);
 
@@ -118,6 +129,52 @@ void put_ve(struct ve_struct *ve)
 }
 EXPORT_SYMBOL(put_ve);
 
+static struct per_cgroot_data *per_cgroot_data_find_locked(
+       struct list_head *per_cgroot_list, struct cgroup *cgroot)
+{
+       struct per_cgroot_data *data;
+
+       list_for_each_entry(data, per_cgroot_list, list) {
+               if (data->cgroot == cgroot)
+                       return data;
+       }
+       return NULL;
+}
+
+static inline struct per_cgroot_data *per_cgroot_get_or_create(
+       struct ve_struct *ve, struct cgroup *cgroot)
+{
+       struct per_cgroot_data *data, *other_data;
+
+       raw_spin_lock(&ve->per_cgroot_list_lock);
+       data = per_cgroot_data_find_locked(&ve->per_cgroot_list,
+               cgroot);
+       raw_spin_unlock(&ve->per_cgroot_list_lock);
+
+       if (data)
+               return data;
+
+       data = kzalloc(sizeof(struct per_cgroot_data), GFP_KERNEL);
+       if (!data)
+               return ERR_PTR(-ENOMEM);
+
+       raw_spin_lock(&ve->per_cgroot_list_lock);
+       other_data = per_cgroot_data_find_locked(&ve->per_cgroot_list,
+               cgroot);
+
+       if (other_data) {
+               raw_spin_unlock(&ve->per_cgroot_list_lock);
+               kfree(data);
+               return other_data;
+       }
+
+       data->cgroot = cgroot;
+       list_add(&data->list, &ve->per_cgroot_list);
+
+       raw_spin_unlock(&ve->per_cgroot_list_lock);
+       return data;
+}
+
 struct cgroup_subsys_state *ve_get_init_css(struct ve_struct *ve, int subsys_id)
 {
        struct cgroup_subsys_state *css, *tmp;
@@ -617,6 +674,18 @@ err_list:
        return err;
 }
 
+static void ve_per_cgroot_free(struct ve_struct *ve)
+{
+       struct per_cgroot_data *data, *saved;
+
+       raw_spin_lock(&ve->per_cgroot_list_lock);
+       list_for_each_entry_safe(data, saved, &ve->per_cgroot_list, list) {
+               list_del_init(&data->list);
+               kfree(data);
+       }
+       raw_spin_unlock(&ve->per_cgroot_list_lock);
+}
+
 void ve_stop_ns(struct pid_namespace *pid_ns)
 {
        struct ve_struct *ve = current->task_ve;
@@ -667,6 +736,8 @@ void ve_exit_ns(struct pid_namespace *pid_ns)
 
        ve_workqueue_stop(ve);
 
+       ve_per_cgroot_free(ve);
+
        /*
         * At this point all userspace tasks in container are dead.
         */
@@ -740,6 +811,7 @@ static struct cgroup_subsys_state *ve_create(struct cgroup *cg)
 
        INIT_WORK(&ve->release_agent_work, cgroup_release_agent);
        raw_spin_lock_init(&ve->release_list_lock);
+       raw_spin_lock_init(&ve->per_cgroot_list_lock);
 
        ve->_randomize_va_space = ve0._randomize_va_space;
 
@@ -776,6 +848,7 @@ do_init:
        INIT_LIST_HEAD(&ve->ve_list);
        INIT_LIST_HEAD(&ve->devmnt_list);
        INIT_LIST_HEAD(&ve->release_list);
+       INIT_LIST_HEAD(&ve->per_cgroot_list);
        mutex_init(&ve->devmnt_mutex);
 
 #ifdef CONFIG_AIO
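
Below is a minimal usage sketch, assuming a follow-up patch wants to
attach hierarchy-wide state to a root cgroup. The caller and the state
it stores are hypothetical and not part of this commit; since
per_cgroot_get_or_create() is static, such a caller would live in
kernel/ve/ve.c:

	/*
	 * Hypothetical example (not in this patch): look up, or lazily
	 * allocate, the per-cgroup-root data for a given hierarchy root
	 * and store some per-hierarchy state in it.  May sleep, because
	 * per_cgroot_get_or_create() allocates with GFP_KERNEL.
	 */
	static int ve_attach_cgroot_state(struct ve_struct *ve,
					  struct cgroup *cgroot)
	{
		struct per_cgroot_data *data;

		data = per_cgroot_get_or_create(ve, cgroot);
		if (IS_ERR(data))
			return PTR_ERR(data);

		/* ... write hierarchy-wide state into *data here ... */
		return 0;
	}

Note the allocation pattern in per_cgroot_get_or_create(): the
GFP_KERNEL allocation is performed with the raw spinlock dropped
(allocation may sleep), and the list is re-checked after re-taking the
lock, so two racing callers for the same cgroot converge on a single
list entry and the loser's allocation is freed.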