The commit has been pushed to "branch-rh7-3.10.0-123.1.2-ovz" and will appear at
https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-123.1.2.vz7.5.7
------>
commit ad94c300d06e3e10ce8ef4fb6e6cddbbc659dd12
Author: Vladimir Davydov <vdavy...@parallels.com>
Date:   Thu May 28 18:01:43 2015 +0400

    fairsched: add cpu.proc.loadavg to the cpu cgroup
    
    Patchset description:
    
    Currently, CPU stats are broken for UUID-named CTs. First, reading
    /proc/stat from inside a container or /proc/vz/vestat on the host fails
    with ENOENT if there is a UUID-named CT. Patch 1 fixes that. Second,
    there is no interface for getting the load average of UUID-named CTs
    that could succeed the legacy VZCTL_GET_CPU_STAT ioctl. Patch 2 adds
    cpu.proc.loadavg, which mimics the behavior of /proc/loadavg.
    
    This patch description:
    
    This file mimics the behavior of the system-wide /proc/loadavg. It is
    intended as a successor of the legacy VZCTL_GET_CPU_STAT ioctl.
    
    Also, use it to show /proc/loadavg inside a container, just as we
    already do for /proc/stat.
    
    https://jira.sw.ru/browse/PSBM-32284
    
    Signed-off-by: Vladimir Davydov <vdavy...@parallels.com>
---
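
Note (not part of the commit message): below is a minimal userspace sketch of
reading the new file. The cgroup mount point and the container cgroup name are
assumptions for illustration only; the output format mirrors /proc/loadavg
("avg1 avg5 avg15 running/total last_pid").

        /* read_ct_loadavg.c - print a container's cpu.proc.loadavg
         * (illustrative sketch; the mount point is an assumption) */
        #include <stdio.h>

        int main(int argc, char **argv)
        {
                char path[256], buf[128];
                FILE *f;

                if (argc != 2) {
                        fprintf(stderr, "usage: %s <CT cgroup name>\n", argv[0]);
                        return 1;
                }
                /* assumes the cpu cgroup is mounted at /sys/fs/cgroup/cpu */
                snprintf(path, sizeof(path),
                         "/sys/fs/cgroup/cpu/%s/cpu.proc.loadavg", argv[1]);
                f = fopen(path, "r");
                if (!f) {
                        perror(path);
                        return 1;
                }
                if (fgets(buf, sizeof(buf), f))
                        fputs(buf, stdout);
                fclose(f);
                return 0;
        }
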
 fs/proc/loadavg.c         | 22 +++++++++----------
 include/linux/fairsched.h |  5 +++++
 include/linux/sched.h     |  7 ------
 kernel/fairsched.c        | 15 +++++++++++++
 kernel/sched/core.c       | 54 +++++++++++++++++++++++++++++------------------
 5 files changed, 65 insertions(+), 38 deletions(-)

diff --git a/fs/proc/loadavg.c b/fs/proc/loadavg.c
index a9c0f55..4cbdeef 100644
--- a/fs/proc/loadavg.c
+++ b/fs/proc/loadavg.c
@@ -6,6 +6,8 @@
 #include <linux/seq_file.h>
 #include <linux/seqlock.h>
 #include <linux/time.h>
+#include <linux/fairsched.h>
+#include <linux/ve.h>
 
 #define LOAD_INT(x) ((x) >> FSHIFT)
 #define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
@@ -13,25 +15,23 @@
 static int loadavg_proc_show(struct seq_file *m, void *v)
 {
        unsigned long avnrun[3];
-       long running, threads;
        struct ve_struct *ve;
 
        ve = get_exec_env();
-       if (ve_is_super(ve)) {
-               get_avenrun(avnrun, FIXED_1/200, 0);
-               running = nr_running();
-               threads = nr_threads;
-       } else {
-               get_avenrun_ve(avnrun, FIXED_1/200, 0);
-               running = nr_running_ve();
-               threads = nr_threads_ve(ve);
+       if (!ve_is_super(ve)) {
+               int ret;
+               ret = fairsched_show_loadavg(ve_name(ve), m);
+               if (ret != -ENOSYS)
+                       return ret;
        }
 
-       seq_printf(m, "%lu.%02lu %lu.%02lu %lu.%02lu %ld/%ld %d\n",
+       get_avenrun(avnrun, FIXED_1/200, 0);
+
+       seq_printf(m, "%lu.%02lu %lu.%02lu %lu.%02lu %ld/%d %d\n",
                LOAD_INT(avnrun[0]), LOAD_FRAC(avnrun[0]),
                LOAD_INT(avnrun[1]), LOAD_FRAC(avnrun[1]),
                LOAD_INT(avnrun[2]), LOAD_FRAC(avnrun[2]),
-               running, threads,
+               nr_running(), nr_threads,
                task_active_pid_ns(current)->last_pid);
        return 0;
 }
diff --git a/include/linux/fairsched.h b/include/linux/fairsched.h
index 568c25a..12bbc5b 100644
--- a/include/linux/fairsched.h
+++ b/include/linux/fairsched.h
@@ -63,12 +63,17 @@ int cpu_cgroup_proc_stat(struct cgroup *cgrp, struct cftype *cft,
                                struct seq_file *p);
 int fairsched_show_stat(const char *name, struct seq_file *p);
 
+int cpu_cgroup_proc_loadavg(struct cgroup *cgrp, struct cftype *cft,
+                           struct seq_file *p);
+int fairsched_show_loadavg(const char *name, struct seq_file *p);
+
 #else /* CONFIG_VZ_FAIRSCHED */
 
 static inline int fairsched_new_node(int id, unsigned int vcpus) { return 0; }
 static inline int fairsched_move_task(int id, struct task_struct *tsk) { return 0; }
 static inline void fairsched_drop_node(int id, int leave) { }
 static inline int fairsched_show_stat(const char *name, struct seq_file *p) { return -ENOSYS; }
+static inline int fairsched_show_loadavg(const char *name, struct seq_file *p) { return -ENOSYS; }
 static inline int fairsched_get_cpu_avenrun(const char *name, unsigned long *avenrun) { return -ENOSYS; }
 static inline int fairsched_get_cpu_stat(const char *name, struct kernel_cpustat *kstat) { return -ENOSYS; }
 
diff --git a/include/linux/sched.h b/include/linux/sched.h
index edca7b8..2871013 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -117,13 +117,6 @@ extern unsigned long nr_active_cpu(void);
 extern atomic_t nr_dead;
 extern unsigned long nr_zombie;
 
-#ifdef CONFIG_VE
-extern unsigned long nr_running_ve(void);
-#else
-#define nr_running_ve()                                0
-#endif
-
-
 extern void calc_global_load(unsigned long ticks);
 extern void update_cpu_load_nohz(void);
 
diff --git a/kernel/fairsched.c b/kernel/fairsched.c
index 7fe93d2..412afed 100644
--- a/kernel/fairsched.c
+++ b/kernel/fairsched.c
@@ -730,6 +730,21 @@ int fairsched_show_stat(const char *name, struct seq_file *p)
        return err;
 }
 
+int fairsched_show_loadavg(const char *name, struct seq_file *p)
+{
+       struct cgroup *cgrp;
+       int err;
+
+       cgrp = cgroup_kernel_open(root_node.cpu, 0, name);
+       if (IS_ERR_OR_NULL(cgrp))
+               return cgrp ? PTR_ERR(cgrp) : -ENOENT;
+
+       err = cpu_cgroup_proc_loadavg(cgrp, NULL, p);
+       cgroup_kernel_close(cgrp);
+
+       return err;
+}
+
 int fairsched_get_cpu_avenrun(const char *name, unsigned long *avenrun)
 {
        struct cgroup *cgrp;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index efc069e..c6a5ab0 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2350,26 +2350,6 @@ unsigned long nr_active_cpu(void)
        return this->nr_active;
 }
 
-#ifdef CONFIG_VE
-unsigned long nr_running_ve(void)
-{
-       struct task_group *tg = task_group(current);
-       unsigned long nr_running = 0;
-       int i;
-
-       for_each_possible_cpu(i) {
-#ifdef CONFIG_FAIR_GROUP_SCHED
-               nr_running += tg->cfs_rq[i]->nr_running;
-#endif
-#ifdef CONFIG_RT_GROUP_SCHED
-               nr_running += tg->rt_rq[i]->rt_nr_running;
-#endif
-       }
-
-       return nr_running;
-}
-#endif
-
 /*
  * Global load-average calculations
  *
@@ -8814,6 +8794,36 @@ int cpu_cgroup_proc_stat(struct cgroup *cgrp, struct cftype *cft,
        return 0;
 }
 
+int cpu_cgroup_proc_loadavg(struct cgroup *cgrp, struct cftype *cft,
+                           struct seq_file *p)
+{
+       struct task_group *tg = cgroup_tg(cgrp);
+       unsigned long avnrun[3];
+       int nr_running = 0;
+       int i;
+
+       avnrun[0] = tg->avenrun[0] + FIXED_1/200;
+       avnrun[1] = tg->avenrun[1] + FIXED_1/200;
+       avnrun[2] = tg->avenrun[2] + FIXED_1/200;
+
+       for_each_possible_cpu(i) {
+#ifdef CONFIG_FAIR_GROUP_SCHED
+               nr_running += tg->cfs_rq[i]->nr_running;
+#endif
+#ifdef CONFIG_RT_GROUP_SCHED
+               nr_running += tg->rt_rq[i]->rt_nr_running;
+#endif
+       }
+
+       seq_printf(p, "%lu.%02lu %lu.%02lu %lu.%02lu %d/%d %d\n",
+               LOAD_INT(avnrun[0]), LOAD_FRAC(avnrun[0]),
+               LOAD_INT(avnrun[1]), LOAD_FRAC(avnrun[1]),
+               LOAD_INT(avnrun[2]), LOAD_FRAC(avnrun[2]),
+               nr_running, cgroup_task_count(cgrp),
+               task_active_pid_ns(current)->last_pid);
+       return 0;
+}
+
 void cpu_cgroup_get_stat(struct cgroup *cgrp, struct kernel_cpustat *kstat)
 {
        int i, j;
@@ -8935,6 +8945,10 @@ static struct cftype cpu_files[] = {
                .read_seq_string = cpu_cgroup_proc_stat,
        },
        {
+               .name = "proc.loadavg",
+               .read_seq_string = cpu_cgroup_proc_loadavg,
+       },
+       {
                .name = "delayacct.total",
                .read_map = cpu_cgroup_delay_show,
        },
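
P.S. (not part of the patch): for reference, a small standalone sketch of how
the fixed-point avenrun values above decode into the printed "X.YY" form.
FSHIFT/FIXED_1 are restated with their standard kernel values (FSHIFT == 11)
purely for illustration; the FIXED_1/200 offset is the same rounding term the
patch adds before printing.

        #include <stdio.h>

        #define FSHIFT  11                      /* bits of precision */
        #define FIXED_1 (1 << FSHIFT)           /* 1.0 in fixed-point */
        #define LOAD_INT(x)  ((x) >> FSHIFT)
        #define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1 - 1)) * 100)

        int main(void)
        {
                /* 1024 is 0.5 in fixed-point; add the rounding offset */
                unsigned long avn = 1024 + FIXED_1 / 200;

                printf("%lu.%02lu\n", LOAD_INT(avn), LOAD_FRAC(avn)); /* 0.50 */
                return 0;
        }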