Add a cgroup interface for latency-nice. Each CPU cgroup gets a new file
"latency-nice" whose value is shared by all the threads in that cgroup.

Signed-off-by: subhra mazumdar <subhra.mazumdar@oracle.com>
---
 include/linux/sched.h |  1 +
 kernel/sched/core.c   | 40 ++++++++++++++++++++++++++++++++++++++++
 kernel/sched/fair.c   |  1 +
 kernel/sched/sched.h  |  8 ++++++++
 4 files changed, 50 insertions(+)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 1183741..b4a79c3 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -631,6 +631,7 @@ struct task_struct {
        int                             static_prio;
        int                             normal_prio;
        unsigned int                    rt_priority;
+       u64                             latency_nice;
 
        const struct sched_class        *sched_class;
        struct sched_entity             se;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 874c427..47969bc 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5976,6 +5976,7 @@ void __init sched_init(void)
                init_dl_rq(&rq->dl);
 #ifdef CONFIG_FAIR_GROUP_SCHED
                root_task_group.shares = ROOT_TASK_GROUP_LOAD;
+               root_task_group.latency_nice = LATENCY_NICE_DEFAULT;
                INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
                rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
                /*
@@ -6345,6 +6346,7 @@ static void sched_change_group(struct task_struct *tsk, int type)
         */
        tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
                          struct task_group, css);
+       tsk->latency_nice = tg->latency_nice;
        tg = autogroup_task_group(tsk, tg);
        tsk->sched_task_group = tg;
 
@@ -6812,6 +6814,34 @@ static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
 }
 #endif /* CONFIG_RT_GROUP_SCHED */
 
+static u64 cpu_latency_nice_read_u64(struct cgroup_subsys_state *css,
+                                    struct cftype *cft)
+{
+       struct task_group *tg = css_tg(css);
+
+       return tg->latency_nice;
+}
+
+static int cpu_latency_nice_write_u64(struct cgroup_subsys_state *css,
+                                     struct cftype *cft, u64 latency_nice)
+{
+       struct task_group *tg = css_tg(css);
+       struct css_task_iter it;
+       struct task_struct *p;
+
+       if (latency_nice < LATENCY_NICE_MIN || latency_nice > LATENCY_NICE_MAX)
+               return -ERANGE;
+
+       tg->latency_nice = latency_nice;
+
+       css_task_iter_start(css, 0, &it);
+       while ((p = css_task_iter_next(&it)))
+               p->latency_nice = latency_nice;
+       css_task_iter_end(&it);
+
+       return 0;
+}
+
 static struct cftype cpu_legacy_files[] = {
 #ifdef CONFIG_FAIR_GROUP_SCHED
        {
@@ -6848,6 +6878,11 @@ static struct cftype cpu_legacy_files[] = {
                .write_u64 = cpu_rt_period_write_uint,
        },
 #endif
+       {
+               .name = "latency-nice",
+               .read_u64 = cpu_latency_nice_read_u64,
+               .write_u64 = cpu_latency_nice_write_u64,
+       },
        { }     /* Terminate */
 };
 
@@ -7015,6 +7050,11 @@ static struct cftype cpu_files[] = {
                .write = cpu_max_write,
        },
 #endif
+       {
+               .name = "latency-nice",
+               .read_u64 = cpu_latency_nice_read_u64,
+               .write_u64 = cpu_latency_nice_write_u64,
+       },
        { }     /* terminate */
 };
 
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f35930f..b08d00c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -10479,6 +10479,7 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
                goto err;
 
        tg->shares = NICE_0_LOAD;
+       tg->latency_nice = LATENCY_NICE_DEFAULT;
 
        init_cfs_bandwidth(tg_cfs_bandwidth(tg));
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index b52ed1a..365c928 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -143,6 +143,13 @@ static inline void cpu_load_update_active(struct rq *this_rq) { }
 #define NICE_0_LOAD            (1L << NICE_0_LOAD_SHIFT)
 
 /*
+ * Latency-nice default value
+ */
+#define        LATENCY_NICE_DEFAULT    5
+#define        LATENCY_NICE_MIN        1
+#define        LATENCY_NICE_MAX        100
+
+/*
  * Single value that decides SCHED_DEADLINE internal math precision.
  * 10 -> just above 1us
  * 9  -> just above 0.5us
@@ -362,6 +369,7 @@ struct cfs_bandwidth {
 /* Task group related information */
 struct task_group {
        struct cgroup_subsys_state css;
+       u64 latency_nice;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
        /* schedulable entities of this group on each CPU */
-- 
2.9.3

Reply via email to