From: Peter Zijlstra <pet...@infradead.org>

Create a single function that initializes a sched_dl_entity.
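
As an illustration (taken verbatim from the core.c hunk below), __sched_fork()
then replaces the open-coded setup:

	RB_CLEAR_NODE(&p->dl.rb_node);
	init_dl_task_timer(&p->dl);
	init_dl_inactive_task_timer(&p->dl);
	__dl_clear_params(p);

with a single call:

	init_dl_entity(&p->dl);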

Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
---
 kernel/sched/core.c     |  5 +----
 kernel/sched/deadline.c | 22 +++++++++++++++-------
 kernel/sched/sched.h    |  5 +----
 3 files changed, 17 insertions(+), 15 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 12e1f3a2cabc6..6b36bf82b53c2 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3081,10 +3081,7 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
        memset(&p->se.statistics, 0, sizeof(p->se.statistics));
 #endif
 
-       RB_CLEAR_NODE(&p->dl.rb_node);
-       init_dl_task_timer(&p->dl);
-       init_dl_inactive_task_timer(&p->dl);
-       __dl_clear_params(p);
+       init_dl_entity(&p->dl);
 
        INIT_LIST_HEAD(&p->rt.run_list);
        p->rt.timeout           = 0;
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 2ece83b5991f5..8d909bdb9a119 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -219,6 +219,8 @@ static void dl_change_utilization(struct task_struct *p, u64 new_bw)
        __add_rq_bw(new_bw, &rq->dl);
 }
 
+static void __dl_clear_params(struct sched_dl_entity *dl_se);
+
 /*
  * The utilization of a task cannot be immediately removed from
  * the rq active utilization (running_bw) when the task blocks.
@@ -317,7 +319,7 @@ static void task_non_contending(struct task_struct *p)
                                sub_rq_bw(&p->dl, &rq->dl);
                        raw_spin_lock(&dl_b->lock);
                        __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
-                       __dl_clear_params(p);
+                       __dl_clear_params(dl_se);
                        raw_spin_unlock(&dl_b->lock);
                }
 
@@ -1123,7 +1125,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
        return HRTIMER_NORESTART;
 }
 
-void init_dl_task_timer(struct sched_dl_entity *dl_se)
+static void init_dl_task_timer(struct sched_dl_entity *dl_se)
 {
        struct hrtimer *timer = &dl_se->dl_timer;
 
@@ -1335,7 +1337,7 @@ static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
                raw_spin_lock(&dl_b->lock);
                __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
                raw_spin_unlock(&dl_b->lock);
-               __dl_clear_params(p);
+               __dl_clear_params(dl_se);
 
                goto unlock;
        }
@@ -1351,7 +1353,7 @@ static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
        return HRTIMER_NORESTART;
 }
 
-void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se)
+static void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se)
 {
        struct hrtimer *timer = &dl_se->inactive_timer;
 
@@ -2741,10 +2743,8 @@ bool __checkparam_dl(const struct sched_attr *attr)
 /*
  * This function clears the sched_dl_entity static params.
  */
-void __dl_clear_params(struct task_struct *p)
+static void __dl_clear_params(struct sched_dl_entity *dl_se)
 {
-       struct sched_dl_entity *dl_se = &p->dl;
-
        dl_se->dl_runtime               = 0;
        dl_se->dl_deadline              = 0;
        dl_se->dl_period                = 0;
@@ -2759,6 +2759,14 @@ void __dl_clear_params(struct task_struct *p)
        dl_se->dl_overrun               = 0;
 }
 
+void init_dl_entity(struct sched_dl_entity *dl_se)
+{
+       RB_CLEAR_NODE(&dl_se->rb_node);
+       init_dl_task_timer(dl_se);
+       init_dl_inactive_task_timer(dl_se);
+       __dl_clear_params(dl_se);
+}
+
 bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
 {
        struct sched_dl_entity *dl_se = &p->dl;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 963c16fc27500..62304d4de99cc 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -255,8 +255,6 @@ struct rt_bandwidth {
        unsigned int            rt_period_active;
 };
 
-void __dl_clear_params(struct task_struct *p);
-
 /*
  * To keep the bandwidth of -deadline tasks and groups under control
  * we need some place where:
@@ -1939,8 +1937,7 @@ extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime
 
 extern struct dl_bandwidth def_dl_bandwidth;
 extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
-extern void init_dl_task_timer(struct sched_dl_entity *dl_se);
-extern void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se);
+extern void init_dl_entity(struct sched_dl_entity *dl_se);
 
 #define BW_SHIFT               20
 #define BW_UNIT                        (1 << BW_SHIFT)
-- 
2.26.2
