Analogous to the existing {en,de}queue_runnable_load_avg(), add helpers
for {en,de}queue_load_avg(). More users will follow.

Includes some code movement to avoid fwd declarations.

Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
---
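
Note: the semantics of the two clamping macros this patch moves are
easiest to check in isolation. Below is a minimal userspace sketch (not
kernel code; READ_ONCE()/WRITE_ONCE() are approximated with volatile
stand-ins, builds with gcc since it uses typeof). As in the kernel, the
target is an unsigned accumulator and add_positive() takes a signed
delta:

	#include <stdio.h>

	#define READ_ONCE(x)		(*(const volatile typeof(x) *)&(x))
	#define WRITE_ONCE(x, v)	(*(volatile typeof(x) *)&(x) = (v))

	/* Signed add; clamp to 0 when a negative delta would underflow. */
	#define add_positive(_ptr, _val) do {			\
		typeof(_ptr) ptr = (_ptr);			\
		typeof(_val) val = (_val);			\
		typeof(*ptr) res, var = READ_ONCE(*ptr);	\
		res = var + val;				\
		if (val < 0 && res > var)			\
			res = 0;				\
		WRITE_ONCE(*ptr, res);				\
	} while (0)

	/* Unsigned subtract; clamp to 0 on underflow. */
	#define sub_positive(_ptr, _val) do {			\
		typeof(_ptr) ptr = (_ptr);			\
		typeof(*ptr) val = (_val);			\
		typeof(*ptr) res, var = READ_ONCE(*ptr);	\
		res = var - val;				\
		if (res > var)					\
			res = 0;				\
		WRITE_ONCE(*ptr, res);				\
	} while (0)

	int main(void)
	{
		unsigned long avg = 10;

		add_positive(&avg, 5L);		/* 15 */
		add_positive(&avg, -40L);	/* wraps above var; clamped */
		printf("avg = %lu\n", avg);	/* 0 */

		avg = 100;
		sub_positive(&avg, 40);		/* 60 */
		sub_positive(&avg, 80);		/* wraps above var; clamped */
		printf("avg = %lu\n", avg);	/* 0 */

		return 0;
	}

The (possibly wrapped) intermediate only ever lives in a local; the
single WRITE_ONCE() means a lockless reader observes either the old
value or the clamped result, never a transient negative.
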
 kernel/sched/fair.c |  156 ++++++++++++++++++++++++++++------------------------
 1 file changed, 86 insertions(+), 70 deletions(-)

--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2705,6 +2705,90 @@ account_entity_dequeue(struct cfs_rq *cf
        cfs_rq->nr_running--;
 }
 
+/*
+ * Signed add and clamp on underflow.
+ *
+ * Explicitly do a load-store to ensure the intermediate value never hits
+ * memory. This allows lockless observations without ever seeing the negative
+ * values.
+ */
+#define add_positive(_ptr, _val) do {                           \
+       typeof(_ptr) ptr = (_ptr);                              \
+       typeof(_val) val = (_val);                              \
+       typeof(*ptr) res, var = READ_ONCE(*ptr);                \
+                                                               \
+       res = var + val;                                        \
+                                                               \
+       if (val < 0 && res > var)                               \
+               res = 0;                                        \
+                                                               \
+       WRITE_ONCE(*ptr, res);                                  \
+} while (0)
+
+/*
+ * Unsigned subtract and clamp on underflow.
+ *
+ * Explicitly do a load-store to ensure the intermediate value never hits
+ * memory. This allows lockless observations without ever seeing the negative
+ * values.
+ */
+#define sub_positive(_ptr, _val) do {                          \
+       typeof(_ptr) ptr = (_ptr);                              \
+       typeof(*ptr) val = (_val);                              \
+       typeof(*ptr) res, var = READ_ONCE(*ptr);                \
+       res = var - val;                                        \
+       if (res > var)                                          \
+               res = 0;                                        \
+       WRITE_ONCE(*ptr, res);                                  \
+} while (0)
+
+#ifdef CONFIG_SMP
+/*
+ * XXX we want to get rid of this helper and use the full load resolution.
+ */
+static inline long se_weight(struct sched_entity *se)
+{
+       return scale_load_down(se->load.weight);
+}
+
+static inline void
+enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+       cfs_rq->runnable_load_avg += se->avg.load_avg;
+       cfs_rq->runnable_load_sum += se_weight(se) * se->avg.load_sum;
+}
+
+static inline void
+dequeue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+       sub_positive(&cfs_rq->runnable_load_avg, se->avg.load_avg);
+       sub_positive(&cfs_rq->runnable_load_sum, se_weight(se) * se->avg.load_sum);
+}
+
+static inline void
+enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+       cfs_rq->avg.load_avg += se->avg.load_avg;
+       cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum;
+}
+
+static inline void
+dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+       sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
+       sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum);
+}
+#else
+static inline void
+enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
+static inline void
+dequeue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
+static inline void
+enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
+static inline void
+dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
+#endif
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 # ifdef CONFIG_SMP
 /*
@@ -3097,14 +3181,6 @@ ___update_load_avg(struct sched_avg *sa,
 }
 
 /*
- * XXX we want to get rid of this helper and use the full load resolution.
- */
-static inline long se_weight(struct sched_entity *se)
-{
-       return scale_load_down(se->load.weight);
-}
-
-/*
  * sched_entity:
  *
  *   load_sum := runnable_sum
@@ -3153,26 +3229,6 @@ __update_load_avg_cfs_rq(u64 now, int cp
        return 0;
 }
 
-/*
- * Signed add and clamp on underflow.
- *
- * Explicitly do a load-store to ensure the intermediate value never hits
- * memory. This allows lockless observations without ever seeing the negative
- * values.
- */
-#define add_positive(_ptr, _val) do {                           \
-       typeof(_ptr) ptr = (_ptr);                              \
-       typeof(_val) val = (_val);                              \
-       typeof(*ptr) res, var = READ_ONCE(*ptr);                \
-                                                               \
-       res = var + val;                                        \
-                                                               \
-       if (val < 0 && res > var)                               \
-               res = 0;                                        \
-                                                               \
-       WRITE_ONCE(*ptr, res);                                  \
-} while (0)
-
 #ifdef CONFIG_FAIR_GROUP_SCHED
 /**
  * update_tg_load_avg - update the tg's load avg
@@ -3417,23 +3473,6 @@ static inline void set_tg_cfs_propagate(
 
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
-/*
- * Unsigned subtract and clamp on underflow.
- *
- * Explicitly do a load-store to ensure the intermediate value never hits
- * memory. This allows lockless observations without ever seeing the negative
- * values.
- */
-#define sub_positive(_ptr, _val) do {                          \
-       typeof(_ptr) ptr = (_ptr);                              \
-       typeof(*ptr) val = (_val);                              \
-       typeof(*ptr) res, var = READ_ONCE(*ptr);                \
-       res = var - val;                                        \
-       if (res > var)                                          \
-               res = 0;                                        \
-       WRITE_ONCE(*ptr, res);                                  \
-} while (0)
-
 /**
  * update_cfs_rq_load_avg - update the cfs_rq's load/util averages
  * @now: current time, as per cfs_rq_clock_task()
@@ -3496,8 +3535,7 @@ update_cfs_rq_load_avg(u64 now, struct c
 static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
        se->avg.last_update_time = cfs_rq->avg.last_update_time;
-       cfs_rq->avg.load_avg += se->avg.load_avg;
-       cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum;
+       enqueue_load_avg(cfs_rq, se);
        cfs_rq->avg.util_avg += se->avg.util_avg;
        cfs_rq->avg.util_sum += se->avg.util_sum;
        set_tg_cfs_propagate(cfs_rq);
@@ -3515,9 +3553,7 @@ static void attach_entity_load_avg(struc
  */
 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-
-       sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
-       sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum);
+       dequeue_load_avg(cfs_rq, se);
        sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
        sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
        set_tg_cfs_propagate(cfs_rq);
@@ -3559,22 +3595,6 @@ static inline void update_load_avg(struc
                update_tg_load_avg(cfs_rq, 0);
 }
 
-/* Add the load generated by se into cfs_rq's load average */
-static inline void
-enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
-       cfs_rq->runnable_load_avg += se->avg.load_avg;
-       cfs_rq->runnable_load_sum += se_weight(se) * se->avg.load_sum;
-}
-
-/* Remove the runnable load generated by se from cfs_rq's runnable load average */
-static inline void
-dequeue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
-       sub_positive(&cfs_rq->runnable_load_avg, se->avg.load_avg);
-       sub_positive(&cfs_rq->runnable_load_sum, se_weight(se) * se->avg.load_sum);
-}
-
 #ifndef CONFIG_64BIT
 static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
 {
@@ -3661,10 +3681,6 @@ static inline void update_load_avg(struc
        cfs_rq_util_change(cfs_rq);
 }
 
-static inline void
-enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
-static inline void
-dequeue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
 static inline void remove_entity_load_avg(struct sched_entity *se) {}
 
 static inline void

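On the se_weight() XXX above: with CONFIG_64BIT, task weights are stored
with SCHED_FIXEDPOINT_SHIFT extra bits of resolution and se_weight()
shifts them back down. A userspace sketch of that relationship, with the
constants mirrored from kernel/sched/sched.h (for illustration only, not
part of the patch):

	#include <stdio.h>

	#define SCHED_FIXEDPOINT_SHIFT	10
	/* CONFIG_64BIT variants; 32bit defines both as no-ops */
	#define scale_load(w)		((w) << SCHED_FIXEDPOINT_SHIFT)
	#define scale_load_down(w)	((w) >> SCHED_FIXEDPOINT_SHIFT)

	int main(void)
	{
		/* a nice-0 task: se->load.weight as stored vs. se_weight() */
		unsigned long weight = scale_load(1024UL);

		printf("se->load.weight = %lu, se_weight() = %lu\n",
		       weight, scale_load_down(weight));
		return 0;
	}

Using the full-resolution weight directly in the *_sum products is what
the XXX refers to; until then, the helpers consistently use the
scaled-down weight.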
