On Fri, Jul 18, 2014 at 07:26:06AM +0800, Yuyang Du wrote: > @@ -665,20 +660,27 @@ static u64 sched_vslice(struct cfs_rq *cfs_rq, struct > sched_entity *se) > } > > #ifdef CONFIG_SMP > -static unsigned long task_h_load(struct task_struct *p); > > -static inline void __update_task_entity_contrib(struct sched_entity *se); > +/* dependent on LOAD_AVG_PERIOD, see below */ > +#define LOAD_AVG_MAX 47742 /* maximum possible load avg */
Please don't separate this from the rest of the values it belongs to. If you really have to, move the entire block. > @@ -2071,13 +2073,9 @@ static inline long calc_tg_weight(struct task_group > *tg, struct cfs_rq *cfs_rq) > long tg_weight; > > /* > - * Use this CPU's actual weight instead of the last load_contribution > - * to gain a more accurate current total weight. See > - * update_cfs_rq_load_contribution(). > + * Use this CPU's load average instead of actual weight > */ > tg_weight = atomic_long_read(&tg->load_avg); > - tg_weight -= cfs_rq->tg_load_contrib; > - tg_weight += cfs_rq->load.weight; I don't think that comment makes any sense after this. The comment was there to explain the -=, += things, but that's all gone so it's pretty trivial now, and i++ /* inc by one */ comments are not useful. > @@ -2181,7 +2178,7 @@ static const u32 runnable_avg_yN_sum[] = { > * Approximate: > * val * y^n, where y^32 ~= 0.5 (~1 scheduling period) > */ > -static __always_inline u64 decay_load(u64 val, u64 n) > +static __always_inline u64 decay_load32(u64 val, u64 n) > { > unsigned int local_n; > > @@ -2210,6 +2207,18 @@ static __always_inline u64 decay_load(u64 val, u64 n) > return val >> 32; > } > > +static __always_inline u64 decay_load(u64 val, u64 n) > +{ > + if (likely(val <= UINT_MAX)) > + val = decay_load32(val, n); > + else { > + val *= (u32)decay_load32(1 << 15, n); > + val >>= 15; > + } > + > + return val; > +} Please just use mul_u64_u32_shr(). /me continues reading the rest of it..
pgpQJsmXgChEW.pgp
Description: PGP signature