Revert "sched/fair: Drop always true parameter of update_cfs_rq_load_avg()"

We need the update_freq parameter of update_cfs_rq_load_avg() back in order to
implement "sched/fair: Skip frequency updates if CPU about to idle".

This reverts commit 3a123bbbb10d54dbdde6ccbbd519c74c91ba2f52.

Change-Id: I8abe4b2e0ae0c610a1477c24fb7b356fec0de409
Signed-off-by: Yaroslav Furman <yaro330@gmail.com>
Signed-off-by: Richard Raya <rdxzv.dev@gmail.com>
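
For reference, the gating that this revert brings back can be summarised with a
small standalone C sketch. It is not the kernel code itself: the *_mock names and
the simplified struct are illustrative stand-ins; only the
update_freq && (decayed || removed_util) condition mirrors the restored logic.

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-in for struct cfs_rq: only the two facts that matter here. */
    struct cfs_rq_mock {
        const char *name;
        bool decayed;       /* load/util averages decayed during this update */
        bool removed_util;  /* some utilization was removed from the runqueue */
    };

    /* Stand-in for cfs_rq_util_change(), which would kick a cpufreq update. */
    static void cfs_rq_util_change_mock(struct cfs_rq_mock *cfs_rq)
    {
        printf("%s: frequency update requested\n", cfs_rq->name);
    }

    /*
     * Mirrors the restored behaviour: the frequency update is only triggered
     * when the caller asked for it (update_freq) AND something actually changed.
     */
    static int update_cfs_rq_load_avg_mock(struct cfs_rq_mock *cfs_rq, bool update_freq)
    {
        if (update_freq && (cfs_rq->decayed || cfs_rq->removed_util))
            cfs_rq_util_change_mock(cfs_rq);

        return cfs_rq->decayed;
    }

    int main(void)
    {
        struct cfs_rq_mock rq = { "cfs_rq[0]", true, false };

        update_cfs_rq_load_avg_mock(&rq, true);   /* normal caller: update fires   */
        update_cfs_rq_load_avg_mock(&rq, false);  /* caller defers it: stays quiet */
        return 0;
    }

Callers that want to defer or skip the cpufreq update, which is presumably how the
follow-up "Skip frequency updates if CPU about to idle" change will use it, can pass
false; that is exactly why the parameter is being restored.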
Commit: 50c8304c35 (parent: a4fe90ac84)
Author: Yaroslav Furman <yaro330@gmail.com>, 2019-05-27 19:51:50 +03:00
Committer: Richard Raya <rdxzv.dev@gmail.com>

--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c

@@ -877,7 +877,7 @@ void post_init_entity_util_avg(struct sched_entity *se)
 			/*
 			 * For !fair tasks do:
 			 *
-			update_cfs_rq_load_avg(now, cfs_rq);
+			update_cfs_rq_load_avg(now, cfs_rq, false);
 			attach_entity_load_avg(cfs_rq, se);
 			switched_from_fair(rq, p);
 			 *
@@ -3560,6 +3560,7 @@ static inline void set_tg_cfs_propagate(struct cfs_rq *cfs_rq) {}
  * update_cfs_rq_load_avg - update the cfs_rq's load/util averages
  * @now: current time, as per cfs_rq_clock_task()
  * @cfs_rq: cfs_rq to update
+ * @update_freq: should we call cfs_rq_util_change() or will the call do so
  *
  * The cfs_rq avg is the direct sum of all its entities (blocked and runnable)
  * avg. The immediate corollary is that all (fair) tasks must be attached, see
@@ -3573,7 +3574,7 @@ static inline void set_tg_cfs_propagate(struct cfs_rq *cfs_rq) {}
  * call update_tg_load_avg() when this function returns true.
  */
 static inline int
-update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
+update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
 {
 	struct sched_avg *sa = &cfs_rq->avg;
 	int decayed, removed_load = 0, removed_util = 0;
@@ -3601,7 +3602,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 	cfs_rq->load_last_update_time_copy = sa->last_update_time;
 #endif
 
-	if (decayed || removed_util)
+	if (update_freq && (decayed || removed_util))
 		cfs_rq_util_change(cfs_rq);
 
 	return decayed || removed_load;
@@ -3638,7 +3639,7 @@ static inline void update_load_avg(struct sched_entity *se, int flags)
 	if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD))
 		__update_load_avg_se(now, cpu, cfs_rq, se);
 
-	decayed = update_cfs_rq_load_avg(now, cfs_rq);
+	decayed = update_cfs_rq_load_avg(now, cfs_rq, true);
 	decayed |= propagate_entity_load_avg(se);
 
 	if (decayed && (flags & UPDATE_TG))
@@ -3919,7 +3920,7 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
 #else /* CONFIG_SMP */
 
 static inline int
-update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
+update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
 {
 	return 0;
 }
@@ -9687,7 +9688,7 @@ static void update_blocked_averages(int cpu)
 		if (throttled_hierarchy(cfs_rq))
 			continue;
 
-		if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq))
+		if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq, true))
 			update_tg_load_avg(cfs_rq, 0);
 
 		/* Propagate pending load changes to the parent, if any: */
@@ -9757,7 +9758,7 @@ static inline void update_blocked_averages(int cpu)
 
 	rq_lock_irqsave(rq, &rf);
 	update_rq_clock(rq);
-	update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq);
+	update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq, true);
 	update_rt_rq_load_avg(rq_clock_task(rq), cpu, &rq->rt, 0);
 #ifdef CONFIG_NO_HZ_COMMON
 	rq->last_blocked_load_update_tick = jiffies;