sched/walt: Avoid walt irq work in offlined CPU

Avoid queueing walt irq work on an offlined CPU; if the current CPU is
offline, redirect the irq work to any online CPU instead.

[clingutla@codeaurora.org: Resolved trivial merge conflicts]

Change-Id: Ia4410562f66bfa57daa15d8c0a785a2c7a95f2a0
Signed-off-by: Maria Yu <aiquny@codeaurora.org>
Signed-off-by: Lingutla Chandrasekhar <clingutla@codeaurora.org>
Signed-off-by: Adam W. Willis <return.of.octobot@gmail.com>
Signed-off-by: Yaroslav Furman <yaro330@gmail.com>
Signed-off-by: Richard Raya <rdxzv.dev@gmail.com>
This commit is contained in:
Maria Yu 2019-08-13 17:12:33 +08:00 committed by Richard Raya
parent 9b0e8fcc6d
commit 16da99cd69
2 changed files with 12 additions and 2 deletions

View File

@ -3111,3 +3111,13 @@ struct sched_avg_stats {
int nr_max;
};
extern void sched_get_nr_running_avg(struct sched_avg_stats *stats);
#ifdef CONFIG_SMP
/*
 * Queue @work safely with respect to CPU hotplug: irq work must not be
 * queued on a CPU that has gone offline, so fall back to any online CPU
 * in that case. In the common case the current CPU is online and the
 * work is queued locally.
 */
static inline void sched_irq_work_queue(struct irq_work *work)
{
	if (unlikely(!cpu_online(raw_smp_processor_id())))
		irq_work_queue_on(work, cpumask_any(cpu_online_mask));
	else
		irq_work_queue(work);
}
#endif

View File

@ -872,7 +872,7 @@ void fixup_busy_time(struct task_struct *p, int new_cpu)
if (!same_freq_domain(new_cpu, task_cpu(p))) {
src_rq->notif_pending = true;
dest_rq->notif_pending = true;
irq_work_queue(&walt_migration_irq_work);
sched_irq_work_queue(&walt_migration_irq_work);
}
if (is_ed_enabled()) {
@ -1954,7 +1954,7 @@ static inline void run_walt_irq_work(u64 old_window_start, struct rq *rq)
result = atomic64_cmpxchg(&walt_irq_work_lastq_ws, old_window_start,
rq->window_start);
if (result == old_window_start)
irq_work_queue(&walt_cpufreq_irq_work);
sched_irq_work_queue(&walt_cpufreq_irq_work);
}
/* Reflect task activity on its demand and cpu's busy time statistics */