diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index b7f192c1376d..e0e43ee69899 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -3111,3 +3111,13 @@ struct sched_avg_stats {
 	int nr_max;
 };
 extern void sched_get_nr_running_avg(struct sched_avg_stats *stats);
+
+#ifdef CONFIG_SMP
+static inline void sched_irq_work_queue(struct irq_work *work)
+{
+	if (likely(cpu_online(raw_smp_processor_id())))
+		irq_work_queue(work);
+	else
+		irq_work_queue_on(work, cpumask_any(cpu_online_mask));
+}
+#endif
diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c
index c1a4153ef3df..62dcaaa48af9 100644
--- a/kernel/sched/walt.c
+++ b/kernel/sched/walt.c
@@ -872,7 +872,7 @@ void fixup_busy_time(struct task_struct *p, int new_cpu)
 	if (!same_freq_domain(new_cpu, task_cpu(p))) {
 		src_rq->notif_pending = true;
 		dest_rq->notif_pending = true;
-		irq_work_queue(&walt_migration_irq_work);
+		sched_irq_work_queue(&walt_migration_irq_work);
 	}
 
 	if (is_ed_enabled()) {
@@ -1954,7 +1954,7 @@ static inline void run_walt_irq_work(u64 old_window_start, struct rq *rq)
 	result = atomic64_cmpxchg(&walt_irq_work_lastq_ws,
 				  old_window_start, rq->window_start);
 	if (result == old_window_start)
-		irq_work_queue(&walt_cpufreq_irq_work);
+		sched_irq_work_queue(&walt_cpufreq_irq_work);
 }
 
 /* Reflect task activity on its demand and cpu's busy time statistics */
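
A note on the fix (commentary, not part of the patch): sched_irq_work_queue() prefers queueing the irq_work on the local CPU, which is cheap and needs no IPI, and only falls back to irq_work_queue_on() with an arbitrary online CPU when the local CPU has already been cleared from cpu_online_mask. That window exists during CPU hotplug: WALT paths such as fixup_busy_time() and run_walt_irq_work() can still execute on a dying CPU, where a plain irq_work_queue() would arm the work on a CPU that will never service it.

The sketch below is a minimal, hypothetical test module that exercises the same queueing pattern (assuming CONFIG_SMP, as the helper does). The demo_* names and the module scaffolding are mine, not from the patch; only the irq_work/cpumask calls are real kernel APIs. Unlike the scheduler callers, which run with the runqueue lock held and interrupts off, module code must pin itself with get_cpu() so the online check and the queueing happen on the same CPU.

#include <linux/module.h>
#include <linux/init.h>
#include <linux/irq_work.h>
#include <linux/cpumask.h>
#include <linux/smp.h>

static void demo_irq_work_fn(struct irq_work *work)
{
	/* Runs in hard-irq context on whichever CPU accepted the work. */
	pr_info("demo irq_work ran on CPU %d\n", raw_smp_processor_id());
}

static struct irq_work demo_work;

static int __init demo_init(void)
{
	int cpu;

	init_irq_work(&demo_work, demo_irq_work_fn);

	/*
	 * Same decision as sched_irq_work_queue(): queue locally when
	 * this CPU is online, otherwise redirect to any online CPU.
	 */
	cpu = get_cpu();
	if (likely(cpu_online(cpu)))
		irq_work_queue(&demo_work);
	else
		irq_work_queue_on(&demo_work, cpumask_any(cpu_online_mask));
	put_cpu();

	return 0;
}

static void __exit demo_exit(void)
{
	/* Wait for any pending invocation before the module text goes away. */
	irq_work_sync(&demo_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The local-first choice is deliberate: irq_work_queue() just sets a flag and raises a self-interrupt, while irq_work_queue_on() issues a cross-CPU IPI, so the fallback path is reserved for the rare hotplug race rather than used unconditionally.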