sched: Drop rq->lock from sched_exec()
Since we can now call select_task_rq() and set_task_cpu() with only
p->pi_lock held, and sched_exec() load-balancing has always been
optimistic, drop all rq->lock usage.

Oleg also noted that need_migrate_task() will always be true for
current, so don't bother calling that at all.

Reviewed-by: Frank Rowand <frank.rowand@am.sony.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/20110405152729.314204889@chello.nl
Signed-off-by: Ingo Molnar <mingo@elte.hu>
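On Oleg's observation: need_migrate_task() in this series boils down to
"is the task queued on a runqueue or currently executing, i.e. would the
next wakeup not place it anyway?". The sketch below is a hedged paraphrase
of that check, not the verbatim kernel source of this era. Since
sched_exec() always operates on current, which is by definition executing
right now, the test can never return false here, which is why the patch
drops the call entirely.

	/*
	 * Hedged paraphrase of the check this patch removes (not the
	 * verbatim kernel source): an explicit migration is only needed
	 * if the task is queued or executing; otherwise the next wakeup
	 * will place it correctly on its own.
	 */
	static bool need_migrate_task(struct task_struct *p)
	{
		bool running = p->on_rq || p->on_cpu;	/* queued or executing? */

		return running;	/* for p == current, ->on_cpu is always true */
	}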
parent ab2515c4b9
commit 8f42ced974
@@ -3465,27 +3465,22 @@ void sched_exec(void)
 {
 	struct task_struct *p = current;
 	unsigned long flags;
-	struct rq *rq;
 	int dest_cpu;
 
-	rq = task_rq_lock(p, &flags);
+	raw_spin_lock_irqsave(&p->pi_lock, flags);
 	dest_cpu = p->sched_class->select_task_rq(p, SD_BALANCE_EXEC, 0);
 	if (dest_cpu == smp_processor_id())
 		goto unlock;
 
-	/*
-	 * select_task_rq() can race against ->cpus_allowed
-	 */
-	if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed) &&
-	    likely(cpu_active(dest_cpu)) && need_migrate_task(p)) {
+	if (likely(cpu_active(dest_cpu))) {
 		struct migration_arg arg = { p, dest_cpu };
 
-		task_rq_unlock(rq, p, &flags);
-		stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
+		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+		stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
 		return;
 	}
 unlock:
-	task_rq_unlock(rq, p, &flags);
+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 }
 
 #endif
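For orientation, here is how sched_exec() reads once the hunk above is
applied, assembled from its context and "+" lines; this is a sketch, and
code outside the hunk is omitted:

	void sched_exec(void)
	{
		struct task_struct *p = current;
		unsigned long flags;
		int dest_cpu;

		/* p->pi_lock alone is now enough to call select_task_rq(). */
		raw_spin_lock_irqsave(&p->pi_lock, flags);
		dest_cpu = p->sched_class->select_task_rq(p, SD_BALANCE_EXEC, 0);
		if (dest_cpu == smp_processor_id())
			goto unlock;

		if (likely(cpu_active(dest_cpu))) {
			struct migration_arg arg = { p, dest_cpu };

			/* Drop the lock before kicking the stopper thread;
			 * the balance is optimistic by design. */
			raw_spin_unlock_irqrestore(&p->pi_lock, flags);
			stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
			return;
		}
	unlock:
		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
	}

The design point is that once pi_lock is dropped, p can be migrated or
have its affinity changed before the stopper runs; the migration callback
is expected to re-validate the placement under the proper locks, so a
stale dest_cpu is a missed optimization rather than a correctness problem.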