Revert "msm-4.14: Drop sched_migrate_to_cpumask"

This reverts commit 602aa3bba862bb7ff51bdf2c9303db4b057f5353.

Change-Id: I4517bdb857e7e1ab02749596dedcaa8220dc040a
Signed-off-by: Richard Raya <rdxzv.dev@gmail.com>
Author: Richard Raya <rdxzv.dev@gmail.com>
Date:   2025-01-31 19:31:48 -03:00
Parent: e72721b923
Commit: 0636ec7ac0

4 changed files with 54 additions and 0 deletions

@@ -290,6 +290,7 @@ struct proc_maps_private {
 #ifdef CONFIG_NUMA
 	struct mempolicy *task_mempolicy;
 #endif
+	unsigned long old_cpus_allowed;
 } __randomize_layout;
 
 struct mm_struct *proc_mem_open(struct inode *inode, unsigned int mode);

@@ -189,6 +189,9 @@ static void vma_stop(struct proc_maps_private *priv)
 	release_task_mempolicy(priv);
 	up_read(&mm->mmap_sem);
 	mmput(mm);
+	sched_migrate_to_cpumask_end(to_cpumask(&priv->old_cpus_allowed),
+				     cpu_lp_mask);
 }
 
 static struct vm_area_struct *
@@ -225,6 +228,9 @@ static void *m_start(struct seq_file *m, loff_t *ppos)
 	if (!mm || !mmget_not_zero(mm))
 		return NULL;
 
+	sched_migrate_to_cpumask_start(to_cpumask(&priv->old_cpus_allowed),
+				       cpu_lp_mask);
 	down_read(&mm->mmap_sem);
 	hold_task_mempolicy(priv);
 	priv->tail_vma = get_gate_vma(mm);

@@ -1800,6 +1800,11 @@ static inline bool cpupri_check_rt(void)
 }
 #endif
 
+void sched_migrate_to_cpumask_start(struct cpumask *old_mask,
+				    const struct cpumask *dest);
+void sched_migrate_to_cpumask_end(const struct cpumask *old_mask,
+				  const struct cpumask *dest);
+
 #ifndef cpu_relax_yield
 #define cpu_relax_yield() cpu_relax()
 #endif
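
For reference, a minimal caller sketch of the pair declared above. This is a hypothetical illustration, not part of the patch: only sched_migrate_to_cpumask_start()/end(), to_cpumask() and cpu_lp_mask come from this kernel; the function name, do_heavy_scan() and the local backing word are made up. It shows the intended pattern, the same one the fs/proc hunks use: save the current affinity, pin the task to the little cluster for the expensive section, then restore lazily.

/* Hypothetical caller, mirroring the fs/proc usage above. Process context
 * only, and start/end pairs cannot nest (see the comment in the scheduler
 * hunk below). */
static void scan_on_little_cluster(void)
{
	unsigned long old_cpus_allowed;	/* one-word backing store, as in proc_maps_private */

	/* Save current->cpus_ptr and move this task onto the LITTLE cores */
	sched_migrate_to_cpumask_start(to_cpumask(&old_cpus_allowed),
				       cpu_lp_mask);

	do_heavy_scan();	/* hypothetical: the work to keep off the big cores */

	/* Lazily restore the saved affinity; no forced migration happens here */
	sched_migrate_to_cpumask_end(to_cpumask(&old_cpus_allowed),
				     cpu_lp_mask);
}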

@@ -2134,6 +2134,48 @@ out:
 	return ret;
 }
 
+/*
+ * Calls to sched_migrate_to_cpumask_start() cannot nest. This can only be used
+ * in process context.
+ */
+void sched_migrate_to_cpumask_start(struct cpumask *old_mask,
+				    const struct cpumask *dest)
+{
+	struct task_struct *p = current;
+
+	raw_spin_lock_irq(&p->pi_lock);
+	*cpumask_bits(old_mask) = *cpumask_bits(p->cpus_ptr);
+	raw_spin_unlock_irq(&p->pi_lock);
+
+	/*
+	 * This will force the current task onto the destination cpumask. It
+	 * will sleep when a migration to another CPU is actually needed.
+	 */
+	set_cpus_allowed_ptr(p, dest);
+}
+
+void sched_migrate_to_cpumask_end(const struct cpumask *old_mask,
+				  const struct cpumask *dest)
+{
+	struct task_struct *p = current;
+
+	/*
+	 * Check that cpus_allowed didn't change from what it was temporarily
+	 * set to earlier. If so, we can go ahead and lazily restore the old
+	 * cpumask. There's no need to immediately migrate right now.
+	 */
+	raw_spin_lock_irq(&p->pi_lock);
+	if (*cpumask_bits(p->cpus_ptr) == *cpumask_bits(dest)) {
+		struct rq *rq = this_rq();
+
+		raw_spin_lock(&rq->lock);
+		update_rq_clock(rq);
+		do_set_cpus_allowed(p, old_mask);
+		raw_spin_unlock(&rq->lock);
+	}
+	raw_spin_unlock_irq(&p->pi_lock);
+}
+
 /*
  * wait_task_inactive - wait for a thread to unschedule.
  *
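
An aside, not part of the patch: *cpumask_bits() dereferences the mask's underlying unsigned long array, so the save in sched_migrate_to_cpumask_start() and the compare in sched_migrate_to_cpumask_end() above only cover the first BITS_PER_LONG CPUs. That assumption (NR_CPUS <= BITS_PER_LONG) holds for the 8-core SoCs this kernel targets; a width-agnostic spelling would use the generic cpumask helpers, roughly as sketched here (helper names are hypothetical):

/* NR_CPUS-agnostic equivalents of the one-word save/compare used above.
 * The patch itself relies on the whole mask fitting in a single word. */
static inline void save_allowed_mask(struct cpumask *old_mask)
{
	cpumask_copy(old_mask, current->cpus_ptr);	/* copy every word of the mask */
}

static inline bool still_pinned_to(const struct cpumask *dest)
{
	return cpumask_equal(current->cpus_ptr, dest);	/* compare every word */
}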