msm-4.14: Drop sched_migrate_to_cpumask

Change-Id: I8b03f4b7f90c6486d42ef767ba0b52a9567830a2
Signed-off-by: Richard Raya <rdxzv.dev@gmail.com>
Author: Richard Raya <rdxzv.dev@gmail.com>
Date: 2024-04-22 01:34:43 -03:00
Commit: 602aa3bba8 (parent: c98534606c)
4 changed files with 0 additions and 54 deletions


@@ -290,7 +290,6 @@ struct proc_maps_private {
 #ifdef CONFIG_NUMA
 	struct mempolicy *task_mempolicy;
 #endif
-	unsigned long old_cpus_allowed;
 } __randomize_layout;
 
 struct mm_struct *proc_mem_open(struct inode *inode, unsigned int mode);


@@ -189,9 +189,6 @@ static void vma_stop(struct proc_maps_private *priv)
 	release_task_mempolicy(priv);
 	up_read(&mm->mmap_sem);
 	mmput(mm);
-
-	sched_migrate_to_cpumask_end(to_cpumask(&priv->old_cpus_allowed),
-				     cpu_lp_mask);
 }
 
 static struct vm_area_struct *
@@ -228,9 +225,6 @@ static void *m_start(struct seq_file *m, loff_t *ppos)
 	if (!mm || !mmget_not_zero(mm))
 		return NULL;
 
-	sched_migrate_to_cpumask_start(to_cpumask(&priv->old_cpus_allowed),
-				       cpu_lp_mask);
-
 	down_read(&mm->mmap_sem);
 	hold_task_mempolicy(priv);
 	priv->tail_vma = get_gate_vma(mm);
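
For context, the two hunks above drop the call-site bracketing around the VMA walk: m_start() migrated current onto the low-power cluster before taking mmap_sem, and vma_stop() restored the saved affinity afterwards. A minimal sketch of that bracketing pattern, using the identifiers from the hunks; walk_vmas() is a hypothetical stand-in for the seq_file iteration, and the single-word old_cpus_allowed mirrors the dropped priv->old_cpus_allowed field:

	unsigned long old_cpus_allowed;

	/* Pin current to the low-power cluster for the duration of the walk. */
	sched_migrate_to_cpumask_start(to_cpumask(&old_cpus_allowed), cpu_lp_mask);

	walk_vmas();	/* hypothetical work done while restricted to the little cores */

	/* Lazily restore whatever affinity the task had beforehand. */
	sched_migrate_to_cpumask_end(to_cpumask(&old_cpus_allowed), cpu_lp_mask);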


@@ -1800,11 +1800,6 @@ static inline bool cpupri_check_rt(void)
 }
 #endif
 
-void sched_migrate_to_cpumask_start(struct cpumask *old_mask,
-				    const struct cpumask *dest);
-void sched_migrate_to_cpumask_end(const struct cpumask *old_mask,
-				  const struct cpumask *dest);
-
 #ifndef cpu_relax_yield
 #define cpu_relax_yield() cpu_relax()
 #endif


@@ -2134,48 +2134,6 @@ out:
 	return ret;
 }
 
-/*
- * Calls to sched_migrate_to_cpumask_start() cannot nest. This can only be used
- * in process context.
- */
-void sched_migrate_to_cpumask_start(struct cpumask *old_mask,
-				    const struct cpumask *dest)
-{
-	struct task_struct *p = current;
-
-	raw_spin_lock_irq(&p->pi_lock);
-	*cpumask_bits(old_mask) = *cpumask_bits(p->cpus_ptr);
-	raw_spin_unlock_irq(&p->pi_lock);
-
-	/*
-	 * This will force the current task onto the destination cpumask. It
-	 * will sleep when a migration to another CPU is actually needed.
-	 */
-	set_cpus_allowed_ptr(p, dest);
-}
-
-void sched_migrate_to_cpumask_end(const struct cpumask *old_mask,
-				  const struct cpumask *dest)
-{
-	struct task_struct *p = current;
-
-	/*
-	 * Check that cpus_allowed didn't change from what it was temporarily
-	 * set to earlier. If so, we can go ahead and lazily restore the old
-	 * cpumask. There's no need to immediately migrate right now.
-	 */
-	raw_spin_lock_irq(&p->pi_lock);
-	if (*cpumask_bits(p->cpus_ptr) == *cpumask_bits(dest)) {
-		struct rq *rq = this_rq();
-
-		raw_spin_lock(&rq->lock);
-		update_rq_clock(rq);
-		do_set_cpus_allowed(p, old_mask);
-		raw_spin_unlock(&rq->lock);
-	}
-	raw_spin_unlock_irq(&p->pi_lock);
-}
-
 /*
  * wait_task_inactive - wait for a thread to unschedule.
  *
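
The hunk above removes the implementation itself: sched_migrate_to_cpumask_start() snapshotted the caller's affinity under pi_lock and forced current onto dest via set_cpus_allowed_ptr(), while sched_migrate_to_cpumask_end() restored the old mask lazily with do_set_cpus_allowed() only if nothing had changed the affinity in the meantime. Any code that still wants this behavior after the drop has to open-code it with the stock affinity API. A minimal sketch, assuming an eager restore (a second set_cpus_allowed_ptr() call, which may migrate immediately) is acceptable in place of the lazy path; pin_to_lp_cluster() and unpin_from_lp_cluster() are hypothetical names:

#include <linux/cpumask.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

/* Snapshot current's affinity and restrict it to the low-power cluster. */
static void pin_to_lp_cluster(struct cpumask *saved)
{
	/* The removed helper took pi_lock for the snapshot; do the same here. */
	raw_spin_lock_irq(&current->pi_lock);
	cpumask_copy(saved, current->cpus_ptr);
	raw_spin_unlock_irq(&current->pi_lock);

	set_cpus_allowed_ptr(current, cpu_lp_mask);	/* may sleep to migrate */
}

/* Restore the snapshotted affinity; eager, unlike the dropped lazy restore. */
static void unpin_from_lp_cluster(const struct cpumask *saved)
{
	set_cpus_allowed_ptr(current, saved);
}

The eager restore gives up the optimization the removed code had (skipping the migration when the saved mask would allow the current CPU anyway), but it avoids reaching into rq->lock and do_set_cpus_allowed() from outside the scheduler core.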