Mirror of https://github.com/rd-stuffs/msm-4.14.git
kernel: sched: Provide a pointer to the valid CPU mask
In commit 4b53a3412d66 ("sched/core: Remove the tsk_nr_cpus_allowed() wrapper") the tsk_nr_cpus_allowed() wrapper was removed. There was not much difference in !RT, but in RT we used this to implement migrate_disable(). Within a migrate_disable() section the CPU mask is restricted to a single CPU while the "normal" CPU mask remains untouched.

As an alternative implementation Ingo suggested to use

    struct task_struct {
        const cpumask_t *cpus_ptr;
        cpumask_t        cpus_mask;
    };

with

    t->cpus_ptr = &t->cpus_mask;

In -RT we can then switch cpus_ptr to

    t->cpus_ptr = cpumask_of(task_cpu(p));

in a migration-disabled region. The rules are simple:

- Code that 'uses' ->cpus_allowed uses the pointer.
- Code that 'modifies' ->cpus_allowed uses the direct mask.

While converting the existing users I tried to stick with the rules above. However... well, mostly CPUFREQ tries to temporarily switch the CPU mask to do something on a certain CPU and then switches the mask back to its original value. So in theory `cpus_ptr' could or should be used. However, if this is invoked in a migration-disabled region (which is not the case, because it would require something like preempt_disable(), and set_cpus_allowed_ptr() might sleep so it can't be), then the "restore" part would restore the wrong mask. So it only looks strange and I go for the pointer...

Some drivers copy the cpumask without cpumask_copy() and others use cpumask_copy() but without alloc_cpumask_var(). I did not fix those as part of this; that could be done as a follow-up...

So is this the way we want it? Is the usage of `cpus_ptr' vs `cpus_mask' for the set + restore part (see the cpufreq users) what we want? At some point it looks like they should use a different interface for what they are doing. I am not sure why switching to a certain CPU is important, but maybe it could be done via a workqueue from the CPUFREQ core (so we have a comment describing why we are doing this and a get_online_cpus() to ensure that the CPU does not go offline too early).

Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Rafael J. Wysocki <rjw@rjwysocki.net>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
[Sultan Alsawaf: adapt to floral]
Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com>
Signed-off-by: Zlatan Radovanovic <zlatan.radovanovic@fet.ba>
Signed-off-by: azrim <mirzaspc@gmail.com>
commit 14016205f1
parent 9c92b7716b
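To make the convention concrete, here is a minimal user-space sketch of the pointer/mask split described in the commit message. This is not kernel code: the struct, the helpers and the cpumask_of array are toy stand-ins (a 64-bit word plays the role of cpumask_t) named after the kernel fields only for readability. Readers always go through cpus_ptr, writers only touch cpus_mask, and a migrate-disabled section simply repoints cpus_ptr at a single-CPU mask.

    #include <stdint.h>
    #include <stdio.h>

    /* Toy model: a 64-bit word stands in for cpumask_t. */
    typedef uint64_t toy_cpumask_t;

    struct toy_task {
        const toy_cpumask_t *cpus_ptr;  /* code that 'uses' the affinity reads this */
        toy_cpumask_t        cpus_mask; /* code that 'modifies' the affinity writes this */
        int                  cpu;       /* CPU the task currently runs on */
    };

    /* Writer side: update the real mask (mirrors the idea of set_cpus_allowed_common()). */
    static void toy_set_cpus_allowed(struct toy_task *t, toy_cpumask_t new_mask)
    {
        t->cpus_mask = new_mask;
    }

    /* Reader side: every affinity check goes through the pointer. */
    static int toy_cpu_allowed(const struct toy_task *t, int cpu)
    {
        return (*t->cpus_ptr >> cpu) & 1;
    }

    int main(void)
    {
        /* Per-CPU single-bit masks, the toy equivalent of cpumask_of(cpu). */
        static const toy_cpumask_t cpumask_of[64] = { 1ULL << 0, 1ULL << 1, 1ULL << 2 };
        struct toy_task t = { .cpus_mask = 0xf, .cpu = 2 };

        t.cpus_ptr = &t.cpus_mask;                 /* normal case: pointer aims at the full mask */
        printf("cpu3 allowed: %d\n", toy_cpu_allowed(&t, 3));  /* prints 1 */

        /* "migrate_disable()": readers now see only the current CPU... */
        t.cpus_ptr = &cpumask_of[t.cpu];
        printf("cpu3 allowed: %d\n", toy_cpu_allowed(&t, 3));  /* prints 0 */

        /* ...while a writer can still update the real mask underneath. */
        toy_set_cpus_allowed(&t, 0x3f);

        /* "migrate_enable()": restore the pointer; the updated mask becomes visible again. */
        t.cpus_ptr = &t.cpus_mask;
        printf("cpu5 allowed: %d\n", toy_cpu_allowed(&t, 5));  /* prints 1 */
        return 0;
    }

The point of the indirection, as the message argues, is that a migrate-disabled region never has to copy and later restore the wide mask: readers transparently see the narrowed view through the pointer, while the real affinity in cpus_mask stays intact and writable.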
@@ -1824,7 +1824,7 @@ format_mca_init_stack(void *mca_data, unsigned long offset,
 ti->cpu = cpu;
 p->stack = ti;
 p->state = TASK_UNINTERRUPTIBLE;
-cpumask_set_cpu(cpu, &p->cpus_allowed);
+cpumask_set_cpu(cpu, &p->cpus_mask);
 INIT_LIST_HEAD(&p->tasks);
 p->parent = p->real_parent = p->group_leader = p;
 INIT_LIST_HEAD(&p->children);
@@ -42,7 +42,7 @@ extern struct task_struct *ll_task;
 * inline to try to keep the overhead down. If we have been forced to run on
 * a "CPU" with an FPU because of a previous high level of FP computation,
 * but did not actually use the FPU during the most recent time-slice (CU1
-* isn't set), we undo the restriction on cpus_allowed.
+* isn't set), we undo the restriction on cpus_mask.
 *
 * We're not calling set_cpus_allowed() here, because we have no need to
 * force prompt migration - we're already switching the current CPU to a
@@ -57,7 +57,7 @@ do { \
 test_ti_thread_flag(__prev_ti, TIF_FPUBOUND) && \
 (!(KSTK_STATUS(prev) & ST0_CU1))) { \
 clear_ti_thread_flag(__prev_ti, TIF_FPUBOUND); \
-prev->cpus_allowed = prev->thread.user_cpus_allowed; \
+prev->cpus_mask = prev->thread.user_cpus_allowed; \
 } \
 next->thread.emulated_fp = 0; \
 } while(0)
@@ -177,7 +177,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
 if (retval)
 goto out_unlock;

-cpumask_or(&allowed, &p->thread.user_cpus_allowed, &p->cpus_allowed);
+cpumask_or(&allowed, &p->thread.user_cpus_allowed, p->cpus_ptr);
 cpumask_and(&mask, &allowed, cpu_active_mask);

 out_unlock:
@@ -1193,12 +1193,12 @@ static void mt_ase_fp_affinity(void)
 * restricted the allowed set to exclude any CPUs with FPUs,
 * we'll skip the procedure.
 */
-if (cpumask_intersects(&current->cpus_allowed, &mt_fpu_cpumask)) {
+if (cpumask_intersects(&current->cpus_mask, &mt_fpu_cpumask)) {
 cpumask_t tmask;

 current->thread.user_cpus_allowed
-= current->cpus_allowed;
-cpumask_and(&tmask, &current->cpus_allowed,
+= current->cpus_mask;
+cpumask_and(&tmask, &current->cpus_mask,
 &mt_fpu_cpumask);
 set_cpus_allowed_ptr(current, &tmask);
 set_thread_flag(TIF_FPUBOUND);
@@ -141,7 +141,7 @@ void __spu_update_sched_info(struct spu_context *ctx)
 * runqueue. The context will be rescheduled on the proper node
 * if it is timesliced or preempted.
 */
-cpumask_copy(&ctx->cpus_allowed, &current->cpus_allowed);
+cpumask_copy(&ctx->cpus_allowed, current->cpus_ptr);

 /* Save the current cpu id for spu interrupt routing. */
 ctx->last_ran = raw_smp_processor_id();
@@ -49,7 +49,7 @@ int hardwall_ipi_valid(int cpu);

 /* Hook hardwall code into changes in affinity. */
 #define arch_set_cpus_allowed(p, new_mask) do { \
-if (!cpumask_equal(&p->cpus_allowed, new_mask)) \
+if (!cpumask_equal(p->cpus_ptr, new_mask)) \
 hardwall_deactivate_all(p); \
 } while (0)
 #endif
@@ -590,12 +590,12 @@ static int hardwall_activate(struct hardwall_info *info)
 * Get our affinity; if we're not bound to this tile uniquely,
 * we can't access the network registers.
 */
-if (cpumask_weight(&p->cpus_allowed) != 1)
+if (p->nr_cpus_allowed != 1)
 return -EPERM;

 /* Make sure we are bound to a cpu assigned to this resource. */
 cpu = smp_processor_id();
-BUG_ON(cpumask_first(&p->cpus_allowed) != cpu);
+BUG_ON(cpumask_first(p->cpus_ptr) != cpu);
 if (!cpumask_test_cpu(cpu, &info->cpumask))
 return -EINVAL;

@@ -621,17 +621,17 @@ static int hardwall_activate(struct hardwall_info *info)
 * Deactivate a task's hardwall. Must hold lock for hardwall_type.
 * This method may be called from exit_thread(), so we don't want to
 * rely on too many fields of struct task_struct still being valid.
-* We assume the cpus_allowed, pid, and comm fields are still valid.
+* We assume the nr_cpus_allowed, pid, and comm fields are still valid.
 */
 static void _hardwall_deactivate(struct hardwall_type *hwt,
 struct task_struct *task)
 {
 struct thread_struct *ts = &task->thread;

-if (cpumask_weight(&task->cpus_allowed) != 1) {
+if (task->nr_cpus_allowed != 1) {
 pr_err("pid %d (%s) releasing %s hardwall with an affinity mask containing %d cpus!\n",
 task->pid, task->comm, hwt->name,
-cpumask_weight(&task->cpus_allowed));
+task->nr_cpus_allowed);
 BUG();
 }

@@ -593,7 +593,7 @@ int hfi1_get_proc_affinity(int node)
 struct hfi1_affinity_node *entry;
 cpumask_var_t diff, hw_thread_mask, available_mask, intrs_mask;
 const struct cpumask *node_mask,
-*proc_mask = &current->cpus_allowed;
+*proc_mask = current->cpus_ptr;
 struct hfi1_affinity_node_list *affinity = &node_affinity;
 struct cpu_mask_set *set = &affinity->proc;

@@ -601,7 +601,7 @@ int hfi1_get_proc_affinity(int node)
 * check whether process/context affinity has already
 * been set
 */
-if (cpumask_weight(proc_mask) == 1) {
+if (current->nr_cpus_allowed == 1) {
 hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %*pbl",
 current->pid, current->comm,
 cpumask_pr_args(proc_mask));
@@ -612,7 +612,7 @@ int hfi1_get_proc_affinity(int node)
 cpu = cpumask_first(proc_mask);
 cpumask_set_cpu(cpu, &set->used);
 goto done;
-} else if (cpumask_weight(proc_mask) < cpumask_weight(&set->mask)) {
+} else if (current->nr_cpus_allowed < cpumask_weight(&set->mask)) {
 hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %*pbl",
 current->pid, current->comm,
 cpumask_pr_args(proc_mask));
@@ -856,14 +856,13 @@ struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd,
 {
 struct sdma_rht_node *rht_node;
 struct sdma_engine *sde = NULL;
-const struct cpumask *current_mask = &current->cpus_allowed;
 unsigned long cpu_id;

 /*
 * To ensure that always the same sdma engine(s) will be
 * selected make sure the process is pinned to this CPU only.
 */
-if (cpumask_weight(current_mask) != 1)
+if (current->nr_cpus_allowed != 1)
 goto out;

 cpu_id = smp_processor_id();
@@ -1167,7 +1167,7 @@ static unsigned int qib_poll(struct file *fp, struct poll_table_struct *pt)
 static void assign_ctxt_affinity(struct file *fp, struct qib_devdata *dd)
 {
 struct qib_filedata *fd = fp->private_data;
-const unsigned int weight = cpumask_weight(&current->cpus_allowed);
+const unsigned int weight = current->nr_cpus_allowed;
 const struct cpumask *local_mask = cpumask_of_pcibus(dd->pcidev->bus);
 int local_cpu;

@@ -1648,9 +1648,8 @@ static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo)
 ret = find_free_ctxt(i_minor - 1, fp, uinfo);
 else {
 int unit;
-const unsigned int cpu = cpumask_first(&current->cpus_allowed);
-const unsigned int weight =
-cpumask_weight(&current->cpus_allowed);
+const unsigned int cpu = cpumask_first(current->cpus_ptr);
+const unsigned int weight = current->nr_cpus_allowed;

 if (weight == 1 && !test_bit(cpu, qib_cpulist))
 if (!find_hca(cpu, &unit) && unit >= 0)
@@ -386,9 +386,9 @@ static inline void task_context_switch_counts(struct seq_file *m,
 static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
 {
 seq_printf(m, "Cpus_allowed:\t%*pb\n",
-cpumask_pr_args(&task->cpus_allowed));
+cpumask_pr_args(task->cpus_ptr));
 seq_printf(m, "Cpus_allowed_list:\t%*pbl\n",
-cpumask_pr_args(&task->cpus_allowed));
+cpumask_pr_args(task->cpus_ptr));
 }

 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
@@ -234,7 +234,8 @@ extern struct cred init_cred;
 .static_prio = MAX_PRIO-20, \
 .normal_prio = MAX_PRIO-20, \
 .policy = SCHED_NORMAL, \
-.cpus_allowed = CPU_MASK_ALL, \
+.cpus_ptr = &tsk.cpus_mask, \
+.cpus_mask = CPU_MASK_ALL, \
 .nr_cpus_allowed= NR_CPUS, \
 .cpus_requested = CPU_MASK_ALL, \
 .mm = NULL, \
@@ -808,7 +808,8 @@ struct task_struct {

 unsigned int policy;
 int nr_cpus_allowed;
-cpumask_t cpus_allowed;
+const cpumask_t *cpus_ptr;
+cpumask_t cpus_mask;
 cpumask_t cpus_requested;
 #if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
 int migrate_disable;
@@ -1602,7 +1603,7 @@ extern struct pid *cad_pid;
 #define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
 #define PF_MEMSTALL 0x01000000 /* Stalled due to lack of memory */
 #define PF_PERF_CRITICAL 0x02000000 /* Thread is performance-critical */
-#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */
+#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_mask */
 #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
 #define PF_WAKE_UP_IDLE 0x10000000 /* TTWU on an idle CPU */
 #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
@@ -2126,7 +2126,7 @@ static void cpuset_fork(struct task_struct *task)
 if (task_css_is_root(task, cpuset_cgrp_id))
 return;

-set_cpus_allowed_ptr(task, &current->cpus_allowed);
+set_cpus_allowed_ptr(task, current->cpus_ptr);
 task->mems_allowed = current->mems_allowed;
 }

@@ -573,7 +573,8 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
 #ifdef CONFIG_CC_STACKPROTECTOR
 tsk->stack_canary = get_random_canary();
 #endif
-
+if (orig->cpus_ptr == &orig->cpus_mask)
+tsk->cpus_ptr = &tsk->cpus_mask;
 /*
 * One for us, one for whoever does the "release_task()" (usually
 * parent)
@@ -782,7 +782,7 @@ static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)

 p->sched_class->enqueue_task(rq, p, flags);
 walt_update_last_enqueue(p);
-trace_sched_enq_deq_task(p, 1, cpumask_bits(&p->cpus_allowed)[0]);
+trace_sched_enq_deq_task(p, 1, cpumask_bits(p->cpus_ptr)[0]);
 }

 static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
@@ -800,7 +800,7 @@ static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
 if (p == rq->ed_task)
 early_detection_notify(rq, sched_ktime_clock());
 #endif
-trace_sched_enq_deq_task(p, 0, cpumask_bits(&p->cpus_allowed)[0]);
+trace_sched_enq_deq_task(p, 0, cpumask_bits(p->cpus_ptr)[0]);
 }

 void activate_task(struct rq *rq, struct task_struct *p, int flags)
@@ -939,7 +939,7 @@ static inline bool is_per_cpu_kthread(struct task_struct *p)
 */
 static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
 {
-if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
+if (!cpumask_test_cpu(cpu, p->cpus_ptr))
 return false;

 if (is_per_cpu_kthread(p))
@@ -1040,7 +1040,7 @@ static int migration_cpu_stop(void *data)
 local_irq_disable();
 /*
 * We need to explicitly wake pending tasks before running
-* __migrate_task() such that we will not miss enforcing cpus_allowed
+* __migrate_task() such that we will not miss enforcing cpus_ptr
 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
 */
 sched_ttwu_pending();
@@ -1071,7 +1071,7 @@ static int migration_cpu_stop(void *data)
 */
 void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
 {
-cpumask_copy(&p->cpus_allowed, new_mask);
+cpumask_copy(&p->cpus_mask, new_mask);
 p->nr_cpus_allowed = cpumask_weight(new_mask);
 }

@@ -1169,7 +1169,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
 goto out;
 }

-if (cpumask_equal(&p->cpus_allowed, new_mask))
+if (cpumask_equal(p->cpus_ptr, new_mask))
 goto out;

 cpumask_andnot(&allowed_mask, new_mask, cpu_isolated_mask);
@@ -1346,10 +1346,10 @@ static int migrate_swap_stop(void *data)
 if (task_cpu(arg->src_task) != arg->src_cpu)
 goto unlock;

-if (!cpumask_test_cpu(arg->dst_cpu, &arg->src_task->cpus_allowed))
+if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr))
 goto unlock;

-if (!cpumask_test_cpu(arg->src_cpu, &arg->dst_task->cpus_allowed))
+if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr))
 goto unlock;

 __migrate_swap_task(arg->src_task, arg->dst_cpu);
@@ -1390,10 +1390,10 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p)
 if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
 goto out;

-if (!cpumask_test_cpu(arg.dst_cpu, &arg.src_task->cpus_allowed))
+if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr))
 goto out;

-if (!cpumask_test_cpu(arg.src_cpu, &arg.dst_task->cpus_allowed))
+if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr))
 goto out;

 trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
@@ -1413,7 +1413,7 @@ void sched_migrate_to_cpumask_start(struct cpumask *old_mask,
 struct task_struct *p = current;

 raw_spin_lock_irq(&p->pi_lock);
-*cpumask_bits(old_mask) = *cpumask_bits(&p->cpus_allowed);
+*cpumask_bits(old_mask) = *cpumask_bits(p->cpus_ptr);
 raw_spin_unlock_irq(&p->pi_lock);

 /*
@@ -1434,7 +1434,7 @@ void sched_migrate_to_cpumask_end(const struct cpumask *old_mask,
 * cpumask. There's no need to immediately migrate right now.
 */
 raw_spin_lock_irq(&p->pi_lock);
-if (*cpumask_bits(&p->cpus_allowed) == *cpumask_bits(dest)) {
+if (*cpumask_bits(p->cpus_ptr) == *cpumask_bits(dest)) {
 struct rq *rq = this_rq();

 raw_spin_lock(&rq->lock);
@@ -1579,7 +1579,7 @@ void kick_process(struct task_struct *p)
 EXPORT_SYMBOL_GPL(kick_process);

 /*
-* ->cpus_allowed is protected by both rq->lock and p->pi_lock
+* ->cpus_ptr is protected by both rq->lock and p->pi_lock
 *
 * A few notes on cpu_active vs cpu_online:
 *
@@ -1622,14 +1622,14 @@ static int select_fallback_rq(int cpu, struct task_struct *p, bool allow_iso)
 continue;
 if (cpu_isolated(dest_cpu))
 continue;
-if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
+if (cpumask_test_cpu(dest_cpu, p->cpus_ptr))
 return dest_cpu;
 }
 }

 for (;;) {
 /* Any allowed, online CPU? */
-for_each_cpu(dest_cpu, &p->cpus_allowed) {
+for_each_cpu(dest_cpu, p->cpus_ptr) {
 if (!is_cpu_allowed(p, dest_cpu))
 continue;
 if (cpu_isolated(dest_cpu)) {
@@ -1687,7 +1687,7 @@ out:
 }

 /*
-* The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
+* The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable.
 */
 static inline
 int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags,
@@ -1701,11 +1701,11 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags,
 cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags,
 sibling_count_hint);
 else
-cpu = cpumask_any(&p->cpus_allowed);
+cpu = cpumask_any(p->cpus_ptr);

 /*
 * In order not to call set_task_cpu() on a blocking task we need
-* to rely on ttwu() to place the task on a valid ->cpus_allowed
+* to rely on ttwu() to place the task on a valid ->cpus_ptr
 * CPU.
 *
 * Since this is common to all placement strategies, this lives here.
@@ -2784,7 +2784,7 @@ void wake_up_new_task(struct task_struct *p)
 #ifdef CONFIG_SMP
 /*
 * Fork balancing, do it here and not earlier because:
-* - cpus_allowed can change in the fork path
+* - cpus_ptr can change in the fork path
 * - any previously selected CPU might disappear through hotplug
 *
 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
@@ -4644,7 +4644,7 @@ change:
 * the entire root_domain to become SCHED_DEADLINE. We
 * will also fail if there's no bandwidth available.
 */
-if (!cpumask_subset(span, &p->cpus_allowed) ||
+if (!cpumask_subset(span, p->cpus_ptr) ||
 rq->rd->dl_bw.bw == 0) {
 task_rq_unlock(rq, p, &rf);
 return -EPERM;
@@ -5376,7 +5376,7 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
 goto out_unlock;

 raw_spin_lock_irqsave(&p->pi_lock, flags);
-cpumask_and(mask, &p->cpus_allowed, cpu_active_mask);
+cpumask_and(mask, &p->cpus_mask, cpu_active_mask);

 /* The userspace tasks are forbidden to run on
 * isolated CPUs. So exclude isolated CPUs from
@@ -5947,7 +5947,7 @@ int task_can_attach(struct task_struct *p,
 * allowed nodes is unnecessary. Thus, cpusets are not
 * applicable for such threads. This prevents checking for
 * success of set_cpus_allowed_ptr() on all attached tasks
-* before cpus_allowed may be changed.
+* before cpus_mask may be changed.
 */
 if (p->flags & PF_NO_SETAFFINITY) {
 ret = -EINVAL;
@@ -5974,7 +5974,7 @@ int migrate_task_to(struct task_struct *p, int target_cpu)
 if (curr_cpu == target_cpu)
 return 0;

-if (!cpumask_test_cpu(target_cpu, &p->cpus_allowed))
+if (!cpumask_test_cpu(target_cpu, p->cpus_ptr))
 return -EINVAL;

 /* TODO: This is not properly updating schedstats */
@@ -6151,14 +6151,14 @@ static void migrate_tasks(struct rq *dead_rq, struct rq_flags *rf,
 put_prev_task(rq, next);

 if (!migrate_pinned_tasks && next->flags & PF_KTHREAD &&
-!cpumask_intersects(&avail_cpus, &next->cpus_allowed)) {
+!cpumask_intersects(&avail_cpus, next->cpus_ptr)) {
 detach_one_task(next, rq, &tasks);
 num_pinned_kthreads += 1;
 continue;
 }

 /*
-* Rules for changing task_struct::cpus_allowed are holding
+* Rules for changing task_struct::cpus_mask are holding
 * both pi_lock and rq->lock, such that holding either
 * stabilizes the mask.
 *
@@ -127,13 +127,13 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
 const struct sched_dl_entity *dl_se = &p->dl;

 if (later_mask &&
-cpumask_and(later_mask, cp->free_cpus, &p->cpus_allowed)) {
+cpumask_and(later_mask, cp->free_cpus, p->cpus_ptr)) {
 return 1;
 } else {
 int best_cpu = cpudl_maximum(cp);
 WARN_ON(best_cpu != -1 && !cpu_present(best_cpu));

-if (cpumask_test_cpu(best_cpu, &p->cpus_allowed) &&
+if (cpumask_test_cpu(best_cpu, p->cpus_ptr) &&
 dl_time_before(dl_se->deadline, cp->elements[0].dl)) {
 if (later_mask)
 cpumask_set_cpu(best_cpu, later_mask);
@@ -128,11 +128,11 @@ retry:
 if (skip)
 continue;

-if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
+if (cpumask_any_and(p->cpus_ptr, vec->mask) >= nr_cpu_ids)
 continue;

 if (lowest_mask) {
-cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);
+cpumask_and(lowest_mask, p->cpus_ptr, vec->mask);
 cpumask_andnot(lowest_mask, lowest_mask,
 cpu_isolated_mask);
 if (drop_nopreempts)
@@ -507,7 +507,7 @@ static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p
 * If we cannot preempt any rq, fall back to pick any
 * online cpu.
 */
-cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
+cpu = cpumask_any_and(cpu_active_mask, p->cpus_ptr);
 if (cpu >= nr_cpu_ids) {
 /*
 * Fail to find any suitable cpu.
@@ -1811,7 +1811,7 @@ static void set_curr_task_dl(struct rq *rq)
 static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
 {
 if (!task_running(rq, p) &&
-cpumask_test_cpu(cpu, &p->cpus_allowed))
+cpumask_test_cpu(cpu, p->cpus_ptr))
 return 1;
 return 0;
 }
@@ -1961,7 +1961,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
 /* Retry if something changed. */
 if (double_lock_balance(rq, later_rq)) {
 if (unlikely(task_rq(task) != rq ||
-!cpumask_test_cpu(later_rq->cpu, &task->cpus_allowed) ||
+!cpumask_test_cpu(later_rq->cpu, task->cpus_ptr) ||
 task_running(rq, task) ||
 !dl_task(task) ||
 !task_on_rq_queued(task))) {
@@ -1671,7 +1671,7 @@ static void task_numa_compare(struct task_numa_env *env,
 */
 if (cur) {
 /* Skip this swap candidate if cannot move to the source cpu */
-if (!cpumask_test_cpu(env->src_cpu, &cur->cpus_allowed))
+if (!cpumask_test_cpu(env->src_cpu, cur->cpus_ptr))
 goto unlock;

 /*
@@ -1781,7 +1781,7 @@ static void task_numa_find_cpu(struct task_numa_env *env,

 for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
 /* Skip this CPU if the source task cannot migrate */
-if (!cpumask_test_cpu(cpu, &env->p->cpus_allowed))
+if (!cpumask_test_cpu(cpu, env->p->cpus_ptr))
 continue;

 env->dst_cpu = cpu;
@@ -5835,14 +5835,14 @@ cpu_is_in_target_set(struct task_struct *p, int cpu)
 first_cpu = rd->min_cap_orig_cpu;
 }

-next_usable_cpu = cpumask_next(first_cpu - 1, &p->cpus_allowed);
+next_usable_cpu = cpumask_next(first_cpu - 1, p->cpus_ptr);
 return cpu >= next_usable_cpu || next_usable_cpu >= nr_cpu_ids;
 }

 static inline bool
 bias_to_this_cpu(struct task_struct *p, int cpu, struct cpumask *rtg_target)
 {
-bool base_test = cpumask_test_cpu(cpu, &p->cpus_allowed) &&
+bool base_test = cpumask_test_cpu(cpu, p->cpus_ptr) &&
 cpu_active(cpu) && task_fits_max(p, cpu) &&
 !__cpu_overutilized(cpu, task_util(p)) &&
 cpu_is_in_target_set(p, cpu);
@@ -6850,8 +6850,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 int i;

 /* Skip over this group if it has no CPUs allowed */
-if (!cpumask_intersects(sched_group_span(group),
-&p->cpus_allowed))
+if (!cpumask_intersects(sched_group_span(group), p->cpus_ptr))
 continue;

 local_group = cpumask_test_cpu(this_cpu,
@@ -6971,7 +6970,7 @@ find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this
 return cpumask_first(sched_group_span(group));

 /* Traverse only the allowed CPUs */
-for_each_cpu_and(i, sched_group_span(group), &p->cpus_allowed) {
+for_each_cpu_and(i, sched_group_span(group), p->cpus_ptr) {
 if (idle_cpu(i)) {
 struct rq *rq = cpu_rq(i);
 struct cpuidle_state *idle = idle_get_state(rq);
@@ -7011,7 +7010,7 @@ static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p
 {
 int new_cpu = cpu;

-if (!cpumask_intersects(sched_domain_span(sd), &p->cpus_allowed))
+if (!cpumask_intersects(sched_domain_span(sd), p->cpus_ptr))
 return prev_cpu;

 while (sd) {
@@ -7122,7 +7121,7 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int
 if (!test_idle_cores(target, false))
 return -1;

-cpumask_and(cpus, sched_domain_span(sd), &p->cpus_allowed);
+cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);

 for_each_cpu_wrap(core, cpus, target) {
 bool idle = true;
@@ -7156,7 +7155,7 @@ static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int t
 return -1;

 for_each_cpu(cpu, cpu_smt_mask(target)) {
-if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
+if (!cpumask_test_cpu(cpu, p->cpus_ptr))
 continue;
 if (idle_cpu(cpu))
 return cpu;
@@ -7217,7 +7216,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t

 time = local_clock();

-cpumask_and(cpus, sched_domain_span(sd), &p->cpus_allowed);
+cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);

 for_each_cpu_wrap(cpu, cpus, target) {
 if (!--nr)
@@ -7290,10 +7289,10 @@ static inline int select_idle_sibling_cstate_aware(struct task_struct *p, int pr
 sg = sd->groups;
 do {
 if (!cpumask_intersects(
-sched_group_span(sg), &p->cpus_allowed))
+sched_group_span(sg), p->cpus_ptr))
 goto next;

-for_each_cpu_and(i, &p->cpus_allowed, sched_group_span(sg)) {
+for_each_cpu_and(i, p->cpus_ptr, sched_group_span(sg)) {
 int idle_idx;
 unsigned long new_usage;
 unsigned long capacity_orig;
@@ -7541,7 +7540,7 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
 /* Scan CPUs in all SDs */
 sg = sd->groups;
 do {
-for_each_cpu_and(i, &p->cpus_allowed, sched_group_span(sg)) {
+for_each_cpu_and(i, p->cpus_ptr, sched_group_span(sg)) {
 unsigned long capacity_curr = capacity_curr_of(i);
 unsigned long capacity_orig = capacity_orig_of(i);
 unsigned long wake_util, new_util, new_util_cuml;
@@ -8115,7 +8114,7 @@ static inline struct energy_env *get_eenv(struct task_struct *p, int prev_cpu)
 eenv->util_delta = task_util_est(p);
 eenv->util_delta_boosted = boosted_task_util(p);

-cpumask_and(&cpumask_possible_cpus, &p->cpus_allowed, cpu_online_mask);
+cpumask_and(&cpumask_possible_cpus, p->cpus_ptr, cpu_online_mask);
 eenv->max_cpu_count = cpumask_weight(&cpumask_possible_cpus);

 for (i=0; i < eenv->max_cpu_count; i++)
@@ -8239,7 +8238,7 @@ static int find_energy_efficient_cpu(struct sched_domain *sd,
 if (!sd)
 sd = rcu_dereference(per_cpu(sd_ea, prev_cpu));

-for_each_cpu_and(cpu_iter, &p->cpus_allowed, sched_domain_span(sd)) {
+for_each_cpu_and(cpu_iter, p->cpus_ptr, sched_domain_span(sd)) {
 unsigned long spare;

 /* prev_cpu already in list */
@@ -8423,7 +8422,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f

 if (sd_flag & SD_BALANCE_WAKE) {
 int _wake_cap = wake_cap(p, cpu, prev_cpu);
-int _cpus_allowed = cpumask_test_cpu(cpu, &p->cpus_allowed);
+int _cpus_allowed = cpumask_test_cpu(cpu, p->cpus_ptr);

 if (sysctl_sched_sync_hint_enable && sync &&
 _cpus_allowed && !_wake_cap &&
@@ -9221,14 +9220,14 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
 /*
 * We do not migrate tasks that are:
 * 1) throttled_lb_pair, or
-* 2) cannot be migrated to this CPU due to cpus_allowed, or
+* 2) cannot be migrated to this CPU due to cpus_ptr, or
 * 3) running (obviously), or
 * 4) are cache-hot on their current CPU.
 */
 if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
 return 0;

-if (!cpumask_test_cpu(env->dst_cpu, &p->cpus_allowed)) {
+if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) {
 int cpu;

 schedstat_inc(p->se.statistics.nr_failed_migrations_affine);
@@ -9248,7 +9247,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)

 /* Prevent to re-select dst_cpu via env's cpus */
 for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
-if (cpumask_test_cpu(cpu, &p->cpus_allowed)) {
+if (cpumask_test_cpu(cpu, p->cpus_ptr)) {
 env->flags |= LBF_DST_PINNED;
 env->new_dst_cpu = cpu;
 break;
@@ -9477,7 +9476,7 @@ redo:
 next:
 trace_sched_load_balance_skip_tasks(env->src_cpu, env->dst_cpu,
 env->src_grp_type, p->pid, load, task_util(p),
-cpumask_bits(&p->cpus_allowed)[0]);
+cpumask_bits(p->cpus_ptr)[0]);
 list_move_tail(&p->se.group_node, tasks);
 }

@@ -9908,7 +9907,7 @@ check_cpu_capacity(struct rq *rq, struct sched_domain *sd)

 /*
 * Group imbalance indicates (and tries to solve) the problem where balancing
-* groups is inadequate due to ->cpus_allowed constraints.
+* groups is inadequate due to ->cpus_ptr constraints.
 *
 * Imagine a situation of two groups of 4 cpus each and 4 tasks each with a
 * cpumask covering 1 cpu of the first group and 3 cpus of the second group.
@@ -10756,7 +10755,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
 /*
 * If the busiest group is imbalanced the below checks don't
 * work because they assume all things are equal, which typically
-* isn't true due to cpus_allowed constraints and the like.
+* isn't true due to cpus_ptr constraints and the like.
 */
 if (busiest->group_type == group_imbalanced)
 goto force_balance;
@@ -11254,7 +11253,7 @@ no_move:
 * if the curr task on busiest cpu can't be
 * moved to this_cpu
 */
-if (!cpumask_test_cpu(this_cpu, &busiest->curr->cpus_allowed)) {
+if (!cpumask_test_cpu(this_cpu, busiest->curr->cpus_ptr)) {
 raw_spin_unlock_irqrestore(&busiest->lock,
 flags);
 env.flags |= LBF_ALL_PINNED;
@@ -1743,7 +1743,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
 {
 if (!task_running(rq, p) &&
-cpumask_test_cpu(cpu, &p->cpus_allowed))
+cpumask_test_cpu(cpu, p->cpus_ptr))
 return 1;
 return 0;
 }
@@ -1987,7 +1987,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 * Also make sure that it wasn't scheduled on its rq.
 */
 if (unlikely(task_rq(task) != rq ||
-!cpumask_test_cpu(lowest_rq->cpu, &task->cpus_allowed) ||
+!cpumask_test_cpu(lowest_rq->cpu, task->cpus_ptr) ||
 task_running(rq, task) ||
 !rt_task(task) ||
 !task_on_rq_queued(task))) {
@@ -282,7 +282,7 @@ static void move_to_next_cpu(void)
 * of this thread, than stop migrating for the duration
 * of the current test.
 */
-if (!cpumask_equal(current_mask, &current->cpus_allowed))
+if (!cpumask_equal(current_mask, current->cpus_ptr))
 goto disable;

 get_online_cpus();
@@ -23,7 +23,7 @@ notrace static unsigned int check_preemption_disabled(const char *what1,
 * Kernel threads bound to a single CPU can safely use
 * smp_processor_id():
 */
-if (cpumask_equal(&current->cpus_allowed, cpumask_of(this_cpu)))
+if (cpumask_equal(current->cpus_ptr, cpumask_of(this_cpu)))
 goto out;

 /*
@@ -33,7 +33,7 @@ static void simple_thread_func(int cnt)

 /* Silly tracepoints */
 trace_foo_bar("hello", cnt, array, random_strings[len],
-&current->cpus_allowed);
+current->cpus_ptr);

 trace_foo_with_template_simple("HELLO", cnt);