Mirror of https://github.com/rd-stuffs/msm-4.14.git
staging/lustre/libcfs: replace deprecated cpus_ calls with cpumask_
Rusty Russell advises that the cpus_* functions for working on cpumasks are deprecated and that the cpumask_* functions should be called instead; otherwise problems arise with CPUMASK_OFFSTACK.

Signed-off-by: Oleg Drokin <green@linuxhacker.ru>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
parent: dc0d838a05
commit: b25e74b56a
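For context, a minimal sketch of the conversion pattern this commit applies. It is illustrative only and not part of the patch: demo_count_online() is a hypothetical helper name. The point is that the pointer-based cpumask_* API keeps working when CONFIG_CPUMASK_OFFSTACK makes cpumask_var_t a pointer to an off-stack mask, while the value-style cpus_* macros do not.

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/* Illustrative sketch, not from the patch: shows the pointer-based
 * cpumask_* calls that replace the deprecated value-style cpus_* macros.
 */
static int demo_count_online(void)
{
	cpumask_var_t mask;
	int cpu, count = 0;

	/* off-stack allocation; a no-op when CPUMASK_OFFSTACK is disabled */
	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	/* old style: cpus_and(*mask, ...); new style operates on pointers */
	cpumask_and(mask, cpu_possible_mask, cpu_online_mask);

	/* old style: for_each_cpu_mask(cpu, *mask) */
	for_each_cpu(cpu, mask)
		count++;

	/* count now equals cpumask_weight(mask), the replacement for
	 * cpus_weight(*mask) used throughout this patch */
	free_cpumask_var(mask);
	return count;
}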
@@ -204,7 +204,7 @@ cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf, int len)
 		}
 
 		tmp += rc;
-		for_each_cpu_mask(j, *cptab->ctb_parts[i].cpt_cpumask) {
+		for_each_cpu(j, cptab->ctb_parts[i].cpt_cpumask) {
 			rc = snprintf(tmp, len, "%d ", j);
 			len -= rc;
 			if (len <= 0) {
@@ -240,8 +240,8 @@ cfs_cpt_weight(struct cfs_cpt_table *cptab, int cpt)
 	LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));
 
 	return cpt == CFS_CPT_ANY ?
-	       cpus_weight(*cptab->ctb_cpumask) :
-	       cpus_weight(*cptab->ctb_parts[cpt].cpt_cpumask);
+	       cpumask_weight(cptab->ctb_cpumask) :
+	       cpumask_weight(cptab->ctb_parts[cpt].cpt_cpumask);
 }
 EXPORT_SYMBOL(cfs_cpt_weight);
 
@@ -251,8 +251,10 @@ cfs_cpt_online(struct cfs_cpt_table *cptab, int cpt)
 	LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));
 
 	return cpt == CFS_CPT_ANY ?
-	       any_online_cpu(*cptab->ctb_cpumask) != NR_CPUS :
-	       any_online_cpu(*cptab->ctb_parts[cpt].cpt_cpumask) != NR_CPUS;
+	       cpumask_any_and(cptab->ctb_cpumask,
+			       cpu_online_mask) < nr_cpu_ids :
+	       cpumask_any_and(cptab->ctb_parts[cpt].cpt_cpumask,
+			       cpu_online_mask) < nr_cpu_ids;
 }
 EXPORT_SYMBOL(cfs_cpt_online);
 
@@ -283,7 +285,7 @@ cfs_cpt_set_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
 
 	LASSERT(cpt >= 0 && cpt < cptab->ctb_nparts);
 
-	if (cpu < 0 || cpu >= NR_CPUS || !cpu_online(cpu)) {
+	if (cpu < 0 || cpu >= nr_cpu_ids || !cpu_online(cpu)) {
 		CDEBUG(D_INFO, "CPU %d is invalid or it's offline\n", cpu);
 		return 0;
 	}
@@ -296,11 +298,11 @@ cfs_cpt_set_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
 
 	cptab->ctb_cpu2cpt[cpu] = cpt;
 
-	LASSERT(!cpu_isset(cpu, *cptab->ctb_cpumask));
-	LASSERT(!cpu_isset(cpu, *cptab->ctb_parts[cpt].cpt_cpumask));
+	LASSERT(!cpumask_test_cpu(cpu, cptab->ctb_cpumask));
+	LASSERT(!cpumask_test_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask));
 
-	cpu_set(cpu, *cptab->ctb_cpumask);
-	cpu_set(cpu, *cptab->ctb_parts[cpt].cpt_cpumask);
+	cpumask_set_cpu(cpu, cptab->ctb_cpumask);
+	cpumask_set_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask);
 
 	node = cpu_to_node(cpu);
 
@@ -324,7 +326,7 @@ cfs_cpt_unset_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
 
 	LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));
 
-	if (cpu < 0 || cpu >= NR_CPUS) {
+	if (cpu < 0 || cpu >= nr_cpu_ids) {
 		CDEBUG(D_INFO, "Invalid CPU id %d\n", cpu);
 		return;
 	}
@@ -344,11 +346,11 @@ cfs_cpt_unset_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
 		return;
 	}
 
-	LASSERT(cpu_isset(cpu, *cptab->ctb_parts[cpt].cpt_cpumask));
-	LASSERT(cpu_isset(cpu, *cptab->ctb_cpumask));
+	LASSERT(cpumask_test_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask));
+	LASSERT(cpumask_test_cpu(cpu, cptab->ctb_cpumask));
 
-	cpu_clear(cpu, *cptab->ctb_parts[cpt].cpt_cpumask);
-	cpu_clear(cpu, *cptab->ctb_cpumask);
+	cpumask_clear_cpu(cpu, cptab->ctb_parts[cpt].cpt_cpumask);
+	cpumask_clear_cpu(cpu, cptab->ctb_cpumask);
 	cptab->ctb_cpu2cpt[cpu] = -1;
 
 	node = cpu_to_node(cpu);
@@ -356,22 +358,22 @@ cfs_cpt_unset_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
 	LASSERT(node_isset(node, *cptab->ctb_parts[cpt].cpt_nodemask));
 	LASSERT(node_isset(node, *cptab->ctb_nodemask));
 
-	for_each_cpu_mask(i, *cptab->ctb_parts[cpt].cpt_cpumask) {
+	for_each_cpu(i, cptab->ctb_parts[cpt].cpt_cpumask) {
 		/* this CPT has other CPU belonging to this node? */
 		if (cpu_to_node(i) == node)
 			break;
 	}
 
-	if (i == NR_CPUS)
+	if (i >= nr_cpu_ids)
 		node_clear(node, *cptab->ctb_parts[cpt].cpt_nodemask);
 
-	for_each_cpu_mask(i, *cptab->ctb_cpumask) {
+	for_each_cpu(i, cptab->ctb_cpumask) {
 		/* this CPT-table has other CPU belonging to this node? */
 		if (cpu_to_node(i) == node)
 			break;
 	}
 
-	if (i == NR_CPUS)
+	if (i >= nr_cpu_ids)
 		node_clear(node, *cptab->ctb_nodemask);
 
 	return;
@@ -383,13 +385,14 @@ cfs_cpt_set_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask)
 {
 	int i;
 
-	if (cpus_weight(*mask) == 0 || any_online_cpu(*mask) == NR_CPUS) {
+	if (cpumask_weight(mask) == 0 ||
+	    cpumask_any_and(mask, cpu_online_mask) >= nr_cpu_ids) {
 		CDEBUG(D_INFO, "No online CPU is found in the CPU mask for CPU partition %d\n",
 		       cpt);
 		return 0;
 	}
 
-	for_each_cpu_mask(i, *mask) {
+	for_each_cpu(i, mask) {
 		if (!cfs_cpt_set_cpu(cptab, cpt, i))
 			return 0;
 	}
@@ -403,7 +406,7 @@ cfs_cpt_unset_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask)
 {
 	int i;
 
-	for_each_cpu_mask(i, *mask)
+	for_each_cpu(i, mask)
 		cfs_cpt_unset_cpu(cptab, cpt, i);
 }
 EXPORT_SYMBOL(cfs_cpt_unset_cpumask);
@@ -493,7 +496,7 @@ cfs_cpt_clear(struct cfs_cpt_table *cptab, int cpt)
 	}
 
 	for (; cpt <= last; cpt++) {
-		for_each_cpu_mask(i, *cptab->ctb_parts[cpt].cpt_cpumask)
+		for_each_cpu(i, cptab->ctb_parts[cpt].cpt_cpumask)
 			cfs_cpt_unset_cpu(cptab, cpt, i);
 	}
 }
@@ -554,7 +557,7 @@ EXPORT_SYMBOL(cfs_cpt_current);
 int
 cfs_cpt_of_cpu(struct cfs_cpt_table *cptab, int cpu)
 {
-	LASSERT(cpu >= 0 && cpu < NR_CPUS);
+	LASSERT(cpu >= 0 && cpu < nr_cpu_ids);
 
 	return cptab->ctb_cpu2cpt[cpu];
 }
@@ -578,14 +581,14 @@ cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt)
 		nodemask = cptab->ctb_parts[cpt].cpt_nodemask;
 	}
 
-	if (any_online_cpu(*cpumask) == NR_CPUS) {
+	if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids) {
 		CERROR("No online CPU found in CPU partition %d, did someone do CPU hotplug on system? You might need to reload Lustre modules to keep system working well.\n",
 		       cpt);
 		return -EINVAL;
 	}
 
 	for_each_online_cpu(i) {
-		if (cpu_isset(i, *cpumask))
+		if (cpumask_test_cpu(i, cpumask))
 			continue;
 
 		rc = set_cpus_allowed_ptr(current, cpumask);
@@ -616,14 +619,14 @@ cfs_cpt_choose_ncpus(struct cfs_cpt_table *cptab, int cpt,
 
 	LASSERT(number > 0);
 
-	if (number >= cpus_weight(*node)) {
-		while (!cpus_empty(*node)) {
-			cpu = first_cpu(*node);
+	if (number >= cpumask_weight(node)) {
+		while (!cpumask_empty(node)) {
+			cpu = cpumask_first(node);
 
 			rc = cfs_cpt_set_cpu(cptab, cpt, cpu);
 			if (!rc)
 				return -EINVAL;
-			cpu_clear(cpu, *node);
+			cpumask_clear_cpu(cpu, node);
 		}
 		return 0;
 	}
@@ -636,27 +639,27 @@ cfs_cpt_choose_ncpus(struct cfs_cpt_table *cptab, int cpt,
 		goto out;
 	}
 
-	while (!cpus_empty(*node)) {
-		cpu = first_cpu(*node);
+	while (!cpumask_empty(node)) {
+		cpu = cpumask_first(node);
 
 		/* get cpumask for cores in the same socket */
 		cfs_cpu_core_siblings(cpu, socket);
-		cpus_and(*socket, *socket, *node);
+		cpumask_and(socket, socket, node);
 
-		LASSERT(!cpus_empty(*socket));
+		LASSERT(!cpumask_empty(socket));
 
-		while (!cpus_empty(*socket)) {
+		while (!cpumask_empty(socket)) {
 			int i;
 
 			/* get cpumask for hts in the same core */
 			cfs_cpu_ht_siblings(cpu, core);
-			cpus_and(*core, *core, *node);
+			cpumask_and(core, core, node);
 
-			LASSERT(!cpus_empty(*core));
+			LASSERT(!cpumask_empty(core));
 
-			for_each_cpu_mask(i, *core) {
-				cpu_clear(i, *socket);
-				cpu_clear(i, *node);
+			for_each_cpu(i, core) {
+				cpumask_clear_cpu(i, socket);
+				cpumask_clear_cpu(i, node);
 
 				rc = cfs_cpt_set_cpu(cptab, cpt, i);
 				if (!rc) {
@@ -667,7 +670,7 @@ cfs_cpt_choose_ncpus(struct cfs_cpt_table *cptab, int cpt,
 				if (--number == 0)
 					goto out;
 			}
-			cpu = first_cpu(*socket);
+			cpu = cpumask_first(socket);
 		}
 	}
 
@@ -767,7 +770,7 @@ cfs_cpt_table_create(int ncpt)
 	for_each_online_node(i) {
 		cfs_node_to_cpumask(i, mask);
 
-		while (!cpus_empty(*mask)) {
+		while (!cpumask_empty(mask)) {
 			struct cfs_cpu_partition *part;
 			int n;
 
@@ -776,24 +779,24 @@ cfs_cpt_table_create(int ncpt)
 
 			part = &cptab->ctb_parts[cpt];
 
-			n = num - cpus_weight(*part->cpt_cpumask);
+			n = num - cpumask_weight(part->cpt_cpumask);
 			LASSERT(n > 0);
 
 			rc = cfs_cpt_choose_ncpus(cptab, cpt, mask, n);
 			if (rc < 0)
 				goto failed;
 
-			LASSERT(num >= cpus_weight(*part->cpt_cpumask));
-			if (num == cpus_weight(*part->cpt_cpumask))
+			LASSERT(num >= cpumask_weight(part->cpt_cpumask));
+			if (num == cpumask_weight(part->cpt_cpumask))
 				cpt++;
 		}
 	}
 
 	if (cpt != ncpt ||
-	    num != cpus_weight(*cptab->ctb_parts[ncpt - 1].cpt_cpumask)) {
+	    num != cpumask_weight(cptab->ctb_parts[ncpt - 1].cpt_cpumask)) {
 		CERROR("Expect %d(%d) CPU partitions but got %d(%d), CPU hotplug/unplug while setting?\n",
 		       cptab->ctb_nparts, num, cpt,
-		       cpus_weight(*cptab->ctb_parts[ncpt - 1].cpt_cpumask));
+		       cpumask_weight(cptab->ctb_parts[ncpt - 1].cpt_cpumask));
 		goto failed;
 	}
 
@@ -845,7 +848,7 @@ cfs_cpt_table_create_pattern(char *pattern)
 		return NULL;
 	}
 
-	high = node ? MAX_NUMNODES - 1 : NR_CPUS - 1;
+	high = node ? MAX_NUMNODES - 1 : nr_cpu_ids - 1;
 
 	cptab = cfs_cpt_table_alloc(ncpt);
 	if (cptab == NULL) {
@@ -965,7 +968,8 @@ cfs_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
 		mutex_lock(&cpt_data.cpt_mutex);
 		/* if all HTs in a core are offline, it may break affinity */
 		cfs_cpu_ht_siblings(cpu, cpt_data.cpt_cpumask);
-		warn = any_online_cpu(*cpt_data.cpt_cpumask) >= nr_cpu_ids;
+		warn = cpumask_any_and(cpt_data.cpt_cpumask,
+				       cpu_online_mask) >= nr_cpu_ids;
 		mutex_unlock(&cpt_data.cpt_mutex);
 		CDEBUG(warn ? D_WARNING : D_INFO,
 		       "Lustre: can't support CPU plug-out well now, performance and stability could be impacted [CPU %u action: %lx]\n",
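A note on the recurring idiom in the hunks above: the old test any_online_cpu(*mask) == NR_CPUS ("no CPU in this mask is online") becomes cpumask_any_and(mask, cpu_online_mask) >= nr_cpu_ids, where nr_cpu_ids is the runtime bound on CPU numbers and is usually much smaller than the compile-time NR_CPUS. A minimal sketch of that check, assuming a hypothetical helper name (cpt_has_online_cpu() is not part of the patch):

#include <linux/cpumask.h>
#include <linux/types.h>

/* Illustrative only: cpumask_any_and() returns the first CPU set in both
 * masks, or a value >= nr_cpu_ids when the intersection is empty.
 */
static bool cpt_has_online_cpu(const struct cpumask *mask)
{
	return cpumask_any_and(mask, cpu_online_mask) < nr_cpu_ids;
}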