Commit 4aec74bc authored by Yury Norov

arch/x86: replace cpumask_weight with cpumask_empty where appropriate

In some cases, arch/x86 code calls cpumask_weight() only to check whether any
bit of a given cpumask is set. This can be done more efficiently with
cpumask_empty(), because cpumask_empty() stops traversing the cpumask as soon
as it finds the first set bit, while cpumask_weight() counts all bits
unconditionally.

Signed-off-by: Yury Norov <yury.norov@gmail.com>
Reviewed-by: Steve Wahl <steve.wahl@hpe.com>
parent b6dad11d
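
Note: the gain comes from the shape of the two primitives. As a simplified
sketch (not the verbatim kernel source; the real definitions live in
include/linux/cpumask.h and lib/bitmap.c, and include small_const_nbits()
fast paths and last-word masking that are omitted here), cpumask_empty()
reduces to a find_first_bit() scan that stops at the first set bit, while
cpumask_weight() popcounts every word of the mask:

/* Simplified sketch only -- see include/linux/cpumask.h for the real code. */
static inline bool cpumask_empty(const struct cpumask *srcp)
{
	/*
	 * find_first_bit() returns nr_cpumask_bits when no bit is set,
	 * so this scan stops at the first set bit it encounters.
	 */
	return find_first_bit(cpumask_bits(srcp), nr_cpumask_bits) == nr_cpumask_bits;
}

static inline unsigned int cpumask_weight(const struct cpumask *srcp)
{
	const unsigned long *bits = cpumask_bits(srcp);
	unsigned int k, w = 0;

	/* Popcounts every word unconditionally (last-word masking omitted). */
	for (k = 0; k < BITS_TO_LONGS(nr_cpumask_bits); k++)
		w += hweight_long(bits[k]);
	return w;
}

With NR_CPUS=8192 a cpumask spans 128 64-bit words; for a non-empty mask the
cpumask_empty() form can return after inspecting the first word that contains
a set bit, whereas cpumask_weight() always walks all of them.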
arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -341,14 +341,14 @@ static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
 
 	/* Check whether cpus belong to parent ctrl group */
 	cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask);
-	if (cpumask_weight(tmpmask)) {
+	if (!cpumask_empty(tmpmask)) {
 		rdt_last_cmd_puts("Can only add CPUs to mongroup that belong to parent\n");
 		return -EINVAL;
 	}
 
 	/* Check whether cpus are dropped from this group */
 	cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
-	if (cpumask_weight(tmpmask)) {
+	if (!cpumask_empty(tmpmask)) {
 		/* Give any dropped cpus to parent rdtgroup */
 		cpumask_or(&prgrp->cpu_mask, &prgrp->cpu_mask, tmpmask);
 		update_closid_rmid(tmpmask, prgrp);
@@ -359,7 +359,7 @@ static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
 	 * and update per-cpu rmid
 	 */
 	cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
-	if (cpumask_weight(tmpmask)) {
+	if (!cpumask_empty(tmpmask)) {
 		head = &prgrp->mon.crdtgrp_list;
 		list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
 			if (crgrp == rdtgrp)
@@ -394,7 +394,7 @@ static int cpus_ctrl_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
 
 	/* Check whether cpus are dropped from this group */
 	cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
-	if (cpumask_weight(tmpmask)) {
+	if (!cpumask_empty(tmpmask)) {
 		/* Can't drop from default group */
 		if (rdtgrp == &rdtgroup_default) {
 			rdt_last_cmd_puts("Can't drop CPUs from default group\n");
@@ -413,12 +413,12 @@ static int cpus_ctrl_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
 	 * and update per-cpu closid/rmid.
 	 */
 	cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
-	if (cpumask_weight(tmpmask)) {
+	if (!cpumask_empty(tmpmask)) {
 		list_for_each_entry(r, &rdt_all_groups, rdtgroup_list) {
 			if (r == rdtgrp)
 				continue;
 			cpumask_and(tmpmask1, &r->cpu_mask, tmpmask);
-			if (cpumask_weight(tmpmask1))
+			if (!cpumask_empty(tmpmask1))
 				cpumask_rdtgrp_clear(r, tmpmask1);
 		}
 		update_closid_rmid(tmpmask, rdtgrp);
@@ -488,7 +488,7 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
 
 	/* check that user didn't specify any offline cpus */
 	cpumask_andnot(tmpmask, newmask, cpu_online_mask);
-	if (cpumask_weight(tmpmask)) {
+	if (!cpumask_empty(tmpmask)) {
 		ret = -EINVAL;
 		rdt_last_cmd_puts("Can only assign online CPUs\n");
 		goto unlock;
arch/x86/mm/mmio-mod.c
@@ -400,7 +400,7 @@ static void leave_uniprocessor(void)
 	int cpu;
 	int err;
 
-	if (!cpumask_available(downed_cpus) || cpumask_weight(downed_cpus) == 0)
+	if (!cpumask_available(downed_cpus) || cpumask_empty(downed_cpus))
 		return;
 	pr_notice("Re-enabling CPUs...\n");
 	for_each_cpu(cpu, downed_cpus) {
arch/x86/platform/uv/uv_nmi.c
@@ -985,7 +985,7 @@ static int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
 
 	/* Clear global flags */
 	if (master) {
-		if (cpumask_weight(uv_nmi_cpu_mask))
+		if (!cpumask_empty(uv_nmi_cpu_mask))
 			uv_nmi_cleanup_mask();
 		atomic_set(&uv_nmi_cpus_in_nmi, -1);
 		atomic_set(&uv_nmi_cpu, -1);