Commit 75d7ba32 authored by Sean Christopherson, committed by Paolo Bonzini

perf tools: Use dedicated non-atomic clear/set bit helpers

Use the dedicated non-atomic helpers for {clear,set}_bit() and their
test variants, i.e. the double-underscore versions.  Despite being
defined in atomic.h, and despite the kernel versions being atomic in the
kernel, tools' {clear,set}_bit() helpers aren't actually atomic.  Move
to the double-underscore versions so that the versions that are expected
to be atomic (for kernel developers) can be made atomic without affecting
users that don't want atomic operations.

No functional change intended.
Signed-off-by: Sean Christopherson <seanjc@google.com>
Acked-by: Namhyung Kim <namhyung@kernel.org>
Message-Id: <20221119013450.2643007-6-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 7f2b47f2
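
For reference, here is a minimal sketch of what the double-underscore helpers boil down to. This is modeled on the generic word-and-mask bitops convention rather than copied verbatim from the tools headers, so treat the exact definitions as illustrative; the point is that each operation is a plain read-modify-write with no LOCK prefix or other atomicity guarantee:

#include <limits.h>

#define BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)
#define BIT_WORD(nr)	((nr) / BITS_PER_LONG)
#define BIT_MASK(nr)	(1UL << ((nr) % BITS_PER_LONG))

/* Plain read-modify-write; concurrent callers can lose updates. */
static inline void __set_bit(unsigned long nr, unsigned long *addr)
{
	addr[BIT_WORD(nr)] |= BIT_MASK(nr);
}

static inline void __clear_bit(unsigned long nr, unsigned long *addr)
{
	addr[BIT_WORD(nr)] &= ~BIT_MASK(nr);
}

/* Returns the old bit value; again a plain, non-atomic RMW. */
static inline int __test_and_set_bit(unsigned long nr, unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *word = addr + BIT_WORD(nr);
	int old = (*word & mask) != 0;

	*word |= mask;
	return old;
}

A single-threaded caller, like the perf call sites below, gets identical behavior from either variant; only code that modifies a bitmap concurrently needs a genuinely atomic version (e.g. one built on a LOCK-prefixed instruction or __atomic_fetch_or()).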
@@ -70,7 +70,7 @@ static int do_for_each_set_bit(unsigned int num_bits)
 		bitmap_zero(to_test, num_bits);
 		skip = num_bits / set_bits;
 		for (i = 0; i < num_bits; i += skip)
-			set_bit(i, to_test);
+			__set_bit(i, to_test);
 
 		for (i = 0; i < outer_iterations; i++) {
 			old = accumulator;
...
@@ -230,7 +230,7 @@ static void c2c_he__set_cpu(struct c2c_hist_entry *c2c_he,
 		      "WARNING: no sample cpu value"))
 		return;
 
-	set_bit(sample->cpu, c2c_he->cpuset);
+	__set_bit(sample->cpu, c2c_he->cpuset);
 }
 
 static void c2c_he__set_node(struct c2c_hist_entry *c2c_he,
@@ -247,7 +247,7 @@ static void c2c_he__set_node(struct c2c_hist_entry *c2c_he,
 	if (WARN_ONCE(node < 0, "WARNING: failed to find node\n"))
 		return;
 
-	set_bit(node, c2c_he->nodeset);
+	__set_bit(node, c2c_he->nodeset);
 
 	if (c2c_he->paddr != sample->phys_addr) {
 		c2c_he->paddr_cnt++;
@@ -2318,7 +2318,7 @@ static int setup_nodes(struct perf_session *session)
 			continue;
 
 		perf_cpu_map__for_each_cpu(cpu, idx, map) {
-			set_bit(cpu.cpu, set);
+			__set_bit(cpu.cpu, set);
 
 			if (WARN_ONCE(cpu2node[cpu.cpu] != -1, "node/cpu topology bug"))
 				return -EINVAL;
...
@@ -216,7 +216,7 @@ static struct kwork_atom *atom_new(struct perf_kwork *kwork,
 	list_add_tail(&page->list, &kwork->atom_page_list);
 
 found_atom:
-	set_bit(i, page->bitmap);
+	__set_bit(i, page->bitmap);
 	atom->time = sample->time;
 	atom->prev = NULL;
 	atom->page_addr = page;
@@ -229,8 +229,8 @@ static void atom_free(struct kwork_atom *atom)
 	if (atom->prev != NULL)
 		atom_free(atom->prev);
 
-	clear_bit(atom->bit_inpage,
-		  ((struct kwork_atom_page *)atom->page_addr)->bitmap);
+	__clear_bit(atom->bit_inpage,
+		    ((struct kwork_atom_page *)atom->page_addr)->bitmap);
 }
 
 static void atom_del(struct kwork_atom *atom)
...
@@ -3555,7 +3555,7 @@ static int record__mmap_cpu_mask_init(struct mmap_cpu_mask *mask, struct perf_cp
 		/* Return ENODEV is input cpu is greater than max cpu */
 		if ((unsigned long)cpu.cpu > mask->nbits)
 			return -ENODEV;
-		set_bit(cpu.cpu, mask->bits);
+		__set_bit(cpu.cpu, mask->bits);
 	}
 
 	return 0;
@@ -3627,8 +3627,8 @@ static int record__init_thread_cpu_masks(struct record *rec, struct perf_cpu_map
 	pr_debug("nr_threads: %d\n", rec->nr_threads);
 
 	for (t = 0; t < rec->nr_threads; t++) {
-		set_bit(perf_cpu_map__cpu(cpus, t).cpu, rec->thread_masks[t].maps.bits);
-		set_bit(perf_cpu_map__cpu(cpus, t).cpu, rec->thread_masks[t].affinity.bits);
+		__set_bit(perf_cpu_map__cpu(cpus, t).cpu, rec->thread_masks[t].maps.bits);
+		__set_bit(perf_cpu_map__cpu(cpus, t).cpu, rec->thread_masks[t].affinity.bits);
 		if (verbose) {
 			pr_debug("thread_masks[%d]: ", t);
 			mmap_cpu_mask__scnprintf(&rec->thread_masks[t].maps, "maps");
...
@@ -1573,7 +1573,7 @@ static int map_switch_event(struct perf_sched *sched, struct evsel *evsel,
 
 	if (sched->map.comp) {
 		cpus_nr = bitmap_weight(sched->map.comp_cpus_mask, MAX_CPUS);
-		if (!test_and_set_bit(this_cpu.cpu, sched->map.comp_cpus_mask)) {
+		if (!__test_and_set_bit(this_cpu.cpu, sched->map.comp_cpus_mask)) {
 			sched->map.comp_cpus[cpus_nr++] = this_cpu;
 			new_cpu = true;
 		}
...
@@ -18,7 +18,7 @@ static unsigned long *get_bitmap(const char *str, int nbits)
 
 	if (map && bm) {
 		for (i = 0; i < perf_cpu_map__nr(map); i++)
-			set_bit(perf_cpu_map__cpu(map, i).cpu, bm);
+			__set_bit(perf_cpu_map__cpu(map, i).cpu, bm);
 	}
 
 	if (map)
...
@@ -33,7 +33,7 @@ static unsigned long *get_bitmap(const char *str, int nbits)
 		int i;
 
 		perf_cpu_map__for_each_cpu(cpu, i, map)
-			set_bit(cpu.cpu, bm);
+			__set_bit(cpu.cpu, bm);
 	}
 
 	if (map)
...
@@ -58,14 +58,14 @@ void affinity__set(struct affinity *a, int cpu)
 		return;
 
 	a->changed = true;
-	set_bit(cpu, a->sched_cpus);
+	__set_bit(cpu, a->sched_cpus);
 	/*
 	 * We ignore errors because affinity is just an optimization.
 	 * This could happen for example with isolated CPUs or cpusets.
 	 * In this case the IPIs inside the kernel's perf API still work.
 	 */
 	sched_setaffinity(0, cpu_set_size, (cpu_set_t *)a->sched_cpus);
-	clear_bit(cpu, a->sched_cpus);
+	__clear_bit(cpu, a->sched_cpus);
 }
 
 static void __affinity__cleanup(struct affinity *a)
...
@@ -79,12 +79,12 @@ struct perf_file_attr {
 
 void perf_header__set_feat(struct perf_header *header, int feat)
 {
-	set_bit(feat, header->adds_features);
+	__set_bit(feat, header->adds_features);
 }
 
 void perf_header__clear_feat(struct perf_header *header, int feat)
 {
-	clear_bit(feat, header->adds_features);
+	__clear_bit(feat, header->adds_features);
 }
 
 bool perf_header__has_feat(const struct perf_header *header, int feat)
@@ -1358,7 +1358,7 @@ static int memory_node__read(struct memory_node *n, unsigned long idx)
 	rewinddir(dir);
 	for_each_memory(phys, dir) {
-		set_bit(phys, n->set);
+		__set_bit(phys, n->set);
 	}
 
 	closedir(dir);
@@ -3952,7 +3952,7 @@ int perf_file_header__read(struct perf_file_header *header,
 		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
 			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
-			set_bit(HEADER_BUILD_ID, header->adds_features);
+			__set_bit(HEADER_BUILD_ID, header->adds_features);
 		}
 	}
...
@@ -111,7 +111,7 @@ static int perf_mmap__aio_bind(struct mmap *map, int idx, struct perf_cpu cpu, i
 		pr_err("Failed to allocate node mask for mbind: error %m\n");
 		return -1;
 	}
-	set_bit(node_index, node_mask);
+	__set_bit(node_index, node_mask);
 	if (mbind(data, mmap_len, MPOL_BIND, node_mask, node_index + 1 + 1, 0)) {
 		pr_err("Failed to bind [%p-%p] AIO buffer to node %lu: error %m\n",
 			data, data + mmap_len, node_index);
@@ -256,7 +256,7 @@ static void build_node_mask(int node, struct mmap_cpu_mask *mask)
 	for (idx = 0; idx < nr_cpus; idx++) {
 		cpu = perf_cpu_map__cpu(cpu_map, idx); /* map c index to online cpu index */
 		if (cpu__get_node(cpu) == node)
-			set_bit(cpu.cpu, mask->bits);
+			__set_bit(cpu.cpu, mask->bits);
 	}
 }
 
@@ -270,7 +270,7 @@ static int perf_mmap__setup_affinity_mask(struct mmap *map, struct mmap_params *
 	if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1)
 		build_node_mask(cpu__get_node(map->core.cpu), &map->affinity_mask);
 	else if (mp->affinity == PERF_AFFINITY_CPU)
-		set_bit(map->core.cpu.cpu, map->affinity_mask.bits);
+		__set_bit(map->core.cpu.cpu, map->affinity_mask.bits);
 
 	return 0;
 }
...
@@ -1513,7 +1513,7 @@ void perf_pmu__set_format(unsigned long *bits, long from, long to)
 
 	memset(bits, 0, BITS_TO_BYTES(PERF_PMU_FORMAT_BITS));
 	for (b = from; b <= to; b++)
-		set_bit(b, bits);
+		__set_bit(b, bits);
 }
 
 void perf_pmu__del_formats(struct list_head *formats)
...
@@ -365,7 +365,7 @@ static void perl_process_tracepoint(struct perf_sample *sample,
 
 	sprintf(handler, "%s::%s", event->system, event->name);
 
-	if (!test_and_set_bit(event->id, events_defined))
+	if (!__test_and_set_bit(event->id, events_defined))
 		define_event_symbols(event, handler, event->print_fmt.args);
 
 	s = nsecs / NSEC_PER_SEC;
...
@@ -933,7 +933,7 @@ static void python_process_tracepoint(struct perf_sample *sample,
 
 	sprintf(handler_name, "%s__%s", event->system, event->name);
 
-	if (!test_and_set_bit(event->id, events_defined))
+	if (!__test_and_set_bit(event->id, events_defined))
 		define_event_symbols(event, handler_name, event->print_fmt.args);
 
 	handler = get_handler(handler_name);
...
@@ -2748,7 +2748,7 @@ int perf_session__cpu_bitmap(struct perf_session *session,
 			goto out_delete_map;
 		}
 
-		set_bit(cpu.cpu, cpu_bitmap);
+		__set_bit(cpu.cpu, cpu_bitmap);
 	}
 
 	err = 0;
...
@@ -741,7 +741,7 @@ static int str_to_bitmap(char *s, cpumask_t *b, int nr_cpus)
 			break;
 		}
 
-		set_bit(c.cpu, cpumask_bits(b));
+		__set_bit(c.cpu, cpumask_bits(b));
 	}
 
 	perf_cpu_map__put(m);
...