Commit bb447c27 authored by Namhyung Kim's avatar Namhyung Kim Committed by Ingo Molnar

perf/core: Set data->sample_flags in perf_prepare_sample()

The perf_prepare_sample() function sets the perf_sample_data according
to the attr->sample_type before copying it to the ring buffer.  But BPF
also wants to access the sample data so it needs to prepare the sample
even before the regular path.

That means perf_prepare_sample() can be called more than once.  Set
the data->sample_flags consistently so that it can indicate which fields
are already set, and skip them if they are.

Also update the filtered_sample_type to have the dependent flags to
reduce the number of branches.
Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Tested-by: Jiri Olsa <jolsa@kernel.org>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Link: https://lore.kernel.org/r/20230118060559.615653-6-namhyung@kernel.org
parent eb55b455
...@@ -7046,12 +7046,21 @@ static void perf_aux_sample_output(struct perf_event *event, ...@@ -7046,12 +7046,21 @@ static void perf_aux_sample_output(struct perf_event *event,
ring_buffer_put(rb); ring_buffer_put(rb);
} }
/*
* A set of common sample data types saved even for non-sample records
* when event->attr.sample_id_all is set.
*/
#define PERF_SAMPLE_ID_ALL (PERF_SAMPLE_TID | PERF_SAMPLE_TIME | \
PERF_SAMPLE_ID | PERF_SAMPLE_STREAM_ID | \
PERF_SAMPLE_CPU | PERF_SAMPLE_IDENTIFIER)
static void __perf_event_header__init_id(struct perf_event_header *header, static void __perf_event_header__init_id(struct perf_event_header *header,
struct perf_sample_data *data, struct perf_sample_data *data,
struct perf_event *event, struct perf_event *event,
u64 sample_type) u64 sample_type)
{ {
data->type = event->attr.sample_type; data->type = event->attr.sample_type;
data->sample_flags |= data->type & PERF_SAMPLE_ID_ALL;
header->size += event->id_header_size; header->size += event->id_header_size;
if (sample_type & PERF_SAMPLE_TID) { if (sample_type & PERF_SAMPLE_TID) {
...@@ -7554,6 +7563,11 @@ perf_callchain(struct perf_event *event, struct pt_regs *regs) ...@@ -7554,6 +7563,11 @@ perf_callchain(struct perf_event *event, struct pt_regs *regs)
return callchain ?: &__empty_callchain; return callchain ?: &__empty_callchain;
} }
/*
 * Return @d if any of the bits in @s are present in @flags, otherwise 0.
 * Used to add dependent sample flags (e.g. STACK_USER implies REGS_USER)
 * to filtered_sample_type.  Deliberately branchless: multiplying by the
 * 0/1 result of !!() avoids a conditional, per the commit's goal of
 * reducing the number of branches in perf_prepare_sample().
 */
static __always_inline u64 __cond_set(u64 flags, u64 s, u64 d)
{
/* !!(flags & s) collapses to 1 if any selector bit is set, else 0 */
return d * !!(flags & s);
}
void perf_prepare_sample(struct perf_event_header *header, void perf_prepare_sample(struct perf_event_header *header,
struct perf_sample_data *data, struct perf_sample_data *data,
struct perf_event *event, struct perf_event *event,
...@@ -7569,14 +7583,24 @@ void perf_prepare_sample(struct perf_event_header *header, ...@@ -7569,14 +7583,24 @@ void perf_prepare_sample(struct perf_event_header *header,
header->misc |= perf_misc_flags(regs); header->misc |= perf_misc_flags(regs);
/* /*
* Clear the sample flags that have already been done by the * Add the sample flags that are dependent to others. And clear the
* PMU driver. * sample flags that have already been done by the PMU driver.
*/ */
filtered_sample_type = sample_type & ~data->sample_flags; filtered_sample_type = sample_type;
filtered_sample_type |= __cond_set(sample_type, PERF_SAMPLE_CODE_PAGE_SIZE,
PERF_SAMPLE_IP);
filtered_sample_type |= __cond_set(sample_type, PERF_SAMPLE_DATA_PAGE_SIZE |
PERF_SAMPLE_PHYS_ADDR, PERF_SAMPLE_ADDR);
filtered_sample_type |= __cond_set(sample_type, PERF_SAMPLE_STACK_USER,
PERF_SAMPLE_REGS_USER);
filtered_sample_type &= ~data->sample_flags;
__perf_event_header__init_id(header, data, event, filtered_sample_type); __perf_event_header__init_id(header, data, event, filtered_sample_type);
if (sample_type & (PERF_SAMPLE_IP | PERF_SAMPLE_CODE_PAGE_SIZE)) if (filtered_sample_type & PERF_SAMPLE_IP) {
data->ip = perf_instruction_pointer(regs); data->ip = perf_instruction_pointer(regs);
data->sample_flags |= PERF_SAMPLE_IP;
}
if (filtered_sample_type & PERF_SAMPLE_CALLCHAIN) if (filtered_sample_type & PERF_SAMPLE_CALLCHAIN)
perf_sample_save_callchain(data, event, regs); perf_sample_save_callchain(data, event, regs);
...@@ -7593,10 +7617,15 @@ void perf_prepare_sample(struct perf_event_header *header, ...@@ -7593,10 +7617,15 @@ void perf_prepare_sample(struct perf_event_header *header,
data->sample_flags |= PERF_SAMPLE_BRANCH_STACK; data->sample_flags |= PERF_SAMPLE_BRANCH_STACK;
} }
if (sample_type & (PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER)) if (filtered_sample_type & PERF_SAMPLE_REGS_USER)
perf_sample_regs_user(&data->regs_user, regs); perf_sample_regs_user(&data->regs_user, regs);
if (sample_type & PERF_SAMPLE_REGS_USER) { /*
* It cannot use the filtered_sample_type here as REGS_USER can be set
* by STACK_USER (using __cond_set() above) and we don't want to update
* the dyn_size if it's not requested by users.
*/
if ((sample_type & ~data->sample_flags) & PERF_SAMPLE_REGS_USER) {
/* regs dump ABI info */ /* regs dump ABI info */
int size = sizeof(u64); int size = sizeof(u64);
...@@ -7606,9 +7635,10 @@ void perf_prepare_sample(struct perf_event_header *header, ...@@ -7606,9 +7635,10 @@ void perf_prepare_sample(struct perf_event_header *header,
} }
data->dyn_size += size; data->dyn_size += size;
data->sample_flags |= PERF_SAMPLE_REGS_USER;
} }
if (sample_type & PERF_SAMPLE_STACK_USER) { if (filtered_sample_type & PERF_SAMPLE_STACK_USER) {
/* /*
* Either we need PERF_SAMPLE_STACK_USER bit to be always * Either we need PERF_SAMPLE_STACK_USER bit to be always
* processed as the last one or have additional check added * processed as the last one or have additional check added
...@@ -7631,23 +7661,30 @@ void perf_prepare_sample(struct perf_event_header *header, ...@@ -7631,23 +7661,30 @@ void perf_prepare_sample(struct perf_event_header *header,
data->stack_user_size = stack_size; data->stack_user_size = stack_size;
data->dyn_size += size; data->dyn_size += size;
data->sample_flags |= PERF_SAMPLE_STACK_USER;
} }
if (filtered_sample_type & PERF_SAMPLE_WEIGHT_TYPE) if (filtered_sample_type & PERF_SAMPLE_WEIGHT_TYPE) {
data->weight.full = 0; data->weight.full = 0;
data->sample_flags |= PERF_SAMPLE_WEIGHT_TYPE;
}
if (filtered_sample_type & PERF_SAMPLE_DATA_SRC) if (filtered_sample_type & PERF_SAMPLE_DATA_SRC) {
data->data_src.val = PERF_MEM_NA; data->data_src.val = PERF_MEM_NA;
data->sample_flags |= PERF_SAMPLE_DATA_SRC;
}
if (filtered_sample_type & PERF_SAMPLE_TRANSACTION) if (filtered_sample_type & PERF_SAMPLE_TRANSACTION) {
data->txn = 0; data->txn = 0;
data->sample_flags |= PERF_SAMPLE_TRANSACTION;
}
if (sample_type & (PERF_SAMPLE_ADDR | PERF_SAMPLE_PHYS_ADDR | PERF_SAMPLE_DATA_PAGE_SIZE)) { if (filtered_sample_type & PERF_SAMPLE_ADDR) {
if (filtered_sample_type & PERF_SAMPLE_ADDR)
data->addr = 0; data->addr = 0;
data->sample_flags |= PERF_SAMPLE_ADDR;
} }
if (sample_type & PERF_SAMPLE_REGS_INTR) { if (filtered_sample_type & PERF_SAMPLE_REGS_INTR) {
/* regs dump ABI info */ /* regs dump ABI info */
int size = sizeof(u64); int size = sizeof(u64);
...@@ -7660,19 +7697,22 @@ void perf_prepare_sample(struct perf_event_header *header, ...@@ -7660,19 +7697,22 @@ void perf_prepare_sample(struct perf_event_header *header,
} }
data->dyn_size += size; data->dyn_size += size;
data->sample_flags |= PERF_SAMPLE_REGS_INTR;
} }
if (sample_type & PERF_SAMPLE_PHYS_ADDR && if (filtered_sample_type & PERF_SAMPLE_PHYS_ADDR) {
filtered_sample_type & PERF_SAMPLE_PHYS_ADDR)
data->phys_addr = perf_virt_to_phys(data->addr); data->phys_addr = perf_virt_to_phys(data->addr);
data->sample_flags |= PERF_SAMPLE_PHYS_ADDR;
}
#ifdef CONFIG_CGROUP_PERF #ifdef CONFIG_CGROUP_PERF
if (sample_type & PERF_SAMPLE_CGROUP) { if (filtered_sample_type & PERF_SAMPLE_CGROUP) {
struct cgroup *cgrp; struct cgroup *cgrp;
/* protected by RCU */ /* protected by RCU */
cgrp = task_css_check(current, perf_event_cgrp_id, 1)->cgroup; cgrp = task_css_check(current, perf_event_cgrp_id, 1)->cgroup;
data->cgroup = cgroup_id(cgrp); data->cgroup = cgroup_id(cgrp);
data->sample_flags |= PERF_SAMPLE_CGROUP;
} }
#endif #endif
...@@ -7681,13 +7721,17 @@ void perf_prepare_sample(struct perf_event_header *header, ...@@ -7681,13 +7721,17 @@ void perf_prepare_sample(struct perf_event_header *header,
* require PERF_SAMPLE_ADDR, kernel implicitly retrieve the data->addr, * require PERF_SAMPLE_ADDR, kernel implicitly retrieve the data->addr,
* but the value will not dump to the userspace. * but the value will not dump to the userspace.
*/ */
if (sample_type & PERF_SAMPLE_DATA_PAGE_SIZE) if (filtered_sample_type & PERF_SAMPLE_DATA_PAGE_SIZE) {
data->data_page_size = perf_get_page_size(data->addr); data->data_page_size = perf_get_page_size(data->addr);
data->sample_flags |= PERF_SAMPLE_DATA_PAGE_SIZE;
}
if (sample_type & PERF_SAMPLE_CODE_PAGE_SIZE) if (filtered_sample_type & PERF_SAMPLE_CODE_PAGE_SIZE) {
data->code_page_size = perf_get_page_size(data->ip); data->code_page_size = perf_get_page_size(data->ip);
data->sample_flags |= PERF_SAMPLE_CODE_PAGE_SIZE;
}
if (sample_type & PERF_SAMPLE_AUX) { if (filtered_sample_type & PERF_SAMPLE_AUX) {
u64 size; u64 size;
header->size += sizeof(u64); /* size */ header->size += sizeof(u64); /* size */
...@@ -7705,6 +7749,7 @@ void perf_prepare_sample(struct perf_event_header *header, ...@@ -7705,6 +7749,7 @@ void perf_prepare_sample(struct perf_event_header *header,
WARN_ON_ONCE(size + header->size > U16_MAX); WARN_ON_ONCE(size + header->size > U16_MAX);
data->dyn_size += size + sizeof(u64); /* size above */ data->dyn_size += size + sizeof(u64); /* size above */
data->sample_flags |= PERF_SAMPLE_AUX;
} }
header->size += data->dyn_size; header->size += data->dyn_size;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment