Commit 4cf7a136 authored by Namhyung Kim, committed by Ingo Molnar

perf/core: Save the dynamic parts of sample data size

The perf sample data can be divided into two parts.  The event->header_size
and event->id_header_size keep the static part of the sample data, which
is determined by the sample_type flags alone.

But other parts, like CALLCHAIN and BRANCH_STACK, change dynamically, so
their size can only be determined by looking at the actual data.  In
preparation for handling repeated calls to perf_prepare_sample(), save the
dynamic size in the perf sample data to avoid duplicating that work.
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Tested-by: Jiri Olsa <jolsa@kernel.org>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Acked-by: Song Liu <song@kernel.org>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Link: https://lore.kernel.org/r/20230118060559.615653-2-namhyung@kernel.org
parent 65adf3a5
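Before the diff itself: the change is easiest to read as a two-step sizing scheme, where the static size is known up front from sample_type and every dynamic section only accumulates into data->dyn_size, which is folded into the header once at the end. Below is a minimal userspace sketch of that pattern; the struct and field names echo the kernel ones, but the constants and the main() harness are invented for illustration and are not kernel code.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the kernel structures. */
    struct sample_data {
            uint64_t dyn_size;      /* dynamic part, accumulated per section */
    };

    struct event_header {
            uint16_t size;          /* starts out as the static size */
    };

    int main(void)
    {
            struct event_header header = { .size = 64 };  /* pretend static size from sample_type */
            struct sample_data data = { .dyn_size = 0 };

            /* Each dynamic section adds to dyn_size instead of touching header.size ... */
            data.dyn_size += 3 * sizeof(uint64_t);        /* e.g. a 3-entry callchain */
            data.dyn_size += 128;                         /* e.g. a raw data blob */

            /* ... and the total is applied to the header exactly once at the end. */
            header.size += data.dyn_size;

            printf("total sample size: %u bytes\n", (unsigned int)header.size);
            return 0;
    }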
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1103,6 +1103,7 @@ struct perf_sample_data {
 	 */
 	u64				sample_flags;
 	u64				period;
+	u64				dyn_size;
 
 	/*
 	 * Fields commonly set by __perf_event_header__init_id(),
@@ -1158,6 +1159,7 @@ static inline void perf_sample_data_init(struct perf_sample_data *data,
 	/* remaining struct members initialized in perf_prepare_sample() */
 	data->sample_flags = PERF_SAMPLE_PERIOD;
 	data->period = period;
+	data->dyn_size = 0;
 
 	if (addr) {
 		data->addr = addr;
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7586,7 +7586,7 @@ void perf_prepare_sample(struct perf_event_header *header,
 		size += data->callchain->nr;
 
-		header->size += size * sizeof(u64);
+		data->dyn_size += size * sizeof(u64);
 	}
 
 	if (sample_type & PERF_SAMPLE_RAW) {
@@ -7612,7 +7612,7 @@ void perf_prepare_sample(struct perf_event_header *header,
 			data->raw = NULL;
 		}
 
-		header->size += size;
+		data->dyn_size += size;
 	}
 
 	if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
@@ -7624,7 +7624,7 @@ void perf_prepare_sample(struct perf_event_header *header,
 			size += data->br_stack->nr
 			      * sizeof(struct perf_branch_entry);
 		}
-		header->size += size;
+		data->dyn_size += size;
 	}
 
 	if (sample_type & (PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER))
@@ -7639,7 +7639,7 @@ void perf_prepare_sample(struct perf_event_header *header,
 			size += hweight64(mask) * sizeof(u64);
 		}
 
-		header->size += size;
+		data->dyn_size += size;
 	}
 
 	if (sample_type & PERF_SAMPLE_STACK_USER) {
@@ -7664,7 +7664,7 @@ void perf_prepare_sample(struct perf_event_header *header,
 		size += sizeof(u64) + stack_size;
 
 		data->stack_user_size = stack_size;
-		header->size += size;
+		data->dyn_size += size;
 	}
 
 	if (filtered_sample_type & PERF_SAMPLE_WEIGHT_TYPE)
@@ -7693,7 +7693,7 @@ void perf_prepare_sample(struct perf_event_header *header,
 			size += hweight64(mask) * sizeof(u64);
 		}
 
-		header->size += size;
+		data->dyn_size += size;
 	}
 
 	if (sample_type & PERF_SAMPLE_PHYS_ADDR &&
@@ -7738,8 +7738,11 @@ void perf_prepare_sample(struct perf_event_header *header,
 		size = perf_prepare_sample_aux(event, data, size);
 
 		WARN_ON_ONCE(size + header->size > U16_MAX);
-		header->size += size;
+		data->dyn_size += size + sizeof(u64); /* size above */
 	}
 
+	header->size += data->dyn_size;
+
 	/*
 	 * If you're adding more sample types here, you likely need to do
 	 * something about the overflowing header::size, like repurpose the
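The commit message frames this as preparation for repeated perf_prepare_sample() calls. As a purely hypothetical sketch of why caching the size helps, and not part of this patch, the prepared flag, helper names, and numbers below are invented: a second sizing pass could return the saved value instead of re-walking the callchain, branch stack, user stack, and so on.

    #include <stdbool.h>
    #include <stdint.h>

    struct sample_data {
            bool     prepared;      /* invented flag: has the sizing walk run yet? */
            uint64_t dyn_size;
    };

    /* Pretend this walk is expensive (callchain, branch stack, user stack, ...). */
    static uint64_t walk_dynamic_sections(void)
    {
            return 3 * sizeof(uint64_t) + 128;
    }

    static uint64_t prepare_sample_once(struct sample_data *data, uint16_t static_size)
    {
            if (!data->prepared) {
                    data->dyn_size = walk_dynamic_sections();
                    data->prepared = true;
            }
            /* Later callers reuse the cached size without repeating the walk. */
            return static_size + data->dyn_size;
    }

    int main(void)
    {
            struct sample_data data = { 0 };

            uint64_t first  = prepare_sample_once(&data, 64);
            uint64_t second = prepare_sample_once(&data, 64);   /* no second walk */

            return first == second ? 0 : 1;
    }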