Commit 5624986d authored by Kan Liang, committed by Peter Zijlstra

perf/x86/intel/lbr: Unify the stored format of LBR information

The LBR information in the structure x86_perf_task_context is currently stored in a different format from the PEBS LBR record and Architecture LBR, which prevents the common code from being shared.

Use the format of the PEBS LBR record as the unified format, and replace pebs_lbr_entry with the generic name lbr_entry.
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/1593780569-62993-11-git-send-email-kan.liang@linux.intel.com
parent 49d8184f
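
At a glance, the data-structure side of the change, condensed from the hunks below:

/* Unified record; the same three-word layout as the PEBS LBR record */
struct lbr_entry {
	u64 from, to, info;
};

struct x86_perf_task_context {
	u64 lbr_sel;
	int tos;
	int valid_lbrs;
	struct x86_perf_task_context_opt opt;
	struct lbr_entry lbr[MAX_LBR_ENTRIES];	/* replaces lbr_from[], lbr_to[] and lbr_info[] */
};

With one record type, the task-context save/restore code and the PEBS drain path below both operate on struct lbr_entry entries instead of three parallel u64 arrays.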
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -954,7 +954,7 @@ static void adaptive_pebs_record_size_update(void)
 	if (pebs_data_cfg & PEBS_DATACFG_XMMS)
 		sz += sizeof(struct pebs_xmm);
 	if (pebs_data_cfg & PEBS_DATACFG_LBRS)
-		sz += x86_pmu.lbr_nr * sizeof(struct pebs_lbr_entry);
+		sz += x86_pmu.lbr_nr * sizeof(struct lbr_entry);
 
 	cpuc->pebs_record_size = sz;
 }
@@ -1595,10 +1595,10 @@ static void setup_pebs_adaptive_sample_data(struct perf_event *event,
 	}
 
 	if (format_size & PEBS_DATACFG_LBRS) {
-		struct pebs_lbr *lbr = next_record;
+		struct lbr_entry *lbr = next_record;
 		int num_lbr = ((format_size >> PEBS_DATACFG_LBR_SHIFT)
 					& 0xff) + 1;
-		next_record = next_record + num_lbr*sizeof(struct pebs_lbr_entry);
+		next_record = next_record + num_lbr * sizeof(struct lbr_entry);
 
 		if (has_branch_stack(event)) {
 			intel_pmu_store_pebs_lbrs(lbr);
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -372,11 +372,11 @@ void intel_pmu_lbr_restore(void *ctx)
 	mask = x86_pmu.lbr_nr - 1;
 	for (i = 0; i < task_ctx->valid_lbrs; i++) {
 		lbr_idx = (tos - i) & mask;
-		wrlbr_from(lbr_idx, task_ctx->lbr_from[i]);
-		wrlbr_to  (lbr_idx, task_ctx->lbr_to[i]);
+		wrlbr_from(lbr_idx, task_ctx->lbr[i].from);
+		wrlbr_to(lbr_idx, task_ctx->lbr[i].to);
 
 		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
-			wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
+			wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr[i].info);
 	}
 
 	for (; i < x86_pmu.lbr_nr; i++) {
@@ -440,10 +440,10 @@ void intel_pmu_lbr_save(void *ctx)
 		from = rdlbr_from(lbr_idx);
 		if (!from)
 			break;
-		task_ctx->lbr_from[i] = from;
-		task_ctx->lbr_to[i] = rdlbr_to(lbr_idx);
+		task_ctx->lbr[i].from = from;
+		task_ctx->lbr[i].to = rdlbr_to(lbr_idx);
 		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
-			rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
+			rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr[i].info);
 	}
 	task_ctx->valid_lbrs = i;
 	task_ctx->tos = tos;
@@ -1179,7 +1179,7 @@ intel_pmu_lbr_filter(struct cpu_hw_events *cpuc)
 	}
 }
 
-void intel_pmu_store_pebs_lbrs(struct pebs_lbr *lbr)
+void intel_pmu_store_pebs_lbrs(struct lbr_entry *lbr)
 {
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	int i;
@@ -1193,11 +1193,11 @@ void intel_pmu_store_pebs_lbrs(struct pebs_lbr *lbr)
 	cpuc->lbr_stack.hw_idx = intel_pmu_lbr_tos();
 
 	for (i = 0; i < x86_pmu.lbr_nr; i++) {
-		u64 info = lbr->lbr[i].info;
+		u64 info = lbr[i].info;
 		struct perf_branch_entry *e = &cpuc->lbr_entries[i];
 
-		e->from = lbr->lbr[i].from;
-		e->to = lbr->lbr[i].to;
+		e->from = lbr[i].from;
+		e->to = lbr[i].to;
 		e->mispred = !!(info & LBR_INFO_MISPRED);
 		e->predicted = !(info & LBR_INFO_MISPRED);
 		e->in_tx = !!(info & LBR_INFO_IN_TX);
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -765,13 +765,11 @@ struct x86_perf_task_context_opt {
 };
 
 struct x86_perf_task_context {
-	u64 lbr_from[MAX_LBR_ENTRIES];
-	u64 lbr_to[MAX_LBR_ENTRIES];
-	u64 lbr_info[MAX_LBR_ENTRIES];
 	u64 lbr_sel;
 	int tos;
 	int valid_lbrs;
 	struct x86_perf_task_context_opt opt;
+	struct lbr_entry lbr[MAX_LBR_ENTRIES];
 };
 
 #define x86_add_quirk(func_) \
@@ -1092,7 +1090,7 @@ void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in);
 
 void intel_pmu_auto_reload_read(struct perf_event *event);
 
-void intel_pmu_store_pebs_lbrs(struct pebs_lbr *lbr);
+void intel_pmu_store_pebs_lbrs(struct lbr_entry *lbr);
 
 void intel_ds_init(void);
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -282,14 +282,10 @@ struct pebs_xmm {
 	u64 xmm[16*2];	/* two entries for each register */
 };
 
-struct pebs_lbr_entry {
+struct lbr_entry {
 	u64 from, to, info;
 };
 
-struct pebs_lbr {
-	struct pebs_lbr_entry lbr[0]; /* Variable length */
-};
-
 /*
  * IBS cpuid feature detection
  */