Commit 48f38aa4, authored by Andi Kleen, committed by Ingo Molnar

perf/x86/intel: Extract memory code PEBS parser for reuse

Extract some code related to memory profiling from the PEBS record
parser into separate functions. It can be reused by the upcoming
adaptive PEBS parser. No functional changes.
Rename intel_hsw_weight to intel_get_tsx_weight, and
intel_hsw_transaction to intel_get_tsx_transaction, because the input is
no longer the HSW PEBS record format.
Signed-off-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: acme@kernel.org
Cc: jolsa@kernel.org
Link: https://lkml.kernel.org/r/20190402194509.2832-4-kan.liang@linux.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 878068ea
...@@ -1125,34 +1125,50 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs) ...@@ -1125,34 +1125,50 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
return 0; return 0;
} }
/*
 * Return the TSX weight (cycle count of the last transactional block)
 * encoded in the PEBS tsx_tuning field, or 0 if the field is empty.
 *
 * Takes the raw tsx_tuning value rather than a pebs_record_skl so the
 * same helper can be reused by record formats other than HSW PEBS.
 */
static inline u64 intel_get_tsx_weight(u64 tsx_tuning)
{
	if (tsx_tuning) {
		union hsw_tsx_tuning tsx = { .value = tsx_tuning };

		return tsx.cycles_last_block;
	}
	return 0;
}
/*
 * Build the perf transaction-flags word (PERF_TXN_*) from the raw PEBS
 * tsx_tuning field and the sampled AX register value.
 *
 * The transaction flags live in the upper 32 bits of tsx_tuning
 * (PEBS_HSW_TSX_FLAGS). Takes raw values instead of a pebs_record_skl
 * so non-HSW record formats can reuse this helper.
 */
static inline u64 intel_get_tsx_transaction(u64 tsx_tuning, u64 ax)
{
	u64 txn = (tsx_tuning & PEBS_HSW_TSX_FLAGS) >> 32;

	/* For RTM XABORTs also log the abort code from AX */
	if ((txn & PERF_TXN_TRANSACTION) && (ax & 1))
		txn |= ((ax >> 24) & 0xff) << PERF_TXN_ABORT_SHIFT;

	return txn;
}
/* Event flags that select the HSW precise data-la decoding path. */
#define PERF_X86_EVENT_PEBS_HSW_PREC \
	(PERF_X86_EVENT_PEBS_ST_HSW | \
	 PERF_X86_EVENT_PEBS_LD_HSW | \
	 PERF_X86_EVENT_PEBS_NA_HSW)

/*
 * Decode the raw PEBS data-source auxiliary value (@aux, e.g. the 'dse'
 * field of the record) into a PERF_SAMPLE_DATA_SRC value, choosing the
 * decoder based on the event's PEBS flags:
 *   - PEBS_LDLAT: load-latency events  -> load_latency_data()
 *   - HSW precise store/load flags     -> precise_datala_hsw()
 *   - plain PEBS_ST store events       -> precise_store_data()
 * Falls back to PERF_MEM_NA when no memory-info flag is set.
 */
static u64 get_data_src(struct perf_event *event, u64 aux)
{
	u64 val = PERF_MEM_NA;
	int fl = event->hw.flags;
	/* Any store/precise flag at all? Narrowed further below. */
	bool fst = fl & (PERF_X86_EVENT_PEBS_ST | PERF_X86_EVENT_PEBS_HSW_PREC);

	if (fl & PERF_X86_EVENT_PEBS_LDLAT)
		val = load_latency_data(aux);
	else if (fst && (fl & PERF_X86_EVENT_PEBS_HSW_PREC))
		val = precise_datala_hsw(event, aux);
	else if (fst)
		val = precise_store_data(aux);
	return val;
}
static void setup_pebs_sample_data(struct perf_event *event, static void setup_pebs_sample_data(struct perf_event *event,
struct pt_regs *iregs, void *__pebs, struct pt_regs *iregs, void *__pebs,
struct perf_sample_data *data, struct perf_sample_data *data,
struct pt_regs *regs) struct pt_regs *regs)
{ {
#define PERF_X86_EVENT_PEBS_HSW_PREC \
(PERF_X86_EVENT_PEBS_ST_HSW | \
PERF_X86_EVENT_PEBS_LD_HSW | \
PERF_X86_EVENT_PEBS_NA_HSW)
/* /*
* We cast to the biggest pebs_record but are careful not to * We cast to the biggest pebs_record but are careful not to
* unconditionally access the 'extra' entries. * unconditionally access the 'extra' entries.
...@@ -1160,17 +1176,13 @@ static void setup_pebs_sample_data(struct perf_event *event, ...@@ -1160,17 +1176,13 @@ static void setup_pebs_sample_data(struct perf_event *event,
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct pebs_record_skl *pebs = __pebs; struct pebs_record_skl *pebs = __pebs;
u64 sample_type; u64 sample_type;
int fll, fst, dsrc; int fll;
int fl = event->hw.flags;
if (pebs == NULL) if (pebs == NULL)
return; return;
sample_type = event->attr.sample_type; sample_type = event->attr.sample_type;
dsrc = sample_type & PERF_SAMPLE_DATA_SRC; fll = event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT;
fll = fl & PERF_X86_EVENT_PEBS_LDLAT;
fst = fl & (PERF_X86_EVENT_PEBS_ST | PERF_X86_EVENT_PEBS_HSW_PREC);
perf_sample_data_init(data, 0, event->hw.last_period); perf_sample_data_init(data, 0, event->hw.last_period);
...@@ -1185,16 +1197,8 @@ static void setup_pebs_sample_data(struct perf_event *event, ...@@ -1185,16 +1197,8 @@ static void setup_pebs_sample_data(struct perf_event *event,
/* /*
* data.data_src encodes the data source * data.data_src encodes the data source
*/ */
if (dsrc) { if (sample_type & PERF_SAMPLE_DATA_SRC)
u64 val = PERF_MEM_NA; data->data_src.val = get_data_src(event, pebs->dse);
if (fll)
val = load_latency_data(pebs->dse);
else if (fst && (fl & PERF_X86_EVENT_PEBS_HSW_PREC))
val = precise_datala_hsw(event, pebs->dse);
else if (fst)
val = precise_store_data(pebs->dse);
data->data_src.val = val;
}
/* /*
* We must however always use iregs for the unwinder to stay sane; the * We must however always use iregs for the unwinder to stay sane; the
...@@ -1281,10 +1285,11 @@ static void setup_pebs_sample_data(struct perf_event *event, ...@@ -1281,10 +1285,11 @@ static void setup_pebs_sample_data(struct perf_event *event,
if (x86_pmu.intel_cap.pebs_format >= 2) { if (x86_pmu.intel_cap.pebs_format >= 2) {
/* Only set the TSX weight when no memory weight. */ /* Only set the TSX weight when no memory weight. */
if ((sample_type & PERF_SAMPLE_WEIGHT) && !fll) if ((sample_type & PERF_SAMPLE_WEIGHT) && !fll)
data->weight = intel_hsw_weight(pebs); data->weight = intel_get_tsx_weight(pebs->tsx_tuning);
if (sample_type & PERF_SAMPLE_TRANSACTION) if (sample_type & PERF_SAMPLE_TRANSACTION)
data->txn = intel_hsw_transaction(pebs); data->txn = intel_get_tsx_transaction(pebs->tsx_tuning,
pebs->ax);
} }
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment