Commit 8714f7bc authored by Juergen Gross

xen/pv: add fault recovery control to pmu msr accesses

Today pmu_msr_read() and pmu_msr_write() fall back to the safe variants
of the native MSR read/write functions in case the MSR access isn't
emulated via Xen. Allow the caller to request that faults are not
recovered from, by passing NULL for the error pointer: in that case the
non-safe native accessors are used instead.

Restructure the code to make it more readable.
Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Signed-off-by: Juergen Gross <jgross@suse.com>
parent 61367688
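
For illustration only (not part of the patch): a minimal sketch of the
resulting call semantics, assuming hypothetical call sites. The variables
msr, val and err and the helper use_value() are made up for this sketch.

	uint64_t val;
	int err;

	/*
	 * Recoverable access: a fault in the native fallback is caught
	 * and reported through err.
	 */
	if (pmu_msr_read(msr, &val, &err) && !err)
		use_value(val);

	/*
	 * Non-recoverable access: passing NULL for the error pointer
	 * selects native_read_msr(), so a fault in the fallback path is
	 * not recovered from here.
	 */
	if (pmu_msr_read(msr, &val, NULL))
		use_value(val);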
@@ -131,6 +131,9 @@ static inline uint32_t get_fam15h_addr(u32 addr)
 
 static inline bool is_amd_pmu_msr(unsigned int msr)
 {
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+		return false;
+
 	if ((msr >= MSR_F15H_PERF_CTL &&
 	     msr < MSR_F15H_PERF_CTR + (amd_num_counters * 2)) ||
 	    (msr >= MSR_K7_EVNTSEL0 &&
@@ -144,6 +147,9 @@ static int is_intel_pmu_msr(u32 msr_index, int *type, int *index)
 {
 	u32 msr_index_pmc;
 
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+		return false;
+
 	switch (msr_index) {
 	case MSR_CORE_PERF_FIXED_CTR_CTRL:
 	case MSR_IA32_DS_AREA:
@@ -290,48 +296,52 @@ static bool xen_amd_pmu_emulate(unsigned int msr, u64 *val, bool is_read)
 	return false;
 }
 
+static bool pmu_msr_chk_emulated(unsigned int msr, uint64_t *val, bool is_read,
+				 bool *emul)
+{
+	int type, index;
+
+	if (is_amd_pmu_msr(msr))
+		*emul = xen_amd_pmu_emulate(msr, val, is_read);
+	else if (is_intel_pmu_msr(msr, &type, &index))
+		*emul = xen_intel_pmu_emulate(msr, val, type, index, is_read);
+	else
+		return false;
+
+	return true;
+}
+
 bool pmu_msr_read(unsigned int msr, uint64_t *val, int *err)
 {
-	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
-		if (is_amd_pmu_msr(msr)) {
-			if (!xen_amd_pmu_emulate(msr, val, 1))
-				*val = native_read_msr_safe(msr, err);
-			return true;
-		}
-	} else {
-		int type, index;
+	bool emulated;
 
-		if (is_intel_pmu_msr(msr, &type, &index)) {
-			if (!xen_intel_pmu_emulate(msr, val, type, index, 1))
-				*val = native_read_msr_safe(msr, err);
-			return true;
-		}
+	if (!pmu_msr_chk_emulated(msr, val, true, &emulated))
+		return false;
+
+	if (!emulated) {
+		*val = err ? native_read_msr_safe(msr, err)
+			   : native_read_msr(msr);
 	}
 
-	return false;
+	return true;
 }
 
 bool pmu_msr_write(unsigned int msr, uint32_t low, uint32_t high, int *err)
 {
 	uint64_t val = ((uint64_t)high << 32) | low;
+	bool emulated;
 
-	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
-		if (is_amd_pmu_msr(msr)) {
-			if (!xen_amd_pmu_emulate(msr, &val, 0))
-				*err = native_write_msr_safe(msr, low, high);
-			return true;
-		}
-	} else {
-		int type, index;
+	if (!pmu_msr_chk_emulated(msr, &val, false, &emulated))
+		return false;
 
-		if (is_intel_pmu_msr(msr, &type, &index)) {
-			if (!xen_intel_pmu_emulate(msr, &val, type, index, 0))
-				*err = native_write_msr_safe(msr, low, high);
-			return true;
-		}
+	if (!emulated) {
+		if (err)
+			*err = native_write_msr_safe(msr, low, high);
+		else
+			native_write_msr(msr, low, high);
 	}
 
-	return false;
+	return true;
 }
 
 static unsigned long long xen_amd_read_pmc(int counter)