Commit db281766 authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull ARM KVM fixes from Paolo Bonzini:

 - a series of fixes to deal with the aliasing between the sp and xzr
   registers (a short stand-alone illustration follows this header)

 - a follow-up to the cache flush fix that went into -rc3 (sketched after
   the stage2_flush_ptes hunk below)

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  ARM/arm64: KVM: correct PTE uncachedness check
  arm64: KVM: Get rid of old vcpu_reg()
  arm64: KVM: Correctly handle zero register in system register accesses
  arm64: KVM: Remove const from struct sys_reg_params
  arm64: KVM: Correctly handle zero register during MMIO
parents 8cdef969 09922076
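For context on the sp/xzr aliasing: in the encodings these traps decode, register number 31 names the zero register (xzr/wzr), never the stack pointer, yet KVM stores x0-x30 directly adjacent to sp. The old vcpu_reg() handed back a pointer into that array, so an access decoded as register 31 read or clobbered the guest's sp. A minimal stand-alone illustration of the fixed semantics (toy layout mirroring struct user_pt_regs, not kernel code):

#include <stdio.h>
#include <stdint.h>

/* Toy layout mirroring struct user_pt_regs: x0..x30, then sp. */
struct gp_regs {
	uint64_t regs[31];	/* x0..x30 */
	uint64_t sp;		/* an out-of-bounds regs[31] would land here */
};

/* Reads of register 31 must yield 0 (xzr), not sp. */
static uint64_t get_reg(const struct gp_regs *gp, unsigned int n)
{
	return (n == 31) ? 0 : gp->regs[n];
}

/* Writes to register 31 must be discarded, not clobber sp. */
static void set_reg(struct gp_regs *gp, unsigned int n, uint64_t val)
{
	if (n != 31)
		gp->regs[n] = val;
}

int main(void)
{
	struct gp_regs gp = { .sp = 0xffff000012345678ULL };

	set_reg(&gp, 31, 0xdeadbeef);	/* discarded; sp untouched */
	printf("x31 reads as %llu, sp still %#llx\n",
	       (unsigned long long)get_reg(&gp, 31),
	       (unsigned long long)gp.sp);
	return 0;
}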
@@ -28,6 +28,18 @@
 unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
 unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu);
 
+static inline unsigned long vcpu_get_reg(struct kvm_vcpu *vcpu,
+					 u8 reg_num)
+{
+	return *vcpu_reg(vcpu, reg_num);
+}
+
+static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
+				unsigned long val)
+{
+	*vcpu_reg(vcpu, reg_num) = val;
+}
+
 bool kvm_condition_valid(struct kvm_vcpu *vcpu);
 void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr);
 void kvm_inject_undefined(struct kvm_vcpu *vcpu);
......
@@ -115,7 +115,7 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
 			       data);
 		data = vcpu_data_host_to_guest(vcpu, data, len);
-		*vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt) = data;
+		vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data);
 	}
 
 	return 0;
@@ -186,7 +186,8 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
 	rt = vcpu->arch.mmio_decode.rt;
 
 	if (is_write) {
-		data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt), len);
+		data = vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt),
+					       len);
 
 		trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, data);
 		mmio_write_buf(data_buf, len, data);
......
@@ -218,7 +218,7 @@ static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
 			kvm_tlb_flush_vmid_ipa(kvm, addr);
 
 			/* No need to invalidate the cache for device mappings */
-			if (!kvm_is_device_pfn(__phys_to_pfn(addr)))
+			if (!kvm_is_device_pfn(pte_pfn(old_pte)))
 				kvm_flush_dcache_pte(old_pte);
 
 			put_page(virt_to_page(pte));
@@ -310,7 +310,7 @@ static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
 	pte = pte_offset_kernel(pmd, addr);
 	do {
-		if (!pte_none(*pte) && !kvm_is_device_pfn(__phys_to_pfn(addr)))
+		if (!pte_none(*pte) && !kvm_is_device_pfn(pte_pfn(*pte)))
 			kvm_flush_dcache_pte(*pte);
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 }
......
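The unmap_ptes/stage2_flush_ptes hunks above are the follow-up to the -rc3 cache flush fix: `addr` is the guest intermediate physical address (IPA) being walked, so `__phys_to_pfn(addr)` yields the frame number of the IPA, while the device-vs-memory question concerns the host frame the PTE actually points at, which `pte_pfn()` extracts from the PTE itself. A toy sketch of that distinction (simplified one-field PTE, made-up device threshold, not kernel code):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define PAGE_SHIFT	12

typedef uint64_t pte_t;	/* toy stage-2 PTE: output address in the upper bits */

static uint64_t phys_to_pfn(uint64_t pa)	{ return pa >> PAGE_SHIFT; }
static uint64_t pte_pfn(pte_t pte)		{ return pte >> PAGE_SHIFT; }

/* Pretend pfns at or above this mark are MMIO (device) frames. */
static bool is_device_pfn(uint64_t pfn)		{ return pfn >= 0x80000; }

int main(void)
{
	uint64_t ipa = 0x40000000;			/* guest address being unmapped */
	pte_t pte = (uint64_t)0x80001 << PAGE_SHIFT;	/* but it maps a device frame */

	/* Checking the IPA misclassifies the mapping... */
	printf("by IPA: device=%d\n", is_device_pfn(phys_to_pfn(ipa)));
	/* ...checking the PTE's output frame gets it right. */
	printf("by PTE: device=%d\n", is_device_pfn(pte_pfn(pte)));
	return 0;
}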
@@ -75,7 +75,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
 	unsigned long context_id;
 	phys_addr_t target_pc;
 
-	cpu_id = *vcpu_reg(source_vcpu, 1) & MPIDR_HWID_BITMASK;
+	cpu_id = vcpu_get_reg(source_vcpu, 1) & MPIDR_HWID_BITMASK;
 	if (vcpu_mode_is_32bit(source_vcpu))
 		cpu_id &= ~((u32) 0);
@@ -94,8 +94,8 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
 		return PSCI_RET_INVALID_PARAMS;
 	}
 
-	target_pc = *vcpu_reg(source_vcpu, 2);
-	context_id = *vcpu_reg(source_vcpu, 3);
+	target_pc = vcpu_get_reg(source_vcpu, 2);
+	context_id = vcpu_get_reg(source_vcpu, 3);
 
 	kvm_reset_vcpu(vcpu);
@@ -114,7 +114,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
 	 * NOTE: We always update r0 (or x0) because for PSCI v0.1
 	 * the general puspose registers are undefined upon CPU_ON.
 	 */
-	*vcpu_reg(vcpu, 0) = context_id;
+	vcpu_set_reg(vcpu, 0, context_id);
 	vcpu->arch.power_off = false;
 	smp_mb();		/* Make sure the above is visible */
@@ -134,8 +134,8 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
 	struct kvm *kvm = vcpu->kvm;
 	struct kvm_vcpu *tmp;
 
-	target_affinity = *vcpu_reg(vcpu, 1);
-	lowest_affinity_level = *vcpu_reg(vcpu, 2);
+	target_affinity = vcpu_get_reg(vcpu, 1);
+	lowest_affinity_level = vcpu_get_reg(vcpu, 2);
 
 	/* Determine target affinity mask */
 	target_affinity_mask = psci_affinity_mask(lowest_affinity_level);
@@ -209,7 +209,7 @@ int kvm_psci_version(struct kvm_vcpu *vcpu)
 static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
 {
 	int ret = 1;
-	unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0);
+	unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
 	unsigned long val;
 
 	switch (psci_fn) {
@@ -273,13 +273,13 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
 		break;
 	}
 
-	*vcpu_reg(vcpu, 0) = val;
+	vcpu_set_reg(vcpu, 0, val);
 	return ret;
 }
 
 static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
 {
-	unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0);
+	unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
 	unsigned long val;
 
 	switch (psci_fn) {
@@ -295,7 +295,7 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
 		break;
 	}
 
-	*vcpu_reg(vcpu, 0) = val;
+	vcpu_set_reg(vcpu, 0, val);
 	return 1;
 }
......
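A side note on the `& ~((u32) 0)` pattern kept throughout the psci hunks above: `~((u32) 0)` is 0xffffffff as a 32-bit value, and the usual arithmetic conversions zero-extend it to the width of `unsigned long`, so the AND truncates the register to its low 32 bits (as SMC32 function IDs require). A quick stand-alone check (hypothetical demo assuming a 64-bit `unsigned long`, not kernel code):

#include <stdio.h>
#include <stdint.h>

typedef uint32_t u32;

int main(void)
{
	unsigned long reg = 0xdeadbeefcafe1234UL;

	/* ~((u32) 0) == 0xffffffff; zero-extended before the AND. */
	unsigned long fn = reg & ~((u32) 0);

	printf("%#lx -> %#lx\n", reg, fn);	/* ...cafe1234 -> 0xcafe1234 */
	return 0;
}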
@@ -100,13 +100,21 @@ static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
 }
 
 /*
- * vcpu_reg should always be passed a register number coming from a
- * read of ESR_EL2. Otherwise, it may give the wrong result on AArch32
- * with banked registers.
+ * vcpu_get_reg and vcpu_set_reg should always be passed a register number
+ * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
+ * AArch32 with banked registers.
  */
-static inline unsigned long *vcpu_reg(const struct kvm_vcpu *vcpu, u8 reg_num)
+static inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
+					 u8 reg_num)
 {
-	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.regs[reg_num];
+	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num];
}
+
+static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
+				unsigned long val)
+{
+	if (reg_num != 31)
+		vcpu_gp_regs(vcpu)->regs.regs[reg_num] = val;
 }
 
 /* Get vcpu SPSR for current mode */
......
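The comment above states the key invariant: on an AArch32 guest the general-purpose registers are banked by mode, and ESR_EL2 reports the transfer register as the hypervisor sees it, so handlers must take the syndrome's Rt field rather than re-decode the guest instruction. The sys_regs.c hunks below pull that field out with `(esr >> 5) & 0x1f`; a minimal sketch of the extraction (made-up syndrome value, toy code):

#include <stdio.h>
#include <stdint.h>

/* For trapped MSR/MRS accesses, Rt sits in ISS bits [9:5] of ESR_EL2;
 * value 31 names xzr in this context. */
static unsigned int esr_sys64_rt(uint32_t esr)
{
	return (esr >> 5) & 0x1f;
}

int main(void)
{
	uint32_t esr = 0x62340000 | (31u << 5);	/* made-up syndrome, Rt == 31 */

	printf("Rt = %u (31 means xzr: reads return 0, writes are discarded)\n",
	       esr_sys64_rt(esr));
	return 0;
}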
@@ -37,7 +37,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 	int ret;
 
-	trace_kvm_hvc_arm64(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0),
+	trace_kvm_hvc_arm64(*vcpu_pc(vcpu), vcpu_get_reg(vcpu, 0),
 			    kvm_vcpu_hvc_get_imm(vcpu));
 
 	ret = kvm_psci_call(vcpu);
......
@@ -78,7 +78,7 @@ static u32 get_ccsidr(u32 csselr)
  * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
  */
 static bool access_dcsw(struct kvm_vcpu *vcpu,
-			const struct sys_reg_params *p,
+			struct sys_reg_params *p,
 			const struct sys_reg_desc *r)
 {
 	if (!p->is_write)
@@ -94,21 +94,19 @@ static bool access_dcsw(struct kvm_vcpu *vcpu,
  * sys_regs and leave it in complete control of the caches.
  */
 static bool access_vm_reg(struct kvm_vcpu *vcpu,
-			  const struct sys_reg_params *p,
+			  struct sys_reg_params *p,
 			  const struct sys_reg_desc *r)
 {
-	unsigned long val;
 	bool was_enabled = vcpu_has_cache_enabled(vcpu);
 
 	BUG_ON(!p->is_write);
 
-	val = *vcpu_reg(vcpu, p->Rt);
 	if (!p->is_aarch32) {
-		vcpu_sys_reg(vcpu, r->reg) = val;
+		vcpu_sys_reg(vcpu, r->reg) = p->regval;
 	} else {
 		if (!p->is_32bit)
-			vcpu_cp15_64_high(vcpu, r->reg) = val >> 32;
-		vcpu_cp15_64_low(vcpu, r->reg) = val & 0xffffffffUL;
+			vcpu_cp15_64_high(vcpu, r->reg) = upper_32_bits(p->regval);
+		vcpu_cp15_64_low(vcpu, r->reg) = lower_32_bits(p->regval);
 	}
 
 	kvm_toggle_cache(vcpu, was_enabled);
@@ -122,22 +120,19 @@ static bool access_vm_reg(struct kvm_vcpu *vcpu,
  * for both AArch64 and AArch32 accesses.
  */
 static bool access_gic_sgi(struct kvm_vcpu *vcpu,
-			   const struct sys_reg_params *p,
+			   struct sys_reg_params *p,
 			   const struct sys_reg_desc *r)
 {
-	u64 val;
-
 	if (!p->is_write)
 		return read_from_write_only(vcpu, p);
 
-	val = *vcpu_reg(vcpu, p->Rt);
-	vgic_v3_dispatch_sgi(vcpu, val);
+	vgic_v3_dispatch_sgi(vcpu, p->regval);
 
 	return true;
 }
 
 static bool trap_raz_wi(struct kvm_vcpu *vcpu,
-			const struct sys_reg_params *p,
+			struct sys_reg_params *p,
 			const struct sys_reg_desc *r)
 {
 	if (p->is_write)
@@ -147,19 +142,19 @@ static bool trap_raz_wi(struct kvm_vcpu *vcpu,
 }
 
 static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
-			   const struct sys_reg_params *p,
+			   struct sys_reg_params *p,
 			   const struct sys_reg_desc *r)
 {
 	if (p->is_write) {
 		return ignore_write(vcpu, p);
 	} else {
-		*vcpu_reg(vcpu, p->Rt) = (1 << 3);
+		p->regval = (1 << 3);
 		return true;
 	}
 }
 
 static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
-			    const struct sys_reg_params *p,
+			    struct sys_reg_params *p,
 			    const struct sys_reg_desc *r)
 {
 	if (p->is_write) {
@@ -167,7 +162,7 @@ static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
 	} else {
 		u32 val;
 		asm volatile("mrs %0, dbgauthstatus_el1" : "=r" (val));
-		*vcpu_reg(vcpu, p->Rt) = val;
+		p->regval = val;
 		return true;
 	}
 }
@@ -200,17 +195,17 @@ static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
  * now use the debug registers.
  */
 static bool trap_debug_regs(struct kvm_vcpu *vcpu,
-			    const struct sys_reg_params *p,
+			    struct sys_reg_params *p,
 			    const struct sys_reg_desc *r)
 {
 	if (p->is_write) {
-		vcpu_sys_reg(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt);
+		vcpu_sys_reg(vcpu, r->reg) = p->regval;
 		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
 	} else {
-		*vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, r->reg);
+		p->regval = vcpu_sys_reg(vcpu, r->reg);
 	}
 
-	trace_trap_reg(__func__, r->reg, p->is_write, *vcpu_reg(vcpu, p->Rt));
+	trace_trap_reg(__func__, r->reg, p->is_write, p->regval);
 
 	return true;
 }
@@ -225,10 +220,10 @@ static bool trap_debug_regs(struct kvm_vcpu *vcpu,
  * hyp.S code switches between host and guest values in future.
  */
 static inline void reg_to_dbg(struct kvm_vcpu *vcpu,
-			      const struct sys_reg_params *p,
+			      struct sys_reg_params *p,
 			      u64 *dbg_reg)
 {
-	u64 val = *vcpu_reg(vcpu, p->Rt);
+	u64 val = p->regval;
 
 	if (p->is_32bit) {
 		val &= 0xffffffffUL;
@@ -240,19 +235,16 @@ static inline void reg_to_dbg(struct kvm_vcpu *vcpu,
 }
 
 static inline void dbg_to_reg(struct kvm_vcpu *vcpu,
-			      const struct sys_reg_params *p,
+			      struct sys_reg_params *p,
 			      u64 *dbg_reg)
 {
-	u64 val = *dbg_reg;
-
+	p->regval = *dbg_reg;
 	if (p->is_32bit)
-		val &= 0xffffffffUL;
-
-	*vcpu_reg(vcpu, p->Rt) = val;
+		p->regval &= 0xffffffffUL;
 }
 
 static inline bool trap_bvr(struct kvm_vcpu *vcpu,
-			    const struct sys_reg_params *p,
+			    struct sys_reg_params *p,
 			    const struct sys_reg_desc *rd)
 {
 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
@@ -294,7 +286,7 @@ static inline void reset_bvr(struct kvm_vcpu *vcpu,
 }
 
 static inline bool trap_bcr(struct kvm_vcpu *vcpu,
-			    const struct sys_reg_params *p,
+			    struct sys_reg_params *p,
 			    const struct sys_reg_desc *rd)
 {
 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
@@ -337,7 +329,7 @@ static inline void reset_bcr(struct kvm_vcpu *vcpu,
 }
 
 static inline bool trap_wvr(struct kvm_vcpu *vcpu,
-			    const struct sys_reg_params *p,
+			    struct sys_reg_params *p,
 			    const struct sys_reg_desc *rd)
 {
 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
@@ -380,7 +372,7 @@ static inline void reset_wvr(struct kvm_vcpu *vcpu,
 }
 
 static inline bool trap_wcr(struct kvm_vcpu *vcpu,
-			    const struct sys_reg_params *p,
+			    struct sys_reg_params *p,
 			    const struct sys_reg_desc *rd)
 {
 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
@@ -687,7 +679,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 };
 
 static bool trap_dbgidr(struct kvm_vcpu *vcpu,
-			const struct sys_reg_params *p,
+			struct sys_reg_params *p,
 			const struct sys_reg_desc *r)
 {
 	if (p->is_write) {
@@ -697,23 +689,23 @@ static bool trap_dbgidr(struct kvm_vcpu *vcpu,
 		u64 pfr = read_system_reg(SYS_ID_AA64PFR0_EL1);
 		u32 el3 = !!cpuid_feature_extract_field(pfr, ID_AA64PFR0_EL3_SHIFT);
 
-		*vcpu_reg(vcpu, p->Rt) = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
-			  (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
-			  (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20) |
-			  (6 << 16) | (el3 << 14) | (el3 << 12));
+		p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
+			     (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
+			     (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20)
+			     | (6 << 16) | (el3 << 14) | (el3 << 12));
 		return true;
 	}
 }
 
 static bool trap_debug32(struct kvm_vcpu *vcpu,
-			 const struct sys_reg_params *p,
+			 struct sys_reg_params *p,
 			 const struct sys_reg_desc *r)
 {
 	if (p->is_write) {
-		vcpu_cp14(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt);
+		vcpu_cp14(vcpu, r->reg) = p->regval;
 		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
 	} else {
-		*vcpu_reg(vcpu, p->Rt) = vcpu_cp14(vcpu, r->reg);
+		p->regval = vcpu_cp14(vcpu, r->reg);
 	}
 
 	return true;
@@ -731,7 +723,7 @@ static bool trap_debug32(struct kvm_vcpu *vcpu,
  */
 static inline bool trap_xvr(struct kvm_vcpu *vcpu,
-			    const struct sys_reg_params *p,
+			    struct sys_reg_params *p,
 			    const struct sys_reg_desc *rd)
 {
 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
@@ -740,12 +732,12 @@ static inline bool trap_xvr(struct kvm_vcpu *vcpu,
 		u64 val = *dbg_reg;
 
 		val &= 0xffffffffUL;
-		val |= *vcpu_reg(vcpu, p->Rt) << 32;
+		val |= p->regval << 32;
 		*dbg_reg = val;
 
 		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
 	} else {
-		*vcpu_reg(vcpu, p->Rt) = *dbg_reg >> 32;
+		p->regval = *dbg_reg >> 32;
 	}
 
 	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
@@ -991,7 +983,7 @@ int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
  * Return 0 if the access has been handled, and -1 if not.
  */
 static int emulate_cp(struct kvm_vcpu *vcpu,
-		      const struct sys_reg_params *params,
+		      struct sys_reg_params *params,
 		      const struct sys_reg_desc *table,
 		      size_t num)
 {
@@ -1062,12 +1054,12 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
 {
 	struct sys_reg_params params;
 	u32 hsr = kvm_vcpu_get_hsr(vcpu);
+	int Rt = (hsr >> 5) & 0xf;
 	int Rt2 = (hsr >> 10) & 0xf;
 
 	params.is_aarch32 = true;
 	params.is_32bit = false;
 	params.CRm = (hsr >> 1) & 0xf;
-	params.Rt = (hsr >> 5) & 0xf;
 	params.is_write = ((hsr & 1) == 0);
 
 	params.Op0 = 0;
@@ -1076,15 +1068,12 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
 	params.CRn = 0;
 
 	/*
-	 * Massive hack here. Store Rt2 in the top 32bits so we only
-	 * have one register to deal with. As we use the same trap
+	 * Make a 64-bit value out of Rt and Rt2. As we use the same trap
 	 * backends between AArch32 and AArch64, we get away with it.
 	 */
 	if (params.is_write) {
-		u64 val = *vcpu_reg(vcpu, params.Rt);
-		val &= 0xffffffff;
-		val |= *vcpu_reg(vcpu, Rt2) << 32;
-		*vcpu_reg(vcpu, params.Rt) = val;
+		params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
+		params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
 	}
 
 	if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
@@ -1095,11 +1084,10 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
 		unhandled_cp_access(vcpu, &params);
 
 out:
-	/* Do the opposite hack for the read side */
+	/* Split up the value between registers for the read side */
 	if (!params.is_write) {
-		u64 val = *vcpu_reg(vcpu, params.Rt);
-		val >>= 32;
-		*vcpu_reg(vcpu, Rt2) = val;
+		vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
+		vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
 	}
 
 	return 1;
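The rewritten comment above describes the 64-bit packing: for a 64-bit coprocessor access, Rt supplies the low word and Rt2 the high word, so the pair travels through the common trap backend as a single `regval` and is split again on the read side with `lower_32_bits()`/`upper_32_bits()`. A stand-alone sketch of that round trip (plain C stand-ins for the kernel helpers):

#include <stdio.h>
#include <stdint.h>

/* Stand-ins for the kernel's lower_32_bits()/upper_32_bits() helpers. */
#define lower_32_bits(v)	((uint32_t)(v))
#define upper_32_bits(v)	((uint32_t)((v) >> 32))

int main(void)
{
	uint32_t rt = 0xcafe1234, rt2 = 0xdeadbeef;

	/* Write side: make one 64-bit value out of Rt and Rt2. */
	uint64_t regval = ((uint64_t)rt2 << 32) | rt;

	/* Read side: split the value back between the two registers. */
	printf("Rt=%#x Rt2=%#x\n", lower_32_bits(regval), upper_32_bits(regval));
	return 0;
}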
@@ -1118,21 +1106,24 @@ static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
 {
 	struct sys_reg_params params;
 	u32 hsr = kvm_vcpu_get_hsr(vcpu);
+	int Rt = (hsr >> 5) & 0xf;
 
 	params.is_aarch32 = true;
 	params.is_32bit = true;
 	params.CRm = (hsr >> 1) & 0xf;
-	params.Rt = (hsr >> 5) & 0xf;
+	params.regval = vcpu_get_reg(vcpu, Rt);
 	params.is_write = ((hsr & 1) == 0);
 	params.CRn = (hsr >> 10) & 0xf;
 	params.Op0 = 0;
 	params.Op1 = (hsr >> 14) & 0x7;
 	params.Op2 = (hsr >> 17) & 0x7;
 
-	if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
-		return 1;
-	if (!emulate_cp(vcpu, &params, global, nr_global))
+	if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
+	    !emulate_cp(vcpu, &params, global, nr_global)) {
+		if (!params.is_write)
+			vcpu_set_reg(vcpu, Rt, params.regval);
 		return 1;
+	}
 
 	unhandled_cp_access(vcpu, &params);
 
 	return 1;
@@ -1175,7 +1166,7 @@ int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
 }
 
 static int emulate_sys_reg(struct kvm_vcpu *vcpu,
-			   const struct sys_reg_params *params)
+			   struct sys_reg_params *params)
 {
 	size_t num;
 	const struct sys_reg_desc *table, *r;
@@ -1230,6 +1221,8 @@ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 	struct sys_reg_params params;
 	unsigned long esr = kvm_vcpu_get_hsr(vcpu);
+	int Rt = (esr >> 5) & 0x1f;
+	int ret;
 
 	trace_kvm_handle_sys_reg(esr);
@@ -1240,10 +1233,14 @@ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	params.CRn = (esr >> 10) & 0xf;
 	params.CRm = (esr >> 1) & 0xf;
 	params.Op2 = (esr >> 17) & 0x7;
-	params.Rt = (esr >> 5) & 0x1f;
+	params.regval = vcpu_get_reg(vcpu, Rt);
 	params.is_write = !(esr & 1);
 
-	return emulate_sys_reg(vcpu, &params);
+	ret = emulate_sys_reg(vcpu, &params);
+	if (!params.is_write)
+		vcpu_set_reg(vcpu, Rt, params.regval);
+	return ret;
 }
 
 /******************************************************************************
......
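Taken together, the hunks above invert the data flow: instead of every handler dereferencing `vcpu_reg(vcpu, p->Rt)` (and mishandling register 31), the top-level trap handler loads `params.regval` once via `vcpu_get_reg()`, lets the handler work on a plain value, and writes it back with `vcpu_set_reg()` only for reads. A compressed sketch of that pattern (toy types and handler, not the kernel API):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct params { uint64_t regval; bool is_write; };

static uint64_t gpr[31];	/* toy register file: x0..x30 */

static uint64_t get_reg(int n)		{ return n == 31 ? 0 : gpr[n]; }
static void set_reg(int n, uint64_t v)	{ if (n != 31) gpr[n] = v; }

/* A trap handler now touches only p->regval, never the register file. */
static bool emulate(struct params *p)
{
	if (!p->is_write)
		p->regval = 0x12345;	/* e.g. a read-as-constant register */
	return true;
}

int main(void)
{
	struct params p = { .is_write = false };
	int rt = 3;			/* decoded from the trap syndrome */

	p.regval = get_reg(rt);		/* load once, before emulation */
	emulate(&p);
	if (!p.is_write)		/* write back only for reads */
		set_reg(rt, p.regval);

	printf("x%d = %#llx\n", rt, (unsigned long long)gpr[rt]);
	return 0;
}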
@@ -28,7 +28,7 @@ struct sys_reg_params {
	u8	CRn;
	u8	CRm;
	u8	Op2;
-	u8	Rt;
+	u64	regval;
	bool	is_write;
	bool	is_aarch32;
	bool	is_32bit;	/* Only valid if is_aarch32 is true */
@@ -44,7 +44,7 @@ struct sys_reg_desc {
	/* Trapped access from guest, if non-NULL. */
	bool (*access)(struct kvm_vcpu *,
-		       const struct sys_reg_params *,
+		       struct sys_reg_params *,
		       const struct sys_reg_desc *);
 
	/* Initialization for vcpu. */
@@ -77,9 +77,9 @@ static inline bool ignore_write(struct kvm_vcpu *vcpu,
 }
 
 static inline bool read_zero(struct kvm_vcpu *vcpu,
-			     const struct sys_reg_params *p)
+			     struct sys_reg_params *p)
 {
-	*vcpu_reg(vcpu, p->Rt) = 0;
+	p->regval = 0;
 	return true;
 }
......
@@ -31,13 +31,13 @@
 #include "sys_regs.h"
 
 static bool access_actlr(struct kvm_vcpu *vcpu,
-			 const struct sys_reg_params *p,
+			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
 {
	if (p->is_write)
		return ignore_write(vcpu, p);
 
-	*vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, ACTLR_EL1);
+	p->regval = vcpu_sys_reg(vcpu, ACTLR_EL1);
	return true;
 }
......