Commit 5b22bbe7 authored by Lai Jiangshan, committed by Paolo Bonzini

KVM: X86: Change the type of access u32 to u64

Change the type of @access from u32 to u64 for FNAME(walk_addr) and
->gva_to_gpa().

Access kinds are usually combinations of UWX (user, write, execute),
and VMX/SVM nested paging adds a new factor: whether the access is for
a guest page table or for the final guest physical address.

And SMAP relies on a further factor for supervisor accesses: whether
the access is explicit or implicit.

So @access in FNAME(walk_addr) and ->gva_to_gpa() should carry all of
this information to perform the walk.
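
For reference, these factors map onto existing PFERR_* bits (values as
defined in arch/x86/include/asm/kvm_host.h; this listing is
illustrative, not code added by this patch):

	/* Traditional UWX access mode, in the low 32 bits of @access: */
	#define PFERR_WRITE_MASK	(1U << 1)	/* W: write access */
	#define PFERR_USER_MASK		(1U << 2)	/* U: user-mode access */
	#define PFERR_FETCH_MASK	(1U << 4)	/* X: instruction fetch */

	/* SVM nested-paging access kind, in the upper 32 bits: */
	#define PFERR_GUEST_FINAL_MASK	(1ULL << 32)	/* access for the final GPA */
	#define PFERR_GUEST_PAGE_MASK	(1ULL << 33)	/* access for a guest page table */

(The explicit-vs-implicit supervisor access factor for SMAP is handled
by a later patch in this series, likewise via a high bit.)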

Although a u32 @access has enough bits to encode all of these kinds,
this patch extends it to u64:
	o The extra bits sit in the upper 32 bits, so the traditional
	  access mode (UWX) can be recovered simply by truncating
	  @access to u32.
	o The values SVM's nested paging already defines for the access
	  kind (PFERR_GUEST_FINAL_MASK and PFERR_GUEST_PAGE_MASK) are
	  reused, so @error_code in kvm_handle_page_fault() can serve
	  as @access directly.
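
A minimal sketch of the two points above (illustrative only, not part
of the patch):

	u64 access = PFERR_USER_MASK | PFERR_WRITE_MASK | PFERR_GUEST_PAGE_MASK;

	/* Truncation to u32 strips the nested-paging bits, leaving UWX: */
	unsigned int pfec = access;	/* == PFERR_USER_MASK | PFERR_WRITE_MASK */

	/*
	 * Because the high bits reuse SVM's #NPF error-code encoding,
	 * @error_code in kvm_handle_page_fault() can be passed through
	 * as @access with no translation step.
	 */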
Signed-off-by: Lai Jiangshan <jiangshan.ljs@antgroup.com>
Message-Id: <20220311070346.45023-2-jiangshanlai@gmail.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent cf1d88b3
@@ -430,7 +430,7 @@ struct kvm_mmu {
 	void (*inject_page_fault)(struct kvm_vcpu *vcpu,
 				  struct x86_exception *fault);
 	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
-			    gpa_t gva_or_gpa, u32 access,
+			    gpa_t gva_or_gpa, u64 access,
 			    struct x86_exception *exception);
 	int (*sync_page)(struct kvm_vcpu *vcpu,
 			 struct kvm_mmu_page *sp);
@@ -214,8 +214,10 @@ static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
  */
 static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 				  unsigned pte_access, unsigned pte_pkey,
-				  unsigned pfec)
+				  u64 access)
 {
+	/* strip nested paging fault error codes */
+	unsigned int pfec = access;
 	int cpl = static_call(kvm_x86_get_cpl)(vcpu);
 	unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu);
@@ -317,12 +319,12 @@ static inline void kvm_update_page_stats(struct kvm *kvm, int level, int count)
 	atomic64_add(count, &kvm->stat.pages[level - 1]);
 }
 
-gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
+gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u64 access,
 			   struct x86_exception *exception);
 
 static inline gpa_t kvm_translate_gpa(struct kvm_vcpu *vcpu,
 				      struct kvm_mmu *mmu,
-				      gpa_t gpa, u32 access,
+				      gpa_t gpa, u64 access,
 				      struct x86_exception *exception)
 {
 	if (mmu != &vcpu->arch.nested_mmu)
@@ -3703,7 +3703,7 @@ void kvm_mmu_sync_prev_roots(struct kvm_vcpu *vcpu)
 }
 
 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
-				  gpa_t vaddr, u32 access,
+				  gpa_t vaddr, u64 access,
 				  struct x86_exception *exception)
 {
 	if (exception)
@@ -339,7 +339,7 @@ static inline bool FNAME(is_last_gpte)(struct kvm_mmu *mmu,
  */
 static int FNAME(walk_addr_generic)(struct guest_walker *walker,
 				    struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
-				    gpa_t addr, u32 access)
+				    gpa_t addr, u64 access)
 {
 	int ret;
 	pt_element_t pte;
@@ -347,7 +347,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
 	gfn_t table_gfn;
 	u64 pt_access, pte_access;
 	unsigned index, accessed_dirty, pte_pkey;
-	unsigned nested_access;
+	u64 nested_access;
 	gpa_t pte_gpa;
 	bool have_ad;
 	int offset;
@@ -540,7 +540,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
 }
 
 static int FNAME(walk_addr)(struct guest_walker *walker,
-			    struct kvm_vcpu *vcpu, gpa_t addr, u32 access)
+			    struct kvm_vcpu *vcpu, gpa_t addr, u64 access)
 {
 	return FNAME(walk_addr_generic)(walker, vcpu, vcpu->arch.mmu, addr,
 					access);
@@ -988,7 +988,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa)
 
 /* Note, @addr is a GPA when gva_to_gpa() translates an L2 GPA to an L1 GPA. */
 static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
-			       gpa_t addr, u32 access,
+			       gpa_t addr, u64 access,
 			       struct x86_exception *exception)
 {
 	struct guest_walker walker;
@@ -6726,7 +6726,7 @@ void kvm_get_segment(struct kvm_vcpu *vcpu,
 	static_call(kvm_x86_get_segment)(vcpu, var, seg);
 }
 
-gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
+gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u64 access,
 			   struct x86_exception *exception)
 {
 	struct kvm_mmu *mmu = vcpu->arch.mmu;
@@ -6746,7 +6746,7 @@ gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
 {
 	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
-	u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
+	u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
 
 	return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception);
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_read);
@@ -6756,7 +6756,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_read);
 {
 	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
-	u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
+	u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
 
 	access |= PFERR_FETCH_MASK;
 	return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception);
 }
@@ -6766,7 +6766,7 @@ gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
 {
 	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
-	u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
+	u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
 
 	access |= PFERR_WRITE_MASK;
 	return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception);
 }
@@ -6782,7 +6782,7 @@ gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
 }
 
 static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
-				      struct kvm_vcpu *vcpu, u32 access,
+				      struct kvm_vcpu *vcpu, u64 access,
 				      struct x86_exception *exception)
 {
 	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
@@ -6819,7 +6819,7 @@ static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
 {
 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
 	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
-	u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
+	u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
 	unsigned offset;
 	int ret;
@@ -6844,7 +6844,7 @@ int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
 			gva_t addr, void *val, unsigned int bytes,
 			struct x86_exception *exception)
 {
-	u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
+	u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
 
 	/*
 	 * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED
@@ -6863,7 +6863,7 @@ static int emulator_read_std(struct x86_emulate_ctxt *ctxt,
 			     struct x86_exception *exception, bool system)
 {
 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
-	u32 access = 0;
+	u64 access = 0;
 
 	if (!system && static_call(kvm_x86_get_cpl)(vcpu) == 3)
 		access |= PFERR_USER_MASK;
@@ -6881,7 +6881,7 @@ static int kvm_read_guest_phys_system(struct x86_emulate_ctxt *ctxt,
 }
 
 static int kvm_write_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
-				       struct kvm_vcpu *vcpu, u32 access,
+				       struct kvm_vcpu *vcpu, u64 access,
 				       struct x86_exception *exception)
 {
 	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
@@ -6915,7 +6915,7 @@ static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val,
 				bool system)
 {
 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
-	u32 access = PFERR_WRITE_MASK;
+	u64 access = PFERR_WRITE_MASK;
 
 	if (!system && static_call(kvm_x86_get_cpl)(vcpu) == 3)
 		access |= PFERR_USER_MASK;
@@ -6984,7 +6984,7 @@ static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
 				bool write)
 {
 	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
-	u32 access = ((static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0)
+	u64 access = ((static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0)
 		| (write ? PFERR_WRITE_MASK : 0);
 
 	/*
@@ -12598,7 +12598,7 @@ void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code)
 {
 	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
 	struct x86_exception fault;
-	u32 access = error_code &
+	u64 access = error_code &
 		(PFERR_WRITE_MASK | PFERR_FETCH_MASK | PFERR_USER_MASK);
 
 	if (!(error_code & PFERR_PRESENT_MASK) ||