Commit 134291bf authored by Takuya Yoshikawa, committed by Avi Kivity

KVM: MMU: Clean up the error handling of walk_addr_generic()

Avoid the two-step jump to the error handling part.  This eliminates the use
of the variables present and rsvd_fault.

We also use the const type qualifier to show that write/user/fetch_fault
do not change in the function.

Both of these were suggested by Ingo Molnar.
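
To make the shape of both changes concrete, here is a minimal, self-contained
sketch of the before/after control flow. This is not the kernel code itself;
walk_before(), walk_after() and the ENTRY_* bits are invented purely for
illustration.

/*
 * Minimal sketch of the control-flow change, not the kernel walker.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ENTRY_PRESENT 0x1u      /* stand-in for a "present" bit      */
#define ENTRY_RSVD    0x8u      /* stand-in for a "reserved bit set" */

/* Before: each failure sets a flag and breaks out of the loop (step 1);
 * a separate check after the loop then jumps to the error label (step 2). */
static int walk_before(const uint64_t *ptes, int levels)
{
        bool present = true, rsvd_fault = false;
        int level;

        for (level = 0; level < levels; level++) {
                if (!(ptes[level] & ENTRY_PRESENT)) {
                        present = false;
                        break;
                }
                if (ptes[level] & ENTRY_RSVD) {
                        rsvd_fault = true;
                        break;
                }
        }
        if (!present || rsvd_fault)
                goto error;
        return 1;
error:
        return 0;
}

/* After: jump straight to the error label and accumulate the error code
 * on the way; inputs that never change are marked const. */
static int walk_after(const uint64_t *ptes, int levels, const int write_fault)
{
        uint16_t errcode = 0;
        int level;

        for (level = 0; level < levels; level++) {
                if (!(ptes[level] & ENTRY_PRESENT))
                        goto error;
                if (ptes[level] & ENTRY_RSVD) {
                        errcode |= ENTRY_RSVD | ENTRY_PRESENT;
                        goto error;
                }
        }
        return 1;
error:
        errcode |= write_fault;
        printf("fault, error code %#x\n", (unsigned)errcode);
        return 0;
}

int main(void)
{
        const uint64_t ptes[2] = { ENTRY_PRESENT, ENTRY_PRESENT | ENTRY_RSVD };

        printf("before-style walk: %d\n", walk_before(ptes, 2));
        printf("after-style walk:  %d\n", walk_after(ptes, 2, 0x2 /* write */));
        return 0;
}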

Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
parent f8f7e5ee
@@ -125,18 +125,17 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
         gfn_t table_gfn;
         unsigned index, pt_access, uninitialized_var(pte_access);
         gpa_t pte_gpa;
-        bool eperm, present, rsvd_fault;
-        int offset, write_fault, user_fault, fetch_fault;
-
-        write_fault = access & PFERR_WRITE_MASK;
-        user_fault = access & PFERR_USER_MASK;
-        fetch_fault = access & PFERR_FETCH_MASK;
+        bool eperm;
+        int offset;
+        const int write_fault = access & PFERR_WRITE_MASK;
+        const int user_fault = access & PFERR_USER_MASK;
+        const int fetch_fault = access & PFERR_FETCH_MASK;
+        u16 errcode = 0;
 
         trace_kvm_mmu_pagetable_walk(addr, write_fault, user_fault,
                                      fetch_fault);
 walk:
-        present = true;
-        eperm = rsvd_fault = false;
+        eperm = false;
         walker->level = mmu->root_level;
         pte           = mmu->get_cr3(vcpu);
@@ -144,10 +143,8 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
         if (walker->level == PT32E_ROOT_LEVEL) {
                 pte = kvm_pdptr_read_mmu(vcpu, mmu, (addr >> 30) & 3);
                 trace_kvm_mmu_paging_element(pte, walker->level);
-                if (!is_present_gpte(pte)) {
-                        present = false;
+                if (!is_present_gpte(pte))
                         goto error;
-                }
                 --walker->level;
         }
 #endif
@@ -170,35 +167,27 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
                 real_gfn = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn),
                                               PFERR_USER_MASK|PFERR_WRITE_MASK);
-                if (unlikely(real_gfn == UNMAPPED_GVA)) {
-                        present = false;
-                        break;
-                }
+                if (unlikely(real_gfn == UNMAPPED_GVA))
+                        goto error;
                 real_gfn = gpa_to_gfn(real_gfn);
 
                 host_addr = gfn_to_hva(vcpu->kvm, real_gfn);
-                if (unlikely(kvm_is_error_hva(host_addr))) {
-                        present = false;
-                        break;
-                }
+                if (unlikely(kvm_is_error_hva(host_addr)))
+                        goto error;
 
                 ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
-                if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte)))) {
-                        present = false;
-                        break;
-                }
+                if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
+                        goto error;
 
                 trace_kvm_mmu_paging_element(pte, walker->level);
 
-                if (unlikely(!is_present_gpte(pte))) {
-                        present = false;
-                        break;
-                }
+                if (unlikely(!is_present_gpte(pte)))
+                        goto error;
 
                 if (unlikely(is_rsvd_bits_set(&vcpu->arch.mmu, pte,
                                               walker->level))) {
-                        rsvd_fault = true;
-                        break;
+                        errcode |= PFERR_RSVD_MASK | PFERR_PRESENT_MASK;
+                        goto error;
                 }
 
                 if (unlikely(write_fault && !is_writable_pte(pte)
@@ -213,17 +202,15 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
                         eperm = true;
 #endif
 
-                if (!eperm && !rsvd_fault
-                    && unlikely(!(pte & PT_ACCESSED_MASK))) {
+                if (!eperm && unlikely(!(pte & PT_ACCESSED_MASK))) {
                         int ret;
                         trace_kvm_mmu_set_accessed_bit(table_gfn, index,
                                                        sizeof(pte));
                         ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index,
                                                   pte, pte|PT_ACCESSED_MASK);
-                        if (unlikely(ret < 0)) {
-                                present = false;
-                                break;
-                        } else if (ret)
+                        if (unlikely(ret < 0))
+                                goto error;
+                        else if (ret)
                                 goto walk;
 
                         mark_page_dirty(vcpu->kvm, table_gfn);
@@ -276,8 +263,10 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
                 --walker->level;
         }
 
-        if (unlikely(!present || eperm || rsvd_fault))
+        if (unlikely(eperm)) {
+                errcode |= PFERR_PRESENT_MASK;
                 goto error;
+        }
 
         if (write_fault && unlikely(!is_dirty_gpte(pte))) {
                 int ret;
@@ -285,10 +274,9 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
                 trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
                 ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index,
                                           pte, pte|PT_DIRTY_MASK);
-                if (unlikely(ret < 0)) {
-                        present = false;
+                if (unlikely(ret < 0))
                         goto error;
-                } else if (ret)
+                else if (ret)
                         goto walk;
 
                 mark_page_dirty(vcpu->kvm, table_gfn);
@@ -303,20 +291,14 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
         return 1;
 
 error:
-        walker->fault.vector = PF_VECTOR;
-        walker->fault.error_code_valid = true;
-        walker->fault.error_code = 0;
-        if (present)
-                walker->fault.error_code |= PFERR_PRESENT_MASK;
-
-        walker->fault.error_code |= write_fault | user_fault;
-
+        errcode |= write_fault | user_fault;
         if (fetch_fault && (mmu->nx ||
                             kvm_read_cr4_bits(vcpu, X86_CR4_SMEP)))
-                walker->fault.error_code |= PFERR_FETCH_MASK;
-        if (rsvd_fault)
-                walker->fault.error_code |= PFERR_RSVD_MASK;
-
+                errcode |= PFERR_FETCH_MASK;
+
+        walker->fault.vector = PF_VECTOR;
+        walker->fault.error_code_valid = true;
+        walker->fault.error_code = errcode;
         walker->fault.address = addr;
         walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;
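
For readers who don't have the PFERR_* bits memorized, here is a rough
standalone sketch of how the errcode accumulated above is assembled. The bit
values follow the architectural x86 page-fault error-code layout and are my
assumption here, not something shown in this diff.

/*
 * Rough sketch only; the PFERR_* values below are assumed, not copied
 * from the kernel headers.
 */
#include <stdint.h>
#include <stdio.h>

#define PFERR_PRESENT_MASK (1u << 0)    /* fault on a present entry       */
#define PFERR_WRITE_MASK   (1u << 1)    /* fault was a write              */
#define PFERR_USER_MASK    (1u << 2)    /* fault happened in user mode    */
#define PFERR_RSVD_MASK    (1u << 3)    /* reserved bit set in an entry   */
#define PFERR_FETCH_MASK   (1u << 4)    /* fault was an instruction fetch */

int main(void)
{
        /*
         * write_fault and user_fault are computed as access & PFERR_*_MASK,
         * so they are already mask bits and the error path can OR them into
         * errcode directly, as the new error label does.
         */
        const int write_fault = PFERR_WRITE_MASK;
        const int user_fault = PFERR_USER_MASK;
        uint16_t errcode = PFERR_RSVD_MASK | PFERR_PRESENT_MASK;

        errcode |= write_fault | user_fault;
        printf("page-fault error code: %#x\n", (unsigned)errcode); /* 0xf */
        return 0;
}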