Commit 577ed7f7 authored by James Hogan

KVM: MIPS: Pass type of fault down to kvm_mips_map_page()

kvm_mips_map_page() will need to know whether the fault was due to a
read or a write in order to support dirty page tracking,
KVM_CAP_SYNC_MMU, and read-only memory regions, so pass that
information down to it via new bool write_fault arguments to the
various functions on the fault path.
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
parent 89d6ad8a
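
Nothing in this commit acts on write_fault yet; the flag is plumbed through so
later patches can distinguish reads from writes at map time. As a hedged sketch
(not part of this change, helper name invented for illustration), the generic
KVM helper gfn_to_pfn_prot() is the kind of consumer the commit message
anticipates: it takes a write-fault flag when translating a gfn and reports
back whether the resulting host page is writable:

    #include <linux/kvm_host.h>

    /*
     * Illustrative only -- not part of this commit.  gfn_to_pfn_prot()
     * is the generic KVM helper that a write-aware fault path can call
     * once write_fault reaches kvm_mips_map_page().
     */
    static kvm_pfn_t example_gpa_to_pfn(struct kvm *kvm, gfn_t gfn,
                                        bool write_fault)
    {
            bool writable;

            /* A write fault demands a writable host mapping up front. */
            return gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
    }
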
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -597,19 +597,22 @@ u32 kvm_get_user_asid(struct kvm_vcpu *vcpu);
 u32 kvm_get_commpage_asid (struct kvm_vcpu *vcpu);
 
 extern int kvm_mips_handle_kseg0_tlb_fault(unsigned long badbaddr,
-					   struct kvm_vcpu *vcpu);
+					   struct kvm_vcpu *vcpu,
+					   bool write_fault);
 
 extern int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
 					      struct kvm_vcpu *vcpu);
 
 extern int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
 						struct kvm_mips_tlb *tlb,
-						unsigned long gva);
+						unsigned long gva,
+						bool write_fault);
 
 extern enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
 						     u32 *opc,
 						     struct kvm_run *run,
-						     struct kvm_vcpu *vcpu);
+						     struct kvm_vcpu *vcpu,
+						     bool write_fault);
 
 extern enum emulation_result kvm_mips_handle_tlbmod(u32 cause,
 						    u32 *opc,
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -2704,7 +2704,8 @@ enum emulation_result kvm_mips_check_privilege(u32 cause,
 enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
 					      u32 *opc,
 					      struct kvm_run *run,
-					      struct kvm_vcpu *vcpu)
+					      struct kvm_vcpu *vcpu,
+					      bool write_fault)
 {
 	enum emulation_result er = EMULATE_DONE;
 	u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
@@ -2760,8 +2761,8 @@ enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
 			 * OK we have a Guest TLB entry, now inject it into the
 			 * shadow host TLB
 			 */
-			if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
-								 va)) {
+			if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, va,
+								 write_fault)) {
 				kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
 					__func__, va, index, vcpu,
 					read_c0_entryhi());
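
Note that write_fault mirrors information the hardware already latched in the
CP0 Cause register by the time kvm_mips_handle_tlbmiss() runs: a TLBS (store)
or Mod exception implies a write, a TLBL (load) exception a read. A
hypothetical helper, not in the kernel tree, showing that mapping with the
EXCCODE_* constants from asm/mipsregs.h:

    #include <asm/mipsregs.h>

    /* Hypothetical helper for illustration: recover the fault direction
     * from the ExcCode field of CP0 Cause.  TLBS (store miss) and Mod
     * (write to clean page) are writes; TLBL (load miss) is a read. */
    static bool example_is_write_fault(u32 cause)
    {
            u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;

            return exccode == EXCCODE_TLBS || exccode == EXCCODE_MOD;
    }
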
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -308,6 +308,7 @@ bool kvm_mips_flush_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn)
  * kvm_mips_map_page() - Map a guest physical page.
  * @vcpu:		VCPU pointer.
  * @gpa:		Guest physical address of fault.
+ * @write_fault:	Whether the fault was due to a write.
  * @out_entry:		New PTE for @gpa (written on success unless NULL).
  * @out_buddy:		New PTE for @gpa's buddy (written on success unless
  *			NULL).
@@ -327,6 +328,7 @@ bool kvm_mips_flush_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn)
  *	    as an MMIO access.
  */
 static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
+			     bool write_fault,
 			     pte_t *out_entry, pte_t *out_buddy)
 {
 	struct kvm *kvm = vcpu->kvm;
@@ -558,7 +560,8 @@ void kvm_mips_flush_gva_pt(pgd_t *pgd, enum kvm_mips_flush flags)
 
 /* XXXKYMA: Must be called with interrupts disabled */
 int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
-				    struct kvm_vcpu *vcpu)
+				    struct kvm_vcpu *vcpu,
+				    bool write_fault)
 {
 	unsigned long gpa;
 	kvm_pfn_t pfn0, pfn1;
@@ -576,10 +579,11 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
 	gpa = KVM_GUEST_CPHYSADDR(badvaddr & (PAGE_MASK << 1));
 	vaddr = badvaddr & (PAGE_MASK << 1);
 
-	if (kvm_mips_map_page(vcpu, gpa, &pte_gpa[0], NULL) < 0)
+	if (kvm_mips_map_page(vcpu, gpa, write_fault, &pte_gpa[0], NULL) < 0)
 		return -1;
-	if (kvm_mips_map_page(vcpu, gpa | PAGE_SIZE, &pte_gpa[1], NULL) < 0)
+	if (kvm_mips_map_page(vcpu, gpa | PAGE_SIZE, write_fault, &pte_gpa[1],
+			      NULL) < 0)
 		return -1;
 
 	pfn0 = pte_pfn(pte_gpa[0]);
@@ -604,7 +608,8 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
 
 int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
 					 struct kvm_mips_tlb *tlb,
-					 unsigned long gva)
+					 unsigned long gva,
+					 bool write_fault)
 {
 	kvm_pfn_t pfn;
 	long tlb_lo = 0;
@@ -621,8 +626,8 @@ int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
 		tlb_lo = tlb->tlb_lo[idx];
 
 	/* Find host PFN */
-	if (kvm_mips_map_page(vcpu, mips3_tlbpfn_to_paddr(tlb_lo), &pte_gpa,
-			      NULL) < 0)
+	if (kvm_mips_map_page(vcpu, mips3_tlbpfn_to_paddr(tlb_lo), write_fault,
+			      &pte_gpa, NULL) < 0)
 		return -1;
 
 	pfn = pte_pfn(pte_gpa);
@@ -757,7 +762,7 @@ enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
 	int index;
 
 	if (KVM_GUEST_KSEGX(gva) == KVM_GUEST_KSEG0) {
-		if (kvm_mips_handle_kseg0_tlb_fault(gva, vcpu) < 0)
+		if (kvm_mips_handle_kseg0_tlb_fault(gva, vcpu, write) < 0)
 			return KVM_MIPS_GPA;
 	} else if ((KVM_GUEST_KSEGX(gva) < KVM_GUEST_KSEG0) ||
 		   KVM_GUEST_KSEGX(gva) == KVM_GUEST_KSEG23) {
@@ -774,7 +779,7 @@ enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
 
 		if (write && !TLB_IS_DIRTY(*tlb, gva))
 			return KVM_MIPS_TLBMOD;
-		if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, gva))
+		if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, gva, write))
 			return KVM_MIPS_GPA;
 	} else {
 		return KVM_MIPS_GVA;
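
Within this commit kvm_mips_map_page() only accepts write_fault without acting
on it. A hedged sketch of the follow-up use the commit message points at
(dirty page tracking): mark_page_dirty() is the generic KVM helper that sets a
bit in the memslot's dirty bitmap, while the wrapper below is invented purely
for illustration:

    #include <linux/kvm_host.h>

    /* Illustrative follow-up use, not this commit: record a faulting
     * write in the dirty log once write_fault is known at map time. */
    static void example_track_write(struct kvm *kvm, gfn_t gfn,
                                    bool write_fault)
    {
            if (write_fault)
                    mark_page_dirty(kvm, gfn);
    }
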
--- a/arch/mips/kvm/trap_emul.c
+++ b/arch/mips/kvm/trap_emul.c
@@ -159,7 +159,7 @@ static int kvm_trap_emul_handle_tlb_miss(struct kvm_vcpu *vcpu, bool store)
 		 * into the shadow host TLB
 		 */
 
-		er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
+		er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu, store);
 		if (er == EMULATE_DONE)
 			ret = RESUME_GUEST;
 		else {
@@ -172,7 +172,7 @@ static int kvm_trap_emul_handle_tlb_miss(struct kvm_vcpu *vcpu, bool store)
 		 * not expect to ever get them
 		 */
 		if (kvm_mips_handle_kseg0_tlb_fault
-		    (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
+		    (vcpu->arch.host_cp0_badvaddr, vcpu, store) < 0) {
 			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 			ret = RESUME_HOST;
 		}
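
The store flag threaded through kvm_trap_emul_handle_tlb_miss() above
originates one level up, where the separate TLB load-miss and store-miss
exception handlers funnel into this common helper. Paraphrased from the
wrappers in the same file (shape assumed from the surrounding code, since the
diff does not show them):

    /* TLB store miss: the guest was writing. */
    static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
    {
            return kvm_trap_emul_handle_tlb_miss(vcpu, true);
    }

    /* TLB load miss: the guest was reading. */
    static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
    {
            return kvm_trap_emul_handle_tlb_miss(vcpu, false);
    }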