Commit a35381e1 authored by Marc Zyngier, committed by Christoffer Dall

KVM: Remove obsolete kvm_unmap_hva notifier backend

kvm_unmap_hva is long gone, and we only have kvm_unmap_hva_range to
deal with. Drop the now obsolete code.

Fixes: fb1522e0 ("KVM: update to new mmu_notifier semantic v2")
Cc: James Hogan <jhogan@kernel.org>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@arm.com>
parent 7d14919c
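Context for the diff below: every removed kvm_unmap_hva() backend was just a single-page wrapper around the same logic its kvm_unmap_hva_range() already performs. A minimal sketch of how a one-page invalidation is expressed once only the range API remains (the helper name unmap_single_hva is hypothetical, not part of this commit):

/*
 * Hypothetical sketch, not in this commit: a single-HVA unmap is just a
 * one-page range, which is exactly the (hva, hva + PAGE_SIZE) span the
 * removed backends computed internally before calling their handlers.
 */
static int unmap_single_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm_unmap_hva_range(kvm, hva, hva + PAGE_SIZE);
}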
arch/arm/include/asm/kvm_host.h
@@ -223,7 +223,6 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
 			      struct kvm_vcpu_events *events);
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
 int kvm_unmap_hva_range(struct kvm *kvm,
 			unsigned long start, unsigned long end);
 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
arch/arm64/include/asm/kvm_host.h
@@ -357,7 +357,6 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
 			      struct kvm_vcpu_events *events);
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
 int kvm_unmap_hva_range(struct kvm *kvm,
 			unsigned long start, unsigned long end);
 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
arch/mips/include/asm/kvm_host.h
@@ -931,7 +931,6 @@ enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
 						   bool write);
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
 int kvm_unmap_hva_range(struct kvm *kvm,
 			unsigned long start, unsigned long end);
 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
arch/mips/kvm/mmu.c
@@ -512,16 +512,6 @@ static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
 	return 1;
 }
 
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
-{
-	unsigned long end = hva + PAGE_SIZE;
-
-	handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
-
-	kvm_mips_callbacks->flush_shadow_all(kvm);
-	return 0;
-}
-
 int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
 {
 	handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
arch/x86/include/asm/kvm_host.h
@@ -1450,7 +1450,6 @@ asmlinkage void kvm_spurious_fault(void);
 	____kvm_handle_fault_on_reboot(insn, "")
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
 int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end);
 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
arch/x86/kvm/mmu.c
@@ -1853,11 +1853,6 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
 	return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler);
 }
 
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
-{
-	return kvm_handle_hva(kvm, hva, 0, kvm_unmap_rmapp);
-}
-
 int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
 {
 	return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp);
virt/kvm/arm/mmu.c
@@ -1817,18 +1817,6 @@ static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *dat
 	return 0;
 }
 
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
-{
-	unsigned long end = hva + PAGE_SIZE;
-
-	if (!kvm->arch.pgd)
-		return 0;
-
-	trace_kvm_unmap_hva(hva);
-	handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
-	return 0;
-}
-
 int kvm_unmap_hva_range(struct kvm *kvm,
 			unsigned long start, unsigned long end)
 {
virt/kvm/arm/trace.h
@@ -134,21 +134,6 @@ TRACE_EVENT(kvm_mmio_emulate,
 		  __entry->vcpu_pc, __entry->instr, __entry->cpsr)
 );
 
-TRACE_EVENT(kvm_unmap_hva,
-	TP_PROTO(unsigned long hva),
-	TP_ARGS(hva),
-
-	TP_STRUCT__entry(
-		__field(	unsigned long,	hva		)
-	),
-
-	TP_fast_assign(
-		__entry->hva		= hva;
-	),
-
-	TP_printk("mmu notifier unmap hva: %#08lx", __entry->hva)
-);
-
 TRACE_EVENT(kvm_unmap_hva_range,
 	TP_PROTO(unsigned long start, unsigned long end),
 	TP_ARGS(start, end),
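Why the per-page hook could be dropped entirely: since fb1522e0, the generic mmu_notifier glue in virt/kvm/kvm_main.c funnels every unmap through the range callback, so no architecture needs a single-hva entry point. A simplified sketch of that caller, with mmu_notifier_count/mmu_notifier_seq bookkeeping elided and details approximate for kernels of this era:

/*
 * Simplified sketch of the generic notifier path (virt/kvm/kvm_main.c),
 * approximate for this era; bookkeeping elided. The only arch hook
 * invoked for an unmap is kvm_unmap_hva_range().
 */
static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
						    struct mm_struct *mm,
						    unsigned long start,
						    unsigned long end)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush;

	spin_lock(&kvm->mmu_lock);
	need_tlb_flush = kvm_unmap_hva_range(kvm, start, end);
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);
	spin_unlock(&kvm->mmu_lock);
}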