Commit fce92dce authored by Xiao Guangrong's avatar Xiao Guangrong Committed by Avi Kivity

KVM: MMU: filter out the mmio pfn from the fault pfn

If the page fault is caused by mmio, the gfn can not be found in memslots, and
'bad_pfn' is returned on gfn_to_hva path, so we can use 'bad_pfn' to identify
the mmio page fault.
And, to clarify the meaning of mmio pfn, we return the fault page instead of the
bad page when the gfn is not allowed to prefetch.
Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent c3707958
...@@ -2085,8 +2085,8 @@ static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, ...@@ -2085,8 +2085,8 @@ static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, no_dirty_log); slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, no_dirty_log);
if (!slot) { if (!slot) {
get_page(bad_page); get_page(fault_page);
return page_to_pfn(bad_page); return page_to_pfn(fault_page);
} }
hva = gfn_to_hva_memslot(slot, gfn); hva = gfn_to_hva_memslot(slot, gfn);
......
...@@ -327,12 +327,17 @@ static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm) ...@@ -327,12 +327,17 @@ static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; } static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
extern struct page *bad_page; extern struct page *bad_page;
extern struct page *fault_page;
extern pfn_t bad_pfn; extern pfn_t bad_pfn;
extern pfn_t fault_pfn;
int is_error_page(struct page *page); int is_error_page(struct page *page);
int is_error_pfn(pfn_t pfn); int is_error_pfn(pfn_t pfn);
int is_hwpoison_pfn(pfn_t pfn); int is_hwpoison_pfn(pfn_t pfn);
int is_fault_pfn(pfn_t pfn); int is_fault_pfn(pfn_t pfn);
int is_noslot_pfn(pfn_t pfn);
int is_invalid_pfn(pfn_t pfn);
int kvm_is_error_hva(unsigned long addr); int kvm_is_error_hva(unsigned long addr);
int kvm_set_memory_region(struct kvm *kvm, int kvm_set_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem, struct kvm_userspace_memory_region *mem,
......
...@@ -101,8 +101,8 @@ static bool largepages_enabled = true; ...@@ -101,8 +101,8 @@ static bool largepages_enabled = true;
static struct page *hwpoison_page; static struct page *hwpoison_page;
static pfn_t hwpoison_pfn; static pfn_t hwpoison_pfn;
static struct page *fault_page; struct page *fault_page;
static pfn_t fault_pfn; pfn_t fault_pfn;
inline int kvm_is_mmio_pfn(pfn_t pfn) inline int kvm_is_mmio_pfn(pfn_t pfn)
{ {
...@@ -931,6 +931,18 @@ int is_fault_pfn(pfn_t pfn) ...@@ -931,6 +931,18 @@ int is_fault_pfn(pfn_t pfn)
} }
EXPORT_SYMBOL_GPL(is_fault_pfn); EXPORT_SYMBOL_GPL(is_fault_pfn);
/*
 * Report whether @pfn is the "no memslot" sentinel, i.e. the pfn that
 * gfn_to_hva/gfn_to_pfn return when the gfn is not covered by any memslot
 * (which is how an MMIO fault is recognized).
 */
int is_noslot_pfn(pfn_t pfn)
{
	return pfn == bad_pfn ? 1 : 0;
}
EXPORT_SYMBOL_GPL(is_noslot_pfn);
/*
 * Report whether @pfn refers to a page KVM must not use: either the
 * hwpoison sentinel or the fault-page sentinel.
 */
int is_invalid_pfn(pfn_t pfn)
{
	if (pfn == hwpoison_pfn)
		return 1;
	return pfn == fault_pfn;
}
EXPORT_SYMBOL_GPL(is_invalid_pfn);
static inline unsigned long bad_hva(void) static inline unsigned long bad_hva(void)
{ {
return PAGE_OFFSET; return PAGE_OFFSET;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment