Commit 8e73485c authored by Paolo Bonzini

KVM: add vcpu-specific functions to read/write/translate GFNs

We need to hide SMRAM from guests not running in SMM.  Therefore, all
uses of kvm_read_guest* and kvm_write_guest* must be changed to use
different address spaces, depending on whether the VCPU is in system
management mode.  This patch introduces a new family of VCPU-based
functions for that purpose.

For now, the VCPU-based functions behave the same as the existing
per-VM ones; they just accept a different type for the first argument.
Later, however, they will be changed to use one of many
"struct kvm_memslots" stored in struct kvm, through an architecture
hook (sketched below).  The VM-based functions will unconditionally use
the first memslots pointer.
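
As an illustration of that direction (the names below are assumptions
about the follow-up work, not part of this patch), the VCPU-based
lookup could later dispatch on an architecture-defined address-space id:

	/*
	 * Sketch only: kvm_arch_vcpu_memslots_id() and a per-address-space
	 * __kvm_memslots() helper are hypothetical here; in this patch the
	 * function simply forwards to kvm_memslots(vcpu->kvm).
	 */
	static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
	{
		int as_id = kvm_arch_vcpu_memslots_id(vcpu); /* e.g. 1 while in SMM */

		return __kvm_memslots(vcpu->kvm, as_id);
	}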

Whenever possible, this patch introduces slot-based functions with an
__ prefix, plus two wrappers for the generic and VCPU-based actions.
The exceptions are kvm_read_guest and kvm_write_guest, which are copied
into the new functions kvm_vcpu_read_guest and kvm_vcpu_write_guest.
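
For callers the conversion is mechanical.  A hypothetical example (the
function below is illustrative only, not part of this patch):

	/*
	 * Hypothetical caller: reads a u64 on behalf of a VCPU.  Using the
	 * VCPU-based helper lets a later patch route VCPUs that are in SMM
	 * to the SMRAM view of memory without touching this code again.
	 */
	static int example_read_u64(struct kvm_vcpu *vcpu, gpa_t gpa, u64 *val)
	{
		/* before: return kvm_read_guest(vcpu->kvm, gpa, val, sizeof(*val)); */
		return kvm_vcpu_read_guest(vcpu, gpa, val, sizeof(*val));
	}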

Reviewed-by: Radim Krčmář <rkrcmar@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 660a5d51
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -471,6 +471,11 @@ static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
 			|| lockdep_is_held(&kvm->slots_lock));
 }
 
+static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
+{
+	return kvm_memslots(vcpu->kvm);
+}
+
 static inline struct kvm_memory_slot *
 id_to_memslot(struct kvm_memslots *slots, int id)
 {
@@ -576,6 +581,25 @@ int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
 unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
 void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
 
+struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
+struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn);
+pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
+pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
+struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
+unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
+unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
+int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
+			     int len);
+int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
+			       unsigned long len);
+int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
+			unsigned long len);
+int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data,
+			      int offset, int len);
+int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
+			 unsigned long len);
+void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
+
 void kvm_vcpu_block(struct kvm_vcpu *vcpu);
 void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
 int kvm_vcpu_yield_to(struct kvm_vcpu *target);
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1100,6 +1100,11 @@ struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(gfn_to_memslot);
 
+struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+	return __gfn_to_memslot(kvm_vcpu_memslots(vcpu), gfn);
+}
+
 int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
 {
 	struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);
@@ -1175,6 +1180,12 @@ unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(gfn_to_hva);
 
+unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+	return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL);
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva);
+
 /*
  * If writable is set to false, the hva returned by this function is only
  * allowed to be read.
@@ -1197,6 +1208,13 @@ unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)
 	return gfn_to_hva_memslot_prot(slot, gfn, writable);
 }
 
+unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable)
+{
+	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
+
+	return gfn_to_hva_memslot_prot(slot, gfn, writable);
+}
+
 static int get_user_page_nowait(struct task_struct *tsk, struct mm_struct *mm,
 	unsigned long start, int write, struct page **page)
 {
@@ -1412,12 +1430,24 @@ pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(gfn_to_pfn_atomic);
 
+pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+	return gfn_to_pfn_memslot_atomic(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn_atomic);
+
 pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
 {
 	return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn);
 }
 EXPORT_SYMBOL_GPL(gfn_to_pfn);
 
+pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+	return gfn_to_pfn_memslot(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn);
+
 int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
 			    struct page **pages, int nr_pages)
 {
@@ -1458,6 +1488,16 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(gfn_to_page);
 
+struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+	pfn_t pfn;
+
+	pfn = kvm_vcpu_gfn_to_pfn(vcpu, gfn);
+
+	return kvm_pfn_to_page(pfn);
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_page);
+
 void kvm_release_page_clean(struct page *page)
 {
 	WARN_ON(is_error_page(page));
@@ -1520,13 +1560,13 @@ static int next_segment(unsigned long len, int offset)
 	return len;
 }
 
-int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
-			int len)
+static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn,
+				 void *data, int offset, int len)
 {
 	int r;
 	unsigned long addr;
 
-	addr = gfn_to_hva_prot(kvm, gfn, NULL);
+	addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
 	if (kvm_is_error_hva(addr))
 		return -EFAULT;
 	r = __copy_from_user(data, (void __user *)addr + offset, len);
@@ -1534,8 +1574,25 @@ int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
 		return -EFAULT;
 	return 0;
 }
+
+int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
+			int len)
+{
+	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
+
+	return __kvm_read_guest_page(slot, gfn, data, offset, len);
+}
 EXPORT_SYMBOL_GPL(kvm_read_guest_page);
 
+int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data,
+			     int offset, int len)
+{
+	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
+
+	return __kvm_read_guest_page(slot, gfn, data, offset, len);
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_page);
+
 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
 {
 	gfn_t gfn = gpa >> PAGE_SHIFT;
@@ -1556,15 +1613,33 @@ int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
 }
 EXPORT_SYMBOL_GPL(kvm_read_guest);
 
-int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
-			  unsigned long len)
+int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len)
 {
-	int r;
-	unsigned long addr;
 	gfn_t gfn = gpa >> PAGE_SHIFT;
+	int seg;
 	int offset = offset_in_page(gpa);
+	int ret;
+
+	while ((seg = next_segment(len, offset)) != 0) {
+		ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg);
+		if (ret < 0)
+			return ret;
+		offset = 0;
+		len -= seg;
+		data += seg;
+		++gfn;
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest);
+
+static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
+				   void *data, int offset, unsigned long len)
+{
+	int r;
+	unsigned long addr;
 
-	addr = gfn_to_hva_prot(kvm, gfn, NULL);
+	addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
 	if (kvm_is_error_hva(addr))
 		return -EFAULT;
 	pagefault_disable();
@@ -1574,16 +1649,35 @@ int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
 		return -EFAULT;
 	return 0;
 }
-EXPORT_SYMBOL(kvm_read_guest_atomic);
 
-int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
-			 int offset, int len)
+int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
+			  unsigned long len)
+{
+	gfn_t gfn = gpa >> PAGE_SHIFT;
+	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
+	int offset = offset_in_page(gpa);
+
+	return __kvm_read_guest_atomic(slot, gfn, data, offset, len);
+}
+EXPORT_SYMBOL_GPL(kvm_read_guest_atomic);
+
+int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa,
+			       void *data, unsigned long len)
+{
+	gfn_t gfn = gpa >> PAGE_SHIFT;
+	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
+	int offset = offset_in_page(gpa);
+
+	return __kvm_read_guest_atomic(slot, gfn, data, offset, len);
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic);
+
+static int __kvm_write_guest_page(struct kvm_memory_slot *memslot, gfn_t gfn,
+				  const void *data, int offset, int len)
 {
 	int r;
-	struct kvm_memory_slot *memslot;
 	unsigned long addr;
 
-	memslot = gfn_to_memslot(kvm, gfn);
 	addr = gfn_to_hva_memslot(memslot, gfn);
 	if (kvm_is_error_hva(addr))
 		return -EFAULT;
@@ -1593,8 +1687,25 @@ int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
 	mark_page_dirty_in_slot(memslot, gfn);
 	return 0;
 }
+
+int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn,
+			 const void *data, int offset, int len)
+{
+	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
+
+	return __kvm_write_guest_page(slot, gfn, data, offset, len);
+}
 EXPORT_SYMBOL_GPL(kvm_write_guest_page);
 
+int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
+			      const void *data, int offset, int len)
+{
+	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
+
+	return __kvm_write_guest_page(slot, gfn, data, offset, len);
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_page);
+
 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
 		    unsigned long len)
 {
@@ -1616,6 +1727,27 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
 }
 EXPORT_SYMBOL_GPL(kvm_write_guest);
 
+int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
+			 unsigned long len)
+{
+	gfn_t gfn = gpa >> PAGE_SHIFT;
+	int seg;
+	int offset = offset_in_page(gpa);
+	int ret;
+
+	while ((seg = next_segment(len, offset)) != 0) {
+		ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg);
+		if (ret < 0)
+			return ret;
+		offset = 0;
+		len -= seg;
+		data += seg;
+		++gfn;
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest);
+
 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 			      gpa_t gpa, unsigned long len)
 {
@@ -1750,6 +1882,15 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(mark_page_dirty);
 
+void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+	struct kvm_memory_slot *memslot;
+
+	memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
+	mark_page_dirty_in_slot(memslot, gfn);
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty);
+
 static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
 {
 	if (kvm_arch_vcpu_runnable(vcpu)) {
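The new helpers mirror the per-VM API one-for-one.  A hypothetical use
of the page-level variants (illustrative only, not from this patch):

	/*
	 * Hypothetical caller: pins a guest page through the VCPU's view
	 * of guest memory, uses it, and releases it.  is_error_page() and
	 * kvm_release_page_clean() are existing helpers visible in the
	 * diff above.
	 */
	static void example_touch_page(struct kvm_vcpu *vcpu, gfn_t gfn)
	{
		struct page *page = kvm_vcpu_gfn_to_page(vcpu, gfn);

		if (is_error_page(page))
			return;
		/* ... access the page contents, e.g. via kmap(page) ... */
		kvm_release_page_clean(page);
	}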