Commit 8f964525 authored by Andrew Honig, committed by Gleb Natapov

KVM: Allow cross page reads and writes from cached translations.

This patch extends kvm_gfn_to_hva_cache_init and the cached read/write
helpers to handle ranges that cross a page boundary.  If the range falls
within a single memslot, the cached translation keeps the access fast.
If the range is split between two memslots, the slower kvm_read_guest
and kvm_write_guest paths are used instead.

Tested: Tested against kvm_clock unit tests.
Signed-off-by: Andrew Honig <ahonig@google.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
parent 09a6e1f4
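
To make the intended use of the new length-aware API concrete before the diff: a minimal caller sketch (struct my_area, MY_AREA_GPA, and the surrounding error handling are invented for illustration; the kvm_* calls are the ones this patch changes):

	struct gfn_to_hva_cache ghc;
	struct my_area area;	/* hypothetical guest-shared structure */

	/*
	 * Register the full structure once.  If it crosses a page but stays
	 * within one memslot, later cached accesses remain fast; if it spans
	 * two memslots, init succeeds but marks the cache for the slow path.
	 */
	if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &ghc, MY_AREA_GPA,
				      sizeof(struct my_area)))
		return -EFAULT;

	/* Any length up to the one registered at init may be written. */
	if (kvm_write_guest_cached(vcpu->kvm, &ghc, &area, sizeof(area)))
		return -EFAULT;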
@@ -1857,7 +1857,7 @@ int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data)
 	if (!pv_eoi_enabled(vcpu))
 		return 0;
 	return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data,
-					 addr);
+					 addr, sizeof(u8));
 }
 
 void kvm_lapic_init(void)
...
@@ -1823,7 +1823,8 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
 		return 0;
 	}
 
-	if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa))
+	if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
+				      sizeof(u32)))
 		return 1;
 
 	vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
@@ -1952,12 +1953,9 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 
 		gpa_offset = data & ~(PAGE_MASK | 1);
 
-		/* Check that the address is 32-byte aligned. */
-		if (gpa_offset & (sizeof(struct pvclock_vcpu_time_info) - 1))
-			break;
-
 		if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
-		     &vcpu->arch.pv_time, data & ~1ULL))
+		     &vcpu->arch.pv_time, data & ~1ULL,
+		     sizeof(struct pvclock_vcpu_time_info)))
 			vcpu->arch.pv_time_enabled = false;
 		else
 			vcpu->arch.pv_time_enabled = true;
@@ -1977,7 +1975,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 			return 1;
 
 		if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
-					      data & KVM_STEAL_VALID_BITS))
+					      data & KVM_STEAL_VALID_BITS,
+					      sizeof(struct kvm_steal_time)))
 			return 1;
 
 		vcpu->arch.st.msr_val = data;
...
@@ -518,7 +518,7 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 			   void *data, unsigned long len);
 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
-			      gpa_t gpa);
+			      gpa_t gpa, unsigned long len);
 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
...
@@ -71,6 +71,7 @@ struct gfn_to_hva_cache {
 	u64 generation;
 	gpa_t gpa;
 	unsigned long hva;
+	unsigned long len;
 	struct kvm_memory_slot *memslot;
 };
...
@@ -1541,21 +1541,38 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
 }
 
 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
-			      gpa_t gpa)
+			      gpa_t gpa, unsigned long len)
 {
 	struct kvm_memslots *slots = kvm_memslots(kvm);
 	int offset = offset_in_page(gpa);
-	gfn_t gfn = gpa >> PAGE_SHIFT;
+	gfn_t start_gfn = gpa >> PAGE_SHIFT;
+	gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
+	gfn_t nr_pages_needed = end_gfn - start_gfn + 1;
+	gfn_t nr_pages_avail;
 
 	ghc->gpa = gpa;
 	ghc->generation = slots->generation;
-	ghc->memslot = gfn_to_memslot(kvm, gfn);
-	ghc->hva = gfn_to_hva_many(ghc->memslot, gfn, NULL);
-	if (!kvm_is_error_hva(ghc->hva))
+	ghc->len = len;
+	ghc->memslot = gfn_to_memslot(kvm, start_gfn);
+	ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, &nr_pages_avail);
+	if (!kvm_is_error_hva(ghc->hva) && nr_pages_avail >= nr_pages_needed) {
 		ghc->hva += offset;
-	else
-		return -EFAULT;
-
+	} else {
+		/*
+		 * If the requested region crosses two memslots, we still
+		 * verify that the entire region is valid here.
+		 */
+		while (start_gfn <= end_gfn) {
+			ghc->memslot = gfn_to_memslot(kvm, start_gfn);
+			ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
+						   &nr_pages_avail);
+			if (kvm_is_error_hva(ghc->hva))
+				return -EFAULT;
+			start_gfn += nr_pages_avail;
+		}
+		/* Use the slow path for cross page reads and writes. */
+		ghc->memslot = NULL;
+	}
 	return 0;
 }
 EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
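
To make the page-span arithmetic above concrete, a worked example (values invented; assumes 4 KiB pages, i.e. PAGE_SHIFT == 12):

	/*
	 * gpa = 0x1ffc (the last four bytes of page 1), len = 8:
	 *   start_gfn       = 0x1ffc >> 12           = 1
	 *   end_gfn         = (0x1ffc + 8 - 1) >> 12 = 0x2003 >> 12 = 2
	 *   nr_pages_needed = 2 - 1 + 1              = 2
	 * The access spans pages 1 and 2, so gfn_to_hva_many() must report
	 * at least two contiguous pages available in the memslot for the
	 * fast path to apply; otherwise the loop validates each piece and
	 * the cache falls back to the slow path.
	 */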
@@ -1566,8 +1583,13 @@ int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 	struct kvm_memslots *slots = kvm_memslots(kvm);
 	int r;
 
+	BUG_ON(len > ghc->len);
+
 	if (slots->generation != ghc->generation)
-		kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa);
+		kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);
+
+	if (unlikely(!ghc->memslot))
+		return kvm_write_guest(kvm, ghc->gpa, data, len);
 
 	if (kvm_is_error_hva(ghc->hva))
 		return -EFAULT;
@@ -1587,8 +1609,13 @@ int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 	struct kvm_memslots *slots = kvm_memslots(kvm);
 	int r;
 
+	BUG_ON(len > ghc->len);
+
 	if (slots->generation != ghc->generation)
-		kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa);
+		kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);
+
+	if (unlikely(!ghc->memslot))
+		return kvm_read_guest(kvm, ghc->gpa, data, len);
 
 	if (kvm_is_error_hva(ghc->hva))
 		return -EFAULT;
...
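
The BUG_ON added to both cached accessors encodes the resulting contract: a cached read or write may cover at most the length registered at init time, while shorter accesses are fine. A hypothetical partial read, reusing the cache from the earlier sketch:

	u32 flags;	/* hypothetical first field of the registered area */

	/*
	 * sizeof(flags) <= ghc.len, so this is allowed; requesting more
	 * than was registered would trip the BUG_ON.
	 */
	if (kvm_read_guest_cached(vcpu->kvm, &ghc, &flags, sizeof(flags)))
		return -EFAULT;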