Commit c01c55a3 authored by Paul Durrant, committed by Sean Christopherson

KVM: x86/xen: separate initialization of shared_info cache and content

A subsequent patch will allow shared_info to be initialized using either a
GPA or a user-space (i.e. VMM) HVA. To make that patch cleaner, separate
the initialization of the shared_info content from the activation of the
pfncache.
Signed-off-by: Paul Durrant <pdurrant@amazon.com>
Reviewed-by: David Woodhouse <dwmw@amazon.co.uk>
Link: https://lore.kernel.org/r/20240215152916.1158-11-paul@xen.org
Signed-off-by: Sean Christopherson <seanjc@google.com>
parent 721f5b0d
...@@ -34,41 +34,32 @@ static bool kvm_xen_hcall_evtchn_send(struct kvm_vcpu *vcpu, u64 param, u64 *r); ...@@ -34,41 +34,32 @@ static bool kvm_xen_hcall_evtchn_send(struct kvm_vcpu *vcpu, u64 param, u64 *r);
DEFINE_STATIC_KEY_DEFERRED_FALSE(kvm_xen_enabled, HZ); DEFINE_STATIC_KEY_DEFERRED_FALSE(kvm_xen_enabled, HZ);
static int kvm_xen_shared_info_init(struct kvm *kvm, gfn_t gfn) static int kvm_xen_shared_info_init(struct kvm *kvm)
{ {
struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache; struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
struct pvclock_wall_clock *wc; struct pvclock_wall_clock *wc;
gpa_t gpa = gfn_to_gpa(gfn);
u32 *wc_sec_hi; u32 *wc_sec_hi;
u32 wc_version; u32 wc_version;
u64 wall_nsec; u64 wall_nsec;
int ret = 0; int ret = 0;
int idx = srcu_read_lock(&kvm->srcu); int idx = srcu_read_lock(&kvm->srcu);
if (gfn == KVM_XEN_INVALID_GFN) { read_lock_irq(&gpc->lock);
kvm_gpc_deactivate(gpc); while (!kvm_gpc_check(gpc, PAGE_SIZE)) {
goto out; read_unlock_irq(&gpc->lock);
}
do { ret = kvm_gpc_refresh(gpc, PAGE_SIZE);
ret = kvm_gpc_activate(gpc, gpa, PAGE_SIZE);
if (ret) if (ret)
goto out; goto out;
/*
* This code mirrors kvm_write_wall_clock() except that it writes
* directly through the pfn cache and doesn't mark the page dirty.
*/
wall_nsec = kvm_get_wall_clock_epoch(kvm);
/* It could be invalid again already, so we need to check */
read_lock_irq(&gpc->lock); read_lock_irq(&gpc->lock);
}
if (gpc->valid) /*
break; * This code mirrors kvm_write_wall_clock() except that it writes
* directly through the pfn cache and doesn't mark the page dirty.
read_unlock_irq(&gpc->lock); */
} while (1); wall_nsec = kvm_get_wall_clock_epoch(kvm);
/* Paranoia checks on the 32-bit struct layout */ /* Paranoia checks on the 32-bit struct layout */
BUILD_BUG_ON(offsetof(struct compat_shared_info, wc) != 0x900); BUILD_BUG_ON(offsetof(struct compat_shared_info, wc) != 0x900);
...@@ -639,12 +630,30 @@ int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data) ...@@ -639,12 +630,30 @@ int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
} }
break; break;
case KVM_XEN_ATTR_TYPE_SHARED_INFO: case KVM_XEN_ATTR_TYPE_SHARED_INFO: {
int idx;
mutex_lock(&kvm->arch.xen.xen_lock); mutex_lock(&kvm->arch.xen.xen_lock);
r = kvm_xen_shared_info_init(kvm, data->u.shared_info.gfn);
idx = srcu_read_lock(&kvm->srcu);
if (data->u.shared_info.gfn == KVM_XEN_INVALID_GFN) {
kvm_gpc_deactivate(&kvm->arch.xen.shinfo_cache);
r = 0;
} else {
r = kvm_gpc_activate(&kvm->arch.xen.shinfo_cache,
gfn_to_gpa(data->u.shared_info.gfn),
PAGE_SIZE);
}
srcu_read_unlock(&kvm->srcu, idx);
if (!r && kvm->arch.xen.shinfo_cache.active)
r = kvm_xen_shared_info_init(kvm);
mutex_unlock(&kvm->arch.xen.xen_lock); mutex_unlock(&kvm->arch.xen.xen_lock);
break; break;
}
case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR: case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR:
if (data->u.vector && data->u.vector < 0x10) if (data->u.vector && data->u.vector < 0x10)
r = -EINVAL; r = -EINVAL;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment