Commit 690edec5 authored by Paolo Bonzini

Merge tag 'kvmarm-fixes-for-5.1' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into kvm-master

KVM/ARM fixes for 5.1

- Fix THP handling in the presence of pre-existing PTEs
- Honor request for PTE mappings even when THPs are available
- GICv4 performance improvement
- Take the srcu lock when writing to guest-controlled ITS data structures
- Reset the virtual PMU in preemptible context
- Various cleanups
parents e2788c4a 8324c3d5
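
A minimal sketch of the locking rule behind the ITS srcu fix, assuming a hypothetical caller: the kvm_write_guest_lock() helper it relies on is the one introduced in the diff below, and everything else here is illustrative only, not code from this merge.

static int example_save_guest_entry(struct kvm *kvm, gpa_t gpa, u64 entry)
{
	/* ITS tables are stored little-endian in guest memory */
	__le64 val = cpu_to_le64(entry);

	/*
	 * Guest memory is reached through the memslot array, which is
	 * protected by kvm->srcu; the helper takes and releases the srcu
	 * read lock around kvm_write_guest(), so callers such as the
	 * vgic-its save/restore paths don't have to do it themselves.
	 */
	return kvm_write_guest_lock(kvm, gpa, &val, sizeof(val));
}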
@@ -381,6 +381,17 @@ static inline int kvm_read_guest_lock(struct kvm *kvm,
 	return ret;
 }
 
+static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
+				       const void *data, unsigned long len)
+{
+	int srcu_idx = srcu_read_lock(&kvm->srcu);
+	int ret = kvm_write_guest(kvm, gpa, data, len);
+
+	srcu_read_unlock(&kvm->srcu, srcu_idx);
+
+	return ret;
+}
+
 static inline void *kvm_get_hyp_vector(void)
 {
 	switch(read_cpuid_part()) {
...
@@ -75,6 +75,8 @@ static inline bool kvm_stage2_has_pud(struct kvm *kvm)
 #define S2_PMD_MASK				PMD_MASK
 #define S2_PMD_SIZE				PMD_SIZE
+#define S2_PUD_MASK				PUD_MASK
+#define S2_PUD_SIZE				PUD_SIZE
 
 static inline bool kvm_stage2_has_pmd(struct kvm *kvm)
 {
...
@@ -445,6 +445,17 @@ static inline int kvm_read_guest_lock(struct kvm *kvm,
 	return ret;
 }
 
+static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
+				       const void *data, unsigned long len)
+{
+	int srcu_idx = srcu_read_lock(&kvm->srcu);
+	int ret = kvm_write_guest(kvm, gpa, data, len);
+
+	srcu_read_unlock(&kvm->srcu, srcu_idx);
+
+	return ret;
+}
+
 #ifdef CONFIG_KVM_INDIRECT_VECTORS
 /*
  * EL2 vectors can be mapped and rerouted in a number of ways,
...
@@ -123,6 +123,9 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 	int ret = -EINVAL;
 	bool loaded;
 
+	/* Reset PMU outside of the non-preemptible section */
+	kvm_pmu_vcpu_reset(vcpu);
+
 	preempt_disable();
 	loaded = (vcpu->cpu != -1);
 	if (loaded)
@@ -170,9 +173,6 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 		vcpu->arch.reset_state.reset = false;
 	}
 
-	/* Reset PMU */
-	kvm_pmu_vcpu_reset(vcpu);
-
 	/* Default workaround setup is enabled (if supported) */
 	if (kvm_arm_have_ssbd() == KVM_SSBD_KERNEL)
 		vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
...
@@ -222,7 +222,7 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
 		}
 	}
 
-	if (used_lrs) {
+	if (used_lrs || cpu_if->its_vpe.its_vm) {
 		int i;
 		u32 elrsr;
 
@@ -247,7 +247,7 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
 	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
 	int i;
 
-	if (used_lrs) {
+	if (used_lrs || cpu_if->its_vpe.its_vm) {
 		write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
 
 		for (i = 0; i < used_lrs; i++)
...
@@ -102,8 +102,7 @@ static bool kvm_is_device_pfn(unsigned long pfn)
  * @addr:	IPA
  * @pmd:	pmd pointer for IPA
  *
- * Function clears a PMD entry, flushes addr 1st and 2nd stage TLBs. Marks all
- * pages in the range dirty.
+ * Function clears a PMD entry, flushes addr 1st and 2nd stage TLBs.
  */
 static void stage2_dissolve_pmd(struct kvm *kvm, phys_addr_t addr, pmd_t *pmd)
 {
@@ -121,8 +120,7 @@ static void stage2_dissolve_pmd(struct kvm *kvm, phys_addr_t addr, pmd_t *pmd)
  * @addr:	IPA
  * @pud:	pud pointer for IPA
  *
- * Function clears a PUD entry, flushes addr 1st and 2nd stage TLBs. Marks all
- * pages in the range dirty.
+ * Function clears a PUD entry, flushes addr 1st and 2nd stage TLBs.
  */
 static void stage2_dissolve_pud(struct kvm *kvm, phys_addr_t addr, pud_t *pudp)
 {
@@ -899,9 +897,8 @@ int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
 * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
 * @kvm:	The KVM struct pointer for the VM.
 *
- * Allocates only the stage-2 HW PGD level table(s) (can support either full
- * 40-bit input addresses or limited to 32-bit input addresses). Clears the
- * allocated pages.
+ * Allocates only the stage-2 HW PGD level table(s) of size defined by
+ * stage2_pgd_size(kvm).
 *
 * Note we don't need locking here as this is only called when the VM is
 * created, which can only be done once.
@@ -1067,11 +1064,11 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
 {
 	pmd_t *pmd, old_pmd;
 
+retry:
 	pmd = stage2_get_pmd(kvm, cache, addr);
 	VM_BUG_ON(!pmd);
 
 	old_pmd = *pmd;
-	if (pmd_present(old_pmd)) {
 	/*
 	 * Multiple vcpus faulting on the same PMD entry, can
 	 * lead to them sequentially updating the PMD with the
@@ -1086,6 +1083,24 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
 	if (pmd_val(old_pmd) == pmd_val(*new_pmd))
 		return 0;
 
+	if (pmd_present(old_pmd)) {
+		/*
+		 * If we already have PTE level mapping for this block,
+		 * we must unmap it to avoid inconsistent TLB state and
+		 * leaking the table page. We could end up in this situation
+		 * if the memory slot was marked for dirty logging and was
+		 * reverted, leaving PTE level mappings for the pages accessed
+		 * during the period. So, unmap the PTE level mapping for this
+		 * block and retry, as we could have released the upper level
+		 * table in the process.
+		 *
+		 * Normal THP split/merge follows mmu_notifier callbacks and do
+		 * get handled accordingly.
+		 */
+		if (!pmd_thp_or_huge(old_pmd)) {
+			unmap_stage2_range(kvm, addr & S2_PMD_MASK, S2_PMD_SIZE);
+			goto retry;
+		}
 		/*
 		 * Mapping in huge pages should only happen through a
 		 * fault. If a page is merged into a transparent huge
@@ -1097,8 +1112,7 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
 		 * should become splitting first, unmapped, merged,
 		 * and mapped back in on-demand.
 		 */
-		VM_BUG_ON(pmd_pfn(old_pmd) != pmd_pfn(*new_pmd));
-
+		WARN_ON_ONCE(pmd_pfn(old_pmd) != pmd_pfn(*new_pmd));
 		pmd_clear(pmd);
 		kvm_tlb_flush_vmid_ipa(kvm, addr);
 	} else {
@@ -1114,6 +1128,7 @@ static int stage2_set_pud_huge(struct kvm *kvm, struct kvm_mmu_memory_cache *cac
 {
 	pud_t *pudp, old_pud;
 
+retry:
 	pudp = stage2_get_pud(kvm, cache, addr);
 	VM_BUG_ON(!pudp);
 
@@ -1121,14 +1136,23 @@ static int stage2_set_pud_huge(struct kvm *kvm, struct kvm_mmu_memory_cache *cac
 	/*
 	 * A large number of vcpus faulting on the same stage 2 entry,
-	 * can lead to a refault due to the
-	 * stage2_pud_clear()/tlb_flush(). Skip updating the page
-	 * tables if there is no change.
+	 * can lead to a refault due to the stage2_pud_clear()/tlb_flush().
+	 * Skip updating the page tables if there is no change.
 	 */
 	if (pud_val(old_pud) == pud_val(*new_pudp))
 		return 0;
 
 	if (stage2_pud_present(kvm, old_pud)) {
+		/*
+		 * If we already have table level mapping for this block, unmap
+		 * the range for this block and retry.
+		 */
+		if (!stage2_pud_huge(kvm, old_pud)) {
+			unmap_stage2_range(kvm, addr & S2_PUD_MASK, S2_PUD_SIZE);
+			goto retry;
+		}
+
+		WARN_ON_ONCE(kvm_pud_pfn(old_pud) != kvm_pud_pfn(*new_pudp));
 		stage2_pud_clear(kvm, pudp);
 		kvm_tlb_flush_vmid_ipa(kvm, addr);
 	} else {
@@ -1455,8 +1479,6 @@ static void stage2_wp_pmds(struct kvm *kvm, pud_t *pud,
 * @pgd:	pointer to pgd entry
 * @addr:	range start address
 * @end:	range end address
- *
- * Process PUD entries, for a huge PUD we cause a panic.
 */
 static void stage2_wp_puds(struct kvm *kvm, pgd_t *pgd,
 			   phys_addr_t addr, phys_addr_t end)
@@ -1594,8 +1616,9 @@ static void kvm_send_hwpoison_signal(unsigned long address,
 	send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, current);
 }
 
-static bool fault_supports_stage2_pmd_mappings(struct kvm_memory_slot *memslot,
-					       unsigned long hva)
+static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot,
+					       unsigned long hva,
+					       unsigned long map_size)
 {
 	gpa_t gpa_start;
 	hva_t uaddr_start, uaddr_end;
@@ -1610,34 +1633,34 @@ static bool fault_supports_stage2_pmd_mappings(struct kvm_memory_slot *memslot,
 	/*
 	 * Pages belonging to memslots that don't have the same alignment
-	 * within a PMD for userspace and IPA cannot be mapped with stage-2
-	 * PMD entries, because we'll end up mapping the wrong pages.
+	 * within a PMD/PUD for userspace and IPA cannot be mapped with stage-2
+	 * PMD/PUD entries, because we'll end up mapping the wrong pages.
 	 *
 	 * Consider a layout like the following:
 	 *
 	 *    memslot->userspace_addr:
 	 *    +-----+--------------------+--------------------+---+
-	 *    |abcde|fgh  Stage-1 PMD    |    Stage-1 PMD   tv|xyz|
+	 *    |abcde|fgh  Stage-1 block  |    Stage-1 block tv|xyz|
 	 *    +-----+--------------------+--------------------+---+
 	 *
 	 *    memslot->base_gfn << PAGE_SIZE:
 	 *      +---+--------------------+--------------------+-----+
-	 *      |abc|def  Stage-2 PMD    |    Stage-2 PMD     |tvxyz|
+	 *      |abc|def  Stage-2 block  |    Stage-2 block   |tvxyz|
 	 *      +---+--------------------+--------------------+-----+
 	 *
-	 * If we create those stage-2 PMDs, we'll end up with this incorrect
+	 * If we create those stage-2 blocks, we'll end up with this incorrect
 	 * mapping:
 	 *   d -> f
 	 *   e -> g
 	 *   f -> h
 	 */
-	if ((gpa_start & ~S2_PMD_MASK) != (uaddr_start & ~S2_PMD_MASK))
+	if ((gpa_start & (map_size - 1)) != (uaddr_start & (map_size - 1)))
 		return false;
 
 	/*
 	 * Next, let's make sure we're not trying to map anything not covered
-	 * by the memslot. This means we have to prohibit PMD size mappings
-	 * for the beginning and end of a non-PMD aligned and non-PMD sized
+	 * by the memslot. This means we have to prohibit block size mappings
+	 * for the beginning and end of a non-block aligned and non-block sized
 	 * memory slot (illustrated by the head and tail parts of the
 	 * userspace view above containing pages 'abcde' and 'xyz',
 	 * respectively).
@@ -1646,8 +1669,8 @@ static bool fault_supports_stage2_pmd_mappings(struct kvm_memory_slot *memslot,
 	 * userspace_addr or the base_gfn, as both are equally aligned (per
 	 * the check above) and equally sized.
 	 */
-	return (hva & S2_PMD_MASK) >= uaddr_start &&
-	       (hva & S2_PMD_MASK) + S2_PMD_SIZE <= uaddr_end;
+	return (hva & ~(map_size - 1)) >= uaddr_start &&
+	       (hva & ~(map_size - 1)) + map_size <= uaddr_end;
 }
 
 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
@@ -1676,12 +1699,6 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 		return -EFAULT;
 	}
 
-	if (!fault_supports_stage2_pmd_mappings(memslot, hva))
-		force_pte = true;
-
-	if (logging_active)
-		force_pte = true;
-
 	/* Let's check if we will get back a huge page backed by hugetlbfs */
 	down_read(&current->mm->mmap_sem);
 	vma = find_vma_intersection(current->mm, hva, hva + 1);
@@ -1692,6 +1709,12 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	}
 
 	vma_pagesize = vma_kernel_pagesize(vma);
+	if (logging_active ||
+	    !fault_supports_stage2_huge_mapping(memslot, hva, vma_pagesize)) {
+		force_pte = true;
+		vma_pagesize = PAGE_SIZE;
+	}
+
 	/*
 	 * The stage2 has a minimum of 2 level table (For arm64 see
 	 * kvm_arm_setup_stage2()). Hence, we are guaranteed that we can
@@ -1699,11 +1722,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	 * As for PUD huge maps, we must make sure that we have at least
 	 * 3 levels, i.e, PMD is not folded.
 	 */
-	if ((vma_pagesize == PMD_SIZE ||
-	     (vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm))) &&
-	    !force_pte) {
+	if (vma_pagesize == PMD_SIZE ||
+	    (vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm)))
 		gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
-	}
 	up_read(&current->mm->mmap_sem);
 
 	/* We need minimum second+third level pages */
...
@@ -754,8 +754,9 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
 	u64 indirect_ptr, type = GITS_BASER_TYPE(baser);
 	phys_addr_t base = GITS_BASER_ADDR_48_to_52(baser);
 	int esz = GITS_BASER_ENTRY_SIZE(baser);
-	int index;
+	int index, idx;
 	gfn_t gfn;
+	bool ret;
 
 	switch (type) {
 	case GITS_BASER_TYPE_DEVICE:
@@ -782,7 +783,8 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
 		if (eaddr)
 			*eaddr = addr;
-		return kvm_is_visible_gfn(its->dev->kvm, gfn);
+
+		goto out;
 	}
 
 	/* calculate and check the index into the 1st level */
@@ -812,7 +814,12 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
 	if (eaddr)
 		*eaddr = indirect_ptr;
-	return kvm_is_visible_gfn(its->dev->kvm, gfn);
+
+out:
+	idx = srcu_read_lock(&its->dev->kvm->srcu);
+	ret = kvm_is_visible_gfn(its->dev->kvm, gfn);
+	srcu_read_unlock(&its->dev->kvm->srcu, idx);
+	return ret;
 }
 
 static int vgic_its_alloc_collection(struct vgic_its *its,
@@ -1729,7 +1736,7 @@ static void vgic_its_destroy(struct kvm_device *kvm_dev)
 	kfree(its);
 }
 
-int vgic_its_has_attr_regs(struct kvm_device *dev,
-			   struct kvm_device_attr *attr)
+static int vgic_its_has_attr_regs(struct kvm_device *dev,
+				  struct kvm_device_attr *attr)
 {
 	const struct vgic_register_region *region;
@@ -1750,7 +1757,7 @@ int vgic_its_has_attr_regs(struct kvm_device *dev,
 	return 0;
 }
 
-int vgic_its_attr_regs_access(struct kvm_device *dev,
-			      struct kvm_device_attr *attr,
-			      u64 *reg, bool is_write)
+static int vgic_its_attr_regs_access(struct kvm_device *dev,
+				     struct kvm_device_attr *attr,
+				     u64 *reg, bool is_write)
 {
@@ -1919,7 +1926,7 @@ static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
 	       ((u64)ite->irq->intid << KVM_ITS_ITE_PINTID_SHIFT) |
 		ite->collection->collection_id;
 	val = cpu_to_le64(val);
-	return kvm_write_guest(kvm, gpa, &val, ite_esz);
+	return kvm_write_guest_lock(kvm, gpa, &val, ite_esz);
 }
 
 /**
@@ -2066,7 +2073,7 @@ static int vgic_its_save_dte(struct vgic_its *its, struct its_device *dev,
 	       (itt_addr_field << KVM_ITS_DTE_ITTADDR_SHIFT) |
 	       (dev->num_eventid_bits - 1));
 	val = cpu_to_le64(val);
-	return kvm_write_guest(kvm, ptr, &val, dte_esz);
+	return kvm_write_guest_lock(kvm, ptr, &val, dte_esz);
 }
 
 /**
@@ -2246,7 +2253,7 @@ static int vgic_its_save_cte(struct vgic_its *its,
 	       ((u64)collection->target_addr << KVM_ITS_CTE_RDBASE_SHIFT) |
 	       collection->collection_id);
 	val = cpu_to_le64(val);
-	return kvm_write_guest(its->dev->kvm, gpa, &val, esz);
+	return kvm_write_guest_lock(its->dev->kvm, gpa, &val, esz);
 }
 
 static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
@@ -2317,7 +2324,7 @@ static int vgic_its_save_collection_table(struct vgic_its *its)
 	 */
 	val = 0;
 	BUG_ON(cte_esz > sizeof(val));
-	ret = kvm_write_guest(its->dev->kvm, gpa, &val, cte_esz);
+	ret = kvm_write_guest_lock(its->dev->kvm, gpa, &val, cte_esz);
 	return ret;
 }
...
@@ -358,7 +358,7 @@ int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq)
 	if (status) {
 		/* clear consumed data */
 		val &= ~(1 << bit_nr);
-		ret = kvm_write_guest(kvm, ptr, &val, 1);
+		ret = kvm_write_guest_lock(kvm, ptr, &val, 1);
 		if (ret)
 			return ret;
 	}
@@ -409,7 +409,7 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
 		else
 			val &= ~(1 << bit_nr);
 
-		ret = kvm_write_guest(kvm, ptr, &val, 1);
+		ret = kvm_write_guest_lock(kvm, ptr, &val, 1);
 		if (ret)
 			return ret;
 	}
...
@@ -867,15 +867,21 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
 	 * either observe the new interrupt before or after doing this check,
 	 * and introducing additional synchronization mechanism doesn't change
 	 * this.
+	 *
+	 * Note that we still need to go through the whole thing if anything
+	 * can be directly injected (GICv4).
 	 */
-	if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
+	if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head) &&
+	    !vgic_supports_direct_msis(vcpu->kvm))
 		return;
 
 	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
 
-	raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
-	vgic_flush_lr_state(vcpu);
-	raw_spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
+	if (!list_empty(&vcpu->arch.vgic_cpu.ap_list_head)) {
+		raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
+		vgic_flush_lr_state(vcpu);
+		raw_spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
+	}
 
 	if (can_access_vgic_from_kernel())
 		vgic_restore_state(vcpu);
...