Commit b5396271 authored by Paolo Bonzini

Merge tag 'kvm-riscv-6.5-1' of https://github.com/kvm-riscv/linux into HEAD

KVM/riscv changes for 6.5

- Redirect AMO load/store misaligned traps to KVM guest
- Trap-n-emulate AIA in-kernel irqchip for KVM guest
- Svnapot support for KVM guest
parents a443e260 07f225b5
@@ -82,7 +82,9 @@
#define EXC_INST_ACCESS 1
#define EXC_INST_ILLEGAL 2
#define EXC_BREAKPOINT 3
#define EXC_LOAD_MISALIGNED 4
#define EXC_LOAD_ACCESS 5
#define EXC_STORE_MISALIGNED 6
#define EXC_STORE_ACCESS 7
#define EXC_SYSCALL 8
#define EXC_HYPERVISOR_SYSCALL 9
@@ -20,6 +20,33 @@ struct kvm_aia {
/* In-kernel irqchip initialized */
bool initialized;
/* Virtualization mode (Emulation, HW Accelerated, or Auto) */
u32 mode;
/* Number of MSIs */
u32 nr_ids;
/* Number of wired IRQs */
u32 nr_sources;
/* Number of group bits in IMSIC address */
u32 nr_group_bits;
/* Position of group bits in IMSIC address */
u32 nr_group_shift;
/* Number of hart bits in IMSIC address */
u32 nr_hart_bits;
/* Number of guest bits in IMSIC address */
u32 nr_guest_bits;
/* Guest physical address of APLIC */
gpa_t aplic_addr;
/* Internal state of APLIC */
void *aplic_state;
};
struct kvm_vcpu_aia_csr {
@@ -38,25 +65,53 @@ struct kvm_vcpu_aia {
/* CPU AIA CSR context upon Guest VCPU reset */
struct kvm_vcpu_aia_csr guest_reset_csr;
/* Guest physical address of IMSIC for this VCPU */
gpa_t imsic_addr;
/* HART index of IMSIC extracted from guest physical address */
u32 hart_index;
/* Internal state of IMSIC for this VCPU */
void *imsic_state;
};
#define KVM_RISCV_AIA_UNDEF_ADDR (-1)
#define kvm_riscv_aia_initialized(k) ((k)->arch.aia.initialized)
#define irqchip_in_kernel(k) ((k)->arch.aia.in_kernel)
extern unsigned int kvm_riscv_aia_nr_hgei;
extern unsigned int kvm_riscv_aia_max_ids;
DECLARE_STATIC_KEY_FALSE(kvm_riscv_aia_available);
#define kvm_riscv_aia_available() \
static_branch_unlikely(&kvm_riscv_aia_available)
extern struct kvm_device_ops kvm_riscv_aia_device_ops;
void kvm_riscv_vcpu_aia_imsic_release(struct kvm_vcpu *vcpu);
int kvm_riscv_vcpu_aia_imsic_update(struct kvm_vcpu *vcpu);
#define KVM_RISCV_AIA_IMSIC_TOPEI (ISELECT_MASK + 1)
static inline int kvm_riscv_vcpu_aia_imsic_rmw(struct kvm_vcpu *vcpu,
unsigned long isel,
unsigned long *val,
unsigned long new_val,
unsigned long wr_mask)
{
return 0;
}
int kvm_riscv_vcpu_aia_imsic_rmw(struct kvm_vcpu *vcpu, unsigned long isel,
unsigned long *val, unsigned long new_val,
unsigned long wr_mask);
int kvm_riscv_aia_imsic_rw_attr(struct kvm *kvm, unsigned long type,
bool write, unsigned long *val);
int kvm_riscv_aia_imsic_has_attr(struct kvm *kvm, unsigned long type);
void kvm_riscv_vcpu_aia_imsic_reset(struct kvm_vcpu *vcpu);
int kvm_riscv_vcpu_aia_imsic_inject(struct kvm_vcpu *vcpu,
u32 guest_index, u32 offset, u32 iid);
int kvm_riscv_vcpu_aia_imsic_init(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_aia_imsic_cleanup(struct kvm_vcpu *vcpu);
int kvm_riscv_aia_aplic_set_attr(struct kvm *kvm, unsigned long type, u32 v);
int kvm_riscv_aia_aplic_get_attr(struct kvm *kvm, unsigned long type, u32 *v);
int kvm_riscv_aia_aplic_has_attr(struct kvm *kvm, unsigned long type);
int kvm_riscv_aia_aplic_inject(struct kvm *kvm, u32 source, bool level);
int kvm_riscv_aia_aplic_init(struct kvm *kvm);
void kvm_riscv_aia_aplic_cleanup(struct kvm *kvm);
#ifdef CONFIG_32BIT
void kvm_riscv_vcpu_aia_flush_interrupts(struct kvm_vcpu *vcpu);
@@ -93,31 +148,23 @@ int kvm_riscv_vcpu_aia_rmw_ireg(struct kvm_vcpu *vcpu, unsigned int csr_num,
{ .base = CSR_SIREG, .count = 1, .func = kvm_riscv_vcpu_aia_rmw_ireg }, \
{ .base = CSR_STOPEI, .count = 1, .func = kvm_riscv_vcpu_aia_rmw_topei },
static inline int kvm_riscv_vcpu_aia_update(struct kvm_vcpu *vcpu)
{
return 1;
}
static inline void kvm_riscv_vcpu_aia_reset(struct kvm_vcpu *vcpu)
{
}
static inline int kvm_riscv_vcpu_aia_init(struct kvm_vcpu *vcpu)
{
return 0;
}
int kvm_riscv_vcpu_aia_update(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_aia_reset(struct kvm_vcpu *vcpu);
int kvm_riscv_vcpu_aia_init(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_aia_deinit(struct kvm_vcpu *vcpu);
static inline void kvm_riscv_vcpu_aia_deinit(struct kvm_vcpu *vcpu)
{
}
int kvm_riscv_aia_inject_msi_by_id(struct kvm *kvm, u32 hart_index,
u32 guest_index, u32 iid);
int kvm_riscv_aia_inject_msi(struct kvm *kvm, struct kvm_msi *msi);
int kvm_riscv_aia_inject_irq(struct kvm *kvm, unsigned int irq, bool level);
static inline void kvm_riscv_aia_init_vm(struct kvm *kvm)
{
}
void kvm_riscv_aia_init_vm(struct kvm *kvm);
void kvm_riscv_aia_destroy_vm(struct kvm *kvm);
static inline void kvm_riscv_aia_destroy_vm(struct kvm *kvm)
{
}
int kvm_riscv_aia_alloc_hgei(int cpu, struct kvm_vcpu *owner,
void __iomem **hgei_va, phys_addr_t *hgei_pa);
void kvm_riscv_aia_free_hgei(int cpu, int hgei);
void kvm_riscv_aia_wakeon_hgei(struct kvm_vcpu *owner, bool enable);
void kvm_riscv_aia_enable(void);
void kvm_riscv_aia_disable(void);
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2021 Western Digital Corporation or its affiliates.
* Copyright (C) 2022 Ventana Micro Systems Inc.
*/
#ifndef __KVM_RISCV_AIA_APLIC_H
#define __KVM_RISCV_AIA_APLIC_H
#include <linux/bitops.h>
#define APLIC_MAX_IDC BIT(14)
#define APLIC_MAX_SOURCE 1024
#define APLIC_DOMAINCFG 0x0000
#define APLIC_DOMAINCFG_RDONLY 0x80000000
#define APLIC_DOMAINCFG_IE BIT(8)
#define APLIC_DOMAINCFG_DM BIT(2)
#define APLIC_DOMAINCFG_BE BIT(0)
#define APLIC_SOURCECFG_BASE 0x0004
#define APLIC_SOURCECFG_D BIT(10)
#define APLIC_SOURCECFG_CHILDIDX_MASK 0x000003ff
#define APLIC_SOURCECFG_SM_MASK 0x00000007
#define APLIC_SOURCECFG_SM_INACTIVE 0x0
#define APLIC_SOURCECFG_SM_DETACH 0x1
#define APLIC_SOURCECFG_SM_EDGE_RISE 0x4
#define APLIC_SOURCECFG_SM_EDGE_FALL 0x5
#define APLIC_SOURCECFG_SM_LEVEL_HIGH 0x6
#define APLIC_SOURCECFG_SM_LEVEL_LOW 0x7
#define APLIC_IRQBITS_PER_REG 32
#define APLIC_SETIP_BASE 0x1c00
#define APLIC_SETIPNUM 0x1cdc
#define APLIC_CLRIP_BASE 0x1d00
#define APLIC_CLRIPNUM 0x1ddc
#define APLIC_SETIE_BASE 0x1e00
#define APLIC_SETIENUM 0x1edc
#define APLIC_CLRIE_BASE 0x1f00
#define APLIC_CLRIENUM 0x1fdc
#define APLIC_SETIPNUM_LE 0x2000
#define APLIC_SETIPNUM_BE 0x2004
#define APLIC_GENMSI 0x3000
#define APLIC_TARGET_BASE 0x3004
#define APLIC_TARGET_HART_IDX_SHIFT 18
#define APLIC_TARGET_HART_IDX_MASK 0x3fff
#define APLIC_TARGET_GUEST_IDX_SHIFT 12
#define APLIC_TARGET_GUEST_IDX_MASK 0x3f
#define APLIC_TARGET_IPRIO_MASK 0xff
#define APLIC_TARGET_EIID_MASK 0x7ff
#endif
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2021 Western Digital Corporation or its affiliates.
* Copyright (C) 2022 Ventana Micro Systems Inc.
*/
#ifndef __KVM_RISCV_AIA_IMSIC_H
#define __KVM_RISCV_AIA_IMSIC_H
#include <linux/types.h>
#include <asm/csr.h>
#define IMSIC_MMIO_PAGE_SHIFT 12
#define IMSIC_MMIO_PAGE_SZ (1UL << IMSIC_MMIO_PAGE_SHIFT)
#define IMSIC_MMIO_PAGE_LE 0x00
#define IMSIC_MMIO_PAGE_BE 0x04
#define IMSIC_MIN_ID 63
#define IMSIC_MAX_ID 2048
#define IMSIC_EIDELIVERY 0x70
#define IMSIC_EITHRESHOLD 0x72
#define IMSIC_EIP0 0x80
#define IMSIC_EIP63 0xbf
#define IMSIC_EIPx_BITS 32
#define IMSIC_EIE0 0xc0
#define IMSIC_EIE63 0xff
#define IMSIC_EIEx_BITS 32
#define IMSIC_FIRST IMSIC_EIDELIVERY
#define IMSIC_LAST IMSIC_EIE63
#define IMSIC_MMIO_SETIPNUM_LE 0x00
#define IMSIC_MMIO_SETIPNUM_BE 0x04
#endif
@@ -27,6 +27,8 @@
#define KVM_VCPU_MAX_FEATURES 0
#define KVM_IRQCHIP_NUM_PINS 1024
#define KVM_REQ_SLEEP \
KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(1)
@@ -318,6 +320,8 @@ int kvm_riscv_gstage_vmid_init(struct kvm *kvm);
bool kvm_riscv_gstage_vmid_ver_changed(struct kvm_vmid *vmid);
void kvm_riscv_gstage_vmid_update(struct kvm_vcpu *vcpu);
int kvm_riscv_setup_default_irq_routing(struct kvm *kvm, u32 lines);
void __kvm_riscv_unpriv_trap(void);
unsigned long kvm_riscv_vcpu_unpriv_read(struct kvm_vcpu *vcpu,
@@ -14,9 +14,15 @@
#define KVM_SBI_VERSION_MAJOR 1
#define KVM_SBI_VERSION_MINOR 0
enum kvm_riscv_sbi_ext_status {
KVM_RISCV_SBI_EXT_UNINITIALIZED,
KVM_RISCV_SBI_EXT_AVAILABLE,
KVM_RISCV_SBI_EXT_UNAVAILABLE,
};
struct kvm_vcpu_sbi_context {
int return_handled;
bool extension_disabled[KVM_RISCV_SBI_EXT_MAX];
enum kvm_riscv_sbi_ext_status ext_status[KVM_RISCV_SBI_EXT_MAX];
};
struct kvm_vcpu_sbi_return {
@@ -66,4 +72,7 @@ extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_hsm;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_experimental;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_vendor;
#ifdef CONFIG_RISCV_PMU_SBI
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_pmu;
#endif
#endif /* __RISCV_KVM_VCPU_SBI_H__ */
@@ -15,6 +15,7 @@
#include <asm/bitsperlong.h>
#include <asm/ptrace.h>
#define __KVM_HAVE_IRQ_LINE
#define __KVM_HAVE_READONLY_MEM
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
@@ -121,6 +122,7 @@ enum KVM_RISCV_ISA_EXT_ID {
KVM_RISCV_ISA_EXT_ZICBOZ,
KVM_RISCV_ISA_EXT_ZBB,
KVM_RISCV_ISA_EXT_SSAIA,
KVM_RISCV_ISA_EXT_SVNAPOT,
KVM_RISCV_ISA_EXT_MAX,
};
@@ -203,6 +205,77 @@ enum KVM_RISCV_SBI_EXT_ID {
#define KVM_REG_RISCV_SBI_MULTI_REG_LAST \
KVM_REG_RISCV_SBI_MULTI_REG(KVM_RISCV_SBI_EXT_MAX - 1)
/* Device Control API: RISC-V AIA */
#define KVM_DEV_RISCV_APLIC_ALIGN 0x1000
#define KVM_DEV_RISCV_APLIC_SIZE 0x4000
#define KVM_DEV_RISCV_APLIC_MAX_HARTS 0x4000
#define KVM_DEV_RISCV_IMSIC_ALIGN 0x1000
#define KVM_DEV_RISCV_IMSIC_SIZE 0x1000
#define KVM_DEV_RISCV_AIA_GRP_CONFIG 0
#define KVM_DEV_RISCV_AIA_CONFIG_MODE 0
#define KVM_DEV_RISCV_AIA_CONFIG_IDS 1
#define KVM_DEV_RISCV_AIA_CONFIG_SRCS 2
#define KVM_DEV_RISCV_AIA_CONFIG_GROUP_BITS 3
#define KVM_DEV_RISCV_AIA_CONFIG_GROUP_SHIFT 4
#define KVM_DEV_RISCV_AIA_CONFIG_HART_BITS 5
#define KVM_DEV_RISCV_AIA_CONFIG_GUEST_BITS 6
/*
* Modes of RISC-V AIA device:
* 1) EMUL (aka Emulation): Trap-n-emulate IMSIC
* 2) HWACCEL (aka HW Acceleration): Virtualize IMSIC using IMSIC guest files
3) AUTO (aka Automatic): Virtualize IMSIC using IMSIC guest files whenever
available, otherwise fall back to trap-n-emulation
*/
#define KVM_DEV_RISCV_AIA_MODE_EMUL 0
#define KVM_DEV_RISCV_AIA_MODE_HWACCEL 1
#define KVM_DEV_RISCV_AIA_MODE_AUTO 2
#define KVM_DEV_RISCV_AIA_IDS_MIN 63
#define KVM_DEV_RISCV_AIA_IDS_MAX 2048
#define KVM_DEV_RISCV_AIA_SRCS_MAX 1024
#define KVM_DEV_RISCV_AIA_GROUP_BITS_MAX 8
#define KVM_DEV_RISCV_AIA_GROUP_SHIFT_MIN 24
#define KVM_DEV_RISCV_AIA_GROUP_SHIFT_MAX 56
#define KVM_DEV_RISCV_AIA_HART_BITS_MAX 16
#define KVM_DEV_RISCV_AIA_GUEST_BITS_MAX 8
#define KVM_DEV_RISCV_AIA_GRP_ADDR 1
#define KVM_DEV_RISCV_AIA_ADDR_APLIC 0
#define KVM_DEV_RISCV_AIA_ADDR_IMSIC(__vcpu) (1 + (__vcpu))
#define KVM_DEV_RISCV_AIA_ADDR_MAX \
(1 + KVM_DEV_RISCV_APLIC_MAX_HARTS)
#define KVM_DEV_RISCV_AIA_GRP_CTRL 2
#define KVM_DEV_RISCV_AIA_CTRL_INIT 0
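Taken together, the CONFIG, ADDR, and CTRL groups define the whole userspace bring-up sequence for the in-kernel AIA irqchip: create the device, configure it, program the APLIC and per-VCPU IMSIC guest physical addresses, then finalize. The following is an editorial sketch of that flow, not code from this merge; the guest physical addresses, sizing, and helper names are hypothetical, and error handling is omitted.

	#include <linux/kvm.h>
	#include <stddef.h>
	#include <sys/ioctl.h>

	/* Hypothetical helper: set one attribute on the AIA device fd. */
	static void aia_set_attr(int dev_fd, __u32 group, __u64 type, void *val)
	{
		struct kvm_device_attr attr = {
			.group = group,
			.attr  = type,
			.addr  = (__u64)(unsigned long)val,
		};

		ioctl(dev_fd, KVM_SET_DEVICE_ATTR, &attr);	/* error handling omitted */
	}

	static int aia_setup(int vm_fd, int nr_vcpus)
	{
		struct kvm_create_device cd = { .type = KVM_DEV_TYPE_RISCV_AIA };
		__u32 mode = KVM_DEV_RISCV_AIA_MODE_AUTO;
		__u32 nr_ids = 255, nr_srcs = 32;		/* hypothetical sizing */
		__u64 aplic_addr = 0x0c000000;			/* hypothetical guest address */
		__u64 imsic_addr;
		int i;

		ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);		/* device fd returned in cd.fd */

		aia_set_attr(cd.fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
			     KVM_DEV_RISCV_AIA_CONFIG_MODE, &mode);
		aia_set_attr(cd.fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
			     KVM_DEV_RISCV_AIA_CONFIG_IDS, &nr_ids);
		aia_set_attr(cd.fd, KVM_DEV_RISCV_AIA_GRP_CONFIG,
			     KVM_DEV_RISCV_AIA_CONFIG_SRCS, &nr_srcs);

		aia_set_attr(cd.fd, KVM_DEV_RISCV_AIA_GRP_ADDR,
			     KVM_DEV_RISCV_AIA_ADDR_APLIC, &aplic_addr);
		for (i = 0; i < nr_vcpus; i++) {
			imsic_addr = 0x28000000ULL + (__u64)i * KVM_DEV_RISCV_IMSIC_SIZE;
			aia_set_attr(cd.fd, KVM_DEV_RISCV_AIA_GRP_ADDR,
				     KVM_DEV_RISCV_AIA_ADDR_IMSIC(i), &imsic_addr);
		}

		/* Finalize; all VCPUs must already have been created at this point. */
		aia_set_attr(cd.fd, KVM_DEV_RISCV_AIA_GRP_CTRL,
			     KVM_DEV_RISCV_AIA_CTRL_INIT, NULL);

		return cd.fd;
	}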
/*
The device attribute type contains the memory-mapped offset of an
APLIC register (range 0x0000-0x3FFF), which must be 4-byte aligned.
*/
#define KVM_DEV_RISCV_AIA_GRP_APLIC 3
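Under this encoding, a single APLIC register is read or written as one device attribute whose type is the register's MMIO offset. A hedged fragment, reusing the hypothetical aia_set_attr() helper from the sketch above; the source number is illustrative and the APLIC_SOURCECFG_* offsets mirror the in-kernel header earlier in this commit.

	/* Sketch: configure wired source 5 as active-high, level-triggered. */
	static void aplic_cfg_source5(int aia_fd)
	{
		__u32 cfg = APLIC_SOURCECFG_SM_LEVEL_HIGH;
		__u64 off = APLIC_SOURCECFG_BASE + (5 - 1) * 4;	/* sourcecfg[5]; 4-byte aligned */

		aia_set_attr(aia_fd, KVM_DEV_RISCV_AIA_GRP_APLIC, off, &cfg);
	}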
/*
The lower 12 bits of the device attribute type contain the iselect
value of an IMSIC register (range 0x70-0xFF), while the higher-order
bits contain the VCPU id.
*/
#define KVM_DEV_RISCV_AIA_GRP_IMSIC 4
#define KVM_DEV_RISCV_AIA_IMSIC_ISEL_BITS 12
#define KVM_DEV_RISCV_AIA_IMSIC_ISEL_MASK \
((1U << KVM_DEV_RISCV_AIA_IMSIC_ISEL_BITS) - 1)
#define KVM_DEV_RISCV_AIA_IMSIC_MKATTR(__vcpu, __isel) \
(((__vcpu) << KVM_DEV_RISCV_AIA_IMSIC_ISEL_BITS) | \
((__isel) & KVM_DEV_RISCV_AIA_IMSIC_ISEL_MASK))
#define KVM_DEV_RISCV_AIA_IMSIC_GET_ISEL(__attr) \
((__attr) & KVM_DEV_RISCV_AIA_IMSIC_ISEL_MASK)
#define KVM_DEV_RISCV_AIA_IMSIC_GET_VCPU(__attr) \
((__attr) >> KVM_DEV_RISCV_AIA_IMSIC_ISEL_BITS)
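As a worked example of the encoding (editorial, not from the commit): iselect 0x72 is EITHRESHOLD in the IMSIC header above, so the attribute for VCPU 3's EITHRESHOLD is (3 << 12) | 0x72 = 0x3072, and GET_VCPU()/GET_ISEL() recover 3 and 0x72 from it. Saving one IMSIC register from userspace then looks roughly like this (aia_fd is hypothetical):

	/* Sketch: read VCPU 3's EITHRESHOLD (iselect 0x72) via the IMSIC group. */
	static unsigned long imsic_read_eithreshold(int aia_fd)
	{
		unsigned long val = 0;
		struct kvm_device_attr attr = {
			.group = KVM_DEV_RISCV_AIA_GRP_IMSIC,
			.attr  = KVM_DEV_RISCV_AIA_IMSIC_MKATTR(3, 0x72),
			.addr  = (__u64)(unsigned long)&val,
		};

		ioctl(aia_fd, KVM_GET_DEVICE_ATTR, &attr);	/* error handling omitted */
		return val;
	}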
/* One single KVM irqchip, i.e. the AIA */
#define KVM_NR_IRQCHIPS 1
#endif
#endif /* __LINUX_KVM_RISCV_H */
@@ -21,6 +21,10 @@ config KVM
tristate "Kernel-based Virtual Machine (KVM) support (EXPERIMENTAL)"
depends on RISCV_SBI && MMU
select HAVE_KVM_EVENTFD
select HAVE_KVM_IRQCHIP
select HAVE_KVM_IRQFD
select HAVE_KVM_IRQ_ROUTING
select HAVE_KVM_MSI
select HAVE_KVM_VCPU_ASYNC_IOCTL
select KVM_GENERIC_DIRTYLOG_READ_PROTECT
select KVM_GENERIC_HARDWARE_ENABLING
@@ -27,3 +27,6 @@ kvm-y += vcpu_sbi_hsm.o
kvm-y += vcpu_timer.o
kvm-$(CONFIG_RISCV_PMU_SBI) += vcpu_pmu.o vcpu_sbi_pmu.o
kvm-y += aia.o
kvm-y += aia_device.o
kvm-y += aia_aplic.o
kvm-y += aia_imsic.o
@@ -8,11 +8,49 @@
*/
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kvm_host.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <asm/hwcap.h>
#include <asm/kvm_aia_imsic.h>
struct aia_hgei_control {
raw_spinlock_t lock;
unsigned long free_bitmap;
struct kvm_vcpu *owners[BITS_PER_LONG];
};
static DEFINE_PER_CPU(struct aia_hgei_control, aia_hgei);
static int hgei_parent_irq;
unsigned int kvm_riscv_aia_nr_hgei;
unsigned int kvm_riscv_aia_max_ids;
DEFINE_STATIC_KEY_FALSE(kvm_riscv_aia_available);
static int aia_find_hgei(struct kvm_vcpu *owner)
{
int i, hgei;
unsigned long flags;
struct aia_hgei_control *hgctrl = get_cpu_ptr(&aia_hgei);
raw_spin_lock_irqsave(&hgctrl->lock, flags);
hgei = -1;
for (i = 1; i <= kvm_riscv_aia_nr_hgei; i++) {
if (hgctrl->owners[i] == owner) {
hgei = i;
break;
}
}
raw_spin_unlock_irqrestore(&hgctrl->lock, flags);
put_cpu_ptr(&aia_hgei);
return hgei;
}
static void aia_set_hvictl(bool ext_irq_pending)
{
unsigned long hvictl;
@@ -56,6 +94,7 @@ void kvm_riscv_vcpu_aia_sync_interrupts(struct kvm_vcpu *vcpu)
bool kvm_riscv_vcpu_aia_has_interrupts(struct kvm_vcpu *vcpu, u64 mask)
{
int hgei;
unsigned long seip;
if (!kvm_riscv_aia_available())
@@ -74,6 +113,10 @@ bool kvm_riscv_vcpu_aia_has_interrupts(struct kvm_vcpu *vcpu, u64 mask)
if (!kvm_riscv_aia_initialized(vcpu->kvm) || !seip)
return false;
hgei = aia_find_hgei(vcpu);
if (hgei > 0)
return !!(csr_read(CSR_HGEIP) & BIT(hgei));
return false;
}
@@ -323,8 +366,6 @@ static int aia_rmw_iprio(struct kvm_vcpu *vcpu, unsigned int isel,
return KVM_INSN_CONTINUE_NEXT_SEPC;
}
#define IMSIC_FIRST 0x70
#define IMSIC_LAST 0xff
int kvm_riscv_vcpu_aia_rmw_ireg(struct kvm_vcpu *vcpu, unsigned int csr_num,
unsigned long *val, unsigned long new_val,
unsigned long wr_mask)
@@ -348,6 +389,143 @@ int kvm_riscv_vcpu_aia_rmw_ireg(struct kvm_vcpu *vcpu, unsigned int csr_num,
return KVM_INSN_EXIT_TO_USER_SPACE;
}
int kvm_riscv_aia_alloc_hgei(int cpu, struct kvm_vcpu *owner,
void __iomem **hgei_va, phys_addr_t *hgei_pa)
{
int ret = -ENOENT;
unsigned long flags;
struct aia_hgei_control *hgctrl = per_cpu_ptr(&aia_hgei, cpu);
if (!kvm_riscv_aia_available() || !hgctrl)
return -ENODEV;
raw_spin_lock_irqsave(&hgctrl->lock, flags);
if (hgctrl->free_bitmap) {
ret = __ffs(hgctrl->free_bitmap);
hgctrl->free_bitmap &= ~BIT(ret);
hgctrl->owners[ret] = owner;
}
raw_spin_unlock_irqrestore(&hgctrl->lock, flags);
/* TODO: To be updated later by AIA IMSIC HW guest file support */
if (hgei_va)
*hgei_va = NULL;
if (hgei_pa)
*hgei_pa = 0;
return ret;
}
void kvm_riscv_aia_free_hgei(int cpu, int hgei)
{
unsigned long flags;
struct aia_hgei_control *hgctrl = per_cpu_ptr(&aia_hgei, cpu);
if (!kvm_riscv_aia_available() || !hgctrl)
return;
raw_spin_lock_irqsave(&hgctrl->lock, flags);
if (hgei > 0 && hgei <= kvm_riscv_aia_nr_hgei) {
if (!(hgctrl->free_bitmap & BIT(hgei))) {
hgctrl->free_bitmap |= BIT(hgei);
hgctrl->owners[hgei] = NULL;
}
}
raw_spin_unlock_irqrestore(&hgctrl->lock, flags);
}
void kvm_riscv_aia_wakeon_hgei(struct kvm_vcpu *owner, bool enable)
{
int hgei;
if (!kvm_riscv_aia_available())
return;
hgei = aia_find_hgei(owner);
if (hgei > 0) {
if (enable)
csr_set(CSR_HGEIE, BIT(hgei));
else
csr_clear(CSR_HGEIE, BIT(hgei));
}
}
static irqreturn_t hgei_interrupt(int irq, void *dev_id)
{
int i;
unsigned long hgei_mask, flags;
struct aia_hgei_control *hgctrl = get_cpu_ptr(&aia_hgei);
hgei_mask = csr_read(CSR_HGEIP) & csr_read(CSR_HGEIE);
csr_clear(CSR_HGEIE, hgei_mask);
raw_spin_lock_irqsave(&hgctrl->lock, flags);
for_each_set_bit(i, &hgei_mask, BITS_PER_LONG) {
if (hgctrl->owners[i])
kvm_vcpu_kick(hgctrl->owners[i]);
}
raw_spin_unlock_irqrestore(&hgctrl->lock, flags);
put_cpu_ptr(&aia_hgei);
return IRQ_HANDLED;
}
static int aia_hgei_init(void)
{
int cpu, rc;
struct irq_domain *domain;
struct aia_hgei_control *hgctrl;
/* Initialize per-CPU guest external interrupt line management */
for_each_possible_cpu(cpu) {
hgctrl = per_cpu_ptr(&aia_hgei, cpu);
raw_spin_lock_init(&hgctrl->lock);
if (kvm_riscv_aia_nr_hgei) {
hgctrl->free_bitmap =
BIT(kvm_riscv_aia_nr_hgei + 1) - 1;
hgctrl->free_bitmap &= ~BIT(0);
} else
hgctrl->free_bitmap = 0;
}
/* Find INTC irq domain */
domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(),
DOMAIN_BUS_ANY);
if (!domain) {
kvm_err("unable to find INTC domain\n");
return -ENOENT;
}
/* Map per-CPU SGEI interrupt from INTC domain */
hgei_parent_irq = irq_create_mapping(domain, IRQ_S_GEXT);
if (!hgei_parent_irq) {
kvm_err("unable to map SGEI IRQ\n");
return -ENOMEM;
}
/* Request per-CPU SGEI interrupt */
rc = request_percpu_irq(hgei_parent_irq, hgei_interrupt,
"riscv-kvm", &aia_hgei);
if (rc) {
kvm_err("failed to request SGEI IRQ\n");
return rc;
}
return 0;
}
static void aia_hgei_exit(void)
{
/* Free per-CPU SGEI interrupt */
free_percpu_irq(hgei_parent_irq, &aia_hgei);
}
void kvm_riscv_aia_enable(void)
{
if (!kvm_riscv_aia_available())
@@ -362,21 +540,105 @@ void kvm_riscv_aia_enable(void)
csr_write(CSR_HVIPRIO1H, 0x0);
csr_write(CSR_HVIPRIO2H, 0x0);
#endif
/* Enable per-CPU SGEI interrupt */
enable_percpu_irq(hgei_parent_irq,
irq_get_trigger_type(hgei_parent_irq));
csr_set(CSR_HIE, BIT(IRQ_S_GEXT));
}
void kvm_riscv_aia_disable(void)
{
int i;
unsigned long flags;
struct kvm_vcpu *vcpu;
struct aia_hgei_control *hgctrl;
if (!kvm_riscv_aia_available())
return;
hgctrl = get_cpu_ptr(&aia_hgei);
/* Disable per-CPU SGEI interrupt */
csr_clear(CSR_HIE, BIT(IRQ_S_GEXT));
disable_percpu_irq(hgei_parent_irq);
aia_set_hvictl(false);
raw_spin_lock_irqsave(&hgctrl->lock, flags);
for (i = 0; i <= kvm_riscv_aia_nr_hgei; i++) {
vcpu = hgctrl->owners[i];
if (!vcpu)
continue;
/*
* We release hgctrl->lock before notifying IMSIC
* so that we don't have lock ordering issues.
*/
raw_spin_unlock_irqrestore(&hgctrl->lock, flags);
/* Notify IMSIC */
kvm_riscv_vcpu_aia_imsic_release(vcpu);
/*
Wake up the VCPU if it was blocked so that it can
run on other HARTs
*/
if (csr_read(CSR_HGEIE) & BIT(i)) {
csr_clear(CSR_HGEIE, BIT(i));
kvm_vcpu_kick(vcpu);
}
raw_spin_lock_irqsave(&hgctrl->lock, flags);
}
raw_spin_unlock_irqrestore(&hgctrl->lock, flags);
put_cpu_ptr(&aia_hgei);
}
int kvm_riscv_aia_init(void)
{
int rc;
if (!riscv_isa_extension_available(NULL, SxAIA))
return -ENODEV;
/* Figure out the number of bits in HGEIE */
csr_write(CSR_HGEIE, -1UL);
kvm_riscv_aia_nr_hgei = fls_long(csr_read(CSR_HGEIE));
csr_write(CSR_HGEIE, 0);
if (kvm_riscv_aia_nr_hgei)
kvm_riscv_aia_nr_hgei--;
/*
The number of usable HGEI lines should be the minimum of the per-HART
IMSIC guest files and the number of bits in HGEIE
*
* TODO: To be updated later by AIA IMSIC HW guest file support
*/
kvm_riscv_aia_nr_hgei = 0;
/*
* Find number of guest MSI IDs
*
* TODO: To be updated later by AIA IMSIC HW guest file support
*/
kvm_riscv_aia_max_ids = IMSIC_MAX_ID;
/* Initialize guest external interrupt line management */
rc = aia_hgei_init();
if (rc)
return rc;
/* Register device operations */
rc = kvm_register_device_ops(&kvm_riscv_aia_device_ops,
KVM_DEV_TYPE_RISCV_AIA);
if (rc) {
aia_hgei_exit();
return rc;
}
/* Enable KVM AIA support */
static_branch_enable(&kvm_riscv_aia_available);
@@ -385,4 +647,12 @@ int kvm_riscv_aia_init(void)
void kvm_riscv_aia_exit(void)
{
if (!kvm_riscv_aia_available())
return;
/* Unregister device operations */
kvm_unregister_device_ops(KVM_DEV_TYPE_RISCV_AIA);
/* Cleanup the HGEI state */
aia_hgei_exit();
}
@@ -116,7 +116,8 @@ static int __init riscv_kvm_init(void)
kvm_info("VMID %ld bits available\n", kvm_riscv_gstage_vmid_bits());
if (kvm_riscv_aia_available())
kvm_info("AIA available\n");
kvm_info("AIA available with %d guest external interrupts\n",
kvm_riscv_aia_nr_hgei);
rc = kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE);
if (rc) {
@@ -296,7 +296,7 @@ static void make_xfence_request(struct kvm *kvm,
unsigned int actual_req = req;
DECLARE_BITMAP(vcpu_mask, KVM_MAX_VCPUS);
bitmap_clear(vcpu_mask, 0, KVM_MAX_VCPUS);
bitmap_zero(vcpu_mask, KVM_MAX_VCPUS);
kvm_for_each_vcpu(i, vcpu, kvm) {
if (hbase != -1UL) {
if (vcpu->vcpu_id < hbase)
@@ -61,6 +61,7 @@ static const unsigned long kvm_isa_ext_arr[] = {
KVM_ISA_EXT_ARR(SSAIA),
KVM_ISA_EXT_ARR(SSTC),
KVM_ISA_EXT_ARR(SVINVAL),
KVM_ISA_EXT_ARR(SVNAPOT),
KVM_ISA_EXT_ARR(SVPBMT),
KVM_ISA_EXT_ARR(ZBB),
KVM_ISA_EXT_ARR(ZIHINTPAUSE),
@@ -102,6 +103,7 @@ static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
case KVM_RISCV_ISA_EXT_SSAIA:
case KVM_RISCV_ISA_EXT_SSTC:
case KVM_RISCV_ISA_EXT_SVINVAL:
case KVM_RISCV_ISA_EXT_SVNAPOT:
case KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
case KVM_RISCV_ISA_EXT_ZBB:
return false;
@@ -250,10 +252,12 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
kvm_riscv_aia_wakeon_hgei(vcpu, true);
}
void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
kvm_riscv_aia_wakeon_hgei(vcpu, false);
}
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
@@ -183,6 +183,8 @@ int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
run->exit_reason = KVM_EXIT_UNKNOWN;
switch (trap->scause) {
case EXC_INST_ILLEGAL:
case EXC_LOAD_MISALIGNED:
case EXC_STORE_MISALIGNED:
if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV) {
kvm_riscv_vcpu_trap_redirect(vcpu, trap);
ret = 1;
@@ -20,9 +20,7 @@ static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01 = {
};
#endif
#ifdef CONFIG_RISCV_PMU_SBI
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_pmu;
#else
#ifndef CONFIG_RISCV_PMU_SBI
static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_pmu = {
.extid_start = -1UL,
.extid_end = -1UL,
@@ -31,49 +29,49 @@ static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_pmu = {
#endif
struct kvm_riscv_sbi_extension_entry {
enum KVM_RISCV_SBI_EXT_ID dis_idx;
enum KVM_RISCV_SBI_EXT_ID ext_idx;
const struct kvm_vcpu_sbi_extension *ext_ptr;
};
static const struct kvm_riscv_sbi_extension_entry sbi_ext[] = {
{
.dis_idx = KVM_RISCV_SBI_EXT_V01,
.ext_idx = KVM_RISCV_SBI_EXT_V01,
.ext_ptr = &vcpu_sbi_ext_v01,
},
{
.dis_idx = KVM_RISCV_SBI_EXT_MAX, /* Can't be disabled */
.ext_idx = KVM_RISCV_SBI_EXT_MAX, /* Can't be disabled */
.ext_ptr = &vcpu_sbi_ext_base,
},
{
.dis_idx = KVM_RISCV_SBI_EXT_TIME,
.ext_idx = KVM_RISCV_SBI_EXT_TIME,
.ext_ptr = &vcpu_sbi_ext_time,
},
{
.dis_idx = KVM_RISCV_SBI_EXT_IPI,
.ext_idx = KVM_RISCV_SBI_EXT_IPI,
.ext_ptr = &vcpu_sbi_ext_ipi,
},
{
.dis_idx = KVM_RISCV_SBI_EXT_RFENCE,
.ext_idx = KVM_RISCV_SBI_EXT_RFENCE,
.ext_ptr = &vcpu_sbi_ext_rfence,
},
{
.dis_idx = KVM_RISCV_SBI_EXT_SRST,
.ext_idx = KVM_RISCV_SBI_EXT_SRST,
.ext_ptr = &vcpu_sbi_ext_srst,
},
{
.dis_idx = KVM_RISCV_SBI_EXT_HSM,
.ext_idx = KVM_RISCV_SBI_EXT_HSM,
.ext_ptr = &vcpu_sbi_ext_hsm,
},
{
.dis_idx = KVM_RISCV_SBI_EXT_PMU,
.ext_idx = KVM_RISCV_SBI_EXT_PMU,
.ext_ptr = &vcpu_sbi_ext_pmu,
},
{
.dis_idx = KVM_RISCV_SBI_EXT_EXPERIMENTAL,
.ext_idx = KVM_RISCV_SBI_EXT_EXPERIMENTAL,
.ext_ptr = &vcpu_sbi_ext_experimental,
},
{
.dis_idx = KVM_RISCV_SBI_EXT_VENDOR,
.ext_idx = KVM_RISCV_SBI_EXT_VENDOR,
.ext_ptr = &vcpu_sbi_ext_vendor,
},
};
@@ -147,7 +145,7 @@ static int riscv_vcpu_set_sbi_ext_single(struct kvm_vcpu *vcpu,
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
if (sbi_ext[i].dis_idx == reg_num) {
if (sbi_ext[i].ext_idx == reg_num) {
sext = &sbi_ext[i];
break;
}
@@ -155,7 +153,15 @@
if (!sext)
return -ENOENT;
scontext->extension_disabled[sext->dis_idx] = !reg_val;
/*
* We can't set the extension status to available here, since it may
* have a probe() function which needs to confirm availability first,
* but it may be too early to call that here. We can set the status to
* unavailable, though.
*/
if (!reg_val)
scontext->ext_status[sext->ext_idx] =
KVM_RISCV_SBI_EXT_UNAVAILABLE;
return 0;
}
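From userspace, this setter is reached through KVM_SET_ONE_REG on a single SBI-extension register; writing zero disables the extension, which is why only the UNAVAILABLE transition can be recorded here. A hedged sketch (rv64 register size assumed; vcpu_fd is hypothetical):

	/* Sketch: disable the legacy SBI v0.1 extension for one VCPU. */
	static void sbi_disable_v01(int vcpu_fd)
	{
		unsigned long enable = 0;
		struct kvm_one_reg reg = {
			.id   = KVM_REG_RISCV | KVM_REG_SIZE_U64 |	/* KVM_REG_SIZE_U32 on rv32 */
				KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE |
				KVM_RISCV_SBI_EXT_V01,
			.addr = (unsigned long)&enable,
		};

		ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);	/* error handling omitted */
	}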
@@ -172,7 +178,7 @@ static int riscv_vcpu_get_sbi_ext_single(struct kvm_vcpu *vcpu,
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
if (sbi_ext[i].dis_idx == reg_num) {
if (sbi_ext[i].ext_idx == reg_num) {
sext = &sbi_ext[i];
break;
}
@@ -180,7 +186,15 @@
if (!sext)
return -ENOENT;
*reg_val = !scontext->extension_disabled[sext->dis_idx];
/*
* If the extension status is still uninitialized, then we should probe
* to determine if it's available, but it may be too early to do that
* here. The best we can do is report that the extension has not been
* disabled, i.e. we return 1 when the extension is available and also
* when it only may be available.
*/
*reg_val = scontext->ext_status[sext->ext_idx] !=
KVM_RISCV_SBI_EXT_UNAVAILABLE;
return 0;
}
@@ -307,18 +321,32 @@ int kvm_riscv_vcpu_get_reg_sbi_ext(struct kvm_vcpu *vcpu,
const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(
struct kvm_vcpu *vcpu, unsigned long extid)
{
int i;
const struct kvm_riscv_sbi_extension_entry *sext;
struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
const struct kvm_riscv_sbi_extension_entry *entry;
const struct kvm_vcpu_sbi_extension *ext;
int i;
for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
sext = &sbi_ext[i];
if (sext->ext_ptr->extid_start <= extid &&
sext->ext_ptr->extid_end >= extid) {
if (sext->dis_idx < KVM_RISCV_SBI_EXT_MAX &&
scontext->extension_disabled[sext->dis_idx])
entry = &sbi_ext[i];
ext = entry->ext_ptr;
if (ext->extid_start <= extid && ext->extid_end >= extid) {
if (entry->ext_idx >= KVM_RISCV_SBI_EXT_MAX ||
scontext->ext_status[entry->ext_idx] ==
KVM_RISCV_SBI_EXT_AVAILABLE)
return ext;
if (scontext->ext_status[entry->ext_idx] ==
KVM_RISCV_SBI_EXT_UNAVAILABLE)
return NULL;
return sbi_ext[i].ext_ptr;
if (ext->probe && !ext->probe(vcpu)) {
scontext->ext_status[entry->ext_idx] =
KVM_RISCV_SBI_EXT_UNAVAILABLE;
return NULL;
}
scontext->ext_status[entry->ext_idx] =
KVM_RISCV_SBI_EXT_AVAILABLE;
return ext;
}
}
@@ -55,11 +55,129 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
kvm_riscv_aia_destroy_vm(kvm);
}
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irql,
bool line_status)
{
if (!irqchip_in_kernel(kvm))
return -ENXIO;
return kvm_riscv_aia_inject_irq(kvm, irql->irq, irql->level);
}
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
struct kvm *kvm, int irq_source_id,
int level, bool line_status)
{
struct kvm_msi msi;
if (!level)
return -1;
msi.address_lo = e->msi.address_lo;
msi.address_hi = e->msi.address_hi;
msi.data = e->msi.data;
msi.flags = e->msi.flags;
msi.devid = e->msi.devid;
return kvm_riscv_aia_inject_msi(kvm, &msi);
}
static int kvm_riscv_set_irq(struct kvm_kernel_irq_routing_entry *e,
struct kvm *kvm, int irq_source_id,
int level, bool line_status)
{
return kvm_riscv_aia_inject_irq(kvm, e->irqchip.pin, level);
}
int kvm_riscv_setup_default_irq_routing(struct kvm *kvm, u32 lines)
{
struct kvm_irq_routing_entry *ents;
int i, rc;
ents = kcalloc(lines, sizeof(*ents), GFP_KERNEL);
if (!ents)
return -ENOMEM;
for (i = 0; i < lines; i++) {
ents[i].gsi = i;
ents[i].type = KVM_IRQ_ROUTING_IRQCHIP;
ents[i].u.irqchip.irqchip = 0;
ents[i].u.irqchip.pin = i;
}
rc = kvm_set_irq_routing(kvm, ents, lines, 0);
kfree(ents);
return rc;
}
bool kvm_arch_can_set_irq_routing(struct kvm *kvm)
{
return irqchip_in_kernel(kvm);
}
int kvm_set_routing_entry(struct kvm *kvm,
struct kvm_kernel_irq_routing_entry *e,
const struct kvm_irq_routing_entry *ue)
{
int r = -EINVAL;
switch (ue->type) {
case KVM_IRQ_ROUTING_IRQCHIP:
e->set = kvm_riscv_set_irq;
e->irqchip.irqchip = ue->u.irqchip.irqchip;
e->irqchip.pin = ue->u.irqchip.pin;
if ((e->irqchip.pin >= KVM_IRQCHIP_NUM_PINS) ||
(e->irqchip.irqchip >= KVM_NR_IRQCHIPS))
goto out;
break;
case KVM_IRQ_ROUTING_MSI:
e->set = kvm_set_msi;
e->msi.address_lo = ue->u.msi.address_lo;
e->msi.address_hi = ue->u.msi.address_hi;
e->msi.data = ue->u.msi.data;
e->msi.flags = ue->flags;
e->msi.devid = ue->u.msi.devid;
break;
default:
goto out;
}
r = 0;
out:
return r;
}
int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
struct kvm *kvm, int irq_source_id, int level,
bool line_status)
{
if (!level)
return -EWOULDBLOCK;
switch (e->type) {
case KVM_IRQ_ROUTING_MSI:
return kvm_set_msi(e, kvm, irq_source_id, level, line_status);
case KVM_IRQ_ROUTING_IRQCHIP:
return kvm_riscv_set_irq(e, kvm, irq_source_id,
level, line_status);
}
return -EWOULDBLOCK;
}
bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
{
return irqchip_in_kernel(kvm);
}
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
int r;
switch (ext) {
case KVM_CAP_IRQCHIP:
r = kvm_riscv_aia_available();
break;
case KVM_CAP_IOEVENTFD:
case KVM_CAP_DEVICE_CTRL:
case KVM_CAP_USER_MEMORY:
@@ -1442,6 +1442,8 @@ enum kvm_device_type {
#define KVM_DEV_TYPE_XIVE KVM_DEV_TYPE_XIVE
KVM_DEV_TYPE_ARM_PV_TIME,
#define KVM_DEV_TYPE_ARM_PV_TIME KVM_DEV_TYPE_ARM_PV_TIME
KVM_DEV_TYPE_RISCV_AIA,
#define KVM_DEV_TYPE_RISCV_AIA KVM_DEV_TYPE_RISCV_AIA
KVM_DEV_TYPE_MAX,
};