Commit 1baa5efb authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM updates from Paolo Bonzini:
 "PPC changes will come next week.

   - s390: Support for runtime instrumentation within guests, support
     for 248 VCPUs.

   - ARM: rewrite of the arm64 world switch in C, support for 16-bit VM
     identifiers.  Performance counter virtualization missed the boat.

   - x86: Support for more Hyper-V features (synthetic interrupt
     controller), MMU cleanups"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (115 commits)
  kvm: x86: Fix vmwrite to SECONDARY_VM_EXEC_CONTROL
  kvm/x86: Hyper-V SynIC timers tracepoints
  kvm/x86: Hyper-V SynIC tracepoints
  kvm/x86: Update SynIC timers on guest entry only
  kvm/x86: Skip SynIC vector check for QEMU side
  kvm/x86: Hyper-V fix SynIC timer disabling condition
  kvm/x86: Reorg stimer_expiration() to better control timer restart
  kvm/x86: Hyper-V unify stimer_start() and stimer_restart()
  kvm/x86: Drop stimer_stop() function
  kvm/x86: Hyper-V timers fix incorrect logical operation
  KVM: move architecture-dependent requests to arch/
  KVM: renumber vcpu->request bits
  KVM: document which architecture uses each request bit
  KVM: Remove unused KVM_REQ_KICK to save a bit in vcpu->requests
  kvm: x86: Check kvm_write_guest return value in kvm_write_wall_clock
  KVM: s390: implement the RI support of guest
  kvm/s390: drop unpaired smp_mb
  kvm: x86: fix comment about {mmu,nested_mmu}.gva_to_gpa
  KVM: x86: MMU: Use clear_page() instead of init_shadow_page_table()
  arm/arm64: KVM: Detect vGIC presence at runtime
  ...
parents c9bed1cf 45bdbcfd
@@ -1451,6 +1451,7 @@ struct kvm_irq_routing_entry {
 		struct kvm_irq_routing_irqchip irqchip;
 		struct kvm_irq_routing_msi msi;
 		struct kvm_irq_routing_s390_adapter adapter;
+		struct kvm_irq_routing_hv_sint hv_sint;
 		__u32 pad[8];
 	} u;
 };
@@ -1459,6 +1460,7 @@ struct kvm_irq_routing_entry {
 #define KVM_IRQ_ROUTING_IRQCHIP 1
 #define KVM_IRQ_ROUTING_MSI 2
 #define KVM_IRQ_ROUTING_S390_ADAPTER 3
+#define KVM_IRQ_ROUTING_HV_SINT 4

 No flags are specified so far, the corresponding field must be set to zero.
@@ -1482,6 +1484,10 @@ struct kvm_irq_routing_s390_adapter {
 	__u32 adapter_id;
 };

+struct kvm_irq_routing_hv_sint {
+	__u32 vcpu;
+	__u32 sint;
+};
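As a hedged illustration (not part of this merge), userspace could wire a SynIC
interrupt source to a GSI with the existing KVM_SET_GSI_ROUTING ioctl roughly as
below; vm_fd and the chosen GSI number are assumptions, and error handling is
elided:

	/* Sketch: route GSI 5 to SynIC source vcpu 0 / SINT 2. */
	#include <linux/kvm.h>
	#include <stdlib.h>
	#include <sys/ioctl.h>

	static int add_hv_sint_route(int vm_fd)
	{
		struct kvm_irq_routing *routing;
		struct kvm_irq_routing_entry *e;
		int ret;

		routing = calloc(1, sizeof(*routing) + sizeof(*e));
		routing->nr = 1;
		e = &routing->entries[0];
		e->gsi = 5;			/* assumed GSI */
		e->type = KVM_IRQ_ROUTING_HV_SINT;
		e->u.hv_sint.vcpu = 0;
		e->u.hv_sint.sint = 2;

		ret = ioctl(vm_fd, KVM_SET_GSI_ROUTING, routing);
		free(routing);
		return ret;
	}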
 4.53 KVM_ASSIGN_SET_MSIX_NR (deprecated)

@@ -3331,6 +3337,28 @@ the userspace IOAPIC should process the EOI and retrigger the interrupt if
 it is still asserted. Vector is the LAPIC interrupt vector for which the
 EOI was received.
+struct kvm_hyperv_exit {
+#define KVM_EXIT_HYPERV_SYNIC 1
+	__u32 type;
+	union {
+		struct {
+			__u32 msr;
+			__u64 control;
+			__u64 evt_page;
+			__u64 msg_page;
+		} synic;
+	} u;
+};
+
+		/* KVM_EXIT_HYPERV */
+		struct kvm_hyperv_exit hyperv;
+
+Indicates that the VCPU exits into userspace to process some tasks
+related to Hyper-V emulation.
+
+Valid values for 'type' are:
+
+	KVM_EXIT_HYPERV_SYNIC -- synchronously notify user-space about
+Hyper-V SynIC state change. Notification is used to remap SynIC
+event/message pages and to enable/disable SynIC messages/events processing
+in userspace.
+
 		/* Fix the size of the union. */
 		char padding[256];
 	};
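A hedged sketch of how a VMM's run loop might consume this exit; run points at
the mmap'ed struct kvm_run, and remap_synic_pages() is a hypothetical VMM
helper, not a KVM API:

	ioctl(vcpu_fd, KVM_RUN, 0);

	switch (run->exit_reason) {
	case KVM_EXIT_HYPERV:
		if (run->hyperv.type == KVM_EXIT_HYPERV_SYNIC)
			/* hypothetical helper remapping the SynIC pages */
			remap_synic_pages(run->hyperv.u.synic.msg_page,
					  run->hyperv.u.synic.evt_page);
		break;
	/* ... other exit reasons ... */
	}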
@@ -3685,3 +3713,16 @@ available, means that the kernel has an implementation of the
 H_RANDOM hypercall backed by a hardware random-number generator.
 If present, the kernel H_RANDOM handler can be enabled for guest use
 with the KVM_CAP_PPC_ENABLE_HCALL capability.
+8.2 KVM_CAP_HYPERV_SYNIC
+
+Architectures: x86
+
+This capability, if KVM_CHECK_EXTENSION indicates that it is
+available, means that the kernel has an implementation of the
+Hyper-V Synthetic interrupt controller (SynIC). Hyper-V SynIC is
+used to support Windows Hyper-V based guest paravirt drivers (VMBus).
+
+In order to use SynIC, it has to be activated by setting this
+capability via the KVM_ENABLE_CAP ioctl on the vcpu fd. Note that this
+will disable the use of APIC hardware virtualization even if supported
+by the CPU, as it's incompatible with SynIC auto-EOI behavior.
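For illustration (vcpu_fd is an assumed open vcpu descriptor), activation looks
roughly like:

	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_HYPERV_SYNIC,
	};

	if (ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap) < 0)
		perror("KVM_ENABLE_CAP(KVM_CAP_HYPERV_SYNIC)");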
@@ -37,7 +37,8 @@ Returns: -EFAULT if the given address is not accessible
 Allows userspace to query the actual limit and set a new limit for
 the maximum guest memory size. The limit will be rounded up to
 2048 MB, 4096 GB, 8192 TB respectively, as this limit is governed by
-the number of page table levels.
+the number of page table levels. If there is no limit, it is set to
+KVM_S390_NO_MEM_LIMIT (U64_MAX).

 2. GROUP: KVM_S390_VM_CPU_MODEL
 Architectures: s390
......
@@ -203,10 +203,10 @@ Shadow pages contain the following information:
     page cannot be destroyed.  See role.invalid.
   parent_ptes:
     The reverse mapping for the pte/ptes pointing at this page's spt. If
-    parent_ptes bit 0 is zero, only one spte points at this pages and
+    parent_ptes bit 0 is zero, only one spte points at this page and
     parent_ptes points at this single spte, otherwise, there exists multiple
     sptes pointing at this page and (parent_ptes & ~0x1) points at a data
-    structure with a list of parent_ptes.
+    structure with a list of parent sptes.
   unsync:
     If true, then the translations in this page may not match the guest's
     translation.  This is equivalent to the state of the tlb when a pte is
......
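The parent_ptes bit-0 scheme above is standard pointer tagging; a hedged,
standalone sketch of the pattern (types and names are illustrative, not KVM's):

	#include <stdint.h>

	/* Bit 0 clear -> the value is a single spte pointer;
	 * bit 0 set   -> the value (minus the tag) points at a list. */
	struct rmap_list { uint64_t *sptes[8]; int nr; };

	static uint64_t *first_spte(uintptr_t parent_ptes)
	{
		if (!(parent_ptes & 1))
			return (uint64_t *)parent_ptes;	/* one spte, stored directly */
		return ((struct rmap_list *)(parent_ptes & ~1UL))->sptes[0];
	}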
@@ -6089,6 +6089,7 @@ M:	Marc Zyngier <marc.zyngier@arm.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:	kvmarm@lists.cs.columbia.edu
 W:	http://systems.cs.columbia.edu/projects/kvm-arm
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm.git
 S:	Supported
 F:	arch/arm/include/uapi/asm/kvm*
 F:	arch/arm/include/asm/kvm*
......
@@ -19,6 +19,7 @@
 #ifndef __ARM_KVM_ARM_H__
 #define __ARM_KVM_ARM_H__

+#include <linux/const.h>
 #include <linux/types.h>

 /* Hyp Configuration Register (HCR) bits */
@@ -132,10 +133,9 @@
  * space.
  */
 #define KVM_PHYS_SHIFT	(40)
-#define KVM_PHYS_SIZE	(1ULL << KVM_PHYS_SHIFT)
-#define KVM_PHYS_MASK	(KVM_PHYS_SIZE - 1ULL)
-#define PTRS_PER_S2_PGD	(1ULL << (KVM_PHYS_SHIFT - 30))
-#define S2_PGD_ORDER	get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
+#define KVM_PHYS_SIZE	(_AC(1, ULL) << KVM_PHYS_SHIFT)
+#define KVM_PHYS_MASK	(KVM_PHYS_SIZE - _AC(1, ULL))
+#define PTRS_PER_S2_PGD	(_AC(1, ULL) << (KVM_PHYS_SHIFT - 30))

 /* Virtualization Translation Control Register (VTCR) bits */
 #define VTCR_SH0	(3 << 12)
@@ -162,17 +162,17 @@
 #define VTTBR_X		(5 - KVM_T0SZ)
 #endif
 #define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
-#define VTTBR_BADDR_MASK  (((1LLU << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
-#define VTTBR_VMID_SHIFT  (48LLU)
-#define VTTBR_VMID_MASK	  (0xffLLU << VTTBR_VMID_SHIFT)
+#define VTTBR_BADDR_MASK  (((_AC(1, ULL) << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
+#define VTTBR_VMID_SHIFT  _AC(48, ULL)
+#define VTTBR_VMID_MASK(size)	(_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)

 /* Hyp Syndrome Register (HSR) bits */
 #define HSR_EC_SHIFT	(26)
-#define HSR_EC		(0x3fU << HSR_EC_SHIFT)
-#define HSR_IL		(1U << 25)
+#define HSR_EC		(_AC(0x3f, UL) << HSR_EC_SHIFT)
+#define HSR_IL		(_AC(1, UL) << 25)
 #define HSR_ISS		(HSR_IL - 1)
 #define HSR_ISV_SHIFT	(24)
-#define HSR_ISV		(1U << HSR_ISV_SHIFT)
+#define HSR_ISV		(_AC(1, UL) << HSR_ISV_SHIFT)
 #define HSR_SRT_SHIFT	(16)
 #define HSR_SRT_MASK	(0xf << HSR_SRT_SHIFT)
 #define HSR_FSC		(0x3f)
@@ -180,9 +180,9 @@
 #define HSR_SSE		(1 << 21)
 #define HSR_WNR		(1 << 6)
 #define HSR_CV_SHIFT	(24)
-#define HSR_CV		(1U << HSR_CV_SHIFT)
+#define HSR_CV		(_AC(1, UL) << HSR_CV_SHIFT)
 #define HSR_COND_SHIFT	(20)
-#define HSR_COND	(0xfU << HSR_COND_SHIFT)
+#define HSR_COND	(_AC(0xf, UL) << HSR_COND_SHIFT)

 #define FSC_FAULT	(0x04)
 #define FSC_ACCESS	(0x08)
@@ -210,13 +210,13 @@
 #define HSR_EC_DABT	(0x24)
 #define HSR_EC_DABT_HYP	(0x25)

-#define HSR_WFI_IS_WFE		(1U << 0)
+#define HSR_WFI_IS_WFE		(_AC(1, UL) << 0)

-#define HSR_HVC_IMM_MASK	((1UL << 16) - 1)
+#define HSR_HVC_IMM_MASK	((_AC(1, UL) << 16) - 1)

-#define HSR_DABT_S1PTW		(1U << 7)
-#define HSR_DABT_CM		(1U << 8)
-#define HSR_DABT_EA		(1U << 9)
+#define HSR_DABT_S1PTW		(_AC(1, UL) << 7)
+#define HSR_DABT_CM		(_AC(1, UL) << 8)
+#define HSR_DABT_EA		(_AC(1, UL) << 9)

 #define kvm_arm_exception_type	\
 	{0, "RESET" },		\
......
@@ -150,6 +150,12 @@ struct kvm_vcpu_stat {
 	u32 halt_successful_poll;
 	u32 halt_attempted_poll;
 	u32 halt_wakeup;
+	u32 hvc_exit_stat;
+	u64 wfe_exit_stat;
+	u64 wfi_exit_stat;
+	u64 mmio_exit_user;
+	u64 mmio_exit_kernel;
+	u64 exits;
 };

 int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
......
@@ -279,6 +279,11 @@ static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
 					pgd_t *merged_hyp_pgd,
 					unsigned long hyp_idmap_start) { }

+static inline unsigned int kvm_get_vmid_bits(void)
+{
+	return 8;
+}
+
 #endif	/* !__ASSEMBLY__ */

 #endif /* __ARM_KVM_MMU_H__ */
@@ -44,6 +44,7 @@
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_coproc.h>
 #include <asm/kvm_psci.h>
+#include <asm/sections.h>

 #ifdef REQUIRES_VIRT
 __asm__(".arch_extension	virt");
@@ -58,9 +59,12 @@ static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);

 /* The VMID used in the VTTBR */
 static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
-static u8 kvm_next_vmid;
+static u32 kvm_next_vmid;
+static unsigned int kvm_vmid_bits __read_mostly;
 static DEFINE_SPINLOCK(kvm_vmid_lock);

+static bool vgic_present;
+
 static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
 {
 	BUG_ON(preemptible());
@@ -132,7 +136,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	kvm->arch.vmid_gen = 0;

 	/* The maximum number of VCPUs is limited by the host's GIC model */
-	kvm->arch.max_vcpus = kvm_vgic_get_max_vcpus();
+	kvm->arch.max_vcpus = vgic_present ?
+				kvm_vgic_get_max_vcpus() : KVM_MAX_VCPUS;

 	return ret;
 out_free_stage2_pgd:
@@ -172,6 +177,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	int r;
 	switch (ext) {
 	case KVM_CAP_IRQCHIP:
+		r = vgic_present;
+		break;
 	case KVM_CAP_IOEVENTFD:
 	case KVM_CAP_DEVICE_CTRL:
 	case KVM_CAP_USER_MEMORY:
@@ -433,11 +440,12 @@ static void update_vttbr(struct kvm *kvm)
 	kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
 	kvm->arch.vmid = kvm_next_vmid;
 	kvm_next_vmid++;
+	kvm_next_vmid &= (1 << kvm_vmid_bits) - 1;

 	/* update vttbr to be used with the new vmid */
 	pgd_phys = virt_to_phys(kvm_get_hwpgd(kvm));
 	BUG_ON(pgd_phys & ~VTTBR_BADDR_MASK);
-	vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK;
+	vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
 	kvm->arch.vttbr = pgd_phys | vmid;

 	spin_unlock(&kvm_vmid_lock);
@@ -603,6 +611,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);

 		vcpu->mode = OUTSIDE_GUEST_MODE;
+		vcpu->stat.exits++;
 		/*
 		 * Back from guest
 		 *************************************************************/
@@ -913,6 +922,8 @@ static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
 	switch (dev_id) {
 	case KVM_ARM_DEVICE_VGIC_V2:
+		if (!vgic_present)
+			return -ENXIO;
 		return kvm_vgic_addr(kvm, type, &dev_addr->addr, true);
 	default:
 		return -ENODEV;
@@ -927,6 +938,8 @@ long kvm_arch_vm_ioctl(struct file *filp,

 	switch (ioctl) {
 	case KVM_CREATE_IRQCHIP: {
+		if (!vgic_present)
+			return -ENXIO;
 		return kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
 	}
 	case KVM_ARM_SET_DEVICE_ADDR: {
@@ -1067,6 +1080,12 @@ static int init_hyp_mode(void)
 		goto out_free_mappings;
 	}

+	err = create_hyp_mappings(__start_rodata, __end_rodata);
+	if (err) {
+		kvm_err("Cannot map rodata section\n");
+		goto out_free_mappings;
+	}
+
 	/*
 	 * Map the Hyp stack pages
 	 */
@@ -1111,8 +1130,17 @@ static int init_hyp_mode(void)
 	 * Init HYP view of VGIC
 	 */
 	err = kvm_vgic_hyp_init();
-	if (err)
+	switch (err) {
+	case 0:
+		vgic_present = true;
+		break;
+	case -ENODEV:
+	case -ENXIO:
+		vgic_present = false;
+		break;
+	default:
 		goto out_free_context;
+	}

 	/*
 	 * Init HYP architected timer support
@@ -1127,6 +1155,10 @@ static int init_hyp_mode(void)

 	kvm_perf_init();

+	/* set size of VMID supported by CPU */
+	kvm_vmid_bits = kvm_get_vmid_bits();
+	kvm_info("%d-bit VMID\n", kvm_vmid_bits);
+
 	kvm_info("Hyp mode initialized successfully\n");

 	return 0;
......
@@ -275,6 +275,40 @@ static u32 exc_vector_base(struct kvm_vcpu *vcpu)
 		return vbar;
 }

+/*
+ * Switch to an exception mode, updating both CPSR and SPSR. Follow
+ * the logic described in AArch32.EnterMode() from the ARMv8 ARM.
+ */
+static void kvm_update_psr(struct kvm_vcpu *vcpu, unsigned long mode)
+{
+	unsigned long cpsr = *vcpu_cpsr(vcpu);
+	u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
+
+	*vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | mode;
+
+	switch (mode) {
+	case FIQ_MODE:
+		*vcpu_cpsr(vcpu) |= PSR_F_BIT;
+		/* Fall through */
+	case ABT_MODE:
+	case IRQ_MODE:
+		*vcpu_cpsr(vcpu) |= PSR_A_BIT;
+		/* Fall through */
+	default:
+		*vcpu_cpsr(vcpu) |= PSR_I_BIT;
+	}
+
+	*vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT);
+
+	if (sctlr & SCTLR_TE)
+		*vcpu_cpsr(vcpu) |= PSR_T_BIT;
+	if (sctlr & SCTLR_EE)
+		*vcpu_cpsr(vcpu) |= PSR_E_BIT;
+
+	/* Note: These now point to the mode banked copies */
+	*vcpu_spsr(vcpu) = cpsr;
+}
+
 /**
  * kvm_inject_undefined - inject an undefined exception into the guest
  * @vcpu: The VCPU to receive the undefined exception
@@ -286,29 +320,13 @@ static u32 exc_vector_base(struct kvm_vcpu *vcpu)
  */
 void kvm_inject_undefined(struct kvm_vcpu *vcpu)
 {
-	unsigned long new_lr_value;
-	unsigned long new_spsr_value;
 	unsigned long cpsr = *vcpu_cpsr(vcpu);
-	u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
 	bool is_thumb = (cpsr & PSR_T_BIT);
 	u32 vect_offset = 4;
 	u32 return_offset = (is_thumb) ? 2 : 4;

-	new_spsr_value = cpsr;
-	new_lr_value = *vcpu_pc(vcpu) - return_offset;
-
-	*vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | UND_MODE;
-	*vcpu_cpsr(vcpu) |= PSR_I_BIT;
-	*vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT);
-
-	if (sctlr & SCTLR_TE)
-		*vcpu_cpsr(vcpu) |= PSR_T_BIT;
-	if (sctlr & SCTLR_EE)
-		*vcpu_cpsr(vcpu) |= PSR_E_BIT;
-
-	/* Note: These now point to UND banked copies */
-	*vcpu_spsr(vcpu) = cpsr;
-	*vcpu_reg(vcpu, 14) = new_lr_value;
+	kvm_update_psr(vcpu, UND_MODE);
+	*vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) - return_offset;

 	/* Branch to exception vector */
 	*vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset;
@@ -320,30 +338,14 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
  */
 static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr)
 {
-	unsigned long new_lr_value;
-	unsigned long new_spsr_value;
 	unsigned long cpsr = *vcpu_cpsr(vcpu);
-	u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
 	bool is_thumb = (cpsr & PSR_T_BIT);
 	u32 vect_offset;
 	u32 return_offset = (is_thumb) ? 4 : 0;
 	bool is_lpae;

-	new_spsr_value = cpsr;
-	new_lr_value = *vcpu_pc(vcpu) + return_offset;
-
-	*vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | ABT_MODE;
-	*vcpu_cpsr(vcpu) |= PSR_I_BIT | PSR_A_BIT;
-	*vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT);
-
-	if (sctlr & SCTLR_TE)
-		*vcpu_cpsr(vcpu) |= PSR_T_BIT;
-	if (sctlr & SCTLR_EE)
-		*vcpu_cpsr(vcpu) |= PSR_E_BIT;
-
-	/* Note: These now point to ABT banked copies */
-	*vcpu_spsr(vcpu) = cpsr;
-	*vcpu_reg(vcpu, 14) = new_lr_value;
+	kvm_update_psr(vcpu, ABT_MODE);
+	*vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;

 	if (is_pabt)
 		vect_offset = 12;
......
@@ -33,6 +33,12 @@
 #define VCPU_STAT(x) { #x, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU }

 struct kvm_stats_debugfs_item debugfs_entries[] = {
+	VCPU_STAT(hvc_exit_stat),
+	VCPU_STAT(wfe_exit_stat),
+	VCPU_STAT(wfi_exit_stat),
+	VCPU_STAT(mmio_exit_user),
+	VCPU_STAT(mmio_exit_kernel),
+	VCPU_STAT(exits),
 	{ NULL }
 };
......
@@ -42,6 +42,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)

 	trace_kvm_hvc(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0),
 		      kvm_vcpu_hvc_get_imm(vcpu));
+	vcpu->stat.hvc_exit_stat++;

 	ret = kvm_psci_call(vcpu);
 	if (ret < 0) {
@@ -89,9 +90,11 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 	if (kvm_vcpu_get_hsr(vcpu) & HSR_WFI_IS_WFE) {
 		trace_kvm_wfx(*vcpu_pc(vcpu), true);
+		vcpu->stat.wfe_exit_stat++;
 		kvm_vcpu_on_spin(vcpu);
 	} else {
 		trace_kvm_wfx(*vcpu_pc(vcpu), false);
+		vcpu->stat.wfi_exit_stat++;
 		kvm_vcpu_block(vcpu);
 	}
......
@@ -210,8 +210,11 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,

 	if (!ret) {
 		/* We handled the access successfully in the kernel. */
+		vcpu->stat.mmio_exit_kernel++;
 		kvm_handle_mmio_return(vcpu, run);
 		return 1;
+	} else {
+		vcpu->stat.mmio_exit_user++;
 	}

 	run->exit_reason	= KVM_EXIT_MMIO;
......
@@ -656,9 +656,9 @@ static void *kvm_alloc_hwpgd(void)
  * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
  * @kvm:	The KVM struct pointer for the VM.
  *
- * Allocates the 1st level table only of size defined by S2_PGD_ORDER (can
- * support either full 40-bit input addresses or limited to 32-bit input
- * addresses). Clears the allocated pages.
+ * Allocates only the stage-2 HW PGD level table(s) (can support either full
+ * 40-bit input addresses or limited to 32-bit input addresses). Clears the
+ * allocated pages.
  *
  * Note we don't need locking here as this is only called when the VM is
  * created, which can only be done once.
......
@@ -125,6 +125,7 @@
 #define VTCR_EL2_SL0_LVL1	(1 << 6)
 #define VTCR_EL2_T0SZ_MASK	0x3f
 #define VTCR_EL2_T0SZ_40B	24
+#define VTCR_EL2_VS		19

 /*
  * We configure the Stage-2 page tables to always restrict the IPA space to be
@@ -169,7 +170,7 @@
 #define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
 #define VTTBR_BADDR_MASK  (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
 #define VTTBR_VMID_SHIFT  (UL(48))
-#define VTTBR_VMID_MASK	  (UL(0xFF) << VTTBR_VMID_SHIFT)
+#define VTTBR_VMID_MASK(size)	(_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)

 /* Hyp System Trap Register */
 #define HSTR_EL2_T(x)	(1 << x)
......
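A worked check of the parameterized mask (editorial note, not patch content):

	/* VTTBR_VMID_MASK(8)  = 0x00ff000000000000
	 * VTTBR_VMID_MASK(16) = 0xffff000000000000
	 * i.e. ((1 << size) - 1), widened to u64, placed in bits [63:48]. */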
@@ -20,82 +20,6 @@

 #include <asm/virt.h>

-/*
- * 0 is reserved as an invalid value.
- * Order *must* be kept in sync with the hyp switch code.
- */
-#define	MPIDR_EL1	1	/* MultiProcessor Affinity Register */
-#define	CSSELR_EL1	2	/* Cache Size Selection Register */
-#define	SCTLR_EL1	3	/* System Control Register */
-#define	ACTLR_EL1	4	/* Auxiliary Control Register */
-#define	CPACR_EL1	5	/* Coprocessor Access Control */
-#define	TTBR0_EL1	6	/* Translation Table Base Register 0 */
-#define	TTBR1_EL1	7	/* Translation Table Base Register 1 */
-#define	TCR_EL1		8	/* Translation Control Register */
-#define	ESR_EL1		9	/* Exception Syndrome Register */
-#define	AFSR0_EL1	10	/* Auxiliary Fault Status Register 0 */
-#define	AFSR1_EL1	11	/* Auxiliary Fault Status Register 1 */
-#define	FAR_EL1		12	/* Fault Address Register */
-#define	MAIR_EL1	13	/* Memory Attribute Indirection Register */
-#define	VBAR_EL1	14	/* Vector Base Address Register */
-#define	CONTEXTIDR_EL1	15	/* Context ID Register */
-#define	TPIDR_EL0	16	/* Thread ID, User R/W */
-#define	TPIDRRO_EL0	17	/* Thread ID, User R/O */
-#define	TPIDR_EL1	18	/* Thread ID, Privileged */
-#define	AMAIR_EL1	19	/* Aux Memory Attribute Indirection Register */
-#define	CNTKCTL_EL1	20	/* Timer Control Register (EL1) */
-#define	PAR_EL1		21	/* Physical Address Register */
-#define	MDSCR_EL1	22	/* Monitor Debug System Control Register */
-#define	MDCCINT_EL1	23	/* Monitor Debug Comms Channel Interrupt Enable Reg */
-
-/* 32bit specific registers. Keep them at the end of the range */
-#define	DACR32_EL2	24	/* Domain Access Control Register */
-#define	IFSR32_EL2	25	/* Instruction Fault Status Register */
-#define	FPEXC32_EL2	26	/* Floating-Point Exception Control Register */
-#define	DBGVCR32_EL2	27	/* Debug Vector Catch Register */
-#define	NR_SYS_REGS	28
-
-/* 32bit mapping */
-#define c0_MPIDR	(MPIDR_EL1 * 2)	/* MultiProcessor ID Register */
-#define c0_CSSELR	(CSSELR_EL1 * 2)/* Cache Size Selection Register */
-#define c1_SCTLR	(SCTLR_EL1 * 2)	/* System Control Register */
-#define c1_ACTLR	(ACTLR_EL1 * 2)	/* Auxiliary Control Register */
-#define c1_CPACR	(CPACR_EL1 * 2)	/* Coprocessor Access Control */
-#define c2_TTBR0	(TTBR0_EL1 * 2)	/* Translation Table Base Register 0 */
-#define c2_TTBR0_high	(c2_TTBR0 + 1)	/* TTBR0 top 32 bits */
-#define c2_TTBR1	(TTBR1_EL1 * 2)	/* Translation Table Base Register 1 */
-#define c2_TTBR1_high	(c2_TTBR1 + 1)	/* TTBR1 top 32 bits */
-#define c2_TTBCR	(TCR_EL1 * 2)	/* Translation Table Base Control R. */
-#define c3_DACR		(DACR32_EL2 * 2)/* Domain Access Control Register */
-#define c5_DFSR		(ESR_EL1 * 2)	/* Data Fault Status Register */
-#define c5_IFSR		(IFSR32_EL2 * 2)/* Instruction Fault Status Register */
-#define c5_ADFSR	(AFSR0_EL1 * 2)	/* Auxiliary Data Fault Status R */
-#define c5_AIFSR	(AFSR1_EL1 * 2)	/* Auxiliary Instr Fault Status R */
-#define c6_DFAR		(FAR_EL1 * 2)	/* Data Fault Address Register */
-#define c6_IFAR		(c6_DFAR + 1)	/* Instruction Fault Address Register */
-#define c7_PAR		(PAR_EL1 * 2)	/* Physical Address Register */
-#define c7_PAR_high	(c7_PAR + 1)	/* PAR top 32 bits */
-#define c10_PRRR	(MAIR_EL1 * 2)	/* Primary Region Remap Register */
-#define c10_NMRR	(c10_PRRR + 1)	/* Normal Memory Remap Register */
-#define c12_VBAR	(VBAR_EL1 * 2)	/* Vector Base Address Register */
-#define c13_CID		(CONTEXTIDR_EL1 * 2)	/* Context ID Register */
-#define c13_TID_URW	(TPIDR_EL0 * 2)	/* Thread ID, User R/W */
-#define c13_TID_URO	(TPIDRRO_EL0 * 2)/* Thread ID, User R/O */
-#define c13_TID_PRIV	(TPIDR_EL1 * 2)	/* Thread ID, Privileged */
-#define c10_AMAIR0	(AMAIR_EL1 * 2)	/* Aux Memory Attr Indirection Reg */
-#define c10_AMAIR1	(c10_AMAIR0 + 1)/* Aux Memory Attr Indirection Reg */
-#define c14_CNTKCTL	(CNTKCTL_EL1 * 2) /* Timer Control Register (PL1) */
-
-#define cp14_DBGDSCRext	(MDSCR_EL1 * 2)
-#define cp14_DBGBCR0	(DBGBCR0_EL1 * 2)
-#define cp14_DBGBVR0	(DBGBVR0_EL1 * 2)
-#define cp14_DBGBXVR0	(cp14_DBGBVR0 + 1)
-#define cp14_DBGWCR0	(DBGWCR0_EL1 * 2)
-#define cp14_DBGWVR0	(DBGWVR0_EL1 * 2)
-#define cp14_DBGDCCINT	(MDCCINT_EL1 * 2)
-
-#define NR_COPRO_REGS	(NR_SYS_REGS * 2)
-
 #define ARM_EXCEPTION_IRQ	0
 #define ARM_EXCEPTION_TRAP	1
......
@@ -26,7 +26,6 @@

 #include <asm/esr.h>
 #include <asm/kvm_arm.h>
-#include <asm/kvm_asm.h>
 #include <asm/kvm_mmio.h>
 #include <asm/ptrace.h>
 #include <asm/cputype.h>
......
@@ -25,7 +25,6 @@
 #include <linux/types.h>
 #include <linux/kvm_types.h>
 #include <asm/kvm.h>
-#include <asm/kvm_asm.h>
 #include <asm/kvm_mmio.h>

 #define __KVM_HAVE_ARCH_INTC_INITIALIZED
@@ -85,6 +84,86 @@ struct kvm_vcpu_fault_info {
 	u64 hpfar_el2;		/* Hyp IPA Fault Address Register */
 };

+/*
+ * 0 is reserved as an invalid value.
+ * Order should be kept in sync with the save/restore code.
+ */
+enum vcpu_sysreg {
+	__INVALID_SYSREG__,
+	MPIDR_EL1,	/* MultiProcessor Affinity Register */
+	CSSELR_EL1,	/* Cache Size Selection Register */
+	SCTLR_EL1,	/* System Control Register */
+	ACTLR_EL1,	/* Auxiliary Control Register */
+	CPACR_EL1,	/* Coprocessor Access Control */
+	TTBR0_EL1,	/* Translation Table Base Register 0 */
+	TTBR1_EL1,	/* Translation Table Base Register 1 */
+	TCR_EL1,	/* Translation Control Register */
+	ESR_EL1,	/* Exception Syndrome Register */
+	AFSR0_EL1,	/* Auxiliary Fault Status Register 0 */
+	AFSR1_EL1,	/* Auxiliary Fault Status Register 1 */
+	FAR_EL1,	/* Fault Address Register */
+	MAIR_EL1,	/* Memory Attribute Indirection Register */
+	VBAR_EL1,	/* Vector Base Address Register */
+	CONTEXTIDR_EL1,	/* Context ID Register */
+	TPIDR_EL0,	/* Thread ID, User R/W */
+	TPIDRRO_EL0,	/* Thread ID, User R/O */
+	TPIDR_EL1,	/* Thread ID, Privileged */
+	AMAIR_EL1,	/* Aux Memory Attribute Indirection Register */
+	CNTKCTL_EL1,	/* Timer Control Register (EL1) */
+	PAR_EL1,	/* Physical Address Register */
+	MDSCR_EL1,	/* Monitor Debug System Control Register */
+	MDCCINT_EL1,	/* Monitor Debug Comms Channel Interrupt Enable Reg */
+
+	/* 32bit specific registers. Keep them at the end of the range */
+	DACR32_EL2,	/* Domain Access Control Register */
+	IFSR32_EL2,	/* Instruction Fault Status Register */
+	FPEXC32_EL2,	/* Floating-Point Exception Control Register */
+	DBGVCR32_EL2,	/* Debug Vector Catch Register */
+
+	NR_SYS_REGS	/* Nothing after this line! */
+};
+
+/* 32bit mapping */
+#define c0_MPIDR	(MPIDR_EL1 * 2)	/* MultiProcessor ID Register */
+#define c0_CSSELR	(CSSELR_EL1 * 2)/* Cache Size Selection Register */
+#define c1_SCTLR	(SCTLR_EL1 * 2)	/* System Control Register */
+#define c1_ACTLR	(ACTLR_EL1 * 2)	/* Auxiliary Control Register */
+#define c1_CPACR	(CPACR_EL1 * 2)	/* Coprocessor Access Control */
+#define c2_TTBR0	(TTBR0_EL1 * 2)	/* Translation Table Base Register 0 */
+#define c2_TTBR0_high	(c2_TTBR0 + 1)	/* TTBR0 top 32 bits */
+#define c2_TTBR1	(TTBR1_EL1 * 2)	/* Translation Table Base Register 1 */
+#define c2_TTBR1_high	(c2_TTBR1 + 1)	/* TTBR1 top 32 bits */
+#define c2_TTBCR	(TCR_EL1 * 2)	/* Translation Table Base Control R. */
+#define c3_DACR		(DACR32_EL2 * 2)/* Domain Access Control Register */
+#define c5_DFSR		(ESR_EL1 * 2)	/* Data Fault Status Register */
+#define c5_IFSR		(IFSR32_EL2 * 2)/* Instruction Fault Status Register */
+#define c5_ADFSR	(AFSR0_EL1 * 2)	/* Auxiliary Data Fault Status R */
+#define c5_AIFSR	(AFSR1_EL1 * 2)	/* Auxiliary Instr Fault Status R */
+#define c6_DFAR		(FAR_EL1 * 2)	/* Data Fault Address Register */
+#define c6_IFAR		(c6_DFAR + 1)	/* Instruction Fault Address Register */
+#define c7_PAR		(PAR_EL1 * 2)	/* Physical Address Register */
+#define c7_PAR_high	(c7_PAR + 1)	/* PAR top 32 bits */
+#define c10_PRRR	(MAIR_EL1 * 2)	/* Primary Region Remap Register */
+#define c10_NMRR	(c10_PRRR + 1)	/* Normal Memory Remap Register */
+#define c12_VBAR	(VBAR_EL1 * 2)	/* Vector Base Address Register */
+#define c13_CID		(CONTEXTIDR_EL1 * 2)	/* Context ID Register */
+#define c13_TID_URW	(TPIDR_EL0 * 2)	/* Thread ID, User R/W */
+#define c13_TID_URO	(TPIDRRO_EL0 * 2)/* Thread ID, User R/O */
+#define c13_TID_PRIV	(TPIDR_EL1 * 2)	/* Thread ID, Privileged */
+#define c10_AMAIR0	(AMAIR_EL1 * 2)	/* Aux Memory Attr Indirection Reg */
+#define c10_AMAIR1	(c10_AMAIR0 + 1)/* Aux Memory Attr Indirection Reg */
+#define c14_CNTKCTL	(CNTKCTL_EL1 * 2) /* Timer Control Register (PL1) */
+
+#define cp14_DBGDSCRext	(MDSCR_EL1 * 2)
+#define cp14_DBGBCR0	(DBGBCR0_EL1 * 2)
+#define cp14_DBGBVR0	(DBGBVR0_EL1 * 2)
+#define cp14_DBGBXVR0	(cp14_DBGBVR0 + 1)
+#define cp14_DBGWCR0	(DBGWCR0_EL1 * 2)
+#define cp14_DBGWVR0	(DBGWVR0_EL1 * 2)
+#define cp14_DBGDCCINT	(MDCCINT_EL1 * 2)
+
+#define NR_COPRO_REGS	(NR_SYS_REGS * 2)
+
 struct kvm_cpu_context {
 	struct kvm_regs	gp_regs;
 	union {
@@ -197,6 +276,12 @@ struct kvm_vcpu_stat {
 	u32 halt_successful_poll;
 	u32 halt_attempted_poll;
 	u32 halt_wakeup;
+	u32 hvc_exit_stat;
+	u64 wfe_exit_stat;
+	u64 wfi_exit_stat;
+	u64 mmio_exit_user;
+	u64 mmio_exit_kernel;
+	u64 exits;
 };

 int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
......
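Why the 32-bit indices are "EL1 index * 2": the vcpu context keeps one array,
and the AArch32 view aliases two u32 slots onto each u64 sysreg slot. A hedged
standalone model of the idea (not the kernel's actual struct):

	#include <stdint.h>

	union ctxt_regs {
		uint64_t sys_regs[28];	/* indexed by enum vcpu_sysreg */
		uint32_t copro[28 * 2];	/* indexed by the c*_ / cp14_ defines */
	};
	/* On a little-endian host, copro[SCTLR_EL1 * 2] overlays the low
	 * 32 bits of sys_regs[SCTLR_EL1]. */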
@@ -19,7 +19,6 @@
 #define __ARM64_KVM_MMIO_H__

 #include <linux/kvm_host.h>
-#include <asm/kvm_asm.h>
 #include <asm/kvm_arm.h>

 /*
......
@@ -20,6 +20,7 @@

 #include <asm/page.h>
 #include <asm/memory.h>
+#include <asm/cpufeature.h>

 /*
  * As we only have the TTBR0_EL2 register, we cannot express
@@ -158,7 +159,6 @@ static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
 #define PTRS_PER_S2_PGD_SHIFT	(KVM_PHYS_SHIFT - PGDIR_SHIFT)
 #endif
 #define PTRS_PER_S2_PGD		(1 << PTRS_PER_S2_PGD_SHIFT)
-#define S2_PGD_ORDER		get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))

 #define kvm_pgd_index(addr)	(((addr) >> PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1))
@@ -302,5 +302,12 @@ static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
 	merged_hyp_pgd[idmap_idx] = __pgd(__pa(boot_hyp_pgd) | PMD_TYPE_TABLE);
 }

+static inline unsigned int kvm_get_vmid_bits(void)
+{
+	int reg = read_system_reg(SYS_ID_AA64MMFR1_EL1);
+
+	return (cpuid_feature_extract_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
+}
+
 #endif /* __ASSEMBLY__ */
 #endif /* __ARM64_KVM_MMU_H__ */
@@ -20,6 +20,8 @@
 #ifndef __ASM_SYSREG_H
 #define __ASM_SYSREG_H

+#include <linux/stringify.h>
+
 #include <asm/opcodes.h>

 /*
@@ -208,6 +210,8 @@

 #else

+#include <linux/types.h>
+
 asm(
 "	.irp	num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30\n"
 "	.equ	__reg_num_x\\num, \\num\n"
@@ -232,6 +236,23 @@ static inline void config_sctlr_el1(u32 clear, u32 set)
 	val |= set;
 	asm volatile("msr sctlr_el1, %0" : : "r" (val));
 }

+/*
+ * Unlike read_cpuid, calls to read_sysreg are never expected to be
+ * optimized away or replaced with synthetic values.
+ */
+#define read_sysreg(r) ({					\
+	u64 __val;						\
+	asm volatile("mrs %0, " __stringify(r) : "=r" (__val));	\
+	__val;							\
+})
+
+#define write_sysreg(v, r) do {					\
+	u64 __val = (u64)v;					\
+	asm volatile("msr " __stringify(r) ", %0"		\
+		     : : "r" (__val));				\
+} while (0)
+
 #endif

 #endif	/* __ASM_SYSREG_H */
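A quick usage sketch for the new accessors (registers chosen arbitrarily;
vectors_addr is a hypothetical variable):

	u64 midr = read_sysreg(midr_el1);	/* expands to: mrs %0, midr_el1 */
	write_sysreg(vectors_addr, vbar_el1);	/* expands to: msr vbar_el1, %0 */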
@@ -109,49 +109,11 @@ int main(void)
 	DEFINE(CPU_GP_REGS,		offsetof(struct kvm_cpu_context, gp_regs));
 	DEFINE(CPU_USER_PT_REGS,	offsetof(struct kvm_regs, regs));
 	DEFINE(CPU_FP_REGS,		offsetof(struct kvm_regs, fp_regs));
-	DEFINE(CPU_SP_EL1,		offsetof(struct kvm_regs, sp_el1));
-	DEFINE(CPU_ELR_EL1,		offsetof(struct kvm_regs, elr_el1));
-	DEFINE(CPU_SPSR,		offsetof(struct kvm_regs, spsr));
-	DEFINE(CPU_SYSREGS,		offsetof(struct kvm_cpu_context, sys_regs));
+	DEFINE(VCPU_FPEXC32_EL2,	offsetof(struct kvm_vcpu, arch.ctxt.sys_regs[FPEXC32_EL2]));
 	DEFINE(VCPU_ESR_EL2,		offsetof(struct kvm_vcpu, arch.fault.esr_el2));
 	DEFINE(VCPU_FAR_EL2,		offsetof(struct kvm_vcpu, arch.fault.far_el2));
 	DEFINE(VCPU_HPFAR_EL2,		offsetof(struct kvm_vcpu, arch.fault.hpfar_el2));
-	DEFINE(VCPU_DEBUG_FLAGS,	offsetof(struct kvm_vcpu, arch.debug_flags));
-	DEFINE(VCPU_DEBUG_PTR,		offsetof(struct kvm_vcpu, arch.debug_ptr));
-	DEFINE(DEBUG_BCR,		offsetof(struct kvm_guest_debug_arch, dbg_bcr));
-	DEFINE(DEBUG_BVR,		offsetof(struct kvm_guest_debug_arch, dbg_bvr));
-	DEFINE(DEBUG_WCR,		offsetof(struct kvm_guest_debug_arch, dbg_wcr));
-	DEFINE(DEBUG_WVR,		offsetof(struct kvm_guest_debug_arch, dbg_wvr));
-	DEFINE(VCPU_HCR_EL2,		offsetof(struct kvm_vcpu, arch.hcr_el2));
-	DEFINE(VCPU_MDCR_EL2,		offsetof(struct kvm_vcpu, arch.mdcr_el2));
-	DEFINE(VCPU_IRQ_LINES,		offsetof(struct kvm_vcpu, arch.irq_lines));
 	DEFINE(VCPU_HOST_CONTEXT,	offsetof(struct kvm_vcpu, arch.host_cpu_context));
-	DEFINE(VCPU_HOST_DEBUG_STATE,	offsetof(struct kvm_vcpu, arch.host_debug_state));
-	DEFINE(VCPU_TIMER_CNTV_CTL,	offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl));
-	DEFINE(VCPU_TIMER_CNTV_CVAL,	offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_cval));
-	DEFINE(KVM_TIMER_CNTVOFF,	offsetof(struct kvm, arch.timer.cntvoff));
-	DEFINE(KVM_TIMER_ENABLED,	offsetof(struct kvm, arch.timer.enabled));
-	DEFINE(VCPU_KVM,		offsetof(struct kvm_vcpu, kvm));
-	DEFINE(VCPU_VGIC_CPU,		offsetof(struct kvm_vcpu, arch.vgic_cpu));
-	DEFINE(VGIC_V2_CPU_HCR,		offsetof(struct vgic_cpu, vgic_v2.vgic_hcr));
-	DEFINE(VGIC_V2_CPU_VMCR,	offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr));
-	DEFINE(VGIC_V2_CPU_MISR,	offsetof(struct vgic_cpu, vgic_v2.vgic_misr));
-	DEFINE(VGIC_V2_CPU_EISR,	offsetof(struct vgic_cpu, vgic_v2.vgic_eisr));
-	DEFINE(VGIC_V2_CPU_ELRSR,	offsetof(struct vgic_cpu, vgic_v2.vgic_elrsr));
-	DEFINE(VGIC_V2_CPU_APR,		offsetof(struct vgic_cpu, vgic_v2.vgic_apr));
-	DEFINE(VGIC_V2_CPU_LR,		offsetof(struct vgic_cpu, vgic_v2.vgic_lr));
-	DEFINE(VGIC_V3_CPU_SRE,		offsetof(struct vgic_cpu, vgic_v3.vgic_sre));
-	DEFINE(VGIC_V3_CPU_HCR,		offsetof(struct vgic_cpu, vgic_v3.vgic_hcr));
-	DEFINE(VGIC_V3_CPU_VMCR,	offsetof(struct vgic_cpu, vgic_v3.vgic_vmcr));
-	DEFINE(VGIC_V3_CPU_MISR,	offsetof(struct vgic_cpu, vgic_v3.vgic_misr));
-	DEFINE(VGIC_V3_CPU_EISR,	offsetof(struct vgic_cpu, vgic_v3.vgic_eisr));
-	DEFINE(VGIC_V3_CPU_ELRSR,	offsetof(struct vgic_cpu, vgic_v3.vgic_elrsr));
-	DEFINE(VGIC_V3_CPU_AP0R,	offsetof(struct vgic_cpu, vgic_v3.vgic_ap0r));
-	DEFINE(VGIC_V3_CPU_AP1R,	offsetof(struct vgic_cpu, vgic_v3.vgic_ap1r));
-	DEFINE(VGIC_V3_CPU_LR,		offsetof(struct vgic_cpu, vgic_v3.vgic_lr));
-	DEFINE(VGIC_CPU_NR_LR,		offsetof(struct vgic_cpu, nr_lr));
-	DEFINE(KVM_VTTBR,		offsetof(struct kvm, arch.vttbr));
-	DEFINE(KVM_VGIC_VCTRL,		offsetof(struct kvm, arch.vgic.vctrl_base));
 #endif
 #ifdef CONFIG_CPU_PM
 	DEFINE(CPU_SUSPEND_SZ,		sizeof(struct cpu_suspend_ctx));
......
@@ -10,6 +10,7 @@ KVM=../../../virt/kvm
 ARM=../../../arch/arm/kvm

 obj-$(CONFIG_KVM_ARM_HOST) += kvm.o
+obj-$(CONFIG_KVM_ARM_HOST) += hyp/

 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o $(KVM)/vfio.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/arm.o $(ARM)/mmu.o $(ARM)/mmio.o
@@ -22,8 +23,6 @@ kvm-$(CONFIG_KVM_ARM_HOST) += guest.o debug.o reset.o sys_regs.o sys_regs_generi
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v2.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v2-emul.o
-kvm-$(CONFIG_KVM_ARM_HOST) += vgic-v2-switch.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3-emul.o
-kvm-$(CONFIG_KVM_ARM_HOST) += vgic-v3-switch.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/arch_timer.o
@@ -28,13 +28,21 @@
 #include <asm/cputype.h>
 #include <asm/uaccess.h>
 #include <asm/kvm.h>
+#include <asm/kvm_asm.h>
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_coproc.h>

 #include "trace.h"

+#define VM_STAT(x) { #x, offsetof(struct kvm, stat.x), KVM_STAT_VM }
+#define VCPU_STAT(x) { #x, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU }
+
 struct kvm_stats_debugfs_item debugfs_entries[] = {
+	VCPU_STAT(hvc_exit_stat),
+	VCPU_STAT(wfe_exit_stat),
+	VCPU_STAT(wfi_exit_stat),
+	VCPU_STAT(mmio_exit_user),
+	VCPU_STAT(mmio_exit_kernel),
+	VCPU_STAT(exits),
 	{ NULL }
 };
......
@@ -23,6 +23,7 @@
 #include <linux/kvm_host.h>

 #include <asm/esr.h>
+#include <asm/kvm_asm.h>
 #include <asm/kvm_coproc.h>
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_mmu.h>
@@ -39,6 +40,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)

 	trace_kvm_hvc_arm64(*vcpu_pc(vcpu), vcpu_get_reg(vcpu, 0),
 			    kvm_vcpu_hvc_get_imm(vcpu));
+	vcpu->stat.hvc_exit_stat++;

 	ret = kvm_psci_call(vcpu);
 	if (ret < 0) {
@@ -71,9 +73,11 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 	if (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WFx_ISS_WFE) {
 		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true);
+		vcpu->stat.wfe_exit_stat++;
 		kvm_vcpu_on_spin(vcpu);
 	} else {
 		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), false);
+		vcpu->stat.wfi_exit_stat++;
 		kvm_vcpu_block(vcpu);
 	}
......
@@ -94,6 +94,15 @@ __do_hyp_init:
 	 */
 	mrs	x5, ID_AA64MMFR0_EL1
 	bfi	x4, x5, #16, #3
+	/*
+	 * Read the VMIDBits bits from ID_AA64MMFR1_EL1 and set the VS bit in
+	 * VTCR_EL2.
+	 */
+	mrs	x5, ID_AA64MMFR1_EL1
+	ubfx	x5, x5, #5, #1
+	lsl	x5, x5, #VTCR_EL2_VS
+	orr	x4, x4, x5
+
 	msr	vtcr_el2, x4

 	mrs	x4, mair_el1
......
#
# Makefile for Kernel-based Virtual Machine module, HYP part
#
obj-$(CONFIG_KVM_ARM_HOST) += vgic-v2-sr.o
obj-$(CONFIG_KVM_ARM_HOST) += vgic-v3-sr.o
obj-$(CONFIG_KVM_ARM_HOST) += timer-sr.o
obj-$(CONFIG_KVM_ARM_HOST) += sysreg-sr.o
obj-$(CONFIG_KVM_ARM_HOST) += debug-sr.o
obj-$(CONFIG_KVM_ARM_HOST) += entry.o
obj-$(CONFIG_KVM_ARM_HOST) += switch.o
obj-$(CONFIG_KVM_ARM_HOST) += fpsimd.o
obj-$(CONFIG_KVM_ARM_HOST) += tlb.o
obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o
/*
* Copyright (C) 2015 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include "hyp.h"
#define read_debug(r,n) read_sysreg(r##n##_el1)
#define write_debug(v,r,n) write_sysreg(v, r##n##_el1)
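/*
 * Editorial note: save_debug()/restore_debug() below walk from index nr
 * down to 0 via deliberate switch fall-through; nr is the highest
 * implemented register index (the ID_AA64DFR0_EL1.BRPs/WRPs fields
 * encode "count minus one").
 */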
#define save_debug(ptr,reg,nr) \
switch (nr) { \
case 15: ptr[15] = read_debug(reg, 15); \
case 14: ptr[14] = read_debug(reg, 14); \
case 13: ptr[13] = read_debug(reg, 13); \
case 12: ptr[12] = read_debug(reg, 12); \
case 11: ptr[11] = read_debug(reg, 11); \
case 10: ptr[10] = read_debug(reg, 10); \
case 9: ptr[9] = read_debug(reg, 9); \
case 8: ptr[8] = read_debug(reg, 8); \
case 7: ptr[7] = read_debug(reg, 7); \
case 6: ptr[6] = read_debug(reg, 6); \
case 5: ptr[5] = read_debug(reg, 5); \
case 4: ptr[4] = read_debug(reg, 4); \
case 3: ptr[3] = read_debug(reg, 3); \
case 2: ptr[2] = read_debug(reg, 2); \
case 1: ptr[1] = read_debug(reg, 1); \
default: ptr[0] = read_debug(reg, 0); \
}
#define restore_debug(ptr,reg,nr) \
switch (nr) { \
case 15: write_debug(ptr[15], reg, 15); \
case 14: write_debug(ptr[14], reg, 14); \
case 13: write_debug(ptr[13], reg, 13); \
case 12: write_debug(ptr[12], reg, 12); \
case 11: write_debug(ptr[11], reg, 11); \
case 10: write_debug(ptr[10], reg, 10); \
case 9: write_debug(ptr[9], reg, 9); \
case 8: write_debug(ptr[8], reg, 8); \
case 7: write_debug(ptr[7], reg, 7); \
case 6: write_debug(ptr[6], reg, 6); \
case 5: write_debug(ptr[5], reg, 5); \
case 4: write_debug(ptr[4], reg, 4); \
case 3: write_debug(ptr[3], reg, 3); \
case 2: write_debug(ptr[2], reg, 2); \
case 1: write_debug(ptr[1], reg, 1); \
default: write_debug(ptr[0], reg, 0); \
}
void __hyp_text __debug_save_state(struct kvm_vcpu *vcpu,
struct kvm_guest_debug_arch *dbg,
struct kvm_cpu_context *ctxt)
{
u64 aa64dfr0;
int brps, wrps;
if (!(vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY))
return;
aa64dfr0 = read_sysreg(id_aa64dfr0_el1);
brps = (aa64dfr0 >> 12) & 0xf;
wrps = (aa64dfr0 >> 20) & 0xf;
save_debug(dbg->dbg_bcr, dbgbcr, brps);
save_debug(dbg->dbg_bvr, dbgbvr, brps);
save_debug(dbg->dbg_wcr, dbgwcr, wrps);
save_debug(dbg->dbg_wvr, dbgwvr, wrps);
ctxt->sys_regs[MDCCINT_EL1] = read_sysreg(mdccint_el1);
}
void __hyp_text __debug_restore_state(struct kvm_vcpu *vcpu,
struct kvm_guest_debug_arch *dbg,
struct kvm_cpu_context *ctxt)
{
u64 aa64dfr0;
int brps, wrps;
if (!(vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY))
return;
aa64dfr0 = read_sysreg(id_aa64dfr0_el1);
brps = (aa64dfr0 >> 12) & 0xf;
wrps = (aa64dfr0 >> 20) & 0xf;
restore_debug(dbg->dbg_bcr, dbgbcr, brps);
restore_debug(dbg->dbg_bvr, dbgbvr, brps);
restore_debug(dbg->dbg_wcr, dbgwcr, wrps);
restore_debug(dbg->dbg_wvr, dbgwvr, wrps);
write_sysreg(ctxt->sys_regs[MDCCINT_EL1], mdccint_el1);
}
void __hyp_text __debug_cond_save_host_state(struct kvm_vcpu *vcpu)
{
/* If any of KDE, MDE or KVM_ARM64_DEBUG_DIRTY is set, perform
* a full save/restore cycle. */
if ((vcpu->arch.ctxt.sys_regs[MDSCR_EL1] & DBG_MDSCR_KDE) ||
(vcpu->arch.ctxt.sys_regs[MDSCR_EL1] & DBG_MDSCR_MDE))
vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
__debug_save_state(vcpu, &vcpu->arch.host_debug_state,
kern_hyp_va(vcpu->arch.host_cpu_context));
}
void __hyp_text __debug_cond_restore_host_state(struct kvm_vcpu *vcpu)
{
__debug_restore_state(vcpu, &vcpu->arch.host_debug_state,
kern_hyp_va(vcpu->arch.host_cpu_context));
if (vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY)
vcpu->arch.debug_flags &= ~KVM_ARM64_DEBUG_DIRTY;
}
static u32 __hyp_text __debug_read_mdcr_el2(void)
{
return read_sysreg(mdcr_el2);
}
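/*
 * Editorial note: the __alias below appears to expose the __hyp_text
 * helper to the rest of the kernel under its conventional __kvm_ name.
 */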
__alias(__debug_read_mdcr_el2) u32 __kvm_get_mdcr_el2(void);
/*
* Copyright (C) 2015 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/fpsimdmacros.h>
#include <asm/kvm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#define CPU_GP_REG_OFFSET(x) (CPU_GP_REGS + x)
#define CPU_XREG_OFFSET(x) CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
.text
.pushsection .hyp.text, "ax"
.macro save_callee_saved_regs ctxt
stp x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
stp x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
stp x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
stp x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
stp x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
stp x29, lr, [\ctxt, #CPU_XREG_OFFSET(29)]
.endm
.macro restore_callee_saved_regs ctxt
ldp x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
ldp x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
ldp x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
ldp x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
ldp x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
ldp x29, lr, [\ctxt, #CPU_XREG_OFFSET(29)]
.endm
/*
* u64 __guest_enter(struct kvm_vcpu *vcpu,
* struct kvm_cpu_context *host_ctxt);
*/
ENTRY(__guest_enter)
// x0: vcpu
// x1: host/guest context
// x2-x18: clobbered by macros
// Store the host regs
save_callee_saved_regs x1
// Preserve vcpu & host_ctxt for use at exit time
stp x0, x1, [sp, #-16]!
add x1, x0, #VCPU_CONTEXT
// Prepare x0-x1 for later restore by pushing them onto the stack
ldp x2, x3, [x1, #CPU_XREG_OFFSET(0)]
stp x2, x3, [sp, #-16]!
// x2-x18
ldp x2, x3, [x1, #CPU_XREG_OFFSET(2)]
ldp x4, x5, [x1, #CPU_XREG_OFFSET(4)]
ldp x6, x7, [x1, #CPU_XREG_OFFSET(6)]
ldp x8, x9, [x1, #CPU_XREG_OFFSET(8)]
ldp x10, x11, [x1, #CPU_XREG_OFFSET(10)]
ldp x12, x13, [x1, #CPU_XREG_OFFSET(12)]
ldp x14, x15, [x1, #CPU_XREG_OFFSET(14)]
ldp x16, x17, [x1, #CPU_XREG_OFFSET(16)]
ldr x18, [x1, #CPU_XREG_OFFSET(18)]
// x19-x29, lr
restore_callee_saved_regs x1
// Last bits of the 64bit state
ldp x0, x1, [sp], #16
// Do not touch any register after this!
eret
ENDPROC(__guest_enter)
ENTRY(__guest_exit)
// x0: vcpu
// x1: return code
// x2-x3: free
// x4-x29,lr: vcpu regs
// vcpu x0-x3 on the stack
add x2, x0, #VCPU_CONTEXT
stp x4, x5, [x2, #CPU_XREG_OFFSET(4)]
stp x6, x7, [x2, #CPU_XREG_OFFSET(6)]
stp x8, x9, [x2, #CPU_XREG_OFFSET(8)]
stp x10, x11, [x2, #CPU_XREG_OFFSET(10)]
stp x12, x13, [x2, #CPU_XREG_OFFSET(12)]
stp x14, x15, [x2, #CPU_XREG_OFFSET(14)]
stp x16, x17, [x2, #CPU_XREG_OFFSET(16)]
str x18, [x2, #CPU_XREG_OFFSET(18)]
ldp x6, x7, [sp], #16 // x2, x3
ldp x4, x5, [sp], #16 // x0, x1
stp x4, x5, [x2, #CPU_XREG_OFFSET(0)]
stp x6, x7, [x2, #CPU_XREG_OFFSET(2)]
save_callee_saved_regs x2
// Restore vcpu & host_ctxt from the stack
// (preserving return code in x1)
ldp x0, x2, [sp], #16
// Now restore the host regs
restore_callee_saved_regs x2
mov x0, x1
ret
ENDPROC(__guest_exit)
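/*
 * Editorial note: lazy FP/SIMD switch, reached on the guest's first
 * FP/SIMD trap. The code below clears the CPTR_EL2.TFP trap, saves the
 * host's FP state, restores the guest's, then returns straight to the
 * guest.
 */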
ENTRY(__fpsimd_guest_restore)
stp x4, lr, [sp, #-16]!
mrs x2, cptr_el2
bic x2, x2, #CPTR_EL2_TFP
msr cptr_el2, x2
isb
mrs x3, tpidr_el2
ldr x0, [x3, #VCPU_HOST_CONTEXT]
kern_hyp_va x0
add x0, x0, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
bl __fpsimd_save_state
add x2, x3, #VCPU_CONTEXT
add x0, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
bl __fpsimd_restore_state
// Skip restoring fpexc32 for AArch64 guests
mrs x1, hcr_el2
tbnz x1, #HCR_RW_SHIFT, 1f
ldr x4, [x3, #VCPU_FPEXC32_EL2]
msr fpexc32_el2, x4
1:
ldp x4, lr, [sp], #16
ldp x2, x3, [sp], #16
ldp x0, x1, [sp], #16
eret
ENDPROC(__fpsimd_guest_restore)
/*
* Copyright (C) 2015 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/linkage.h>
#include <asm/fpsimdmacros.h>
.text
.pushsection .hyp.text, "ax"
ENTRY(__fpsimd_save_state)
fpsimd_save x0, 1
ret
ENDPROC(__fpsimd_save_state)
ENTRY(__fpsimd_restore_state)
fpsimd_restore x0, 1
ret
ENDPROC(__fpsimd_restore_state)
/*
* Copyright (C) 2015 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/linkage.h>
#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
.text
.pushsection .hyp.text, "ax"
.macro save_x0_to_x3
stp x0, x1, [sp, #-16]!
stp x2, x3, [sp, #-16]!
.endm
.macro restore_x0_to_x3
ldp x2, x3, [sp], #16
ldp x0, x1, [sp], #16
.endm
el1_sync: // Guest trapped into EL2
save_x0_to_x3
mrs x1, esr_el2
lsr x2, x1, #ESR_ELx_EC_SHIFT
cmp x2, #ESR_ELx_EC_HVC64
b.ne el1_trap
mrs x3, vttbr_el2 // If vttbr is valid, the 64bit guest
cbnz x3, el1_trap // called HVC
/* Here, we're pretty sure the host called HVC. */
restore_x0_to_x3
/* Check for __hyp_get_vectors */
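	/* By this series' convention, x0 == 0 requests vbar_el2 rather than a call */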
cbnz x0, 1f
mrs x0, vbar_el2
b 2f
1: stp lr, xzr, [sp, #-16]!
/*
* Compute the function address in EL2, and shuffle the parameters.
*/
kern_hyp_va x0
mov lr, x0
mov x0, x1
mov x1, x2
mov x2, x3
blr lr
ldp lr, xzr, [sp], #16
2: eret
el1_trap:
/*
* x1: ESR
* x2: ESR_EC
*/
/* Guest accessed VFP/SIMD registers, save host, restore Guest */
cmp x2, #ESR_ELx_EC_FP_ASIMD
b.eq __fpsimd_guest_restore
cmp x2, #ESR_ELx_EC_DABT_LOW
mov x0, #ESR_ELx_EC_IABT_LOW
ccmp x2, x0, #4, ne
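	// ccmp: if x2 != DABT_LOW, compare it with IABT_LOW; otherwise force
	// Z set (#4) so either abort type falls through to the abort path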
b.ne 1f // Not an abort we care about
/* This is an abort. Check for permission fault */
alternative_if_not ARM64_WORKAROUND_834220
and x2, x1, #ESR_ELx_FSC_TYPE
cmp x2, #FSC_PERM
b.ne 1f // Not a permission fault
alternative_else
nop // Use the permission fault path to
nop // check for a valid S1 translation,
nop // regardless of the ESR value.
alternative_endif
/*
* Check for Stage-1 page table walk, which is guaranteed
* to give a valid HPFAR_EL2.
*/
tbnz x1, #7, 1f // S1PTW is set
/* Preserve PAR_EL1 */
mrs x3, par_el1
stp x3, xzr, [sp, #-16]!
/*
* Permission fault, HPFAR_EL2 is invalid.
* Resolve the IPA the hard way using the guest VA.
* Stage-1 translation already validated the memory access rights.
* As such, we can use the EL1 translation regime, and don't have
* to distinguish between EL0 and EL1 access.
*/
mrs x2, far_el2
at s1e1r, x2
isb
/* Read result */
mrs x3, par_el1
ldp x0, xzr, [sp], #16 // Restore PAR_EL1 from the stack
msr par_el1, x0
tbnz x3, #0, 3f // Bail out if we failed the translation
ubfx x3, x3, #12, #36 // Extract IPA
lsl x3, x3, #4 // and present it like HPFAR
b 2f
1: mrs x3, hpfar_el2
mrs x2, far_el2
2: mrs x0, tpidr_el2
str w1, [x0, #VCPU_ESR_EL2]
str x2, [x0, #VCPU_FAR_EL2]
str x3, [x0, #VCPU_HPFAR_EL2]
mov x1, #ARM_EXCEPTION_TRAP
b __guest_exit
/*
* Translation failed. Just return to the guest and
* let it fault again. Another CPU is probably playing
* behind our back.
*/
3: restore_x0_to_x3
eret
el1_irq:
save_x0_to_x3
mrs x0, tpidr_el2
mov x1, #ARM_EXCEPTION_IRQ
b __guest_exit
ENTRY(__hyp_do_panic)
mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
PSR_MODE_EL1h)
msr spsr_el2, lr
ldr lr, =panic
msr elr_el2, lr
eret
ENDPROC(__hyp_do_panic)
.macro invalid_vector label, target = __hyp_panic
.align 2
\label:
b \target
ENDPROC(\label)
.endm
/* None of these should ever happen */
invalid_vector el2t_sync_invalid
invalid_vector el2t_irq_invalid
invalid_vector el2t_fiq_invalid
invalid_vector el2t_error_invalid
invalid_vector el2h_sync_invalid
invalid_vector el2h_irq_invalid
invalid_vector el2h_fiq_invalid
invalid_vector el2h_error_invalid
invalid_vector el1_sync_invalid
invalid_vector el1_irq_invalid
invalid_vector el1_fiq_invalid
invalid_vector el1_error_invalid
.ltorg
.align 11
ENTRY(__kvm_hyp_vector)
ventry el2t_sync_invalid // Synchronous EL2t
ventry el2t_irq_invalid // IRQ EL2t
ventry el2t_fiq_invalid // FIQ EL2t
ventry el2t_error_invalid // Error EL2t
ventry el2h_sync_invalid // Synchronous EL2h
ventry el2h_irq_invalid // IRQ EL2h
ventry el2h_fiq_invalid // FIQ EL2h
ventry el2h_error_invalid // Error EL2h
ventry el1_sync // Synchronous 64-bit EL1
ventry el1_irq // IRQ 64-bit EL1
ventry el1_fiq_invalid // FIQ 64-bit EL1
ventry el1_error_invalid // Error 64-bit EL1
ventry el1_sync // Synchronous 32-bit EL1
ventry el1_irq // IRQ 32-bit EL1
ventry el1_fiq_invalid // FIQ 32-bit EL1
ventry el1_error_invalid // Error 32-bit EL1
ENDPROC(__kvm_hyp_vector)
/*
* Copyright (C) 2015 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __ARM64_KVM_HYP_H__
#define __ARM64_KVM_HYP_H__
#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/kvm_mmu.h>
#include <asm/sysreg.h>
#define __hyp_text __section(.hyp.text) notrace
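/*
 * Functions tagged __hyp_text are linked into the EL2-mapped .hyp.text
 * section; notrace keeps ftrace from inserting calls into code that
 * runs while only the HYP mappings are visible.
 */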
#define kern_hyp_va(v) (typeof(v))((unsigned long)(v) & HYP_PAGE_OFFSET_MASK)
#define hyp_kern_va(v) (typeof(v))((unsigned long)(v) - HYP_PAGE_OFFSET \
+ PAGE_OFFSET)
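/*
 * A worked example of the translation (values illustrative, assuming a
 * 48-bit HYP_PAGE_OFFSET_MASK): kern_hyp_va(0xffff800000123456) masks
 * off the kernel's upper bits and yields 0x0000800000123456;
 * hyp_kern_va() reverses it by subtracting HYP_PAGE_OFFSET and adding
 * back PAGE_OFFSET.
 */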
/**
* hyp_alternate_select - Generates patchable code sequences that are
* used to switch between two implementations of a function, depending
* on the availability of a feature.
*
* @fname: a symbol name that will be defined as a function returning a
* function pointer whose type will match @orig and @alt
* @orig: A pointer to the default function, as returned by @fname when
* @cond doesn't hold
* @alt: A pointer to the alternate function, as returned by @fname
* when @cond holds
* @cond: a CPU feature (as described in asm/cpufeature.h)
*/
#define hyp_alternate_select(fname, orig, alt, cond) \
typeof(orig) * __hyp_text fname(void) \
{ \
typeof(alt) *val = orig; \
asm volatile(ALTERNATIVE("nop \n", \
"mov %0, %1 \n", \
cond) \
: "+r" (val) : "r" (alt)); \
return val; \
}
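/*
 * A minimal usage sketch (the names here are hypothetical; real users
 * follow in the world-switch code below):
 *
 *	static void __hyp_text __foo_v2(struct kvm_vcpu *vcpu) { ... }
 *	static void __hyp_text __foo_v3(struct kvm_vcpu *vcpu) { ... }
 *
 *	static hyp_alternate_select(__foo_call, __foo_v2, __foo_v3,
 *				    ARM64_HAS_SYSREG_GIC_CPUIF);
 *
 * __foo_call()(vcpu) then dispatches to __foo_v2 or __foo_v3; the
 * selection is patched in at boot by the alternatives framework, so no
 * feature test runs on the hot path.
 */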
void __vgic_v2_save_state(struct kvm_vcpu *vcpu);
void __vgic_v2_restore_state(struct kvm_vcpu *vcpu);
void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);
void __timer_save_state(struct kvm_vcpu *vcpu);
void __timer_restore_state(struct kvm_vcpu *vcpu);
void __sysreg_save_state(struct kvm_cpu_context *ctxt);
void __sysreg_restore_state(struct kvm_cpu_context *ctxt);
void __sysreg32_save_state(struct kvm_vcpu *vcpu);
void __sysreg32_restore_state(struct kvm_vcpu *vcpu);
void __debug_save_state(struct kvm_vcpu *vcpu,
struct kvm_guest_debug_arch *dbg,
struct kvm_cpu_context *ctxt);
void __debug_restore_state(struct kvm_vcpu *vcpu,
struct kvm_guest_debug_arch *dbg,
struct kvm_cpu_context *ctxt);
void __debug_cond_save_host_state(struct kvm_vcpu *vcpu);
void __debug_cond_restore_host_state(struct kvm_vcpu *vcpu);
void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
static inline bool __fpsimd_enabled(void)
{
return !(read_sysreg(cptr_el2) & CPTR_EL2_TFP);
}
u64 __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt);
void __noreturn __hyp_do_panic(unsigned long, ...);
#endif /* __ARM64_KVM_HYP_H__ */
/*
* Copyright (C) 2015 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "hyp.h"
static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
{
u64 val;
/*
* We are about to set CPTR_EL2.TFP to trap all floating point
* register accesses to EL2, however, the ARM ARM clearly states that
* traps are only taken to EL2 if the operation would not otherwise
* trap to EL1. Therefore, always make sure that for 32-bit guests,
* we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit.
*/
val = vcpu->arch.hcr_el2;
if (!(val & HCR_RW)) {
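		/* FPEXC32_EL2 bit 30 is FPEXC.EN: enable AArch32 FP/SIMD at EL1 */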
write_sysreg(1 << 30, fpexc32_el2);
isb();
}
write_sysreg(val, hcr_el2);
/* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
write_sysreg(1 << 15, hstr_el2);
write_sysreg(CPTR_EL2_TTA | CPTR_EL2_TFP, cptr_el2);
write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
}
static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
{
write_sysreg(HCR_RW, hcr_el2);
write_sysreg(0, hstr_el2);
write_sysreg(read_sysreg(mdcr_el2) & MDCR_EL2_HPMN_MASK, mdcr_el2);
write_sysreg(0, cptr_el2);
}
static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu)
{
struct kvm *kvm = kern_hyp_va(vcpu->kvm);
write_sysreg(kvm->arch.vttbr, vttbr_el2);
}
static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
{
write_sysreg(0, vttbr_el2);
}
static hyp_alternate_select(__vgic_call_save_state,
__vgic_v2_save_state, __vgic_v3_save_state,
ARM64_HAS_SYSREG_GIC_CPUIF);
static hyp_alternate_select(__vgic_call_restore_state,
__vgic_v2_restore_state, __vgic_v3_restore_state,
ARM64_HAS_SYSREG_GIC_CPUIF);
static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu)
{
__vgic_call_save_state()(vcpu);
write_sysreg(read_sysreg(hcr_el2) & ~HCR_INT_OVERRIDE, hcr_el2);
}
static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu)
{
u64 val;
val = read_sysreg(hcr_el2);
val |= HCR_INT_OVERRIDE;
val |= vcpu->arch.irq_lines;
write_sysreg(val, hcr_el2);
__vgic_call_restore_state()(vcpu);
}
static int __hyp_text __guest_run(struct kvm_vcpu *vcpu)
{
struct kvm_cpu_context *host_ctxt;
struct kvm_cpu_context *guest_ctxt;
bool fp_enabled;
u64 exit_code;
vcpu = kern_hyp_va(vcpu);
write_sysreg(vcpu, tpidr_el2);
host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
guest_ctxt = &vcpu->arch.ctxt;
__sysreg_save_state(host_ctxt);
__debug_cond_save_host_state(vcpu);
__activate_traps(vcpu);
__activate_vm(vcpu);
__vgic_restore_state(vcpu);
__timer_restore_state(vcpu);
/*
* We must restore the 32-bit state before the sysregs, thanks
* to Cortex-A57 erratum #852523.
*/
__sysreg32_restore_state(vcpu);
__sysreg_restore_state(guest_ctxt);
__debug_restore_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt);
/* Jump in the fire! */
exit_code = __guest_enter(vcpu, host_ctxt);
/* And we're baaack! */
fp_enabled = __fpsimd_enabled();
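	/*
	 * A clear CPTR_EL2.TFP at this point means the guest took the
	 * FP/SIMD trap and __fpsimd_guest_restore made its registers
	 * live, so they must be saved (and the host's reloaded) below.
	 */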
__sysreg_save_state(guest_ctxt);
__sysreg32_save_state(vcpu);
__timer_save_state(vcpu);
__vgic_save_state(vcpu);
__deactivate_traps(vcpu);
__deactivate_vm(vcpu);
__sysreg_restore_state(host_ctxt);
if (fp_enabled) {
__fpsimd_save_state(&guest_ctxt->gp_regs.fp_regs);
__fpsimd_restore_state(&host_ctxt->gp_regs.fp_regs);
}
__debug_save_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt);
__debug_cond_restore_host_state(vcpu);
return exit_code;
}
__alias(__guest_run) int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
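/*
 * A sketch of the matching EL1 call site (mirroring the arm.c code of
 * this series rather than quoting it):
 *
 *	exit_code = kvm_call_hyp(__kvm_vcpu_run, vcpu);
 *
 * kvm_call_hyp() issues an HVC with the function address in x0, which
 * the el1_sync handler in the hyp entry code turns into a branch here.
 */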
static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";
void __hyp_text __noreturn __hyp_panic(void)
{
unsigned long str_va = (unsigned long)__hyp_panic_string;
u64 spsr = read_sysreg(spsr_el2);
u64 elr = read_sysreg(elr_el2);
u64 par = read_sysreg(par_el1);
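	/*
	 * A non-zero VTTBR means we died with a guest loaded; put the
	 * host context back before calling panic.
	 */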
if (read_sysreg(vttbr_el2)) {
struct kvm_vcpu *vcpu;
struct kvm_cpu_context *host_ctxt;
vcpu = (struct kvm_vcpu *)read_sysreg(tpidr_el2);
host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
__deactivate_traps(vcpu);
__deactivate_vm(vcpu);
__sysreg_restore_state(host_ctxt);
}
/* Call panic for real */
__hyp_do_panic(hyp_kern_va(str_va),
spsr, elr,
read_sysreg(esr_el2), read_sysreg(far_el2),
read_sysreg(hpfar_el2), par,
(void *)read_sysreg(tpidr_el2));
unreachable();
}
/*
* Copyright (C) 2012-2015 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include "hyp.h"
/* ctxt is already in the HYP VA space */
void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt)
{
ctxt->sys_regs[MPIDR_EL1] = read_sysreg(vmpidr_el2);
ctxt->sys_regs[CSSELR_EL1] = read_sysreg(csselr_el1);
ctxt->sys_regs[SCTLR_EL1] = read_sysreg(sctlr_el1);
ctxt->sys_regs[ACTLR_EL1] = read_sysreg(actlr_el1);
ctxt->sys_regs[CPACR_EL1] = read_sysreg(cpacr_el1);
ctxt->sys_regs[TTBR0_EL1] = read_sysreg(ttbr0_el1);
ctxt->sys_regs[TTBR1_EL1] = read_sysreg(ttbr1_el1);
ctxt->sys_regs[TCR_EL1] = read_sysreg(tcr_el1);
ctxt->sys_regs[ESR_EL1] = read_sysreg(esr_el1);
ctxt->sys_regs[AFSR0_EL1] = read_sysreg(afsr0_el1);
ctxt->sys_regs[AFSR1_EL1] = read_sysreg(afsr1_el1);
ctxt->sys_regs[FAR_EL1] = read_sysreg(far_el1);
ctxt->sys_regs[MAIR_EL1] = read_sysreg(mair_el1);
ctxt->sys_regs[VBAR_EL1] = read_sysreg(vbar_el1);
ctxt->sys_regs[CONTEXTIDR_EL1] = read_sysreg(contextidr_el1);
ctxt->sys_regs[TPIDR_EL0] = read_sysreg(tpidr_el0);
ctxt->sys_regs[TPIDRRO_EL0] = read_sysreg(tpidrro_el0);
ctxt->sys_regs[TPIDR_EL1] = read_sysreg(tpidr_el1);
ctxt->sys_regs[AMAIR_EL1] = read_sysreg(amair_el1);
ctxt->sys_regs[CNTKCTL_EL1] = read_sysreg(cntkctl_el1);
ctxt->sys_regs[PAR_EL1] = read_sysreg(par_el1);
ctxt->sys_regs[MDSCR_EL1] = read_sysreg(mdscr_el1);
ctxt->gp_regs.regs.sp = read_sysreg(sp_el0);
ctxt->gp_regs.regs.pc = read_sysreg(elr_el2);
ctxt->gp_regs.regs.pstate = read_sysreg(spsr_el2);
ctxt->gp_regs.sp_el1 = read_sysreg(sp_el1);
ctxt->gp_regs.elr_el1 = read_sysreg(elr_el1);
ctxt->gp_regs.spsr[KVM_SPSR_EL1]= read_sysreg(spsr_el1);
}
void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt)
{
write_sysreg(ctxt->sys_regs[MPIDR_EL1], vmpidr_el2);
write_sysreg(ctxt->sys_regs[CSSELR_EL1], csselr_el1);
write_sysreg(ctxt->sys_regs[SCTLR_EL1], sctlr_el1);
write_sysreg(ctxt->sys_regs[ACTLR_EL1], actlr_el1);
write_sysreg(ctxt->sys_regs[CPACR_EL1], cpacr_el1);
write_sysreg(ctxt->sys_regs[TTBR0_EL1], ttbr0_el1);
write_sysreg(ctxt->sys_regs[TTBR1_EL1], ttbr1_el1);
write_sysreg(ctxt->sys_regs[TCR_EL1], tcr_el1);
write_sysreg(ctxt->sys_regs[ESR_EL1], esr_el1);
write_sysreg(ctxt->sys_regs[AFSR0_EL1], afsr0_el1);
write_sysreg(ctxt->sys_regs[AFSR1_EL1], afsr1_el1);
write_sysreg(ctxt->sys_regs[FAR_EL1], far_el1);
write_sysreg(ctxt->sys_regs[MAIR_EL1], mair_el1);
write_sysreg(ctxt->sys_regs[VBAR_EL1], vbar_el1);
write_sysreg(ctxt->sys_regs[CONTEXTIDR_EL1], contextidr_el1);
write_sysreg(ctxt->sys_regs[TPIDR_EL0], tpidr_el0);
write_sysreg(ctxt->sys_regs[TPIDRRO_EL0], tpidrro_el0);
write_sysreg(ctxt->sys_regs[TPIDR_EL1], tpidr_el1);
write_sysreg(ctxt->sys_regs[AMAIR_EL1], amair_el1);
write_sysreg(ctxt->sys_regs[CNTKCTL_EL1], cntkctl_el1);
write_sysreg(ctxt->sys_regs[PAR_EL1], par_el1);
write_sysreg(ctxt->sys_regs[MDSCR_EL1], mdscr_el1);
write_sysreg(ctxt->gp_regs.regs.sp, sp_el0);
write_sysreg(ctxt->gp_regs.regs.pc, elr_el2);
write_sysreg(ctxt->gp_regs.regs.pstate, spsr_el2);
write_sysreg(ctxt->gp_regs.sp_el1, sp_el1);
write_sysreg(ctxt->gp_regs.elr_el1, elr_el1);
write_sysreg(ctxt->gp_regs.spsr[KVM_SPSR_EL1], spsr_el1);
}
void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu)
{
u64 *spsr, *sysreg;
if (read_sysreg(hcr_el2) & HCR_RW)
return;
spsr = vcpu->arch.ctxt.gp_regs.spsr;
sysreg = vcpu->arch.ctxt.sys_regs;
spsr[KVM_SPSR_ABT] = read_sysreg(spsr_abt);
spsr[KVM_SPSR_UND] = read_sysreg(spsr_und);
spsr[KVM_SPSR_IRQ] = read_sysreg(spsr_irq);
spsr[KVM_SPSR_FIQ] = read_sysreg(spsr_fiq);
sysreg[DACR32_EL2] = read_sysreg(dacr32_el2);
sysreg[IFSR32_EL2] = read_sysreg(ifsr32_el2);
if (__fpsimd_enabled())
sysreg[FPEXC32_EL2] = read_sysreg(fpexc32_el2);
if (vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY)
sysreg[DBGVCR32_EL2] = read_sysreg(dbgvcr32_el2);
}
void __hyp_text __sysreg32_restore_state(struct kvm_vcpu *vcpu)
{
u64 *spsr, *sysreg;
if (read_sysreg(hcr_el2) & HCR_RW)
return;
spsr = vcpu->arch.ctxt.gp_regs.spsr;
sysreg = vcpu->arch.ctxt.sys_regs;
write_sysreg(spsr[KVM_SPSR_ABT], spsr_abt);
write_sysreg(spsr[KVM_SPSR_UND], spsr_und);
write_sysreg(spsr[KVM_SPSR_IRQ], spsr_irq);
write_sysreg(spsr[KVM_SPSR_FIQ], spsr_fiq);
write_sysreg(sysreg[DACR32_EL2], dacr32_el2);
write_sysreg(sysreg[IFSR32_EL2], ifsr32_el2);
if (vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY)
write_sysreg(sysreg[DBGVCR32_EL2], dbgvcr32_el2);
}
/*
* Copyright (C) 2012-2015 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <clocksource/arm_arch_timer.h>
#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/kvm_mmu.h>
#include "hyp.h"
/* vcpu is already in the HYP VA space */
void __hyp_text __timer_save_state(struct kvm_vcpu *vcpu)
{
struct kvm *kvm = kern_hyp_va(vcpu->kvm);
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
u64 val;
if (kvm->arch.timer.enabled) {
timer->cntv_ctl = read_sysreg(cntv_ctl_el0);
timer->cntv_cval = read_sysreg(cntv_cval_el0);
}
/* Disable the virtual timer */
write_sysreg(0, cntv_ctl_el0);
/* Allow physical timer/counter access for the host */
val = read_sysreg(cnthctl_el2);
val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN;
write_sysreg(val, cnthctl_el2);
/* Clear cntvoff for the host */
write_sysreg(0, cntvoff_el2);
}
void __hyp_text __timer_restore_state(struct kvm_vcpu *vcpu)
{
struct kvm *kvm = kern_hyp_va(vcpu->kvm);
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
u64 val;
/*
* Disallow physical timer access for the guest
* Physical counter access is allowed
*/
val = read_sysreg(cnthctl_el2);
val &= ~CNTHCTL_EL1PCEN;
val |= CNTHCTL_EL1PCTEN;
write_sysreg(val, cnthctl_el2);
if (kvm->arch.timer.enabled) {
write_sysreg(kvm->arch.timer.cntvoff, cntvoff_el2);
write_sysreg(timer->cntv_cval, cntv_cval_el0);
isb();
write_sysreg(timer->cntv_ctl, cntv_ctl_el0);
}
}
/*
* Copyright (C) 2015 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "hyp.h"
static void __hyp_text __tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
dsb(ishst);
/* Switch to requested VMID */
kvm = kern_hyp_va(kvm);
write_sysreg(kvm->arch.vttbr, vttbr_el2);
isb();
/*
* We could do so much better if we had the VA as well.
* Instead, we invalidate Stage-2 for this IPA, and the
* whole of Stage-1. Weep...
*/
ipa >>= 12;
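	/* TLBI IPAS2E1IS takes IPA[47:12] in Xt[35:0], hence the shift */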
asm volatile("tlbi ipas2e1is, %0" : : "r" (ipa));
/*
* We have to ensure completion of the invalidation at Stage-2,
* since a table walk on another CPU could refill a TLB with a
* complete (S1 + S2) walk based on the old Stage-2 mapping if
* the Stage-1 invalidation happened first.
*/
dsb(ish);
asm volatile("tlbi vmalle1is" : : );
dsb(ish);
isb();
write_sysreg(0, vttbr_el2);
}
__alias(__tlb_flush_vmid_ipa) void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm,
phys_addr_t ipa);
static void __hyp_text __tlb_flush_vmid(struct kvm *kvm)
{
dsb(ishst);
/* Switch to requested VMID */
kvm = kern_hyp_va(kvm);
write_sysreg(kvm->arch.vttbr, vttbr_el2);
isb();
asm volatile("tlbi vmalls12e1is" : : );
dsb(ish);
isb();
write_sysreg(0, vttbr_el2);
}
__alias(__tlb_flush_vmid) void __kvm_tlb_flush_vmid(struct kvm *kvm);
static void __hyp_text __tlb_flush_vm_context(void)
{
dsb(ishst);
asm volatile("tlbi alle1is \n"
"ic ialluis ": : );
dsb(ish);
}
__alias(__tlb_flush_vm_context) void __kvm_flush_vm_context(void);
/*
* Copyright (C) 2012-2015 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/compiler.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/kvm_host.h>
#include <asm/kvm_mmu.h>
#include "hyp.h"
/* vcpu is already in the HYP VA space */
void __hyp_text __vgic_v2_save_state(struct kvm_vcpu *vcpu)
{
struct kvm *kvm = kern_hyp_va(vcpu->kvm);
struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
struct vgic_dist *vgic = &kvm->arch.vgic;
void __iomem *base = kern_hyp_va(vgic->vctrl_base);
u32 eisr0, eisr1, elrsr0, elrsr1;
int i, nr_lr;
if (!base)
return;
nr_lr = vcpu->arch.vgic_cpu.nr_lr;
cpu_if->vgic_vmcr = readl_relaxed(base + GICH_VMCR);
cpu_if->vgic_misr = readl_relaxed(base + GICH_MISR);
eisr0 = readl_relaxed(base + GICH_EISR0);
elrsr0 = readl_relaxed(base + GICH_ELRSR0);
if (unlikely(nr_lr > 32)) {
eisr1 = readl_relaxed(base + GICH_EISR1);
elrsr1 = readl_relaxed(base + GICH_ELRSR1);
} else {
eisr1 = elrsr1 = 0;
}
#ifdef CONFIG_CPU_BIG_ENDIAN
cpu_if->vgic_eisr = ((u64)eisr0 << 32) | eisr1;
cpu_if->vgic_elrsr = ((u64)elrsr0 << 32) | elrsr1;
#else
cpu_if->vgic_eisr = ((u64)eisr1 << 32) | eisr0;
cpu_if->vgic_elrsr = ((u64)elrsr1 << 32) | elrsr0;
#endif
cpu_if->vgic_apr = readl_relaxed(base + GICH_APR);
writel_relaxed(0, base + GICH_HCR);
for (i = 0; i < nr_lr; i++)
cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4));
}
/* vcpu is already in the HYP VA space */
void __hyp_text __vgic_v2_restore_state(struct kvm_vcpu *vcpu)
{
struct kvm *kvm = kern_hyp_va(vcpu->kvm);
struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
struct vgic_dist *vgic = &kvm->arch.vgic;
void __iomem *base = kern_hyp_va(vgic->vctrl_base);
int i, nr_lr;
if (!base)
return;
writel_relaxed(cpu_if->vgic_hcr, base + GICH_HCR);
writel_relaxed(cpu_if->vgic_vmcr, base + GICH_VMCR);
writel_relaxed(cpu_if->vgic_apr, base + GICH_APR);
nr_lr = vcpu->arch.vgic_cpu.nr_lr;
for (i = 0; i < nr_lr; i++)
writel_relaxed(cpu_if->vgic_lr[i], base + GICH_LR0 + (i * 4));
}
/*
* Copyright (C) 2012-2015 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/compiler.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm_host.h>
#include <asm/kvm_mmu.h>
#include "hyp.h"
#define vtr_to_max_lr_idx(v) ((v) & 0xf)
#define vtr_to_nr_pri_bits(v) (((u32)(v) >> 29) + 1)
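/*
 * Example decoding (value chosen for illustration): ICH_VTR_EL2 ==
 * 0x90000003 gives vtr_to_max_lr_idx() == 3 (four list registers,
 * LR0-LR3) and vtr_to_nr_pri_bits() == 4 + 1 == 5.
 */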
#define read_gicreg(r) \
({ \
u64 reg; \
asm volatile("mrs_s %0, " __stringify(r) : "=r" (reg)); \
reg; \
})
#define write_gicreg(v,r) \
do { \
u64 __val = (v); \
asm volatile("msr_s " __stringify(r) ", %0" : : "r" (__val));\
} while (0)
/* vcpu is already in the HYP VA space */
void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
{
struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
u64 val;
u32 max_lr_idx, nr_pri_bits;
/*
* Make sure stores to the GIC via the memory mapped interface
* are now visible to the system register interface.
*/
dsb(st);
cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);
cpu_if->vgic_misr = read_gicreg(ICH_MISR_EL2);
cpu_if->vgic_eisr = read_gicreg(ICH_EISR_EL2);
cpu_if->vgic_elrsr = read_gicreg(ICH_ELSR_EL2);
write_gicreg(0, ICH_HCR_EL2);
val = read_gicreg(ICH_VTR_EL2);
max_lr_idx = vtr_to_max_lr_idx(val);
nr_pri_bits = vtr_to_nr_pri_bits(val);
switch (max_lr_idx) {
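	/*
	 * The cases below fall through on purpose: saving LRn implies
	 * saving every lower-numbered list register as well.
	 */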
case 15:
cpu_if->vgic_lr[VGIC_V3_LR_INDEX(15)] = read_gicreg(ICH_LR15_EL2);
case 14:
cpu_if->vgic_lr[VGIC_V3_LR_INDEX(14)] = read_gicreg(ICH_LR14_EL2);
case 13:
cpu_if->vgic_lr[VGIC_V3_LR_INDEX(13)] = read_gicreg(ICH_LR13_EL2);
case 12:
cpu_if->vgic_lr[VGIC_V3_LR_INDEX(12)] = read_gicreg(ICH_LR12_EL2);
case 11:
cpu_if->vgic_lr[VGIC_V3_LR_INDEX(11)] = read_gicreg(ICH_LR11_EL2);
case 10:
cpu_if->vgic_lr[VGIC_V3_LR_INDEX(10)] = read_gicreg(ICH_LR10_EL2);
case 9:
cpu_if->vgic_lr[VGIC_V3_LR_INDEX(9)] = read_gicreg(ICH_LR9_EL2);
case 8:
cpu_if->vgic_lr[VGIC_V3_LR_INDEX(8)] = read_gicreg(ICH_LR8_EL2);
case 7:
cpu_if->vgic_lr[VGIC_V3_LR_INDEX(7)] = read_gicreg(ICH_LR7_EL2);
case 6:
cpu_if->vgic_lr[VGIC_V3_LR_INDEX(6)] = read_gicreg(ICH_LR6_EL2);
case 5:
cpu_if->vgic_lr[VGIC_V3_LR_INDEX(5)] = read_gicreg(ICH_LR5_EL2);
case 4:
cpu_if->vgic_lr[VGIC_V3_LR_INDEX(4)] = read_gicreg(ICH_LR4_EL2);
case 3:
cpu_if->vgic_lr[VGIC_V3_LR_INDEX(3)] = read_gicreg(ICH_LR3_EL2);
case 2:
cpu_if->vgic_lr[VGIC_V3_LR_INDEX(2)] = read_gicreg(ICH_LR2_EL2);
case 1:
cpu_if->vgic_lr[VGIC_V3_LR_INDEX(1)] = read_gicreg(ICH_LR1_EL2);
case 0:
cpu_if->vgic_lr[VGIC_V3_LR_INDEX(0)] = read_gicreg(ICH_LR0_EL2);
}
switch (nr_pri_bits) {
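	/*
	 * Also cumulative: 7 priority bits need AP0R3..AP0R0, 6 bits
	 * need AP0R1..AP0R0, and the default (5 bits) only AP0R0.
	 */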
case 7:
cpu_if->vgic_ap0r[3] = read_gicreg(ICH_AP0R3_EL2);
cpu_if->vgic_ap0r[2] = read_gicreg(ICH_AP0R2_EL2);
case 6:
cpu_if->vgic_ap0r[1] = read_gicreg(ICH_AP0R1_EL2);
default:
cpu_if->vgic_ap0r[0] = read_gicreg(ICH_AP0R0_EL2);
}
switch (nr_pri_bits) {
case 7:
cpu_if->vgic_ap1r[3] = read_gicreg(ICH_AP1R3_EL2);
cpu_if->vgic_ap1r[2] = read_gicreg(ICH_AP1R2_EL2);
case 6:
cpu_if->vgic_ap1r[1] = read_gicreg(ICH_AP1R1_EL2);
default:
cpu_if->vgic_ap1r[0] = read_gicreg(ICH_AP1R0_EL2);
}
val = read_gicreg(ICC_SRE_EL2);
write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2);
isb(); /* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
write_gicreg(1, ICC_SRE_EL1);
}
void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
{
struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
u64 val;
u32 max_lr_idx, nr_pri_bits;
/*
* VFIQEn is RES1 if ICC_SRE_EL1.SRE is 1. This causes a
* Group0 interrupt (as generated in GICv2 mode) to be
* delivered as a FIQ to the guest, with potentially fatal
* consequences. So we must make sure that ICC_SRE_EL1 has
* actually been programmed with the value we want before
* starting to mess with the rest of the GIC.
*/
write_gicreg(cpu_if->vgic_sre, ICC_SRE_EL1);
isb();
write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
write_gicreg(cpu_if->vgic_vmcr, ICH_VMCR_EL2);
val = read_gicreg(ICH_VTR_EL2);
max_lr_idx = vtr_to_max_lr_idx(val);
nr_pri_bits = vtr_to_nr_pri_bits(val);
switch (nr_pri_bits) {
case 7:
write_gicreg(cpu_if->vgic_ap1r[3], ICH_AP1R3_EL2);
write_gicreg(cpu_if->vgic_ap1r[2], ICH_AP1R2_EL2);
case 6:
write_gicreg(cpu_if->vgic_ap1r[1], ICH_AP1R1_EL2);
default:
write_gicreg(cpu_if->vgic_ap1r[0], ICH_AP1R0_EL2);
}
switch (nr_pri_bits) {
case 7:
write_gicreg(cpu_if->vgic_ap0r[3], ICH_AP0R3_EL2);
write_gicreg(cpu_if->vgic_ap0r[2], ICH_AP0R2_EL2);
case 6:
write_gicreg(cpu_if->vgic_ap0r[1], ICH_AP0R1_EL2);
default:
write_gicreg(cpu_if->vgic_ap0r[0], ICH_AP0R0_EL2);
}
switch (max_lr_idx) {
case 15:
write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(15)], ICH_LR15_EL2);
case 14:
write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(14)], ICH_LR14_EL2);
case 13:
write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(13)], ICH_LR13_EL2);
case 12:
write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(12)], ICH_LR12_EL2);
case 11:
write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(11)], ICH_LR11_EL2);
case 10:
write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(10)], ICH_LR10_EL2);
case 9:
write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(9)], ICH_LR9_EL2);
case 8:
write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(8)], ICH_LR8_EL2);
case 7:
write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(7)], ICH_LR7_EL2);
case 6:
write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(6)], ICH_LR6_EL2);
case 5:
write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(5)], ICH_LR5_EL2);
case 4:
write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(4)], ICH_LR4_EL2);
case 3:
write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(3)], ICH_LR3_EL2);
case 2:
write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(2)], ICH_LR2_EL2);
case 1:
write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(1)], ICH_LR1_EL2);
case 0:
write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(0)], ICH_LR0_EL2);
}
/*
* Ensure that the above will have reached the
* (re)distributors. This ensures the guest will read the
* correct values from the memory-mapped interface.
*/
isb();
dsb(sy);
/*
* Prevent the guest from touching the GIC system registers if
* SRE isn't enabled for GICv3 emulation.
*/
if (!cpu_if->vgic_sre) {
write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
ICC_SRE_EL2);
}
}
static u64 __hyp_text __vgic_v3_read_ich_vtr_el2(void)
{
return read_gicreg(ICH_VTR_EL2);
}
__alias(__vgic_v3_read_ich_vtr_el2) u64 __vgic_v3_get_ich_vtr_el2(void);
@@ -29,6 +29,7 @@
 #include <asm/debug-monitors.h>
 #include <asm/esr.h>
 #include <asm/kvm_arm.h>
+#include <asm/kvm_asm.h>
 #include <asm/kvm_coproc.h>
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_host.h>
@@ -219,9 +220,9 @@ static bool trap_debug_regs(struct kvm_vcpu *vcpu,
  * All writes will set the KVM_ARM64_DEBUG_DIRTY flag to ensure the
  * hyp.S code switches between host and guest values in future.
  */
-static inline void reg_to_dbg(struct kvm_vcpu *vcpu,
-			      struct sys_reg_params *p,
-			      u64 *dbg_reg)
+static void reg_to_dbg(struct kvm_vcpu *vcpu,
+		       struct sys_reg_params *p,
+		       u64 *dbg_reg)
 {
 	u64 val = p->regval;
@@ -234,18 +235,18 @@ static inline void reg_to_dbg(struct kvm_vcpu *vcpu,
 	vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
 }
-static inline void dbg_to_reg(struct kvm_vcpu *vcpu,
-			      struct sys_reg_params *p,
-			      u64 *dbg_reg)
+static void dbg_to_reg(struct kvm_vcpu *vcpu,
+		       struct sys_reg_params *p,
+		       u64 *dbg_reg)
 {
 	p->regval = *dbg_reg;
 	if (p->is_32bit)
 		p->regval &= 0xffffffffUL;
 }
-static inline bool trap_bvr(struct kvm_vcpu *vcpu,
-			    struct sys_reg_params *p,
-			    const struct sys_reg_desc *rd)
+static bool trap_bvr(struct kvm_vcpu *vcpu,
+		     struct sys_reg_params *p,
+		     const struct sys_reg_desc *rd)
 {
 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
@@ -279,15 +280,15 @@ static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 	return 0;
 }
-static inline void reset_bvr(struct kvm_vcpu *vcpu,
-			     const struct sys_reg_desc *rd)
+static void reset_bvr(struct kvm_vcpu *vcpu,
+		      const struct sys_reg_desc *rd)
 {
 	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val;
 }
-static inline bool trap_bcr(struct kvm_vcpu *vcpu,
-			    struct sys_reg_params *p,
-			    const struct sys_reg_desc *rd)
+static bool trap_bcr(struct kvm_vcpu *vcpu,
+		     struct sys_reg_params *p,
+		     const struct sys_reg_desc *rd)
 {
 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
@@ -322,15 +323,15 @@ static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 	return 0;
 }
-static inline void reset_bcr(struct kvm_vcpu *vcpu,
-			     const struct sys_reg_desc *rd)
+static void reset_bcr(struct kvm_vcpu *vcpu,
+		      const struct sys_reg_desc *rd)
 {
 	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val;
 }
-static inline bool trap_wvr(struct kvm_vcpu *vcpu,
-			    struct sys_reg_params *p,
-			    const struct sys_reg_desc *rd)
+static bool trap_wvr(struct kvm_vcpu *vcpu,
+		     struct sys_reg_params *p,
+		     const struct sys_reg_desc *rd)
 {
 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
@@ -365,15 +366,15 @@ static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 	return 0;
 }
-static inline void reset_wvr(struct kvm_vcpu *vcpu,
-			     const struct sys_reg_desc *rd)
+static void reset_wvr(struct kvm_vcpu *vcpu,
+		      const struct sys_reg_desc *rd)
 {
 	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val;
 }
-static inline bool trap_wcr(struct kvm_vcpu *vcpu,
-			    struct sys_reg_params *p,
-			    const struct sys_reg_desc *rd)
+static bool trap_wcr(struct kvm_vcpu *vcpu,
+		     struct sys_reg_params *p,
+		     const struct sys_reg_desc *rd)
 {
 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
@@ -407,8 +408,8 @@ static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 	return 0;
 }
-static inline void reset_wcr(struct kvm_vcpu *vcpu,
-			     const struct sys_reg_desc *rd)
+static void reset_wcr(struct kvm_vcpu *vcpu,
+		      const struct sys_reg_desc *rd)
 {
 	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val;
 }
@@ -722,9 +723,9 @@ static bool trap_debug32(struct kvm_vcpu *vcpu,
  * system is in.
  */
-static inline bool trap_xvr(struct kvm_vcpu *vcpu,
-			    struct sys_reg_params *p,
-			    const struct sys_reg_desc *rd)
+static bool trap_xvr(struct kvm_vcpu *vcpu,
+		     struct sys_reg_params *p,
+		     const struct sys_reg_desc *rd)
 {
 	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
...
/*
* Copyright (C) 2012,2013 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/linkage.h>
#include <linux/irqchip/arm-gic.h>
#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/asm-offsets.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
.text
.pushsection .hyp.text, "ax"
/*
* Save the VGIC CPU state into memory
* x0: Register pointing to VCPU struct
* Do not corrupt x1!!!
*/
ENTRY(__save_vgic_v2_state)
__save_vgic_v2_state:
/* Get VGIC VCTRL base into x2 */
ldr x2, [x0, #VCPU_KVM]
kern_hyp_va x2
ldr x2, [x2, #KVM_VGIC_VCTRL]
kern_hyp_va x2
cbz x2, 2f // disabled
/* Compute the address of struct vgic_cpu */
add x3, x0, #VCPU_VGIC_CPU
/* Save all interesting registers */
ldr w5, [x2, #GICH_VMCR]
ldr w6, [x2, #GICH_MISR]
ldr w7, [x2, #GICH_EISR0]
ldr w8, [x2, #GICH_EISR1]
ldr w9, [x2, #GICH_ELRSR0]
ldr w10, [x2, #GICH_ELRSR1]
ldr w11, [x2, #GICH_APR]
CPU_BE( rev w5, w5 )
CPU_BE( rev w6, w6 )
CPU_BE( rev w7, w7 )
CPU_BE( rev w8, w8 )
CPU_BE( rev w9, w9 )
CPU_BE( rev w10, w10 )
CPU_BE( rev w11, w11 )
str w5, [x3, #VGIC_V2_CPU_VMCR]
str w6, [x3, #VGIC_V2_CPU_MISR]
CPU_LE( str w7, [x3, #VGIC_V2_CPU_EISR] )
CPU_LE( str w8, [x3, #(VGIC_V2_CPU_EISR + 4)] )
CPU_LE( str w9, [x3, #VGIC_V2_CPU_ELRSR] )
CPU_LE( str w10, [x3, #(VGIC_V2_CPU_ELRSR + 4)] )
CPU_BE( str w7, [x3, #(VGIC_V2_CPU_EISR + 4)] )
CPU_BE( str w8, [x3, #VGIC_V2_CPU_EISR] )
CPU_BE( str w9, [x3, #(VGIC_V2_CPU_ELRSR + 4)] )
CPU_BE( str w10, [x3, #VGIC_V2_CPU_ELRSR] )
str w11, [x3, #VGIC_V2_CPU_APR]
/* Clear GICH_HCR */
str wzr, [x2, #GICH_HCR]
/* Save list registers */
add x2, x2, #GICH_LR0
ldr w4, [x3, #VGIC_CPU_NR_LR]
add x3, x3, #VGIC_V2_CPU_LR
1: ldr w5, [x2], #4
CPU_BE( rev w5, w5 )
str w5, [x3], #4
sub w4, w4, #1
cbnz w4, 1b
2:
ret
ENDPROC(__save_vgic_v2_state)
/*
* Restore the VGIC CPU state from memory
* x0: Register pointing to VCPU struct
*/
ENTRY(__restore_vgic_v2_state)
__restore_vgic_v2_state:
/* Get VGIC VCTRL base into x2 */
ldr x2, [x0, #VCPU_KVM]
kern_hyp_va x2
ldr x2, [x2, #KVM_VGIC_VCTRL]
kern_hyp_va x2
cbz x2, 2f // disabled
/* Compute the address of struct vgic_cpu */
add x3, x0, #VCPU_VGIC_CPU
/* We only restore a minimal set of registers */
ldr w4, [x3, #VGIC_V2_CPU_HCR]
ldr w5, [x3, #VGIC_V2_CPU_VMCR]
ldr w6, [x3, #VGIC_V2_CPU_APR]
CPU_BE( rev w4, w4 )
CPU_BE( rev w5, w5 )
CPU_BE( rev w6, w6 )
str w4, [x2, #GICH_HCR]
str w5, [x2, #GICH_VMCR]
str w6, [x2, #GICH_APR]
/* Restore list registers */
add x2, x2, #GICH_LR0
ldr w4, [x3, #VGIC_CPU_NR_LR]
add x3, x3, #VGIC_V2_CPU_LR
1: ldr w5, [x3], #4
CPU_BE( rev w5, w5 )
str w5, [x2], #4
sub w4, w4, #1
cbnz w4, 1b
2:
ret
ENDPROC(__restore_vgic_v2_state)
.popsection
/*
* Copyright (C) 2012,2013 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/linkage.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/asm-offsets.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>
.text
.pushsection .hyp.text, "ax"
/*
* We store LRs in reverse order to let the CPU deal with streaming
* access. Use this macro to make it look saner...
*/
#define LR_OFFSET(n) (VGIC_V3_CPU_LR + (15 - n) * 8)
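// e.g. LR_OFFSET(15) == VGIC_V3_CPU_LR, LR_OFFSET(0) == VGIC_V3_CPU_LR + 120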
/*
* Save the VGIC CPU state into memory
* x0: Register pointing to VCPU struct
* Do not corrupt x1!!!
*/
.macro save_vgic_v3_state
// Compute the address of struct vgic_cpu
add x3, x0, #VCPU_VGIC_CPU
// Make sure stores to the GIC via the memory mapped interface
// are now visible to the system register interface
dsb st
// Save all interesting registers
mrs_s x5, ICH_VMCR_EL2
mrs_s x6, ICH_MISR_EL2
mrs_s x7, ICH_EISR_EL2
mrs_s x8, ICH_ELSR_EL2
str w5, [x3, #VGIC_V3_CPU_VMCR]
str w6, [x3, #VGIC_V3_CPU_MISR]
str w7, [x3, #VGIC_V3_CPU_EISR]
str w8, [x3, #VGIC_V3_CPU_ELRSR]
msr_s ICH_HCR_EL2, xzr
mrs_s x21, ICH_VTR_EL2
mvn w22, w21
ubfiz w23, w22, 2, 4 // w23 = (15 - ListRegs) * 4
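// ICH_VTR_EL2[3:0] holds the highest implemented LR index; NOT-ing it
// and keeping bits [3:0] shifted left by 2 yields the byte offset of
// the first mrs_s below that reads an implemented list register, so
// the computed branch skips the unimplemented ones.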
adr x24, 1f
add x24, x24, x23
br x24
1:
mrs_s x20, ICH_LR15_EL2
mrs_s x19, ICH_LR14_EL2
mrs_s x18, ICH_LR13_EL2
mrs_s x17, ICH_LR12_EL2
mrs_s x16, ICH_LR11_EL2
mrs_s x15, ICH_LR10_EL2
mrs_s x14, ICH_LR9_EL2
mrs_s x13, ICH_LR8_EL2
mrs_s x12, ICH_LR7_EL2
mrs_s x11, ICH_LR6_EL2
mrs_s x10, ICH_LR5_EL2
mrs_s x9, ICH_LR4_EL2
mrs_s x8, ICH_LR3_EL2
mrs_s x7, ICH_LR2_EL2
mrs_s x6, ICH_LR1_EL2
mrs_s x5, ICH_LR0_EL2
adr x24, 1f
add x24, x24, x23
br x24
1:
str x20, [x3, #LR_OFFSET(15)]
str x19, [x3, #LR_OFFSET(14)]
str x18, [x3, #LR_OFFSET(13)]
str x17, [x3, #LR_OFFSET(12)]
str x16, [x3, #LR_OFFSET(11)]
str x15, [x3, #LR_OFFSET(10)]
str x14, [x3, #LR_OFFSET(9)]
str x13, [x3, #LR_OFFSET(8)]
str x12, [x3, #LR_OFFSET(7)]
str x11, [x3, #LR_OFFSET(6)]
str x10, [x3, #LR_OFFSET(5)]
str x9, [x3, #LR_OFFSET(4)]
str x8, [x3, #LR_OFFSET(3)]
str x7, [x3, #LR_OFFSET(2)]
str x6, [x3, #LR_OFFSET(1)]
str x5, [x3, #LR_OFFSET(0)]
tbnz w21, #29, 6f // 6 bits
tbz w21, #30, 5f // 5 bits
// 7 bits
mrs_s x20, ICH_AP0R3_EL2
str w20, [x3, #(VGIC_V3_CPU_AP0R + 3*4)]
mrs_s x19, ICH_AP0R2_EL2
str w19, [x3, #(VGIC_V3_CPU_AP0R + 2*4)]
6: mrs_s x18, ICH_AP0R1_EL2
str w18, [x3, #(VGIC_V3_CPU_AP0R + 1*4)]
5: mrs_s x17, ICH_AP0R0_EL2
str w17, [x3, #VGIC_V3_CPU_AP0R]
tbnz w21, #29, 6f // 6 bits
tbz w21, #30, 5f // 5 bits
// 7 bits
mrs_s x20, ICH_AP1R3_EL2
str w20, [x3, #(VGIC_V3_CPU_AP1R + 3*4)]
mrs_s x19, ICH_AP1R2_EL2
str w19, [x3, #(VGIC_V3_CPU_AP1R + 2*4)]
6: mrs_s x18, ICH_AP1R1_EL2
str w18, [x3, #(VGIC_V3_CPU_AP1R + 1*4)]
5: mrs_s x17, ICH_AP1R0_EL2
str w17, [x3, #VGIC_V3_CPU_AP1R]
// Restore SRE_EL1 access and re-enable SRE at EL1.
mrs_s x5, ICC_SRE_EL2
orr x5, x5, #ICC_SRE_EL2_ENABLE
msr_s ICC_SRE_EL2, x5
isb
mov x5, #1
msr_s ICC_SRE_EL1, x5
.endm
/*
* Restore the VGIC CPU state from memory
* x0: Register pointing to VCPU struct
*/
.macro restore_vgic_v3_state
// Compute the address of struct vgic_cpu
add x3, x0, #VCPU_VGIC_CPU
// Restore all interesting registers
ldr w4, [x3, #VGIC_V3_CPU_HCR]
ldr w5, [x3, #VGIC_V3_CPU_VMCR]
ldr w25, [x3, #VGIC_V3_CPU_SRE]
msr_s ICC_SRE_EL1, x25
// make sure SRE is valid before writing the other registers
isb
msr_s ICH_HCR_EL2, x4
msr_s ICH_VMCR_EL2, x5
mrs_s x21, ICH_VTR_EL2
tbnz w21, #29, 6f // 6 bits
tbz w21, #30, 5f // 5 bits
// 7 bits
ldr w20, [x3, #(VGIC_V3_CPU_AP1R + 3*4)]
msr_s ICH_AP1R3_EL2, x20
ldr w19, [x3, #(VGIC_V3_CPU_AP1R + 2*4)]
msr_s ICH_AP1R2_EL2, x19
6: ldr w18, [x3, #(VGIC_V3_CPU_AP1R + 1*4)]
msr_s ICH_AP1R1_EL2, x18
5: ldr w17, [x3, #VGIC_V3_CPU_AP1R]
msr_s ICH_AP1R0_EL2, x17
tbnz w21, #29, 6f // 6 bits
tbz w21, #30, 5f // 5 bits
// 7 bits
ldr w20, [x3, #(VGIC_V3_CPU_AP0R + 3*4)]
msr_s ICH_AP0R3_EL2, x20
ldr w19, [x3, #(VGIC_V3_CPU_AP0R + 2*4)]
msr_s ICH_AP0R2_EL2, x19
6: ldr w18, [x3, #(VGIC_V3_CPU_AP0R + 1*4)]
msr_s ICH_AP0R1_EL2, x18
5: ldr w17, [x3, #VGIC_V3_CPU_AP0R]
msr_s ICH_AP0R0_EL2, x17
and w22, w21, #0xf
mvn w22, w21
ubfiz w23, w22, 2, 4 // w23 = (15 - ListRegs) * 4
adr x24, 1f
add x24, x24, x23
br x24
1:
ldr x20, [x3, #LR_OFFSET(15)]
ldr x19, [x3, #LR_OFFSET(14)]
ldr x18, [x3, #LR_OFFSET(13)]
ldr x17, [x3, #LR_OFFSET(12)]
ldr x16, [x3, #LR_OFFSET(11)]
ldr x15, [x3, #LR_OFFSET(10)]
ldr x14, [x3, #LR_OFFSET(9)]
ldr x13, [x3, #LR_OFFSET(8)]
ldr x12, [x3, #LR_OFFSET(7)]
ldr x11, [x3, #LR_OFFSET(6)]
ldr x10, [x3, #LR_OFFSET(5)]
ldr x9, [x3, #LR_OFFSET(4)]
ldr x8, [x3, #LR_OFFSET(3)]
ldr x7, [x3, #LR_OFFSET(2)]
ldr x6, [x3, #LR_OFFSET(1)]
ldr x5, [x3, #LR_OFFSET(0)]
adr x24, 1f
add x24, x24, x23
br x24
1:
msr_s ICH_LR15_EL2, x20
msr_s ICH_LR14_EL2, x19
msr_s ICH_LR13_EL2, x18
msr_s ICH_LR12_EL2, x17
msr_s ICH_LR11_EL2, x16
msr_s ICH_LR10_EL2, x15
msr_s ICH_LR9_EL2, x14
msr_s ICH_LR8_EL2, x13
msr_s ICH_LR7_EL2, x12
msr_s ICH_LR6_EL2, x11
msr_s ICH_LR5_EL2, x10
msr_s ICH_LR4_EL2, x9
msr_s ICH_LR3_EL2, x8
msr_s ICH_LR2_EL2, x7
msr_s ICH_LR1_EL2, x6
msr_s ICH_LR0_EL2, x5
// Ensure that the above will have reached the
// (re)distributors. This ensures the guest will read
// the correct values from the memory-mapped interface.
isb
dsb sy
// Prevent the guest from touching the GIC system registers
// if SRE isn't enabled for GICv3 emulation
cbnz x25, 1f
mrs_s x5, ICC_SRE_EL2
and x5, x5, #~ICC_SRE_EL2_ENABLE
msr_s ICC_SRE_EL2, x5
1:
.endm
ENTRY(__save_vgic_v3_state)
save_vgic_v3_state
ret
ENDPROC(__save_vgic_v3_state)
ENTRY(__restore_vgic_v3_state)
restore_vgic_v3_state
ret
ENDPROC(__restore_vgic_v3_state)
ENTRY(__vgic_v3_get_ich_vtr_el2)
mrs_s x0, ICH_VTR_EL2
ret
ENDPROC(__vgic_v3_get_ich_vtr_el2)
.popsection
@@ -50,6 +50,10 @@
 #define KVM_NR_IRQCHIPS          1
 #define KVM_IRQCHIP_NUM_PINS     256
+/* PPC-specific vcpu->requests bit members */
+#define KVM_REQ_WATCHDOG           8
+#define KVM_REQ_EPR_EXIT           9
 #include <linux/mmu_notifier.h>
 #define KVM_ARCH_WANT_MMU_NOTIFIER
...
@@ -314,16 +314,10 @@ static void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
 static struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
 {
-	int r;
-	struct kvm_vcpu *v, *ret = NULL;
+	struct kvm_vcpu *ret;
 	mutex_lock(&kvm->lock);
-	kvm_for_each_vcpu(r, v, kvm) {
-		if (v->vcpu_id == id) {
-			ret = v;
-			break;
-		}
-	}
+	ret = kvm_get_vcpu_by_id(kvm, id);
 	mutex_unlock(&kvm->lock);
 	return ret;
 }
...
@@ -512,7 +512,7 @@ static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
 	put_page(hpage);
 }
-static int kvmppc_visible_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
+static bool kvmppc_visible_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
 {
 	ulong mp_pa = vcpu->arch.magic_page_pa;
@@ -521,7 +521,7 @@ static int kvmppc_visible_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
 	gpa &= ~0xFFFULL;
 	if (unlikely(mp_pa) && unlikely((mp_pa & KVM_PAM) == (gpa & KVM_PAM))) {
-		return 1;
+		return true;
 	}
 	return kvm_is_visible_gfn(vcpu->kvm, gpa >> PAGE_SHIFT);
...
@@ -104,6 +104,9 @@
 #define HWCAP_S390_TE		1024
 #define HWCAP_S390_VXRS		2048
+/* Internal bits, not exposed via elf */
+#define HWCAP_INT_SIE		1UL
 /*
  * These are used to set parameters in the core dumps.
  */
@@ -169,6 +172,10 @@ extern unsigned int vdso_enabled;
 extern unsigned long elf_hwcap;
 #define ELF_HWCAP (elf_hwcap)
+/* Internal hardware capabilities, not exposed via elf */
+extern unsigned long int_hwcap;
 /* This yields a string that ld.so will use to load implementation
    specific libraries for optimization.  This is more specific in
    intent than poking at uname or /proc/cpuinfo.
...
@@ -25,7 +25,9 @@
 #include <asm/fpu/api.h>
 #include <asm/isc.h>
-#define KVM_MAX_VCPUS 64
+#define KVM_S390_BSCA_CPU_SLOTS 64
+#define KVM_S390_ESCA_CPU_SLOTS 248
+#define KVM_MAX_VCPUS KVM_S390_ESCA_CPU_SLOTS
 #define KVM_USER_MEM_SLOTS 32
 /*
@@ -37,12 +39,41 @@
 #define KVM_IRQCHIP_NUM_PINS 4096
 #define KVM_HALT_POLL_NS_DEFAULT 0
+/* s390-specific vcpu->requests bit members */
+#define KVM_REQ_ENABLE_IBS	8
+#define KVM_REQ_DISABLE_IBS	9
 #define SIGP_CTRL_C		0x80
 #define SIGP_CTRL_SCN_MASK	0x3f
-struct sca_entry {
+union bsca_sigp_ctrl {
+	__u8 value;
+	struct {
+		__u8 c : 1;
+		__u8 r : 1;
+		__u8 scn : 6;
+	};
+} __packed;
+union esca_sigp_ctrl {
+	__u16 value;
+	struct {
+		__u8 c : 1;
+		__u8 reserved: 7;
+		__u8 scn;
+	};
+} __packed;
+struct esca_entry {
+	union esca_sigp_ctrl sigp_ctrl;
+	__u16	reserved1[3];
+	__u64	sda;
+	__u64	reserved2[6];
+} __packed;
+struct bsca_entry {
 	__u8	reserved0;
-	__u8	sigp_ctrl;
+	union bsca_sigp_ctrl	sigp_ctrl;
 	__u16	reserved[3];
 	__u64	sda;
 	__u64	reserved2[2];
@@ -57,14 +88,22 @@ union ipte_control {
 	};
 };
-struct sca_block {
+struct bsca_block {
 	union ipte_control ipte_control;
 	__u64	reserved[5];
 	__u64	mcn;
 	__u64	reserved2;
-	struct sca_entry cpu[64];
+	struct bsca_entry cpu[KVM_S390_BSCA_CPU_SLOTS];
 } __attribute__((packed));
+struct esca_block {
+	union ipte_control ipte_control;
+	__u64	reserved1[7];
+	__u64	mcn[4];
+	__u64	reserved2[20];
+	struct esca_entry cpu[KVM_S390_ESCA_CPU_SLOTS];
+} __packed;
 #define CPUSTAT_STOPPED    0x80000000
 #define CPUSTAT_WAIT       0x10000000
 #define CPUSTAT_ECALL_PEND 0x08000000
@@ -182,7 +221,8 @@ struct kvm_s390_sie_block {
 	__u64	pp;			/* 0x01de */
 	__u8	reserved1e6[2];		/* 0x01e6 */
 	__u64	itdba;			/* 0x01e8 */
-	__u8	reserved1f0[16];	/* 0x01f0 */
+	__u64	riccbd;			/* 0x01f0 */
+	__u8	reserved1f8[8];		/* 0x01f8 */
 } __attribute__((packed));
 struct kvm_s390_itdb {
@@ -585,11 +625,14 @@ struct kvm_s390_crypto_cb {
 };
 struct kvm_arch{
-	struct sca_block *sca;
+	void *sca;
+	int use_esca;
+	rwlock_t sca_lock;
 	debug_info_t *dbf;
 	struct kvm_s390_float_interrupt float_int;
 	struct kvm_device *flic;
 	struct gmap *gmap;
+	unsigned long mem_limit;
 	int css_support;
 	int use_irqchip;
 	int use_cmma;
...
@@ -29,7 +29,10 @@ struct sclp_ipl_info {
 struct sclp_core_entry {
 	u8 core_id;
-	u8 reserved0[2];
+	u8 reserved0;
+	u8 : 4;
+	u8 sief2 : 1;
+	u8 : 3;
 	u8 : 3;
 	u8 siif : 1;
 	u8 sigpif : 1;
@@ -53,6 +56,9 @@ struct sclp_info {
 	unsigned char has_sigpif : 1;
 	unsigned char has_core_type : 1;
 	unsigned char has_sprp : 1;
+	unsigned char has_hvs : 1;
+	unsigned char has_esca : 1;
+	unsigned char has_sief2 : 1;
 	unsigned int ibc;
 	unsigned int mtid;
 	unsigned int mtid_cp;
...
@@ -66,6 +66,8 @@ struct kvm_s390_io_adapter_req {
 #define KVM_S390_VM_MEM_CLR_CMMA	1
 #define KVM_S390_VM_MEM_LIMIT_SIZE	2
+#define KVM_S390_NO_MEM_LIMIT		U64_MAX
 /* kvm attributes for KVM_S390_VM_TOD */
 #define KVM_S390_VM_TOD_LOW		0
 #define KVM_S390_VM_TOD_HIGH		1
@@ -151,6 +153,7 @@ struct kvm_guest_debug_arch {
 #define KVM_SYNC_ARCH0  (1UL << 4)
 #define KVM_SYNC_PFAULT (1UL << 5)
 #define KVM_SYNC_VRS    (1UL << 6)
+#define KVM_SYNC_RICCB  (1UL << 7)
 /* definition of registers in kvm_run */
 struct kvm_sync_regs {
 	__u64 prefix;	/* prefix register */
@@ -168,6 +171,8 @@ struct kvm_sync_regs {
 	__u64 vrs[32][2];	/* vector registers */
 	__u8  reserved[512];	/* for future vector expansion */
 	__u32 fpc;	/* only valid with vector registers */
+	__u8 padding[52];	/* riccb needs to be 64byte aligned */
+	__u8 riccb[64];		/* runtime instrumentation controls block */
 };
 #define KVM_REG_S390_TODPR	(KVM_REG_S390 | KVM_REG_SIZE_U32 | 0x1)
...
@@ -61,6 +61,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 		"esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp",
 		"edat", "etf3eh", "highgprs", "te", "vx"
 	};
+	static const char * const int_hwcap_str[] = {
+		"sie"
+	};
 	unsigned long n = (unsigned long) v - 1;
 	int i;
@@ -75,6 +78,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 		for (i = 0; i < ARRAY_SIZE(hwcap_str); i++)
 			if (hwcap_str[i] && (elf_hwcap & (1UL << i)))
 				seq_printf(m, "%s ", hwcap_str[i]);
+		for (i = 0; i < ARRAY_SIZE(int_hwcap_str); i++)
+			if (int_hwcap_str[i] && (int_hwcap & (1UL << i)))
+				seq_printf(m, "%s ", int_hwcap_str[i]);
 		seq_puts(m, "\n");
 		show_cacheinfo(m);
 	}
...
@@ -80,6 +80,8 @@ EXPORT_SYMBOL(console_irq);
 unsigned long elf_hwcap __read_mostly = 0;
 char elf_platform[ELF_PLATFORM_SIZE];
+unsigned long int_hwcap = 0;
 int __initdata memory_end_set;
 unsigned long __initdata memory_end;
 unsigned long __initdata max_physmem_end;
@@ -793,6 +795,13 @@ static int __init setup_hwcaps(void)
 		strcpy(elf_platform, "z13");
 		break;
 	}
+	/*
+	 * Virtualization support HWCAP_INT_SIE is bit 0.
+	 */
+	if (sclp.has_sief2)
+		int_hwcap |= HWCAP_INT_SIE;
 	return 0;
 }
 arch_initcall(setup_hwcaps);
...
@@ -155,10 +155,8 @@ static int __diag_time_slice_end(struct kvm_vcpu *vcpu)
 static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
 {
-	struct kvm *kvm = vcpu->kvm;
 	struct kvm_vcpu *tcpu;
 	int tid;
-	int i;
 	tid = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
 	vcpu->stat.diagnose_9c++;
@@ -167,12 +165,9 @@ static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
 	if (tid == vcpu->vcpu_id)
 		return 0;
-	kvm_for_each_vcpu(i, tcpu, kvm)
-		if (tcpu->vcpu_id == tid) {
-			kvm_vcpu_yield_to(tcpu);
-			break;
-		}
+	tcpu = kvm_get_vcpu_by_id(vcpu->kvm, tid);
+	if (tcpu)
+		kvm_vcpu_yield_to(tcpu);
 	return 0;
 }
...
@@ -259,10 +259,14 @@ struct aste {
 int ipte_lock_held(struct kvm_vcpu *vcpu)
 {
-	union ipte_control *ic = &vcpu->kvm->arch.sca->ipte_control;
+	if (vcpu->arch.sie_block->eca & 1) {
+		int rc;
-	if (vcpu->arch.sie_block->eca & 1)
-		return ic->kh != 0;
+		read_lock(&vcpu->kvm->arch.sca_lock);
+		rc = kvm_s390_get_ipte_control(vcpu->kvm)->kh != 0;
+		read_unlock(&vcpu->kvm->arch.sca_lock);
+		return rc;
+	}
 	return vcpu->kvm->arch.ipte_lock_count != 0;
 }
@@ -274,16 +278,20 @@ static void ipte_lock_simple(struct kvm_vcpu *vcpu)
 	vcpu->kvm->arch.ipte_lock_count++;
 	if (vcpu->kvm->arch.ipte_lock_count > 1)
 		goto out;
-	ic = &vcpu->kvm->arch.sca->ipte_control;
+retry:
+	read_lock(&vcpu->kvm->arch.sca_lock);
+	ic = kvm_s390_get_ipte_control(vcpu->kvm);
 	do {
 		old = READ_ONCE(*ic);
-		while (old.k) {
+		if (old.k) {
+			read_unlock(&vcpu->kvm->arch.sca_lock);
 			cond_resched();
-			old = READ_ONCE(*ic);
+			goto retry;
 		}
 		new = old;
 		new.k = 1;
 	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
+	read_unlock(&vcpu->kvm->arch.sca_lock);
 out:
 	mutex_unlock(&vcpu->kvm->arch.ipte_mutex);
 }
@@ -296,12 +304,14 @@ static void ipte_unlock_simple(struct kvm_vcpu *vcpu)
 	vcpu->kvm->arch.ipte_lock_count--;
 	if (vcpu->kvm->arch.ipte_lock_count)
 		goto out;
-	ic = &vcpu->kvm->arch.sca->ipte_control;
+	read_lock(&vcpu->kvm->arch.sca_lock);
+	ic = kvm_s390_get_ipte_control(vcpu->kvm);
 	do {
 		old = READ_ONCE(*ic);
 		new = old;
 		new.k = 0;
 	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
+	read_unlock(&vcpu->kvm->arch.sca_lock);
 	wake_up(&vcpu->kvm->arch.ipte_wq);
 out:
 	mutex_unlock(&vcpu->kvm->arch.ipte_mutex);
@@ -311,24 +321,29 @@ static void ipte_lock_siif(struct kvm_vcpu *vcpu)
 {
 	union ipte_control old, new, *ic;
-	ic = &vcpu->kvm->arch.sca->ipte_control;
+retry:
+	read_lock(&vcpu->kvm->arch.sca_lock);
+	ic = kvm_s390_get_ipte_control(vcpu->kvm);
 	do {
 		old = READ_ONCE(*ic);
-		while (old.kg) {
+		if (old.kg) {
+			read_unlock(&vcpu->kvm->arch.sca_lock);
 			cond_resched();
-			old = READ_ONCE(*ic);
+			goto retry;
 		}
 		new = old;
 		new.k = 1;
 		new.kh++;
 	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
+	read_unlock(&vcpu->kvm->arch.sca_lock);
 }
 static void ipte_unlock_siif(struct kvm_vcpu *vcpu)
 {
 	union ipte_control old, new, *ic;
-	ic = &vcpu->kvm->arch.sca->ipte_control;
+	read_lock(&vcpu->kvm->arch.sca_lock);
+	ic = kvm_s390_get_ipte_control(vcpu->kvm);
 	do {
 		old = READ_ONCE(*ic);
 		new = old;
@@ -336,6 +351,7 @@ static void ipte_unlock_siif(struct kvm_vcpu *vcpu)
 		if (!new.kh)
 			new.k = 0;
 	} while (cmpxchg(&ic->val, old.val, new.val) != old.val);
+	read_unlock(&vcpu->kvm->arch.sca_lock);
 	if (!new.kh)
 		wake_up(&vcpu->kvm->arch.ipte_wq);
 }
...
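All four ipte_* paths above follow the same discipline: arch.sca_lock only pins the SCA block (which can now be replaced at runtime when a VM switches from the basic to the extended SCA), so it is dropped before cond_resched() and the pointer re-resolved on retry. The k/kh/kg bits manipulated by the cmpxchg loops come from the IPTE-lock word at the start of the SCA; its layout in arch/s390/include/asm/kvm_host.h is roughly:

	/* rough layout of the IPTE-lock word shared with the guest */
	union ipte_control {
		unsigned long val;
		struct {
			unsigned long k  : 1;	/* lock bit */
			unsigned long kh : 31;	/* hold count (host/siif) */
			unsigned long kg : 32;	/* hold count (guest) */
		};
	};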
@@ -54,9 +54,6 @@ void kvm_s390_rewind_psw(struct kvm_vcpu *vcpu, int ilc)
 static int handle_noop(struct kvm_vcpu *vcpu)
 {
 	switch (vcpu->arch.sie_block->icptcode) {
-	case 0x0:
-		vcpu->stat.exit_null++;
-		break;
 	case 0x10:
 		vcpu->stat.exit_external_request++;
 		break;
@@ -338,8 +335,10 @@ static int handle_partial_execution(struct kvm_vcpu *vcpu)
 int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
 {
+	if (kvm_is_ucontrol(vcpu->kvm))
+		return -EOPNOTSUPP;
+
 	switch (vcpu->arch.sie_block->icptcode) {
+	case 0x00:
 	case 0x10:
 	case 0x18:
 		return handle_noop(vcpu);
...
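Hoisting the ucontrol check means user-controlled VMs now bounce every intercept straight to userspace instead of special-casing them per handler. The caller in kvm-s390.c already turns -EOPNOTSUPP into a KVM_EXIT_S390_SIEIC exit; its shape is roughly the following (sketched from the surrounding code, not part of this hunk):

	/* rough shape of the fallback path in vcpu_post_run() */
	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel: hand it to userspace */
		vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
		vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}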
@@ -34,6 +34,106 @@
 #define PFAULT_DONE 0x0680
 #define VIRTIO_PARAM 0x0d00
+
+/* handle external calls via sigp interpretation facility */
+static int sca_ext_call_pending(struct kvm_vcpu *vcpu, int *src_id)
+{
+	int c, scn;
+
+	if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND))
+		return 0;
+
+	read_lock(&vcpu->kvm->arch.sca_lock);
+	if (vcpu->kvm->arch.use_esca) {
+		struct esca_block *sca = vcpu->kvm->arch.sca;
+		union esca_sigp_ctrl sigp_ctrl =
+			sca->cpu[vcpu->vcpu_id].sigp_ctrl;
+
+		c = sigp_ctrl.c;
+		scn = sigp_ctrl.scn;
+	} else {
+		struct bsca_block *sca = vcpu->kvm->arch.sca;
+		union bsca_sigp_ctrl sigp_ctrl =
+			sca->cpu[vcpu->vcpu_id].sigp_ctrl;
+
+		c = sigp_ctrl.c;
+		scn = sigp_ctrl.scn;
+	}
+	read_unlock(&vcpu->kvm->arch.sca_lock);
+
+	if (src_id)
+		*src_id = scn;
+
+	return c;
+}
+
+static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
+{
+	int expect, rc;
+
+	read_lock(&vcpu->kvm->arch.sca_lock);
+	if (vcpu->kvm->arch.use_esca) {
+		struct esca_block *sca = vcpu->kvm->arch.sca;
+		union esca_sigp_ctrl *sigp_ctrl =
+			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
+		union esca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;
+
+		new_val.scn = src_id;
+		new_val.c = 1;
+		old_val.c = 0;
+
+		expect = old_val.value;
+		rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
+	} else {
+		struct bsca_block *sca = vcpu->kvm->arch.sca;
+		union bsca_sigp_ctrl *sigp_ctrl =
+			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
+		union bsca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;
+
+		new_val.scn = src_id;
+		new_val.c = 1;
+		old_val.c = 0;
+
+		expect = old_val.value;
+		rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
+	}
+	read_unlock(&vcpu->kvm->arch.sca_lock);
+
+	if (rc != expect) {
+		/* another external call is pending */
+		return -EBUSY;
+	}
+	atomic_or(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
+	return 0;
+}
+
+static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
+{
+	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+	int rc, expect;
+
+	atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags);
+	read_lock(&vcpu->kvm->arch.sca_lock);
+	if (vcpu->kvm->arch.use_esca) {
+		struct esca_block *sca = vcpu->kvm->arch.sca;
+		union esca_sigp_ctrl *sigp_ctrl =
+			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
+		union esca_sigp_ctrl old = *sigp_ctrl;
+
+		expect = old.value;
+		rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
+	} else {
+		struct bsca_block *sca = vcpu->kvm->arch.sca;
+		union bsca_sigp_ctrl *sigp_ctrl =
+			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
+		union bsca_sigp_ctrl old = *sigp_ctrl;
+
+		expect = old.value;
+		rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
+	}
+	read_unlock(&vcpu->kvm->arch.sca_lock);
+	WARN_ON(rc != expect); /* cannot clear? */
+}
+
 int psw_extint_disabled(struct kvm_vcpu *vcpu)
 {
 	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
@@ -792,13 +892,11 @@ static const deliver_irq_t deliver_irq_funcs[] = {
 int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
 {
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
-	uint8_t sigp_ctrl = vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl;
 	if (!sclp.has_sigpif)
 		return test_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
-	return (sigp_ctrl & SIGP_CTRL_C) &&
-	       (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND);
+	return sca_ext_call_pending(vcpu, NULL);
 }
 int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop)
@@ -909,9 +1007,7 @@ void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
 	memset(&li->irq, 0, sizeof(li->irq));
 	spin_unlock(&li->lock);
-	/* clear pending external calls set by sigp interpretation facility */
-	atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags);
-	vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl = 0;
+	sca_clear_ext_call(vcpu);
 }
 int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
@@ -1003,21 +1099,6 @@ static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 	return 0;
 }
-static int __inject_extcall_sigpif(struct kvm_vcpu *vcpu, uint16_t src_id)
-{
-	unsigned char new_val, old_val;
-	uint8_t *sigp_ctrl = &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl;
-
-	new_val = SIGP_CTRL_C | (src_id & SIGP_CTRL_SCN_MASK);
-	old_val = *sigp_ctrl & ~SIGP_CTRL_C;
-	if (cmpxchg(sigp_ctrl, old_val, new_val) != old_val) {
-		/* another external call is pending */
-		return -EBUSY;
-	}
-	atomic_or(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
-	return 0;
-}
-
 static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 {
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
@@ -1034,7 +1115,7 @@ static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 		return -EINVAL;
 	if (sclp.has_sigpif)
-		return __inject_extcall_sigpif(vcpu, src_id);
+		return sca_inject_ext_call(vcpu, src_id);
 	if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
 		return -EBUSY;
@@ -2203,7 +2284,7 @@ static void store_local_irq(struct kvm_s390_local_interrupt *li,
 int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
 {
-	uint8_t sigp_ctrl = vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl;
+	int scn;
 	unsigned long sigp_emerg_pending[BITS_TO_LONGS(KVM_MAX_VCPUS)];
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 	unsigned long pending_irqs;
@@ -2243,14 +2324,12 @@ int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
 		}
 	}
-	if ((sigp_ctrl & SIGP_CTRL_C) &&
-	    (atomic_read(&vcpu->arch.sie_block->cpuflags) &
-	     CPUSTAT_ECALL_PEND)) {
+	if (sca_ext_call_pending(vcpu, &scn)) {
 		if (n + sizeof(irq) > len)
 			return -ENOBUFS;
 		memset(&irq, 0, sizeof(irq));
 		irq.type = KVM_S390_INT_EXTERNAL_CALL;
-		irq.u.extcall.code = sigp_ctrl & SIGP_CTRL_SCN_MASK;
+		irq.u.extcall.code = scn;
 		if (copy_to_user(&buf[n], &irq, sizeof(irq)))
 			return -EFAULT;
 		n += sizeof(irq);
...
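The new sca_* helpers work on two different sigp_ctrl layouts, which is why they read c/scn through a union and cmpxchg the raw value. The definitions this series puts in arch/s390/include/asm/kvm_host.h are approximately the following; the wider scn field in the extended format is what allows addressing the 248 VCPUs mentioned in the merge message:

	union bsca_sigp_ctrl {
		__u8 value;
		struct {
			__u8 c : 1;		/* external call pending */
			__u8 r : 1;
			__u8 scn : 6;		/* source cpu number, 6 bits */
		};
	};

	union esca_sigp_ctrl {
		__u16 value;
		struct {
			__u8 c : 1;		/* external call pending */
			__u8 reserved : 7;
			__u8 scn;		/* source cpu number, 8 bits */
		};
	};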
This diff is collapsed.
@@ -340,4 +340,11 @@ void kvm_s390_clear_bp_data(struct kvm_vcpu *vcpu);
 void kvm_s390_prepare_debug_exit(struct kvm_vcpu *vcpu);
 void kvm_s390_handle_per_event(struct kvm_vcpu *vcpu);
+/* support for Basic/Extended SCA handling */
+static inline union ipte_control *kvm_s390_get_ipte_control(struct kvm *kvm)
+{
+	struct bsca_block *sca = kvm->arch.sca; /* SCA version doesn't matter */
+
+	return &sca->ipte_control;
+}
 #endif
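The unconditional cast to bsca_block is safe because ipte_control is the first field of both SCA variants, and only that shared prefix is touched here. Approximately:

	/* both SCA variants start with the same IPTE-lock word (sketch) */
	struct bsca_block {
		union ipte_control ipte_control;
		/* ... basic-format fields, 64 cpu entries ... */
	};

	struct esca_block {
		union ipte_control ipte_control;
		/* ... extended-format fields, 248 cpu entries ... */
	};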
(31 more file diffs are collapsed)