Commit 0098fc39, authored Mar 15, 2013 by Russell King

    Merge branch 'kvm-arm-fixes' of git://github.com/columbia/linux-kvm-arm
    into devel-stable

Parents: 73a09d21, f42798c6
Showing 19 changed files with 585 additions and 387 deletions.
arch/arm/include/asm/kvm_arm.h      +4    -0
arch/arm/include/asm/kvm_asm.h      +1    -1
arch/arm/include/asm/kvm_emulate.h  +100  -7
arch/arm/include/asm/kvm_host.h     +34   -8
arch/arm/include/asm/kvm_mmu.h      +67   -0
arch/arm/include/asm/kvm_vgic.h     +0    -1
arch/arm/include/uapi/asm/kvm.h     +6    -6
arch/arm/kernel/asm-offsets.c       +4    -4
arch/arm/kvm/Makefile               +1    -1
arch/arm/kvm/arm.c                  +4    -190
arch/arm/kvm/coproc.c               +14   -14
arch/arm/kvm/coproc.h               +2    -2
arch/arm/kvm/emulate.c              +52   -23
arch/arm/kvm/guest.c                +17   -0
arch/arm/kvm/handle_exit.c          +164  -0
arch/arm/kvm/interrupts.S           +8    -5
arch/arm/kvm/mmio.c                 +18   -28
arch/arm/kvm/mmu.c                  +88   -96
arch/arm/kvm/vgic.c                 +1    -1
arch/arm/include/asm/kvm_arm.h

@@ -211,4 +211,8 @@
 #define HSR_HVC_IMM_MASK	((1UL << 16) - 1)

+#define HSR_DABT_S1PTW		(1U << 7)
+#define HSR_DABT_CM		(1U << 8)
+#define HSR_DABT_EA		(1U << 9)
+
 #endif /* __ARM_KVM_ARM_H__ */
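Note: the three new HSR_DABT bits are single-bit data-abort syndrome flags. A standalone user-space sketch (not part of the commit; the values mirror the defines above) of how such flags are tested:

/* Standalone sketch: testing the new HSR data-abort bits (user-space demo). */
#include <stdio.h>
#include <stdint.h>

#define HSR_DABT_S1PTW (1U << 7)  /* abort during a stage-1 page-table walk */
#define HSR_DABT_CM    (1U << 8)  /* abort caused by a cache maintenance op */
#define HSR_DABT_EA    (1U << 9)  /* external abort */

int main(void)
{
        uint32_t hsr = HSR_DABT_S1PTW | HSR_DABT_EA;  /* example syndrome */

        printf("s1ptw=%d cm=%d ea=%d\n",
               !!(hsr & HSR_DABT_S1PTW),
               !!(hsr & HSR_DABT_CM),
               !!(hsr & HSR_DABT_EA));
        return 0;
}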
arch/arm/include/asm/kvm_asm.h

@@ -75,7 +75,7 @@ extern char __kvm_hyp_code_end[];
 extern void __kvm_flush_vm_context(void);
-extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
+extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);

 extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 #endif
arch/arm/include/asm/kvm_emulate.h

@@ -22,11 +22,12 @@
 #include <linux/kvm_host.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_mmio.h>
+#include <asm/kvm_arm.h>

-u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
-u32 *vcpu_spsr(struct kvm_vcpu *vcpu);
+unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
+unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu);

 int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run);
+bool kvm_condition_valid(struct kvm_vcpu *vcpu);
 void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr);
 void kvm_inject_undefined(struct kvm_vcpu *vcpu);
 void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);

@@ -37,14 +38,14 @@ static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
 	return 1;
 }

-static inline u32 *vcpu_pc(struct kvm_vcpu *vcpu)
+static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu)
 {
-	return (u32 *)&vcpu->arch.regs.usr_regs.ARM_pc;
+	return &vcpu->arch.regs.usr_regs.ARM_pc;
 }

-static inline u32 *vcpu_cpsr(struct kvm_vcpu *vcpu)
+static inline unsigned long *vcpu_cpsr(struct kvm_vcpu *vcpu)
 {
-	return (u32 *)&vcpu->arch.regs.usr_regs.ARM_cpsr;
+	return &vcpu->arch.regs.usr_regs.ARM_cpsr;
 }

 static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)

@@ -69,4 +70,96 @@ static inline bool kvm_vcpu_reg_is_pc(struct kvm_vcpu *vcpu, int reg)
 	return reg == 15;
 }

+static inline u32 kvm_vcpu_get_hsr(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.fault.hsr;
+}
+
+static inline unsigned long kvm_vcpu_get_hfar(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.fault.hxfar;
+}
+
+static inline phys_addr_t kvm_vcpu_get_fault_ipa(struct kvm_vcpu *vcpu)
+{
+	return ((phys_addr_t)vcpu->arch.fault.hpfar & HPFAR_MASK) << 8;
+}
+
+static inline unsigned long kvm_vcpu_get_hyp_pc(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.fault.hyp_pc;
+}
+
+static inline bool kvm_vcpu_dabt_isvalid(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) & HSR_ISV;
+}
+
+static inline bool kvm_vcpu_dabt_iswrite(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) & HSR_WNR;
+}
+
+static inline bool kvm_vcpu_dabt_issext(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) & HSR_SSE;
+}
+
+static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu)
+{
+	return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
+}
+
+static inline bool kvm_vcpu_dabt_isextabt(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_EA;
+}
+
+static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW;
+}
+
+/* Get Access Size from a data abort */
+static inline int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu)
+{
+	switch ((kvm_vcpu_get_hsr(vcpu) >> 22) & 0x3) {
+	case 0:
+		return 1;
+	case 1:
+		return 2;
+	case 2:
+		return 4;
+	default:
+		kvm_err("Hardware is weird: SAS 0b11 is reserved\n");
+		return -EFAULT;
+	}
+}
+
+/* This one is not specific to Data Abort */
+static inline bool kvm_vcpu_trap_il_is32bit(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) & HSR_IL;
+}
+
+static inline u8 kvm_vcpu_trap_get_class(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) >> HSR_EC_SHIFT;
+}
+
+static inline bool kvm_vcpu_trap_is_iabt(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_trap_get_class(vcpu) == HSR_EC_IABT;
+}
+
+static inline u8 kvm_vcpu_trap_get_fault(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) & HSR_FSC_TYPE;
+}
+
+static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK;
+}
+
 #endif /* __ARM_KVM_EMULATE_H__ */
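Note: kvm_vcpu_dabt_get_as() above is the SAS (Syndrome Access Size) decode, mapping HSR[23:22] to a byte count. A minimal standalone sketch of the same decode (user-space, not kernel code):

/* Standalone sketch of the SAS decode performed by kvm_vcpu_dabt_get_as(). */
#include <stdio.h>
#include <stdint.h>
#include <errno.h>

static int dabt_access_size(uint32_t hsr)
{
        switch ((hsr >> 22) & 0x3) {
        case 0: return 1;        /* byte */
        case 1: return 2;        /* halfword */
        case 2: return 4;        /* word */
        default: return -EFAULT; /* SAS 0b11 is reserved */
        }
}

int main(void)
{
        uint32_t hsr = 1U << 22;  /* example syndrome: SAS = 0b01, halfword */
        printf("access size = %d bytes\n", dabt_access_size(hsr));
        return 0;
}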
arch/arm/include/asm/kvm_host.h

@@ -80,6 +80,15 @@ struct kvm_mmu_memory_cache {
 	void *objects[KVM_NR_MEM_OBJS];
 };

+struct kvm_vcpu_fault_info {
+	u32 hsr;		/* Hyp Syndrome Register */
+	u32 hxfar;		/* Hyp Data/Inst. Fault Address Register */
+	u32 hpfar;		/* Hyp IPA Fault Address Register */
+	u32 hyp_pc;		/* PC when exception was taken from Hyp mode */
+};
+
+typedef struct vfp_hard_struct kvm_kernel_vfp_t;
+
 struct kvm_vcpu_arch {
 	struct kvm_regs regs;

@@ -93,13 +102,11 @@ struct kvm_vcpu_arch {
 	u32 midr;

 	/* Exception Information */
-	u32 hsr;		/* Hyp Syndrome Register */
-	u32 hxfar;		/* Hyp Data/Inst Fault Address Register */
-	u32 hpfar;		/* Hyp IPA Fault Address Register */
+	struct kvm_vcpu_fault_info fault;

 	/* Floating point registers (VFP and Advanced SIMD/NEON) */
-	struct vfp_hard_struct vfp_guest;
-	struct vfp_hard_struct *vfp_host;
+	kvm_kernel_vfp_t vfp_guest;
+	kvm_kernel_vfp_t *vfp_host;

 	/* VGIC state */
 	struct vgic_cpu vgic_cpu;

@@ -122,9 +129,6 @@ struct kvm_vcpu_arch {
 	/* Interrupt related fields */
 	u32 irq_lines;		/* IRQ and FIQ levels */

-	/* Hyp exception information */
-	u32 hyp_pc;		/* PC when exception was taken from Hyp mode */
-
 	/* Cache some mmu pages needed inside spinlock regions */
 	struct kvm_mmu_memory_cache mmu_page_cache;

@@ -181,4 +185,26 @@ struct kvm_one_reg;
 int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
 int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
+int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
+		int exception_index);
+
+static inline void __cpu_init_hyp_mode(unsigned long long pgd_ptr,
+				       unsigned long hyp_stack_ptr,
+				       unsigned long vector_ptr)
+{
+	unsigned long pgd_low, pgd_high;
+
+	pgd_low = (pgd_ptr & ((1ULL << 32) - 1));
+	pgd_high = (pgd_ptr >> 32ULL);
+
+	/*
+	 * Call initialization code, and switch to the full blown
+	 * HYP code. The init code doesn't need to preserve these registers as
+	 * r1-r3 and r12 are already callee save according to the AAPCS.
+	 * Note that we slightly misuse the prototype by casting the pgd_low to
+	 * a void *.
+	 */
+	kvm_call_hyp((void *)pgd_low, pgd_high, hyp_stack_ptr, vector_ptr);
+}
+
 #endif /* __ARM_KVM_HOST_H__ */
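Note: __cpu_init_hyp_mode() splits the 64-bit HTTBR value so it can be passed to the HYP init stub in two 32-bit registers (r0/r1). The arithmetic as a standalone sketch (example value only):

/* Standalone sketch of the 64-bit split done in __cpu_init_hyp_mode(). */
#include <stdio.h>

int main(void)
{
        unsigned long long pgd_ptr = 0x1234567890ULL;  /* example HTTBR value */
        unsigned long pgd_low, pgd_high;
        unsigned long long rejoined;

        pgd_low  = pgd_ptr & ((1ULL << 32) - 1);  /* bits [31:0]  -> r0 */
        pgd_high = pgd_ptr >> 32;                 /* bits [63:32] -> r1 */

        rejoined = ((unsigned long long)pgd_high << 32) | pgd_low;
        printf("low=%#lx high=%#lx round-trip ok=%d\n",
               pgd_low, pgd_high, rejoined == pgd_ptr);
        return 0;
}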
arch/arm/include/asm/kvm_mmu.h

@@ -19,6 +19,18 @@
 #ifndef __ARM_KVM_MMU_H__
 #define __ARM_KVM_MMU_H__

+#include <asm/cacheflush.h>
+#include <asm/pgalloc.h>
+#include <asm/idmap.h>
+
+/*
+ * We directly use the kernel VA for the HYP, as we can directly share
+ * the mapping (HTTBR "covers" TTBR1).
+ */
+#define HYP_PAGE_OFFSET_MASK	(~0UL)
+#define HYP_PAGE_OFFSET		PAGE_OFFSET
+#define KERN_TO_HYP(kva)	(kva)
+
 int create_hyp_mappings(void *from, void *to);
 int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
 void free_hyp_pmds(void);

@@ -36,6 +48,16 @@ phys_addr_t kvm_mmu_get_httbr(void);
 int kvm_mmu_init(void);
 void kvm_clear_hyp_idmap(void);

+static inline void kvm_set_pte(pte_t *pte, pte_t new_pte)
+{
+	pte_val(*pte) = new_pte;
+	/*
+	 * flush_pmd_entry just takes a void pointer and cleans the necessary
+	 * cache entries, so we can reuse the function for ptes.
+	 */
+	flush_pmd_entry(pte);
+}
+
 static inline bool kvm_is_write_fault(unsigned long hsr)
 {
 	unsigned long hsr_ec = hsr >> HSR_EC_SHIFT;

@@ -47,4 +69,49 @@ static inline bool kvm_is_write_fault(unsigned long hsr)
 	return true;
 }

+static inline void kvm_clean_pgd(pgd_t *pgd)
+{
+	clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
+}
+
+static inline void kvm_clean_pmd_entry(pmd_t *pmd)
+{
+	clean_pmd_entry(pmd);
+}
+
+static inline void kvm_clean_pte(pte_t *pte)
+{
+	clean_pte_table(pte);
+}
+
+static inline void kvm_set_s2pte_writable(pte_t *pte)
+{
+	pte_val(*pte) |= L_PTE_S2_RDWR;
+}
+
+struct kvm;
+
+static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn)
+{
+	/*
+	 * If we are going to insert an instruction page and the icache is
+	 * either VIPT or PIPT, there is a potential problem where the host
+	 * (or another VM) may have used the same page as this guest, and we
+	 * read incorrect data from the icache.  If we're using a PIPT cache,
+	 * we can invalidate just that page, but if we are using a VIPT cache
+	 * we need to invalidate the entire icache - damn shame - as written
+	 * in the ARM ARM (DDI 0406C.b - Page B3-1393).
+	 *
+	 * VIVT caches are tagged using both the ASID and the VMID and don't
+	 * need any kind of flushing (DDI 0406C.b - Page B3-1392).
+	 */
+	if (icache_is_pipt()) {
+		unsigned long hva = gfn_to_hva(kvm, gfn);
+		__cpuc_coherent_user_range(hva, hva + PAGE_SIZE);
+	} else if (!icache_is_vivt_asid_tagged()) {
+		/* any kind of VIPT cache */
+		__flush_icache_all();
+	}
+}
+
 #endif /* __ARM_KVM_MMU_H__ */
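Note: coherent_icache_guest_page() picks a flush strategy from the icache type. A user-space sketch of that decision, with stand-ins for the kernel's icache_is_*() predicates:

/* Standalone sketch of the icache policy decision above: PIPT allows a
 * per-page flush, any VIPT variant needs a full icache flush, and a
 * VIVT ASID-tagged icache needs none (tagged by ASID+VMID). */
#include <stdio.h>

enum icache_type { ICACHE_PIPT, ICACHE_VIPT, ICACHE_VIVT_ASID_TAGGED };

static void coherent_icache_guest_page(enum icache_type type)
{
        if (type == ICACHE_PIPT)
                printf("invalidate one page by host VA\n");
        else if (type != ICACHE_VIVT_ASID_TAGGED)
                printf("invalidate the entire icache\n"); /* any VIPT cache */
        else
                printf("nothing to do\n");
}

int main(void)
{
        coherent_icache_guest_page(ICACHE_PIPT);
        coherent_icache_guest_page(ICACHE_VIPT);
        coherent_icache_guest_page(ICACHE_VIVT_ASID_TAGGED);
        return 0;
}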
arch/arm/include/asm/kvm_vgic.h

@@ -21,7 +21,6 @@
 #include <linux/kernel.h>
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
-#include <linux/irqreturn.h>
 #include <linux/spinlock.h>
 #include <linux/types.h>
arch/arm/include/uapi/asm/kvm.h

@@ -53,12 +53,12 @@
 #define KVM_ARM_FIQ_spsr	fiq_regs[7]

 struct kvm_regs {
-	struct pt_regs usr_regs;/* R0_usr - R14_usr, PC, CPSR */
-	__u32 svc_regs[3];	/* SP_svc, LR_svc, SPSR_svc */
-	__u32 abt_regs[3];	/* SP_abt, LR_abt, SPSR_abt */
-	__u32 und_regs[3];	/* SP_und, LR_und, SPSR_und */
-	__u32 irq_regs[3];	/* SP_irq, LR_irq, SPSR_irq */
-	__u32 fiq_regs[8];	/* R8_fiq - R14_fiq, SPSR_fiq */
+	struct pt_regs usr_regs;	/* R0_usr - R14_usr, PC, CPSR */
+	unsigned long svc_regs[3];	/* SP_svc, LR_svc, SPSR_svc */
+	unsigned long abt_regs[3];	/* SP_abt, LR_abt, SPSR_abt */
+	unsigned long und_regs[3];	/* SP_und, LR_und, SPSR_und */
+	unsigned long irq_regs[3];	/* SP_irq, LR_irq, SPSR_irq */
+	unsigned long fiq_regs[8];	/* R8_fiq - R14_fiq, SPSR_fiq */
 };

 /* Supported Processor Types */
arch/arm/kernel/asm-offsets.c

@@ -165,10 +165,10 @@ int main(void)
   DEFINE(VCPU_PC,		offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_pc));
   DEFINE(VCPU_CPSR,		offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_cpsr));
   DEFINE(VCPU_IRQ_LINES,	offsetof(struct kvm_vcpu, arch.irq_lines));
-  DEFINE(VCPU_HSR,		offsetof(struct kvm_vcpu, arch.hsr));
-  DEFINE(VCPU_HxFAR,		offsetof(struct kvm_vcpu, arch.hxfar));
-  DEFINE(VCPU_HPFAR,		offsetof(struct kvm_vcpu, arch.hpfar));
-  DEFINE(VCPU_HYP_PC,		offsetof(struct kvm_vcpu, arch.hyp_pc));
+  DEFINE(VCPU_HSR,		offsetof(struct kvm_vcpu, arch.fault.hsr));
+  DEFINE(VCPU_HxFAR,		offsetof(struct kvm_vcpu, arch.fault.hxfar));
+  DEFINE(VCPU_HPFAR,		offsetof(struct kvm_vcpu, arch.fault.hpfar));
+  DEFINE(VCPU_HYP_PC,		offsetof(struct kvm_vcpu, arch.fault.hyp_pc));
 #ifdef CONFIG_KVM_ARM_VGIC
   DEFINE(VCPU_VGIC_CPU,		offsetof(struct kvm_vcpu, arch.vgic_cpu));
   DEFINE(VGIC_CPU_HCR,		offsetof(struct vgic_cpu, vgic_hcr));
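Note: asm-offsets.c exists so interrupts.S can reach C struct members via generated constants; moving the exception registers under arch.fault only changes the emitted offsets, not the assembly. A minimal offsetof() sketch (hypothetical mini-structs, not the kernel's):

/* Standalone sketch of the asm-offsets mechanism: emit member offsets as
 * constants so assembly can address C fields. */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct fault_info { uint32_t hsr, hxfar, hpfar, hyp_pc; };
struct vcpu_arch  { uint32_t midr; struct fault_info fault; };

int main(void)
{
        /* After the commit, VCPU_HSR is offsetof(..., fault.hsr): the extra
         * struct level changes the constant, and the asm is unaffected. */
        printf("#define VCPU_HSR %zu\n", offsetof(struct vcpu_arch, fault.hsr));
        printf("#define VCPU_HxFAR %zu\n", offsetof(struct vcpu_arch, fault.hxfar));
        return 0;
}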
arch/arm/kvm/Makefile

@@ -17,7 +17,7 @@ AFLAGS_interrupts.o := -Wa,-march=armv7-a$(plus_virt)
 kvm-arm-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)

 obj-y += kvm-arm.o init.o interrupts.o
-obj-y += arm.o guest.o mmu.o emulate.o reset.o
+obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o
 obj-y += coproc.o coproc_a15.o mmio.o psci.o
 obj-$(CONFIG_KVM_ARM_VGIC) += vgic.o
 obj-$(CONFIG_KVM_ARM_TIMER) += arch_timer.o
arch/arm/kvm/arm.c

@@ -30,11 +30,9 @@
 #define CREATE_TRACE_POINTS
 #include "trace.h"

-#include <asm/unified.h>
 #include <asm/uaccess.h>
 #include <asm/ptrace.h>
 #include <asm/mman.h>
-#include <asm/cputype.h>
 #include <asm/tlbflush.h>
 #include <asm/cacheflush.h>
 #include <asm/virt.h>

@@ -44,14 +42,13 @@
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_coproc.h>
 #include <asm/kvm_psci.h>
-#include <asm/opcodes.h>

 #ifdef REQUIRES_VIRT
 __asm__(".arch_extension	virt");
 #endif

 static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
-static struct vfp_hard_struct __percpu *kvm_host_vfp_state;
+static kvm_kernel_vfp_t __percpu *kvm_host_vfp_state;
 static unsigned long hyp_default_vectors;

 /* Per-CPU variable containing the currently running vcpu. */

@@ -303,22 +300,6 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 	return 0;
 }

-int __attribute_const__ kvm_target_cpu(void)
-{
-	unsigned long implementor = read_cpuid_implementor();
-	unsigned long part_number = read_cpuid_part_number();
-
-	if (implementor != ARM_CPU_IMP_ARM)
-		return -EINVAL;
-
-	switch (part_number) {
-	case ARM_CPU_PART_CORTEX_A15:
-		return KVM_ARM_TARGET_CORTEX_A15;
-	default:
-		return -EINVAL;
-	}
-}
-
 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 {
 	int ret;

@@ -481,163 +462,6 @@ static void update_vttbr(struct kvm *kvm)
 	spin_unlock(&kvm_vmid_lock);
 }

-static int handle_svc_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-	/* SVC called from Hyp mode should never get here */
-	kvm_debug("SVC called from Hyp mode shouldn't go here\n");
-	BUG();
-	return -EINVAL; /* Squash warning */
-}
-
-static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-	trace_kvm_hvc(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0),
-		      vcpu->arch.hsr & HSR_HVC_IMM_MASK);
-
-	if (kvm_psci_call(vcpu))
-		return 1;
-
-	kvm_inject_undefined(vcpu);
-	return 1;
-}
-
-static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-	if (kvm_psci_call(vcpu))
-		return 1;
-
-	kvm_inject_undefined(vcpu);
-	return 1;
-}
-
-static int handle_pabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-	/* The hypervisor should never cause aborts */
-	kvm_err("Prefetch Abort taken from Hyp mode at %#08x (HSR: %#08x)\n",
-		vcpu->arch.hxfar, vcpu->arch.hsr);
-	return -EFAULT;
-}
-
-static int handle_dabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-	/* This is either an error in the ws. code or an external abort */
-	kvm_err("Data Abort taken from Hyp mode at %#08x (HSR: %#08x)\n",
-		vcpu->arch.hxfar, vcpu->arch.hsr);
-	return -EFAULT;
-}
-
-typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);
-static exit_handle_fn arm_exit_handlers[] = {
-	[HSR_EC_WFI]		= kvm_handle_wfi,
-	[HSR_EC_CP15_32]	= kvm_handle_cp15_32,
-	[HSR_EC_CP15_64]	= kvm_handle_cp15_64,
-	[HSR_EC_CP14_MR]	= kvm_handle_cp14_access,
-	[HSR_EC_CP14_LS]	= kvm_handle_cp14_load_store,
-	[HSR_EC_CP14_64]	= kvm_handle_cp14_access,
-	[HSR_EC_CP_0_13]	= kvm_handle_cp_0_13_access,
-	[HSR_EC_CP10_ID]	= kvm_handle_cp10_id,
-	[HSR_EC_SVC_HYP]	= handle_svc_hyp,
-	[HSR_EC_HVC]		= handle_hvc,
-	[HSR_EC_SMC]		= handle_smc,
-	[HSR_EC_IABT]		= kvm_handle_guest_abort,
-	[HSR_EC_IABT_HYP]	= handle_pabt_hyp,
-	[HSR_EC_DABT]		= kvm_handle_guest_abort,
-	[HSR_EC_DABT_HYP]	= handle_dabt_hyp,
-};
-
-/*
- * A conditional instruction is allowed to trap, even though it
- * wouldn't be executed.  So let's re-implement the hardware, in
- * software!
- */
-static bool kvm_condition_valid(struct kvm_vcpu *vcpu)
-{
-	unsigned long cpsr, cond, insn;
-
-	/*
-	 * Exception Code 0 can only happen if we set HCR.TGE to 1, to
-	 * catch undefined instructions, and then we won't get past
-	 * the arm_exit_handlers test anyway.
-	 */
-	BUG_ON(((vcpu->arch.hsr & HSR_EC) >> HSR_EC_SHIFT) == 0);
-
-	/* Top two bits non-zero?  Unconditional. */
-	if (vcpu->arch.hsr >> 30)
-		return true;
-
-	cpsr = *vcpu_cpsr(vcpu);
-
-	/* Is condition field valid? */
-	if ((vcpu->arch.hsr & HSR_CV) >> HSR_CV_SHIFT)
-		cond = (vcpu->arch.hsr & HSR_COND) >> HSR_COND_SHIFT;
-	else {
-		/* This can happen in Thumb mode: examine IT state. */
-		unsigned long it;
-
-		it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);
-
-		/* it == 0 => unconditional. */
-		if (it == 0)
-			return true;
-
-		/* The cond for this insn works out as the top 4 bits. */
-		cond = (it >> 4);
-	}
-
-	/* Shift makes it look like an ARM-mode instruction */
-	insn = cond << 28;
-	return arm_check_condition(insn, cpsr) != ARM_OPCODE_CONDTEST_FAIL;
-}
-
-/*
- * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
- * proper exit to QEMU.
- */
-static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
-		       int exception_index)
-{
-	unsigned long hsr_ec;
-
-	switch (exception_index) {
-	case ARM_EXCEPTION_IRQ:
-		return 1;
-	case ARM_EXCEPTION_UNDEFINED:
-		kvm_err("Undefined exception in Hyp mode at: %#08x\n",
-			vcpu->arch.hyp_pc);
-		BUG();
-		panic("KVM: Hypervisor undefined exception!\n");
-	case ARM_EXCEPTION_DATA_ABORT:
-	case ARM_EXCEPTION_PREF_ABORT:
-	case ARM_EXCEPTION_HVC:
-		hsr_ec = (vcpu->arch.hsr & HSR_EC) >> HSR_EC_SHIFT;
-
-		if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers)
-		    || !arm_exit_handlers[hsr_ec]) {
-			kvm_err("Unknown exception class: %#08lx, "
-				"hsr: %#08x\n", hsr_ec,
-				(unsigned int)vcpu->arch.hsr);
-			BUG();
-		}
-
-		/*
-		 * See ARM ARM B1.14.1: "Hyp traps on instructions
-		 * that fail their condition code check"
-		 */
-		if (!kvm_condition_valid(vcpu)) {
-			bool is_wide = vcpu->arch.hsr & HSR_IL;
-			kvm_skip_instr(vcpu, is_wide);
-			return 1;
-		}
-
-		return arm_exit_handlers[hsr_ec](vcpu, run);
-	default:
-		kvm_pr_unimpl("Unsupported exception type: %d",
-			      exception_index);
-		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-		return 0;
-	}
-}
-
 static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
 {
 	if (likely(vcpu->arch.has_run_once))

@@ -972,7 +796,6 @@ long kvm_arch_vm_ioctl(struct file *filp,
 static void cpu_init_hyp_mode(void *vector)
 {
 	unsigned long long pgd_ptr;
-	unsigned long pgd_low, pgd_high;
 	unsigned long hyp_stack_ptr;
 	unsigned long stack_page;
 	unsigned long vector_ptr;

@@ -981,20 +804,11 @@ static void cpu_init_hyp_mode(void *vector)
 	__hyp_set_vectors((unsigned long)vector);

 	pgd_ptr = (unsigned long long)kvm_mmu_get_httbr();
-	pgd_low = (pgd_ptr & ((1ULL << 32) - 1));
-	pgd_high = (pgd_ptr >> 32ULL);
 	stack_page = __get_cpu_var(kvm_arm_hyp_stack_page);
 	hyp_stack_ptr = stack_page + PAGE_SIZE;
 	vector_ptr = (unsigned long)__kvm_hyp_vector;

-	/*
-	 * Call initialization code, and switch to the full blown
-	 * HYP code. The init code doesn't need to preserve these registers as
-	 * r1-r3 and r12 are already callee save according to the AAPCS.
-	 * Note that we slightly misuse the prototype by casting the pgd_low to
-	 * a void *.
-	 */
-	kvm_call_hyp((void *)pgd_low, pgd_high, hyp_stack_ptr, vector_ptr);
+	__cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);
 }

@@ -1077,7 +891,7 @@ static int init_hyp_mode(void)
 	/*
 	 * Map the host VFP structures
 	 */
-	kvm_host_vfp_state = alloc_percpu(struct vfp_hard_struct);
+	kvm_host_vfp_state = alloc_percpu(kvm_kernel_vfp_t);
 	if (!kvm_host_vfp_state) {
 		err = -ENOMEM;
 		kvm_err("Cannot allocate host VFP state\n");

@@ -1085,7 +899,7 @@ static int init_hyp_mode(void)
 	}

 	for_each_possible_cpu(cpu) {
-		struct vfp_hard_struct *vfp;
+		kvm_kernel_vfp_t *vfp;

 		vfp = per_cpu_ptr(kvm_host_vfp_state, cpu);
 		err = create_hyp_mappings(vfp, vfp + 1);
arch/arm/kvm/coproc.c

@@ -76,7 +76,7 @@ static bool access_dcsw(struct kvm_vcpu *vcpu,
 			const struct coproc_params *p,
 			const struct coproc_reg *r)
 {
-	u32 val;
+	unsigned long val;
 	int cpu;

 	cpu = get_cpu();

@@ -293,12 +293,12 @@ static int emulate_cp15(struct kvm_vcpu *vcpu,
 		if (likely(r->access(vcpu, params, r))) {
 			/* Skip instruction, since it was emulated */
-			kvm_skip_instr(vcpu, (vcpu->arch.hsr >> 25) & 1);
+			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
 			return 1;
 		}
 		/* If access function fails, it should complain. */
 	} else {
-		kvm_err("Unsupported guest CP15 access at: %08x\n",
+		kvm_err("Unsupported guest CP15 access at: %08lx\n",
 			*vcpu_pc(vcpu));
 		print_cp_instr(params);
 	}

@@ -315,14 +315,14 @@ int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 	struct coproc_params params;

-	params.CRm = (vcpu->arch.hsr >> 1) & 0xf;
-	params.Rt1 = (vcpu->arch.hsr >> 5) & 0xf;
-	params.is_write = ((vcpu->arch.hsr & 1) == 0);
+	params.CRm = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
+	params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf;
+	params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0);
 	params.is_64bit = true;

-	params.Op1 = (vcpu->arch.hsr >> 16) & 0xf;
+	params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 16) & 0xf;
 	params.Op2 = 0;
-	params.Rt2 = (vcpu->arch.hsr >> 10) & 0xf;
+	params.Rt2 = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
 	params.CRn = 0;

 	return emulate_cp15(vcpu, &params);

@@ -347,14 +347,14 @@ int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 	struct coproc_params params;

-	params.CRm = (vcpu->arch.hsr >> 1) & 0xf;
-	params.Rt1 = (vcpu->arch.hsr >> 5) & 0xf;
-	params.is_write = ((vcpu->arch.hsr & 1) == 0);
+	params.CRm = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
+	params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf;
+	params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0);
 	params.is_64bit = false;

-	params.CRn = (vcpu->arch.hsr >> 10) & 0xf;
-	params.Op1 = (vcpu->arch.hsr >> 14) & 0x7;
-	params.Op2 = (vcpu->arch.hsr >> 17) & 0x7;
+	params.CRn = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
+	params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 14) & 0x7;
+	params.Op2 = (kvm_vcpu_get_hsr(vcpu) >> 17) & 0x7;
 	params.Rt2 = 0;

 	return emulate_cp15(vcpu, &params);
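Note: the kvm_vcpu_get_hsr() conversions above wrap the same fixed HSR bit-field layout. A standalone sketch of the CP15 32-bit access decode (user-space, mirrors the field extraction above):

/* Standalone sketch of the CP15 trap decode: the syndrome packs
 * Op1/Op2/CRn/CRm/Rt and the read/write bit into fixed HSR fields. */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct coproc_params { int CRn, CRm, Op1, Op2, Rt1; bool is_write; };

static struct coproc_params decode_cp15_32(uint32_t hsr)
{
        struct coproc_params p;

        p.CRm      = (hsr >> 1) & 0xf;
        p.Rt1      = (hsr >> 5) & 0xf;
        p.CRn      = (hsr >> 10) & 0xf;
        p.Op1      = (hsr >> 14) & 0x7;
        p.Op2      = (hsr >> 17) & 0x7;
        p.is_write = (hsr & 1) == 0;  /* bit 0 clear = MCR, i.e. a write */
        return p;
}

int main(void)
{
        struct coproc_params p = decode_cp15_32(0x0);  /* example syndrome */
        printf("CRn=%d CRm=%d Op1=%d Op2=%d Rt=%d write=%d\n",
               p.CRn, p.CRm, p.Op1, p.Op2, p.Rt1, p.is_write);
        return 0;
}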
arch/arm/kvm/coproc.h

@@ -84,7 +84,7 @@ static inline bool read_zero(struct kvm_vcpu *vcpu,
 static inline bool write_to_read_only(struct kvm_vcpu *vcpu,
 				      const struct coproc_params *params)
 {
-	kvm_debug("CP15 write to read-only register at: %08x\n",
+	kvm_debug("CP15 write to read-only register at: %08lx\n",
 		  *vcpu_pc(vcpu));
 	print_cp_instr(params);
 	return false;

@@ -93,7 +93,7 @@ static inline bool write_to_read_only(struct kvm_vcpu *vcpu,
 static inline bool read_from_write_only(struct kvm_vcpu *vcpu,
 					const struct coproc_params *params)
 {
-	kvm_debug("CP15 read to write-only register at: %08x\n",
+	kvm_debug("CP15 read to write-only register at: %08lx\n",
 		  *vcpu_pc(vcpu));
 	print_cp_instr(params);
 	return false;
arch/arm/kvm/emulate.c

@@ -20,6 +20,7 @@
 #include <linux/kvm_host.h>
 #include <asm/kvm_arm.h>
 #include <asm/kvm_emulate.h>
+#include <asm/opcodes.h>
 #include <trace/events/kvm.h>

 #include "trace.h"

@@ -109,10 +110,10 @@ static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][15] = {
 * Return a pointer to the register number valid in the current mode of
 * the virtual CPU.
 */
-u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num)
+unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num)
 {
-	u32 *reg_array = (u32 *)&vcpu->arch.regs;
-	u32 mode = *vcpu_cpsr(vcpu) & MODE_MASK;
+	unsigned long *reg_array = (unsigned long *)&vcpu->arch.regs;
+	unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK;

 	switch (mode) {
 	case USR_MODE...SVC_MODE:

@@ -141,9 +142,9 @@ u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num)
 /*
  * Return the SPSR for the current mode of the virtual CPU.
  */
-u32 *vcpu_spsr(struct kvm_vcpu *vcpu)
+unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu)
 {
-	u32 mode = *vcpu_cpsr(vcpu) & MODE_MASK;
+	unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK;
 	switch (mode) {
 	case SVC_MODE:
 		return &vcpu->arch.regs.KVM_ARM_SVC_spsr;

@@ -160,20 +161,48 @@ u32 *vcpu_spsr(struct kvm_vcpu *vcpu)
 	}
 }

-/**
- * kvm_handle_wfi - handle a wait-for-interrupts instruction executed by a guest
- * @vcpu:	the vcpu pointer
- * @run:	the kvm_run structure pointer
- *
- * Simply sets the wait_for_interrupts flag on the vcpu structure, which will
- * halt execution of world-switches and schedule other host processes until
- * there is an incoming IRQ or FIQ to the VM.
+/*
+ * A conditional instruction is allowed to trap, even though it
+ * wouldn't be executed.  So let's re-implement the hardware, in
+ * software!
 */
-int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run)
+bool kvm_condition_valid(struct kvm_vcpu *vcpu)
 {
-	trace_kvm_wfi(*vcpu_pc(vcpu));
-	kvm_vcpu_block(vcpu);
-	return 1;
+	unsigned long cpsr, cond, insn;
+
+	/*
+	 * Exception Code 0 can only happen if we set HCR.TGE to 1, to
+	 * catch undefined instructions, and then we won't get past
+	 * the arm_exit_handlers test anyway.
+	 */
+	BUG_ON(!kvm_vcpu_trap_get_class(vcpu));
+
+	/* Top two bits non-zero?  Unconditional. */
+	if (kvm_vcpu_get_hsr(vcpu) >> 30)
+		return true;
+
+	cpsr = *vcpu_cpsr(vcpu);
+
+	/* Is condition field valid? */
+	if ((kvm_vcpu_get_hsr(vcpu) & HSR_CV) >> HSR_CV_SHIFT)
+		cond = (kvm_vcpu_get_hsr(vcpu) & HSR_COND) >> HSR_COND_SHIFT;
+	else {
+		/* This can happen in Thumb mode: examine IT state. */
+		unsigned long it;
+
+		it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);
+
+		/* it == 0 => unconditional. */
+		if (it == 0)
+			return true;
+
+		/* The cond for this insn works out as the top 4 bits. */
+		cond = (it >> 4);
+	}
+
+	/* Shift makes it look like an ARM-mode instruction */
+	insn = cond << 28;
+	return arm_check_condition(insn, cpsr) != ARM_OPCODE_CONDTEST_FAIL;
 }

@@ -257,9 +286,9 @@ static u32 exc_vector_base(struct kvm_vcpu *vcpu)
 */
 void kvm_inject_undefined(struct kvm_vcpu *vcpu)
 {
-	u32 new_lr_value;
-	u32 new_spsr_value;
-	u32 cpsr = *vcpu_cpsr(vcpu);
+	unsigned long new_lr_value;
+	unsigned long new_spsr_value;
+	unsigned long cpsr = *vcpu_cpsr(vcpu);
 	u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
 	bool is_thumb = (cpsr & PSR_T_BIT);
 	u32 vect_offset = 4;

@@ -291,9 +320,9 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
 */
 static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr)
 {
-	u32 new_lr_value;
-	u32 new_spsr_value;
-	u32 cpsr = *vcpu_cpsr(vcpu);
+	unsigned long new_lr_value;
+	unsigned long new_spsr_value;
+	unsigned long cpsr = *vcpu_cpsr(vcpu);
 	u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
 	bool is_thumb = (cpsr & PSR_T_BIT);
 	u32 vect_offset;
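Note: the Thumb IT-state handling in kvm_condition_valid() reassembles ITSTATE from its two split CPSR fields, IT[7:2] at bits [15:10] and IT[1:0] at bits [26:25]; the top four bits of the result are the condition for the current instruction. A standalone sketch of that extraction (example ITSTATE value):

/* Standalone sketch of the ITSTATE reassembly used above. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t cpsr = (0x2AU << 10) | (0x1U << 25);  /* example ITSTATE */
        uint32_t it, cond;

        it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);
        cond = it >> 4;  /* condition field for this instruction */

        printf("it=%#x cond=%#x insn-cond-bits=%#x\n", it, cond, cond << 28);
        return 0;
}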
arch/arm/kvm/guest.c

@@ -22,6 +22,7 @@
 #include <linux/module.h>
 #include <linux/vmalloc.h>
 #include <linux/fs.h>
+#include <asm/cputype.h>
 #include <asm/uaccess.h>
 #include <asm/kvm.h>
 #include <asm/kvm_asm.h>

@@ -180,6 +181,22 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 	return -EINVAL;
 }

+int __attribute_const__ kvm_target_cpu(void)
+{
+	unsigned long implementor = read_cpuid_implementor();
+	unsigned long part_number = read_cpuid_part_number();
+
+	if (implementor != ARM_CPU_IMP_ARM)
+		return -EINVAL;
+
+	switch (part_number) {
+	case ARM_CPU_PART_CORTEX_A15:
+		return KVM_ARM_TARGET_CORTEX_A15;
+	default:
+		return -EINVAL;
+	}
+}
+
 int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
 			const struct kvm_vcpu_init *init)
 {
arch/arm/kvm/handle_exit.c  (new file, mode 100644)

/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_psci.h>
#include <trace/events/kvm.h>

#include "trace.h"

typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);

static int handle_svc_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	/* SVC called from Hyp mode should never get here */
	kvm_debug("SVC called from Hyp mode shouldn't go here\n");
	BUG();
	return -EINVAL; /* Squash warning */
}

static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	trace_kvm_hvc(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0),
		      kvm_vcpu_hvc_get_imm(vcpu));

	if (kvm_psci_call(vcpu))
		return 1;

	kvm_inject_undefined(vcpu);
	return 1;
}

static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	if (kvm_psci_call(vcpu))
		return 1;

	kvm_inject_undefined(vcpu);
	return 1;
}

static int handle_pabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	/* The hypervisor should never cause aborts */
	kvm_err("Prefetch Abort taken from Hyp mode at %#08lx (HSR: %#08x)\n",
		kvm_vcpu_get_hfar(vcpu), kvm_vcpu_get_hsr(vcpu));
	return -EFAULT;
}

static int handle_dabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	/* This is either an error in the ws. code or an external abort */
	kvm_err("Data Abort taken from Hyp mode at %#08lx (HSR: %#08x)\n",
		kvm_vcpu_get_hfar(vcpu), kvm_vcpu_get_hsr(vcpu));
	return -EFAULT;
}

/**
 * kvm_handle_wfi - handle a wait-for-interrupts instruction executed by a guest
 * @vcpu:	the vcpu pointer
 * @run:	the kvm_run structure pointer
 *
 * Simply sets the wait_for_interrupts flag on the vcpu structure, which will
 * halt execution of world-switches and schedule other host processes until
 * there is an incoming IRQ or FIQ to the VM.
 */
static int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	trace_kvm_wfi(*vcpu_pc(vcpu));
	kvm_vcpu_block(vcpu);
	return 1;
}

static exit_handle_fn arm_exit_handlers[] = {
	[HSR_EC_WFI]		= kvm_handle_wfi,
	[HSR_EC_CP15_32]	= kvm_handle_cp15_32,
	[HSR_EC_CP15_64]	= kvm_handle_cp15_64,
	[HSR_EC_CP14_MR]	= kvm_handle_cp14_access,
	[HSR_EC_CP14_LS]	= kvm_handle_cp14_load_store,
	[HSR_EC_CP14_64]	= kvm_handle_cp14_access,
	[HSR_EC_CP_0_13]	= kvm_handle_cp_0_13_access,
	[HSR_EC_CP10_ID]	= kvm_handle_cp10_id,
	[HSR_EC_SVC_HYP]	= handle_svc_hyp,
	[HSR_EC_HVC]		= handle_hvc,
	[HSR_EC_SMC]		= handle_smc,
	[HSR_EC_IABT]		= kvm_handle_guest_abort,
	[HSR_EC_IABT_HYP]	= handle_pabt_hyp,
	[HSR_EC_DABT]		= kvm_handle_guest_abort,
	[HSR_EC_DABT_HYP]	= handle_dabt_hyp,
};

static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
{
	u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);

	if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) ||
	    !arm_exit_handlers[hsr_ec]) {
		kvm_err("Unknown exception class: hsr: %#08x\n",
			(unsigned int)kvm_vcpu_get_hsr(vcpu));
		BUG();
	}

	return arm_exit_handlers[hsr_ec];
}

/*
 * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
 * proper exit to userspace.
 */
int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
		int exception_index)
{
	exit_handle_fn exit_handler;

	switch (exception_index) {
	case ARM_EXCEPTION_IRQ:
		return 1;
	case ARM_EXCEPTION_UNDEFINED:
		kvm_err("Undefined exception in Hyp mode at: %#08lx\n",
			kvm_vcpu_get_hyp_pc(vcpu));
		BUG();
		panic("KVM: Hypervisor undefined exception!\n");
	case ARM_EXCEPTION_DATA_ABORT:
	case ARM_EXCEPTION_PREF_ABORT:
	case ARM_EXCEPTION_HVC:
		/*
		 * See ARM ARM B1.14.1: "Hyp traps on instructions
		 * that fail their condition code check"
		 */
		if (!kvm_condition_valid(vcpu)) {
			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
			return 1;
		}

		exit_handler = kvm_get_exit_handler(vcpu);

		return exit_handler(vcpu, run);
	default:
		kvm_pr_unimpl("Unsupported exception type: %d",
			      exception_index);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return 0;
	}
}
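Note: kvm_get_exit_handler() is a classic bounds-checked dispatch table, a function-pointer array indexed by the HSR exception class with a range-and-NULL check before the call. A user-space sketch of the pattern (class numbers hypothetical):

/* Standalone sketch of the exit-dispatch pattern used above. */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

typedef int (*exit_handle_fn)(void);

static int handle_wfi(void) { printf("wfi\n"); return 1; }
static int handle_hvc(void) { printf("hvc\n"); return 1; }

static exit_handle_fn handlers[] = {
        [0x01] = handle_wfi,  /* hypothetical class numbers */
        [0x12] = handle_hvc,
};

static exit_handle_fn get_exit_handler(uint8_t hsr_ec)
{
        if (hsr_ec >= sizeof(handlers) / sizeof(handlers[0]) ||
            !handlers[hsr_ec]) {
                fprintf(stderr, "unknown exception class %#x\n", hsr_ec);
                abort();  /* stands in for BUG() */
        }
        return handlers[hsr_ec];
}

int main(void)
{
        return get_exit_handler(0x12)() > 0 ? 0 : 1;
}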
arch/arm/kvm/interrupts.S

@@ -35,15 +35,18 @@ __kvm_hyp_code_start:
 /********************************************************************
  * Flush per-VMID TLBs
  *
- * void __kvm_tlb_flush_vmid(struct kvm *kvm);
+ * void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
  *
  * We rely on the hardware to broadcast the TLB invalidation to all CPUs
  * inside the inner-shareable domain (which is the case for all v7
  * implementations).  If we come across a non-IS SMP implementation, we'll
  * have to use an IPI based mechanism. Until then, we stick to the simple
  * hardware assisted version.
+ *
+ * As v7 does not support flushing per IPA, just nuke the whole TLB
+ * instead, ignoring the ipa value.
  */
-ENTRY(__kvm_tlb_flush_vmid)
+ENTRY(__kvm_tlb_flush_vmid_ipa)
	push	{r2, r3}

	add	r0, r0, #KVM_VTTBR

@@ -60,7 +63,7 @@ ENTRY(__kvm_tlb_flush_vmid)
	pop	{r2, r3}
	bx	lr
-ENDPROC(__kvm_tlb_flush_vmid)
+ENDPROC(__kvm_tlb_flush_vmid_ipa)

 /********************************************************************
  * Flush TLBs and instruction caches of all CPUs inside the inner-shareable

@@ -235,9 +238,9 @@ ENTRY(kvm_call_hyp)
  * instruction is issued since all traps are disabled when running the host
  * kernel as per the Hyp-mode initialization at boot time.
  *
- * HVC instructions cause a trap to the vector page + offset 0x18 (see hyp_hvc
+ * HVC instructions cause a trap to the vector page + offset 0x14 (see hyp_hvc
  * below) when the HVC instruction is called from SVC mode (i.e. a guest or the
- * host kernel) and they cause a trap to the vector page + offset 0xc when HVC
+ * host kernel) and they cause a trap to the vector page + offset 0x8 when HVC
  * instructions are called from within Hyp-mode.
  *
  * Hyp-ABI: Calling HYP-mode functions from host (in SVC mode):
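Note: the corrected offsets follow the ARMv7 Hyp vector table layout; the sketch below is our reading of the ARM ARM (B1.8, Hyp mode vectors) and worth double-checking against the manual:

/* Sketch of the Hyp vector page layout; offsets in bytes from HVBAR. */
#include <stdio.h>

enum hyp_vector_offset {
        HYP_VEC_UNDEF    = 0x04,  /* undefined instruction from Hyp mode */
        HYP_VEC_HVC_HYP  = 0x08,  /* HVC issued while already in Hyp mode */
        HYP_VEC_PABT     = 0x0c,  /* prefetch abort from Hyp mode */
        HYP_VEC_DABT     = 0x10,  /* data abort from Hyp mode */
        HYP_VEC_HYP_TRAP = 0x14,  /* guest traps, incl. HVC from SVC (hyp_hvc) */
        HYP_VEC_IRQ      = 0x18,
        HYP_VEC_FIQ      = 0x1c,
};

int main(void)
{
        printf("HVC from SVC mode -> offset %#x\n", HYP_VEC_HYP_TRAP);
        printf("HVC from Hyp mode -> offset %#x\n", HYP_VEC_HVC_HYP);
        return 0;
}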
arch/arm/kvm/mmio.c

@@ -33,16 +33,16 @@
 */
 int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
-	__u32 *dest;
+	unsigned long *dest;
	unsigned int len;
	int mask;

	if (!run->mmio.is_write) {
		dest = vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt);
-		memset(dest, 0, sizeof(int));
+		*dest = 0;

		len = run->mmio.len;
-		if (len > 4)
+		if (len > sizeof(unsigned long))
			return -EINVAL;

		memcpy(dest, run->mmio.data, len);

@@ -50,7 +50,8 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
		trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
			       *((u64 *)run->mmio.data));

-		if (vcpu->arch.mmio_decode.sign_extend && len < 4) {
+		if (vcpu->arch.mmio_decode.sign_extend &&
+		    len < sizeof(unsigned long)) {
			mask = 1U << ((len * 8) - 1);
			*dest = (*dest ^ mask) - mask;
		}

@@ -65,40 +66,29 @@ static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
	unsigned long rt, len;
	bool is_write, sign_extend;

-	if ((vcpu->arch.hsr >> 8) & 1) {
+	if (kvm_vcpu_dabt_isextabt(vcpu)) {
		/* cache operation on I/O addr, tell guest unsupported */
-		kvm_inject_dabt(vcpu, vcpu->arch.hxfar);
+		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
		return 1;
	}

-	if ((vcpu->arch.hsr >> 7) & 1) {
+	if (kvm_vcpu_dabt_iss1tw(vcpu)) {
		/* page table accesses IO mem: tell guest to fix its TTBR */
-		kvm_inject_dabt(vcpu, vcpu->arch.hxfar);
+		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
		return 1;
	}

-	switch ((vcpu->arch.hsr >> 22) & 0x3) {
-	case 0:
-		len = 1;
-		break;
-	case 1:
-		len = 2;
-		break;
-	case 2:
-		len = 4;
-		break;
-	default:
-		kvm_err("Hardware is weird: SAS 0b11 is reserved\n");
-		return -EFAULT;
-	}
+	len = kvm_vcpu_dabt_get_as(vcpu);
+	if (unlikely(len < 0))
+		return len;

-	is_write = vcpu->arch.hsr & HSR_WNR;
-	sign_extend = vcpu->arch.hsr & HSR_SSE;
-	rt = (vcpu->arch.hsr & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
+	is_write = kvm_vcpu_dabt_iswrite(vcpu);
+	sign_extend = kvm_vcpu_dabt_issext(vcpu);
+	rt = kvm_vcpu_dabt_get_rd(vcpu);

	if (kvm_vcpu_reg_is_pc(vcpu, rt)) {
		/* IO memory trying to read/write pc */
-		kvm_inject_pabt(vcpu, vcpu->arch.hxfar);
+		kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
		return 1;
	}

@@ -112,7 +102,7 @@ static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
	 * The MMIO instruction is emulated and should not be re-executed
	 * in the guest.
	 */
-	kvm_skip_instr(vcpu, (vcpu->arch.hsr >> 25) & 1);
+	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
	return 0;
 }

@@ -130,7 +120,7 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
	 * space do its magic.
	 */

-	if (vcpu->arch.hsr & HSR_ISV) {
+	if (kvm_vcpu_dabt_isvalid(vcpu)) {
		ret = decode_hsr(vcpu, fault_ipa, &mmio);
		if (ret)
			return ret;
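Note: the sign extension in kvm_handle_mmio_return() uses the xor/subtract trick: flipping the sign bit and then subtracting it back propagates it through the upper bits. A standalone sketch with worked values:

/* Standalone sketch of the xor/subtract sign extension used above. */
#include <stdio.h>

static long sign_extend_load(unsigned long v, unsigned int len)
{
        unsigned long mask = 1UL << (len * 8 - 1);
        return (long)((v ^ mask) - mask);
}

int main(void)
{
        /* A 1-byte MMIO read returning 0xF0 is -16 when sign-extended. */
        printf("%ld\n", sign_extend_load(0xF0, 1));  /* prints -16 */
        printf("%ld\n", sign_extend_load(0x70, 1));  /* prints 112 */
        return 0;
}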
arch/arm/kvm/mmu.c

(diff collapsed in the page; 88 additions, 96 deletions — not shown)
arch/arm/kvm/vgic.c

@@ -1484,7 +1484,7 @@ int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 addr)
	if (addr & ~KVM_PHYS_MASK)
		return -E2BIG;

-	if (addr & ~PAGE_MASK)
+	if (addr & (SZ_4K - 1))
		return -EINVAL;

	mutex_lock(&kvm->lock);
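Note: on 32-bit ARM PAGE_SIZE is 4K, so the old and new tests agree there; the rewrite presumably makes the GIC's 4K alignment requirement explicit rather than tied to the host page size. A sketch of where the two tests would diverge (hypothetical 64K pages):

/* Standalone sketch: 4K alignment vs page alignment. */
#include <stdio.h>

#define SZ_4K      0x1000UL
#define PAGE_SIZE  0x10000UL             /* hypothetical 64K pages */
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
        unsigned long addr = 0x8002000;  /* 4K-aligned, not 64K-aligned */

        printf("4K-aligned:   %s\n", (addr & (SZ_4K - 1)) ? "no" : "yes");
        printf("page-aligned: %s\n", (addr & ~PAGE_MASK) ? "no" : "yes");
        return 0;
}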