Commit 300dca68, authored Jul 28, 2020 by Marc Zyngier

Merge branch 'kvm-arm64/pre-nv-5.9' into kvmarm-master/next-WIP

Signed-off-by: Marc Zyngier <maz@kernel.org>

Parents: c199a009, 41ce82f6

Showing 30 changed files with 696 additions and 425 deletions (+696, -425)
arch/arm64/include/asm/cpucaps.h              +2    -1
arch/arm64/include/asm/kvm_asm.h              +5    -3
arch/arm64/include/asm/kvm_emulate.h          +8    -29
arch/arm64/include/asm/kvm_host.h             +56   -15
arch/arm64/include/asm/kvm_mmu.h              +8    -8
arch/arm64/include/asm/pgtable-hwdef.h        +2    -0
arch/arm64/include/asm/stage2_pgtable.h       +9    -0
arch/arm64/include/asm/sysreg.h               +1    -0
arch/arm64/include/asm/tlbflush.h             +45   -0
arch/arm64/kernel/asm-offsets.c               +1    -2
arch/arm64/kernel/cpufeature.c                +11   -0
arch/arm64/kvm/arch_timer.c                   +123  -34
arch/arm64/kvm/arm.c                          +14   -26
arch/arm64/kvm/fpsimd.c                       +2    -4
arch/arm64/kvm/guest.c                        +65   -14
arch/arm64/kvm/hyp/entry.S                    +1    -2
arch/arm64/kvm/hyp/include/hyp/debug-sr.h     +10   -12
arch/arm64/kvm/hyp/include/hyp/switch.h       +20   -18
arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h    +71   -81
arch/arm64/kvm/hyp/nvhe/switch.c              +3    -3
arch/arm64/kvm/hyp/nvhe/tlb.c                 +18   -18
arch/arm64/kvm/hyp/vhe/switch.c               +1    -1
arch/arm64/kvm/hyp/vhe/tlb.c                  +15   -14
arch/arm64/kvm/inject_fault.c                 +1    -1
arch/arm64/kvm/mmu.c                          +159  -122
arch/arm64/kvm/regmap.c                       +33   -4
arch/arm64/kvm/reset.c                        +1    -1
arch/arm64/kvm/sys_regs.c                     +2    -0
arch/arm64/kvm/trace_arm.h                    +4    -4
include/kvm/arm_arch_timer.h                  +5    -8
arch/arm64/include/asm/cpucaps.h
@@ -62,7 +62,8 @@
 #define ARM64_HAS_GENERIC_AUTH			52
 #define ARM64_HAS_32BIT_EL1			53
 #define ARM64_BTI				54
+#define ARM64_HAS_ARMv8_4_TTL			55

-#define ARM64_NCAPS				55
+#define ARM64_NCAPS				56

 #endif /* __ASM_CPUCAPS_H */
arch/arm64/include/asm/kvm_asm.h
@@ -95,6 +95,7 @@ extern void *__nvhe_undefined_symbol;

 struct kvm;
 struct kvm_vcpu;
+struct kvm_s2_mmu;

 DECLARE_KVM_NVHE_SYM(__kvm_hyp_init);
 DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
@@ -108,9 +109,10 @@ DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
 #endif

 extern void __kvm_flush_vm_context(void);
-extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
-extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
-extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
+extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
+				     int level);
+extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);
+extern void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu);

 extern void __kvm_timer_set_cntvoff(u64 cntvoff);
arch/arm64/include/asm/kvm_emulate.h
@@ -124,33 +124,12 @@ static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)

 static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
 {
-	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
-}
-
-static inline unsigned long *__vcpu_elr_el1(const struct kvm_vcpu *vcpu)
-{
-	return (unsigned long *)&vcpu_gp_regs(vcpu)->elr_el1;
-}
-
-static inline unsigned long vcpu_read_elr_el1(const struct kvm_vcpu *vcpu)
-{
-	if (vcpu->arch.sysregs_loaded_on_cpu)
-		return read_sysreg_el1(SYS_ELR);
-	else
-		return *__vcpu_elr_el1(vcpu);
-}
-
-static inline void vcpu_write_elr_el1(const struct kvm_vcpu *vcpu, unsigned long v)
-{
-	if (vcpu->arch.sysregs_loaded_on_cpu)
-		write_sysreg_el1(v, SYS_ELR);
-	else
-		*__vcpu_elr_el1(vcpu) = v;
+	return (unsigned long *)&vcpu_gp_regs(vcpu)->pc;
 }

 static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
 {
-	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate;
+	return (unsigned long *)&vcpu_gp_regs(vcpu)->pstate;
 }

 static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
@@ -179,14 +158,14 @@ static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
 static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
						   u8 reg_num)
 {
-	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num];
+	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs[reg_num];
 }

 static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
					  unsigned long val)
 {
	if (reg_num != 31)
-		vcpu_gp_regs(vcpu)->regs.regs[reg_num] = val;
+		vcpu_gp_regs(vcpu)->regs[reg_num] = val;
 }

 static inline unsigned long vcpu_read_spsr(const struct kvm_vcpu *vcpu)
@@ -197,7 +176,7 @@ static inline unsigned long vcpu_read_spsr(const struct kvm_vcpu *vcpu)
	if (vcpu->arch.sysregs_loaded_on_cpu)
		return read_sysreg_el1(SYS_SPSR);
	else
-		return vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1];
+		return __vcpu_sys_reg(vcpu, SPSR_EL1);
 }

 static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
@@ -210,7 +189,7 @@ static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
	if (vcpu->arch.sysregs_loaded_on_cpu)
		write_sysreg_el1(v, SYS_SPSR);
	else
-		vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1] = v;
+		__vcpu_sys_reg(vcpu, SPSR_EL1) = v;
 }

 /*
@@ -519,11 +498,11 @@ static __always_inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
 static __always_inline void __kvm_skip_instr(struct kvm_vcpu *vcpu)
 {
	*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
-	vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(SYS_SPSR);
+	vcpu_gp_regs(vcpu)->pstate = read_sysreg_el2(SYS_SPSR);

	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));

-	write_sysreg_el2(vcpu->arch.ctxt.gp_regs.regs.pstate, SYS_SPSR);
+	write_sysreg_el2(vcpu_gp_regs(vcpu)->pstate, SYS_SPSR);
	write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR);
 }
arch/arm64/include/asm/kvm_host.h
@@ -66,19 +66,34 @@ struct kvm_vmid {
	u32    vmid;
 };

-struct kvm_arch {
+struct kvm_s2_mmu {
	struct kvm_vmid vmid;

-	/* stage2 entry level table */
-	pgd_t *pgd;
-	phys_addr_t pgd_phys;
-
-	/* VTCR_EL2 value for this VM */
-	u64    vtcr;
+	/*
+	 * stage2 entry level table
+	 *
+	 * Two kvm_s2_mmu structures in the same VM can point to the same
+	 * pgd here. This happens when running a guest using a
+	 * translation regime that isn't affected by its own stage-2
+	 * translation, such as a non-VHE hypervisor running at vEL2, or
+	 * for vEL1/EL0 with vHCR_EL2.VM == 0. In that case, we use the
+	 * canonical stage-2 page tables.
+	 */
+	pgd_t		*pgd;
+	phys_addr_t	pgd_phys;

	/* The last vcpu id that ran on each physical CPU */
	int __percpu *last_vcpu_ran;

+	struct kvm *kvm;
+};
+
+struct kvm_arch {
+	struct kvm_s2_mmu mmu;
+
+	/* VTCR_EL2 value for this VM */
+	u64    vtcr;
+
	/* The maximum number of vCPUs depends on the used GIC model */
	int max_vcpus;
@@ -170,6 +185,16 @@ enum vcpu_sysreg {
	APGAKEYLO_EL1,
	APGAKEYHI_EL1,

+	ELR_EL1,
+	SP_EL1,
+	SPSR_EL1,
+
+	CNTVOFF_EL2,
+	CNTV_CVAL_EL0,
+	CNTV_CTL_EL0,
+	CNTP_CVAL_EL0,
+	CNTP_CTL_EL0,
+
	/* 32bit specific registers. Keep them at the end of the range */
	DACR32_EL2,	/* Domain Access Control Register */
	IFSR32_EL2,	/* Instruction Fault Status Register */
@@ -221,7 +246,15 @@ enum vcpu_sysreg {
 #define NR_COPRO_REGS	(NR_SYS_REGS * 2)

 struct kvm_cpu_context {
-	struct kvm_regs	gp_regs;
+	struct user_pt_regs regs;	/* sp = sp_el0 */
+
+	u64	spsr_abt;
+	u64	spsr_und;
+	u64	spsr_irq;
+	u64	spsr_fiq;
+
+	struct user_fpsimd_state fp_regs;
+
	union {
		u64 sys_regs[NR_SYS_REGS];
		u32 copro[NR_COPRO_REGS];
@@ -254,6 +287,9 @@ struct kvm_vcpu_arch {
	void *sve_state;
	unsigned int sve_max_vl;

+	/* Stage 2 paging state used by the hardware on next switch */
+	struct kvm_s2_mmu *hw_mmu;
+
	/* HYP configuration */
	u64 hcr_el2;
	u32 mdcr_el2;
@@ -384,15 +420,20 @@ struct kvm_vcpu_arch {
					  system_supports_generic_auth()) && \
					 ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH))

-#define vcpu_gp_regs(v)		(&(v)->arch.ctxt.gp_regs)
+#define vcpu_gp_regs(v)		(&(v)->arch.ctxt.regs)

 /*
- * Only use __vcpu_sys_reg if you know you want the memory backed version of a
- * register, and not the one most recently accessed by a running VCPU.  For
- * example, for userspace access or for system registers that are never context
- * switched, but only emulated.
+ * Only use __vcpu_sys_reg/ctxt_sys_reg if you know you want the
+ * memory backed version of a register, and not the one most recently
+ * accessed by a running VCPU.  For example, for userspace access or
+ * for system registers that are never context switched, but only
+ * emulated.
  */
-#define __vcpu_sys_reg(v,r)	((v)->arch.ctxt.sys_regs[(r)])
+#define __ctxt_sys_reg(c,r)	(&(c)->sys_regs[(r)])
+
+#define ctxt_sys_reg(c,r)	(*__ctxt_sys_reg(c,r))
+
+#define __vcpu_sys_reg(v,r)	(ctxt_sys_reg(&(v)->arch.ctxt, (r)))

 u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
 void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);
@@ -538,7 +579,7 @@ DECLARE_PER_CPU(kvm_host_data_t, kvm_host_data);
 static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
 {
	/* The host's MPIDR is immutable, so let's set it up at boot time */
-	cpu_ctxt->sys_regs[MPIDR_EL1] = read_cpuid_mpidr();
+	ctxt_sys_reg(cpu_ctxt, MPIDR_EL1) = read_cpuid_mpidr();
 }

 static inline bool kvm_arch_requires_vhe(void)
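Note (illustrative, not part of the commit): the __ctxt_sys_reg / ctxt_sys_reg / __vcpu_sys_reg macros above layer an address-of accessor, an lvalue accessor, and a vcpu-level convenience wrapper over one backing array. The standalone sketch below mimics that layering with made-up types and register names so the composition can be compiled and poked at outside the kernel.

#include <stdio.h>

enum { MPIDR_EL1, SCTLR_EL1, NR_SYS_REGS };	/* illustrative subset */

struct cpu_context {
	unsigned long long sys_regs[NR_SYS_REGS];
};

struct vcpu {
	struct cpu_context ctxt;
};

/* pointer to the memory-backed copy of register r in context c */
#define __ctxt_sys_reg(c, r)	(&(c)->sys_regs[(r)])
/* lvalue form: can be read or assigned */
#define ctxt_sys_reg(c, r)	(*__ctxt_sys_reg(c, r))
/* convenience wrapper taking a vcpu instead of a bare context */
#define __vcpu_sys_reg(v, r)	(ctxt_sys_reg(&(v)->ctxt, (r)))

int main(void)
{
	struct vcpu v = { { { 0 } } };

	__vcpu_sys_reg(&v, SCTLR_EL1) = 0x30d00800ULL;	/* write via lvalue */
	printf("SCTLR_EL1 = %#llx\n", ctxt_sys_reg(&v.ctxt, SCTLR_EL1));
	printf("stored at  %p\n", (void *)__ctxt_sys_reg(&v.ctxt, SCTLR_EL1));
	return 0;
}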
arch/arm64/include/asm/kvm_mmu.h
@@ -134,8 +134,8 @@ int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
 void free_hyp_pgds(void);

 void stage2_unmap_vm(struct kvm *kvm);
-int kvm_alloc_stage2_pgd(struct kvm *kvm);
-void kvm_free_stage2_pgd(struct kvm *kvm);
+int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu);
+void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu);
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);
@@ -577,13 +577,13 @@ static inline u64 kvm_vttbr_baddr_mask(struct kvm *kvm)
	return vttbr_baddr_mask(kvm_phys_shift(kvm), kvm_stage2_levels(kvm));
 }

-static __always_inline u64 kvm_get_vttbr(struct kvm *kvm)
+static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
 {
-	struct kvm_vmid *vmid = &kvm->arch.vmid;
+	struct kvm_vmid *vmid = &mmu->vmid;
	u64 vmid_field, baddr;
	u64 cnp = system_supports_cnp() ? VTTBR_CNP_BIT : 0;

-	baddr = kvm->arch.pgd_phys;
+	baddr = mmu->pgd_phys;
	vmid_field = (u64)vmid->vmid << VTTBR_VMID_SHIFT;
	return kvm_phys_to_vttbr(baddr) | vmid_field | cnp;
 }
@@ -592,10 +592,10 @@ static __always_inline u64 kvm_get_vttbr(struct kvm *kvm)
  * Must be called from hyp code running at EL2 with an updated VTTBR
  * and interrupts disabled.
  */
-static __always_inline void __load_guest_stage2(struct kvm *kvm)
+static __always_inline void __load_guest_stage2(struct kvm_s2_mmu *mmu)
 {
-	write_sysreg(kvm->arch.vtcr, vtcr_el2);
-	write_sysreg(kvm_get_vttbr(kvm), vttbr_el2);
+	write_sysreg(kern_hyp_va(mmu->kvm)->arch.vtcr, vtcr_el2);
+	write_sysreg(kvm_get_vttbr(mmu), vttbr_el2);

	/*
	 * ARM errata 1165522 and 1530923 require the actual execution of the
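Note (illustrative, not part of the commit): kvm_get_vttbr() above is plain bit composition -- the stage-2 pgd physical address OR'd with the VMID field and an optional CnP bit. A minimal sketch of that composition, using VTTBR_EL2's documented layout (VMID in bits 63:48, CnP in bit 0) and invented input values:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define VTTBR_VMID_SHIFT	48
#define VTTBR_CNP_BIT		(1ULL << 0)

static uint64_t make_vttbr(uint64_t pgd_phys, uint16_t vmid, bool have_cnp)
{
	uint64_t vmid_field = (uint64_t)vmid << VTTBR_VMID_SHIFT;
	uint64_t cnp = have_cnp ? VTTBR_CNP_BIT : 0;

	/* the BADDR field simply carries the stage-2 pgd physical address */
	return pgd_phys | vmid_field | cnp;
}

int main(void)
{
	/* illustrative values only */
	uint64_t vttbr = make_vttbr(0x80001000ULL, 42, true);

	printf("VTTBR_EL2 = %#018llx\n", (unsigned long long)vttbr);
	return 0;
}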
arch/arm64/include/asm/pgtable-hwdef.h
@@ -178,10 +178,12 @@
 #define PTE_S2_RDONLY		(_AT(pteval_t, 1) << 6)   /* HAP[2:1] */
 #define PTE_S2_RDWR		(_AT(pteval_t, 3) << 6)   /* HAP[2:1] */
 #define PTE_S2_XN		(_AT(pteval_t, 2) << 53)  /* XN[1:0] */
+#define PTE_S2_SW_RESVD		(_AT(pteval_t, 15) << 55) /* Reserved for SW */

 #define PMD_S2_RDONLY		(_AT(pmdval_t, 1) << 6)   /* HAP[2:1] */
 #define PMD_S2_RDWR		(_AT(pmdval_t, 3) << 6)   /* HAP[2:1] */
 #define PMD_S2_XN		(_AT(pmdval_t, 2) << 53)  /* XN[1:0] */
+#define PMD_S2_SW_RESVD		(_AT(pmdval_t, 15) << 55) /* Reserved for SW */

 #define PUD_S2_RDONLY		(_AT(pudval_t, 1) << 6)   /* HAP[2:1] */
 #define PUD_S2_RDWR		(_AT(pudval_t, 3) << 6)   /* HAP[2:1] */
arch/arm64/include/asm/stage2_pgtable.h
@@ -256,4 +256,13 @@ stage2_pgd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
	return (boundary - 1 < end - 1) ? boundary : end;
 }

+/*
+ * Level values for the ARMv8.4-TTL extension, mapping PUD/PMD/PTE and
+ * the architectural page-table level.
+ */
+#define S2_NO_LEVEL_HINT	0
+#define S2_PUD_LEVEL		1
+#define S2_PMD_LEVEL		2
+#define S2_PTE_LEVEL		3
+
 #endif	/* __ARM64_S2_PGTABLE_H_ */
arch/arm64/include/asm/sysreg.h
@@ -746,6 +746,7 @@
 /* id_aa64mmfr2 */
 #define ID_AA64MMFR2_E0PD_SHIFT		60
+#define ID_AA64MMFR2_TTL_SHIFT		48
 #define ID_AA64MMFR2_FWB_SHIFT		40
 #define ID_AA64MMFR2_AT_SHIFT		32
 #define ID_AA64MMFR2_LVA_SHIFT		16
arch/arm64/include/asm/tlbflush.h
@@ -10,6 +10,7 @@
 #ifndef __ASSEMBLY__

+#include <linux/bitfield.h>
 #include <linux/mm_types.h>
 #include <linux/sched.h>
 #include <asm/cputype.h>
@@ -59,6 +60,50 @@
		__ta;						\
	})

+/*
+ * Level-based TLBI operations.
+ *
+ * When ARMv8.4-TTL exists, TLBI operations take an additional hint for
+ * the level at which the invalidation must take place. If the level is
+ * wrong, no invalidation may take place. In the case where the level
+ * cannot be easily determined, a 0 value for the level parameter will
+ * perform a non-hinted invalidation.
+ *
+ * For Stage-2 invalidation, use the level values provided to that effect
+ * in asm/stage2_pgtable.h.
+ */
+#define TLBI_TTL_MASK		GENMASK_ULL(47, 44)
+#define TLBI_TTL_TG_4K		1
+#define TLBI_TTL_TG_16K		2
+#define TLBI_TTL_TG_64K		3
+
+#define __tlbi_level(op, addr, level)					\
+	do {								\
+		u64 arg = addr;						\
+									\
+		if (cpus_have_const_cap(ARM64_HAS_ARMv8_4_TTL) &&	\
+		    level) {						\
+			u64 ttl = level & 3;				\
+									\
+			switch (PAGE_SIZE) {				\
+			case SZ_4K:					\
+				ttl |= TLBI_TTL_TG_4K << 2;		\
+				break;					\
+			case SZ_16K:					\
+				ttl |= TLBI_TTL_TG_16K << 2;		\
+				break;					\
+			case SZ_64K:					\
+				ttl |= TLBI_TTL_TG_64K << 2;		\
+				break;					\
+			}						\
+									\
+			arg &= ~TLBI_TTL_MASK;				\
+			arg |= FIELD_PREP(TLBI_TTL_MASK, ttl);		\
+		}							\
+									\
+		__tlbi(op, arg);					\
+	} while (0)
+
 /*
  *	TLB Invalidation
  *	================
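Note (illustrative, not part of the commit): __tlbi_level() above packs the translation granule into bits 3:2 of the TTL hint, the page-table level into bits 1:0, and places the 4-bit hint in bits 47:44 of the TLBI argument. A standalone sketch of that packing, with the granule hard-coded to 4K and a made-up address:

#include <stdio.h>
#include <stdint.h>

#define TLBI_TTL_SHIFT		44
#define TLBI_TTL_MASK		(0xfULL << TLBI_TTL_SHIFT)
#define TLBI_TTL_TG_4K		1

static uint64_t tlbi_arg_with_level(uint64_t addr, int level)
{
	uint64_t arg = addr;

	if (level) {				/* level 0 means "no hint" */
		uint64_t ttl = level & 3;	/* page-table level, 2 bits */

		ttl |= TLBI_TTL_TG_4K << 2;	/* translation granule, 2 bits */
		arg &= ~TLBI_TTL_MASK;
		arg |= ttl << TLBI_TTL_SHIFT;	/* i.e. FIELD_PREP(TLBI_TTL_MASK, ttl) */
	}
	return arg;
}

int main(void)
{
	/* a made-up IPA-derived address with a level-3 (PTE) hint */
	printf("arg = %#018llx\n",
	       (unsigned long long)tlbi_arg_with_level(0x80123ULL, 3));
	return 0;
}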
arch/arm64/kernel/asm-offsets.c
@@ -102,13 +102,12 @@ int main(void)
   DEFINE(VCPU_FAULT_DISR,	offsetof(struct kvm_vcpu, arch.fault.disr_el1));
   DEFINE(VCPU_WORKAROUND_FLAGS,	offsetof(struct kvm_vcpu, arch.workaround_flags));
   DEFINE(VCPU_HCR_EL2,		offsetof(struct kvm_vcpu, arch.hcr_el2));
-  DEFINE(CPU_GP_REGS,		offsetof(struct kvm_cpu_context, gp_regs));
+  DEFINE(CPU_USER_PT_REGS,	offsetof(struct kvm_cpu_context, regs));
   DEFINE(CPU_APIAKEYLO_EL1,	offsetof(struct kvm_cpu_context, sys_regs[APIAKEYLO_EL1]));
   DEFINE(CPU_APIBKEYLO_EL1,	offsetof(struct kvm_cpu_context, sys_regs[APIBKEYLO_EL1]));
   DEFINE(CPU_APDAKEYLO_EL1,	offsetof(struct kvm_cpu_context, sys_regs[APDAKEYLO_EL1]));
   DEFINE(CPU_APDBKEYLO_EL1,	offsetof(struct kvm_cpu_context, sys_regs[APDBKEYLO_EL1]));
   DEFINE(CPU_APGAKEYLO_EL1,	offsetof(struct kvm_cpu_context, sys_regs[APGAKEYLO_EL1]));
-  DEFINE(CPU_USER_PT_REGS,	offsetof(struct kvm_regs, regs));
   DEFINE(HOST_CONTEXT_VCPU,	offsetof(struct kvm_cpu_context, __hyp_running_vcpu));
   DEFINE(HOST_DATA_CONTEXT,	offsetof(struct kvm_host_data, host_ctxt));
 #endif
arch/arm64/kernel/cpufeature.c
@@ -323,6 +323,7 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
 static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_E0PD_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_TTL_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_FWB_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_AT_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LVA_SHIFT, 4, 0),
@@ -1882,6 +1883,16 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
		.matches = has_cpuid_feature,
		.cpu_enable = cpu_has_fwb,
	},
+	{
+		.desc = "ARMv8.4 Translation Table Level",
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
+		.capability = ARM64_HAS_ARMv8_4_TTL,
+		.sys_reg = SYS_ID_AA64MMFR2_EL1,
+		.sign = FTR_UNSIGNED,
+		.field_pos = ID_AA64MMFR2_TTL_SHIFT,
+		.min_field_value = 1,
+		.matches = has_cpuid_feature,
+	},
 #ifdef CONFIG_ARM64_HW_AFDBM
	{
		/*
arch/arm64/kvm/arch_timer.c
@@ -51,6 +51,93 @@ static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
			      struct arch_timer_context *timer,
			      enum kvm_arch_timer_regs treg);

+u32 timer_get_ctl(struct arch_timer_context *ctxt)
+{
+	struct kvm_vcpu *vcpu = ctxt->vcpu;
+
+	switch(arch_timer_ctx_index(ctxt)) {
+	case TIMER_VTIMER:
+		return __vcpu_sys_reg(vcpu, CNTV_CTL_EL0);
+	case TIMER_PTIMER:
+		return __vcpu_sys_reg(vcpu, CNTP_CTL_EL0);
+	default:
+		WARN_ON(1);
+		return 0;
+	}
+}
+
+u64 timer_get_cval(struct arch_timer_context *ctxt)
+{
+	struct kvm_vcpu *vcpu = ctxt->vcpu;
+
+	switch(arch_timer_ctx_index(ctxt)) {
+	case TIMER_VTIMER:
+		return __vcpu_sys_reg(vcpu, CNTV_CVAL_EL0);
+	case TIMER_PTIMER:
+		return __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0);
+	default:
+		WARN_ON(1);
+		return 0;
+	}
+}
+
+static u64 timer_get_offset(struct arch_timer_context *ctxt)
+{
+	struct kvm_vcpu *vcpu = ctxt->vcpu;
+
+	switch(arch_timer_ctx_index(ctxt)) {
+	case TIMER_VTIMER:
+		return __vcpu_sys_reg(vcpu, CNTVOFF_EL2);
+	default:
+		return 0;
+	}
+}
+
+static void timer_set_ctl(struct arch_timer_context *ctxt, u32 ctl)
+{
+	struct kvm_vcpu *vcpu = ctxt->vcpu;
+
+	switch(arch_timer_ctx_index(ctxt)) {
+	case TIMER_VTIMER:
+		__vcpu_sys_reg(vcpu, CNTV_CTL_EL0) = ctl;
+		break;
+	case TIMER_PTIMER:
+		__vcpu_sys_reg(vcpu, CNTP_CTL_EL0) = ctl;
+		break;
+	default:
+		WARN_ON(1);
+	}
+}
+
+static void timer_set_cval(struct arch_timer_context *ctxt, u64 cval)
+{
+	struct kvm_vcpu *vcpu = ctxt->vcpu;
+
+	switch(arch_timer_ctx_index(ctxt)) {
+	case TIMER_VTIMER:
+		__vcpu_sys_reg(vcpu, CNTV_CVAL_EL0) = cval;
+		break;
+	case TIMER_PTIMER:
+		__vcpu_sys_reg(vcpu, CNTP_CVAL_EL0) = cval;
+		break;
+	default:
+		WARN_ON(1);
+	}
+}
+
+static void timer_set_offset(struct arch_timer_context *ctxt, u64 offset)
+{
+	struct kvm_vcpu *vcpu = ctxt->vcpu;
+
+	switch(arch_timer_ctx_index(ctxt)) {
+	case TIMER_VTIMER:
+		__vcpu_sys_reg(vcpu, CNTVOFF_EL2) = offset;
+		break;
+	default:
+		WARN(offset, "timer %ld\n", arch_timer_ctx_index(ctxt));
+	}
+}
+
 u64 kvm_phys_timer_read(void)
 {
	return timecounter->cc->read(timecounter->cc);
@@ -124,8 +211,8 @@ static u64 kvm_timer_compute_delta(struct arch_timer_context *timer_ctx)
 {
	u64 cval, now;

-	cval = timer_ctx->cnt_cval;
-	now = kvm_phys_timer_read() - timer_ctx->cntvoff;
+	cval = timer_get_cval(timer_ctx);
+	now = kvm_phys_timer_read() - timer_get_offset(timer_ctx);

	if (now < cval) {
		u64 ns;
@@ -144,8 +231,8 @@ static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx)
 {
	WARN_ON(timer_ctx && timer_ctx->loaded);
	return timer_ctx &&
-	       !(timer_ctx->cnt_ctl & ARCH_TIMER_CTRL_IT_MASK) &&
-		(timer_ctx->cnt_ctl & ARCH_TIMER_CTRL_ENABLE);
+		((timer_get_ctl(timer_ctx) &
+		  (ARCH_TIMER_CTRL_IT_MASK | ARCH_TIMER_CTRL_ENABLE)) == ARCH_TIMER_CTRL_ENABLE);
 }

 /*
@@ -256,8 +343,8 @@ static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx)
	if (!kvm_timer_irq_can_fire(timer_ctx))
		return false;

-	cval = timer_ctx->cnt_cval;
-	now = kvm_phys_timer_read() - timer_ctx->cntvoff;
+	cval = timer_get_cval(timer_ctx);
+	now = kvm_phys_timer_read() - timer_get_offset(timer_ctx);

	return cval <= now;
 }
@@ -350,8 +437,8 @@ static void timer_save_state(struct arch_timer_context *ctx)
	switch (index) {
	case TIMER_VTIMER:
-		ctx->cnt_ctl = read_sysreg_el0(SYS_CNTV_CTL);
-		ctx->cnt_cval = read_sysreg_el0(SYS_CNTV_CVAL);
+		timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTV_CTL));
+		timer_set_cval(ctx, read_sysreg_el0(SYS_CNTV_CVAL));

		/* Disable the timer */
		write_sysreg_el0(0, SYS_CNTV_CTL);
@@ -359,8 +446,8 @@ static void timer_save_state(struct arch_timer_context *ctx)
		break;
	case TIMER_PTIMER:
-		ctx->cnt_ctl = read_sysreg_el0(SYS_CNTP_CTL);
-		ctx->cnt_cval = read_sysreg_el0(SYS_CNTP_CVAL);
+		timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTP_CTL));
+		timer_set_cval(ctx, read_sysreg_el0(SYS_CNTP_CVAL));

		/* Disable the timer */
		write_sysreg_el0(0, SYS_CNTP_CTL);
@@ -429,14 +516,14 @@ static void timer_restore_state(struct arch_timer_context *ctx)
	switch (index) {
	case TIMER_VTIMER:
-		write_sysreg_el0(ctx->cnt_cval, SYS_CNTV_CVAL);
+		write_sysreg_el0(timer_get_cval(ctx), SYS_CNTV_CVAL);
		isb();
-		write_sysreg_el0(ctx->cnt_ctl, SYS_CNTV_CTL);
+		write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTV_CTL);
		break;
	case TIMER_PTIMER:
-		write_sysreg_el0(ctx->cnt_cval, SYS_CNTP_CVAL);
+		write_sysreg_el0(timer_get_cval(ctx), SYS_CNTP_CVAL);
		isb();
-		write_sysreg_el0(ctx->cnt_ctl, SYS_CNTP_CTL);
+		write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTP_CTL);
		break;
	case NR_KVM_TIMERS:
		BUG();
@@ -528,7 +615,7 @@ void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
		kvm_timer_vcpu_load_nogic(vcpu);
	}

-	set_cntvoff(map.direct_vtimer->cntvoff);
+	set_cntvoff(timer_get_offset(map.direct_vtimer));

	kvm_timer_unblocking(vcpu);
@@ -615,7 +702,7 @@ static void unmask_vtimer_irq_user(struct kvm_vcpu *vcpu)
	}
 }

-void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
+void kvm_timer_sync_user(struct kvm_vcpu *vcpu)
 {
	struct arch_timer_cpu *timer = vcpu_timer(vcpu);
@@ -639,8 +726,8 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
	 * resets the timer to be disabled and unmasked and is compliant with
	 * the ARMv7 architecture.
	 */
-	vcpu_vtimer(vcpu)->cnt_ctl = 0;
-	vcpu_ptimer(vcpu)->cnt_ctl = 0;
+	timer_set_ctl(vcpu_vtimer(vcpu), 0);
+	timer_set_ctl(vcpu_ptimer(vcpu), 0);

	if (timer->enabled) {
		kvm_timer_update_irq(vcpu, false, vcpu_vtimer(vcpu));
@@ -668,13 +755,13 @@ static void update_vtimer_cntvoff(struct kvm_vcpu *vcpu, u64 cntvoff)
	mutex_lock(&kvm->lock);
	kvm_for_each_vcpu(i, tmp, kvm)
-		vcpu_vtimer(tmp)->cntvoff = cntvoff;
+		timer_set_offset(vcpu_vtimer(tmp), cntvoff);

	/*
	 * When called from the vcpu create path, the CPU being created is not
	 * included in the loop above, so we just set it here as well.
	 */
-	vcpu_vtimer(vcpu)->cntvoff = cntvoff;
+	timer_set_offset(vcpu_vtimer(vcpu), cntvoff);
	mutex_unlock(&kvm->lock);
 }
@@ -684,9 +771,12 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);

+	vtimer->vcpu = vcpu;
+	ptimer->vcpu = vcpu;
+
	/* Synchronize cntvoff across all vtimers of a VM. */
	update_vtimer_cntvoff(vcpu, kvm_phys_timer_read());
-	ptimer->cntvoff = 0;
+	timer_set_offset(ptimer, 0);

	hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
	timer->bg_timer.function = kvm_bg_timer_expire;
@@ -704,9 +794,6 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
	vtimer->host_timer_irq_flags = host_vtimer_irq_flags;
	ptimer->host_timer_irq_flags = host_ptimer_irq_flags;
-
-	vtimer->vcpu = vcpu;
-	ptimer->vcpu = vcpu;
 }

 static void kvm_timer_init_interrupt(void *info)
@@ -756,10 +843,12 @@ static u64 read_timer_ctl(struct arch_timer_context *timer)
	 * UNKNOWN when ENABLE bit is 0, so we chose to set ISTATUS bit
	 * regardless of ENABLE bit for our implementation convenience.
	 */
+	u32 ctl = timer_get_ctl(timer);
+
	if (!kvm_timer_compute_delta(timer))
-		return timer->cnt_ctl | ARCH_TIMER_CTRL_IT_STAT;
-	else
-		return timer->cnt_ctl;
+		ctl |= ARCH_TIMER_CTRL_IT_STAT;
+
+	return ctl;
 }

 u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
@@ -795,8 +884,8 @@ static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
	switch (treg) {
	case TIMER_REG_TVAL:
-		val = timer->cnt_cval - kvm_phys_timer_read() + timer->cntvoff;
-		val &= lower_32_bits(val);
+		val = timer_get_cval(timer) - kvm_phys_timer_read() + timer_get_offset(timer);
+		val = lower_32_bits(val);
		break;

	case TIMER_REG_CTL:
@@ -804,11 +893,11 @@ static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
		break;

	case TIMER_REG_CVAL:
-		val = timer->cnt_cval;
+		val = timer_get_cval(timer);
		break;

	case TIMER_REG_CNT:
-		val = kvm_phys_timer_read() - timer->cntvoff;
+		val = kvm_phys_timer_read() - timer_get_offset(timer);
		break;

	default:
@@ -842,15 +931,15 @@ static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
 {
	switch (treg) {
	case TIMER_REG_TVAL:
-		timer->cnt_cval = kvm_phys_timer_read() - timer->cntvoff + (s32)val;
+		timer_set_cval(timer, kvm_phys_timer_read() - timer_get_offset(timer) + (s32)val);
		break;

	case TIMER_REG_CTL:
-		timer->cnt_ctl = val & ~ARCH_TIMER_CTRL_IT_STAT;
+		timer_set_ctl(timer, val & ~ARCH_TIMER_CTRL_IT_STAT);
		break;

	case TIMER_REG_CVAL:
-		timer->cnt_cval = val;
+		timer_set_cval(timer, val);
		break;

	default:
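Note (illustrative, not part of the commit): the new timer_get_*/timer_set_* helpers above route each timer context's state to a slot in the vcpu's shared register array, keyed by the context's index. A simplified, standalone mock of that routing, with invented enum values and no kernel dependencies:

#include <stdio.h>
#include <stdint.h>

enum { CNTV_CVAL, CNTP_CVAL, NR_REGS };		/* backing register slots */
enum { TIMER_VTIMER, TIMER_PTIMER };		/* timer context indices */

struct vcpu {
	uint64_t sys_regs[NR_REGS];
};

struct timer_ctx {
	struct vcpu *vcpu;
	int index;
};

static uint64_t timer_get_cval(struct timer_ctx *ctxt)
{
	switch (ctxt->index) {
	case TIMER_VTIMER: return ctxt->vcpu->sys_regs[CNTV_CVAL];
	case TIMER_PTIMER: return ctxt->vcpu->sys_regs[CNTP_CVAL];
	default:           return 0;
	}
}

static void timer_set_cval(struct timer_ctx *ctxt, uint64_t cval)
{
	switch (ctxt->index) {
	case TIMER_VTIMER: ctxt->vcpu->sys_regs[CNTV_CVAL] = cval; break;
	case TIMER_PTIMER: ctxt->vcpu->sys_regs[CNTP_CVAL] = cval; break;
	}
}

int main(void)
{
	struct vcpu v = { { 0 } };
	struct timer_ctx vtimer = { &v, TIMER_VTIMER };

	timer_set_cval(&vtimer, 12345);	/* lands in the CNTV_CVAL slot */
	printf("vtimer cval = %llu\n",
	       (unsigned long long)timer_get_cval(&vtimer));
	return 0;
}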
arch/arm64/kvm/arm.c
@@ -106,22 +106,15 @@ static int kvm_arm_default_max_vcpus(void)
  */
 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 {
-	int ret, cpu;
+	int ret;

	ret = kvm_arm_setup_stage2(kvm, type);
	if (ret)
		return ret;

-	kvm->arch.last_vcpu_ran = alloc_percpu(typeof(*kvm->arch.last_vcpu_ran));
-	if (!kvm->arch.last_vcpu_ran)
-		return -ENOMEM;
-
-	for_each_possible_cpu(cpu)
-		*per_cpu_ptr(kvm->arch.last_vcpu_ran, cpu) = -1;
-
-	ret = kvm_alloc_stage2_pgd(kvm);
+	ret = kvm_init_stage2_mmu(kvm, &kvm->arch.mmu);
	if (ret)
-		goto out_fail_alloc;
+		return ret;

	ret = create_hyp_mappings(kvm, kvm + 1, PAGE_HYP);
	if (ret)
@@ -129,18 +122,12 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)

	kvm_vgic_early_init(kvm);

-	/* Mark the initial VMID generation invalid */
-	kvm->arch.vmid.vmid_gen = 0;
-
	/* The maximum number of VCPUs is limited by the host's GIC model */
	kvm->arch.max_vcpus = kvm_arm_default_max_vcpus();

	return ret;
 out_free_stage2_pgd:
-	kvm_free_stage2_pgd(kvm);
-out_fail_alloc:
-	free_percpu(kvm->arch.last_vcpu_ran);
-	kvm->arch.last_vcpu_ran = NULL;
+	kvm_free_stage2_pgd(&kvm->arch.mmu);
	return ret;
 }
@@ -160,9 +147,6 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
	kvm_vgic_destroy(kvm);

-	free_percpu(kvm->arch.last_vcpu_ran);
-	kvm->arch.last_vcpu_ran = NULL;
-
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		if (kvm->vcpus[i]) {
			kvm_vcpu_destroy(kvm->vcpus[i]);
@@ -279,6 +263,8 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
	kvm_arm_pvtime_vcpu_init(&vcpu->arch);

+	vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu;
+
	err = kvm_vgic_vcpu_init(vcpu);
	if (err)
		return err;
@@ -334,16 +320,18 @@ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
+	struct kvm_s2_mmu *mmu;
	int *last_ran;

-	last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran);
+	mmu = vcpu->arch.hw_mmu;
+	last_ran = this_cpu_ptr(mmu->last_vcpu_ran);

	/*
	 * We might get preempted before the vCPU actually runs, but
	 * over-invalidation doesn't affect correctness.
	 */
	if (*last_ran != vcpu->vcpu_id) {
-		kvm_call_hyp(__kvm_tlb_flush_local_vmid, vcpu);
+		kvm_call_hyp(__kvm_tlb_flush_local_vmid, mmu);
		*last_ran = vcpu->vcpu_id;
	}
@@ -680,7 +668,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
		 */
		cond_resched();

-		update_vmid(&vcpu->kvm->arch.vmid);
+		update_vmid(&vcpu->arch.hw_mmu->vmid);

		check_vcpu_requests(vcpu);
@@ -729,13 +717,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
		 */
		smp_store_mb(vcpu->mode, IN_GUEST_MODE);

-		if (ret <= 0 || need_new_vmid_gen(&vcpu->kvm->arch.vmid) ||
+		if (ret <= 0 || need_new_vmid_gen(&vcpu->arch.hw_mmu->vmid) ||
		    kvm_request_pending(vcpu)) {
			vcpu->mode = OUTSIDE_GUEST_MODE;
			isb(); /* Ensure work in x_flush_hwstate is committed */
			kvm_pmu_sync_hwstate(vcpu);
			if (static_branch_unlikely(&userspace_irqchip_in_use))
-				kvm_timer_sync_hwstate(vcpu);
+				kvm_timer_sync_user(vcpu);
			kvm_vgic_sync_hwstate(vcpu);
			local_irq_enable();
			preempt_enable();
@@ -780,7 +768,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
		 * timer virtual interrupt state.
		 */
		if (static_branch_unlikely(&userspace_irqchip_in_use))
-			kvm_timer_sync_hwstate(vcpu);
+			kvm_timer_sync_user(vcpu);

		kvm_arch_vcpu_ctxsync_fp(vcpu);
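Note (illustrative, not part of the commit): kvm_arch_vcpu_load() above keeps a per-CPU record of the last vCPU that used each stage-2 MMU on that physical CPU, and only issues a local TLB flush when a different vCPU is about to reuse the same VMID there. A standalone sketch of that bookkeeping, with a plain array standing in for per-CPU data:

#include <stdio.h>

#define NR_CPUS 4

struct s2_mmu {
	int last_vcpu_ran[NR_CPUS];	/* -1 means "nobody yet" */
};

static void vcpu_load(struct s2_mmu *mmu, int cpu, int vcpu_id)
{
	if (mmu->last_vcpu_ran[cpu] != vcpu_id) {
		printf("cpu%d: local TLB flush before running vcpu%d\n",
		       cpu, vcpu_id);
		mmu->last_vcpu_ran[cpu] = vcpu_id;
	} else {
		printf("cpu%d: vcpu%d ran here last, no flush\n", cpu, vcpu_id);
	}
}

int main(void)
{
	struct s2_mmu mmu = { .last_vcpu_ran = { -1, -1, -1, -1 } };

	vcpu_load(&mmu, 0, 0);	/* first run on cpu0: flush */
	vcpu_load(&mmu, 0, 0);	/* same vCPU again: no flush */
	vcpu_load(&mmu, 0, 1);	/* different vCPU on cpu0: flush */
	return 0;
}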
arch/arm64/kvm/fpsimd.c
@@ -85,7 +85,7 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
	WARN_ON_ONCE(!irqs_disabled());

	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
-		fpsimd_bind_state_to_cpu(&vcpu->arch.ctxt.gp_regs.fp_regs,
+		fpsimd_bind_state_to_cpu(&vcpu->arch.ctxt.fp_regs,
					 vcpu->arch.sve_state,
					 vcpu->arch.sve_max_vl);
@@ -109,12 +109,10 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
	local_irq_save(flags);

	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
-		u64 *guest_zcr = &vcpu->arch.ctxt.sys_regs[ZCR_EL1];
-
		fpsimd_save_and_flush_cpu_state();

		if (guest_has_sve)
-			*guest_zcr = read_sysreg_s(SYS_ZCR_EL12);
+			__vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_s(SYS_ZCR_EL12);
	} else if (host_has_sve) {
		/*
		 * The FPSIMD/SVE state in the CPU has not been touched, and we
arch/arm64/kvm/guest.c
@@ -101,19 +101,69 @@ static int core_reg_size_from_offset(const struct kvm_vcpu *vcpu, u64 off)
	return size;
 }

-static int validate_core_offset(const struct kvm_vcpu *vcpu,
-				const struct kvm_one_reg *reg)
+static void *core_reg_addr(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 {
	u64 off = core_reg_offset_from_id(reg->id);
	int size = core_reg_size_from_offset(vcpu, off);

	if (size < 0)
-		return -EINVAL;
+		return NULL;

	if (KVM_REG_SIZE(reg->id) != size)
-		return -EINVAL;
+		return NULL;

-	return 0;
+	switch (off) {
+	case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
+	     KVM_REG_ARM_CORE_REG(regs.regs[30]):
+		off -= KVM_REG_ARM_CORE_REG(regs.regs[0]);
+		off /= 2;
+		return &vcpu->arch.ctxt.regs.regs[off];
+
+	case KVM_REG_ARM_CORE_REG(regs.sp):
+		return &vcpu->arch.ctxt.regs.sp;
+
+	case KVM_REG_ARM_CORE_REG(regs.pc):
+		return &vcpu->arch.ctxt.regs.pc;
+
+	case KVM_REG_ARM_CORE_REG(regs.pstate):
+		return &vcpu->arch.ctxt.regs.pstate;
+
+	case KVM_REG_ARM_CORE_REG(sp_el1):
+		return __ctxt_sys_reg(&vcpu->arch.ctxt, SP_EL1);
+
+	case KVM_REG_ARM_CORE_REG(elr_el1):
+		return __ctxt_sys_reg(&vcpu->arch.ctxt, ELR_EL1);
+
+	case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_EL1]):
+		return __ctxt_sys_reg(&vcpu->arch.ctxt, SPSR_EL1);
+
+	case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_ABT]):
+		return &vcpu->arch.ctxt.spsr_abt;
+
+	case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_UND]):
+		return &vcpu->arch.ctxt.spsr_und;
+
+	case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_IRQ]):
+		return &vcpu->arch.ctxt.spsr_irq;
+
+	case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_FIQ]):
+		return &vcpu->arch.ctxt.spsr_fiq;
+
+	case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
+	     KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
+		off -= KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]);
+		off /= 4;
+		return &vcpu->arch.ctxt.fp_regs.vregs[off];
+
+	case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
+		return &vcpu->arch.ctxt.fp_regs.fpsr;
+
+	case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
+		return &vcpu->arch.ctxt.fp_regs.fpcr;
+
+	default:
+		return NULL;
+	}
 }

 static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
@@ -125,8 +175,8 @@ static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
	 * off the index in the "array".
	 */
	__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
-	struct kvm_regs *regs = vcpu_gp_regs(vcpu);
-	int nr_regs = sizeof(*regs) / sizeof(__u32);
+	int nr_regs = sizeof(struct kvm_regs) / sizeof(__u32);
+	void *addr;
	u32 off;

	/* Our ID is an index into the kvm_regs struct. */
@@ -135,10 +185,11 @@ static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
		return -ENOENT;

-	if (validate_core_offset(vcpu, reg))
+	addr = core_reg_addr(vcpu, reg);
+	if (!addr)
		return -EINVAL;

-	if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
+	if (copy_to_user(uaddr, addr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
@@ -147,10 +198,9 @@ static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 {
	__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
-	struct kvm_regs *regs = vcpu_gp_regs(vcpu);
-	int nr_regs = sizeof(*regs) / sizeof(__u32);
+	int nr_regs = sizeof(struct kvm_regs) / sizeof(__u32);
	__uint128_t tmp;
-	void *valp = &tmp;
+	void *valp = &tmp, *addr;
	u64 off;
	int err = 0;
@@ -160,7 +210,8 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
		return -ENOENT;

-	if (validate_core_offset(vcpu, reg))
+	addr = core_reg_addr(vcpu, reg);
+	if (!addr)
		return -EINVAL;

	if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
@@ -198,7 +249,7 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
		}
	}

-	memcpy((u32 *)regs + off, valp, KVM_REG_SIZE(reg->id));
+	memcpy(addr, valp, KVM_REG_SIZE(reg->id));

	if (*vcpu_cpsr(vcpu) & PSR_MODE32_BIT) {
		int i;
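Note (illustrative, not part of the commit): core_reg_addr() above exists because the user ABI still indexes registers by their offset inside struct kvm_regs, while the values now live in a differently shaped internal context, so each ABI offset must be translated into a pointer into the new storage. A simplified, standalone sketch of that offset-to-pointer dispatch with two invented layouts:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* ABI-visible layout: its offsets are the stable interface */
struct abi_regs {
	uint64_t x[31];
	uint64_t sp;
	uint64_t pc;
};

/* internal layout: reordered relative to the ABI */
struct internal_ctx {
	uint64_t pc;
	uint64_t sp;
	uint64_t gpr[31];
};

static void *core_reg_addr(struct internal_ctx *ctxt, size_t abi_off)
{
	if (abi_off >= offsetof(struct abi_regs, x) &&
	    abi_off <  offsetof(struct abi_regs, sp))
		return &ctxt->gpr[(abi_off - offsetof(struct abi_regs, x)) /
				  sizeof(uint64_t)];
	if (abi_off == offsetof(struct abi_regs, sp))
		return &ctxt->sp;
	if (abi_off == offsetof(struct abi_regs, pc))
		return &ctxt->pc;
	return NULL;	/* unknown offset: reject, as get/set_core_reg do */
}

int main(void)
{
	struct internal_ctx ctxt = { 0 };
	uint64_t *pc = core_reg_addr(&ctxt, offsetof(struct abi_regs, pc));

	*pc = 0x400080;
	printf("pc = %#llx\n", (unsigned long long)ctxt.pc);
	return 0;
}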
arch/arm64/kvm/hyp/entry.S
@@ -16,8 +16,7 @@
 #include <asm/kvm_mmu.h>
 #include <asm/kvm_ptrauth.h>

-#define CPU_GP_REG_OFFSET(x)	(CPU_GP_REGS + x)
-#define CPU_XREG_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
+#define CPU_XREG_OFFSET(x)	(CPU_USER_PT_REGS + 8*x)
 #define CPU_SP_EL0_OFFSET	(CPU_XREG_OFFSET(30) + 8)

	.text
arch/arm64/kvm/hyp/include/hyp/debug-sr.h
@@ -88,9 +88,8 @@
	default:	write_debug(ptr[0], reg, 0);			\
	}

-static inline void __debug_save_state(struct kvm_vcpu *vcpu,
-				      struct kvm_guest_debug_arch *dbg,
-				      struct kvm_cpu_context *ctxt)
+static void __debug_save_state(struct kvm_guest_debug_arch *dbg,
+			       struct kvm_cpu_context *ctxt)
 {
	u64 aa64dfr0;
	int brps, wrps;
@@ -104,12 +103,11 @@
	save_debug(dbg->dbg_wcr, dbgwcr, wrps);
	save_debug(dbg->dbg_wvr, dbgwvr, wrps);

-	ctxt->sys_regs[MDCCINT_EL1] = read_sysreg(mdccint_el1);
+	ctxt_sys_reg(ctxt, MDCCINT_EL1) = read_sysreg(mdccint_el1);
 }

-static inline void __debug_restore_state(struct kvm_vcpu *vcpu,
-					 struct kvm_guest_debug_arch *dbg,
-					 struct kvm_cpu_context *ctxt)
+static void __debug_restore_state(struct kvm_guest_debug_arch *dbg,
+				  struct kvm_cpu_context *ctxt)
 {
	u64 aa64dfr0;
	int brps, wrps;
@@ -124,7 +122,7 @@
	restore_debug(dbg->dbg_wcr, dbgwcr, wrps);
	restore_debug(dbg->dbg_wvr, dbgwvr, wrps);

-	write_sysreg(ctxt->sys_regs[MDCCINT_EL1], mdccint_el1);
+	write_sysreg(ctxt_sys_reg(ctxt, MDCCINT_EL1), mdccint_el1);
 }

 static inline void __debug_switch_to_guest_common(struct kvm_vcpu *vcpu)
@@ -142,8 +140,8 @@
	host_dbg = &vcpu->arch.host_debug_state.regs;
	guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);

-	__debug_save_state(vcpu, host_dbg, host_ctxt);
-	__debug_restore_state(vcpu, guest_dbg, guest_ctxt);
+	__debug_save_state(host_dbg, host_ctxt);
+	__debug_restore_state(guest_dbg, guest_ctxt);
 }

 static inline void __debug_switch_to_host_common(struct kvm_vcpu *vcpu)
@@ -161,8 +159,8 @@
	host_dbg = &vcpu->arch.host_debug_state.regs;
	guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);

-	__debug_save_state(vcpu, guest_dbg, guest_ctxt);
-	__debug_restore_state(vcpu, host_dbg, host_ctxt);
+	__debug_save_state(guest_dbg, guest_ctxt);
+	__debug_restore_state(host_dbg, host_ctxt);

	vcpu->arch.flags &= ~KVM_ARM64_DEBUG_DIRTY;
 }
arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -53,7 +53,7 @@ static inline void __fpsimd_save_fpexc32(struct kvm_vcpu *vcpu)
	if (!vcpu_el1_is_32bit(vcpu))
		return;

-	vcpu->arch.ctxt.sys_regs[FPEXC32_EL2] = read_sysreg(fpexc32_el2);
+	__vcpu_sys_reg(vcpu, FPEXC32_EL2) = read_sysreg(fpexc32_el2);
 }

 static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
@@ -122,9 +122,9 @@ static inline void ___deactivate_traps(struct kvm_vcpu *vcpu)
	}
 }

-static inline void __activate_vm(struct kvm *kvm)
+static inline void __activate_vm(struct kvm_s2_mmu *mmu)
 {
-	__load_guest_stage2(kvm);
+	__load_guest_stage2(mmu);
 }

 static inline bool __translate_far_to_hpfar(u64 far, u64 *hpfar)
@@ -266,17 +266,16 @@ static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
	if (sve_guest) {
		sve_load_state(vcpu_sve_pffr(vcpu),
-			       &vcpu->arch.ctxt.gp_regs.fp_regs.fpsr,
+			       &vcpu->arch.ctxt.fp_regs.fpsr,
			       sve_vq_from_vl(vcpu->arch.sve_max_vl) - 1);
-		write_sysreg_s(vcpu->arch.ctxt.sys_regs[ZCR_EL1], SYS_ZCR_EL12);
+		write_sysreg_s(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR_EL12);
	} else {
-		__fpsimd_restore_state(&vcpu->arch.ctxt.gp_regs.fp_regs);
+		__fpsimd_restore_state(&vcpu->arch.ctxt.fp_regs);
	}

	/* Skip restoring fpexc32 for AArch64 guests */
	if (!(read_sysreg(hcr_el2) & HCR_RW))
-		write_sysreg(vcpu->arch.ctxt.sys_regs[FPEXC32_EL2],
-			     fpexc32_el2);
+		write_sysreg(__vcpu_sys_reg(vcpu, FPEXC32_EL2), fpexc32_el2);

	vcpu->arch.flags |= KVM_ARM64_FP_ENABLED;
@@ -365,11 +364,14 @@ static inline bool esr_is_ptrauth_trap(u32 esr)
	return false;
 }

-#define __ptrauth_save_key(regs, key)						\
-({										\
-	regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1);	\
-	regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1);	\
-})
+#define __ptrauth_save_key(ctxt, key)					\
+	do {								\
+	u64 __val;							\
+	__val = read_sysreg_s(SYS_ ## key ## KEYLO_EL1);		\
+	ctxt_sys_reg(ctxt, key ## KEYLO_EL1) = __val;			\
+	__val = read_sysreg_s(SYS_ ## key ## KEYHI_EL1);		\
+	ctxt_sys_reg(ctxt, key ## KEYHI_EL1) = __val;			\
+} while(0)

 static inline bool __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
 {
@@ -381,11 +383,11 @@ static inline bool __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
		return false;

	ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
-	__ptrauth_save_key(ctxt->sys_regs, APIA);
-	__ptrauth_save_key(ctxt->sys_regs, APIB);
-	__ptrauth_save_key(ctxt->sys_regs, APDA);
-	__ptrauth_save_key(ctxt->sys_regs, APDB);
-	__ptrauth_save_key(ctxt->sys_regs, APGA);
+	__ptrauth_save_key(ctxt, APIA);
+	__ptrauth_save_key(ctxt, APIB);
+	__ptrauth_save_key(ctxt, APDA);
+	__ptrauth_save_key(ctxt, APDB);
+	__ptrauth_save_key(ctxt, APGA);

	vcpu_ptrauth_enable(vcpu);
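Note (illustrative, not part of the commit): the reworked __ptrauth_save_key() above is a multi-statement macro with a local temporary, wrapped in do { ... } while (0) so it still behaves as a single statement after an unbraced if. A standalone sketch of that macro shape, with stub "read key" helpers standing in for read_sysreg_s():

#include <stdio.h>

enum { APIAKEYLO, APIAKEYHI, NR_KEYS };

static unsigned long long regs[NR_KEYS];

static unsigned long long read_key_lo(void) { return 0x1111; }	/* stub */
static unsigned long long read_key_hi(void) { return 0x2222; }	/* stub */

#define save_key(dst)					\
	do {						\
		unsigned long long __val;		\
		__val = read_key_lo();			\
		(dst)[APIAKEYLO] = __val;		\
		__val = read_key_hi();			\
		(dst)[APIAKEYHI] = __val;		\
	} while (0)

int main(void)
{
	int enabled = 1;

	if (enabled)
		save_key(regs);	/* expands safely even without braces */

	printf("lo=%#llx hi=%#llx\n", regs[APIAKEYLO], regs[APIAKEYHI]);
	return 0;
}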
arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
@@ -17,95 +17,95 @@
 static inline void __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
 {
-	ctxt->sys_regs[MDSCR_EL1]	= read_sysreg(mdscr_el1);
+	ctxt_sys_reg(ctxt, MDSCR_EL1)	= read_sysreg(mdscr_el1);
 }

 static inline void __sysreg_save_user_state(struct kvm_cpu_context *ctxt)
 {
-	ctxt->sys_regs[TPIDR_EL0]	= read_sysreg(tpidr_el0);
-	ctxt->sys_regs[TPIDRRO_EL0]	= read_sysreg(tpidrro_el0);
+	ctxt_sys_reg(ctxt, TPIDR_EL0)	= read_sysreg(tpidr_el0);
+	ctxt_sys_reg(ctxt, TPIDRRO_EL0)	= read_sysreg(tpidrro_el0);
 }

 static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
 {
-	ctxt->sys_regs[CSSELR_EL1]	= read_sysreg(csselr_el1);
-	ctxt->sys_regs[SCTLR_EL1]	= read_sysreg_el1(SYS_SCTLR);
-	ctxt->sys_regs[CPACR_EL1]	= read_sysreg_el1(SYS_CPACR);
-	ctxt->sys_regs[TTBR0_EL1]	= read_sysreg_el1(SYS_TTBR0);
-	ctxt->sys_regs[TTBR1_EL1]	= read_sysreg_el1(SYS_TTBR1);
-	ctxt->sys_regs[TCR_EL1]		= read_sysreg_el1(SYS_TCR);
-	ctxt->sys_regs[ESR_EL1]		= read_sysreg_el1(SYS_ESR);
-	ctxt->sys_regs[AFSR0_EL1]	= read_sysreg_el1(SYS_AFSR0);
-	ctxt->sys_regs[AFSR1_EL1]	= read_sysreg_el1(SYS_AFSR1);
-	ctxt->sys_regs[FAR_EL1]		= read_sysreg_el1(SYS_FAR);
-	ctxt->sys_regs[MAIR_EL1]	= read_sysreg_el1(SYS_MAIR);
-	ctxt->sys_regs[VBAR_EL1]	= read_sysreg_el1(SYS_VBAR);
-	ctxt->sys_regs[CONTEXTIDR_EL1]	= read_sysreg_el1(SYS_CONTEXTIDR);
-	ctxt->sys_regs[AMAIR_EL1]	= read_sysreg_el1(SYS_AMAIR);
-	ctxt->sys_regs[CNTKCTL_EL1]	= read_sysreg_el1(SYS_CNTKCTL);
-	ctxt->sys_regs[PAR_EL1]		= read_sysreg(par_el1);
-	ctxt->sys_regs[TPIDR_EL1]	= read_sysreg(tpidr_el1);
-
-	ctxt->gp_regs.sp_el1		= read_sysreg(sp_el1);
-	ctxt->gp_regs.elr_el1		= read_sysreg_el1(SYS_ELR);
-	ctxt->gp_regs.spsr[KVM_SPSR_EL1]= read_sysreg_el1(SYS_SPSR);
+	ctxt_sys_reg(ctxt, CSSELR_EL1)	= read_sysreg(csselr_el1);
+	ctxt_sys_reg(ctxt, SCTLR_EL1)	= read_sysreg_el1(SYS_SCTLR);
+	ctxt_sys_reg(ctxt, CPACR_EL1)	= read_sysreg_el1(SYS_CPACR);
+	ctxt_sys_reg(ctxt, TTBR0_EL1)	= read_sysreg_el1(SYS_TTBR0);
+	ctxt_sys_reg(ctxt, TTBR1_EL1)	= read_sysreg_el1(SYS_TTBR1);
+	ctxt_sys_reg(ctxt, TCR_EL1)	= read_sysreg_el1(SYS_TCR);
+	ctxt_sys_reg(ctxt, ESR_EL1)	= read_sysreg_el1(SYS_ESR);
+	ctxt_sys_reg(ctxt, AFSR0_EL1)	= read_sysreg_el1(SYS_AFSR0);
+	ctxt_sys_reg(ctxt, AFSR1_EL1)	= read_sysreg_el1(SYS_AFSR1);
+	ctxt_sys_reg(ctxt, FAR_EL1)	= read_sysreg_el1(SYS_FAR);
+	ctxt_sys_reg(ctxt, MAIR_EL1)	= read_sysreg_el1(SYS_MAIR);
+	ctxt_sys_reg(ctxt, VBAR_EL1)	= read_sysreg_el1(SYS_VBAR);
+	ctxt_sys_reg(ctxt, CONTEXTIDR_EL1) = read_sysreg_el1(SYS_CONTEXTIDR);
+	ctxt_sys_reg(ctxt, AMAIR_EL1)	= read_sysreg_el1(SYS_AMAIR);
+	ctxt_sys_reg(ctxt, CNTKCTL_EL1)	= read_sysreg_el1(SYS_CNTKCTL);
+	ctxt_sys_reg(ctxt, PAR_EL1)	= read_sysreg(par_el1);
+	ctxt_sys_reg(ctxt, TPIDR_EL1)	= read_sysreg(tpidr_el1);
+
+	ctxt_sys_reg(ctxt, SP_EL1)	= read_sysreg(sp_el1);
+	ctxt_sys_reg(ctxt, ELR_EL1)	= read_sysreg_el1(SYS_ELR);
+	ctxt_sys_reg(ctxt, SPSR_EL1)	= read_sysreg_el1(SYS_SPSR);
 }

 static inline void __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)
 {
-	ctxt->gp_regs.regs.pc		= read_sysreg_el2(SYS_ELR);
-	ctxt->gp_regs.regs.pstate	= read_sysreg_el2(SYS_SPSR);
+	ctxt->regs.pc			= read_sysreg_el2(SYS_ELR);
+	ctxt->regs.pstate		= read_sysreg_el2(SYS_SPSR);

	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
-		ctxt->sys_regs[DISR_EL1] = read_sysreg_s(SYS_VDISR_EL2);
+		ctxt_sys_reg(ctxt, DISR_EL1) = read_sysreg_s(SYS_VDISR_EL2);
 }

 static inline void __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)
 {
-	write_sysreg(ctxt->sys_regs[MDSCR_EL1],	  mdscr_el1);
+	write_sysreg(ctxt_sys_reg(ctxt, MDSCR_EL1),  mdscr_el1);
 }

 static inline void __sysreg_restore_user_state(struct kvm_cpu_context *ctxt)
 {
-	write_sysreg(ctxt->sys_regs[TPIDR_EL0],		tpidr_el0);
-	write_sysreg(ctxt->sys_regs[TPIDRRO_EL0],	tpidrro_el0);
+	write_sysreg(ctxt_sys_reg(ctxt, TPIDR_EL0),	tpidr_el0);
+	write_sysreg(ctxt_sys_reg(ctxt, TPIDRRO_EL0),	tpidrro_el0);
 }

 static inline void __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
 {
-	write_sysreg(ctxt->sys_regs[MPIDR_EL1],		vmpidr_el2);
-	write_sysreg(ctxt->sys_regs[CSSELR_EL1],	csselr_el1);
+	write_sysreg(ctxt_sys_reg(ctxt, MPIDR_EL1),	vmpidr_el2);
+	write_sysreg(ctxt_sys_reg(ctxt, CSSELR_EL1),	csselr_el1);

	if (has_vhe() ||
	    !cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
-		write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1],	SYS_SCTLR);
-		write_sysreg_el1(ctxt->sys_regs[TCR_EL1],	SYS_TCR);
+		write_sysreg_el1(ctxt_sys_reg(ctxt, SCTLR_EL1),	SYS_SCTLR);
+		write_sysreg_el1(ctxt_sys_reg(ctxt, TCR_EL1),	SYS_TCR);
	} else	if (!ctxt->__hyp_running_vcpu) {
		/*
		 * Must only be done for guest registers, hence the context
		 * test. We're coming from the host, so SCTLR.M is already
		 * set. Pairs with nVHE's __activate_traps().
		 */
-		write_sysreg_el1((ctxt->sys_regs[TCR_EL1] |
+		write_sysreg_el1((ctxt_sys_reg(ctxt, TCR_EL1) |
				  TCR_EPD1_MASK | TCR_EPD0_MASK),
				 SYS_TCR);
		isb();
	}

-	write_sysreg_el1(ctxt->sys_regs[CPACR_EL1],	SYS_CPACR);
-	write_sysreg_el1(ctxt->sys_regs[TTBR0_EL1],	SYS_TTBR0);
-	write_sysreg_el1(ctxt->sys_regs[TTBR1_EL1],	SYS_TTBR1);
-	write_sysreg_el1(ctxt->sys_regs[ESR_EL1],	SYS_ESR);
-	write_sysreg_el1(ctxt->sys_regs[AFSR0_EL1],	SYS_AFSR0);
-	write_sysreg_el1(ctxt->sys_regs[AFSR1_EL1],	SYS_AFSR1);
-	write_sysreg_el1(ctxt->sys_regs[FAR_EL1],	SYS_FAR);
-	write_sysreg_el1(ctxt->sys_regs[MAIR_EL1],	SYS_MAIR);
-	write_sysreg_el1(ctxt->sys_regs[VBAR_EL1],	SYS_VBAR);
-	write_sysreg_el1(ctxt->sys_regs[CONTEXTIDR_EL1],SYS_CONTEXTIDR);
-	write_sysreg_el1(ctxt->sys_regs[AMAIR_EL1],	SYS_AMAIR);
-	write_sysreg_el1(ctxt->sys_regs[CNTKCTL_EL1],	SYS_CNTKCTL);
-	write_sysreg(ctxt->sys_regs[PAR_EL1],		par_el1);
-	write_sysreg(ctxt->sys_regs[TPIDR_EL1],		tpidr_el1);
+	write_sysreg_el1(ctxt_sys_reg(ctxt, CPACR_EL1),	SYS_CPACR);
+	write_sysreg_el1(ctxt_sys_reg(ctxt, TTBR0_EL1),	SYS_TTBR0);
+	write_sysreg_el1(ctxt_sys_reg(ctxt, TTBR1_EL1),	SYS_TTBR1);
+	write_sysreg_el1(ctxt_sys_reg(ctxt, ESR_EL1),	SYS_ESR);
+	write_sysreg_el1(ctxt_sys_reg(ctxt, AFSR0_EL1),	SYS_AFSR0);
+	write_sysreg_el1(ctxt_sys_reg(ctxt, AFSR1_EL1),	SYS_AFSR1);
+	write_sysreg_el1(ctxt_sys_reg(ctxt, FAR_EL1),	SYS_FAR);
+	write_sysreg_el1(ctxt_sys_reg(ctxt, MAIR_EL1),	SYS_MAIR);
+	write_sysreg_el1(ctxt_sys_reg(ctxt, VBAR_EL1),	SYS_VBAR);
+	write_sysreg_el1(ctxt_sys_reg(ctxt, CONTEXTIDR_EL1), SYS_CONTEXTIDR);
+	write_sysreg_el1(ctxt_sys_reg(ctxt, AMAIR_EL1),	SYS_AMAIR);
+	write_sysreg_el1(ctxt_sys_reg(ctxt, CNTKCTL_EL1), SYS_CNTKCTL);
+	write_sysreg(ctxt_sys_reg(ctxt, PAR_EL1),	par_el1);
+	write_sysreg(ctxt_sys_reg(ctxt, TPIDR_EL1),	tpidr_el1);

	if (!has_vhe() &&
	    cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT) &&
@@ -120,19 +120,19 @@ static inline void __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
		 * deconfigured and disabled. We can now restore the host's
		 * S1 configuration: SCTLR, and only then TCR.
		 */
-		write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1],	SYS_SCTLR);
+		write_sysreg_el1(ctxt_sys_reg(ctxt, SCTLR_EL1),	SYS_SCTLR);
		isb();
-		write_sysreg_el1(ctxt->sys_regs[TCR_EL1],	SYS_TCR);
+		write_sysreg_el1(ctxt_sys_reg(ctxt, TCR_EL1),	SYS_TCR);
	}

-	write_sysreg(ctxt->gp_regs.sp_el1,		sp_el1);
-	write_sysreg_el1(ctxt->gp_regs.elr_el1,		SYS_ELR);
-	write_sysreg_el1(ctxt->gp_regs.spsr[KVM_SPSR_EL1], SYS_SPSR);
+	write_sysreg(ctxt_sys_reg(ctxt, SP_EL1),	sp_el1);
+	write_sysreg_el1(ctxt_sys_reg(ctxt, ELR_EL1),	SYS_ELR);
+	write_sysreg_el1(ctxt_sys_reg(ctxt, SPSR_EL1),	SYS_SPSR);
 }

 static inline void __sysreg_restore_el2_return_state(struct kvm_cpu_context *ctxt)
 {
-	u64 pstate = ctxt->gp_regs.regs.pstate;
+	u64 pstate = ctxt->regs.pstate;
	u64 mode = pstate & PSR_AA32_MODE_MASK;

	/*
@@ -149,55 +149,45 @@ static inline void __sysreg_restore_el2_return_state(struct kvm_cpu_context *ctxt)
	if (!(mode & PSR_MODE32_BIT) && mode >= PSR_MODE_EL2t)
		pstate = PSR_MODE_EL2h | PSR_IL_BIT;

-	write_sysreg_el2(ctxt->gp_regs.regs.pc,		SYS_ELR);
+	write_sysreg_el2(ctxt->regs.pc,			SYS_ELR);
	write_sysreg_el2(pstate,			SYS_SPSR);

	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
-		write_sysreg_s(ctxt->sys_regs[DISR_EL1], SYS_VDISR_EL2);
+		write_sysreg_s(ctxt_sys_reg(ctxt, DISR_EL1), SYS_VDISR_EL2);
 }

 static inline void __sysreg32_save_state(struct kvm_vcpu *vcpu)
 {
-	u64 *spsr, *sysreg;
-
	if (!vcpu_el1_is_32bit(vcpu))
		return;

-	spsr = vcpu->arch.ctxt.gp_regs.spsr;
-	sysreg = vcpu->arch.ctxt.sys_regs;
-
-	spsr[KVM_SPSR_ABT] = read_sysreg(spsr_abt);
-	spsr[KVM_SPSR_UND] = read_sysreg(spsr_und);
-	spsr[KVM_SPSR_IRQ] = read_sysreg(spsr_irq);
-	spsr[KVM_SPSR_FIQ] = read_sysreg(spsr_fiq);
+	vcpu->arch.ctxt.spsr_abt = read_sysreg(spsr_abt);
+	vcpu->arch.ctxt.spsr_und = read_sysreg(spsr_und);
+	vcpu->arch.ctxt.spsr_irq = read_sysreg(spsr_irq);
+	vcpu->arch.ctxt.spsr_fiq = read_sysreg(spsr_fiq);

-	sysreg[DACR32_EL2] = read_sysreg(dacr32_el2);
-	sysreg[IFSR32_EL2] = read_sysreg(ifsr32_el2);
+	__vcpu_sys_reg(vcpu, DACR32_EL2) = read_sysreg(dacr32_el2);
+	__vcpu_sys_reg(vcpu, IFSR32_EL2) = read_sysreg(ifsr32_el2);

	if (has_vhe() || vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY)
-		sysreg[DBGVCR32_EL2] = read_sysreg(dbgvcr32_el2);
+		__vcpu_sys_reg(vcpu, DBGVCR32_EL2) = read_sysreg(dbgvcr32_el2);
 }

 static inline void __sysreg32_restore_state(struct kvm_vcpu *vcpu)
 {
-	u64 *spsr, *sysreg;
-
	if (!vcpu_el1_is_32bit(vcpu))
		return;

-	spsr = vcpu->arch.ctxt.gp_regs.spsr;
-	sysreg = vcpu->arch.ctxt.sys_regs;
-
-	write_sysreg(spsr[KVM_SPSR_ABT], spsr_abt);
-	write_sysreg(spsr[KVM_SPSR_UND], spsr_und);
-	write_sysreg(spsr[KVM_SPSR_IRQ], spsr_irq);
-	write_sysreg(spsr[KVM_SPSR_FIQ], spsr_fiq);
+	write_sysreg(vcpu->arch.ctxt.spsr_abt, spsr_abt);
+	write_sysreg(vcpu->arch.ctxt.spsr_und, spsr_und);
+	write_sysreg(vcpu->arch.ctxt.spsr_irq, spsr_irq);
+	write_sysreg(vcpu->arch.ctxt.spsr_fiq, spsr_fiq);

-	write_sysreg(sysreg[DACR32_EL2], dacr32_el2);
-	write_sysreg(sysreg[IFSR32_EL2], ifsr32_el2);
+	write_sysreg(__vcpu_sys_reg(vcpu, DACR32_EL2), dacr32_el2);
+	write_sysreg(__vcpu_sys_reg(vcpu, IFSR32_EL2), ifsr32_el2);

	if (has_vhe() || vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY)
-		write_sysreg(sysreg[DBGVCR32_EL2], dbgvcr32_el2);
+		write_sysreg(__vcpu_sys_reg(vcpu, DBGVCR32_EL2), dbgvcr32_el2);
 }

 #endif /* __ARM64_KVM_HYP_SYSREG_SR_H__ */
arch/arm64/kvm/hyp/nvhe/switch.c
@@ -52,9 +52,9 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
		 * configured and enabled. We can now restore the guest's S1
		 * configuration: SCTLR, and only then TCR.
		 */
-		write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1],	SYS_SCTLR);
+		write_sysreg_el1(ctxt_sys_reg(ctxt, SCTLR_EL1),	SYS_SCTLR);
		isb();
-		write_sysreg_el1(ctxt->sys_regs[TCR_EL1],	SYS_TCR);
+		write_sysreg_el1(ctxt_sys_reg(ctxt, TCR_EL1),	SYS_TCR);
	}
 }
@@ -194,7 +194,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
	__sysreg32_restore_state(vcpu);
	__sysreg_restore_state_nvhe(guest_ctxt);

-	__activate_vm(kern_hyp_va(vcpu->kvm));
+	__activate_vm(kern_hyp_va(vcpu->arch.hw_mmu));
	__activate_traps(vcpu);

	__hyp_vgic_restore_state(vcpu);
arch/arm64/kvm/hyp/nvhe/tlb.c
@@ -12,7 +12,8 @@ struct tlb_inv_context {
	u64		tcr;
 };

-static void __tlb_switch_to_guest(struct kvm *kvm, struct tlb_inv_context *cxt)
+static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
+				  struct tlb_inv_context *cxt)
 {
	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		u64 val;
@@ -30,12 +31,10 @@ static void __tlb_switch_to_guest(struct kvm *kvm, struct tlb_inv_context *cxt)
		isb();
	}

-	/* __load_guest_stage2() includes an ISB for the workaround. */
-	__load_guest_stage2(kvm);
-	asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
+	__load_guest_stage2(mmu);
 }

-static void __tlb_switch_to_host(struct kvm *kvm, struct tlb_inv_context *cxt)
+static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
 {
	write_sysreg(0, vttbr_el2);
@@ -47,15 +46,16 @@ static void __tlb_switch_to_host(struct kvm *kvm, struct tlb_inv_context *cxt)
	}
 }

-void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
+void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
+			      phys_addr_t ipa, int level)
 {
	struct tlb_inv_context cxt;

	dsb(ishst);

	/* Switch to requested VMID */
-	kvm = kern_hyp_va(kvm);
-	__tlb_switch_to_guest(kvm, &cxt);
+	mmu = kern_hyp_va(mmu);
+	__tlb_switch_to_guest(mmu, &cxt);

	/*
	 * We could do so much better if we had the VA as well.
@@ -63,7 +63,7 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
	 * whole of Stage-1. Weep...
	 */
	ipa >>= 12;
-	__tlbi(ipas2e1is, ipa);
+	__tlbi_level(ipas2e1is, ipa, level);

	/*
	 * We have to ensure completion of the invalidation at Stage-2,
@@ -98,39 +98,39 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
	if (icache_is_vpipt())
		__flush_icache_all();

-	__tlb_switch_to_host(kvm, &cxt);
+	__tlb_switch_to_host(&cxt);
 }

-void __kvm_tlb_flush_vmid(struct kvm *kvm)
+void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
 {
	struct tlb_inv_context cxt;

	dsb(ishst);

	/* Switch to requested VMID */
-	kvm = kern_hyp_va(kvm);
-	__tlb_switch_to_guest(kvm, &cxt);
+	mmu = kern_hyp_va(mmu);
+	__tlb_switch_to_guest(mmu, &cxt);

	__tlbi(vmalls12e1is);
	dsb(ish);
	isb();

-	__tlb_switch_to_host(kvm, &cxt);
+	__tlb_switch_to_host(&cxt);
 }

-void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
+void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu)
 {
-	struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
	struct tlb_inv_context cxt;

	/* Switch to requested VMID */
-	__tlb_switch_to_guest(kvm, &cxt);
+	mmu = kern_hyp_va(mmu);
+	__tlb_switch_to_guest(mmu, &cxt);

	__tlbi(vmalle1);
	dsb(nsh);
	isb();

-	__tlb_switch_to_host(kvm, &cxt);
+	__tlb_switch_to_host(&cxt);
 }

 void __kvm_flush_vm_context(void)
arch/arm64/kvm/hyp/vhe/switch.c
@@ -125,7 +125,7 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
	 * stage 2 translation, and __activate_traps clear HCR_EL2.TGE
	 * (among other things).
	 */
-	__activate_vm(vcpu->kvm);
+	__activate_vm(vcpu->arch.hw_mmu);
	__activate_traps(vcpu);

	sysreg_restore_guest_state_vhe(guest_ctxt);
arch/arm64/kvm/hyp/vhe/tlb.c  View file @ 300dca68
...
@@ -16,7 +16,8 @@ struct tlb_inv_context {
 	u64		sctlr;
 };
-static void __tlb_switch_to_guest(struct kvm *kvm, struct tlb_inv_context *cxt)
+static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
+				  struct tlb_inv_context *cxt)
 {
 	u64 val;
...
@@ -52,14 +53,14 @@ static void __tlb_switch_to_guest(struct kvm *kvm, struct tlb_inv_context *cxt)
 	 * place before clearing TGE. __load_guest_stage2() already
 	 * has an ISB in order to deal with this.
 	 */
-	__load_guest_stage2(kvm);
+	__load_guest_stage2(mmu);
 	val = read_sysreg(hcr_el2);
 	val &= ~HCR_TGE;
 	write_sysreg(val, hcr_el2);
 	isb();
 }
-static void __tlb_switch_to_host(struct kvm *kvm, struct tlb_inv_context *cxt)
+static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
 {
 	/*
 	 * We're done with the TLB operation, let's restore the host's
...
@@ -78,14 +79,15 @@ static void __tlb_switch_to_host(struct kvm *kvm, struct tlb_inv_context *cxt)
 	local_irq_restore(cxt->flags);
 }
-void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
+void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa, int level)
 {
 	struct tlb_inv_context cxt;
 	dsb(ishst);
 	/* Switch to requested VMID */
-	__tlb_switch_to_guest(kvm, &cxt);
+	__tlb_switch_to_guest(mmu, &cxt);
 	/*
 	 * We could do so much better if we had the VA as well.
...
@@ -93,7 +95,7 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 	 * whole of Stage-1. Weep...
 	 */
 	ipa >>= 12;
-	__tlbi(ipas2e1is, ipa);
+	__tlbi_level(ipas2e1is, ipa, level);
 	/*
 	 * We have to ensure completion of the invalidation at Stage-2,
...
@@ -106,38 +108,37 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 	dsb(ish);
 	isb();
-	__tlb_switch_to_host(kvm, &cxt);
+	__tlb_switch_to_host(&cxt);
 }
-void __kvm_tlb_flush_vmid(struct kvm *kvm)
+void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
 {
 	struct tlb_inv_context cxt;
 	dsb(ishst);
 	/* Switch to requested VMID */
-	__tlb_switch_to_guest(kvm, &cxt);
+	__tlb_switch_to_guest(mmu, &cxt);
 	__tlbi(vmalls12e1is);
 	dsb(ish);
 	isb();
-	__tlb_switch_to_host(kvm, &cxt);
+	__tlb_switch_to_host(&cxt);
 }
-void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
+void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu)
 {
-	struct kvm *kvm = vcpu->kvm;
 	struct tlb_inv_context cxt;
 	/* Switch to requested VMID */
-	__tlb_switch_to_guest(kvm, &cxt);
+	__tlb_switch_to_guest(mmu, &cxt);
 	__tlbi(vmalle1);
 	dsb(nsh);
 	isb();
-	__tlb_switch_to_host(kvm, &cxt);
+	__tlb_switch_to_host(&cxt);
 }
 void __kvm_flush_vm_context(void)
...
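Both the VHE and nVHE TLB paths now hand __load_guest_stage2() a struct kvm_s2_mmu rather than the whole struct kvm. Based on the fields used elsewhere in this merge, the helper presumably looks roughly like the sketch below (the authoritative version lives in arch/arm64/include/asm/kvm_mmu.h, also in this merge's file list); treat the exact body, and in particular kvm_get_vttbr() taking an mmu pointer, as an assumption:

/* Approximation of the per-MMU stage-2 load, not verbatim kernel code. */
static __always_inline void __load_guest_stage2(struct kvm_s2_mmu *mmu)
{
	/* VTCR_EL2 still comes from the owning VM... */
	write_sysreg(kern_hyp_va(mmu->kvm)->arch.vtcr, vtcr_el2);
	/* ...while the table base and VMID now come from the S2 MMU context. */
	write_sysreg(kvm_get_vttbr(mmu), vttbr_el2);

	/* The speculative-AT erratum needs an ISB before using the new regime. */
	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
}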
arch/arm64/kvm/inject_fault.c  View file @ 300dca68
...
@@ -64,7 +64,7 @@ static void enter_exception64(struct kvm_vcpu *vcpu, unsigned long target_mode,
 	case PSR_MODE_EL1h:
 		vbar = vcpu_read_sys_reg(vcpu, VBAR_EL1);
 		sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
-		vcpu_write_elr_el1(vcpu, *vcpu_pc(vcpu));
+		vcpu_write_sys_reg(vcpu, *vcpu_pc(vcpu), ELR_EL1);
 		break;
 	default:
 		/* Don't do that */
...
arch/arm64/kvm/mmu.c  View file @ 300dca68
...
@@ -55,12 +55,13 @@ static bool memslot_is_logging(struct kvm_memory_slot *memslot)
  */
 void kvm_flush_remote_tlbs(struct kvm *kvm)
 {
-	kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
+	kvm_call_hyp(__kvm_tlb_flush_vmid, &kvm->arch.mmu);
 }
-static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
+static void kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
+				   int level)
 {
-	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
+	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ipa, level);
 }
 /*
...
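kvm_tlb_flush_vmid_ipa() now carries a level argument all the way down to the hypervisor TLBI. The callers below pass S2_PTE_LEVEL, S2_PMD_LEVEL, S2_PUD_LEVEL or S2_NO_LEVEL_HINT; these presumably come from the nine-line addition to arch/arm64/include/asm/stage2_pgtable.h listed in this merge, along the lines of the sketch below (the exact values are an assumption):

/* Assumed shape of the new stage-2 level hints (see stage2_pgtable.h). */
#define S2_NO_LEVEL_HINT	0	/* level unknown: no TTL hint, invalidate conservatively */
#define S2_PUD_LEVEL		1	/* entry lived in a level-1 (PUD) table */
#define S2_PMD_LEVEL		2	/* entry lived in a level-2 (PMD) table */
#define S2_PTE_LEVEL		3	/* entry lived in a level-3 (PTE) table */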
@@ -90,37 +91,39 @@ static bool kvm_is_device_pfn(unsigned long pfn)
 /**
  * stage2_dissolve_pmd() - clear and flush huge PMD entry
- * @kvm:	pointer to kvm structure.
+ * @mmu:	pointer to mmu structure to operate on
  * @addr:	IPA
  * @pmd:	pmd pointer for IPA
  *
  * Function clears a PMD entry, flushes addr 1st and 2nd stage TLBs.
  */
-static void stage2_dissolve_pmd(struct kvm *kvm, phys_addr_t addr, pmd_t *pmd)
+static void stage2_dissolve_pmd(struct kvm_s2_mmu *mmu, phys_addr_t addr, pmd_t *pmd)
 {
 	if (!pmd_thp_or_huge(*pmd))
 		return;
 	pmd_clear(pmd);
-	kvm_tlb_flush_vmid_ipa(kvm, addr);
+	kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PMD_LEVEL);
 	put_page(virt_to_page(pmd));
 }
 /**
  * stage2_dissolve_pud() - clear and flush huge PUD entry
- * @kvm:	pointer to kvm structure.
+ * @mmu:	pointer to mmu structure to operate on
  * @addr:	IPA
  * @pud:	pud pointer for IPA
  *
  * Function clears a PUD entry, flushes addr 1st and 2nd stage TLBs.
  */
-static void stage2_dissolve_pud(struct kvm *kvm, phys_addr_t addr, pud_t *pudp)
+static void stage2_dissolve_pud(struct kvm_s2_mmu *mmu, phys_addr_t addr, pud_t *pudp)
 {
+	struct kvm *kvm = mmu->kvm;
 	if (!stage2_pud_huge(kvm, *pudp))
 		return;
 	stage2_pud_clear(kvm, pudp);
-	kvm_tlb_flush_vmid_ipa(kvm, addr);
+	kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PUD_LEVEL);
 	put_page(virt_to_page(pudp));
 }
...
@@ -156,40 +159,44 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
 	return p;
 }
-static void clear_stage2_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
+static void clear_stage2_pgd_entry(struct kvm_s2_mmu *mmu, pgd_t *pgd, phys_addr_t addr)
 {
+	struct kvm *kvm = mmu->kvm;
 	p4d_t *p4d_table __maybe_unused = stage2_p4d_offset(kvm, pgd, 0UL);
 	stage2_pgd_clear(kvm, pgd);
-	kvm_tlb_flush_vmid_ipa(kvm, addr);
+	kvm_tlb_flush_vmid_ipa(mmu, addr, S2_NO_LEVEL_HINT);
 	stage2_p4d_free(kvm, p4d_table);
 	put_page(virt_to_page(pgd));
 }
-static void clear_stage2_p4d_entry(struct kvm *kvm, p4d_t *p4d, phys_addr_t addr)
+static void clear_stage2_p4d_entry(struct kvm_s2_mmu *mmu, p4d_t *p4d, phys_addr_t addr)
 {
+	struct kvm *kvm = mmu->kvm;
 	pud_t *pud_table __maybe_unused = stage2_pud_offset(kvm, p4d, 0);
 	stage2_p4d_clear(kvm, p4d);
-	kvm_tlb_flush_vmid_ipa(kvm, addr);
+	kvm_tlb_flush_vmid_ipa(mmu, addr, S2_NO_LEVEL_HINT);
 	stage2_pud_free(kvm, pud_table);
 	put_page(virt_to_page(p4d));
 }
-static void clear_stage2_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
+static void clear_stage2_pud_entry(struct kvm_s2_mmu *mmu, pud_t *pud, phys_addr_t addr)
 {
+	struct kvm *kvm = mmu->kvm;
 	pmd_t *pmd_table __maybe_unused = stage2_pmd_offset(kvm, pud, 0);
 	VM_BUG_ON(stage2_pud_huge(kvm, *pud));
 	stage2_pud_clear(kvm, pud);
-	kvm_tlb_flush_vmid_ipa(kvm, addr);
+	kvm_tlb_flush_vmid_ipa(mmu, addr, S2_NO_LEVEL_HINT);
 	stage2_pmd_free(kvm, pmd_table);
 	put_page(virt_to_page(pud));
 }
-static void clear_stage2_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
+static void clear_stage2_pmd_entry(struct kvm_s2_mmu *mmu, pmd_t *pmd, phys_addr_t addr)
 {
 	pte_t *pte_table = pte_offset_kernel(pmd, 0);
 	VM_BUG_ON(pmd_thp_or_huge(*pmd));
 	pmd_clear(pmd);
-	kvm_tlb_flush_vmid_ipa(kvm, addr);
+	kvm_tlb_flush_vmid_ipa(mmu, addr, S2_NO_LEVEL_HINT);
 	free_page((unsigned long)pte_table);
 	put_page(virt_to_page(pmd));
 }
...
@@ -255,7 +262,7 @@ static inline void kvm_pgd_populate(pgd_t *pgdp, p4d_t *p4dp)
  * we then fully enforce cacheability of RAM, no matter what the guest
  * does.
  */
-static void unmap_stage2_ptes(struct kvm *kvm, pmd_t *pmd,
+static void unmap_stage2_ptes(struct kvm_s2_mmu *mmu, pmd_t *pmd,
 			      phys_addr_t addr, phys_addr_t end)
 {
 	phys_addr_t start_addr = addr;
...
@@ -267,7 +274,7 @@ static void unmap_stage2_ptes(struct kvm *kvm, pmd_t *pmd,
 			pte_t old_pte = *pte;
 			kvm_set_pte(pte, __pte(0));
-			kvm_tlb_flush_vmid_ipa(kvm, addr);
+			kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PTE_LEVEL);
 			/* No need to invalidate the cache for device mappings */
 			if (!kvm_is_device_pfn(pte_pfn(old_pte)))
...
@@ -277,13 +284,14 @@ static void unmap_stage2_ptes(struct kvm *kvm, pmd_t *pmd,
 		}
 	} while (pte++, addr += PAGE_SIZE, addr != end);
-	if (stage2_pte_table_empty(kvm, start_pte))
-		clear_stage2_pmd_entry(kvm, pmd, start_addr);
+	if (stage2_pte_table_empty(mmu->kvm, start_pte))
+		clear_stage2_pmd_entry(mmu, pmd, start_addr);
 }
-static void unmap_stage2_pmds(struct kvm *kvm, pud_t *pud,
+static void unmap_stage2_pmds(struct kvm_s2_mmu *mmu, pud_t *pud,
 			      phys_addr_t addr, phys_addr_t end)
 {
+	struct kvm *kvm = mmu->kvm;
 	phys_addr_t next, start_addr = addr;
 	pmd_t *pmd, *start_pmd;
...
@@ -295,24 +303,25 @@ static void unmap_stage2_pmds(struct kvm *kvm, pud_t *pud,
 				pmd_t old_pmd = *pmd;
 				pmd_clear(pmd);
-				kvm_tlb_flush_vmid_ipa(kvm, addr);
+				kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PMD_LEVEL);
 				kvm_flush_dcache_pmd(old_pmd);
 				put_page(virt_to_page(pmd));
 			} else {
-				unmap_stage2_ptes(kvm, pmd, addr, next);
+				unmap_stage2_ptes(mmu, pmd, addr, next);
 			}
 		}
 	} while (pmd++, addr = next, addr != end);
 	if (stage2_pmd_table_empty(kvm, start_pmd))
-		clear_stage2_pud_entry(kvm, pud, start_addr);
+		clear_stage2_pud_entry(mmu, pud, start_addr);
 }
-static void unmap_stage2_puds(struct kvm *kvm, p4d_t *p4d,
+static void unmap_stage2_puds(struct kvm_s2_mmu *mmu, p4d_t *p4d,
 			      phys_addr_t addr, phys_addr_t end)
 {
+	struct kvm *kvm = mmu->kvm;
 	phys_addr_t next, start_addr = addr;
 	pud_t *pud, *start_pud;
...
@@ -324,22 +333,23 @@ static void unmap_stage2_puds(struct kvm *kvm, p4d_t *p4d,
 				pud_t old_pud = *pud;
 				stage2_pud_clear(kvm, pud);
-				kvm_tlb_flush_vmid_ipa(kvm, addr);
+				kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PUD_LEVEL);
 				kvm_flush_dcache_pud(old_pud);
 				put_page(virt_to_page(pud));
 			} else {
-				unmap_stage2_pmds(kvm, pud, addr, next);
+				unmap_stage2_pmds(mmu, pud, addr, next);
 			}
 		}
 	} while (pud++, addr = next, addr != end);
 	if (stage2_pud_table_empty(kvm, start_pud))
-		clear_stage2_p4d_entry(kvm, p4d, start_addr);
+		clear_stage2_p4d_entry(mmu, p4d, start_addr);
 }
-static void unmap_stage2_p4ds(struct kvm *kvm, pgd_t *pgd,
+static void unmap_stage2_p4ds(struct kvm_s2_mmu *mmu, pgd_t *pgd,
 			      phys_addr_t addr, phys_addr_t end)
 {
+	struct kvm *kvm = mmu->kvm;
 	phys_addr_t next, start_addr = addr;
 	p4d_t *p4d, *start_p4d;
...
@@ -347,11 +357,11 @@ static void unmap_stage2_p4ds(struct kvm *kvm, pgd_t *pgd,
 	do {
 		next = stage2_p4d_addr_end(kvm, addr, end);
 		if (!stage2_p4d_none(kvm, *p4d))
-			unmap_stage2_puds(kvm, p4d, addr, next);
+			unmap_stage2_puds(mmu, p4d, addr, next);
 	} while (p4d++, addr = next, addr != end);
 	if (stage2_p4d_table_empty(kvm, start_p4d))
-		clear_stage2_pgd_entry(kvm, pgd, start_addr);
+		clear_stage2_pgd_entry(mmu, pgd, start_addr);
 }
 /**
...
@@ -365,8 +375,9 @@ static void unmap_stage2_p4ds(struct kvm *kvm, pgd_t *pgd,
  * destroying the VM), otherwise another faulting VCPU may come in and mess
  * with things behind our backs.
  */
-static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
+static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size)
 {
+	struct kvm *kvm = mmu->kvm;
 	pgd_t *pgd;
 	phys_addr_t addr = start, end = start + size;
 	phys_addr_t next;
...
@@ -374,18 +385,18 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
 	assert_spin_locked(&kvm->mmu_lock);
 	WARN_ON(size & ~PAGE_MASK);
-	pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
+	pgd = mmu->pgd + stage2_pgd_index(kvm, addr);
 	do {
 		/*
 		 * Make sure the page table is still active, as another thread
 		 * could have possibly freed the page table, while we released
 		 * the lock.
 		 */
-		if (!READ_ONCE(kvm->arch.pgd))
+		if (!READ_ONCE(mmu->pgd))
 			break;
 		next = stage2_pgd_addr_end(kvm, addr, end);
 		if (!stage2_pgd_none(kvm, *pgd))
-			unmap_stage2_p4ds(kvm, pgd, addr, next);
+			unmap_stage2_p4ds(mmu, pgd, addr, next);
 		/*
 		 * If the range is too large, release the kvm->mmu_lock
 		 * to prevent starvation and lockup detector warnings.
...
@@ -395,7 +406,7 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
 	} while (pgd++, addr = next, addr != end);
 }
-static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
+static void stage2_flush_ptes(struct kvm_s2_mmu *mmu, pmd_t *pmd,
 			      phys_addr_t addr, phys_addr_t end)
 {
 	pte_t *pte;
...
@@ -407,9 +418,10 @@ static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 }
-static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
+static void stage2_flush_pmds(struct kvm_s2_mmu *mmu, pud_t *pud,
 			      phys_addr_t addr, phys_addr_t end)
 {
+	struct kvm *kvm = mmu->kvm;
 	pmd_t *pmd;
 	phys_addr_t next;
...
@@ -420,14 +432,15 @@ static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
 			if (pmd_thp_or_huge(*pmd))
 				kvm_flush_dcache_pmd(*pmd);
 			else
-				stage2_flush_ptes(kvm, pmd, addr, next);
+				stage2_flush_ptes(mmu, pmd, addr, next);
 		}
 	} while (pmd++, addr = next, addr != end);
 }
-static void stage2_flush_puds(struct kvm *kvm, p4d_t *p4d,
+static void stage2_flush_puds(struct kvm_s2_mmu *mmu, p4d_t *p4d,
 			      phys_addr_t addr, phys_addr_t end)
 {
+	struct kvm *kvm = mmu->kvm;
 	pud_t *pud;
 	phys_addr_t next;
...
@@ -438,14 +451,15 @@ static void stage2_flush_puds(struct kvm *kvm, p4d_t *p4d,
 			if (stage2_pud_huge(kvm, *pud))
 				kvm_flush_dcache_pud(*pud);
 			else
-				stage2_flush_pmds(kvm, pud, addr, next);
+				stage2_flush_pmds(mmu, pud, addr, next);
 		}
 	} while (pud++, addr = next, addr != end);
 }
-static void stage2_flush_p4ds(struct kvm *kvm, pgd_t *pgd,
+static void stage2_flush_p4ds(struct kvm_s2_mmu *mmu, pgd_t *pgd,
 			      phys_addr_t addr, phys_addr_t end)
 {
+	struct kvm *kvm = mmu->kvm;
 	p4d_t *p4d;
 	phys_addr_t next;
...
@@ -453,23 +467,24 @@ static void stage2_flush_p4ds(struct kvm *kvm, pgd_t *pgd,
 	do {
 		next = stage2_p4d_addr_end(kvm, addr, end);
 		if (!stage2_p4d_none(kvm, *p4d))
-			stage2_flush_puds(kvm, p4d, addr, next);
+			stage2_flush_puds(mmu, p4d, addr, next);
 	} while (p4d++, addr = next, addr != end);
 }
 static void stage2_flush_memslot(struct kvm *kvm,
 				 struct kvm_memory_slot *memslot)
 {
+	struct kvm_s2_mmu *mmu = &kvm->arch.mmu;
 	phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
 	phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
 	phys_addr_t next;
 	pgd_t *pgd;
-	pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
+	pgd = mmu->pgd + stage2_pgd_index(kvm, addr);
 	do {
 		next = stage2_pgd_addr_end(kvm, addr, end);
 		if (!stage2_pgd_none(kvm, *pgd))
-			stage2_flush_p4ds(kvm, pgd, addr, next);
+			stage2_flush_p4ds(mmu, pgd, addr, next);
 		if (next != end)
 			cond_resched_lock(&kvm->mmu_lock);
...
@@ -996,21 +1011,23 @@ int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
 }
 /**
- * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
- * @kvm:	The KVM struct pointer for the VM.
+ * kvm_init_stage2_mmu - Initialise a S2 MMU strucrure
+ * @kvm:	The pointer to the KVM structure
+ * @mmu:	The pointer to the s2 MMU structure
  *
  * Allocates only the stage-2 HW PGD level table(s) of size defined by
- * stage2_pgd_size(kvm).
+ * stage2_pgd_size(mmu->kvm).
  *
  * Note we don't need locking here as this is only called when the VM is
  * created, which can only be done once.
  */
-int kvm_alloc_stage2_pgd(struct kvm *kvm)
+int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
 {
 	phys_addr_t pgd_phys;
 	pgd_t *pgd;
+	int cpu;
-	if (kvm->arch.pgd != NULL) {
+	if (mmu->pgd != NULL) {
 		kvm_err("kvm_arch already initialized?\n");
 		return -EINVAL;
 	}
...
@@ -1024,8 +1041,20 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
 	if (WARN_ON(pgd_phys & ~kvm_vttbr_baddr_mask(kvm)))
 		return -EINVAL;
-	kvm->arch.pgd = pgd;
-	kvm->arch.pgd_phys = pgd_phys;
+	mmu->last_vcpu_ran = alloc_percpu(typeof(*mmu->last_vcpu_ran));
+	if (!mmu->last_vcpu_ran) {
+		free_pages_exact(pgd, stage2_pgd_size(kvm));
+		return -ENOMEM;
+	}
+	for_each_possible_cpu(cpu)
+		*per_cpu_ptr(mmu->last_vcpu_ran, cpu) = -1;
+	mmu->kvm = kvm;
+	mmu->pgd = pgd;
+	mmu->pgd_phys = pgd_phys;
+	mmu->vmid.vmid_gen = 0;
 	return 0;
 }
...
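kvm_init_stage2_mmu() gives a good picture of what the new struct kvm_s2_mmu carries. Reconstructed from the fields this function (and the walkers above) touch, it presumably looks something like the sketch below; the authoritative definition is in arch/arm64/include/asm/kvm_host.h, which this merge also changes, so field types and order here are an approximation:

/* Inferred layout -- see kvm_host.h in this merge for the real definition. */
struct kvm_s2_mmu {
	struct kvm_vmid	vmid;		/* VMID/generation for this stage-2 context */

	pgd_t		*pgd;		/* stage-2 pgd (kernel VA)... */
	phys_addr_t	pgd_phys;	/* ...and its PA, programmed into VTTBR_EL2 */

	int __percpu	*last_vcpu_ran;	/* last vcpu id that ran on each physical CPU */

	struct kvm	*kvm;		/* back-pointer to the owning VM */
};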
@@ -1064,7 +1093,7 @@ static void stage2_unmap_memslot(struct kvm *kvm,
 		if (!(vma->vm_flags & VM_PFNMAP)) {
 			gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
-			unmap_stage2_range(kvm, gpa, vm_end - vm_start);
+			unmap_stage2_range(&kvm->arch.mmu, gpa, vm_end - vm_start);
 		}
 		hva = vm_end;
 	} while (hva < reg_end);
...
@@ -1096,39 +1125,34 @@ void stage2_unmap_vm(struct kvm *kvm)
 	srcu_read_unlock(&kvm->srcu, idx);
 }
-/**
- * kvm_free_stage2_pgd - free all stage-2 tables
- * @kvm:	The KVM struct pointer for the VM.
- *
- * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
- * underlying level-2 and level-3 tables before freeing the actual level-1 table
- * and setting the struct pointer to NULL.
- */
-void kvm_free_stage2_pgd(struct kvm *kvm)
+void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu)
 {
+	struct kvm *kvm = mmu->kvm;
 	void *pgd = NULL;
 	spin_lock(&kvm->mmu_lock);
-	if (kvm->arch.pgd) {
-		unmap_stage2_range(kvm, 0, kvm_phys_size(kvm));
-		pgd = READ_ONCE(kvm->arch.pgd);
-		kvm->arch.pgd = NULL;
-		kvm->arch.pgd_phys = 0;
+	if (mmu->pgd) {
+		unmap_stage2_range(mmu, 0, kvm_phys_size(kvm));
+		pgd = READ_ONCE(mmu->pgd);
+		mmu->pgd = NULL;
 	}
 	spin_unlock(&kvm->mmu_lock);
 	/* Free the HW pgd, one page at a time */
-	if (pgd)
+	if (pgd) {
 		free_pages_exact(pgd, stage2_pgd_size(kvm));
+		free_percpu(mmu->last_vcpu_ran);
+	}
 }
-static p4d_t *stage2_get_p4d(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
+static p4d_t *stage2_get_p4d(struct kvm_s2_mmu *mmu, struct kvm_mmu_memory_cache *cache,
 			     phys_addr_t addr)
 {
+	struct kvm *kvm = mmu->kvm;
 	pgd_t *pgd;
 	p4d_t *p4d;
-	pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
+	pgd = mmu->pgd + stage2_pgd_index(kvm, addr);
 	if (stage2_pgd_none(kvm, *pgd)) {
 		if (!cache)
 			return NULL;
...
@@ -1140,13 +1164,14 @@ static p4d_t *stage2_get_p4d(struct kvm *kvm, struct kvm_mmu_memory_cache *cache
 	return stage2_p4d_offset(kvm, pgd, addr);
 }
-static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
+static pud_t *stage2_get_pud(struct kvm_s2_mmu *mmu, struct kvm_mmu_memory_cache *cache,
 			     phys_addr_t addr)
 {
+	struct kvm *kvm = mmu->kvm;
 	p4d_t *p4d;
 	pud_t *pud;
-	p4d = stage2_get_p4d(kvm, cache, addr);
+	p4d = stage2_get_p4d(mmu, cache, addr);
 	if (stage2_p4d_none(kvm, *p4d)) {
 		if (!cache)
 			return NULL;
...
@@ -1158,13 +1183,14 @@ static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache
 	return stage2_pud_offset(kvm, p4d, addr);
 }
-static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
+static pmd_t *stage2_get_pmd(struct kvm_s2_mmu *mmu, struct kvm_mmu_memory_cache *cache,
 			     phys_addr_t addr)
 {
+	struct kvm *kvm = mmu->kvm;
 	pud_t *pud;
 	pmd_t *pmd;
-	pud = stage2_get_pud(kvm, cache, addr);
+	pud = stage2_get_pud(mmu, cache, addr);
 	if (!pud || stage2_pud_huge(kvm, *pud))
 		return NULL;
...
@@ -1179,13 +1205,14 @@ static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache
 	return stage2_pmd_offset(kvm, pud, addr);
 }
-static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
+static int stage2_set_pmd_huge(struct kvm_s2_mmu *mmu,
+			       struct kvm_mmu_memory_cache *cache,
 			       phys_addr_t addr, const pmd_t *new_pmd)
 {
 	pmd_t *pmd, old_pmd;
 retry:
-	pmd = stage2_get_pmd(kvm, cache, addr);
+	pmd = stage2_get_pmd(mmu, cache, addr);
 	VM_BUG_ON(!pmd);
 	old_pmd = *pmd;
...
@@ -1218,7 +1245,7 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
 		 * get handled accordingly.
 		 */
 		if (!pmd_thp_or_huge(old_pmd)) {
-			unmap_stage2_range(kvm, addr & S2_PMD_MASK, S2_PMD_SIZE);
+			unmap_stage2_range(mmu, addr & S2_PMD_MASK, S2_PMD_SIZE);
 			goto retry;
 		}
 		/*
...
@@ -1234,7 +1261,7 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
 		 */
 		WARN_ON_ONCE(pmd_pfn(old_pmd) != pmd_pfn(*new_pmd));
 		pmd_clear(pmd);
-		kvm_tlb_flush_vmid_ipa(kvm, addr);
+		kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PMD_LEVEL);
 	} else {
 		get_page(virt_to_page(pmd));
 	}
...
@@ -1243,13 +1270,15 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
 	return 0;
 }
-static int stage2_set_pud_huge(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
+static int stage2_set_pud_huge(struct kvm_s2_mmu *mmu,
+			       struct kvm_mmu_memory_cache *cache,
 			       phys_addr_t addr, const pud_t *new_pudp)
 {
+	struct kvm *kvm = mmu->kvm;
 	pud_t *pudp, old_pud;
 retry:
-	pudp = stage2_get_pud(kvm, cache, addr);
+	pudp = stage2_get_pud(mmu, cache, addr);
 	VM_BUG_ON(!pudp);
 	old_pud = *pudp;
...
@@ -1268,13 +1297,13 @@ static int stage2_set_pud_huge(struct kvm *kvm, struct kvm_mmu_memory_cache *cac
 		 * the range for this block and retry.
 		 */
 		if (!stage2_pud_huge(kvm, old_pud)) {
-			unmap_stage2_range(kvm, addr & S2_PUD_MASK, S2_PUD_SIZE);
+			unmap_stage2_range(mmu, addr & S2_PUD_MASK, S2_PUD_SIZE);
 			goto retry;
 		}
 		WARN_ON_ONCE(kvm_pud_pfn(old_pud) != kvm_pud_pfn(*new_pudp));
 		stage2_pud_clear(kvm, pudp);
-		kvm_tlb_flush_vmid_ipa(kvm, addr);
+		kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PUD_LEVEL);
 	} else {
 		get_page(virt_to_page(pudp));
 	}
...
@@ -1289,9 +1318,10 @@ static int stage2_set_pud_huge(struct kvm *kvm, struct kvm_mmu_memory_cache *cac
  * leaf-entry is returned in the appropriate level variable - pudpp,
  * pmdpp, ptepp.
  */
-static bool stage2_get_leaf_entry(struct kvm *kvm, phys_addr_t addr,
+static bool stage2_get_leaf_entry(struct kvm_s2_mmu *mmu, phys_addr_t addr,
 				  pud_t **pudpp, pmd_t **pmdpp, pte_t **ptepp)
 {
+	struct kvm *kvm = mmu->kvm;
 	pud_t *pudp;
 	pmd_t *pmdp;
 	pte_t *ptep;
...
@@ -1300,7 +1330,7 @@ static bool stage2_get_leaf_entry(struct kvm *kvm, phys_addr_t addr,
 	*pmdpp = NULL;
 	*ptepp = NULL;
-	pudp = stage2_get_pud(kvm, NULL, addr);
+	pudp = stage2_get_pud(mmu, NULL, addr);
 	if (!pudp || stage2_pud_none(kvm, *pudp) || !stage2_pud_present(kvm, *pudp))
 		return false;
...
@@ -1326,14 +1356,14 @@ static bool stage2_get_leaf_entry(struct kvm *kvm, phys_addr_t addr,
 	return true;
 }
-static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr)
+static bool stage2_is_exec(struct kvm_s2_mmu *mmu, phys_addr_t addr)
 {
 	pud_t *pudp;
 	pmd_t *pmdp;
 	pte_t *ptep;
 	bool found;
-	found = stage2_get_leaf_entry(kvm, addr, &pudp, &pmdp, &ptep);
+	found = stage2_get_leaf_entry(mmu, addr, &pudp, &pmdp, &ptep);
 	if (!found)
 		return false;
...
@@ -1345,10 +1375,12 @@ static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr)
 		return kvm_s2pte_exec(ptep);
 }
-static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
+static int stage2_set_pte(struct kvm_s2_mmu *mmu,
+			  struct kvm_mmu_memory_cache *cache,
 			  phys_addr_t addr, const pte_t *new_pte,
 			  unsigned long flags)
 {
+	struct kvm *kvm = mmu->kvm;
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte, old_pte;
...
@@ -1358,7 +1390,7 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
 	VM_BUG_ON(logging_active && !cache);
 	/* Create stage-2 page table mapping - Levels 0 and 1 */
-	pud = stage2_get_pud(kvm, cache, addr);
+	pud = stage2_get_pud(mmu, cache, addr);
 	if (!pud) {
 		/*
 		 * Ignore calls from kvm_set_spte_hva for unallocated
...
@@ -1372,7 +1404,7 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
 	 * on to allocate page.
 	 */
 	if (logging_active)
-		stage2_dissolve_pud(kvm, addr, pud);
+		stage2_dissolve_pud(mmu, addr, pud);
 	if (stage2_pud_none(kvm, *pud)) {
 		if (!cache)
...
@@ -1396,7 +1428,7 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
 	 * allocate page.
 	 */
 	if (logging_active)
-		stage2_dissolve_pmd(kvm, addr, pmd);
+		stage2_dissolve_pmd(mmu, addr, pmd);
 	/* Create stage-2 page mappings - Level 2 */
 	if (pmd_none(*pmd)) {
...
@@ -1420,7 +1452,7 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
 			return 0;
 		kvm_set_pte(pte, __pte(0));
-		kvm_tlb_flush_vmid_ipa(kvm, addr);
+		kvm_tlb_flush_vmid_ipa(mmu, addr, S2_PTE_LEVEL);
 	} else {
 		get_page(virt_to_page(pte));
 	}
...
@@ -1486,8 +1518,8 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
 		if (ret)
 			goto out;
 		spin_lock(&kvm->mmu_lock);
-		ret = stage2_set_pte(kvm, &cache, addr, &pte,
-				     KVM_S2PTE_FLAG_IS_IOMAP);
+		ret = stage2_set_pte(&kvm->arch.mmu, &cache, addr, &pte,
+				     KVM_S2PTE_FLAG_IS_IOMAP);
 		spin_unlock(&kvm->mmu_lock);
 		if (ret)
 			goto out;
...
@@ -1526,9 +1558,10 @@ static void stage2_wp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end)
  * @addr:	range start address
  * @end:	range end address
  */
-static void stage2_wp_pmds(struct kvm *kvm, pud_t *pud,
+static void stage2_wp_pmds(struct kvm_s2_mmu *mmu, pud_t *pud,
 			   phys_addr_t addr, phys_addr_t end)
 {
+	struct kvm *kvm = mmu->kvm;
 	pmd_t *pmd;
 	phys_addr_t next;
...
@@ -1549,13 +1582,14 @@ static void stage2_wp_pmds(struct kvm *kvm, pud_t *pud,
 /**
  * stage2_wp_puds - write protect P4D range
- * @pgd:	pointer to pgd entry
+ * @p4d:	pointer to p4d entry
  * @addr:	range start address
  * @end:	range end address
  */
-static void stage2_wp_puds(struct kvm *kvm, p4d_t *p4d,
+static void stage2_wp_puds(struct kvm_s2_mmu *mmu, p4d_t *p4d,
 			   phys_addr_t addr, phys_addr_t end)
 {
+	struct kvm *kvm = mmu->kvm;
 	pud_t *pud;
 	phys_addr_t next;
...
@@ -1567,7 +1601,7 @@ static void stage2_wp_puds(struct kvm *kvm, p4d_t *p4d,
 				if (!kvm_s2pud_readonly(pud))
 					kvm_set_s2pud_readonly(pud);
 			} else {
-				stage2_wp_pmds(kvm, pud, addr, next);
+				stage2_wp_pmds(mmu, pud, addr, next);
 			}
 		}
 	} while (pud++, addr = next, addr != end);
...
@@ -1579,9 +1613,10 @@ static void stage2_wp_puds(struct kvm *kvm, p4d_t *p4d,
  * @addr:	range start address
  * @end:	range end address
  */
-static void stage2_wp_p4ds(struct kvm *kvm, pgd_t *pgd,
+static void stage2_wp_p4ds(struct kvm_s2_mmu *mmu, pgd_t *pgd,
 			   phys_addr_t addr, phys_addr_t end)
 {
+	struct kvm *kvm = mmu->kvm;
 	p4d_t *p4d;
 	phys_addr_t next;
...
@@ -1589,7 +1624,7 @@ static void stage2_wp_p4ds(struct kvm *kvm, pgd_t *pgd,
 	do {
 		next = stage2_p4d_addr_end(kvm, addr, end);
 		if (!stage2_p4d_none(kvm, *p4d))
-			stage2_wp_puds(kvm, p4d, addr, next);
+			stage2_wp_puds(mmu, p4d, addr, next);
 	} while (p4d++, addr = next, addr != end);
 }
...
@@ -1599,12 +1634,13 @@ static void stage2_wp_p4ds(struct kvm *kvm, pgd_t *pgd,
  * @addr:	Start address of range
  * @end:	End address of range
  */
-static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
+static void stage2_wp_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end)
 {
+	struct kvm *kvm = mmu->kvm;
 	pgd_t *pgd;
 	phys_addr_t next;
-	pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
+	pgd = mmu->pgd + stage2_pgd_index(kvm, addr);
 	do {
 		/*
 		 * Release kvm_mmu_lock periodically if the memory region is
...
@@ -1616,11 +1652,11 @@ static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
 		 * the lock.
 		 */
 		cond_resched_lock(&kvm->mmu_lock);
-		if (!READ_ONCE(kvm->arch.pgd))
+		if (!READ_ONCE(mmu->pgd))
 			break;
 		next = stage2_pgd_addr_end(kvm, addr, end);
 		if (stage2_pgd_present(kvm, *pgd))
-			stage2_wp_p4ds(kvm, pgd, addr, next);
+			stage2_wp_p4ds(mmu, pgd, addr, next);
 	} while (pgd++, addr = next, addr != end);
 }
...
@@ -1650,7 +1686,7 @@ void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
 	end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
 	spin_lock(&kvm->mmu_lock);
-	stage2_wp_range(kvm, start, end);
+	stage2_wp_range(&kvm->arch.mmu, start, end);
 	spin_unlock(&kvm->mmu_lock);
 	kvm_flush_remote_tlbs(kvm);
 }
...
@@ -1674,7 +1710,7 @@ static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
 	phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;
 	phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;
-	stage2_wp_range(kvm, start, end);
+	stage2_wp_range(&kvm->arch.mmu, start, end);
 }
 /*
...
@@ -1837,6 +1873,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	pgprot_t mem_type = PAGE_S2;
 	bool logging_active = memslot_is_logging(memslot);
 	unsigned long vma_pagesize, flags = 0;
+	struct kvm_s2_mmu *mmu = vcpu->arch.hw_mmu;
 	write_fault = kvm_is_write_fault(vcpu);
 	exec_fault = kvm_vcpu_trap_is_iabt(vcpu);
...
@@ -1958,7 +1995,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	 * execute permissions, and we preserve whatever we have.
 	 */
 	needs_exec = exec_fault ||
-		(fault_status == FSC_PERM && stage2_is_exec(kvm, fault_ipa));
+		(fault_status == FSC_PERM && stage2_is_exec(mmu, fault_ipa));
 	if (vma_pagesize == PUD_SIZE) {
 		pud_t new_pud = kvm_pfn_pud(pfn, mem_type);
...
@@ -1970,7 +2007,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 		if (needs_exec)
 			new_pud = kvm_s2pud_mkexec(new_pud);
-		ret = stage2_set_pud_huge(kvm, memcache, fault_ipa, &new_pud);
+		ret = stage2_set_pud_huge(mmu, memcache, fault_ipa, &new_pud);
 	} else if (vma_pagesize == PMD_SIZE) {
 		pmd_t new_pmd = kvm_pfn_pmd(pfn, mem_type);
...
@@ -1982,7 +2019,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 		if (needs_exec)
 			new_pmd = kvm_s2pmd_mkexec(new_pmd);
-		ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
+		ret = stage2_set_pmd_huge(mmu, memcache, fault_ipa, &new_pmd);
 	} else {
 		pte_t new_pte = kvm_pfn_pte(pfn, mem_type);
...
@@ -1994,7 +2031,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 		if (needs_exec)
 			new_pte = kvm_s2pte_mkexec(new_pte);
-		ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, flags);
+		ret = stage2_set_pte(mmu, memcache, fault_ipa, &new_pte, flags);
 	}
 out_unlock:
...
@@ -2023,7 +2060,7 @@ static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
 	spin_lock(&vcpu->kvm->mmu_lock);
-	if (!stage2_get_leaf_entry(vcpu->kvm, fault_ipa, &pud, &pmd, &pte))
+	if (!stage2_get_leaf_entry(vcpu->arch.hw_mmu, fault_ipa, &pud, &pmd, &pte))
 		goto out;
 	if (pud) {		/* HugeTLB */
...
@@ -2197,14 +2234,14 @@ static int handle_hva_to_gpa(struct kvm *kvm,
 static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
 {
-	unmap_stage2_range(kvm, gpa, size);
+	unmap_stage2_range(&kvm->arch.mmu, gpa, size);
 	return 0;
 }
 int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
 {
-	if (!kvm->arch.pgd)
+	if (!kvm->arch.mmu.pgd)
 		return 0;
 	trace_kvm_unmap_hva_range(start, end);
...
@@ -2224,7 +2261,7 @@ static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data
 	 * therefore stage2_set_pte() never needs to clear out a huge PMD
 	 * through this calling path.
 	 */
-	stage2_set_pte(kvm, NULL, gpa, pte, 0);
+	stage2_set_pte(&kvm->arch.mmu, NULL, gpa, pte, 0);
 	return 0;
 }
...
@@ -2235,7 +2272,7 @@ int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
 	kvm_pfn_t pfn = pte_pfn(pte);
 	pte_t stage2_pte;
-	if (!kvm->arch.pgd)
+	if (!kvm->arch.mmu.pgd)
 		return 0;
 	trace_kvm_set_spte_hva(hva);
...
@@ -2258,7 +2295,7 @@ static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
 	pte_t *pte;
 	WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
-	if (!stage2_get_leaf_entry(kvm, gpa, &pud, &pmd, &pte))
+	if (!stage2_get_leaf_entry(&kvm->arch.mmu, gpa, &pud, &pmd, &pte))
 		return 0;
 	if (pud)
...
@@ -2276,7 +2313,7 @@ static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *
 	pte_t *pte;
 	WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
-	if (!stage2_get_leaf_entry(kvm, gpa, &pud, &pmd, &pte))
+	if (!stage2_get_leaf_entry(&kvm->arch.mmu, gpa, &pud, &pmd, &pte))
 		return 0;
 	if (pud)
...
@@ -2289,7 +2326,7 @@ static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *
 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
 {
-	if (!kvm->arch.pgd)
+	if (!kvm->arch.mmu.pgd)
 		return 0;
 	trace_kvm_age_hva(start, end);
 	return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
...
@@ -2297,7 +2334,7 @@ int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
 {
-	if (!kvm->arch.pgd)
+	if (!kvm->arch.mmu.pgd)
 		return 0;
 	trace_kvm_test_age_hva(hva);
 	return handle_hva_to_gpa(kvm, hva, hva + PAGE_SIZE,
...
@@ -2510,7 +2547,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 	spin_lock(&kvm->mmu_lock);
 	if (ret)
-		unmap_stage2_range(kvm, mem->guest_phys_addr, mem->memory_size);
+		unmap_stage2_range(&kvm->arch.mmu, mem->guest_phys_addr, mem->memory_size);
 	else
 		stage2_flush_memslot(kvm, memslot);
 	spin_unlock(&kvm->mmu_lock);
...
@@ -2529,7 +2566,7 @@ void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
 void kvm_arch_flush_shadow_all(struct kvm *kvm)
 {
-	kvm_free_stage2_pgd(kvm);
+	kvm_free_stage2_pgd(&kvm->arch.mmu);
 }
 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
...
@@ -2539,7 +2576,7 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
 	phys_addr_t size = slot->npages << PAGE_SHIFT;
 	spin_lock(&kvm->mmu_lock);
-	unmap_stage2_range(kvm, gpa, size);
+	unmap_stage2_range(&kvm->arch.mmu, gpa, size);
 	spin_unlock(&kvm->mmu_lock);
 }
...
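The pattern throughout mmu.c after this change: the arch hooks and MMU-notifier handlers still receive a struct kvm and simply pass its (currently single) stage-2 context, while the internal walkers operate on the kvm_s2_mmu and reach VM-wide state through mmu->kvm. A minimal illustrative caller, not taken from the patch:

/* Illustration only: tearing down a guest range via the per-MMU walkers. */
static void example_zap_guest_range(struct kvm *kvm, phys_addr_t gpa, u64 size)
{
	struct kvm_s2_mmu *mmu = &kvm->arch.mmu;	/* one S2 MMU per VM today */

	spin_lock(&kvm->mmu_lock);
	unmap_stage2_range(mmu, gpa, size);		/* the walker takes the mmu */
	spin_unlock(&kvm->mmu_lock);
}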
arch/arm64/kvm/regmap.c  View file @ 300dca68
...
@@ -100,7 +100,7 @@ static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][16] = {
  */
 unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num)
 {
-	unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.gp_regs.regs;
+	unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.regs;
 	unsigned long mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
 	switch (mode) {
...
@@ -147,8 +147,20 @@ unsigned long vcpu_read_spsr32(const struct kvm_vcpu *vcpu)
 {
 	int spsr_idx = vcpu_spsr32_mode(vcpu);
-	if (!vcpu->arch.sysregs_loaded_on_cpu)
-		return vcpu_gp_regs(vcpu)->spsr[spsr_idx];
+	if (!vcpu->arch.sysregs_loaded_on_cpu) {
+		switch (spsr_idx) {
+		case KVM_SPSR_SVC:
+			return __vcpu_sys_reg(vcpu, SPSR_EL1);
+		case KVM_SPSR_ABT:
+			return vcpu->arch.ctxt.spsr_abt;
+		case KVM_SPSR_UND:
+			return vcpu->arch.ctxt.spsr_und;
+		case KVM_SPSR_IRQ:
+			return vcpu->arch.ctxt.spsr_irq;
+		case KVM_SPSR_FIQ:
+			return vcpu->arch.ctxt.spsr_fiq;
+		}
+	}
 	switch (spsr_idx) {
 	case KVM_SPSR_SVC:
...
@@ -171,7 +183,24 @@ void vcpu_write_spsr32(struct kvm_vcpu *vcpu, unsigned long v)
 	int spsr_idx = vcpu_spsr32_mode(vcpu);
 	if (!vcpu->arch.sysregs_loaded_on_cpu) {
-		vcpu_gp_regs(vcpu)->spsr[spsr_idx] = v;
+		switch (spsr_idx) {
+		case KVM_SPSR_SVC:
+			__vcpu_sys_reg(vcpu, SPSR_EL1) = v;
+			break;
+		case KVM_SPSR_ABT:
+			vcpu->arch.ctxt.spsr_abt = v;
+			break;
+		case KVM_SPSR_UND:
+			vcpu->arch.ctxt.spsr_und = v;
+			break;
+		case KVM_SPSR_IRQ:
+			vcpu->arch.ctxt.spsr_irq = v;
+			break;
+		case KVM_SPSR_FIQ:
+			vcpu->arch.ctxt.spsr_fiq = v;
+			break;
+		}
 		return;
 	}
...
arch/arm64/kvm/reset.c  View file @ 300dca68
...
@@ -288,7 +288,7 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 	/* Reset core registers */
 	memset(vcpu_gp_regs(vcpu), 0, sizeof(*vcpu_gp_regs(vcpu)));
-	vcpu_gp_regs(vcpu)->regs.pstate = pstate;
+	vcpu_gp_regs(vcpu)->pstate = pstate;
 	/* Reset system registers */
 	kvm_reset_sys_regs(vcpu);
...
arch/arm64/kvm/sys_regs.c  View file @ 300dca68
...
@@ -94,6 +94,7 @@ static bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val)
 	case TPIDR_EL1:		*val = read_sysreg_s(SYS_TPIDR_EL1);	break;
 	case AMAIR_EL1:		*val = read_sysreg_s(SYS_AMAIR_EL12);	break;
 	case CNTKCTL_EL1:	*val = read_sysreg_s(SYS_CNTKCTL_EL12);	break;
+	case ELR_EL1:		*val = read_sysreg_s(SYS_ELR_EL12);	break;
 	case PAR_EL1:		*val = read_sysreg_s(SYS_PAR_EL1);	break;
 	case DACR32_EL2:	*val = read_sysreg_s(SYS_DACR32_EL2);	break;
 	case IFSR32_EL2:	*val = read_sysreg_s(SYS_IFSR32_EL2);	break;
...
@@ -133,6 +134,7 @@ static bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
 	case TPIDR_EL1:		write_sysreg_s(val, SYS_TPIDR_EL1);	break;
 	case AMAIR_EL1:		write_sysreg_s(val, SYS_AMAIR_EL12);	break;
 	case CNTKCTL_EL1:	write_sysreg_s(val, SYS_CNTKCTL_EL12);	break;
+	case ELR_EL1:		write_sysreg_s(val, SYS_ELR_EL12);	break;
 	case PAR_EL1:		write_sysreg_s(val, SYS_PAR_EL1);	break;
 	case DACR32_EL2:	write_sysreg_s(val, SYS_DACR32_EL2);	break;
 	case IFSR32_EL2:	write_sysreg_s(val, SYS_IFSR32_EL2);	break;
...
arch/arm64/kvm/trace_arm.h  View file @ 300dca68
...
@@ -301,8 +301,8 @@ TRACE_EVENT(kvm_timer_save_state,
 	),
 	TP_fast_assign(
-		__entry->ctl		= ctx->cnt_ctl;
-		__entry->cval		= ctx->cnt_cval;
+		__entry->ctl		= timer_get_ctl(ctx);
+		__entry->cval		= timer_get_cval(ctx);
 		__entry->timer_idx	= arch_timer_ctx_index(ctx);
 	),
...
@@ -323,8 +323,8 @@ TRACE_EVENT(kvm_timer_restore_state,
 	),
 	TP_fast_assign(
-		__entry->ctl		= ctx->cnt_ctl;
-		__entry->cval		= ctx->cnt_cval;
+		__entry->ctl		= timer_get_ctl(ctx);
+		__entry->cval		= timer_get_cval(ctx);
 		__entry->timer_idx	= arch_timer_ctx_index(ctx);
 	),
...
include/kvm/arm_arch_timer.h  View file @ 300dca68
...
@@ -26,16 +26,9 @@ enum kvm_arch_timer_regs {
 struct arch_timer_context {
 	struct kvm_vcpu			*vcpu;
-	/* Registers: control register, timer value */
-	u32				cnt_ctl;
-	u64				cnt_cval;
 	/* Timer IRQ */
 	struct kvm_irq_level		irq;
-	/* Virtual offset */
-	u64				cntvoff;
 	/* Emulated Timer (may be unused) */
 	struct hrtimer			hrtimer;
...
@@ -71,7 +64,7 @@ int kvm_timer_hyp_init(bool);
 int kvm_timer_enable(struct kvm_vcpu *vcpu);
 int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu);
 void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
-void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu);
+void kvm_timer_sync_user(struct kvm_vcpu *vcpu);
 bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu);
 void kvm_timer_update_run(struct kvm_vcpu *vcpu);
 void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu);
...
@@ -109,4 +102,8 @@ void kvm_arm_timer_write_sysreg(struct kvm_vcpu *vcpu,
 				enum kvm_arch_timer_regs treg,
 				u64 val);
+/* Needed for tracing */
+u32 timer_get_ctl(struct arch_timer_context *ctxt);
+u64 timer_get_cval(struct arch_timer_context *ctxt);
 #endif
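arch_timer_context loses its cached cnt_ctl/cnt_cval fields, and the trace events above switch to timer_get_ctl()/timer_get_cval(). The accessors are implemented in arch/arm64/kvm/arch_timer.c (also rewritten in this merge) and presumably read the values back from the vcpu's sys_regs file; a plausible sketch, with the register index names taken as assumptions:

/* Plausible shape of the accessors declared above -- not verbatim kernel code. */
u32 timer_get_ctl(struct arch_timer_context *ctxt)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch (arch_timer_ctx_index(ctxt)) {
	case TIMER_VTIMER:
		return __vcpu_sys_reg(vcpu, CNTV_CTL_EL0);
	case TIMER_PTIMER:
		return __vcpu_sys_reg(vcpu, CNTP_CTL_EL0);
	default:
		WARN_ONCE(1, "unhandled timer context");
		return 0;
	}
}

u64 timer_get_cval(struct arch_timer_context *ctxt)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch (arch_timer_ctx_index(ctxt)) {
	case TIMER_VTIMER:
		return __vcpu_sys_reg(vcpu, CNTV_CVAL_EL0);
	case TIMER_PTIMER:
		return __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0);
	default:
		WARN_ONCE(1, "unhandled timer context");
		return 0;
	}
}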