Commit b73117c4 authored by Paolo Bonzini

Merge branch 'kvm-ppc-next' of git://github.com/agraf/linux-2.6 into kvm-queue

Conflicts:
	arch/powerpc/kvm/book3s_hv_rmhandlers.S
	arch/powerpc/kvm/booke.c
parents 77f01bdf 40688909
@@ -1838,6 +1838,7 @@ registers, find a list below:
  PPC   | KVM_REG_PPC_LPCR | 64
  PPC   | KVM_REG_PPC_PPR | 64
  PPC   | KVM_REG_PPC_ARCH_COMPAT | 32
  PPC   | KVM_REG_PPC_DABRX | 32
  PPC   | KVM_REG_PPC_TM_GPR0 | 64
          ...
  PPC   | KVM_REG_PPC_TM_GPR31 | 64
...
@@ -343,6 +343,8 @@ config PPC_TRANSACTIONAL_MEM
	bool "Transactional Memory support for POWERPC"
	depends on PPC_BOOK3S_64
	depends on SMP
	select ALTIVEC
	select VSX
	default n
	---help---
	  Support user-mode Transactional Memory on POWERPC.
...
@@ -460,5 +460,116 @@ static inline unsigned int ev_idle(void)
	return r3;
}
#ifdef CONFIG_EPAPR_PARAVIRT
static inline unsigned long epapr_hypercall(unsigned long *in,
unsigned long *out,
unsigned long nr)
{
unsigned long register r0 asm("r0");
unsigned long register r3 asm("r3") = in[0];
unsigned long register r4 asm("r4") = in[1];
unsigned long register r5 asm("r5") = in[2];
unsigned long register r6 asm("r6") = in[3];
unsigned long register r7 asm("r7") = in[4];
unsigned long register r8 asm("r8") = in[5];
unsigned long register r9 asm("r9") = in[6];
unsigned long register r10 asm("r10") = in[7];
unsigned long register r11 asm("r11") = nr;
unsigned long register r12 asm("r12");
asm volatile("bl epapr_hypercall_start"
: "=r"(r0), "=r"(r3), "=r"(r4), "=r"(r5), "=r"(r6),
"=r"(r7), "=r"(r8), "=r"(r9), "=r"(r10), "=r"(r11),
"=r"(r12)
: "r"(r3), "r"(r4), "r"(r5), "r"(r6), "r"(r7), "r"(r8),
"r"(r9), "r"(r10), "r"(r11)
: "memory", "cc", "xer", "ctr", "lr");
out[0] = r4;
out[1] = r5;
out[2] = r6;
out[3] = r7;
out[4] = r8;
out[5] = r9;
out[6] = r10;
out[7] = r11;
return r3;
}
#else
static unsigned long epapr_hypercall(unsigned long *in,
unsigned long *out,
unsigned long nr)
{
return EV_UNIMPLEMENTED;
}
#endif
static inline long epapr_hypercall0_1(unsigned int nr, unsigned long *r2)
{
unsigned long in[8];
unsigned long out[8];
unsigned long r;
r = epapr_hypercall(in, out, nr);
*r2 = out[0];
return r;
}
static inline long epapr_hypercall0(unsigned int nr)
{
unsigned long in[8];
unsigned long out[8];
return epapr_hypercall(in, out, nr);
}
static inline long epapr_hypercall1(unsigned int nr, unsigned long p1)
{
unsigned long in[8];
unsigned long out[8];
in[0] = p1;
return epapr_hypercall(in, out, nr);
}
static inline long epapr_hypercall2(unsigned int nr, unsigned long p1,
unsigned long p2)
{
unsigned long in[8];
unsigned long out[8];
in[0] = p1;
in[1] = p2;
return epapr_hypercall(in, out, nr);
}
static inline long epapr_hypercall3(unsigned int nr, unsigned long p1,
unsigned long p2, unsigned long p3)
{
unsigned long in[8];
unsigned long out[8];
in[0] = p1;
in[1] = p2;
in[2] = p3;
return epapr_hypercall(in, out, nr);
}
static inline long epapr_hypercall4(unsigned int nr, unsigned long p1,
unsigned long p2, unsigned long p3,
unsigned long p4)
{
unsigned long in[8];
unsigned long out[8];
in[0] = p1;
in[1] = p2;
in[2] = p3;
in[3] = p4;
return epapr_hypercall(in, out, nr);
}
#endif /* !__ASSEMBLY__ */
#endif /* _EPAPR_HCALLS_H */
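
Usage sketch (illustrative, not part of this commit): the wrappers above pack their arguments into in[0..7] and let epapr_hypercall() issue "bl epapr_hypercall_start" with the token in r11. EV_HCALL_TOKEN() and EV_EPAPR_VENDOR_ID already exist in epapr_hcalls.h; the hcall number 42 below is a made-up placeholder.

static inline long example_ev_hcall2(unsigned long p1, unsigned long p2)
{
	/* token = vendor id | call number; 42 is a placeholder number */
	return epapr_hypercall2(EV_HCALL_TOKEN(EV_EPAPR_VENDOR_ID, 42),
				p1, p2);
}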
@@ -91,14 +91,17 @@
#define BOOK3S_INTERRUPT_FP_UNAVAIL 0x800
#define BOOK3S_INTERRUPT_DECREMENTER 0x900
#define BOOK3S_INTERRUPT_HV_DECREMENTER 0x980
#define BOOK3S_INTERRUPT_DOORBELL 0xa00
#define BOOK3S_INTERRUPT_SYSCALL 0xc00
#define BOOK3S_INTERRUPT_TRACE 0xd00
#define BOOK3S_INTERRUPT_H_DATA_STORAGE 0xe00
#define BOOK3S_INTERRUPT_H_INST_STORAGE 0xe20
#define BOOK3S_INTERRUPT_H_EMUL_ASSIST 0xe40
#define BOOK3S_INTERRUPT_H_DOORBELL 0xe80
#define BOOK3S_INTERRUPT_PERFMON 0xf00
#define BOOK3S_INTERRUPT_ALTIVEC 0xf20
#define BOOK3S_INTERRUPT_VSX 0xf40
#define BOOK3S_INTERRUPT_H_FAC_UNAVAIL 0xf80
#define BOOK3S_IRQPRIO_SYSTEM_RESET 0
#define BOOK3S_IRQPRIO_DATA_SEGMENT 1
...
@@ -186,9 +186,6 @@ extern void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr,
extern void kvmppc_entry_trampoline(void);
extern void kvmppc_hv_entry_trampoline(void);
extern void kvmppc_load_up_fpu(void);
extern void kvmppc_load_up_altivec(void);
extern void kvmppc_load_up_vsx(void);
extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
@@ -271,16 +268,25 @@ static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
	return vcpu->arch.pc;
}
-static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
+static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
{
-	ulong pc = kvmppc_get_pc(vcpu);
+	return (vcpu->arch.shared->msr & MSR_LE) != (MSR_KERNEL & MSR_LE);
+}
+
+static inline u32 kvmppc_get_last_inst_internal(struct kvm_vcpu *vcpu, ulong pc)
+{
	/* Load the instruction manually if it failed to do so in the
	 * exit path */
	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
		kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false);
-	return vcpu->arch.last_inst;
+	return kvmppc_need_byteswap(vcpu) ? swab32(vcpu->arch.last_inst) :
+		vcpu->arch.last_inst;
+}
+
+static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
+{
+	return kvmppc_get_last_inst_internal(vcpu, kvmppc_get_pc(vcpu));
}
/*
@@ -290,14 +296,7 @@ static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
 */
static inline u32 kvmppc_get_last_sc(struct kvm_vcpu *vcpu)
{
-	ulong pc = kvmppc_get_pc(vcpu) - 4;
-
-	/* Load the instruction manually if it failed to do so in the
-	 * exit path */
-	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
-		kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false);
-
-	return vcpu->arch.last_inst;
+	return kvmppc_get_last_inst_internal(vcpu, kvmppc_get_pc(vcpu) - 4);
}

static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
...
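
Worked example (illustrative, not from this commit): with a little-endian guest on a big-endian host kernel, kvmppc_need_byteswap() above returns true, so the cached opcode is passed through swab32() before emulation. A data-load helper following the same pattern might look like this; "raw" is a hypothetical just-loaded value.

static inline u32 example_guest_load32(struct kvm_vcpu *vcpu, u32 raw)
{
	/* swap only when guest and host kernel endianness differ */
	return kvmppc_need_byteswap(vcpu) ? swab32(raw) : raw;
}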
@@ -88,6 +88,7 @@ struct kvmppc_host_state {
	u8 hwthread_req;
	u8 hwthread_state;
	u8 host_ipi;
u8 ptid;
	struct kvm_vcpu *kvm_vcpu;
	struct kvmppc_vcore *kvm_vcore;
	unsigned long xics_phys;
...
@@ -63,6 +63,12 @@ static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
	return vcpu->arch.xer;
}
static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
{
/* XXX Would need to check TLB entry */
return false;
}
static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.last_inst;
...
@@ -288,6 +288,7 @@ struct kvmppc_vcore {
	int n_woken;
	int nap_count;
	int napping_threads;
	int first_vcpuid;
	u16 pcpu;
	u16 last_cpu;
	u8 vcore_state;
@@ -298,10 +299,12 @@
	u64 stolen_tb;
	u64 preempt_tb;
	struct kvm_vcpu *runner;
	struct kvm *kvm;
	u64 tb_offset; /* guest timebase - host timebase */
	ulong lpcr;
	u32 arch_compat;
	ulong pcr;
	ulong dpdes; /* doorbell state (POWER8) */
};

#define VCORE_ENTRY_COUNT(vc) ((vc)->entry_exit_count & 0xff)
@@ -410,8 +413,7 @@
	ulong gpr[32];

-	u64 fpr[32];
-	u64 fpscr;
+	struct thread_fp_state fp;
#ifdef CONFIG_SPE
	ulong evr[32];
@@ -420,12 +422,7 @@
	u64 acc;
#endif
#ifdef CONFIG_ALTIVEC
-	vector128 vr[32];
+	struct thread_vr_state vr;
-	vector128 vscr;
-#endif
-#ifdef CONFIG_VSX
-	u64 vsr[64];
#endif
#ifdef CONFIG_KVM_BOOKE_HV
@@ -452,6 +449,7 @@
	ulong pc;
	ulong ctr;
	ulong lr;
	ulong tar;
	ulong xer;
	u32 cr;
@@ -461,13 +459,30 @@
	ulong guest_owned_ext;
	ulong purr;
	ulong spurr;
ulong ic;
ulong vtb;
	ulong dscr;
	ulong amr;
	ulong uamor;
	ulong iamr;
	u32 ctrl;
	u32 dabrx;
	ulong dabr;
	ulong dawr;
	ulong dawrx;
	ulong ciabr;
	ulong cfar;
	ulong ppr;
ulong pspb;
ulong fscr;
ulong ebbhr;
ulong ebbrr;
ulong bescr;
ulong csigr;
ulong tacr;
ulong tcscr;
ulong acop;
ulong wort;
	ulong shadow_srr1;
#endif
	u32 vrsave; /* also USPRG0 */
@@ -502,10 +517,33 @@
	u32 ccr1;
	u32 dbsr;

-	u64 mmcr[3];
+	u64 mmcr[5];
	u32 pmc[8];
	u32 spmc[2];
	u64 siar;
	u64 sdar;
	u64 sier;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
u64 tfhar;
u64 texasr;
u64 tfiar;
u32 cr_tm;
u64 lr_tm;
u64 ctr_tm;
u64 amr_tm;
u64 ppr_tm;
u64 dscr_tm;
u64 tar_tm;
ulong gpr_tm[32];
struct thread_fp_state fp_tm;
struct thread_vr_state vr_tm;
u32 vrsave_tm; /* also USPRG0 */
#endif
#ifdef CONFIG_KVM_EXIT_TIMING
	struct mutex exit_timing_lock;
@@ -546,6 +584,7 @@
#endif
	gpa_t paddr_accessed;
	gva_t vaddr_accessed;
	pgd_t *pgdir;
	u8 io_gpr; /* GPR used as IO source/target */
	u8 mmio_is_bigendian;
@@ -603,7 +642,6 @@
	struct list_head run_list;
	struct task_struct *run_task;
	struct kvm_run *kvm_run;
	pgd_t *pgdir;
	spinlock_t vpa_update_lock;
	struct kvmppc_vpa vpa;
@@ -616,9 +654,12 @@
	spinlock_t tbacct_lock;
	u64 busy_stolen;
	u64 busy_preempt;
	unsigned long intr_msr;
#endif
};

#define VCPU_FPR(vcpu, i) (vcpu)->arch.fp.fpr[i][TS_FPROFFSET]

/* Values for vcpu->arch.state */
#define KVMPPC_VCPU_NOTREADY 0
#define KVMPPC_VCPU_RUNNABLE 1
...
@@ -39,10 +39,6 @@ static inline int kvm_para_available(void)
	return 1;
}
extern unsigned long kvm_hypercall(unsigned long *in,
unsigned long *out,
unsigned long nr);
#else
static inline int kvm_para_available(void)
@@ -50,82 +46,8 @@ static inline int kvm_para_available(void)
	return 0;
}
static unsigned long kvm_hypercall(unsigned long *in,
unsigned long *out,
unsigned long nr)
{
return EV_UNIMPLEMENTED;
}
#endif
static inline long kvm_hypercall0_1(unsigned int nr, unsigned long *r2)
{
unsigned long in[8];
unsigned long out[8];
unsigned long r;
r = kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr));
*r2 = out[0];
return r;
}
static inline long kvm_hypercall0(unsigned int nr)
{
unsigned long in[8];
unsigned long out[8];
return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr));
}
static inline long kvm_hypercall1(unsigned int nr, unsigned long p1)
{
unsigned long in[8];
unsigned long out[8];
in[0] = p1;
return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr));
}
static inline long kvm_hypercall2(unsigned int nr, unsigned long p1,
unsigned long p2)
{
unsigned long in[8];
unsigned long out[8];
in[0] = p1;
in[1] = p2;
return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr));
}
static inline long kvm_hypercall3(unsigned int nr, unsigned long p1,
unsigned long p2, unsigned long p3)
{
unsigned long in[8];
unsigned long out[8];
in[0] = p1;
in[1] = p2;
in[2] = p3;
return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr));
}
static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
unsigned long p2, unsigned long p3,
unsigned long p4)
{
unsigned long in[8];
unsigned long out[8];
in[0] = p1;
in[1] = p2;
in[2] = p3;
in[3] = p4;
return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr));
}
static inline unsigned int kvm_arch_para_features(void)
{
	unsigned long r;
@@ -133,7 +55,7 @@ static inline unsigned int kvm_arch_para_features(void)
	if (!kvm_para_available())
		return 0;

-	if(kvm_hypercall0_1(KVM_HC_FEATURES, &r))
+	if(epapr_hypercall0_1(KVM_HCALL_TOKEN(KVM_HC_FEATURES), &r))
		return 0;

	return r;
...
@@ -54,12 +54,13 @@ extern void kvmppc_handler_highmem(void);
extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
			      unsigned int rt, unsigned int bytes,
-			      int is_bigendian);
+			      int is_default_endian);
extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
			       unsigned int rt, unsigned int bytes,
-			       int is_bigendian);
+			       int is_default_endian);
extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
-			       u64 val, unsigned int bytes, int is_bigendian);
+			       u64 val, unsigned int bytes,
+			       int is_default_endian);
extern int kvmppc_emulate_instruction(struct kvm_run *run,
				      struct kvm_vcpu *vcpu);
@@ -455,6 +456,12 @@ static inline void kvmppc_fix_ee_before_entry(void)
	trace_hardirqs_on();

#ifdef CONFIG_PPC64
/*
* To avoid races, the caller must have gone directly from having
* interrupts fully-enabled to hard-disabled.
*/
WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);
	/* Only need to enable IRQs by hard enabling them after this */
	local_paca->irq_happened = 0;
	local_paca->soft_enabled = 1;
...
@@ -223,6 +223,27 @@ extern int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
#endif
pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
				 unsigned *shift);
static inline pte_t *lookup_linux_ptep(pgd_t *pgdir, unsigned long hva,
unsigned long *pte_sizep)
{
pte_t *ptep;
unsigned long ps = *pte_sizep;
unsigned int shift;
ptep = find_linux_pte_or_hugepte(pgdir, hva, &shift);
if (!ptep)
return NULL;
if (shift)
*pte_sizep = 1ul << shift;
else
*pte_sizep = PAGE_SIZE;
if (ps > *pte_sizep)
return NULL;
return ptep;
}
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
...
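
Usage sketch (illustrative, not part of this commit): a caller of lookup_linux_ptep() above passes the largest page size it can handle in *pte_sizep and gets back the actual mapping size, or NULL if there is no PTE or the mapping is larger than allowed.

static unsigned long example_mapping_size(pgd_t *pgdir, unsigned long hva)
{
	unsigned long pte_size = PAGE_SIZE;	/* upper bound we accept */
	pte_t *ptep = lookup_linux_ptep(pgdir, hva, &pte_size);

	return ptep ? pte_size : 0;	/* 0: unmapped or mapping too big */
}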
@@ -223,17 +223,26 @@
#define CTRL_TE 0x00c00000 /* thread enable */
#define CTRL_RUNLATCH 0x1
#define SPRN_DAWR 0xB4
#define SPRN_CIABR 0xBB
#define CIABR_PRIV 0x3
#define CIABR_PRIV_USER 1
#define CIABR_PRIV_SUPER 2
#define CIABR_PRIV_HYPER 3
#define SPRN_DAWRX 0xBC
-#define DAWRX_USER (1UL << 0)
-#define DAWRX_KERNEL (1UL << 1)
-#define DAWRX_HYP (1UL << 2)
+#define DAWRX_USER __MASK(0)
+#define DAWRX_KERNEL __MASK(1)
+#define DAWRX_HYP __MASK(2)
#define DAWRX_WTI __MASK(3)
#define DAWRX_WT __MASK(4)
#define DAWRX_DR __MASK(5)
#define DAWRX_DW __MASK(6)
#define SPRN_DABR 0x3F5 /* Data Address Breakpoint Register */
#define SPRN_DABR2 0x13D /* e300 */
#define SPRN_DABRX 0x3F7 /* Data Address Breakpoint Register Extension */
-#define DABRX_USER (1UL << 0)
-#define DABRX_KERNEL (1UL << 1)
-#define DABRX_HYP (1UL << 2)
-#define DABRX_BTI (1UL << 3)
+#define DABRX_USER __MASK(0)
+#define DABRX_KERNEL __MASK(1)
+#define DABRX_HYP __MASK(2)
+#define DABRX_BTI __MASK(3)
#define DABRX_ALL (DABRX_BTI | DABRX_HYP | DABRX_KERNEL | DABRX_USER)
#define SPRN_DAR 0x013 /* Data Address Register */
#define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
@@ -260,6 +269,8 @@
#define SPRN_HRMOR 0x139 /* Real mode offset register */
#define SPRN_HSRR0 0x13A /* Hypervisor Save/Restore 0 */
#define SPRN_HSRR1 0x13B /* Hypervisor Save/Restore 1 */
#define SPRN_IC 0x350 /* Virtual Instruction Count */
#define SPRN_VTB 0x351 /* Virtual Time Base */
/* HFSCR and FSCR bit numbers are the same */
#define FSCR_TAR_LG 8 /* Enable Target Address Register */
#define FSCR_EBB_LG 7 /* Enable Event Based Branching */
@@ -298,9 +309,13 @@
#define LPCR_RMLS 0x1C000000 /* impl dependent rmo limit sel */
#define LPCR_RMLS_SH (63-37)
#define LPCR_ILE 0x02000000 /* !HV irqs set MSR:LE */
#define LPCR_AIL 0x01800000 /* Alternate interrupt location */
#define LPCR_AIL_0 0x00000000 /* MMU off exception offset 0x0 */
#define LPCR_AIL_3 0x01800000 /* MMU on exception offset 0xc00...4xxx */
+#define LPCR_ONL 0x00040000 /* online - PURR/SPURR count */
-#define LPCR_PECE 0x00007000 /* powersave exit cause enable */
+#define LPCR_PECE 0x0001f000 /* powersave exit cause enable */
#define LPCR_PECEDP 0x00010000 /* directed priv dbells cause exit */
#define LPCR_PECEDH 0x00008000 /* directed hyp dbells cause exit */
#define LPCR_PECE0 0x00004000 /* ext. exceptions can cause exit */
#define LPCR_PECE1 0x00002000 /* decrementer can cause exit */
#define LPCR_PECE2 0x00001000 /* machine check etc can cause exit */
@@ -322,6 +337,8 @@
#define SPRN_PCR 0x152 /* Processor compatibility register */
#define PCR_VEC_DIS (1ul << (63-0)) /* Vec. disable (bit NA since POWER8) */
#define PCR_VSX_DIS (1ul << (63-1)) /* VSX disable (bit NA since POWER8) */
#define PCR_TM_DIS (1ul << (63-2)) /* Trans. memory disable (POWER8) */
#define PCR_ARCH_206 0x4 /* Architecture 2.06 */
#define PCR_ARCH_205 0x2 /* Architecture 2.05 */
#define SPRN_HEIR 0x153 /* Hypervisor Emulated Instruction Register */
#define SPRN_TLBINDEXR 0x154 /* P7 TLB control register */
@@ -368,6 +385,8 @@
#define DER_EBRKE 0x00000002 /* External Breakpoint Interrupt */
#define DER_DPIE 0x00000001 /* Dev. Port Nonmaskable Request */
#define SPRN_DMISS 0x3D0 /* Data TLB Miss Register */
#define SPRN_DHDES 0x0B1 /* Directed Hyp. Doorbell Exc. State */
#define SPRN_DPDES 0x0B0 /* Directed Priv. Doorbell Exc. State */
#define SPRN_EAR 0x11A /* External Address Register */
#define SPRN_HASH1 0x3D2 /* Primary Hash Address Register */
#define SPRN_HASH2 0x3D3 /* Secondary Hash Address Resgister */
@@ -427,6 +446,7 @@
#define SPRN_IABR 0x3F2 /* Instruction Address Breakpoint Register */
#define SPRN_IABR2 0x3FA /* 83xx */
#define SPRN_IBCR 0x135 /* 83xx Insn Breakpoint Control Reg */
#define SPRN_IAMR 0x03D /* Instr. Authority Mask Reg */
#define SPRN_HID4 0x3F4 /* 970 HID4 */
#define HID4_LPES0 (1ul << (63-0)) /* LPAR env. sel. bit 0 */
#define HID4_RMLS2_SH (63 - 2) /* Real mode limit bottom 2 bits */
@@ -541,6 +561,7 @@
#define SPRN_PIR 0x3FF /* Processor Identification Register */
#endif
#define SPRN_TIR 0x1BE /* Thread Identification Register */
#define SPRN_PSPB 0x09F /* Problem State Priority Boost reg */
#define SPRN_PTEHI 0x3D5 /* 981 7450 PTE HI word (S/W TLB load) */
#define SPRN_PTELO 0x3D6 /* 982 7450 PTE LO word (S/W TLB load) */
#define SPRN_PURR 0x135 /* Processor Utilization of Resources Reg */
@@ -682,6 +703,7 @@
#define SPRN_EBBHR 804 /* Event based branch handler register */
#define SPRN_EBBRR 805 /* Event based branch return register */
#define SPRN_BESCR 806 /* Branch event status and control register */
#define SPRN_WORT 895 /* Workload optimization register - thread */
#define SPRN_PMC1 787
#define SPRN_PMC2 788
@@ -698,6 +720,11 @@
#define SIER_SIHV 0x1000000 /* Sampled MSR_HV */
#define SIER_SIAR_VALID 0x0400000 /* SIAR contents valid */
#define SIER_SDAR_VALID 0x0200000 /* SDAR contents valid */
#define SPRN_TACR 888
#define SPRN_TCSCR 889
#define SPRN_CSIGR 890
#define SPRN_SPMC1 892
#define SPRN_SPMC2 893
/* When EBB is enabled, some of MMCR0/MMCR2/SIER are user accessible */
#define MMCR0_USER_MASK (MMCR0_FC | MMCR0_PMXE | MMCR0_PMAO)
...
@@ -25,10 +25,8 @@ static inline void save_tar(struct thread_struct *prev)
static inline void save_tar(struct thread_struct *prev) {}
#endif
extern void load_up_fpu(void);
extern void enable_kernel_fp(void);
extern void enable_kernel_altivec(void);
extern void load_up_altivec(struct task_struct *);
extern int emulate_altivec(struct pt_regs *);
extern void __giveup_vsx(struct task_struct *);
extern void giveup_vsx(struct task_struct *);
...
@@ -545,6 +545,7 @@ struct kvm_get_htab_header {
#define KVM_REG_PPC_TCSCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb1)
#define KVM_REG_PPC_PID (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb2)
#define KVM_REG_PPC_ACOP (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb3)
#define KVM_REG_PPC_WORT (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb4)
#define KVM_REG_PPC_VRSAVE (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb4)
#define KVM_REG_PPC_LPCR (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb5)
@@ -553,6 +554,8 @@ struct kvm_get_htab_header {
/* Architecture compatibility level */
#define KVM_REG_PPC_ARCH_COMPAT (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb7)
#define KVM_REG_PPC_DABRX (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb8)
/* Transactional Memory checkpointed state:
 * This is all GPRs, all VSX regs and a subset of SPRs
 */
...
@@ -6,6 +6,8 @@
 * the failure is persistent. PAPR saves 0xff-0xe0 for the hypervisor.
 */
#define TM_CAUSE_PERSISTENT 0x01
#define TM_CAUSE_KVM_RESCHED 0xe0 /* From PAPR */
#define TM_CAUSE_KVM_FAC_UNAV 0xe2 /* From PAPR */
#define TM_CAUSE_RESCHED 0xde
#define TM_CAUSE_TLBI 0xdc
#define TM_CAUSE_FAC_UNAV 0xda
...
@@ -425,18 +425,14 @@ int main(void)
	DEFINE(VCPU_GUEST_PID, offsetof(struct kvm_vcpu, arch.pid));
	DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
	DEFINE(VCPU_VRSAVE, offsetof(struct kvm_vcpu, arch.vrsave));
-	DEFINE(VCPU_FPRS, offsetof(struct kvm_vcpu, arch.fpr));
-	DEFINE(VCPU_FPSCR, offsetof(struct kvm_vcpu, arch.fpscr));
+	DEFINE(VCPU_FPRS, offsetof(struct kvm_vcpu, arch.fp.fpr));
#ifdef CONFIG_ALTIVEC
-	DEFINE(VCPU_VRS, offsetof(struct kvm_vcpu, arch.vr));
-	DEFINE(VCPU_VSCR, offsetof(struct kvm_vcpu, arch.vscr));
-#endif
-#ifdef CONFIG_VSX
-	DEFINE(VCPU_VSRS, offsetof(struct kvm_vcpu, arch.vsr));
+	DEFINE(VCPU_VRS, offsetof(struct kvm_vcpu, arch.vr.vr));
#endif
	DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
	DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));
	DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
	DEFINE(VCPU_TAR, offsetof(struct kvm_vcpu, arch.tar));
	DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
	DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
@@ -484,16 +480,24 @@ int main(void)
	DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar));
	DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa.pinned_addr));
	DEFINE(VCPU_VPA_DIRTY, offsetof(struct kvm_vcpu, arch.vpa.dirty));
	DEFINE(VCPU_INTR_MSR, offsetof(struct kvm_vcpu, arch.intr_msr));
#endif
#ifdef CONFIG_PPC_BOOK3S
	DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id));
	DEFINE(VCPU_PURR, offsetof(struct kvm_vcpu, arch.purr));
	DEFINE(VCPU_SPURR, offsetof(struct kvm_vcpu, arch.spurr));
	DEFINE(VCPU_IC, offsetof(struct kvm_vcpu, arch.ic));
	DEFINE(VCPU_VTB, offsetof(struct kvm_vcpu, arch.vtb));
	DEFINE(VCPU_DSCR, offsetof(struct kvm_vcpu, arch.dscr));
	DEFINE(VCPU_AMR, offsetof(struct kvm_vcpu, arch.amr));
	DEFINE(VCPU_UAMOR, offsetof(struct kvm_vcpu, arch.uamor));
	DEFINE(VCPU_IAMR, offsetof(struct kvm_vcpu, arch.iamr));
	DEFINE(VCPU_CTRL, offsetof(struct kvm_vcpu, arch.ctrl));
	DEFINE(VCPU_DABR, offsetof(struct kvm_vcpu, arch.dabr));
	DEFINE(VCPU_DABRX, offsetof(struct kvm_vcpu, arch.dabrx));
	DEFINE(VCPU_DAWR, offsetof(struct kvm_vcpu, arch.dawr));
	DEFINE(VCPU_DAWRX, offsetof(struct kvm_vcpu, arch.dawrx));
	DEFINE(VCPU_CIABR, offsetof(struct kvm_vcpu, arch.ciabr));
	DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags));
	DEFINE(VCPU_DEC, offsetof(struct kvm_vcpu, arch.dec));
	DEFINE(VCPU_DEC_EXPIRES, offsetof(struct kvm_vcpu, arch.dec_expires));
@@ -502,8 +506,10 @@ int main(void)
	DEFINE(VCPU_PRODDED, offsetof(struct kvm_vcpu, arch.prodded));
	DEFINE(VCPU_MMCR, offsetof(struct kvm_vcpu, arch.mmcr));
	DEFINE(VCPU_PMC, offsetof(struct kvm_vcpu, arch.pmc));
	DEFINE(VCPU_SPMC, offsetof(struct kvm_vcpu, arch.spmc));
	DEFINE(VCPU_SIAR, offsetof(struct kvm_vcpu, arch.siar));
	DEFINE(VCPU_SDAR, offsetof(struct kvm_vcpu, arch.sdar));
	DEFINE(VCPU_SIER, offsetof(struct kvm_vcpu, arch.sier));
	DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb));
	DEFINE(VCPU_SLB_MAX, offsetof(struct kvm_vcpu, arch.slb_max));
	DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr));
@@ -511,20 +517,47 @@ int main(void)
	DEFINE(VCPU_FAULT_DAR, offsetof(struct kvm_vcpu, arch.fault_dar));
	DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
	DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap));
	DEFINE(VCPU_PTID, offsetof(struct kvm_vcpu, arch.ptid));
	DEFINE(VCPU_CFAR, offsetof(struct kvm_vcpu, arch.cfar));
	DEFINE(VCPU_PPR, offsetof(struct kvm_vcpu, arch.ppr));
DEFINE(VCPU_FSCR, offsetof(struct kvm_vcpu, arch.fscr));
DEFINE(VCPU_PSPB, offsetof(struct kvm_vcpu, arch.pspb));
DEFINE(VCPU_EBBHR, offsetof(struct kvm_vcpu, arch.ebbhr));
DEFINE(VCPU_EBBRR, offsetof(struct kvm_vcpu, arch.ebbrr));
DEFINE(VCPU_BESCR, offsetof(struct kvm_vcpu, arch.bescr));
DEFINE(VCPU_CSIGR, offsetof(struct kvm_vcpu, arch.csigr));
DEFINE(VCPU_TACR, offsetof(struct kvm_vcpu, arch.tacr));
DEFINE(VCPU_TCSCR, offsetof(struct kvm_vcpu, arch.tcscr));
DEFINE(VCPU_ACOP, offsetof(struct kvm_vcpu, arch.acop));
DEFINE(VCPU_WORT, offsetof(struct kvm_vcpu, arch.wort));
	DEFINE(VCPU_SHADOW_SRR1, offsetof(struct kvm_vcpu, arch.shadow_srr1));
	DEFINE(VCORE_ENTRY_EXIT, offsetof(struct kvmppc_vcore, entry_exit_count));
	DEFINE(VCORE_NAP_COUNT, offsetof(struct kvmppc_vcore, nap_count));
	DEFINE(VCORE_IN_GUEST, offsetof(struct kvmppc_vcore, in_guest));
	DEFINE(VCORE_NAPPING_THREADS, offsetof(struct kvmppc_vcore, napping_threads));
	DEFINE(VCORE_KVM, offsetof(struct kvmppc_vcore, kvm));
	DEFINE(VCORE_TB_OFFSET, offsetof(struct kvmppc_vcore, tb_offset));
	DEFINE(VCORE_LPCR, offsetof(struct kvmppc_vcore, lpcr));
	DEFINE(VCORE_PCR, offsetof(struct kvmppc_vcore, pcr));
	DEFINE(VCORE_DPDES, offsetof(struct kvmppc_vcore, dpdes));
	DEFINE(VCPU_SLB_E, offsetof(struct kvmppc_slb, orige));
	DEFINE(VCPU_SLB_V, offsetof(struct kvmppc_slb, origv));
	DEFINE(VCPU_SLB_SIZE, sizeof(struct kvmppc_slb));
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
DEFINE(VCPU_TFHAR, offsetof(struct kvm_vcpu, arch.tfhar));
DEFINE(VCPU_TFIAR, offsetof(struct kvm_vcpu, arch.tfiar));
DEFINE(VCPU_TEXASR, offsetof(struct kvm_vcpu, arch.texasr));
DEFINE(VCPU_GPR_TM, offsetof(struct kvm_vcpu, arch.gpr_tm));
DEFINE(VCPU_FPRS_TM, offsetof(struct kvm_vcpu, arch.fp_tm.fpr));
DEFINE(VCPU_VRS_TM, offsetof(struct kvm_vcpu, arch.vr_tm.vr));
DEFINE(VCPU_VRSAVE_TM, offsetof(struct kvm_vcpu, arch.vrsave_tm));
DEFINE(VCPU_CR_TM, offsetof(struct kvm_vcpu, arch.cr_tm));
DEFINE(VCPU_LR_TM, offsetof(struct kvm_vcpu, arch.lr_tm));
DEFINE(VCPU_CTR_TM, offsetof(struct kvm_vcpu, arch.ctr_tm));
DEFINE(VCPU_AMR_TM, offsetof(struct kvm_vcpu, arch.amr_tm));
DEFINE(VCPU_PPR_TM, offsetof(struct kvm_vcpu, arch.ppr_tm));
DEFINE(VCPU_DSCR_TM, offsetof(struct kvm_vcpu, arch.dscr_tm));
DEFINE(VCPU_TAR_TM, offsetof(struct kvm_vcpu, arch.tar_tm));
#endif
#ifdef CONFIG_PPC_BOOK3S_64
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
@@ -589,6 +622,7 @@ int main(void)
	HSTATE_FIELD(HSTATE_XICS_PHYS, xics_phys);
	HSTATE_FIELD(HSTATE_SAVED_XIRR, saved_xirr);
	HSTATE_FIELD(HSTATE_HOST_IPI, host_ipi);
	HSTATE_FIELD(HSTATE_PTID, ptid);
	HSTATE_FIELD(HSTATE_MMCR, host_mmcr);
	HSTATE_FIELD(HSTATE_PMC, host_pmc);
	HSTATE_FIELD(HSTATE_PURR, host_purr);
...
@@ -413,13 +413,13 @@ static void kvm_map_magic_page(void *data)
{
	u32 *features = data;

-	ulong in[8];
+	ulong in[8] = {0};
	ulong out[8];

	in[0] = KVM_MAGIC_PAGE;
	in[1] = KVM_MAGIC_PAGE;

-	kvm_hypercall(in, out, KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE));
+	epapr_hypercall(in, out, KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE));

	*features = out[0];
}
@@ -711,43 +711,6 @@ static void kvm_use_magic_page(void)
		kvm_patching_worked ? "worked" : "failed");
}
unsigned long kvm_hypercall(unsigned long *in,
unsigned long *out,
unsigned long nr)
{
unsigned long register r0 asm("r0");
unsigned long register r3 asm("r3") = in[0];
unsigned long register r4 asm("r4") = in[1];
unsigned long register r5 asm("r5") = in[2];
unsigned long register r6 asm("r6") = in[3];
unsigned long register r7 asm("r7") = in[4];
unsigned long register r8 asm("r8") = in[5];
unsigned long register r9 asm("r9") = in[6];
unsigned long register r10 asm("r10") = in[7];
unsigned long register r11 asm("r11") = nr;
unsigned long register r12 asm("r12");
asm volatile("bl epapr_hypercall_start"
: "=r"(r0), "=r"(r3), "=r"(r4), "=r"(r5), "=r"(r6),
"=r"(r7), "=r"(r8), "=r"(r9), "=r"(r10), "=r"(r11),
"=r"(r12)
: "r"(r3), "r"(r4), "r"(r5), "r"(r6), "r"(r7), "r"(r8),
"r"(r9), "r"(r10), "r"(r11)
: "memory", "cc", "xer", "ctr", "lr");
out[0] = r4;
out[1] = r5;
out[2] = r6;
out[3] = r7;
out[4] = r8;
out[5] = r9;
out[6] = r10;
out[7] = r11;
return r3;
}
EXPORT_SYMBOL_GPL(kvm_hypercall);
static __init void kvm_free_tmp(void)
{
	free_reserved_area(&kvm_tmp[kvm_tmp_index],
...
@@ -21,6 +21,8 @@
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <asm/reg.h>
#include <asm/cputable.h>
@@ -231,3 +233,5 @@ static void __exit kvmppc_44x_exit(void)
module_init(kvmppc_44x_init);
module_exit(kvmppc_44x_exit);
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
@@ -18,6 +18,8 @@
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <asm/reg.h>
#include <asm/cputable.h>
@@ -575,10 +577,10 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
		break;
	case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
		i = reg->id - KVM_REG_PPC_FPR0;
-		val = get_reg_val(reg->id, vcpu->arch.fpr[i]);
+		val = get_reg_val(reg->id, VCPU_FPR(vcpu, i));
		break;
	case KVM_REG_PPC_FPSCR:
-		val = get_reg_val(reg->id, vcpu->arch.fpscr);
+		val = get_reg_val(reg->id, vcpu->arch.fp.fpscr);
		break;
#ifdef CONFIG_ALTIVEC
	case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
@@ -586,19 +588,30 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
			r = -ENXIO;
			break;
		}
-		val.vval = vcpu->arch.vr[reg->id - KVM_REG_PPC_VR0];
+		val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
		break;
	case KVM_REG_PPC_VSCR:
		if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
			r = -ENXIO;
			break;
		}
-		val = get_reg_val(reg->id, vcpu->arch.vscr.u[3]);
+		val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
		break;
	case KVM_REG_PPC_VRSAVE:
		val = get_reg_val(reg->id, vcpu->arch.vrsave);
		break;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
if (cpu_has_feature(CPU_FTR_VSX)) {
long int i = reg->id - KVM_REG_PPC_VSR0;
val.vsxval[0] = vcpu->arch.fp.fpr[i][0];
val.vsxval[1] = vcpu->arch.fp.fpr[i][1];
} else {
r = -ENXIO;
}
break;
#endif /* CONFIG_VSX */
	case KVM_REG_PPC_DEBUG_INST: {
		u32 opcode = INS_TW;
		r = copy_to_user((u32 __user *)(long)reg->addr,
@@ -654,10 +667,10 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
		break;
	case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
		i = reg->id - KVM_REG_PPC_FPR0;
-		vcpu->arch.fpr[i] = set_reg_val(reg->id, val);
+		VCPU_FPR(vcpu, i) = set_reg_val(reg->id, val);
		break;
	case KVM_REG_PPC_FPSCR:
-		vcpu->arch.fpscr = set_reg_val(reg->id, val);
+		vcpu->arch.fp.fpscr = set_reg_val(reg->id, val);
		break;
#ifdef CONFIG_ALTIVEC
	case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
@@ -665,14 +678,14 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
			r = -ENXIO;
			break;
		}
-		vcpu->arch.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
+		vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
		break;
	case KVM_REG_PPC_VSCR:
		if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
			r = -ENXIO;
			break;
		}
-		vcpu->arch.vscr.u[3] = set_reg_val(reg->id, val);
+		vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
		break;
	case KVM_REG_PPC_VRSAVE:
		if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
@@ -682,6 +695,17 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
		vcpu->arch.vrsave = set_reg_val(reg->id, val);
		break;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
if (cpu_has_feature(CPU_FTR_VSX)) {
long int i = reg->id - KVM_REG_PPC_VSR0;
vcpu->arch.fp.fpr[i][0] = val.vsxval[0];
vcpu->arch.fp.fpr[i][1] = val.vsxval[1];
} else {
r = -ENXIO;
}
break;
#endif /* CONFIG_VSX */
#ifdef CONFIG_KVM_XICS
	case KVM_REG_PPC_ICP_STATE:
		if (!vcpu->arch.icp) {
@@ -879,3 +903,9 @@ static void kvmppc_book3s_exit(void)
module_init(kvmppc_book3s_init);
module_exit(kvmppc_book3s_exit);
/* On 32bit this is our one and only kernel module */
#ifdef CONFIG_KVM_BOOK3S_32
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
#endif
@@ -243,6 +243,11 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
	/* Now tell our Shadow PTE code about the new page */
	pte = kvmppc_mmu_hpte_cache_next(vcpu);
	if (!pte) {
		kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT);
		r = -EAGAIN;
		goto out;
	}

	dprintk_mmu("KVM: %c%c Map 0x%llx: [%lx] 0x%llx (0x%llx) -> %lx\n",
		    orig_pte->may_write ? 'w' : '-',
...
@@ -262,7 +262,7 @@ int kvmppc_mmu_hv_init(void)
static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
{
-	kvmppc_set_msr(vcpu, MSR_SF | MSR_ME);
+	kvmppc_set_msr(vcpu, vcpu->arch.intr_msr);
}

/*
@@ -562,7 +562,7 @@ static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
	 * we just return and retry the instruction.
	 */
-	if (instruction_is_store(vcpu->arch.last_inst) != !!is_store)
+	if (instruction_is_store(kvmppc_get_last_inst(vcpu)) != !!is_store)
		return RESUME_GUEST;

	/*
...
@@ -25,9 +25,5 @@ EXPORT_SYMBOL_GPL(kvmppc_hv_entry_trampoline);
#endif
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
EXPORT_SYMBOL_GPL(kvmppc_entry_trampoline);
EXPORT_SYMBOL_GPL(kvmppc_load_up_fpu);
#ifdef CONFIG_ALTIVEC
EXPORT_SYMBOL_GPL(kvmppc_load_up_altivec);
#endif
#endif
@@ -35,7 +35,7 @@
 ****************************************************************************/

/* Registers:
- *   r4: vcpu pointer
+ *   none
 */

_GLOBAL(__kvmppc_vcore_entry)
@@ -57,9 +57,11 @@ BEGIN_FTR_SECTION
	std	r3, HSTATE_DSCR(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

BEGIN_FTR_SECTION
	/* Save host DABR */
	mfspr	r3, SPRN_DABR
	std	r3, HSTATE_DABR(r13)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Hard-disable interrupts */
	mfmsr	r10
@@ -69,7 +71,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	mtmsrd	r10,1

	/* Save host PMU registers */
/* R4 is live here (vcpu pointer) but not r3 or r5 */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r7, SPRN_MMCR0		/* save MMCR0 */
@@ -134,16 +135,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	 * enters the guest with interrupts enabled.
	 */
BEGIN_FTR_SECTION
	ld	r4, HSTATE_KVM_VCPU(r13)
	ld	r0, VCPU_PENDING_EXC(r4)
	li	r7, (1 << BOOK3S_IRQPRIO_EXTERNAL)
	oris	r7, r7, (1 << BOOK3S_IRQPRIO_EXTERNAL_LEVEL)@h
	and.	r0, r0, r7
	beq	32f
mr r31, r4
	lhz	r3, PACAPACAINDEX(r13)
	bl	smp_send_reschedule
	nop
mr r4, r31
32: 32:
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
#endif /* CONFIG_SMP */
...
@@ -134,7 +134,7 @@ static void remove_revmap_chain(struct kvm *kvm, long pte_index,
	unlock_rmap(rmap);
}

-static pte_t lookup_linux_pte(pgd_t *pgdir, unsigned long hva,
+static pte_t lookup_linux_pte_and_update(pgd_t *pgdir, unsigned long hva,
			      int writing, unsigned long *pte_sizep)
{
	pte_t *ptep;
@@ -232,7 +232,8 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
	/* Look up the Linux PTE for the backing page */
	pte_size = psize;
-	pte = lookup_linux_pte(pgdir, hva, writing, &pte_size);
+	pte = lookup_linux_pte_and_update(pgdir, hva, writing,
+					  &pte_size);
	if (pte_present(pte)) {
		if (writing && !pte_write(pte))
			/* make the actual HPTE be read-only */
@@ -672,7 +673,8 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
		memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
		if (memslot) {
			hva = __gfn_to_hva_memslot(memslot, gfn);
-			pte = lookup_linux_pte(pgdir, hva, 1, &psize);
+			pte = lookup_linux_pte_and_update(pgdir, hva,
+							  1, &psize);
			if (pte_present(pte) && !pte_write(pte))
				r = hpte_make_readonly(r);
		}
...
@@ -41,6 +41,7 @@
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/miscdevice.h>

#include "book3s.h"
@@ -566,12 +567,6 @@ static inline int get_fpr_index(int i)
void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
{
	struct thread_struct *t = &current->thread;
u64 *vcpu_fpr = vcpu->arch.fpr;
#ifdef CONFIG_VSX
u64 *vcpu_vsx = vcpu->arch.vsr;
#endif
u64 *thread_fpr = &t->fp_state.fpr[0][0];
int i;
	/*
	 * VSX instructions can access FP and vector registers, so if
@@ -594,26 +589,16 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
	 * both the traditional FP registers and the added VSX
	 * registers into thread.fp_state.fpr[].
	 */
-		if (current->thread.regs->msr & MSR_FP)
+		if (t->regs->msr & MSR_FP)
			giveup_fpu(current);
-		for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
-			vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];
-		vcpu->arch.fpscr = t->fp_state.fpscr;
-#ifdef CONFIG_VSX
-		if (cpu_has_feature(CPU_FTR_VSX))
-			for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++)
-				vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1];
-#endif
+		t->fp_save_area = NULL;
	}
#ifdef CONFIG_ALTIVEC
	if (msr & MSR_VEC) {
		if (current->thread.regs->msr & MSR_VEC)
			giveup_altivec(current);
-		memcpy(vcpu->arch.vr, t->vr_state.vr, sizeof(vcpu->arch.vr));
-		vcpu->arch.vscr = t->vr_state.vscr;
+		t->vr_save_area = NULL;
	}
#endif
@@ -661,12 +646,6 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr)
{
	struct thread_struct *t = &current->thread;
u64 *vcpu_fpr = vcpu->arch.fpr;
#ifdef CONFIG_VSX
u64 *vcpu_vsx = vcpu->arch.vsr;
#endif
u64 *thread_fpr = &t->fp_state.fpr[0][0];
int i;
	/* When we have paired singles, we emulate in software */
	if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
@@ -704,27 +683,20 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
#endif

	if (msr & MSR_FP) {
-		for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
-			thread_fpr[get_fpr_index(i)] = vcpu_fpr[i];
-#ifdef CONFIG_VSX
-		for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++)
-			thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
-#endif
-		t->fp_state.fpscr = vcpu->arch.fpscr;
-		t->fpexc_mode = 0;
-		kvmppc_load_up_fpu();
+		enable_kernel_fp();
+		load_fp_state(&vcpu->arch.fp);
+		t->fp_save_area = &vcpu->arch.fp;
	}
	if (msr & MSR_VEC) {
#ifdef CONFIG_ALTIVEC
-		memcpy(t->vr_state.vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
-		t->vr_state.vscr = vcpu->arch.vscr;
-		t->vrsave = -1;
-		kvmppc_load_up_altivec();
+		enable_kernel_altivec();
+		load_vr_state(&vcpu->arch.vr);
+		t->vr_save_area = &vcpu->arch.vr;
#endif
	}

-	current->thread.regs->msr |= msr;
+	t->regs->msr |= msr;
	vcpu->arch.guest_owned_ext |= msr;
	kvmppc_recalc_shadow_msr(vcpu);
@@ -743,11 +715,15 @@ static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
	if (!lost_ext)
		return;

-	if (lost_ext & MSR_FP)
-		kvmppc_load_up_fpu();
+	if (lost_ext & MSR_FP) {
+		enable_kernel_fp();
+		load_fp_state(&vcpu->arch.fp);
+	}
#ifdef CONFIG_ALTIVEC
-	if (lost_ext & MSR_VEC)
-		kvmppc_load_up_altivec();
+	if (lost_ext & MSR_VEC) {
+		enable_kernel_altivec();
+		load_vr_state(&vcpu->arch.vr);
+	}
#endif
	current->thread.regs->msr |= lost_ext;
}
...@@ -873,6 +849,7 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, ...@@ -873,6 +849,7 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
/* We're good on these - the host merely wanted to get our attention */ /* We're good on these - the host merely wanted to get our attention */
case BOOK3S_INTERRUPT_DECREMENTER: case BOOK3S_INTERRUPT_DECREMENTER:
case BOOK3S_INTERRUPT_HV_DECREMENTER: case BOOK3S_INTERRUPT_HV_DECREMENTER:
case BOOK3S_INTERRUPT_DOORBELL:
vcpu->stat.dec_exits++; vcpu->stat.dec_exits++;
r = RESUME_GUEST; r = RESUME_GUEST;
break; break;
...@@ -1045,14 +1022,14 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, ...@@ -1045,14 +1022,14 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
* and if we really did time things so badly, then we just exit * and if we really did time things so badly, then we just exit
* again due to a host external interrupt. * again due to a host external interrupt.
*/ */
local_irq_disable();
s = kvmppc_prepare_to_enter(vcpu); s = kvmppc_prepare_to_enter(vcpu);
if (s <= 0) { if (s <= 0)
local_irq_enable();
r = s; r = s;
} else { else {
/* interrupts now hard-disabled */
kvmppc_fix_ee_before_entry(); kvmppc_fix_ee_before_entry();
} }
kvmppc_handle_lost_ext(vcpu); kvmppc_handle_lost_ext(vcpu);
} }
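
Editor's note: the hunks above replace the old element-by-element copying between vcpu->arch.fpr[]/vsr[] and current->thread.fp_state with direct loads from vcpu->arch.fp/vr plus fp_save_area/vr_save_area pointers, so the normal thread FP/Altivec save path writes guest registers straight back into the vcpu. Below is a minimal, hedged userspace model of that save-area idea; the struct and function names are illustrative, not the kernel's.

/*
 * Simplified model of the "save area" pattern this diff moves to: instead
 * of copying guest registers in and out of the thread's own state, the
 * thread keeps a pointer to whichever state block (host or guest) should
 * receive the registers on the next save.
 */
#include <stdio.h>

struct fp_state { double fpr[32]; };

struct thread {
	struct fp_state own;            /* the thread's normal FP state        */
	struct fp_state *fp_save_area;  /* where a save should land, or NULL   */
};

static struct fp_state live_regs;       /* stand-in for the FP register file   */

static void load_fp_state(const struct fp_state *src)
{
	live_regs = *src;               /* "load" registers from memory        */
}

static void save_fp_state(struct thread *t)
{
	struct fp_state *dst = t->fp_save_area ? t->fp_save_area : &t->own;
	*dst = live_regs;               /* a context switch saves to the area  */
}

int main(void)
{
	struct thread t = { .fp_save_area = NULL };
	struct fp_state guest = { .fpr = { [0] = 1.5 } };

	/* guest entry: point the save area at the guest block and load it */
	t.fp_save_area = &guest;
	load_fp_state(&guest);

	live_regs.fpr[0] = 2.5;         /* guest modifies a register           */
	save_fp_state(&t);              /* preemption saves straight to guest  */
	t.fp_save_area = NULL;          /* giveup: detach the guest area       */

	printf("guest fpr0 = %g\n", guest.fpr[0]);  /* prints 2.5 */
	return 0;
}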
...@@ -1133,19 +1110,6 @@ static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id, ...@@ -1133,19 +1110,6 @@ static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
case KVM_REG_PPC_HIOR: case KVM_REG_PPC_HIOR:
*val = get_reg_val(id, to_book3s(vcpu)->hior); *val = get_reg_val(id, to_book3s(vcpu)->hior);
break; break;
#ifdef CONFIG_VSX
case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31: {
long int i = id - KVM_REG_PPC_VSR0;
if (!cpu_has_feature(CPU_FTR_VSX)) {
r = -ENXIO;
break;
}
val->vsxval[0] = vcpu->arch.fpr[i];
val->vsxval[1] = vcpu->arch.vsr[i];
break;
}
#endif /* CONFIG_VSX */
default: default:
r = -EINVAL; r = -EINVAL;
break; break;
...@@ -1164,19 +1128,6 @@ static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id, ...@@ -1164,19 +1128,6 @@ static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
to_book3s(vcpu)->hior = set_reg_val(id, *val); to_book3s(vcpu)->hior = set_reg_val(id, *val);
to_book3s(vcpu)->hior_explicit = true; to_book3s(vcpu)->hior_explicit = true;
break; break;
#ifdef CONFIG_VSX
case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31: {
long int i = id - KVM_REG_PPC_VSR0;
if (!cpu_has_feature(CPU_FTR_VSX)) {
r = -ENXIO;
break;
}
vcpu->arch.fpr[i] = val->vsxval[0];
vcpu->arch.vsr[i] = val->vsxval[1];
break;
}
#endif /* CONFIG_VSX */
default: default:
r = -EINVAL; r = -EINVAL;
break; break;
...@@ -1274,17 +1225,9 @@ static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu) ...@@ -1274,17 +1225,9 @@ static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{ {
int ret; int ret;
struct thread_fp_state fp;
int fpexc_mode;
#ifdef CONFIG_ALTIVEC #ifdef CONFIG_ALTIVEC
struct thread_vr_state vr;
unsigned long uninitialized_var(vrsave); unsigned long uninitialized_var(vrsave);
int used_vr;
#endif #endif
#ifdef CONFIG_VSX
int used_vsr;
#endif
ulong ext_msr;
/* Check if we can run the vcpu at all */ /* Check if we can run the vcpu at all */
if (!vcpu->arch.sane) { if (!vcpu->arch.sane) {
...@@ -1299,40 +1242,27 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) ...@@ -1299,40 +1242,27 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
* really did time things so badly, then we just exit again due to * really did time things so badly, then we just exit again due to
* a host external interrupt. * a host external interrupt.
*/ */
local_irq_disable();
ret = kvmppc_prepare_to_enter(vcpu); ret = kvmppc_prepare_to_enter(vcpu);
if (ret <= 0) { if (ret <= 0)
local_irq_enable();
goto out; goto out;
} /* interrupts now hard-disabled */
/* Save FPU state in stack */ /* Save FPU state in thread_struct */
if (current->thread.regs->msr & MSR_FP) if (current->thread.regs->msr & MSR_FP)
giveup_fpu(current); giveup_fpu(current);
fp = current->thread.fp_state;
fpexc_mode = current->thread.fpexc_mode;
#ifdef CONFIG_ALTIVEC #ifdef CONFIG_ALTIVEC
/* Save Altivec state in stack */ /* Save Altivec state in thread_struct */
used_vr = current->thread.used_vr; if (current->thread.regs->msr & MSR_VEC)
if (used_vr) { giveup_altivec(current);
if (current->thread.regs->msr & MSR_VEC)
giveup_altivec(current);
vr = current->thread.vr_state;
vrsave = current->thread.vrsave;
}
#endif #endif
#ifdef CONFIG_VSX #ifdef CONFIG_VSX
/* Save VSX state in stack */ /* Save VSX state in thread_struct */
used_vsr = current->thread.used_vsr; if (current->thread.regs->msr & MSR_VSX)
if (used_vsr && (current->thread.regs->msr & MSR_VSX))
__giveup_vsx(current); __giveup_vsx(current);
#endif #endif
/* Remember the MSR with disabled extensions */
ext_msr = current->thread.regs->msr;
/* Preload FPU if it's enabled */ /* Preload FPU if it's enabled */
if (vcpu->arch.shared->msr & MSR_FP) if (vcpu->arch.shared->msr & MSR_FP)
kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP); kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
...@@ -1347,25 +1277,6 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) ...@@ -1347,25 +1277,6 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
/* Make sure we save the guest FPU/Altivec/VSX state */ /* Make sure we save the guest FPU/Altivec/VSX state */
kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX); kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
current->thread.regs->msr = ext_msr;
/* Restore FPU/VSX state from stack */
current->thread.fp_state = fp;
current->thread.fpexc_mode = fpexc_mode;
#ifdef CONFIG_ALTIVEC
/* Restore Altivec state from stack */
if (used_vr && current->thread.used_vr) {
current->thread.vr_state = vr;
current->thread.vrsave = vrsave;
}
current->thread.used_vr = used_vr;
#endif
#ifdef CONFIG_VSX
current->thread.used_vsr = used_vsr;
#endif
out: out:
vcpu->mode = OUTSIDE_GUEST_MODE; vcpu->mode = OUTSIDE_GUEST_MODE;
return ret; return ret;
...@@ -1606,4 +1517,6 @@ module_init(kvmppc_book3s_init_pr); ...@@ -1606,4 +1517,6 @@ module_init(kvmppc_book3s_init_pr);
module_exit(kvmppc_book3s_exit_pr); module_exit(kvmppc_book3s_exit_pr);
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
#endif #endif
...@@ -162,51 +162,4 @@ _GLOBAL(kvmppc_entry_trampoline) ...@@ -162,51 +162,4 @@ _GLOBAL(kvmppc_entry_trampoline)
mtsrr1 r6 mtsrr1 r6
RFI RFI
#if defined(CONFIG_PPC_BOOK3S_32)
#define STACK_LR INT_FRAME_SIZE+4
/* load_up_xxx have to run with MSR_DR=0 on Book3S_32 */
#define MSR_EXT_START \
PPC_STL r20, _NIP(r1); \
mfmsr r20; \
LOAD_REG_IMMEDIATE(r3, MSR_DR|MSR_EE); \
andc r3,r20,r3; /* Disable DR,EE */ \
mtmsr r3; \
sync
#define MSR_EXT_END \
mtmsr r20; /* Enable DR,EE */ \
sync; \
PPC_LL r20, _NIP(r1)
#elif defined(CONFIG_PPC_BOOK3S_64)
#define STACK_LR _LINK
#define MSR_EXT_START
#define MSR_EXT_END
#endif
/*
* Activate current's external feature (FPU/Altivec/VSX)
*/
#define define_load_up(what) \
\
_GLOBAL(kvmppc_load_up_ ## what); \
PPC_STLU r1, -INT_FRAME_SIZE(r1); \
mflr r3; \
PPC_STL r3, STACK_LR(r1); \
MSR_EXT_START; \
\
bl FUNC(load_up_ ## what); \
\
MSR_EXT_END; \
PPC_LL r3, STACK_LR(r1); \
mtlr r3; \
addi r1, r1, INT_FRAME_SIZE; \
blr
define_load_up(fpu)
#ifdef CONFIG_ALTIVEC
define_load_up(altivec)
#endif
#include "book3s_segment.S" #include "book3s_segment.S"
...@@ -361,6 +361,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) ...@@ -361,6 +361,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
beqa BOOK3S_INTERRUPT_DECREMENTER beqa BOOK3S_INTERRUPT_DECREMENTER
cmpwi r12, BOOK3S_INTERRUPT_PERFMON cmpwi r12, BOOK3S_INTERRUPT_PERFMON
beqa BOOK3S_INTERRUPT_PERFMON beqa BOOK3S_INTERRUPT_PERFMON
cmpwi r12, BOOK3S_INTERRUPT_DOORBELL
beqa BOOK3S_INTERRUPT_DOORBELL
RFI RFI
kvmppc_handler_trampoline_exit_end: kvmppc_handler_trampoline_exit_end:
...@@ -1246,8 +1246,10 @@ static int kvmppc_xics_create(struct kvm_device *dev, u32 type) ...@@ -1246,8 +1246,10 @@ static int kvmppc_xics_create(struct kvm_device *dev, u32 type)
kvm->arch.xics = xics; kvm->arch.xics = xics;
mutex_unlock(&kvm->lock); mutex_unlock(&kvm->lock);
if (ret) if (ret) {
kfree(xics);
return ret; return ret;
}
xics_debugfs_init(xics); xics_debugfs_init(xics);
......
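
Editor's note: the hunk above makes the XICS create path free the freshly allocated xics when a later step fails instead of leaking it. A minimal, hedged model of that error-path rule (names are illustrative, not the kernel's):

/*
 * Once the object has been allocated, every failing step after that must
 * release it before returning.
 */
#include <stdlib.h>
#include <errno.h>

struct device_state { int registered; };

static int register_state(struct device_state *s, int already_present)
{
	if (already_present)
		return -EEXIST;         /* another instance got there first */
	s->registered = 1;
	return 0;
}

static int create_device(int already_present)
{
	struct device_state *s = calloc(1, sizeof(*s));
	int ret;

	if (!s)
		return -ENOMEM;

	ret = register_state(s, already_present);
	if (ret) {
		free(s);                /* the fix: do not leak on failure */
		return ret;
	}
	return 0;                       /* s is now owned by the device    */
}

int main(void)
{
	return create_device(1) == -EEXIST ? 0 : 1;
}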
...@@ -643,7 +643,7 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu) ...@@ -643,7 +643,7 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
local_irq_enable(); local_irq_enable();
kvm_vcpu_block(vcpu); kvm_vcpu_block(vcpu);
clear_bit(KVM_REQ_UNHALT, &vcpu->requests); clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
local_irq_disable(); hard_irq_disable();
kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS); kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
r = 1; r = 1;
...@@ -682,34 +682,22 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) ...@@ -682,34 +682,22 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{ {
int ret, s; int ret, s;
struct debug_reg debug; struct debug_reg debug;
#ifdef CONFIG_PPC_FPU
struct thread_fp_state fp;
int fpexc_mode;
#endif
if (!vcpu->arch.sane) { if (!vcpu->arch.sane) {
kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR; kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return -EINVAL; return -EINVAL;
} }
local_irq_disable();
s = kvmppc_prepare_to_enter(vcpu); s = kvmppc_prepare_to_enter(vcpu);
if (s <= 0) { if (s <= 0) {
local_irq_enable();
ret = s; ret = s;
goto out; goto out;
} }
/* interrupts now hard-disabled */
#ifdef CONFIG_PPC_FPU #ifdef CONFIG_PPC_FPU
/* Save userspace FPU state in stack */ /* Save userspace FPU state in stack */
enable_kernel_fp(); enable_kernel_fp();
fp = current->thread.fp_state;
fpexc_mode = current->thread.fpexc_mode;
/* Restore guest FPU state to thread */
memcpy(current->thread.fp_state.fpr, vcpu->arch.fpr,
sizeof(vcpu->arch.fpr));
current->thread.fp_state.fpscr = vcpu->arch.fpscr;
/* /*
* Since we can't trap on MSR_FP in GS-mode, we consider the guest * Since we can't trap on MSR_FP in GS-mode, we consider the guest
...@@ -728,6 +716,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) ...@@ -728,6 +716,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
debug = current->thread.debug; debug = current->thread.debug;
current->thread.debug = vcpu->arch.shadow_dbg_reg; current->thread.debug = vcpu->arch.shadow_dbg_reg;
vcpu->arch.pgdir = current->mm->pgd;
kvmppc_fix_ee_before_entry(); kvmppc_fix_ee_before_entry();
ret = __kvmppc_vcpu_run(kvm_run, vcpu); ret = __kvmppc_vcpu_run(kvm_run, vcpu);
...@@ -743,15 +732,6 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) ...@@ -743,15 +732,6 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
kvmppc_save_guest_fp(vcpu); kvmppc_save_guest_fp(vcpu);
vcpu->fpu_active = 0; vcpu->fpu_active = 0;
/* Save guest FPU state from thread */
memcpy(vcpu->arch.fpr, current->thread.fp_state.fpr,
sizeof(vcpu->arch.fpr));
vcpu->arch.fpscr = current->thread.fp_state.fpscr;
/* Restore userspace FPU state from stack */
current->thread.fp_state = fp;
current->thread.fpexc_mode = fpexc_mode;
#endif #endif
out: out:
...@@ -898,17 +878,6 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, ...@@ -898,17 +878,6 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
int s; int s;
int idx; int idx;
#ifdef CONFIG_PPC64
WARN_ON(local_paca->irq_happened != 0);
#endif
/*
* We enter with interrupts disabled in hardware, but
* we need to call hard_irq_disable anyway to ensure that
* the software state is kept in sync.
*/
hard_irq_disable();
/* update before a new last_exit_type is rewritten */ /* update before a new last_exit_type is rewritten */
kvmppc_update_timing_stats(vcpu); kvmppc_update_timing_stats(vcpu);
...@@ -1217,12 +1186,11 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, ...@@ -1217,12 +1186,11 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
* aren't already exiting to userspace for some other reason. * aren't already exiting to userspace for some other reason.
*/ */
if (!(r & RESUME_HOST)) { if (!(r & RESUME_HOST)) {
local_irq_disable();
s = kvmppc_prepare_to_enter(vcpu); s = kvmppc_prepare_to_enter(vcpu);
if (s <= 0) { if (s <= 0)
local_irq_enable();
r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV); r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
} else { else {
/* interrupts now hard-disabled */
kvmppc_fix_ee_before_entry(); kvmppc_fix_ee_before_entry();
} }
} }
......
...@@ -136,7 +136,9 @@ static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu) ...@@ -136,7 +136,9 @@ static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
{ {
#ifdef CONFIG_PPC_FPU #ifdef CONFIG_PPC_FPU
if (vcpu->fpu_active && !(current->thread.regs->msr & MSR_FP)) { if (vcpu->fpu_active && !(current->thread.regs->msr & MSR_FP)) {
load_up_fpu(); enable_kernel_fp();
load_fp_state(&vcpu->arch.fp);
current->thread.fp_save_area = &vcpu->arch.fp;
current->thread.regs->msr |= MSR_FP; current->thread.regs->msr |= MSR_FP;
} }
#endif #endif
...@@ -151,6 +153,7 @@ static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu) ...@@ -151,6 +153,7 @@ static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
#ifdef CONFIG_PPC_FPU #ifdef CONFIG_PPC_FPU
if (vcpu->fpu_active && (current->thread.regs->msr & MSR_FP)) if (vcpu->fpu_active && (current->thread.regs->msr & MSR_FP))
giveup_fpu(current); giveup_fpu(current);
current->thread.fp_save_area = NULL;
#endif #endif
} }
......
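
Editor's note: on booke the guest FP context is now brought in with enable_kernel_fp()/load_fp_state() and flushed with giveup_fpu(), with MSR_FP in the host thread's MSR acting as the "guest FP is live in the registers" flag while fp_save_area points at vcpu->arch.fp. A hedged, standalone model of that lazy load/flush pairing (a boolean stands in for MSR_FP):

#include <assert.h>
#include <stdbool.h>

struct fp_state { double fpr[32]; };

static struct fp_state hw_regs;         /* stand-in for the FP register file */
static bool fp_live;                    /* stand-in for MSR_FP in thread regs */

static void load_guest_fp(const struct fp_state *guest)
{
	if (!fp_live) {                 /* load only if not already live     */
		hw_regs = *guest;
		fp_live = true;
	}
}

static void save_guest_fp(struct fp_state *guest)
{
	if (fp_live) {                  /* flush only if guest state is live */
		*guest = hw_regs;
		fp_live = false;
	}
}

int main(void)
{
	struct fp_state guest = { .fpr = { [3] = 7.0 } };

	load_guest_fp(&guest);          /* vcpu entry                        */
	hw_regs.fpr[3] = 8.0;           /* guest changes a register          */
	load_guest_fp(&guest);          /* re-entry: no reload, state kept   */
	save_guest_fp(&guest);          /* vcpu exit: flush back to the vcpu */

	assert(guest.fpr[3] == 8.0);
	return 0;
}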
...@@ -33,6 +33,8 @@ ...@@ -33,6 +33,8 @@
#ifdef CONFIG_64BIT #ifdef CONFIG_64BIT
#include <asm/exception-64e.h> #include <asm/exception-64e.h>
#include <asm/hw_irq.h>
#include <asm/irqflags.h>
#else #else
#include "../kernel/head_booke.h" /* for THREAD_NORMSAVE() */ #include "../kernel/head_booke.h" /* for THREAD_NORMSAVE() */
#endif #endif
...@@ -465,6 +467,15 @@ _GLOBAL(kvmppc_resume_host) ...@@ -465,6 +467,15 @@ _GLOBAL(kvmppc_resume_host)
mtspr SPRN_EPCR, r3 mtspr SPRN_EPCR, r3
isync isync
#ifdef CONFIG_64BIT
/*
* We enter with interrupts disabled in hardware, but
* we need to call RECONCILE_IRQ_STATE to ensure
* that the software state is kept in sync.
*/
RECONCILE_IRQ_STATE(r3,r5)
#endif
/* Switch to kernel stack and jump to handler. */ /* Switch to kernel stack and jump to handler. */
PPC_LL r3, HOST_RUN(r1) PPC_LL r3, HOST_RUN(r1)
mr r5, r14 /* intno */ mr r5, r14 /* intno */
......
...@@ -16,6 +16,8 @@ ...@@ -16,6 +16,8 @@
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/err.h> #include <linux/err.h>
#include <linux/export.h> #include <linux/export.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <asm/reg.h> #include <asm/reg.h>
#include <asm/cputable.h> #include <asm/cputable.h>
...@@ -573,3 +575,5 @@ static void __exit kvmppc_e500_exit(void) ...@@ -573,3 +575,5 @@ static void __exit kvmppc_e500_exit(void)
module_init(kvmppc_e500_init); module_init(kvmppc_e500_init);
module_exit(kvmppc_e500_exit); module_exit(kvmppc_e500_exit);
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
...@@ -31,11 +31,13 @@ enum vcpu_ftr { ...@@ -31,11 +31,13 @@ enum vcpu_ftr {
#define E500_TLB_NUM 2 #define E500_TLB_NUM 2
/* entry is mapped somewhere in host TLB */ /* entry is mapped somewhere in host TLB */
#define E500_TLB_VALID (1 << 0) #define E500_TLB_VALID (1 << 31)
/* TLB1 entry is mapped by host TLB1, tracked by bitmaps */ /* TLB1 entry is mapped by host TLB1, tracked by bitmaps */
#define E500_TLB_BITMAP (1 << 1) #define E500_TLB_BITMAP (1 << 30)
/* TLB1 entry is mapped by host TLB0 */ /* TLB1 entry is mapped by host TLB0 */
#define E500_TLB_TLB0 (1 << 2) #define E500_TLB_TLB0 (1 << 29)
/* bits [6-5] MAS2_X1 and MAS2_X0 and [4-0] bits for WIMGE */
#define E500_TLB_MAS2_ATTR (0x7f)
struct tlbe_ref { struct tlbe_ref {
pfn_t pfn; /* valid only for TLB0, except briefly */ pfn_t pfn; /* valid only for TLB0, except briefly */
......
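
Editor's note: the hunk above moves the bookkeeping flags up to bits 31-29 so that the low seven bits of ref->flags can carry the guest's MAS2 X1/X0 and WIMGE attributes (E500_TLB_MAS2_ATTR, 0x7f). A small sanity-check sketch of that packing, using the constants from the hunk:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define E500_TLB_VALID          (1u << 31)
#define E500_TLB_BITMAP         (1u << 30)
#define E500_TLB_TLB0           (1u << 29)
#define E500_TLB_MAS2_ATTR      0x7fu

int main(void)
{
	uint32_t bookkeeping = E500_TLB_VALID | E500_TLB_BITMAP | E500_TLB_TLB0;
	uint32_t mas2_attr = 0x2a & E500_TLB_MAS2_ATTR; /* arbitrary attribute bits */
	uint32_t flags = E500_TLB_VALID | mas2_attr;

	/* the two regions must be disjoint */
	assert((bookkeeping & E500_TLB_MAS2_ATTR) == 0);

	/* unpacking recovers exactly what was stored */
	assert((flags & E500_TLB_MAS2_ATTR) == mas2_attr);
	assert(flags & E500_TLB_VALID);

	printf("flags = 0x%08x\n", flags);
	return 0;
}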
...@@ -127,7 +127,7 @@ static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500, ...@@ -127,7 +127,7 @@ static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
} }
static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu, static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
unsigned int eaddr, int as) gva_t eaddr, int as)
{ {
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
unsigned int victim, tsized; unsigned int victim, tsized;
......
...@@ -65,15 +65,6 @@ static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode) ...@@ -65,15 +65,6 @@ static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
return mas3; return mas3;
} }
static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode)
{
#ifdef CONFIG_SMP
return (mas2 & MAS2_ATTRIB_MASK) | MAS2_M;
#else
return mas2 & MAS2_ATTRIB_MASK;
#endif
}
/* /*
* writing shadow tlb entry to host TLB * writing shadow tlb entry to host TLB
*/ */
...@@ -231,15 +222,15 @@ void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, ...@@ -231,15 +222,15 @@ void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
ref->flags &= ~(E500_TLB_TLB0 | E500_TLB_VALID); ref->flags &= ~(E500_TLB_TLB0 | E500_TLB_VALID);
} }
/* Already invalidated in between */ /*
if (!(ref->flags & E500_TLB_VALID)) * If TLB entry is still valid then it's a TLB0 entry, and thus
return; * backed by at most one host tlbe per shadow pid
*/
/* Guest tlbe is backed by at most one host tlbe per shadow pid. */ if (ref->flags & E500_TLB_VALID)
kvmppc_e500_tlbil_one(vcpu_e500, gtlbe); kvmppc_e500_tlbil_one(vcpu_e500, gtlbe);
/* Mark the TLB as not backed by the host anymore */ /* Mark the TLB as not backed by the host anymore */
ref->flags &= ~E500_TLB_VALID; ref->flags = 0;
} }
static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe) static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe)
...@@ -249,10 +240,13 @@ static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe) ...@@ -249,10 +240,13 @@ static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe)
static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref, static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
struct kvm_book3e_206_tlb_entry *gtlbe, struct kvm_book3e_206_tlb_entry *gtlbe,
pfn_t pfn) pfn_t pfn, unsigned int wimg)
{ {
ref->pfn = pfn; ref->pfn = pfn;
ref->flags |= E500_TLB_VALID; ref->flags = E500_TLB_VALID;
/* Use guest supplied MAS2_G and MAS2_E */
ref->flags |= (gtlbe->mas2 & MAS2_ATTRIB_MASK) | wimg;
/* Mark the page accessed */ /* Mark the page accessed */
kvm_set_pfn_accessed(pfn); kvm_set_pfn_accessed(pfn);
...@@ -316,8 +310,7 @@ static void kvmppc_e500_setup_stlbe( ...@@ -316,8 +310,7 @@ static void kvmppc_e500_setup_stlbe(
/* Force IPROT=0 for all guest mappings. */ /* Force IPROT=0 for all guest mappings. */
stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID; stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID;
stlbe->mas2 = (gvaddr & MAS2_EPN) | stlbe->mas2 = (gvaddr & MAS2_EPN) | (ref->flags & E500_TLB_MAS2_ATTR);
e500_shadow_mas2_attrib(gtlbe->mas2, pr);
stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) | stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) |
e500_shadow_mas3_attrib(gtlbe->mas7_3, pr); e500_shadow_mas3_attrib(gtlbe->mas7_3, pr);
...@@ -339,6 +332,10 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, ...@@ -339,6 +332,10 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
int ret = 0; int ret = 0;
unsigned long mmu_seq; unsigned long mmu_seq;
struct kvm *kvm = vcpu_e500->vcpu.kvm; struct kvm *kvm = vcpu_e500->vcpu.kvm;
unsigned long tsize_pages = 0;
pte_t *ptep;
unsigned int wimg = 0;
pgd_t *pgdir;
/* used to check for invalidations in progress */ /* used to check for invalidations in progress */
mmu_seq = kvm->mmu_notifier_seq; mmu_seq = kvm->mmu_notifier_seq;
...@@ -405,7 +402,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, ...@@ -405,7 +402,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
*/ */
for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) { for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) {
unsigned long gfn_start, gfn_end, tsize_pages; unsigned long gfn_start, gfn_end;
tsize_pages = 1 << (tsize - 2); tsize_pages = 1 << (tsize - 2);
gfn_start = gfn & ~(tsize_pages - 1); gfn_start = gfn & ~(tsize_pages - 1);
...@@ -447,11 +444,12 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, ...@@ -447,11 +444,12 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
} }
if (likely(!pfnmap)) { if (likely(!pfnmap)) {
unsigned long tsize_pages = 1 << (tsize + 10 - PAGE_SHIFT); tsize_pages = 1 << (tsize + 10 - PAGE_SHIFT);
pfn = gfn_to_pfn_memslot(slot, gfn); pfn = gfn_to_pfn_memslot(slot, gfn);
if (is_error_noslot_pfn(pfn)) { if (is_error_noslot_pfn(pfn)) {
printk(KERN_ERR "Couldn't get real page for gfn %lx!\n", if (printk_ratelimit())
(long)gfn); pr_err("%s: real page not found for gfn %lx\n",
__func__, (long)gfn);
return -EINVAL; return -EINVAL;
} }
...@@ -466,7 +464,18 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, ...@@ -466,7 +464,18 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
goto out; goto out;
} }
kvmppc_e500_ref_setup(ref, gtlbe, pfn);
pgdir = vcpu_e500->vcpu.arch.pgdir;
ptep = lookup_linux_ptep(pgdir, hva, &tsize_pages);
if (pte_present(*ptep))
wimg = (*ptep >> PTE_WIMGE_SHIFT) & MAS2_WIMGE_MASK;
else {
if (printk_ratelimit())
pr_err("%s: pte not present: gfn %lx, pfn %lx\n",
__func__, (long)gfn, pfn);
return -EINVAL;
}
kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize, kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
ref, gvaddr, stlbe); ref, gvaddr, stlbe);
......
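
Editor's note: the shadow-map path above now walks the Linux page tables (via the pgdir stashed in kvmppc_vcpu_run) and takes WIMG caching attributes from the host PTE, combining them with the guest's MAS2 bits in kvmppc_e500_ref_setup(). Below is a hedged model of that derivation; PTE_WIMGE_SHIFT and the guest-attribute mask are placeholders chosen for the example, not the real e500 PTE layout.

#include <assert.h>
#include <stdint.h>

#define PTE_WIMGE_SHIFT 3               /* placeholder bit position          */
#define MAS2_WIMGE_MASK 0x1f            /* W, I, M, G, E (MAS2 low bits)     */
#define MAS2_I          0x08
#define MAS2_G          0x02
#define MAS2_E          0x01
#define GUEST_ATTR_MASK (MAS2_G | MAS2_E)   /* bits the guest controls here */

static uint32_t shadow_mas2_attr(uint64_t host_pte, uint32_t guest_mas2)
{
	/* caching attributes as the host mapped the page */
	uint32_t wimg = (host_pte >> PTE_WIMGE_SHIFT) & MAS2_WIMGE_MASK;

	return (guest_mas2 & GUEST_ATTR_MASK) | wimg;
}

int main(void)
{
	/* host mapped the page cache-inhibited; guest asked for guarded */
	uint64_t pte = (uint64_t)MAS2_I << PTE_WIMGE_SHIFT;
	uint32_t attr = shadow_mas2_attr(pte, MAS2_G);

	assert(attr & MAS2_I);          /* caching attribute taken from host */
	assert(attr & MAS2_G);          /* guarded bit preserved from guest  */
	return 0;
}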
...@@ -16,6 +16,8 @@ ...@@ -16,6 +16,8 @@
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/err.h> #include <linux/err.h>
#include <linux/export.h> #include <linux/export.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <asm/reg.h> #include <asm/reg.h>
#include <asm/cputable.h> #include <asm/cputable.h>
...@@ -391,3 +393,5 @@ static void __exit kvmppc_e500mc_exit(void) ...@@ -391,3 +393,5 @@ static void __exit kvmppc_e500mc_exit(void)
module_init(kvmppc_e500mc_init); module_init(kvmppc_e500mc_init);
module_exit(kvmppc_e500mc_exit); module_exit(kvmppc_e500mc_exit);
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
...@@ -219,7 +219,6 @@ static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) ...@@ -219,7 +219,6 @@ static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
* lmw * lmw
* stmw * stmw
* *
* XXX is_bigendian should depend on MMU mapping or MSR[LE]
*/ */
/* XXX Should probably auto-generate instruction decoding for a particular core /* XXX Should probably auto-generate instruction decoding for a particular core
* from opcode tables in the future. */ * from opcode tables in the future. */
......
...@@ -1635,6 +1635,7 @@ static void mpic_destroy(struct kvm_device *dev) ...@@ -1635,6 +1635,7 @@ static void mpic_destroy(struct kvm_device *dev)
dev->kvm->arch.mpic = NULL; dev->kvm->arch.mpic = NULL;
kfree(opp); kfree(opp);
kfree(dev);
} }
static int mpic_set_default_irq_routing(struct openpic *opp) static int mpic_set_default_irq_routing(struct openpic *opp)
......
...@@ -68,14 +68,16 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) ...@@ -68,14 +68,16 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
*/ */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu) int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{ {
int r = 1; int r;
WARN_ON(irqs_disabled());
hard_irq_disable();
WARN_ON_ONCE(!irqs_disabled());
while (true) { while (true) {
if (need_resched()) { if (need_resched()) {
local_irq_enable(); local_irq_enable();
cond_resched(); cond_resched();
local_irq_disable(); hard_irq_disable();
continue; continue;
} }
...@@ -101,7 +103,7 @@ int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu) ...@@ -101,7 +103,7 @@ int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
local_irq_enable(); local_irq_enable();
trace_kvm_check_requests(vcpu); trace_kvm_check_requests(vcpu);
r = kvmppc_core_check_requests(vcpu); r = kvmppc_core_check_requests(vcpu);
local_irq_disable(); hard_irq_disable();
if (r > 0) if (r > 0)
continue; continue;
break; break;
...@@ -113,22 +115,12 @@ int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu) ...@@ -113,22 +115,12 @@ int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
continue; continue;
} }
#ifdef CONFIG_PPC64
/* lazy EE magic */
hard_irq_disable();
if (lazy_irq_pending()) {
/* Got an interrupt in between, try again */
local_irq_enable();
local_irq_disable();
kvm_guest_exit();
continue;
}
#endif
kvm_guest_enter(); kvm_guest_enter();
break; return 1;
} }
/* return to host */
local_irq_enable();
return r; return r;
} }
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter); EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);
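
Editor's note: after this change kvmppc_prepare_to_enter() disables interrupts itself with hard_irq_disable() and only re-enables them around points that may sleep; it returns 1 with interrupts still hard-disabled when the guest should be entered, and re-enables them before returning on failure, which is why the callers earlier in this diff lost their local_irq_disable()/local_irq_enable() pairs. A hedged userspace model of that contract, with a flag standing in for the interrupt state:

#include <assert.h>
#include <stdbool.h>

static bool irqs_enabled = true;

static void hard_irq_disable(void) { irqs_enabled = false; }
static void local_irq_enable(void) { irqs_enabled = true; }

static int prepare_to_enter(bool signal_pending)
{
	int r;

	hard_irq_disable();
	while (true) {
		if (signal_pending) {
			r = -4;         /* -EINTR: let the caller exit       */
			break;
		}
		/* (resched / request handling elided in this model) */
		return 1;               /* enter guest, IRQs stay disabled   */
	}
	local_irq_enable();             /* return to host with IRQs on       */
	return r;
}

int main(void)
{
	assert(prepare_to_enter(false) == 1 && !irqs_enabled);
	local_irq_enable();             /* caller re-enables after the run   */
	assert(prepare_to_enter(true) < 0 && irqs_enabled);
	return 0;
}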
...@@ -656,14 +648,14 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, ...@@ -656,14 +648,14 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
break; break;
case KVM_MMIO_REG_FPR: case KVM_MMIO_REG_FPR:
vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
break; break;
#ifdef CONFIG_PPC_BOOK3S #ifdef CONFIG_PPC_BOOK3S
case KVM_MMIO_REG_QPR: case KVM_MMIO_REG_QPR:
vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
break; break;
case KVM_MMIO_REG_FQPR: case KVM_MMIO_REG_FQPR:
vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
break; break;
#endif #endif
...@@ -673,9 +665,19 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, ...@@ -673,9 +665,19 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
} }
int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int rt, unsigned int bytes, int is_bigendian) unsigned int rt, unsigned int bytes,
int is_default_endian)
{ {
int idx, ret; int idx, ret;
int is_bigendian;
if (kvmppc_need_byteswap(vcpu)) {
/* Default endianness is "little endian". */
is_bigendian = !is_default_endian;
} else {
/* Default endianness is "big endian". */
is_bigendian = is_default_endian;
}
if (bytes > sizeof(run->mmio.data)) { if (bytes > sizeof(run->mmio.data)) {
printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__, printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
...@@ -711,21 +713,31 @@ EXPORT_SYMBOL_GPL(kvmppc_handle_load); ...@@ -711,21 +713,31 @@ EXPORT_SYMBOL_GPL(kvmppc_handle_load);
/* Same as above, but sign extends */ /* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu, int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int rt, unsigned int bytes, int is_bigendian) unsigned int rt, unsigned int bytes,
int is_default_endian)
{ {
int r; int r;
vcpu->arch.mmio_sign_extend = 1; vcpu->arch.mmio_sign_extend = 1;
r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian); r = kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian);
return r; return r;
} }
int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu, int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
u64 val, unsigned int bytes, int is_bigendian) u64 val, unsigned int bytes, int is_default_endian)
{ {
void *data = run->mmio.data; void *data = run->mmio.data;
int idx, ret; int idx, ret;
int is_bigendian;
if (kvmppc_need_byteswap(vcpu)) {
/* Default endianness is "little endian". */
is_bigendian = !is_default_endian;
} else {
/* Default endianness is "big endian". */
is_bigendian = is_default_endian;
}
if (bytes > sizeof(run->mmio.data)) { if (bytes > sizeof(run->mmio.data)) {
printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__, printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
......
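
Editor's note: kvmppc_handle_load()/kvmppc_handle_store() now take is_default_endian and derive is_bigendian from whether the guest currently needs byteswapping: when the guest runs in the opposite byte order from the host default, the sense of a "default endian" access flips. A minimal sketch of that decision:

#include <assert.h>

static int mmio_is_bigendian(int need_byteswap, int is_default_endian)
{
	if (need_byteswap)
		return !is_default_endian;      /* default endianness is LE */
	return is_default_endian;               /* default endianness is BE */
}

int main(void)
{
	/* big-endian guest: default-endian access stays big endian        */
	assert(mmio_is_bigendian(0, 1) == 1);
	/* byteswapping guest: default-endian access becomes little endian */
	assert(mmio_is_bigendian(1, 1) == 0);
	/* explicit reverse-endian accesses flip in both cases             */
	assert(mmio_is_bigendian(0, 0) == 0);
	assert(mmio_is_bigendian(1, 0) == 1);
	return 0;
}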