Commit 5b2b9d77 authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm fixes from Paolo Bonzini:
 "These are mostly PPC changes for 3.16-new things.  However, there is
  an x86 change too and it is a regression from 3.14.  As it only
  affects nested virtualization and there were other changes in this
  area in 3.16, I am not nominating it for 3.15-stable"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: x86: Check for nested events if there is an injectable interrupt
  KVM: PPC: RTAS: Do byte swaps explicitly
  KVM: PPC: Book3S PR: Fix ABIv2 on LE
  KVM: PPC: Assembly functions exported to modules need _GLOBAL_TOC()
  PPC: Add _GLOBAL_TOC for 32bit
  KVM: PPC: BOOK3S: HV: Use base page size when comparing against slb value
  KVM: PPC: Book3E: Unlock mmu_lock when setting caching attribute
parents 80d6191e bb18b526
@@ -198,8 +198,10 @@ static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
 	return rb;
 }
 
-static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
+static inline unsigned long __hpte_page_size(unsigned long h, unsigned long l,
+					     bool is_base_size)
 {
 	int size, a_psize;
 	/* Look at the 8 bit LP value */
 	unsigned int lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1);
@@ -214,14 +216,27 @@ static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
 				continue;
 
 			a_psize = __hpte_actual_psize(lp, size);
-			if (a_psize != -1)
+			if (a_psize != -1) {
+				if (is_base_size)
+					return 1ul << mmu_psize_defs[size].shift;
 				return 1ul << mmu_psize_defs[a_psize].shift;
+			}
 		}
 	}
 	return 0;
 }
 
+static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
+{
+	return __hpte_page_size(h, l, 0);
+}
+
+static inline unsigned long hpte_base_page_size(unsigned long h, unsigned long l)
+{
+	return __hpte_page_size(h, l, 1);
+}
+
 static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
 {
 	return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
......
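The hunk above is the core of the base-page-size fix: with MPSS (mixed page-size segments), an HPTE carries both a base page size (the segment-wide size that SLB encodings are derived from) and an actual page size (what the entry really maps), and callers comparing against SLB values must use the former. A minimal userspace sketch of the distinction; the psize_defs table and indices are hypothetical stand-ins for the kernel's mmu_psize_defs, and only the control flow mirrors __hpte_page_size():

/*
 * Sketch of the base-vs-actual page size split behind the MPSS fix.
 * Hypothetical table: index 0 = 4K, index 1 = 64K.
 */
#include <stdbool.h>
#include <stdio.h>

struct psize_def { unsigned int shift; };

static const struct psize_def psize_defs[] = { { 12 }, { 16 } };

static unsigned long page_size(int base_idx, int actual_idx, bool is_base_size)
{
	/*
	 * The fix: code comparing against SLB encodings must ask for the
	 * segment's base size; the actual size may differ under MPSS.
	 */
	if (is_base_size)
		return 1ul << psize_defs[base_idx].shift;
	return 1ul << psize_defs[actual_idx].shift;
}

int main(void)
{
	/* A 64K-base segment whose HPTE actually maps a 4K page. */
	printf("base:   %lu\n", page_size(1, 0, true));  /* 65536 */
	printf("actual: %lu\n", page_size(1, 0, false)); /* 4096 */
	return 0;
}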
@@ -277,6 +277,8 @@ GLUE(.,name):
 	.globl n;	\
 n:
 
+#define _GLOBAL_TOC(name) _GLOBAL(name)
+
 #define _KPROBE(n)	\
 	.section ".kprobes.text","a";	\
 	.globl n;	\
......
@@ -1562,7 +1562,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
 				goto out;
 			}
 			if (!rma_setup && is_vrma_hpte(v)) {
-				unsigned long psize = hpte_page_size(v, r);
+				unsigned long psize = hpte_base_page_size(v, r);
 				unsigned long senc = slb_pgsize_encoding(psize);
 				unsigned long lpcr;
......
@@ -814,13 +814,10 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
 			r = hpte[i+1];
 
 			/*
-			 * Check the HPTE again, including large page size
-			 * Since we don't currently allow any MPSS (mixed
-			 * page-size segment) page sizes, it is sufficient
-			 * to check against the actual page size.
+			 * Check the HPTE again, including base page size
 			 */
 			if ((v & valid) && (v & mask) == val &&
-			    hpte_page_size(v, r) == (1ul << pshift))
+			    hpte_base_page_size(v, r) == (1ul << pshift))
 				/* Return with the HPTE still locked */
 				return (hash << 3) + (i >> 1);
......
@@ -48,7 +48,7 @@
  *
  * LR = return address to continue at after eventually re-enabling MMU
  */
-_GLOBAL(kvmppc_hv_entry_trampoline)
+_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
 	mflr	r0
 	std	r0, PPC_LR_STKOFF(r1)
 	stdu	r1, -112(r1)
......
@@ -25,7 +25,11 @@
 #include <asm/exception-64s.h>
 
 #if defined(CONFIG_PPC_BOOK3S_64)
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+#define FUNC(name)		name
+#else
 #define FUNC(name)		GLUE(.,name)
+#endif
 #define GET_SHADOW_VCPU(reg)	addi	reg, r13, PACA_SVCPU
 
 #elif defined(CONFIG_PPC_BOOK3S_32)
......
@@ -36,7 +36,11 @@
 
 #if defined(CONFIG_PPC_BOOK3S_64)
 
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+#define FUNC(name)		name
+#else
 #define FUNC(name)		GLUE(.,name)
+#endif
 
 #elif defined(CONFIG_PPC_BOOK3S_32)
 
@@ -146,7 +150,7 @@ kvmppc_handler_skip_ins:
  * On entry, r4 contains the guest shadow MSR
  * MSR.EE has to be 0 when calling this function
  */
-_GLOBAL(kvmppc_entry_trampoline)
+_GLOBAL_TOC(kvmppc_entry_trampoline)
 	mfmsr	r5
 	LOAD_REG_ADDR(r7, kvmppc_handler_trampoline_enter)
 	toreal(r7)
......
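The assembly changes above all stem from the ELFv1 vs ELFv2 ABI split on 64-bit PPC: under ELFv1 a function's code lives at the dot-symbol (.name) referenced by its function descriptor, while ELFv2 drops descriptors, so FUNC(name) must resolve to the plain name, and entry points reachable from modules need _GLOBAL_TOC() to set up the TOC pointer for callers arriving via the global entry point. On 32-bit there is no TOC at all, which is why the ppc_asm.h hunk simply defines _GLOBAL_TOC(name) as _GLOBAL(name). A preprocessor-only sketch of the same selection; GLUE/XGLUE mirror the kernel's token-pasting helper, and the _CALL_ELF test is how the toolchain advertises ELFv2:

/*
 * Sketch of ABI-dependent symbol naming. Nothing here is expanded;
 * it only shows how the macros would be chosen at build time.
 */
#define XGLUE(a, b) a##b
#define GLUE(a, b) XGLUE(a, b)

#if defined(_CALL_ELF) && _CALL_ELF == 2
/* ELFv2: no function descriptors; call the plain symbol. */
#define FUNC(name) name
#else
/* ELFv1: code lives at the dot-symbol named by the descriptor. */
#define FUNC(name) GLUE(., name)
#endif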
@@ -23,20 +23,20 @@ static void kvm_rtas_set_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
 	u32 irq, server, priority;
 	int rc;
 
-	if (args->nargs != 3 || args->nret != 1) {
+	if (be32_to_cpu(args->nargs) != 3 || be32_to_cpu(args->nret) != 1) {
 		rc = -3;
 		goto out;
 	}
 
-	irq = args->args[0];
-	server = args->args[1];
-	priority = args->args[2];
+	irq = be32_to_cpu(args->args[0]);
+	server = be32_to_cpu(args->args[1]);
+	priority = be32_to_cpu(args->args[2]);
 
 	rc = kvmppc_xics_set_xive(vcpu->kvm, irq, server, priority);
 	if (rc)
 		rc = -3;
 out:
-	args->rets[0] = rc;
+	args->rets[0] = cpu_to_be32(rc);
 }
 
 static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
@@ -44,12 +44,12 @@ static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
 	u32 irq, server, priority;
 	int rc;
 
-	if (args->nargs != 1 || args->nret != 3) {
+	if (be32_to_cpu(args->nargs) != 1 || be32_to_cpu(args->nret) != 3) {
 		rc = -3;
 		goto out;
 	}
 
-	irq = args->args[0];
+	irq = be32_to_cpu(args->args[0]);
 
 	server = priority = 0;
 	rc = kvmppc_xics_get_xive(vcpu->kvm, irq, &server, &priority);
@@ -58,10 +58,10 @@ static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
 		goto out;
 	}
 
-	args->rets[1] = server;
-	args->rets[2] = priority;
+	args->rets[1] = cpu_to_be32(server);
+	args->rets[2] = cpu_to_be32(priority);
 out:
-	args->rets[0] = rc;
+	args->rets[0] = cpu_to_be32(rc);
 }
 
 static void kvm_rtas_int_off(struct kvm_vcpu *vcpu, struct rtas_args *args)
@@ -69,18 +69,18 @@ static void kvm_rtas_int_off(struct kvm_vcpu *vcpu, struct rtas_args *args)
 	u32 irq;
 	int rc;
 
-	if (args->nargs != 1 || args->nret != 1) {
+	if (be32_to_cpu(args->nargs) != 1 || be32_to_cpu(args->nret) != 1) {
 		rc = -3;
 		goto out;
 	}
 
-	irq = args->args[0];
+	irq = be32_to_cpu(args->args[0]);
 
 	rc = kvmppc_xics_int_off(vcpu->kvm, irq);
 	if (rc)
 		rc = -3;
 out:
-	args->rets[0] = rc;
+	args->rets[0] = cpu_to_be32(rc);
 }
 
 static void kvm_rtas_int_on(struct kvm_vcpu *vcpu, struct rtas_args *args)
@@ -88,18 +88,18 @@ static void kvm_rtas_int_on(struct kvm_vcpu *vcpu, struct rtas_args *args)
 	u32 irq;
 	int rc;
 
-	if (args->nargs != 1 || args->nret != 1) {
+	if (be32_to_cpu(args->nargs) != 1 || be32_to_cpu(args->nret) != 1) {
 		rc = -3;
 		goto out;
 	}
 
-	irq = args->args[0];
+	irq = be32_to_cpu(args->args[0]);
 
 	rc = kvmppc_xics_int_on(vcpu->kvm, irq);
 	if (rc)
 		rc = -3;
 out:
-	args->rets[0] = rc;
+	args->rets[0] = cpu_to_be32(rc);
 }
 #endif /* CONFIG_KVM_XICS */
@@ -205,32 +205,6 @@ int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp)
 	return rc;
 }
 
-static void kvmppc_rtas_swap_endian_in(struct rtas_args *args)
-{
-#ifdef __LITTLE_ENDIAN__
-	int i;
-
-	args->token = be32_to_cpu(args->token);
-	args->nargs = be32_to_cpu(args->nargs);
-	args->nret  = be32_to_cpu(args->nret);
-
-	for (i = 0; i < args->nargs; i++)
-		args->args[i] = be32_to_cpu(args->args[i]);
-#endif
-}
-
-static void kvmppc_rtas_swap_endian_out(struct rtas_args *args)
-{
-#ifdef __LITTLE_ENDIAN__
-	int i;
-
-	for (i = 0; i < args->nret; i++)
-		args->args[i] = cpu_to_be32(args->args[i]);
-
-	args->token = cpu_to_be32(args->token);
-	args->nargs = cpu_to_be32(args->nargs);
-	args->nret  = cpu_to_be32(args->nret);
-#endif
-}
-
 int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
 {
 	struct rtas_token_definition *d;
@@ -249,8 +223,6 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
 	if (rc)
 		goto fail;
 
-	kvmppc_rtas_swap_endian_in(&args);
-
 	/*
 	 * args->rets is a pointer into args->args. Now that we've
 	 * copied args we need to fix it up to point into our copy,
@@ -258,13 +230,13 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
 	 * value so we can restore it on the way out.
 	 */
 	orig_rets = args.rets;
-	args.rets = &args.args[args.nargs];
+	args.rets = &args.args[be32_to_cpu(args.nargs)];
 
 	mutex_lock(&vcpu->kvm->lock);
 
 	rc = -ENOENT;
 	list_for_each_entry(d, &vcpu->kvm->arch.rtas_tokens, list) {
-		if (d->token == args.token) {
+		if (d->token == be32_to_cpu(args.token)) {
 			d->handler->handler(vcpu, &args);
 			rc = 0;
 			break;
@@ -275,7 +247,6 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
 
 	if (rc == 0) {
 		args.rets = orig_rets;
-		kvmppc_rtas_swap_endian_out(&args);
 		rc = kvm_write_guest(vcpu->kvm, args_phys, &args, sizeof(args));
 		if (rc)
 			goto fail;
......
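The RTAS rework above replaces the wholesale swap-in/swap-out helpers with explicit be32_to_cpu()/cpu_to_be32() at each access, so the argument buffer keeps the guest's big-endian layout all the way from kvm_read_guest() to kvm_write_guest() and can never be written back half-swapped from an error path. A userspace sketch of the same access pattern using <endian.h>; the cut-down struct and handler are illustrative stand-ins, not the kernel's rtas_args:

/*
 * Sketch of explicit byte swapping on a struct kept big-endian in memory.
 * be32toh()/htobe32() stand in for the kernel's be32_to_cpu()/cpu_to_be32().
 */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct rtas_args_demo {
	uint32_t nargs;   /* big-endian in memory, as the guest wrote it */
	uint32_t args[4]; /* big-endian in memory */
	uint32_t rets[1]; /* big-endian in memory */
};

static void handle(struct rtas_args_demo *a)
{
	/* Swap each field at the point of use... */
	if (be32toh(a->nargs) != 1) {
		a->rets[0] = htobe32((uint32_t)-3);
		return;
	}
	uint32_t irq = be32toh(a->args[0]);
	printf("irq = %u\n", irq);
	/* ...and swap back when storing results. */
	a->rets[0] = htobe32(0);
}

int main(void)
{
	struct rtas_args_demo a = { .nargs = htobe32(1),
				    .args = { htobe32(23) } };
	handle(&a);
	return 0;
}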
@@ -473,7 +473,8 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 		if (printk_ratelimit())
 			pr_err("%s: pte not present: gfn %lx, pfn %lx\n",
 				__func__, (long)gfn, pfn);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out;
 	}
 
 	kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
......
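The e500 fix above is the classic single-exit locking pattern: the early return bailed out while mmu_lock was still held, so the error now routes through the common out: path that drops the lock. A generic sketch of the pattern under the assumption of one lock held across the check; pthread_mutex and the demo function are stand-ins for the kernel spinlock and kvmppc_e500_shadow_map():

/* Sketch of a single-exit error path that keeps lock/unlock balanced. */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int shadow_map_demo(bool pte_present)
{
	int ret = 0;

	pthread_mutex_lock(&lock);
	if (!pte_present) {
		/* A bare "return -EINVAL" here would leak the lock. */
		ret = -EINVAL;
		goto out;
	}
	/* ... set up the mapping under the lock ... */
out:
	pthread_mutex_unlock(&lock);
	return ret;
}

int main(void)
{
	return shadow_map_demo(false) == -EINVAL ? 0 : 1;
}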
@@ -5887,6 +5887,18 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
 			kvm_x86_ops->set_nmi(vcpu);
 		}
 	} else if (kvm_cpu_has_injectable_intr(vcpu)) {
+		/*
+		 * Because interrupts can be injected asynchronously, we are
+		 * calling check_nested_events again here to avoid a race condition.
+		 * See https://lkml.org/lkml/2014/7/2/60 for discussion about this
+		 * proposal and current concerns.  Perhaps we should be setting
+		 * KVM_REQ_EVENT only on certain events and not unconditionally?
+		 */
+		if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) {
+			r = kvm_x86_ops->check_nested_events(vcpu, req_int_win);
+			if (r != 0)
+				return r;
+		}
 		if (kvm_x86_ops->interrupt_allowed(vcpu)) {
 			kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
 					    false);
......
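The x86 hunk above re-runs check_nested_events() immediately before injecting an interrupt: interrupts become injectable asynchronously, so a nested event that arrived after the caller's earlier check could otherwise be missed and the interrupt injected into the L2 guest without first giving L1 a chance to take a nested vmexit. The shape of the fix is "re-check state that can change asynchronously right before acting on it"; a compressed sketch with hypothetical stand-ins for the KVM callbacks:

/* Sketch of the recheck-before-inject pattern; helpers are hypothetical. */
#include <errno.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool nested_event_pending; /* can flip asynchronously */

static int check_nested_events(void)
{
	/* Nonzero: a nested exit takes priority over injection. */
	return atomic_load(&nested_event_pending) ? -EBUSY : 0;
}

static int inject_pending_interrupt(bool guest_mode)
{
	/*
	 * Re-check right before injecting: the pending state may have
	 * changed since the caller's earlier check (the race being fixed).
	 */
	if (guest_mode) {
		int r = check_nested_events();
		if (r != 0)
			return r;
	}
	puts("interrupt injected");
	return 0;
}

int main(void)
{
	atomic_store(&nested_event_pending, true);
	printf("r = %d\n", inject_pending_interrupt(true)); /* no injection */
	return 0;
}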