Commit fb9fc395 authored by Linus Torvalds

Merge branch 'xen-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/jeremy/xen

* 'xen-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/jeremy/xen:
  xfs: eagerly remove vmap mappings to avoid upsetting Xen
  xen: add some debug output for failed multicalls
  xen: fix incorrect vcpu_register_vcpu_info hypercall argument
  xen: ask the hypervisor how much space it needs reserved
  xen: lock pte pages while pinning/unpinning
  xen: deal with stale cr3 values when unpinning pagetables
  xen: add batch completion callbacks
  xen: yield to IPI target if necessary
  Clean up duplicate includes in arch/i386/xen/
  remove dead code in pgtable_cache_init
  paravirt: clean up lazy mode handling
  paravirt: refactor struct paravirt_ops into smaller pv_*_ops
parents 0eafaae8 ace2e92e
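The largest change in this series is the "paravirt: refactor struct paravirt_ops into smaller pv_*_ops" patch: the single monolithic paravirt_ops structure is split into pv_info, pv_init_ops, pv_time_ops, pv_cpu_ops, pv_irq_ops, pv_apic_ops and pv_mmu_ops, and guests such as lguest and Xen now fill in only the group of hooks they override (see the lguest_init() hunk further down). A rough user-space sketch of that pattern, using toy types and hook names rather than the kernel's real definitions:

/* pv_ops_sketch.c - toy model of the pv_*_ops split; the field names and
 * "hypercalls" here are illustrative, not the kernel's actual definitions. */
#include <stdio.h>

struct pv_info {
        const char *name;
        int paravirt_enabled;
};

struct pv_irq_ops {
        void (*irq_disable)(void);
        void (*irq_enable)(void);
};

struct pv_mmu_ops {
        void (*flush_tlb_user)(void);
};

/* "native" defaults, as a bare-metal kernel would use */
static void native_irq_disable(void) { puts("native: cli"); }
static void native_irq_enable(void)  { puts("native: sti"); }
static void native_flush_tlb(void)   { puts("native: reload cr3"); }

static struct pv_info    pv_info    = { "bare hardware", 0 };
static struct pv_irq_ops pv_irq_ops = { native_irq_disable, native_irq_enable };
static struct pv_mmu_ops pv_mmu_ops = { native_flush_tlb };

/* a hypervisor guest overrides only the hooks it cares about */
static void guest_irq_disable(void) { puts("guest: hypercall(DISABLE_IRQ)"); }
static void guest_flush_tlb(void)   { puts("guest: hypercall(FLUSH_TLB)"); }

static void guest_init(void)
{
        pv_info.name = "toy-guest";
        pv_info.paravirt_enabled = 1;
        pv_irq_ops.irq_disable = guest_irq_disable;
        pv_mmu_ops.flush_tlb_user = guest_flush_tlb;
}

int main(void)
{
        guest_init();
        printf("running under: %s\n", pv_info.name);
        pv_irq_ops.irq_disable();    /* dispatches to the guest hook */
        pv_irq_ops.irq_enable();     /* still the native default */
        pv_mmu_ops.flush_tlb_user();
        return 0;
}

Grouping the hooks this way lets each pv_*_ops structure be patched independently, which is what the new PARAVIRT_PATCH_pv_cpu_ops / PARAVIRT_PATCH_pv_irq_ops offsets in the asm-offsets hunk below exist for.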
@@ -369,8 +369,8 @@ void apply_paravirt(struct paravirt_patch_site *start,
         BUG_ON(p->len > MAX_PATCH_LEN);
         /* prep the buffer with the original instructions */
         memcpy(insnbuf, p->instr, p->len);
-        used = paravirt_ops.patch(p->instrtype, p->clobbers, insnbuf,
-                                  (unsigned long)p->instr, p->len);
+        used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
+                                 (unsigned long)p->instr, p->len);
         BUG_ON(used > p->len);
...
@@ -116,12 +116,14 @@ void foo(void)
 #ifdef CONFIG_PARAVIRT
         BLANK();
-        OFFSET(PARAVIRT_enabled, paravirt_ops, paravirt_enabled);
-        OFFSET(PARAVIRT_irq_disable, paravirt_ops, irq_disable);
-        OFFSET(PARAVIRT_irq_enable, paravirt_ops, irq_enable);
-        OFFSET(PARAVIRT_irq_enable_sysexit, paravirt_ops, irq_enable_sysexit);
-        OFFSET(PARAVIRT_iret, paravirt_ops, iret);
-        OFFSET(PARAVIRT_read_cr0, paravirt_ops, read_cr0);
+        OFFSET(PARAVIRT_enabled, pv_info, paravirt_enabled);
+        OFFSET(PARAVIRT_PATCH_pv_cpu_ops, paravirt_patch_template, pv_cpu_ops);
+        OFFSET(PARAVIRT_PATCH_pv_irq_ops, paravirt_patch_template, pv_irq_ops);
+        OFFSET(PV_IRQ_irq_disable, pv_irq_ops, irq_disable);
+        OFFSET(PV_IRQ_irq_enable, pv_irq_ops, irq_enable);
+        OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
+        OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
+        OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
 #endif
 #ifdef CONFIG_XEN
...
@@ -437,7 +437,7 @@ ldt_ss:
  * is still available to implement the setting of the high
  * 16-bits in the INTERRUPT_RETURN paravirt-op.
  */
-        cmpl $0, paravirt_ops+PARAVIRT_enabled
+        cmpl $0, pv_info+PARAVIRT_enabled
         jne restore_nocheck
 #endif
...
This diff is collapsed.
This diff is collapsed.
@@ -741,24 +741,12 @@ struct kmem_cache *pmd_cache;
 void __init pgtable_cache_init(void)
 {
-        size_t pgd_size = PTRS_PER_PGD*sizeof(pgd_t);
-
-        if (PTRS_PER_PMD > 1) {
+        if (PTRS_PER_PMD > 1)
                 pmd_cache = kmem_cache_create("pmd",
                                               PTRS_PER_PMD*sizeof(pmd_t),
                                               PTRS_PER_PMD*sizeof(pmd_t),
                                               SLAB_PANIC,
                                               pmd_ctor);
-                if (!SHARED_KERNEL_PMD) {
-                        /* If we're in PAE mode and have a non-shared
-                           kernel pmd, then the pgd size must be a
-                           page size.  This is because the pgd_list
-                           links through the page structure, so there
-                           can only be one pgd per page for this to
-                           work. */
-                        pgd_size = PAGE_SIZE;
-                }
-        }
 }
 
 /*
...
This diff is collapsed.
@@ -41,7 +41,6 @@
 #include <linux/sched.h>
 #include <linux/highmem.h>
 #include <linux/bug.h>
-#include <linux/sched.h>
 
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
@@ -155,7 +154,7 @@ void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, pte_t pteval)
 {
         if (mm == current->mm || mm == &init_mm) {
-                if (xen_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
+                if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
                         struct multicall_space mcs;
                         mcs = xen_mc_entry(0);
@@ -304,7 +303,12 @@ pgd_t xen_make_pgd(unsigned long pgd)
 }
 #endif  /* CONFIG_X86_PAE */
 
+enum pt_level {
+        PT_PGD,
+        PT_PUD,
+        PT_PMD,
+        PT_PTE
+};
 
 /*
   (Yet another) pagetable walker.  This one is intended for pinning a
@@ -316,7 +320,7 @@ pgd_t xen_make_pgd(unsigned long pgd)
   FIXADDR_TOP.  But the important bit is that we don't pin beyond
   there, because then we start getting into Xen's ptes.
 */
-static int pgd_walk(pgd_t *pgd_base, int (*func)(struct page *, unsigned),
+static int pgd_walk(pgd_t *pgd_base, int (*func)(struct page *, enum pt_level),
                     unsigned long limit)
 {
         pgd_t *pgd = pgd_base;
@@ -341,7 +345,7 @@ static int pgd_walk(pgd_t *pgd_base, int (*func)(struct page *, unsigned),
                 pud = pud_offset(pgd, 0);
                 if (PTRS_PER_PUD > 1) /* not folded */
-                        flush |= (*func)(virt_to_page(pud), 0);
+                        flush |= (*func)(virt_to_page(pud), PT_PUD);
                 for (; addr != pud_limit; pud++, addr = pud_next) {
                         pmd_t *pmd;
@@ -360,7 +364,7 @@ static int pgd_walk(pgd_t *pgd_base, int (*func)(struct page *, unsigned),
                         pmd = pmd_offset(pud, 0);
                         if (PTRS_PER_PMD > 1) /* not folded */
-                                flush |= (*func)(virt_to_page(pmd), 0);
+                                flush |= (*func)(virt_to_page(pmd), PT_PMD);
                         for (; addr != pmd_limit; pmd++) {
                                 addr += (PAGE_SIZE * PTRS_PER_PTE);
@@ -372,17 +376,47 @@ static int pgd_walk(pgd_t *pgd_base, int (*func)(struct page *, unsigned),
                                 if (pmd_none(*pmd))
                                         continue;
 
-                                flush |= (*func)(pmd_page(*pmd), 0);
+                                flush |= (*func)(pmd_page(*pmd), PT_PTE);
                         }
                 }
         }
 
-        flush |= (*func)(virt_to_page(pgd_base), UVMF_TLB_FLUSH);
+        flush |= (*func)(virt_to_page(pgd_base), PT_PGD);
 
         return flush;
 }
 
-static int pin_page(struct page *page, unsigned flags)
+static spinlock_t *lock_pte(struct page *page)
+{
+        spinlock_t *ptl = NULL;
+
+#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
+        ptl = __pte_lockptr(page);
+        spin_lock(ptl);
+#endif
+
+        return ptl;
+}
+
+static void do_unlock(void *v)
+{
+        spinlock_t *ptl = v;
+        spin_unlock(ptl);
+}
+
+static void xen_do_pin(unsigned level, unsigned long pfn)
+{
+        struct mmuext_op *op;
+        struct multicall_space mcs;
+
+        mcs = __xen_mc_entry(sizeof(*op));
+        op = mcs.args;
+        op->cmd = level;
+        op->arg1.mfn = pfn_to_mfn(pfn);
+        MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+}
+
+static int pin_page(struct page *page, enum pt_level level)
 {
         unsigned pgfl = test_and_set_bit(PG_pinned, &page->flags);
         int flush;
@@ -397,12 +431,26 @@ static int pin_page(struct page *page, unsigned flags)
                 void *pt = lowmem_page_address(page);
                 unsigned long pfn = page_to_pfn(page);
                 struct multicall_space mcs = __xen_mc_entry(0);
+                spinlock_t *ptl;
 
                 flush = 0;
 
+                ptl = NULL;
+                if (level == PT_PTE)
+                        ptl = lock_pte(page);
+
                 MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
                                         pfn_pte(pfn, PAGE_KERNEL_RO),
-                                        flags);
+                                        level == PT_PGD ? UVMF_TLB_FLUSH : 0);
+
+                if (level == PT_PTE)
+                        xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);
+
+                if (ptl) {
+                        /* Queue a deferred unlock for when this batch
+                           is completed. */
+                        xen_mc_callback(do_unlock, ptl);
+                }
         }
 
         return flush;
@@ -413,8 +461,7 @@ static int pin_page(struct page *page, unsigned flags)
    read-only, and can be pinned. */
 void xen_pgd_pin(pgd_t *pgd)
 {
-        struct multicall_space mcs;
-        struct mmuext_op *op;
+        unsigned level;
 
         xen_mc_batch();
@@ -425,16 +472,13 @@ void xen_pgd_pin(pgd_t *pgd)
                 xen_mc_batch();
         }
 
-        mcs = __xen_mc_entry(sizeof(*op));
-        op = mcs.args;
-
 #ifdef CONFIG_X86_PAE
-        op->cmd = MMUEXT_PIN_L3_TABLE;
+        level = MMUEXT_PIN_L3_TABLE;
 #else
-        op->cmd = MMUEXT_PIN_L2_TABLE;
+        level = MMUEXT_PIN_L2_TABLE;
 #endif
-        op->arg1.mfn = pfn_to_mfn(PFN_DOWN(__pa(pgd)));
-        MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+
+        xen_do_pin(level, PFN_DOWN(__pa(pgd)));
 
         xen_mc_issue(0);
 }
@@ -442,7 +486,7 @@ void xen_pgd_pin(pgd_t *pgd)
 /* The init_mm pagetable is really pinned as soon as its created, but
    that's before we have page structures to store the bits.  So do all
    the book-keeping now. */
-static __init int mark_pinned(struct page *page, unsigned flags)
+static __init int mark_pinned(struct page *page, enum pt_level level)
 {
         SetPagePinned(page);
         return 0;
@@ -453,18 +497,32 @@ void __init xen_mark_init_mm_pinned(void)
         pgd_walk(init_mm.pgd, mark_pinned, FIXADDR_TOP);
 }
 
-static int unpin_page(struct page *page, unsigned flags)
+static int unpin_page(struct page *page, enum pt_level level)
 {
         unsigned pgfl = test_and_clear_bit(PG_pinned, &page->flags);
 
         if (pgfl && !PageHighMem(page)) {
                 void *pt = lowmem_page_address(page);
                 unsigned long pfn = page_to_pfn(page);
-                struct multicall_space mcs = __xen_mc_entry(0);
+                spinlock_t *ptl = NULL;
+                struct multicall_space mcs;
+
+                if (level == PT_PTE) {
+                        ptl = lock_pte(page);
+
+                        xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
+                }
+
+                mcs = __xen_mc_entry(0);
 
                 MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
                                         pfn_pte(pfn, PAGE_KERNEL),
-                                        flags);
+                                        level == PT_PGD ? UVMF_TLB_FLUSH : 0);
+
+                if (ptl) {
+                        /* unlock when batch completed */
+                        xen_mc_callback(do_unlock, ptl);
+                }
         }
 
         return 0;               /* never need to flush on unpin */
@@ -473,18 +531,9 @@ static int unpin_page(struct page *page, unsigned flags)
 /* Release a pagetables pages back as normal RW */
 static void xen_pgd_unpin(pgd_t *pgd)
 {
-        struct mmuext_op *op;
-        struct multicall_space mcs;
-
         xen_mc_batch();
 
-        mcs = __xen_mc_entry(sizeof(*op));
-
-        op = mcs.args;
-        op->cmd = MMUEXT_UNPIN_TABLE;
-        op->arg1.mfn = pfn_to_mfn(PFN_DOWN(__pa(pgd)));
-
-        MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+        xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
 
         pgd_walk(pgd, unpin_page, TASK_SIZE);
@@ -515,20 +564,43 @@ static void drop_other_mm_ref(void *info)
         if (__get_cpu_var(cpu_tlbstate).active_mm == mm)
                 leave_mm(smp_processor_id());
+
+        /* If this cpu still has a stale cr3 reference, then make sure
+           it has been flushed. */
+        if (x86_read_percpu(xen_current_cr3) == __pa(mm->pgd)) {
+                load_cr3(swapper_pg_dir);
+                arch_flush_lazy_cpu_mode();
+        }
 }
 
 static void drop_mm_ref(struct mm_struct *mm)
 {
+        cpumask_t mask;
+        unsigned cpu;
+
         if (current->active_mm == mm) {
                 if (current->mm == mm)
                         load_cr3(swapper_pg_dir);
                 else
                         leave_mm(smp_processor_id());
+                arch_flush_lazy_cpu_mode();
         }
 
-        if (!cpus_empty(mm->cpu_vm_mask))
-                xen_smp_call_function_mask(mm->cpu_vm_mask, drop_other_mm_ref,
-                                           mm, 1);
+        /* Get the "official" set of cpus referring to our pagetable. */
+        mask = mm->cpu_vm_mask;
+
+        /* It's possible that a vcpu may have a stale reference to our
+           cr3, because its in lazy mode, and it hasn't yet flushed
+           its set of pending hypercalls yet.  In this case, we can
+           look at its actual current cr3 value, and force it to flush
+           if needed. */
+        for_each_online_cpu(cpu) {
+                if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
+                        cpu_set(cpu, mask);
+        }
+
+        if (!cpus_empty(mask))
+                xen_smp_call_function_mask(mask, drop_other_mm_ref, mm, 1);
 }
 #else
 static void drop_mm_ref(struct mm_struct *mm)
@@ -563,5 +635,6 @@ void xen_exit_mmap(struct mm_struct *mm)
         /* pgd may not be pinned in the error exit path of execve */
         if (PagePinned(virt_to_page(mm->pgd)))
                 xen_pgd_unpin(mm->pgd);
+
         spin_unlock(&mm->page_table_lock);
 }
@@ -26,13 +26,22 @@
 #include "multicalls.h"
 
+#define MC_DEBUG        1
+
 #define MC_BATCH        32
 #define MC_ARGS         (MC_BATCH * 16 / sizeof(u64))
 
 struct mc_buffer {
         struct multicall_entry entries[MC_BATCH];
+#if MC_DEBUG
+        struct multicall_entry debug[MC_BATCH];
+#endif
         u64 args[MC_ARGS];
-        unsigned mcidx, argidx;
+        struct callback {
+                void (*fn)(void *);
+                void *data;
+        } callbacks[MC_BATCH];
+        unsigned mcidx, argidx, cbidx;
 };
 
 static DEFINE_PER_CPU(struct mc_buffer, mc_buffer);
@@ -43,6 +52,7 @@ void xen_mc_flush(void)
         struct mc_buffer *b = &__get_cpu_var(mc_buffer);
         int ret = 0;
         unsigned long flags;
+        int i;
 
         BUG_ON(preemptible());
@@ -51,13 +61,31 @@ void xen_mc_flush(void)
         local_irq_save(flags);
 
         if (b->mcidx) {
-                int i;
+#if MC_DEBUG
+                memcpy(b->debug, b->entries,
+                       b->mcidx * sizeof(struct multicall_entry));
+#endif
 
                 if (HYPERVISOR_multicall(b->entries, b->mcidx) != 0)
                         BUG();
                 for (i = 0; i < b->mcidx; i++)
                         if (b->entries[i].result < 0)
                                 ret++;
+
+#if MC_DEBUG
+                if (ret) {
+                        printk(KERN_ERR "%d multicall(s) failed: cpu %d\n",
+                               ret, smp_processor_id());
+                        for(i = 0; i < b->mcidx; i++) {
+                                printk("  call %2d/%d: op=%lu arg=[%lx] result=%ld\n",
+                                       i+1, b->mcidx,
+                                       b->debug[i].op,
+                                       b->debug[i].args[0],
+                                       b->entries[i].result);
+                        }
+                }
+#endif
+
                 b->mcidx = 0;
                 b->argidx = 0;
         } else
@@ -65,6 +93,13 @@ void xen_mc_flush(void)
         local_irq_restore(flags);
 
+        for(i = 0; i < b->cbidx; i++) {
+                struct callback *cb = &b->callbacks[i];
+
+                (*cb->fn)(cb->data);
+        }
+        b->cbidx = 0;
+
         BUG_ON(ret);
 }
@@ -88,3 +123,16 @@ struct multicall_space __xen_mc_entry(size_t args)
 
         return ret;
 }
+
+void xen_mc_callback(void (*fn)(void *), void *data)
+{
+        struct mc_buffer *b = &__get_cpu_var(mc_buffer);
+        struct callback *cb;
+
+        if (b->cbidx == MC_BATCH)
+                xen_mc_flush();
+
+        cb = &b->callbacks[b->cbidx++];
+        cb->fn = fn;
+        cb->data = data;
+}
@@ -35,11 +35,14 @@ void xen_mc_flush(void);
 /* Issue a multicall if we're not in a lazy mode */
 static inline void xen_mc_issue(unsigned mode)
 {
-        if ((xen_get_lazy_mode() & mode) == 0)
+        if ((paravirt_get_lazy_mode() & mode) == 0)
                 xen_mc_flush();
 
         /* restore flags saved in xen_mc_batch */
         local_irq_restore(x86_read_percpu(xen_mc_irq_flags));
 }
 
+/* Set up a callback to be called when the current batch is flushed */
+void xen_mc_callback(void (*fn)(void *), void *data);
+
 #endif /* _XEN_MULTICALLS_H */
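xen_mc_callback(), declared just above and implemented in the preceding multicalls.c hunk, queues work to run only after the current multicall batch has actually been flushed to the hypervisor; the pinning code earlier uses it to drop a pte lock once the batch that remaps the page read-only has completed. A minimal user-space model of that batch-plus-completion-callback pattern (illustrative only, not the kernel code):

/* toy_batch.c - toy model of batch completion callbacks; names echo the Xen
 * code above but none of this is the real implementation. */
#include <stdio.h>

#define MC_BATCH 4

struct callback {
        void (*fn)(void *);
        void *data;
};

static int pending[MC_BATCH];
static struct callback callbacks[MC_BATCH];
static unsigned mcidx, cbidx;

static void mc_flush(void)
{
        unsigned i;

        for (i = 0; i < mcidx; i++)        /* stand-in for issuing the hypercalls */
                printf("issuing call %d\n", pending[i]);
        mcidx = 0;

        for (i = 0; i < cbidx; i++)        /* run completion callbacks afterwards */
                callbacks[i].fn(callbacks[i].data);
        cbidx = 0;
}

static void mc_add(int call)
{
        if (mcidx == MC_BATCH)
                mc_flush();
        pending[mcidx++] = call;
}

static void mc_callback(void (*fn)(void *), void *data)
{
        if (cbidx == MC_BATCH)             /* same overflow rule as the real code */
                mc_flush();
        callbacks[cbidx].fn = fn;
        callbacks[cbidx].data = data;
        cbidx++;
}

static void do_unlock(void *v)
{
        printf("releasing %s after the batch completed\n", (const char *)v);
}

int main(void)
{
        mc_add(1);                          /* e.g. remap a pte page read-only */
        mc_callback(do_unlock, "pte lock"); /* drop the lock only once issued */
        mc_add(2);                          /* e.g. a pin operation */
        mc_flush();
        return 0;
}

As in the real code, a full callback array forces an immediate flush, so a queued callback is never dropped or reordered ahead of the work it guards.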
@@ -370,7 +370,8 @@ int xen_smp_call_function_mask(cpumask_t mask, void (*func)(void *),
                                void *info, int wait)
 {
         struct call_data_struct data;
-        int cpus;
+        int cpus, cpu;
+        bool yield;
 
         /* Holding any lock stops cpus from going down. */
         spin_lock(&call_lock);
@@ -399,9 +400,14 @@ int xen_smp_call_function_mask(cpumask_t mask, void (*func)(void *),
         /* Send a message to other CPUs and wait for them to respond */
         xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
 
-        /* Make sure other vcpus get a chance to run.
-           XXX too severe?  Maybe we should check the other CPU's states? */
-        HYPERVISOR_sched_op(SCHEDOP_yield, 0);
+        /* Make sure other vcpus get a chance to run if they need to. */
+        yield = false;
+        for_each_cpu_mask(cpu, mask)
+                if (xen_vcpu_stolen(cpu))
+                        yield = true;
+
+        if (yield)
+                HYPERVISOR_sched_op(SCHEDOP_yield, 0);
 
         /* Wait for response */
         while (atomic_read(&data.started) != cpus ||
...
@@ -105,6 +105,12 @@ static void get_runstate_snapshot(struct vcpu_runstate_info *res)
         } while (get64(&state->state_entry_time) != state_time);
 }
 
+/* return true when a vcpu could run but has no real cpu to run on */
+bool xen_vcpu_stolen(int vcpu)
+{
+        return per_cpu(runstate, vcpu).state == RUNSTATE_runnable;
+}
+
 static void setup_runstate_info(int cpu)
 {
         struct vcpu_register_runstate_memory_area area;
...
@@ -11,6 +11,7 @@ void xen_copy_trap_info(struct trap_info *traps);
 DECLARE_PER_CPU(struct vcpu_info *, xen_vcpu);
 DECLARE_PER_CPU(unsigned long, xen_cr3);
+DECLARE_PER_CPU(unsigned long, xen_current_cr3);
 
 extern struct start_info *xen_start_info;
 extern struct shared_info *HYPERVISOR_shared_info;
@@ -27,14 +28,9 @@ unsigned long xen_get_wallclock(void);
 int xen_set_wallclock(unsigned long time);
 unsigned long long xen_sched_clock(void);
 
-void xen_mark_init_mm_pinned(void);
+bool xen_vcpu_stolen(int vcpu);
 
-DECLARE_PER_CPU(enum paravirt_lazy_mode, xen_lazy_mode);
-
-static inline unsigned xen_get_lazy_mode(void)
-{
-        return x86_read_percpu(xen_lazy_mode);
-}
+void xen_mark_init_mm_pinned(void);
 
 void __init xen_fill_possible_map(void);
...
@@ -115,7 +115,7 @@ static struct hv_ops lguest_cons = {
  * (0), and the struct hv_ops containing the put_chars() function. */
 static int __init cons_init(void)
 {
-        if (strcmp(paravirt_ops.name, "lguest") != 0)
+        if (strcmp(pv_info.name, "lguest") != 0)
                 return 0;
 
         return hvc_instantiate(0, 0, &lguest_cons);
...
@@ -248,8 +248,8 @@ static void unmap_switcher(void)
 }
 
 /*H:130 Our Guest is usually so well behaved; it never tries to do things it
- * isn't allowed to.  Unfortunately, "struct paravirt_ops" isn't quite
- * complete, because it doesn't contain replacements for the Intel I/O
+ * isn't allowed to.  Unfortunately, Linux's paravirtual infrastructure isn't
+ * quite complete, because it doesn't contain replacements for the Intel I/O
  * instructions.  As a result, the Guest sometimes fumbles across one during
  * the boot process as it probes for various things which are usually attached
  * to a PC.
@@ -694,7 +694,7 @@ static int __init init(void)
         /* Lguest can't run under Xen, VMI or itself.  It does Tricky Stuff. */
         if (paravirt_enabled()) {
-                printk("lguest is afraid of %s\n", paravirt_ops.name);
+                printk("lguest is afraid of %s\n", pv_info.name);
                 return -EPERM;
         }
...
@@ -23,7 +23,7 @@
  *
  * So how does the kernel know it's a Guest?  The Guest starts at a special
  * entry point marked with a magic string, which sets up a few things then
- * calls here.  We replace the native functions in "struct paravirt_ops"
+ * calls here.  We replace the native functions various "paravirt" structures
  * with our Guest versions, then boot like normal. :*/
 
 /*
@@ -97,29 +97,17 @@ static cycle_t clock_base;
  * them as a batch when lazy_mode is eventually turned off.  Because hypercalls
  * are reasonably expensive, batching them up makes sense.  For example, a
  * large mmap might update dozens of page table entries: that code calls
- * lguest_lazy_mode(PARAVIRT_LAZY_MMU), does the dozen updates, then calls
- * lguest_lazy_mode(PARAVIRT_LAZY_NONE).
+ * paravirt_enter_lazy_mmu(), does the dozen updates, then calls
+ * lguest_leave_lazy_mode().
  *
  * So, when we're in lazy mode, we call async_hypercall() to store the call for
  * future processing.  When lazy mode is turned off we issue a hypercall to
  * flush the stored calls.
- *
- * There's also a hack where "mode" is set to "PARAVIRT_LAZY_FLUSH" which
- * indicates we're to flush any outstanding calls immediately.  This is used
- * when an interrupt handler does a kmap_atomic(): the page table changes must
- * happen immediately even if we're in the middle of a batch.  Usually we're
- * not, though, so there's nothing to do. */
-static enum paravirt_lazy_mode lazy_mode; /* Note: not SMP-safe! */
-static void lguest_lazy_mode(enum paravirt_lazy_mode mode)
+ */
+static void lguest_leave_lazy_mode(void)
 {
-        if (mode == PARAVIRT_LAZY_FLUSH) {
-                if (unlikely(lazy_mode != PARAVIRT_LAZY_NONE))
-                        hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0);
-        } else {
-                lazy_mode = mode;
-                if (mode == PARAVIRT_LAZY_NONE)
-                        hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0);
-        }
+        paravirt_leave_lazy(paravirt_get_lazy_mode());
+        hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0);
 }
 
 static void lazy_hcall(unsigned long call,
@@ -127,7 +115,7 @@ static void lazy_hcall(unsigned long call,
                        unsigned long arg2,
                        unsigned long arg3)
 {
-        if (lazy_mode == PARAVIRT_LAZY_NONE)
+        if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
                 hcall(call, arg1, arg2, arg3);
         else
                 async_hcall(call, arg1, arg2, arg3);
@@ -331,7 +319,7 @@ static void lguest_load_tls(struct thread_struct *t, unsigned int cpu)
 }
 
 /*G:038 That's enough excitement for now, back to ploughing through each of
- * the paravirt_ops (we're about 1/3 of the way through).
+ * the different pv_ops structures (we're about 1/3 of the way through).
  *
  * This is the Local Descriptor Table, another weird Intel thingy.  Linux only
  * uses this for some strange applications like Wine.  We don't do anything
@@ -558,7 +546,7 @@ static void lguest_set_pte(pte_t *ptep, pte_t pteval)
                 lazy_hcall(LHCALL_FLUSH_TLB, 1, 0, 0);
 }
 
-/* Unfortunately for Lguest, the paravirt_ops for page tables were based on
+/* Unfortunately for Lguest, the pv_mmu_ops for page tables were based on
  * native page table operations.  On native hardware you can set a new page
 * table entry whenever you want, but if you want to remove one you have to do
 * a TLB flush (a TLB is a little cache of page table entries kept by the CPU).
@@ -782,7 +770,7 @@ static void lguest_time_init(void)
         clocksource_register(&lguest_clock);
 
         /* Now we've set up our clock, we can use it as the scheduler clock */
-        paravirt_ops.sched_clock = lguest_sched_clock;
+        pv_time_ops.sched_clock = lguest_sched_clock;
 
         /* We can't set cpumask in the initializer: damn C limitations!  Set it
          * here and register our timer device. */
@@ -904,7 +892,7 @@ static __init char *lguest_memory_setup(void)
 /*G:050
  * Patching (Powerfully Placating Performance Pedants)
  *
- * We have already seen that "struct paravirt_ops" lets us replace simple
+ * We have already seen that pv_ops structures let us replace simple
  * native instructions with calls to the appropriate back end all throughout
 * the kernel.  This allows the same kernel to run as a Guest and as a native
 * kernel, but it's slow because of all the indirect branches.
@@ -929,10 +917,10 @@ static const struct lguest_insns
 {
         const char *start, *end;
 } lguest_insns[] = {
-        [PARAVIRT_PATCH(irq_disable)] = { lgstart_cli, lgend_cli },
-        [PARAVIRT_PATCH(irq_enable)] = { lgstart_sti, lgend_sti },
-        [PARAVIRT_PATCH(restore_fl)] = { lgstart_popf, lgend_popf },
-        [PARAVIRT_PATCH(save_fl)] = { lgstart_pushf, lgend_pushf },
+        [PARAVIRT_PATCH(pv_irq_ops.irq_disable)] = { lgstart_cli, lgend_cli },
+        [PARAVIRT_PATCH(pv_irq_ops.irq_enable)] = { lgstart_sti, lgend_sti },
+        [PARAVIRT_PATCH(pv_irq_ops.restore_fl)] = { lgstart_popf, lgend_popf },
+        [PARAVIRT_PATCH(pv_irq_ops.save_fl)] = { lgstart_pushf, lgend_pushf },
 };
 
 /* Now our patch routine is fairly simple (based on the native one in
@@ -959,9 +947,9 @@ static unsigned lguest_patch(u8 type, u16 clobber, void *ibuf,
         return insn_len;
 }
 
-/*G:030 Once we get to lguest_init(), we know we're a Guest.  The paravirt_ops
- * structure in the kernel provides a single point for (almost) every routine
- * we have to override to avoid privileged instructions. */
+/*G:030 Once we get to lguest_init(), we know we're a Guest.  The pv_ops
+ * structures in the kernel provide points for (almost) every routine we have
+ * to override to avoid privileged instructions. */
 __init void lguest_init(void *boot)
 {
         /* Copy boot parameters first: the Launcher put the physical location
@@ -976,54 +964,70 @@ __init void lguest_init(void *boot)
         /* We're under lguest, paravirt is enabled, and we're running at
          * privilege level 1, not 0 as normal. */
-        paravirt_ops.name = "lguest";
-        paravirt_ops.paravirt_enabled = 1;
-        paravirt_ops.kernel_rpl = 1;
+        pv_info.name = "lguest";
+        pv_info.paravirt_enabled = 1;
+        pv_info.kernel_rpl = 1;
 
         /* We set up all the lguest overrides for sensitive operations.  These
          * are detailed with the operations themselves. */
-        paravirt_ops.save_fl = save_fl;
-        paravirt_ops.restore_fl = restore_fl;
-        paravirt_ops.irq_disable = irq_disable;
-        paravirt_ops.irq_enable = irq_enable;
-        paravirt_ops.load_gdt = lguest_load_gdt;
-        paravirt_ops.memory_setup = lguest_memory_setup;
-        paravirt_ops.cpuid = lguest_cpuid;
-        paravirt_ops.write_cr3 = lguest_write_cr3;
-        paravirt_ops.flush_tlb_user = lguest_flush_tlb_user;
-        paravirt_ops.flush_tlb_single = lguest_flush_tlb_single;
-        paravirt_ops.flush_tlb_kernel = lguest_flush_tlb_kernel;
-        paravirt_ops.set_pte = lguest_set_pte;
-        paravirt_ops.set_pte_at = lguest_set_pte_at;
-        paravirt_ops.set_pmd = lguest_set_pmd;
+
+        /* interrupt-related operations */
+        pv_irq_ops.init_IRQ = lguest_init_IRQ;
+        pv_irq_ops.save_fl = save_fl;
+        pv_irq_ops.restore_fl = restore_fl;
+        pv_irq_ops.irq_disable = irq_disable;
+        pv_irq_ops.irq_enable = irq_enable;
+        pv_irq_ops.safe_halt = lguest_safe_halt;
+
+        /* init-time operations */
+        pv_init_ops.memory_setup = lguest_memory_setup;
+        pv_init_ops.patch = lguest_patch;
+
+        /* Intercepts of various cpu instructions */
+        pv_cpu_ops.load_gdt = lguest_load_gdt;
+        pv_cpu_ops.cpuid = lguest_cpuid;
+        pv_cpu_ops.load_idt = lguest_load_idt;
+        pv_cpu_ops.iret = lguest_iret;
+        pv_cpu_ops.load_esp0 = lguest_load_esp0;
+        pv_cpu_ops.load_tr_desc = lguest_load_tr_desc;
+        pv_cpu_ops.set_ldt = lguest_set_ldt;
+        pv_cpu_ops.load_tls = lguest_load_tls;
+        pv_cpu_ops.set_debugreg = lguest_set_debugreg;
+        pv_cpu_ops.clts = lguest_clts;
+        pv_cpu_ops.read_cr0 = lguest_read_cr0;
+        pv_cpu_ops.write_cr0 = lguest_write_cr0;
+        pv_cpu_ops.read_cr4 = lguest_read_cr4;
+        pv_cpu_ops.write_cr4 = lguest_write_cr4;
+        pv_cpu_ops.write_gdt_entry = lguest_write_gdt_entry;
+        pv_cpu_ops.write_idt_entry = lguest_write_idt_entry;
+        pv_cpu_ops.wbinvd = lguest_wbinvd;
+        pv_cpu_ops.lazy_mode.enter = paravirt_enter_lazy_cpu;
+        pv_cpu_ops.lazy_mode.leave = lguest_leave_lazy_mode;
+
+        /* pagetable management */
+        pv_mmu_ops.write_cr3 = lguest_write_cr3;
+        pv_mmu_ops.flush_tlb_user = lguest_flush_tlb_user;
+        pv_mmu_ops.flush_tlb_single = lguest_flush_tlb_single;
+        pv_mmu_ops.flush_tlb_kernel = lguest_flush_tlb_kernel;
+        pv_mmu_ops.set_pte = lguest_set_pte;
+        pv_mmu_ops.set_pte_at = lguest_set_pte_at;
+        pv_mmu_ops.set_pmd = lguest_set_pmd;
+        pv_mmu_ops.read_cr2 = lguest_read_cr2;
+        pv_mmu_ops.read_cr3 = lguest_read_cr3;
+        pv_mmu_ops.lazy_mode.enter = paravirt_enter_lazy_mmu;
+        pv_mmu_ops.lazy_mode.leave = lguest_leave_lazy_mode;
+
 #ifdef CONFIG_X86_LOCAL_APIC
-        paravirt_ops.apic_write = lguest_apic_write;
-        paravirt_ops.apic_write_atomic = lguest_apic_write;
-        paravirt_ops.apic_read = lguest_apic_read;
+        /* apic read/write intercepts */
+        pv_apic_ops.apic_write = lguest_apic_write;
+        pv_apic_ops.apic_write_atomic = lguest_apic_write;
+        pv_apic_ops.apic_read = lguest_apic_read;
 #endif
-        paravirt_ops.load_idt = lguest_load_idt;
-        paravirt_ops.iret = lguest_iret;
-        paravirt_ops.load_esp0 = lguest_load_esp0;
-        paravirt_ops.load_tr_desc = lguest_load_tr_desc;
-        paravirt_ops.set_ldt = lguest_set_ldt;
-        paravirt_ops.load_tls = lguest_load_tls;
-        paravirt_ops.set_debugreg = lguest_set_debugreg;
-        paravirt_ops.clts = lguest_clts;
-        paravirt_ops.read_cr0 = lguest_read_cr0;
-        paravirt_ops.write_cr0 = lguest_write_cr0;
-        paravirt_ops.init_IRQ = lguest_init_IRQ;
-        paravirt_ops.read_cr2 = lguest_read_cr2;
-        paravirt_ops.read_cr3 = lguest_read_cr3;
-        paravirt_ops.read_cr4 = lguest_read_cr4;
-        paravirt_ops.write_cr4 = lguest_write_cr4;
-        paravirt_ops.write_gdt_entry = lguest_write_gdt_entry;
-        paravirt_ops.write_idt_entry = lguest_write_idt_entry;
-        paravirt_ops.patch = lguest_patch;
-        paravirt_ops.safe_halt = lguest_safe_halt;
-        paravirt_ops.get_wallclock = lguest_get_wallclock;
-        paravirt_ops.time_init = lguest_time_init;
-        paravirt_ops.set_lazy_mode = lguest_lazy_mode;
-        paravirt_ops.wbinvd = lguest_wbinvd;
+
+        /* time operations */
+        pv_time_ops.get_wallclock = lguest_get_wallclock;
+        pv_time_ops.time_init = lguest_time_init;
 
         /* Now is a good time to look at the implementations of these functions
          * before returning to the rest of lguest_init(). */
...
@@ -201,7 +201,7 @@ static void scan_devices(void)
  * "struct lguest_device_desc" array. */
 static int __init lguest_bus_init(void)
 {
-        if (strcmp(paravirt_ops.name, "lguest") != 0)
+        if (strcmp(pv_info.name, "lguest") != 0)
                 return 0;
 
         /* Devices are in a single page above top of "normal" mem */
...
This diff is collapsed.
@@ -2,7 +2,7 @@
 #define _I386_PGTABLE_3LEVEL_DEFS_H
 
 #ifdef CONFIG_PARAVIRT
-#define SHARED_KERNEL_PMD       (paravirt_ops.shared_kernel_pmd)
+#define SHARED_KERNEL_PMD       (pv_info.shared_kernel_pmd)
 #else
 #define SHARED_KERNEL_PMD       1
 #endif
...
@@ -160,8 +160,9 @@ struct vcpu_set_singleshot_timer {
  */
 #define VCPUOP_register_vcpu_info   10  /* arg == struct vcpu_info */
 struct vcpu_register_vcpu_info {
-        uint32_t mfn;           /* mfn of page to place vcpu_info */
+        uint64_t mfn;           /* mfn of page to place vcpu_info */
         uint32_t offset;        /* offset within page */
+        uint32_t rsvd;          /* unused */
 };
 
 #endif /* __XEN_PUBLIC_VCPU_H__ */
@@ -155,7 +155,6 @@ config SPLIT_PTLOCK_CPUS
         int
         default "4096" if ARM && !CPU_CACHE_VIPT
         default "4096" if PARISC && !PA20
-        default "4096" if XEN
         default "4"
 
 #
...