Commit bdccc4ed authored by Linus Torvalds

Merge tag 'stable/for-linus-3.20-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen features and fixes from David Vrabel:

 - Reworked handling for foreign (grant mapped) pages to simplify the
   code, enable additional use cases, and fix a number of long-standing
   bugs (a minimal sketch of the resulting backend-side API follows this
   list).

 - Prefer the TSC over the Xen PV clock when running as dom0 (and the
   TSC is stable).

 - Assorted other cleanups and minor bug fixes.
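
For orientation, here is a minimal sketch of how a backend driver uses the grant-page helpers this series introduces (gnttab_alloc_pages(), gnttab_map_refs(), gnttab_unmap_refs(), gnttab_free_pages(); all visible in the drivers/xen/grant-table.c hunks further down). It is not taken from any driver in this tree; the demo_ name and the error codes are illustrative.

#include <linux/mm.h>
#include <xen/grant_table.h>

/* Illustrative sketch, not from this series: map one grant from
 * @otherend into the kernel, use it, and tear it down again. */
static int demo_map_one_grant(domid_t otherend, grant_ref_t gref)
{
        struct gnttab_map_grant_ref map;
        struct gnttab_unmap_grant_ref unmap;
        struct page *page;
        unsigned long addr;
        int err;

        /* Grant maps must now target ballooned pages; this helper provides one. */
        err = gnttab_alloc_pages(1, &page);
        if (err)
                return err;

        addr = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
        gnttab_set_map_op(&map, addr, GNTMAP_host_map, gref, otherend);

        err = gnttab_map_refs(&map, NULL, &page, 1);
        if (!err && map.status != GNTST_okay)
                err = -EINVAL;
        if (err)
                goto out_free;

        /* ... use the mapping; the page is now marked as foreign ... */

        gnttab_set_unmap_op(&unmap, addr, GNTMAP_host_map, map.handle);
        err = gnttab_unmap_refs(&unmap, NULL, &page, 1);

out_free:
        gnttab_free_pages(1, &page);
        return err;
}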

* tag 'stable/for-linus-3.20-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip: (25 commits)
  xen/manage: Fix USB interaction issues when resuming
  xenbus: Add proper handling of XS_ERROR from Xenbus for transactions.
  xen/gntdev: provide find_special_page VMA operation
  xen/gntdev: mark userspace PTEs as special on x86 PV guests
  xen-blkback: safely unmap grants in case they are still in use
  xen/gntdev: safely unmap grants in case they are still in use
  xen/gntdev: convert priv->lock to a mutex
  xen/grant-table: add a mechanism to safely unmap pages that are in use
  xen-netback: use foreign page information from the pages themselves
  xen: mark grant mapped pages as foreign
  xen/grant-table: add helpers for allocating pages
  x86/xen: require ballooned pages for grant maps
  xen: remove scratch frames for ballooned pages and m2p override
  xen/grant-table: pre-populate kernel unmap ops for xen_gnttab_unmap_refs()
  mm: add 'foreign' alias for the 'pinned' page flag
  mm: provide a find_special_page vma operation
  x86/xen: cleanup arch/x86/xen/mmu.c
  x86/xen: add some __init annotations in arch/x86/xen/mmu.c
  x86/xen: add some __init and static annotations in arch/x86/xen/setup.c
  x86/xen: use correct types for addresses in arch/x86/xen/setup.c
  ...
parents 98368ab4 72978b2f
...@@ -92,7 +92,7 @@ extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops, ...@@ -92,7 +92,7 @@ extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
struct page **pages, unsigned int count); struct page **pages, unsigned int count);
extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops, extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
struct gnttab_map_grant_ref *kmap_ops, struct gnttab_unmap_grant_ref *kunmap_ops,
struct page **pages, unsigned int count); struct page **pages, unsigned int count);
bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn); bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
......
...@@ -29,10 +29,10 @@ ...@@ -29,10 +29,10 @@
struct start_info _xen_start_info; struct start_info _xen_start_info;
struct start_info *xen_start_info = &_xen_start_info; struct start_info *xen_start_info = &_xen_start_info;
EXPORT_SYMBOL_GPL(xen_start_info); EXPORT_SYMBOL(xen_start_info);
enum xen_domain_type xen_domain_type = XEN_NATIVE; enum xen_domain_type xen_domain_type = XEN_NATIVE;
EXPORT_SYMBOL_GPL(xen_domain_type); EXPORT_SYMBOL(xen_domain_type);
struct shared_info xen_dummy_shared_info; struct shared_info xen_dummy_shared_info;
struct shared_info *HYPERVISOR_shared_info = (void *)&xen_dummy_shared_info; struct shared_info *HYPERVISOR_shared_info = (void *)&xen_dummy_shared_info;
......
...@@ -149,7 +149,7 @@ void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order) ...@@ -149,7 +149,7 @@ void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region); EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
struct dma_map_ops *xen_dma_ops; struct dma_map_ops *xen_dma_ops;
EXPORT_SYMBOL_GPL(xen_dma_ops); EXPORT_SYMBOL(xen_dma_ops);
static struct dma_map_ops xen_swiotlb_dma_ops = { static struct dma_map_ops xen_swiotlb_dma_ops = {
.mapping_error = xen_swiotlb_dma_mapping_error, .mapping_error = xen_swiotlb_dma_mapping_error,
......
...@@ -102,7 +102,7 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops, ...@@ -102,7 +102,7 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
EXPORT_SYMBOL_GPL(set_foreign_p2m_mapping); EXPORT_SYMBOL_GPL(set_foreign_p2m_mapping);
int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops, int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
struct gnttab_map_grant_ref *kmap_ops, struct gnttab_unmap_grant_ref *kunmap_ops,
struct page **pages, unsigned int count) struct page **pages, unsigned int count)
{ {
int i; int i;
......
...@@ -55,9 +55,8 @@ extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops, ...@@ -55,9 +55,8 @@ extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
struct gnttab_map_grant_ref *kmap_ops, struct gnttab_map_grant_ref *kmap_ops,
struct page **pages, unsigned int count); struct page **pages, unsigned int count);
extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops, extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
struct gnttab_map_grant_ref *kmap_ops, struct gnttab_unmap_grant_ref *kunmap_ops,
struct page **pages, unsigned int count); struct page **pages, unsigned int count);
extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);
/* /*
* Helper functions to write or read unsigned long values to/from * Helper functions to write or read unsigned long values to/from
...@@ -154,21 +153,12 @@ static inline unsigned long mfn_to_pfn(unsigned long mfn) ...@@ -154,21 +153,12 @@ static inline unsigned long mfn_to_pfn(unsigned long mfn)
return mfn; return mfn;
pfn = mfn_to_pfn_no_overrides(mfn); pfn = mfn_to_pfn_no_overrides(mfn);
if (__pfn_to_mfn(pfn) != mfn) { if (__pfn_to_mfn(pfn) != mfn)
/* pfn = ~0;
* If this appears to be a foreign mfn (because the pfn
* doesn't map back to the mfn), then check the local override
* table to see if there's a better pfn to use.
*
* m2p_find_override_pfn returns ~0 if it doesn't find anything.
*/
pfn = m2p_find_override_pfn(mfn, ~0);
}
/* /*
* pfn is ~0 if there are no entries in the m2p for mfn or if the * pfn is ~0 if there are no entries in the m2p for mfn or the
* entry doesn't map back to the mfn and m2p_override doesn't have a * entry doesn't map back to the mfn.
* valid entry for it.
*/ */
if (pfn == ~0 && __pfn_to_mfn(mfn) == IDENTITY_FRAME(mfn)) if (pfn == ~0 && __pfn_to_mfn(mfn) == IDENTITY_FRAME(mfn))
pfn = mfn; pfn = mfn;
......
...@@ -1489,7 +1489,7 @@ static void __init xen_set_pte_init(pte_t *ptep, pte_t pte) ...@@ -1489,7 +1489,7 @@ static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
native_set_pte(ptep, pte); native_set_pte(ptep, pte);
} }
static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn) static void __init pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
{ {
struct mmuext_op op; struct mmuext_op op;
op.cmd = cmd; op.cmd = cmd;
...@@ -1657,7 +1657,7 @@ void __init xen_reserve_top(void) ...@@ -1657,7 +1657,7 @@ void __init xen_reserve_top(void)
* Like __va(), but returns address in the kernel mapping (which is * Like __va(), but returns address in the kernel mapping (which is
* all we have until the physical memory mapping has been set up. * all we have until the physical memory mapping has been set up.
*/ */
static void *__ka(phys_addr_t paddr) static void * __init __ka(phys_addr_t paddr)
{ {
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
return (void *)(paddr + __START_KERNEL_map); return (void *)(paddr + __START_KERNEL_map);
...@@ -1667,7 +1667,7 @@ static void *__ka(phys_addr_t paddr) ...@@ -1667,7 +1667,7 @@ static void *__ka(phys_addr_t paddr)
} }
/* Convert a machine address to physical address */ /* Convert a machine address to physical address */
static unsigned long m2p(phys_addr_t maddr) static unsigned long __init m2p(phys_addr_t maddr)
{ {
phys_addr_t paddr; phys_addr_t paddr;
...@@ -1678,13 +1678,14 @@ static unsigned long m2p(phys_addr_t maddr) ...@@ -1678,13 +1678,14 @@ static unsigned long m2p(phys_addr_t maddr)
} }
/* Convert a machine address to kernel virtual */ /* Convert a machine address to kernel virtual */
static void *m2v(phys_addr_t maddr) static void * __init m2v(phys_addr_t maddr)
{ {
return __ka(m2p(maddr)); return __ka(m2p(maddr));
} }
/* Set the page permissions on an identity-mapped pages */ /* Set the page permissions on an identity-mapped pages */
static void set_page_prot_flags(void *addr, pgprot_t prot, unsigned long flags) static void __init set_page_prot_flags(void *addr, pgprot_t prot,
unsigned long flags)
{ {
unsigned long pfn = __pa(addr) >> PAGE_SHIFT; unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
pte_t pte = pfn_pte(pfn, prot); pte_t pte = pfn_pte(pfn, prot);
...@@ -1696,7 +1697,7 @@ static void set_page_prot_flags(void *addr, pgprot_t prot, unsigned long flags) ...@@ -1696,7 +1697,7 @@ static void set_page_prot_flags(void *addr, pgprot_t prot, unsigned long flags)
if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags)) if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags))
BUG(); BUG();
} }
static void set_page_prot(void *addr, pgprot_t prot) static void __init set_page_prot(void *addr, pgprot_t prot)
{ {
return set_page_prot_flags(addr, prot, UVMF_NONE); return set_page_prot_flags(addr, prot, UVMF_NONE);
} }
...@@ -1733,10 +1734,8 @@ static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn) ...@@ -1733,10 +1734,8 @@ static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) { for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
pte_t pte; pte_t pte;
#ifdef CONFIG_X86_32
if (pfn > max_pfn_mapped) if (pfn > max_pfn_mapped)
max_pfn_mapped = pfn; max_pfn_mapped = pfn;
#endif
if (!pte_none(pte_page[pteidx])) if (!pte_none(pte_page[pteidx]))
continue; continue;
...@@ -1769,7 +1768,7 @@ void __init xen_setup_machphys_mapping(void) ...@@ -1769,7 +1768,7 @@ void __init xen_setup_machphys_mapping(void)
} }
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
static void convert_pfn_mfn(void *v) static void __init convert_pfn_mfn(void *v)
{ {
pte_t *pte = v; pte_t *pte = v;
int i; int i;
......
...@@ -84,8 +84,6 @@ ...@@ -84,8 +84,6 @@
#define PMDS_PER_MID_PAGE (P2M_MID_PER_PAGE / PTRS_PER_PTE) #define PMDS_PER_MID_PAGE (P2M_MID_PER_PAGE / PTRS_PER_PTE)
static void __init m2p_override_init(void);
unsigned long *xen_p2m_addr __read_mostly; unsigned long *xen_p2m_addr __read_mostly;
EXPORT_SYMBOL_GPL(xen_p2m_addr); EXPORT_SYMBOL_GPL(xen_p2m_addr);
unsigned long xen_p2m_size __read_mostly; unsigned long xen_p2m_size __read_mostly;
...@@ -402,8 +400,6 @@ void __init xen_vmalloc_p2m_tree(void) ...@@ -402,8 +400,6 @@ void __init xen_vmalloc_p2m_tree(void)
xen_p2m_size = xen_max_p2m_pfn; xen_p2m_size = xen_max_p2m_pfn;
xen_inv_extra_mem(); xen_inv_extra_mem();
m2p_override_init();
} }
unsigned long get_phys_to_machine(unsigned long pfn) unsigned long get_phys_to_machine(unsigned long pfn)
...@@ -652,100 +648,21 @@ bool set_phys_to_machine(unsigned long pfn, unsigned long mfn) ...@@ -652,100 +648,21 @@ bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
return true; return true;
} }
#define M2P_OVERRIDE_HASH_SHIFT 10
#define M2P_OVERRIDE_HASH (1 << M2P_OVERRIDE_HASH_SHIFT)
static struct list_head *m2p_overrides;
static DEFINE_SPINLOCK(m2p_override_lock);
static void __init m2p_override_init(void)
{
unsigned i;
m2p_overrides = alloc_bootmem_align(
sizeof(*m2p_overrides) * M2P_OVERRIDE_HASH,
sizeof(unsigned long));
for (i = 0; i < M2P_OVERRIDE_HASH; i++)
INIT_LIST_HEAD(&m2p_overrides[i]);
}
static unsigned long mfn_hash(unsigned long mfn)
{
return hash_long(mfn, M2P_OVERRIDE_HASH_SHIFT);
}
/* Add an MFN override for a particular page */
static int m2p_add_override(unsigned long mfn, struct page *page,
struct gnttab_map_grant_ref *kmap_op)
{
unsigned long flags;
unsigned long pfn;
unsigned long uninitialized_var(address);
unsigned level;
pte_t *ptep = NULL;
pfn = page_to_pfn(page);
if (!PageHighMem(page)) {
address = (unsigned long)__va(pfn << PAGE_SHIFT);
ptep = lookup_address(address, &level);
if (WARN(ptep == NULL || level != PG_LEVEL_4K,
"m2p_add_override: pfn %lx not mapped", pfn))
return -EINVAL;
}
if (kmap_op != NULL) {
if (!PageHighMem(page)) {
struct multicall_space mcs =
xen_mc_entry(sizeof(*kmap_op));
MULTI_grant_table_op(mcs.mc,
GNTTABOP_map_grant_ref, kmap_op, 1);
xen_mc_issue(PARAVIRT_LAZY_MMU);
}
}
spin_lock_irqsave(&m2p_override_lock, flags);
list_add(&page->lru, &m2p_overrides[mfn_hash(mfn)]);
spin_unlock_irqrestore(&m2p_override_lock, flags);
/* p2m(m2p(mfn)) == mfn: the mfn is already present somewhere in
* this domain. Set the FOREIGN_FRAME_BIT in the p2m for the other
* pfn so that the following mfn_to_pfn(mfn) calls will return the
* pfn from the m2p_override (the backend pfn) instead.
* We need to do this because the pages shared by the frontend
* (xen-blkfront) can be already locked (lock_page, called by
* do_read_cache_page); when the userspace backend tries to use them
* with direct_IO, mfn_to_pfn returns the pfn of the frontend, so
* do_blockdev_direct_IO is going to try to lock the same pages
* again resulting in a deadlock.
* As a side effect get_user_pages_fast might not be safe on the
* frontend pages while they are being shared with the backend,
* because mfn_to_pfn (that ends up being called by GUPF) will
* return the backend pfn rather than the frontend pfn. */
pfn = mfn_to_pfn_no_overrides(mfn);
if (__pfn_to_mfn(pfn) == mfn)
set_phys_to_machine(pfn, FOREIGN_FRAME(mfn));
return 0;
}
int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops, int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
struct gnttab_map_grant_ref *kmap_ops, struct gnttab_map_grant_ref *kmap_ops,
struct page **pages, unsigned int count) struct page **pages, unsigned int count)
{ {
int i, ret = 0; int i, ret = 0;
bool lazy = false;
pte_t *pte; pte_t *pte;
if (xen_feature(XENFEAT_auto_translated_physmap)) if (xen_feature(XENFEAT_auto_translated_physmap))
return 0; return 0;
if (kmap_ops && if (kmap_ops) {
!in_interrupt() && ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) { kmap_ops, count);
arch_enter_lazy_mmu_mode(); if (ret)
lazy = true; goto out;
} }
for (i = 0; i < count; i++) { for (i = 0; i < count; i++) {
...@@ -764,170 +681,28 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops, ...@@ -764,170 +681,28 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
} }
pfn = page_to_pfn(pages[i]); pfn = page_to_pfn(pages[i]);
WARN_ON(PagePrivate(pages[i])); WARN(pfn_to_mfn(pfn) != INVALID_P2M_ENTRY, "page must be ballooned");
SetPagePrivate(pages[i]);
set_page_private(pages[i], mfn);
pages[i]->index = pfn_to_mfn(pfn);
if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) { if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) {
ret = -ENOMEM; ret = -ENOMEM;
goto out; goto out;
} }
if (kmap_ops) {
ret = m2p_add_override(mfn, pages[i], &kmap_ops[i]);
if (ret)
goto out;
}
} }
out: out:
if (lazy)
arch_leave_lazy_mmu_mode();
return ret; return ret;
} }
EXPORT_SYMBOL_GPL(set_foreign_p2m_mapping); EXPORT_SYMBOL_GPL(set_foreign_p2m_mapping);
static struct page *m2p_find_override(unsigned long mfn)
{
unsigned long flags;
struct list_head *bucket;
struct page *p, *ret;
if (unlikely(!m2p_overrides))
return NULL;
ret = NULL;
bucket = &m2p_overrides[mfn_hash(mfn)];
spin_lock_irqsave(&m2p_override_lock, flags);
list_for_each_entry(p, bucket, lru) {
if (page_private(p) == mfn) {
ret = p;
break;
}
}
spin_unlock_irqrestore(&m2p_override_lock, flags);
return ret;
}
static int m2p_remove_override(struct page *page,
struct gnttab_map_grant_ref *kmap_op,
unsigned long mfn)
{
unsigned long flags;
unsigned long pfn;
unsigned long uninitialized_var(address);
unsigned level;
pte_t *ptep = NULL;
pfn = page_to_pfn(page);
if (!PageHighMem(page)) {
address = (unsigned long)__va(pfn << PAGE_SHIFT);
ptep = lookup_address(address, &level);
if (WARN(ptep == NULL || level != PG_LEVEL_4K,
"m2p_remove_override: pfn %lx not mapped", pfn))
return -EINVAL;
}
spin_lock_irqsave(&m2p_override_lock, flags);
list_del(&page->lru);
spin_unlock_irqrestore(&m2p_override_lock, flags);
if (kmap_op != NULL) {
if (!PageHighMem(page)) {
struct multicall_space mcs;
struct gnttab_unmap_and_replace *unmap_op;
struct page *scratch_page = get_balloon_scratch_page();
unsigned long scratch_page_address = (unsigned long)
__va(page_to_pfn(scratch_page) << PAGE_SHIFT);
/*
* It might be that we queued all the m2p grant table
* hypercalls in a multicall, then m2p_remove_override
* get called before the multicall has actually been
* issued. In this case handle is going to -1 because
* it hasn't been modified yet.
*/
if (kmap_op->handle == -1)
xen_mc_flush();
/*
* Now if kmap_op->handle is negative it means that the
* hypercall actually returned an error.
*/
if (kmap_op->handle == GNTST_general_error) {
pr_warn("m2p_remove_override: pfn %lx mfn %lx, failed to modify kernel mappings",
pfn, mfn);
put_balloon_scratch_page();
return -1;
}
xen_mc_batch();
mcs = __xen_mc_entry(
sizeof(struct gnttab_unmap_and_replace));
unmap_op = mcs.args;
unmap_op->host_addr = kmap_op->host_addr;
unmap_op->new_addr = scratch_page_address;
unmap_op->handle = kmap_op->handle;
MULTI_grant_table_op(mcs.mc,
GNTTABOP_unmap_and_replace, unmap_op, 1);
mcs = __xen_mc_entry(0);
MULTI_update_va_mapping(mcs.mc, scratch_page_address,
pfn_pte(page_to_pfn(scratch_page),
PAGE_KERNEL_RO), 0);
xen_mc_issue(PARAVIRT_LAZY_MMU);
kmap_op->host_addr = 0;
put_balloon_scratch_page();
}
}
/* p2m(m2p(mfn)) == FOREIGN_FRAME(mfn): the mfn is already present
* somewhere in this domain, even before being added to the
* m2p_override (see comment above in m2p_add_override).
* If there are no other entries in the m2p_override corresponding
* to this mfn, then remove the FOREIGN_FRAME_BIT from the p2m for
* the original pfn (the one shared by the frontend): the backend
* cannot do any IO on this page anymore because it has been
* unshared. Removing the FOREIGN_FRAME_BIT from the p2m entry of
* the original pfn causes mfn_to_pfn(mfn) to return the frontend
* pfn again. */
mfn &= ~FOREIGN_FRAME_BIT;
pfn = mfn_to_pfn_no_overrides(mfn);
if (__pfn_to_mfn(pfn) == FOREIGN_FRAME(mfn) &&
m2p_find_override(mfn) == NULL)
set_phys_to_machine(pfn, mfn);
return 0;
}
int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops, int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
struct gnttab_map_grant_ref *kmap_ops, struct gnttab_unmap_grant_ref *kunmap_ops,
struct page **pages, unsigned int count) struct page **pages, unsigned int count)
{ {
int i, ret = 0; int i, ret = 0;
bool lazy = false;
if (xen_feature(XENFEAT_auto_translated_physmap)) if (xen_feature(XENFEAT_auto_translated_physmap))
return 0; return 0;
if (kmap_ops &&
!in_interrupt() &&
paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
arch_enter_lazy_mmu_mode();
lazy = true;
}
for (i = 0; i < count; i++) { for (i = 0; i < count; i++) {
unsigned long mfn = __pfn_to_mfn(page_to_pfn(pages[i])); unsigned long mfn = __pfn_to_mfn(page_to_pfn(pages[i]));
unsigned long pfn = page_to_pfn(pages[i]); unsigned long pfn = page_to_pfn(pages[i]);
...@@ -937,36 +712,16 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops, ...@@ -937,36 +712,16 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
goto out; goto out;
} }
set_page_private(pages[i], INVALID_P2M_ENTRY); set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
WARN_ON(!PagePrivate(pages[i]));
ClearPagePrivate(pages[i]);
set_phys_to_machine(pfn, pages[i]->index);
if (kmap_ops)
ret = m2p_remove_override(pages[i], &kmap_ops[i], mfn);
if (ret)
goto out;
} }
if (kunmap_ops)
ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
kunmap_ops, count);
out: out:
if (lazy)
arch_leave_lazy_mmu_mode();
return ret; return ret;
} }
EXPORT_SYMBOL_GPL(clear_foreign_p2m_mapping); EXPORT_SYMBOL_GPL(clear_foreign_p2m_mapping);
unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn)
{
struct page *p = m2p_find_override(mfn);
unsigned long ret = pfn;
if (p)
ret = page_to_pfn(p);
return ret;
}
EXPORT_SYMBOL_GPL(m2p_find_override_pfn);
#ifdef CONFIG_XEN_DEBUG_FS #ifdef CONFIG_XEN_DEBUG_FS
#include <linux/debugfs.h> #include <linux/debugfs.h>
#include "debugfs.h" #include "debugfs.h"
......
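
With the m2p_override machinery gone, the per-page bookkeeping for a foreign mapping reduces to a pair of p2m updates, which is also why grant maps must now use ballooned pages. Below is a condensed, illustrative restatement of the set_foreign_p2m_mapping()/clear_foreign_p2m_mapping() logic shown above; the demo_ function is not real kernel code.

#include <linux/kernel.h>
#include <asm/xen/page.h>

/* Condensed from the hunks above; illustrative only. */
static int demo_track_foreign_frame(unsigned long pfn, unsigned long mfn)
{
        /* The target page must be ballooned out (no machine frame of its own). */
        WARN(pfn_to_mfn(pfn) != INVALID_P2M_ENTRY, "page must be ballooned");

        /* Map: record the foreign mfn, tagged with the FOREIGN_FRAME bit. */
        if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn))))
                return -ENOMEM;

        /* Unmap: simply invalidate the entry again. */
        set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
        return 0;
}
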
...@@ -32,16 +32,6 @@ ...@@ -32,16 +32,6 @@
#include "p2m.h" #include "p2m.h"
#include "mmu.h" #include "mmu.h"
/* These are code, but not functions. Defined in entry.S */
extern const char xen_hypervisor_callback[];
extern const char xen_failsafe_callback[];
#ifdef CONFIG_X86_64
extern asmlinkage void nmi(void);
#endif
extern void xen_sysenter_target(void);
extern void xen_syscall_target(void);
extern void xen_syscall32_target(void);
/* Amount of extra memory space we add to the e820 ranges */ /* Amount of extra memory space we add to the e820 ranges */
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata; struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;
...@@ -74,7 +64,7 @@ static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY; ...@@ -74,7 +64,7 @@ static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY;
*/ */
#define EXTRA_MEM_RATIO (10) #define EXTRA_MEM_RATIO (10)
static void __init xen_add_extra_mem(u64 start, u64 size) static void __init xen_add_extra_mem(phys_addr_t start, phys_addr_t size)
{ {
int i; int i;
...@@ -97,10 +87,10 @@ static void __init xen_add_extra_mem(u64 start, u64 size) ...@@ -97,10 +87,10 @@ static void __init xen_add_extra_mem(u64 start, u64 size)
memblock_reserve(start, size); memblock_reserve(start, size);
} }
static void __init xen_del_extra_mem(u64 start, u64 size) static void __init xen_del_extra_mem(phys_addr_t start, phys_addr_t size)
{ {
int i; int i;
u64 start_r, size_r; phys_addr_t start_r, size_r;
for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) { for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
start_r = xen_extra_mem[i].start; start_r = xen_extra_mem[i].start;
...@@ -267,7 +257,7 @@ static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn, ...@@ -267,7 +257,7 @@ static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn) static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn)
{ {
struct mmu_update update = { struct mmu_update update = {
.ptr = ((unsigned long long)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE, .ptr = ((uint64_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE,
.val = pfn .val = pfn
}; };
...@@ -545,20 +535,21 @@ static unsigned long __init xen_get_max_pages(void) ...@@ -545,20 +535,21 @@ static unsigned long __init xen_get_max_pages(void)
return min(max_pages, MAX_DOMAIN_PAGES); return min(max_pages, MAX_DOMAIN_PAGES);
} }
static void xen_align_and_add_e820_region(u64 start, u64 size, int type) static void __init xen_align_and_add_e820_region(phys_addr_t start,
phys_addr_t size, int type)
{ {
u64 end = start + size; phys_addr_t end = start + size;
/* Align RAM regions to page boundaries. */ /* Align RAM regions to page boundaries. */
if (type == E820_RAM) { if (type == E820_RAM) {
start = PAGE_ALIGN(start); start = PAGE_ALIGN(start);
end &= ~((u64)PAGE_SIZE - 1); end &= ~((phys_addr_t)PAGE_SIZE - 1);
} }
e820_add_region(start, end - start, type); e820_add_region(start, end - start, type);
} }
void xen_ignore_unusable(struct e820entry *list, size_t map_size) static void __init xen_ignore_unusable(struct e820entry *list, size_t map_size)
{ {
struct e820entry *entry; struct e820entry *entry;
unsigned int i; unsigned int i;
...@@ -577,7 +568,7 @@ char * __init xen_memory_setup(void) ...@@ -577,7 +568,7 @@ char * __init xen_memory_setup(void)
static struct e820entry map[E820MAX] __initdata; static struct e820entry map[E820MAX] __initdata;
unsigned long max_pfn = xen_start_info->nr_pages; unsigned long max_pfn = xen_start_info->nr_pages;
unsigned long long mem_end; phys_addr_t mem_end;
int rc; int rc;
struct xen_memory_map memmap; struct xen_memory_map memmap;
unsigned long max_pages; unsigned long max_pages;
...@@ -652,16 +643,16 @@ char * __init xen_memory_setup(void) ...@@ -652,16 +643,16 @@ char * __init xen_memory_setup(void)
extra_pages); extra_pages);
i = 0; i = 0;
while (i < memmap.nr_entries) { while (i < memmap.nr_entries) {
u64 addr = map[i].addr; phys_addr_t addr = map[i].addr;
u64 size = map[i].size; phys_addr_t size = map[i].size;
u32 type = map[i].type; u32 type = map[i].type;
if (type == E820_RAM) { if (type == E820_RAM) {
if (addr < mem_end) { if (addr < mem_end) {
size = min(size, mem_end - addr); size = min(size, mem_end - addr);
} else if (extra_pages) { } else if (extra_pages) {
size = min(size, (u64)extra_pages * PAGE_SIZE); size = min(size, PFN_PHYS(extra_pages));
extra_pages -= size / PAGE_SIZE; extra_pages -= PFN_DOWN(size);
xen_add_extra_mem(addr, size); xen_add_extra_mem(addr, size);
xen_max_p2m_pfn = PFN_DOWN(addr + size); xen_max_p2m_pfn = PFN_DOWN(addr + size);
} else } else
......
...@@ -507,7 +507,7 @@ static int xen_cpu_disable(void) ...@@ -507,7 +507,7 @@ static int xen_cpu_disable(void)
static void xen_cpu_die(unsigned int cpu) static void xen_cpu_die(unsigned int cpu)
{ {
while (xen_pv_domain() && HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) { while (xen_pv_domain() && HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
current->state = TASK_UNINTERRUPTIBLE; __set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(HZ/10); schedule_timeout(HZ/10);
} }
......
...@@ -479,6 +479,10 @@ static void __init xen_time_init(void) ...@@ -479,6 +479,10 @@ static void __init xen_time_init(void)
int cpu = smp_processor_id(); int cpu = smp_processor_id();
struct timespec tp; struct timespec tp;
/* As Dom0 is never moved, no penalty on using TSC there */
if (xen_initial_domain())
xen_clocksource.rating = 275;
clocksource_register_hz(&xen_clocksource, NSEC_PER_SEC); clocksource_register_hz(&xen_clocksource, NSEC_PER_SEC);
if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL) == 0) { if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL) == 0) {
......
...@@ -10,6 +10,12 @@ ...@@ -10,6 +10,12 @@
extern const char xen_hypervisor_callback[]; extern const char xen_hypervisor_callback[];
extern const char xen_failsafe_callback[]; extern const char xen_failsafe_callback[];
void xen_sysenter_target(void);
#ifdef CONFIG_X86_64
void xen_syscall_target(void);
void xen_syscall32_target(void);
#endif
extern void *xen_initial_gdt; extern void *xen_initial_gdt;
struct trap_info; struct trap_info;
......
...@@ -47,6 +47,7 @@ ...@@ -47,6 +47,7 @@
#include <asm/xen/hypervisor.h> #include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h> #include <asm/xen/hypercall.h>
#include <xen/balloon.h> #include <xen/balloon.h>
#include <xen/grant_table.h>
#include "common.h" #include "common.h"
/* /*
...@@ -100,7 +101,7 @@ module_param(log_stats, int, 0644); ...@@ -100,7 +101,7 @@ module_param(log_stats, int, 0644);
#define BLKBACK_INVALID_HANDLE (~0) #define BLKBACK_INVALID_HANDLE (~0)
/* Number of free pages to remove on each call to free_xenballooned_pages */ /* Number of free pages to remove on each call to gnttab_free_pages */
#define NUM_BATCH_FREE_PAGES 10 #define NUM_BATCH_FREE_PAGES 10
static inline int get_free_page(struct xen_blkif *blkif, struct page **page) static inline int get_free_page(struct xen_blkif *blkif, struct page **page)
...@@ -111,7 +112,7 @@ static inline int get_free_page(struct xen_blkif *blkif, struct page **page) ...@@ -111,7 +112,7 @@ static inline int get_free_page(struct xen_blkif *blkif, struct page **page)
if (list_empty(&blkif->free_pages)) { if (list_empty(&blkif->free_pages)) {
BUG_ON(blkif->free_pages_num != 0); BUG_ON(blkif->free_pages_num != 0);
spin_unlock_irqrestore(&blkif->free_pages_lock, flags); spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
return alloc_xenballooned_pages(1, page, false); return gnttab_alloc_pages(1, page);
} }
BUG_ON(blkif->free_pages_num == 0); BUG_ON(blkif->free_pages_num == 0);
page[0] = list_first_entry(&blkif->free_pages, struct page, lru); page[0] = list_first_entry(&blkif->free_pages, struct page, lru);
...@@ -151,14 +152,14 @@ static inline void shrink_free_pagepool(struct xen_blkif *blkif, int num) ...@@ -151,14 +152,14 @@ static inline void shrink_free_pagepool(struct xen_blkif *blkif, int num)
blkif->free_pages_num--; blkif->free_pages_num--;
if (++num_pages == NUM_BATCH_FREE_PAGES) { if (++num_pages == NUM_BATCH_FREE_PAGES) {
spin_unlock_irqrestore(&blkif->free_pages_lock, flags); spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
free_xenballooned_pages(num_pages, page); gnttab_free_pages(num_pages, page);
spin_lock_irqsave(&blkif->free_pages_lock, flags); spin_lock_irqsave(&blkif->free_pages_lock, flags);
num_pages = 0; num_pages = 0;
} }
} }
spin_unlock_irqrestore(&blkif->free_pages_lock, flags); spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
if (num_pages != 0) if (num_pages != 0)
free_xenballooned_pages(num_pages, page); gnttab_free_pages(num_pages, page);
} }
#define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page))) #define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))
...@@ -262,6 +263,17 @@ static void put_persistent_gnt(struct xen_blkif *blkif, ...@@ -262,6 +263,17 @@ static void put_persistent_gnt(struct xen_blkif *blkif,
atomic_dec(&blkif->persistent_gnt_in_use); atomic_dec(&blkif->persistent_gnt_in_use);
} }
static void free_persistent_gnts_unmap_callback(int result,
struct gntab_unmap_queue_data *data)
{
struct completion *c = data->data;
/* BUG_ON used to reproduce existing behaviour,
but is this the best way to deal with this? */
BUG_ON(result);
complete(c);
}
static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root, static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
unsigned int num) unsigned int num)
{ {
...@@ -269,8 +281,17 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root, ...@@ -269,8 +281,17 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
struct persistent_gnt *persistent_gnt; struct persistent_gnt *persistent_gnt;
struct rb_node *n; struct rb_node *n;
int ret = 0;
int segs_to_unmap = 0; int segs_to_unmap = 0;
struct gntab_unmap_queue_data unmap_data;
struct completion unmap_completion;
init_completion(&unmap_completion);
unmap_data.data = &unmap_completion;
unmap_data.done = &free_persistent_gnts_unmap_callback;
unmap_data.pages = pages;
unmap_data.unmap_ops = unmap;
unmap_data.kunmap_ops = NULL;
foreach_grant_safe(persistent_gnt, n, root, node) { foreach_grant_safe(persistent_gnt, n, root, node) {
BUG_ON(persistent_gnt->handle == BUG_ON(persistent_gnt->handle ==
...@@ -285,9 +306,11 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root, ...@@ -285,9 +306,11 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST || if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
!rb_next(&persistent_gnt->node)) { !rb_next(&persistent_gnt->node)) {
ret = gnttab_unmap_refs(unmap, NULL, pages,
segs_to_unmap); unmap_data.count = segs_to_unmap;
BUG_ON(ret); gnttab_unmap_refs_async(&unmap_data);
wait_for_completion(&unmap_completion);
put_free_pages(blkif, pages, segs_to_unmap); put_free_pages(blkif, pages, segs_to_unmap);
segs_to_unmap = 0; segs_to_unmap = 0;
} }
...@@ -653,18 +676,14 @@ void xen_blkbk_free_caches(struct xen_blkif *blkif) ...@@ -653,18 +676,14 @@ void xen_blkbk_free_caches(struct xen_blkif *blkif)
shrink_free_pagepool(blkif, 0 /* All */); shrink_free_pagepool(blkif, 0 /* All */);
} }
/* static unsigned int xen_blkbk_unmap_prepare(
* Unmap the grant references, and also remove the M2P over-rides struct xen_blkif *blkif,
* used in the 'pending_req'. struct grant_page **pages,
*/ unsigned int num,
static void xen_blkbk_unmap(struct xen_blkif *blkif, struct gnttab_unmap_grant_ref *unmap_ops,
struct grant_page *pages[], struct page **unmap_pages)
int num)
{ {
struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
unsigned int i, invcount = 0; unsigned int i, invcount = 0;
int ret;
for (i = 0; i < num; i++) { for (i = 0; i < num; i++) {
if (pages[i]->persistent_gnt != NULL) { if (pages[i]->persistent_gnt != NULL) {
...@@ -674,22 +693,96 @@ static void xen_blkbk_unmap(struct xen_blkif *blkif, ...@@ -674,22 +693,96 @@ static void xen_blkbk_unmap(struct xen_blkif *blkif,
if (pages[i]->handle == BLKBACK_INVALID_HANDLE) if (pages[i]->handle == BLKBACK_INVALID_HANDLE)
continue; continue;
unmap_pages[invcount] = pages[i]->page; unmap_pages[invcount] = pages[i]->page;
gnttab_set_unmap_op(&unmap[invcount], vaddr(pages[i]->page), gnttab_set_unmap_op(&unmap_ops[invcount], vaddr(pages[i]->page),
GNTMAP_host_map, pages[i]->handle); GNTMAP_host_map, pages[i]->handle);
pages[i]->handle = BLKBACK_INVALID_HANDLE; pages[i]->handle = BLKBACK_INVALID_HANDLE;
if (++invcount == BLKIF_MAX_SEGMENTS_PER_REQUEST) { invcount++;
ret = gnttab_unmap_refs(unmap, NULL, unmap_pages,
invcount);
BUG_ON(ret);
put_free_pages(blkif, unmap_pages, invcount);
invcount = 0;
} }
return invcount;
}
static void xen_blkbk_unmap_and_respond_callback(int result, struct gntab_unmap_queue_data *data)
{
struct pending_req* pending_req = (struct pending_req*) (data->data);
struct xen_blkif *blkif = pending_req->blkif;
/* BUG_ON used to reproduce existing behaviour,
but is this the best way to deal with this? */
BUG_ON(result);
put_free_pages(blkif, data->pages, data->count);
make_response(blkif, pending_req->id,
pending_req->operation, pending_req->status);
free_req(blkif, pending_req);
/*
* Make sure the request is freed before releasing blkif,
* or there could be a race between free_req and the
* cleanup done in xen_blkif_free during shutdown.
*
* NB: The fact that we might try to wake up pending_free_wq
* before drain_complete (in case there's a drain going on)
* it's not a problem with our current implementation
* because we can assure there's no thread waiting on
* pending_free_wq if there's a drain going on, but it has
* to be taken into account if the current model is changed.
*/
if (atomic_dec_and_test(&blkif->inflight) && atomic_read(&blkif->drain)) {
complete(&blkif->drain_complete);
} }
xen_blkif_put(blkif);
}
static void xen_blkbk_unmap_and_respond(struct pending_req *req)
{
struct gntab_unmap_queue_data* work = &req->gnttab_unmap_data;
struct xen_blkif *blkif = req->blkif;
struct grant_page **pages = req->segments;
unsigned int invcount;
invcount = xen_blkbk_unmap_prepare(blkif, pages, req->nr_pages,
req->unmap, req->unmap_pages);
work->data = req;
work->done = xen_blkbk_unmap_and_respond_callback;
work->unmap_ops = req->unmap;
work->kunmap_ops = NULL;
work->pages = req->unmap_pages;
work->count = invcount;
gnttab_unmap_refs_async(&req->gnttab_unmap_data);
}
/*
* Unmap the grant references.
*
* This could accumulate ops up to the batch size to reduce the number
* of hypercalls, but since this is only used in error paths there's
* no real need.
*/
static void xen_blkbk_unmap(struct xen_blkif *blkif,
struct grant_page *pages[],
int num)
{
struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
unsigned int invcount = 0;
int ret;
while (num) {
unsigned int batch = min(num, BLKIF_MAX_SEGMENTS_PER_REQUEST);
invcount = xen_blkbk_unmap_prepare(blkif, pages, batch,
unmap, unmap_pages);
if (invcount) { if (invcount) {
ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount); ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
BUG_ON(ret); BUG_ON(ret);
put_free_pages(blkif, unmap_pages, invcount); put_free_pages(blkif, unmap_pages, invcount);
} }
pages += batch;
num -= batch;
}
} }
static int xen_blkbk_map(struct xen_blkif *blkif, static int xen_blkbk_map(struct xen_blkif *blkif,
...@@ -982,32 +1075,8 @@ static void __end_block_io_op(struct pending_req *pending_req, int error) ...@@ -982,32 +1075,8 @@ static void __end_block_io_op(struct pending_req *pending_req, int error)
* the grant references associated with 'request' and provide * the grant references associated with 'request' and provide
* the proper response on the ring. * the proper response on the ring.
*/ */
if (atomic_dec_and_test(&pending_req->pendcnt)) { if (atomic_dec_and_test(&pending_req->pendcnt))
struct xen_blkif *blkif = pending_req->blkif; xen_blkbk_unmap_and_respond(pending_req);
xen_blkbk_unmap(blkif,
pending_req->segments,
pending_req->nr_pages);
make_response(blkif, pending_req->id,
pending_req->operation, pending_req->status);
free_req(blkif, pending_req);
/*
* Make sure the request is freed before releasing blkif,
* or there could be a race between free_req and the
* cleanup done in xen_blkif_free during shutdown.
*
* NB: The fact that we might try to wake up pending_free_wq
* before drain_complete (in case there's a drain going on)
* it's not a problem with our current implementation
* because we can assure there's no thread waiting on
* pending_free_wq if there's a drain going on, but it has
* to be taken into account if the current model is changed.
*/
if (atomic_dec_and_test(&blkif->inflight) && atomic_read(&blkif->drain)) {
complete(&blkif->drain_complete);
}
xen_blkif_put(blkif);
}
} }
/* /*
......
...@@ -350,6 +350,9 @@ struct pending_req { ...@@ -350,6 +350,9 @@ struct pending_req {
struct grant_page *indirect_pages[MAX_INDIRECT_PAGES]; struct grant_page *indirect_pages[MAX_INDIRECT_PAGES];
struct seg_buf seg[MAX_INDIRECT_SEGMENTS]; struct seg_buf seg[MAX_INDIRECT_SEGMENTS];
struct bio *biolist[MAX_INDIRECT_SEGMENTS]; struct bio *biolist[MAX_INDIRECT_SEGMENTS];
struct gnttab_unmap_grant_ref unmap[MAX_INDIRECT_SEGMENTS];
struct page *unmap_pages[MAX_INDIRECT_SEGMENTS];
struct gntab_unmap_queue_data gnttab_unmap_data;
}; };
......
...@@ -483,9 +483,8 @@ int xenvif_init_queue(struct xenvif_queue *queue) ...@@ -483,9 +483,8 @@ int xenvif_init_queue(struct xenvif_queue *queue)
* better enable it. The long term solution would be to use just a * better enable it. The long term solution would be to use just a
* bunch of valid page descriptors, without dependency on ballooning * bunch of valid page descriptors, without dependency on ballooning
*/ */
err = alloc_xenballooned_pages(MAX_PENDING_REQS, err = gnttab_alloc_pages(MAX_PENDING_REQS,
queue->mmap_pages, queue->mmap_pages);
false);
if (err) { if (err) {
netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n"); netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
return -ENOMEM; return -ENOMEM;
...@@ -664,7 +663,7 @@ void xenvif_disconnect(struct xenvif *vif) ...@@ -664,7 +663,7 @@ void xenvif_disconnect(struct xenvif *vif)
*/ */
void xenvif_deinit_queue(struct xenvif_queue *queue) void xenvif_deinit_queue(struct xenvif_queue *queue)
{ {
free_xenballooned_pages(MAX_PENDING_REQS, queue->mmap_pages); gnttab_free_pages(MAX_PENDING_REQS, queue->mmap_pages);
} }
void xenvif_free(struct xenvif *vif) void xenvif_free(struct xenvif *vif)
......
...@@ -314,9 +314,7 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue, ...@@ -314,9 +314,7 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,
static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb, static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb,
struct netrx_pending_operations *npo, struct netrx_pending_operations *npo,
struct page *page, unsigned long size, struct page *page, unsigned long size,
unsigned long offset, int *head, unsigned long offset, int *head)
struct xenvif_queue *foreign_queue,
grant_ref_t foreign_gref)
{ {
struct gnttab_copy *copy_gop; struct gnttab_copy *copy_gop;
struct xenvif_rx_meta *meta; struct xenvif_rx_meta *meta;
...@@ -333,6 +331,8 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb ...@@ -333,6 +331,8 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
offset &= ~PAGE_MASK; offset &= ~PAGE_MASK;
while (size > 0) { while (size > 0) {
struct xen_page_foreign *foreign;
BUG_ON(offset >= PAGE_SIZE); BUG_ON(offset >= PAGE_SIZE);
BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET); BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);
...@@ -361,9 +361,10 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb ...@@ -361,9 +361,10 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
copy_gop->flags = GNTCOPY_dest_gref; copy_gop->flags = GNTCOPY_dest_gref;
copy_gop->len = bytes; copy_gop->len = bytes;
if (foreign_queue) { foreign = xen_page_foreign(page);
copy_gop->source.domid = foreign_queue->vif->domid; if (foreign) {
copy_gop->source.u.ref = foreign_gref; copy_gop->source.domid = foreign->domid;
copy_gop->source.u.ref = foreign->gref;
copy_gop->flags |= GNTCOPY_source_gref; copy_gop->flags |= GNTCOPY_source_gref;
} else { } else {
copy_gop->source.domid = DOMID_SELF; copy_gop->source.domid = DOMID_SELF;
...@@ -405,35 +406,6 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb ...@@ -405,35 +406,6 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
} }
} }
/*
* Find the grant ref for a given frag in a chain of struct ubuf_info's
* skb: the skb itself
* i: the frag's number
* ubuf: a pointer to an element in the chain. It should not be NULL
*
* Returns a pointer to the element in the chain where the page were found. If
* not found, returns NULL.
* See the definition of callback_struct in common.h for more details about
* the chain.
*/
static const struct ubuf_info *xenvif_find_gref(const struct sk_buff *const skb,
const int i,
const struct ubuf_info *ubuf)
{
struct xenvif_queue *foreign_queue = ubuf_to_queue(ubuf);
do {
u16 pending_idx = ubuf->desc;
if (skb_shinfo(skb)->frags[i].page.p ==
foreign_queue->mmap_pages[pending_idx])
break;
ubuf = (struct ubuf_info *) ubuf->ctx;
} while (ubuf);
return ubuf;
}
/* /*
* Prepare an SKB to be transmitted to the frontend. * Prepare an SKB to be transmitted to the frontend.
* *
...@@ -459,8 +431,6 @@ static int xenvif_gop_skb(struct sk_buff *skb, ...@@ -459,8 +431,6 @@ static int xenvif_gop_skb(struct sk_buff *skb,
int head = 1; int head = 1;
int old_meta_prod; int old_meta_prod;
int gso_type; int gso_type;
const struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg;
const struct ubuf_info *const head_ubuf = ubuf;
old_meta_prod = npo->meta_prod; old_meta_prod = npo->meta_prod;
...@@ -507,68 +477,16 @@ static int xenvif_gop_skb(struct sk_buff *skb, ...@@ -507,68 +477,16 @@ static int xenvif_gop_skb(struct sk_buff *skb,
len = skb_tail_pointer(skb) - data; len = skb_tail_pointer(skb) - data;
xenvif_gop_frag_copy(queue, skb, npo, xenvif_gop_frag_copy(queue, skb, npo,
virt_to_page(data), len, offset, &head, virt_to_page(data), len, offset, &head);
NULL,
0);
data += len; data += len;
} }
for (i = 0; i < nr_frags; i++) { for (i = 0; i < nr_frags; i++) {
/* This variable also signals whether foreign_gref has a real
* value or not.
*/
struct xenvif_queue *foreign_queue = NULL;
grant_ref_t foreign_gref;
if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
(ubuf->callback == &xenvif_zerocopy_callback)) {
const struct ubuf_info *const startpoint = ubuf;
/* Ideally ubuf points to the chain element which
* belongs to this frag. Or if frags were removed from
* the beginning, then shortly before it.
*/
ubuf = xenvif_find_gref(skb, i, ubuf);
/* Try again from the beginning of the list, if we
* haven't tried from there. This only makes sense in
* the unlikely event of reordering the original frags.
* For injected local pages it's an unnecessary second
* run.
*/
if (unlikely(!ubuf) && startpoint != head_ubuf)
ubuf = xenvif_find_gref(skb, i, head_ubuf);
if (likely(ubuf)) {
u16 pending_idx = ubuf->desc;
foreign_queue = ubuf_to_queue(ubuf);
foreign_gref =
foreign_queue->pending_tx_info[pending_idx].req.gref;
/* Just a safety measure. If this was the last
* element on the list, the for loop will
* iterate again if a local page were added to
* the end. Using head_ubuf here prevents the
* second search on the chain. Or the original
* frags changed order, but that's less likely.
* In any way, ubuf shouldn't be NULL.
*/
ubuf = ubuf->ctx ?
(struct ubuf_info *) ubuf->ctx :
head_ubuf;
} else
/* This frag was a local page, added to the
* array after the skb left netback.
*/
ubuf = head_ubuf;
}
xenvif_gop_frag_copy(queue, skb, npo, xenvif_gop_frag_copy(queue, skb, npo,
skb_frag_page(&skb_shinfo(skb)->frags[i]), skb_frag_page(&skb_shinfo(skb)->frags[i]),
skb_frag_size(&skb_shinfo(skb)->frags[i]), skb_frag_size(&skb_shinfo(skb)->frags[i]),
skb_shinfo(skb)->frags[i].page_offset, skb_shinfo(skb)->frags[i].page_offset,
&head, &head);
foreign_queue,
foreign_queue ? foreign_gref : UINT_MAX);
} }
return npo->meta_prod - old_meta_prod; return npo->meta_prod - old_meta_prod;
...@@ -1241,12 +1159,6 @@ static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb) ...@@ -1241,12 +1159,6 @@ static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
/* Take an extra reference to offset network stack's put_page */ /* Take an extra reference to offset network stack's put_page */
get_page(queue->mmap_pages[pending_idx]); get_page(queue->mmap_pages[pending_idx]);
} }
/* FIXME: __skb_fill_page_desc set this to true because page->pfmemalloc
* overlaps with "index", and "mapping" is not set. I think mapping
* should be set. If delivered to local stack, it would drop this
* skb in sk_filter unless the socket has the right to use it.
*/
skb->pfmemalloc = false;
} }
static int xenvif_get_extras(struct xenvif_queue *queue, static int xenvif_get_extras(struct xenvif_queue *queue,
......
...@@ -92,7 +92,6 @@ EXPORT_SYMBOL_GPL(balloon_stats); ...@@ -92,7 +92,6 @@ EXPORT_SYMBOL_GPL(balloon_stats);
/* We increase/decrease in batches which fit in a page */ /* We increase/decrease in batches which fit in a page */
static xen_pfn_t frame_list[PAGE_SIZE / sizeof(unsigned long)]; static xen_pfn_t frame_list[PAGE_SIZE / sizeof(unsigned long)];
static DEFINE_PER_CPU(struct page *, balloon_scratch_page);
/* List of ballooned pages, threaded through the mem_map array. */ /* List of ballooned pages, threaded through the mem_map array. */
...@@ -423,22 +422,12 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp) ...@@ -423,22 +422,12 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
page = pfn_to_page(pfn); page = pfn_to_page(pfn);
#ifdef CONFIG_XEN_HAVE_PVMMU #ifdef CONFIG_XEN_HAVE_PVMMU
/*
* Ballooned out frames are effectively replaced with
* a scratch frame. Ensure direct mappings and the
* p2m are consistent.
*/
if (!xen_feature(XENFEAT_auto_translated_physmap)) { if (!xen_feature(XENFEAT_auto_translated_physmap)) {
if (!PageHighMem(page)) { if (!PageHighMem(page)) {
struct page *scratch_page = get_balloon_scratch_page();
ret = HYPERVISOR_update_va_mapping( ret = HYPERVISOR_update_va_mapping(
(unsigned long)__va(pfn << PAGE_SHIFT), (unsigned long)__va(pfn << PAGE_SHIFT),
pfn_pte(page_to_pfn(scratch_page), __pte_ma(0), 0);
PAGE_KERNEL_RO), 0);
BUG_ON(ret); BUG_ON(ret);
put_balloon_scratch_page();
} }
__set_phys_to_machine(pfn, INVALID_P2M_ENTRY); __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
} }
...@@ -500,18 +489,6 @@ static void balloon_process(struct work_struct *work) ...@@ -500,18 +489,6 @@ static void balloon_process(struct work_struct *work)
mutex_unlock(&balloon_mutex); mutex_unlock(&balloon_mutex);
} }
struct page *get_balloon_scratch_page(void)
{
struct page *ret = get_cpu_var(balloon_scratch_page);
BUG_ON(ret == NULL);
return ret;
}
void put_balloon_scratch_page(void)
{
put_cpu_var(balloon_scratch_page);
}
/* Resets the Xen limit, sets new target, and kicks off processing. */ /* Resets the Xen limit, sets new target, and kicks off processing. */
void balloon_set_new_target(unsigned long target) void balloon_set_new_target(unsigned long target)
{ {
...@@ -605,61 +582,13 @@ static void __init balloon_add_region(unsigned long start_pfn, ...@@ -605,61 +582,13 @@ static void __init balloon_add_region(unsigned long start_pfn,
} }
} }
static int alloc_balloon_scratch_page(int cpu)
{
if (per_cpu(balloon_scratch_page, cpu) != NULL)
return 0;
per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL);
if (per_cpu(balloon_scratch_page, cpu) == NULL) {
pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu);
return -ENOMEM;
}
return 0;
}
static int balloon_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
int cpu = (long)hcpu;
switch (action) {
case CPU_UP_PREPARE:
if (alloc_balloon_scratch_page(cpu))
return NOTIFY_BAD;
break;
default:
break;
}
return NOTIFY_OK;
}
static struct notifier_block balloon_cpu_notifier = {
.notifier_call = balloon_cpu_notify,
};
static int __init balloon_init(void) static int __init balloon_init(void)
{ {
int i, cpu; int i;
if (!xen_domain()) if (!xen_domain())
return -ENODEV; return -ENODEV;
if (!xen_feature(XENFEAT_auto_translated_physmap)) {
register_cpu_notifier(&balloon_cpu_notifier);
get_online_cpus();
for_each_online_cpu(cpu) {
if (alloc_balloon_scratch_page(cpu)) {
put_online_cpus();
unregister_cpu_notifier(&balloon_cpu_notifier);
return -ENOMEM;
}
}
put_online_cpus();
}
pr_info("Initialising balloon driver\n"); pr_info("Initialising balloon driver\n");
balloon_stats.current_pages = xen_pv_domain() balloon_stats.current_pages = xen_pv_domain()
...@@ -696,15 +625,4 @@ static int __init balloon_init(void) ...@@ -696,15 +625,4 @@ static int __init balloon_init(void)
subsys_initcall(balloon_init); subsys_initcall(balloon_init);
static int __init balloon_clear(void)
{
int cpu;
for_each_possible_cpu(cpu)
per_cpu(balloon_scratch_page, cpu) = NULL;
return 0;
}
early_initcall(balloon_clear);
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");

...@@ -42,6 +42,7 @@ ...@@ -42,6 +42,7 @@
#include <linux/io.h> #include <linux/io.h>
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/hardirq.h> #include <linux/hardirq.h>
#include <linux/workqueue.h>
#include <xen/xen.h> #include <xen/xen.h>
#include <xen/interface/xen.h> #include <xen/interface/xen.h>
...@@ -50,6 +51,7 @@ ...@@ -50,6 +51,7 @@
#include <xen/interface/memory.h> #include <xen/interface/memory.h>
#include <xen/hvc-console.h> #include <xen/hvc-console.h>
#include <xen/swiotlb-xen.h> #include <xen/swiotlb-xen.h>
#include <xen/balloon.h>
#include <asm/xen/hypercall.h> #include <asm/xen/hypercall.h>
#include <asm/xen/interface.h> #include <asm/xen/interface.h>
...@@ -671,6 +673,59 @@ void gnttab_free_auto_xlat_frames(void) ...@@ -671,6 +673,59 @@ void gnttab_free_auto_xlat_frames(void)
} }
EXPORT_SYMBOL_GPL(gnttab_free_auto_xlat_frames); EXPORT_SYMBOL_GPL(gnttab_free_auto_xlat_frames);
/**
* gnttab_alloc_pages - alloc pages suitable for grant mapping into
* @nr_pages: number of pages to alloc
* @pages: returns the pages
*/
int gnttab_alloc_pages(int nr_pages, struct page **pages)
{
int i;
int ret;
ret = alloc_xenballooned_pages(nr_pages, pages, false);
if (ret < 0)
return ret;
for (i = 0; i < nr_pages; i++) {
#if BITS_PER_LONG < 64
struct xen_page_foreign *foreign;
foreign = kzalloc(sizeof(*foreign), GFP_KERNEL);
if (!foreign) {
gnttab_free_pages(nr_pages, pages);
return -ENOMEM;
}
set_page_private(pages[i], (unsigned long)foreign);
#endif
SetPagePrivate(pages[i]);
}
return 0;
}
EXPORT_SYMBOL(gnttab_alloc_pages);
/**
* gnttab_free_pages - free pages allocated by gnttab_alloc_pages()
* @nr_pages; number of pages to free
* @pages: the pages
*/
void gnttab_free_pages(int nr_pages, struct page **pages)
{
int i;
for (i = 0; i < nr_pages; i++) {
if (PagePrivate(pages[i])) {
#if BITS_PER_LONG < 64
kfree((void *)page_private(pages[i]));
#endif
ClearPagePrivate(pages[i]);
}
}
free_xenballooned_pages(nr_pages, pages);
}
EXPORT_SYMBOL(gnttab_free_pages);
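
The domid/grant-ref pair recorded for each mapped page is what xen-netback reads back via xen_page_foreign() later in this diff. The accessor itself lives in include/xen/grant_table.h, whose hunk is not shown in this view; the sketch below is consistent with the storage set up by gnttab_alloc_pages() above and gnttab_map_refs() below, with the exact header contents treated as an assumption. PageForeign() comes from the PG_foreign alias added to include/linux/page-flags.h at the end of this diff.

#include <linux/mm.h>
#include <xen/interface/grant_table.h>

struct xen_page_foreign {
        domid_t domid;
        grant_ref_t gref;
};

static inline struct xen_page_foreign *xen_page_foreign(struct page *page)
{
        if (!PageForeign(page))
                return NULL;
#if BITS_PER_LONG < 64
        /* 32-bit: gnttab_alloc_pages() kzalloc'd the struct and stashed a
         * pointer to it in page->private. */
        return (struct xen_page_foreign *)page_private(page);
#else
        /* 64-bit: domid + gref fit directly inside page->private. */
        return (struct xen_page_foreign *)&page->private;
#endif
}

xen-netback's xenvif_gop_frag_copy() (further down) then simply does foreign = xen_page_foreign(page) and, if non-NULL, fills the grant-copy source from foreign->domid / foreign->gref, instead of chasing the grant ref through the skb's ubuf_info chain.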
/* Handling of paged out grant targets (GNTST_eagain) */ /* Handling of paged out grant targets (GNTST_eagain) */
#define MAX_DELAY 256 #define MAX_DELAY 256
static inline void static inline void
...@@ -727,30 +782,87 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops, ...@@ -727,30 +782,87 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
if (ret) if (ret)
return ret; return ret;
for (i = 0; i < count; i++) {
/* Retry eagain maps */ /* Retry eagain maps */
for (i = 0; i < count; i++)
if (map_ops[i].status == GNTST_eagain) if (map_ops[i].status == GNTST_eagain)
gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, map_ops + i, gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, map_ops + i,
&map_ops[i].status, __func__); &map_ops[i].status, __func__);
if (map_ops[i].status == GNTST_okay) {
struct xen_page_foreign *foreign;
SetPageForeign(pages[i]);
foreign = xen_page_foreign(pages[i]);
foreign->domid = map_ops[i].dom;
foreign->gref = map_ops[i].ref;
}
}
return set_foreign_p2m_mapping(map_ops, kmap_ops, pages, count); return set_foreign_p2m_mapping(map_ops, kmap_ops, pages, count);
} }
EXPORT_SYMBOL_GPL(gnttab_map_refs); EXPORT_SYMBOL_GPL(gnttab_map_refs);
int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops, int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
struct gnttab_map_grant_ref *kmap_ops, struct gnttab_unmap_grant_ref *kunmap_ops,
struct page **pages, unsigned int count) struct page **pages, unsigned int count)
{ {
unsigned int i;
int ret; int ret;
ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count); ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
if (ret) if (ret)
return ret; return ret;
return clear_foreign_p2m_mapping(unmap_ops, kmap_ops, pages, count); for (i = 0; i < count; i++)
ClearPageForeign(pages[i]);
return clear_foreign_p2m_mapping(unmap_ops, kunmap_ops, pages, count);
} }
EXPORT_SYMBOL_GPL(gnttab_unmap_refs); EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
#define GNTTAB_UNMAP_REFS_DELAY 5
static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item);
static void gnttab_unmap_work(struct work_struct *work)
{
struct gntab_unmap_queue_data
*unmap_data = container_of(work,
struct gntab_unmap_queue_data,
gnttab_work.work);
if (unmap_data->age != UINT_MAX)
unmap_data->age++;
__gnttab_unmap_refs_async(unmap_data);
}
static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
{
int ret;
int pc;
for (pc = 0; pc < item->count; pc++) {
if (page_count(item->pages[pc]) > 1) {
unsigned long delay = GNTTAB_UNMAP_REFS_DELAY * (item->age + 1);
schedule_delayed_work(&item->gnttab_work,
msecs_to_jiffies(delay));
return;
}
}
ret = gnttab_unmap_refs(item->unmap_ops, item->kunmap_ops,
item->pages, item->count);
item->done(ret, item);
}
void gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
{
INIT_DELAYED_WORK(&item->gnttab_work, gnttab_unmap_work);
item->age = 0;
__gnttab_unmap_refs_async(item);
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs_async);
static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes) static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
{ {
int rc; int rc;
......
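
gnttab_unmap_refs_async() above defers the unmap while any page still carries extra references (for example a bio or network skb still in flight) and then invokes the caller's done callback. A caller that needs to wait synchronously can pair it with a completion, exactly as xen-blkback's free_persistent_gnts() does earlier in this diff; a minimal sketch with illustrative demo_ names:

#include <linux/kernel.h>
#include <linux/completion.h>
#include <xen/grant_table.h>

static void demo_unmap_done(int result, struct gntab_unmap_queue_data *data)
{
        struct completion *c = data->data;

        WARN_ON(result);
        complete(c);
}

static int demo_unmap_wait(struct gnttab_unmap_grant_ref *unmap_ops,
                           struct page **pages, unsigned int count)
{
        struct gntab_unmap_queue_data item;
        struct completion done;

        init_completion(&done);
        item.data       = &done;
        item.done       = demo_unmap_done;
        item.unmap_ops  = unmap_ops;
        item.kunmap_ops = NULL;
        item.pages      = pages;
        item.count      = count;

        /* Keeping item on the stack is safe because we wait for the
         * callback before returning. */
        gnttab_unmap_refs_async(&item);
        wait_for_completion(&done);
        return 0;
}
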
...@@ -105,10 +105,16 @@ static void do_suspend(void) ...@@ -105,10 +105,16 @@ static void do_suspend(void)
err = freeze_processes(); err = freeze_processes();
if (err) { if (err) {
pr_err("%s: freeze failed %d\n", __func__, err); pr_err("%s: freeze processes failed %d\n", __func__, err);
goto out; goto out;
} }
err = freeze_kernel_threads();
if (err) {
pr_err("%s: freeze kernel threads failed %d\n", __func__, err);
goto out_thaw;
}
err = dpm_suspend_start(PMSG_FREEZE); err = dpm_suspend_start(PMSG_FREEZE);
if (err) { if (err) {
pr_err("%s: dpm_suspend_start %d\n", __func__, err); pr_err("%s: dpm_suspend_start %d\n", __func__, err);
......
...@@ -374,7 +374,7 @@ static struct frontswap_ops tmem_frontswap_ops = { ...@@ -374,7 +374,7 @@ static struct frontswap_ops tmem_frontswap_ops = {
}; };
#endif #endif
static int xen_tmem_init(void) static int __init xen_tmem_init(void)
{ {
if (!xen_domain()) if (!xen_domain())
return 0; return 0;
......
@@ -227,7 +227,7 @@ static void put_free_pages(struct page **page, int num)
 		return;
 	if (i > scsiback_max_buffer_pages) {
 		n = min(num, i - scsiback_max_buffer_pages);
-		free_xenballooned_pages(n, page + num - n);
+		gnttab_free_pages(n, page + num - n);
 		n = num - n;
 	}
 	spin_lock_irqsave(&free_pages_lock, flags);
@@ -244,7 +244,7 @@ static int get_free_page(struct page **page)
 	spin_lock_irqsave(&free_pages_lock, flags);
 	if (list_empty(&scsiback_free_pages)) {
 		spin_unlock_irqrestore(&free_pages_lock, flags);
-		return alloc_xenballooned_pages(1, page, false);
+		return gnttab_alloc_pages(1, page);
 	}
 	page[0] = list_first_entry(&scsiback_free_pages, struct page, lru);
 	list_del(&page[0]->lru);
@@ -2106,7 +2106,7 @@ static void __exit scsiback_exit(void)
 	while (free_pages_num) {
 		if (get_free_page(&page))
 			BUG();
-		free_xenballooned_pages(1, &page);
+		gnttab_free_pages(1, &page);
 	}
 	scsiback_deregister_configfs();
 	xenbus_unregister_driver(&scsiback_driver);
...
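The scsiback page pool now obtains its pages through the new grant-table helpers instead of calling the balloon driver directly, so the pages come with the bookkeeping gnttab_map_refs() expects. A hedged sketch of the two helpers in isolation (the surrounding function is illustrative; only gnttab_alloc_pages() and gnttab_free_pages() are from the series):

#include <linux/kernel.h>
#include <xen/grant_table.h>

/* Illustrative: grab a batch of pages suitable for grant mapping, then release them. */
static int demo_grant_page_pool(void)
{
	struct page *pages[16];
	int err;

	err = gnttab_alloc_pages(ARRAY_SIZE(pages), pages);
	if (err)
		return err;

	/* ... gnttab_set_map_op() / gnttab_map_refs() would use these pages ... */

	/* Pages must be fully unmapped again before being handed back. */
	gnttab_free_pages(ARRAY_SIZE(pages), pages);
	return 0;
}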
@@ -326,10 +326,13 @@ static int xenbus_write_transaction(unsigned msg_type,
 	}
 
 	if (msg_type == XS_TRANSACTION_START) {
-		trans->handle.id = simple_strtoul(reply, NULL, 0);
-
-		list_add(&trans->list, &u->transactions);
-	} else if (msg_type == XS_TRANSACTION_END) {
+		if (u->u.msg.type == XS_ERROR)
+			kfree(trans);
+		else {
+			trans->handle.id = simple_strtoul(reply, NULL, 0);
+			list_add(&trans->list, &u->transactions);
+		}
+	} else if (u->u.msg.type == XS_TRANSACTION_END) {
 		list_for_each_entry(trans, &u->transactions, list)
 			if (trans->handle.id == u->u.msg.tx_id)
 				break;
...
@@ -290,6 +290,14 @@ struct vm_operations_struct {
 	/* called by sys_remap_file_pages() to populate non-linear mapping */
 	int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
 			   unsigned long size, pgoff_t pgoff);
+
+	/*
+	 * Called by vm_normal_page() for special PTEs to find the
+	 * page for @addr. This is useful if the default behavior
+	 * (using pte_page()) would not find the correct page.
+	 */
+	struct page *(*find_special_page)(struct vm_area_struct *vma,
+					  unsigned long addr);
 };
 
 struct mmu_gather;
...
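A driver that installs special PTEs can use the new hook to tell vm_normal_page() which struct page backs a given address. A minimal sketch of wiring it up, assuming a hypothetical driver (`struct demo_priv`, demo_* names are made up; in this series the real user is xen/gntdev, which records its grant-mapped pages at mmap time):

#include <linux/mm.h>

/* Hypothetical driver state: one page per VMA offset, recorded at mmap time. */
struct demo_priv {
	struct page **pages;
};

static struct page *demo_find_special_page(struct vm_area_struct *vma,
					    unsigned long addr)
{
	struct demo_priv *priv = vma->vm_private_data;

	/* pte_page() would be wrong for these special PTEs, so map the
	 * faulting address back to the page the driver installed itself. */
	return priv->pages[(addr - vma->vm_start) >> PAGE_SHIFT];
}

static const struct vm_operations_struct demo_vm_ops = {
	.find_special_page = demo_find_special_page,
	/* .open / .close / .fault as usual */
};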
@@ -121,8 +121,12 @@ enum pageflags {
 	PG_fscache = PG_private_2,	/* page backed by cache */
 
 	/* XEN */
+	/* Pinned in Xen as a read-only pagetable page. */
 	PG_pinned = PG_owner_priv_1,
+	/* Pinned as part of domain save (see xen_mm_pin_all()). */
 	PG_savepinned = PG_dirty,
+	/* Has a grant mapping of another (foreign) domain's page. */
+	PG_foreign = PG_owner_priv_1,
 
 	/* SLOB */
 	PG_slob_free = PG_private,
@@ -215,6 +219,7 @@ __PAGEFLAG(Slab, slab)
 PAGEFLAG(Checked, checked)	/* Used by some filesystems */
 PAGEFLAG(Pinned, pinned) TESTSCFLAG(Pinned, pinned)	/* Xen */
 PAGEFLAG(SavePinned, savepinned);	/* Xen */
+PAGEFLAG(Foreign, foreign);	/* Xen */
 PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved)
 PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked)
 	__SETPAGEFLAG(SwapBacked, swapbacked)
...
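PG_foreign aliases PG_owner_priv_1 (the same bit as PG_pinned), which works because pinned pagetable pages and grant-mapped foreign pages are used by disjoint code paths. The PAGEFLAG() line generates the usual accessors; a small sketch of what that gives Xen code (the helper below is illustrative, not from this series):

#include <linux/page-flags.h>
#include <linux/printk.h>

/* Illustrative: the accessors generated by PAGEFLAG(Foreign, foreign). */
static void demo_mark_foreign(struct page *page)
{
	SetPageForeign(page);		/* set PG_foreign */

	if (PageForeign(page))		/* test PG_foreign */
		pr_debug("page %p is a foreign grant mapping\n", page);

	ClearPageForeign(page);		/* clear PG_foreign again */
}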
@@ -45,6 +45,8 @@
 #include <asm/xen/hypervisor.h>
 
 #include <xen/features.h>
+#include <linux/mm_types.h>
+#include <linux/page-flags.h>
 
 #define GNTTAB_RESERVED_XENSTORE 1
@@ -58,6 +60,22 @@ struct gnttab_free_callback {
 	u16 count;
 };
 
+struct gntab_unmap_queue_data;
+
+typedef void (*gnttab_unmap_refs_done)(int result, struct gntab_unmap_queue_data *data);
+
+struct gntab_unmap_queue_data
+{
+	struct delayed_work	gnttab_work;
+	void *data;
+	gnttab_unmap_refs_done	done;
+	struct gnttab_unmap_grant_ref *unmap_ops;
+	struct gnttab_unmap_grant_ref *kunmap_ops;
+	struct page **pages;
+	unsigned int count;
+	unsigned int age;
+};
+
 int gnttab_init(void);
 int gnttab_suspend(void);
 int gnttab_resume(void);
@@ -163,12 +181,17 @@ void gnttab_free_auto_xlat_frames(void);
 
 #define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr))
 
+int gnttab_alloc_pages(int nr_pages, struct page **pages);
+void gnttab_free_pages(int nr_pages, struct page **pages);
+
 int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
 		    struct gnttab_map_grant_ref *kmap_ops,
 		    struct page **pages, unsigned int count);
 int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
-		      struct gnttab_map_grant_ref *kunmap_ops,
+		      struct gnttab_unmap_grant_ref *kunmap_ops,
 		      struct page **pages, unsigned int count);
+void gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item);
 
 /* Perform a batch of grant map/copy operations. Retry every batch slot
  * for which the hypervisor returns GNTST_eagain. This is typically due
@@ -182,4 +205,22 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
 void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count);
 void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count);
 
+struct xen_page_foreign {
+	domid_t domid;
+	grant_ref_t gref;
+};
+
+static inline struct xen_page_foreign *xen_page_foreign(struct page *page)
+{
+	if (!PageForeign(page))
+		return NULL;
+#if BITS_PER_LONG < 64
+	return (struct xen_page_foreign *)page->private;
+#else
+	BUILD_BUG_ON(sizeof(struct xen_page_foreign) > BITS_PER_LONG);
+	return (struct xen_page_foreign *)&page->private;
+#endif
+}
+
 #endif /* __ASM_GNTTAB_H__ */
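With PG_foreign set and the originating domid/gref stashed in page->private, a backend can recover which grant a page came from directly from the struct page; in this series xen-netback switches to this instead of keeping its own tracking. A hedged sketch of reading the information back (the function name is illustrative):

#include <linux/printk.h>
#include <xen/grant_table.h>

/* Illustrative: look up the grant behind a page that was grant-mapped for us. */
static void demo_show_grant(struct page *page)
{
	struct xen_page_foreign *foreign = xen_page_foreign(page);

	if (!foreign) {
		pr_debug("page %p is not a foreign mapping\n", page);
		return;
	}

	pr_debug("page %p maps gref %u from domain %u\n",
		 page, foreign->gref, foreign->domid);
}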
@@ -41,6 +41,12 @@
 /* x86: Does this Xen host support the MMU_PT_UPDATE_PRESERVE_AD hypercall? */
 #define XENFEAT_mmu_pt_update_preserve_ad 5
 
+/*
+ * If set, GNTTABOP_map_grant_ref honors flags to be placed into guest kernel
+ * available pte bits.
+ */
+#define XENFEAT_gnttab_map_avail_bits 7
+
 /* x86: Does this Xen host support the HVM callback vector type? */
 #define XENFEAT_hvm_callback_vector 8
...
@@ -525,6 +525,13 @@ DEFINE_GUEST_HANDLE_STRUCT(gnttab_cache_flush);
 #define _GNTMAP_contains_pte (4)
 #define GNTMAP_contains_pte (1<<_GNTMAP_contains_pte)
 
+/*
+ * Bits to be placed in guest kernel available PTE bits (architecture
+ * dependent; only supported when XENFEAT_gnttab_map_avail_bits is set).
+ */
+#define _GNTMAP_guest_avail0 (16)
+#define GNTMAP_guest_avail_mask ((uint32_t)~0 << _GNTMAP_guest_avail0)
+
 /*
  * Values for error status returns. All errors are -ve.
  */
...
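The feature bit above and these GNTMAP_guest_avail bits work together: when the hypervisor advertises XENFEAT_gnttab_map_avail_bits, a guest may ask GNTTABOP_map_grant_ref to set architecture-specific "available" PTE bits on its behalf (in this series, x86 PV gntdev uses this to have the mapped userspace PTEs marked special). A minimal sketch, assuming an illustrative wrapper and extra-bits value; the flag combination is an example, not the exact set gntdev uses:

#include <xen/features.h>
#include <xen/grant_table.h>

/* Illustrative: request extra guest-available PTE bits only when supported. */
static void demo_set_map_op(struct gnttab_map_grant_ref *op, phys_addr_t addr,
			    grant_ref_t ref, domid_t domid, uint32_t extra_pte_bits)
{
	uint32_t flags = GNTMAP_host_map | GNTMAP_contains_pte;

	if (xen_feature(XENFEAT_gnttab_map_avail_bits))
		flags |= (extra_pte_bits << _GNTMAP_guest_avail0) &
			 GNTMAP_guest_avail_mask;

	gnttab_set_map_op(op, addr, flags, ref, domid);
}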
@@ -754,6 +754,8 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
 	if (HAVE_PTE_SPECIAL) {
 		if (likely(!pte_special(pte)))
 			goto check_pfn;
+		if (vma->vm_ops && vma->vm_ops->find_special_page)
+			return vma->vm_ops->find_special_page(vma, addr);
 		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
 			return NULL;
 		if (!is_zero_pfn(pfn))
...