Commit 162e3717 authored by David Vrabel, committed by Konrad Rzeszutek Wilk

x86/xen: safely map and unmap grant frames when in atomic context

arch_gnttab_map_frames() and arch_gnttab_unmap_frames() are called in
atomic context but were calling alloc_vm_area() which might sleep.

Also, if a driver attempts to allocate a grant ref from an interrupt
and the table needs expanding, then the CPU may already be in lazy MMU
mode and apply_to_page_range() will BUG when it tries to re-enable
lazy MMU mode.
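
For illustration, a hedged sketch (not code from this patch) of why that
BUGs: apply_to_page_range() brackets its PTE walk with lazy-MMU
enter/leave, and on Xen PV the enter path BUG_ONs if the CPU is already
in a lazy mode, so a nested entry from interrupt context is fatal.

/* Illustrative only -- roughly the shape of apply_to_page_range()'s
 * inner PTE loop; pte_walk_sketch is a made-up name. */
static int pte_walk_sketch(pte_t *ptep, unsigned long addr, pte_t val)
{
	arch_enter_lazy_mmu_mode();	/* Xen PV BUG_ONs here if the CPU
					 * is already in lazy MMU mode */
	set_pte_at(&init_mm, addr, ptep, val);
	arch_leave_lazy_mmu_mode();
	return 0;
}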

These two functions are only used in PV guests.

Introduce arch_gnttab_init() to allocate the virtual address space in
advance.
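
As a minimal sketch of this pre-allocation pattern (simplified from the
patch's arch_gnttab_valloc() in the diff below; gnttab_area and
gnttab_ptes are illustrative names): the VA range and its PTE pointers
are obtained once, at init time, where sleeping is allowed.

#include <linux/slab.h>
#include <linux/vmalloc.h>

static struct vm_struct *gnttab_area;	/* illustrative names */
static pte_t **gnttab_ptes;

static int example_gnttab_valloc(unsigned int nr_frames)
{
	gnttab_ptes = kcalloc(nr_frames, sizeof(pte_t *), GFP_KERNEL);
	if (!gnttab_ptes)
		return -ENOMEM;

	/* alloc_vm_area() reserves the VA range, populates the page
	 * tables down to PTE level, and records one PTE pointer per
	 * page in the array; this is the step that may sleep. */
	gnttab_area = alloc_vm_area(PAGE_SIZE * nr_frames, gnttab_ptes);
	if (!gnttab_area) {
		kfree(gnttab_ptes);
		return -ENOMEM;
	}
	return 0;
}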

Avoid the use of apply_to_page_range() by saving and using the
array of PTE addresses from the alloc_vm_area() call.

N.B. 'alloc_vm_area' pre-allocates the pagetable so there is no need
to worry about having to do a PGD/PUD/PMD walk (like apply_to_page_range
does) and we can instead do set_pte.
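
And a sketch of the resulting atomic-safe map step (simplified from
arch_gnttab_map_shared() in the diff below, reusing the illustrative
gnttab_area/gnttab_ptes from the sketch above): each grant frame becomes
a single set_pte_at() through a saved PTE pointer, with no allocation
and no page-table walk.

static void example_gnttab_map(unsigned long *frames, unsigned long nr)
{
	unsigned long addr = (unsigned long)gnttab_area->addr;
	unsigned long i;

	for (i = 0; i < nr; i++) {
		/* The PTE pointer was captured by alloc_vm_area() at
		 * init, so this is a plain PTE store: safe in atomic
		 * context. */
		set_pte_at(&init_mm, addr, gnttab_ptes[i],
			   mfn_pte(frames[i], PAGE_KERNEL));
		addr += PAGE_SIZE;
	}
}
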
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
----
[v2: Add comment about alloc_vm_area]
[v3: Fix compile error found by 0-day bot]
parent a2c5ae65
arch/arm/xen/grant-table.c
@@ -51,3 +51,8 @@ int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes,
 {
 	return -ENOSYS;
 }
+
+int arch_gnttab_init(unsigned long nr_shared, unsigned long nr_status)
+{
+	return 0;
+}
arch/x86/xen/grant-table.c
@@ -36,99 +36,133 @@
 
 #include <linux/sched.h>
 #include <linux/mm.h>
+#include <linux/slab.h>
 #include <linux/vmalloc.h>
 
 #include <xen/interface/xen.h>
 #include <xen/page.h>
 #include <xen/grant_table.h>
+#include <xen/xen.h>
 
 #include <asm/pgtable.h>
 
-static int map_pte_fn(pte_t *pte, struct page *pmd_page,
-		      unsigned long addr, void *data)
-{
-	unsigned long **frames = (unsigned long **)data;
-
-	set_pte_at(&init_mm, addr, pte, mfn_pte((*frames)[0], PAGE_KERNEL));
-	(*frames)++;
-	return 0;
-}
+static struct gnttab_vm_area {
+	struct vm_struct *area;
+	pte_t **ptes;
+} gnttab_shared_vm_area, gnttab_status_vm_area;
 
-/*
- * This function is used to map shared frames to store grant status. It is
- * different from map_pte_fn above, the frames type here is uint64_t.
- */
-static int map_pte_fn_status(pte_t *pte, struct page *pmd_page,
-			     unsigned long addr, void *data)
+int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes,
+			   unsigned long max_nr_gframes,
+			   void **__shared)
 {
-	uint64_t **frames = (uint64_t **)data;
+	void *shared = *__shared;
+	unsigned long addr;
+	unsigned long i;
 
-	set_pte_at(&init_mm, addr, pte, mfn_pte((*frames)[0], PAGE_KERNEL));
-	(*frames)++;
-	return 0;
-}
+	if (shared == NULL)
+		*__shared = shared = gnttab_shared_vm_area.area->addr;
 
-static int unmap_pte_fn(pte_t *pte, struct page *pmd_page,
-			unsigned long addr, void *data)
-{
-	set_pte_at(&init_mm, addr, pte, __pte(0));
+	addr = (unsigned long)shared;
+
+	for (i = 0; i < nr_gframes; i++) {
+		set_pte_at(&init_mm, addr, gnttab_shared_vm_area.ptes[i],
+			   mfn_pte(frames[i], PAGE_KERNEL));
+		addr += PAGE_SIZE;
+	}
+
 	return 0;
 }
 
-int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes,
-			   unsigned long max_nr_gframes,
-			   void **__shared)
+int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes,
+			   unsigned long max_nr_gframes,
+			   grant_status_t **__shared)
 {
-	int rc;
-	void *shared = *__shared;
+	grant_status_t *shared = *__shared;
+	unsigned long addr;
+	unsigned long i;
+
+	if (shared == NULL)
+		*__shared = shared = gnttab_status_vm_area.area->addr;
+
+	addr = (unsigned long)shared;
 
-	if (shared == NULL) {
-		struct vm_struct *area =
-			alloc_vm_area(PAGE_SIZE * max_nr_gframes, NULL);
-		BUG_ON(area == NULL);
-		shared = area->addr;
-		*__shared = shared;
+	for (i = 0; i < nr_gframes; i++) {
+		set_pte_at(&init_mm, addr, gnttab_status_vm_area.ptes[i],
+			   mfn_pte(frames[i], PAGE_KERNEL));
+		addr += PAGE_SIZE;
 	}
 
-	rc = apply_to_page_range(&init_mm, (unsigned long)shared,
-				 PAGE_SIZE * nr_gframes,
-				 map_pte_fn, &frames);
-	return rc;
+	return 0;
 }
 
-int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes,
-			   unsigned long max_nr_gframes,
-			   grant_status_t **__shared)
+void arch_gnttab_unmap(void *shared, unsigned long nr_gframes)
 {
-	int rc;
-	grant_status_t *shared = *__shared;
+	pte_t **ptes;
+	unsigned long addr;
+	unsigned long i;
 
-	if (shared == NULL) {
-		/* No need to pass in PTE as we are going to do it
-		 * in apply_to_page_range anyhow. */
-		struct vm_struct *area =
-			alloc_vm_area(PAGE_SIZE * max_nr_gframes, NULL);
-		BUG_ON(area == NULL);
-		shared = area->addr;
-		*__shared = shared;
-	}
+	if (shared == gnttab_status_vm_area.area->addr)
+		ptes = gnttab_status_vm_area.ptes;
+	else
+		ptes = gnttab_shared_vm_area.ptes;
 
-	rc = apply_to_page_range(&init_mm, (unsigned long)shared,
-				 PAGE_SIZE * nr_gframes,
-				 map_pte_fn_status, &frames);
-	return rc;
+	addr = (unsigned long)shared;
+
+	for (i = 0; i < nr_gframes; i++) {
+		set_pte_at(&init_mm, addr, ptes[i], __pte(0));
+		addr += PAGE_SIZE;
+	}
 }
 
-void arch_gnttab_unmap(void *shared, unsigned long nr_gframes)
+static int arch_gnttab_valloc(struct gnttab_vm_area *area, unsigned nr_frames)
 {
-	apply_to_page_range(&init_mm, (unsigned long)shared,
-			    PAGE_SIZE * nr_gframes, unmap_pte_fn, NULL);
+	area->ptes = kmalloc(sizeof(pte_t *) * nr_frames, GFP_KERNEL);
+	if (area->ptes == NULL)
+		return -ENOMEM;
+
+	area->area = alloc_vm_area(PAGE_SIZE * nr_frames, area->ptes);
+	if (area->area == NULL) {
+		kfree(area->ptes);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void arch_gnttab_vfree(struct gnttab_vm_area *area)
+{
+	free_vm_area(area->area);
+	kfree(area->ptes);
+}
+
+int arch_gnttab_init(unsigned long nr_shared, unsigned long nr_status)
+{
+	int ret;
+
+	if (!xen_pv_domain())
+		return 0;
+
+	ret = arch_gnttab_valloc(&gnttab_shared_vm_area, nr_shared);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * Always allocate the space for the status frames in case
+	 * we're migrated to a host with V2 support.
+	 */
+	ret = arch_gnttab_valloc(&gnttab_status_vm_area, nr_status);
+	if (ret < 0)
+		goto err;
+
+	return 0;
+
+ err:
+	arch_gnttab_vfree(&gnttab_shared_vm_area);
+	return -ENOMEM;
 }
 
 #ifdef CONFIG_XEN_PVH
 #include <xen/balloon.h>
 #include <xen/events.h>
-#include <xen/xen.h>
 #include <linux/slab.h>
 static int __init xlated_setup_gnttab_pages(void)
 {
drivers/xen/grant-table.c
@@ -1195,18 +1195,20 @@ static int gnttab_expand(unsigned int req_entries)
 int gnttab_init(void)
 {
 	int i;
+	unsigned long max_nr_grant_frames;
 	unsigned int max_nr_glist_frames, nr_glist_frames;
 	unsigned int nr_init_grefs;
 	int ret;
 
 	gnttab_request_version();
+	max_nr_grant_frames = gnttab_max_grant_frames();
 	nr_grant_frames = 1;
 
 	/* Determine the maximum number of frames required for the
 	 * grant reference free list on the current hypervisor.
 	 */
 	BUG_ON(grefs_per_grant_frame == 0);
-	max_nr_glist_frames = (gnttab_max_grant_frames() *
+	max_nr_glist_frames = (max_nr_grant_frames *
 			       grefs_per_grant_frame / RPP);
 
 	gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *),
@@ -1223,6 +1225,11 @@ int gnttab_init(void)
 		}
 	}
 
+	ret = arch_gnttab_init(max_nr_grant_frames,
+			       nr_status_frames(max_nr_grant_frames));
+	if (ret < 0)
+		goto ini_nomem;
+
 	if (gnttab_setup() < 0) {
 		ret = -ENODEV;
 		goto ini_nomem;
include/xen/grant_table.h
@@ -170,6 +170,7 @@ gnttab_set_unmap_op(struct gnttab_unmap_grant_ref *unmap, phys_addr_t addr,
 	unmap->dev_bus_addr = 0;
 }
 
+int arch_gnttab_init(unsigned long nr_shared, unsigned long nr_status);
 int arch_gnttab_map_shared(xen_pfn_t *frames, unsigned long nr_gframes,
 			   unsigned long max_nr_gframes,
 			   void **__shared);