Commit b5521fe9 authored by Linus Torvalds

Merge tag 'xsa396-5.17-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen fixes from Juergen Gross:
 "Several Linux PV device frontends are using the grant table interfaces
  for removing access rights of the backends in ways being subject to
  race conditions, resulting in potential data leaks, data corruption by
  malicious backends, and denial of service triggered by malicious
  backends:

   - blkfront, netfront, scsifront and the gntalloc driver are testing
     whether a grant reference is still in use. If this is not the case,
     they assume that a following removal of the granted access will
     always succeed, which is not true in case the backend has mapped
     the granted page between those two operations.

     As a result the backend can keep access to the memory page of the
     guest no matter how the page will be used after the frontend I/O
     has finished. The xenbus driver has a similar problem, as it
     doesn't check the success of removing the granted access of a
     shared ring buffer.

   - blkfront, netfront, scsifront, usbfront, dmabuf, xenbus, 9p,
     kbdfront, and pvcalls are using a functionality to delay freeing a
     grant reference until it is no longer in use, but the freeing of
     the related data page is not synchronized with dropping the granted
     access.

     As a result the backend can keep access to the memory page even
     after it has been freed and then re-used for a different purpose.

   - netfront will fail a BUG_ON() assertion if it fails to revoke
     access in the rx path.

     This will result in a Denial of Service (DoS) of the guest, which
     can be triggered by the backend"
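
[Editorial note: the first bullet describes a racy check-then-revoke
sequence. A minimal sketch of the old pattern and its atomic
replacement, using only interfaces visible in the diff below ("ref" is
an illustrative grant reference, not code from the commit):

	/* Old, racy: the backend can map the grant between the query
	 * and the removal, and a failed removal went unnoticed. */
	if (!gnttab_query_foreign_access(ref))
		gnttab_end_foreign_access(ref, 0, 0UL);

	/* New: query and removal are one atomic operation. */
	if (gnttab_try_end_foreign_access(ref)) {
		/* success: ref is deallocated, the page may be reused */
	} else {
		/* backend still holds a mapping: do not reuse the page,
		 * treat this as a backend error */
	}
]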

* tag 'xsa396-5.17-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen/netfront: react properly to failing gnttab_end_foreign_access_ref()
  xen/gnttab: fix gnttab_end_foreign_access() without page specified
  xen/pvcalls: use alloc/free_pages_exact()
  xen/9p: use alloc/free_pages_exact()
  xen/usb: don't use gnttab_end_foreign_access() in xenhcd_gnttab_done()
  xen: remove gnttab_query_foreign_access()
  xen/gntalloc: don't use gnttab_query_foreign_access()
  xen/scsifront: don't use gnttab_query_foreign_access() for mapped status
  xen/netfront: don't use gnttab_query_foreign_access() for mapped status
  xen/blkfront: don't use gnttab_query_foreign_access() for mapped status
  xen/grant-table: add gnttab_try_end_foreign_access()
  xen/xenbus: don't let xenbus_grant_ring() remove grants in error case
parents 3bf7edc8 66e3531b
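
[Editorial note: the second bullet of the message relies on
gnttab_end_foreign_access() taking ownership of the data page. A
minimal sketch of the teardown pattern the frontends are converted to
(illustrative function name, not part of the commit):

	static void example_ring_teardown(grant_ref_t ref, void *ring)
	{
		/* Hand the page to the grant-table core instead of
		 * freeing it here.  The core frees it immediately iff
		 * the backend no longer has the grant mapped; otherwise
		 * it takes an extra page reference and frees the page
		 * only once the grant is unused, so a malicious backend
		 * can never reach re-used memory. */
		gnttab_end_foreign_access(ref, 0, (unsigned long)ring);
	}

This is also why several hunks below convert multi-page ring
allocations from __get_free_pages() to alloc_pages_exact().]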
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1288,7 +1288,8 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo)
 			rinfo->ring_ref[i] = GRANT_INVALID_REF;
 		}
 	}
-	free_pages((unsigned long)rinfo->ring.sring, get_order(info->nr_ring_pages * XEN_PAGE_SIZE));
+	free_pages_exact(rinfo->ring.sring,
+			 info->nr_ring_pages * XEN_PAGE_SIZE);
 	rinfo->ring.sring = NULL;
 	if (rinfo->irq)
@@ -1372,9 +1373,15 @@ static int blkif_get_final_status(enum blk_req_status s1,
 	return BLKIF_RSP_OKAY;
 }
-static bool blkif_completion(unsigned long *id,
-			     struct blkfront_ring_info *rinfo,
-			     struct blkif_response *bret)
+/*
+ * Return values:
+ *  1 response processed.
+ *  0 missing further responses.
+ * -1 error while processing.
+ */
+static int blkif_completion(unsigned long *id,
+			    struct blkfront_ring_info *rinfo,
+			    struct blkif_response *bret)
 {
 	int i = 0;
 	struct scatterlist *sg;
@@ -1397,7 +1404,7 @@ static bool blkif_completion(unsigned long *id,
 	/* Wait the second response if not yet here. */
 	if (s2->status < REQ_DONE)
-		return false;
+		return 0;
 	bret->status = blkif_get_final_status(s->status,
 					      s2->status);
@@ -1448,42 +1455,43 @@ static bool blkif_completion(unsigned long *id,
 	}
 	/* Add the persistent grant into the list of free grants */
 	for (i = 0; i < num_grant; i++) {
-		if (gnttab_query_foreign_access(s->grants_used[i]->gref)) {
+		if (!gnttab_try_end_foreign_access(s->grants_used[i]->gref)) {
 			/*
 			 * If the grant is still mapped by the backend (the
 			 * backend has chosen to make this grant persistent)
 			 * we add it at the head of the list, so it will be
 			 * reused first.
 			 */
-			if (!info->feature_persistent)
-				pr_alert_ratelimited("backed has not unmapped grant: %u\n",
-						     s->grants_used[i]->gref);
+			if (!info->feature_persistent) {
+				pr_alert("backed has not unmapped grant: %u\n",
+					 s->grants_used[i]->gref);
+				return -1;
+			}
 			list_add(&s->grants_used[i]->node, &rinfo->grants);
 			rinfo->persistent_gnts_c++;
 		} else {
 			/*
-			 * If the grant is not mapped by the backend we end the
-			 * foreign access and add it to the tail of the list,
-			 * so it will not be picked again unless we run out of
-			 * persistent grants.
+			 * If the grant is not mapped by the backend we add it
+			 * to the tail of the list, so it will not be picked
+			 * again unless we run out of persistent grants.
 			 */
-			gnttab_end_foreign_access(s->grants_used[i]->gref, 0, 0UL);
 			s->grants_used[i]->gref = GRANT_INVALID_REF;
 			list_add_tail(&s->grants_used[i]->node, &rinfo->grants);
 		}
 	}
 	if (s->req.operation == BLKIF_OP_INDIRECT) {
 		for (i = 0; i < INDIRECT_GREFS(num_grant); i++) {
-			if (gnttab_query_foreign_access(s->indirect_grants[i]->gref)) {
-				if (!info->feature_persistent)
-					pr_alert_ratelimited("backed has not unmapped grant: %u\n",
-							     s->indirect_grants[i]->gref);
+			if (!gnttab_try_end_foreign_access(s->indirect_grants[i]->gref)) {
+				if (!info->feature_persistent) {
+					pr_alert("backed has not unmapped grant: %u\n",
+						 s->indirect_grants[i]->gref);
+					return -1;
+				}
 				list_add(&s->indirect_grants[i]->node, &rinfo->grants);
 				rinfo->persistent_gnts_c++;
 			} else {
 				struct page *indirect_page;
-				gnttab_end_foreign_access(s->indirect_grants[i]->gref, 0, 0UL);
 				/*
 				 * Add the used indirect page back to the list of
 				 * available pages for indirect grefs.
@@ -1498,7 +1506,7 @@ static bool blkif_completion(unsigned long *id,
 		}
 	}
-	return true;
+	return 1;
 }
 static irqreturn_t blkif_interrupt(int irq, void *dev_id)
@@ -1564,12 +1572,17 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 		}
 		if (bret.operation != BLKIF_OP_DISCARD) {
+			int ret;
+
 			/*
 			 * We may need to wait for an extra response if the
 			 * I/O request is split in 2
 			 */
-			if (!blkif_completion(&id, rinfo, &bret))
+			ret = blkif_completion(&id, rinfo, &bret);
+			if (!ret)
 				continue;
+			if (unlikely(ret < 0))
+				goto err;
 		}
 		if (add_id_to_freelist(rinfo, id)) {
@@ -1676,8 +1689,7 @@ static int setup_blkring(struct xenbus_device *dev,
 	for (i = 0; i < info->nr_ring_pages; i++)
 		rinfo->ring_ref[i] = GRANT_INVALID_REF;
-	sring = (struct blkif_sring *)__get_free_pages(GFP_NOIO | __GFP_HIGH,
-						       get_order(ring_size));
+	sring = alloc_pages_exact(ring_size, GFP_NOIO);
 	if (!sring) {
 		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
 		return -ENOMEM;
@@ -1687,7 +1699,7 @@ static int setup_blkring(struct xenbus_device *dev,
 	err = xenbus_grant_ring(dev, rinfo->ring.sring, info->nr_ring_pages, gref);
 	if (err < 0) {
-		free_pages((unsigned long)sring, get_order(ring_size));
+		free_pages_exact(sring, ring_size);
 		rinfo->ring.sring = NULL;
 		goto fail;
 	}
@@ -2532,11 +2544,10 @@ static void purge_persistent_grants(struct blkfront_info *info)
 		list_for_each_entry_safe(gnt_list_entry, tmp, &rinfo->grants,
 					 node) {
 			if (gnt_list_entry->gref == GRANT_INVALID_REF ||
-			    gnttab_query_foreign_access(gnt_list_entry->gref))
+			    !gnttab_try_end_foreign_access(gnt_list_entry->gref))
 				continue;
 			list_del(&gnt_list_entry->node);
-			gnttab_end_foreign_access(gnt_list_entry->gref, 0, 0UL);
 			rinfo->persistent_gnts_c--;
 			gnt_list_entry->gref = GRANT_INVALID_REF;
 			list_add_tail(&gnt_list_entry->node, &rinfo->grants);
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -424,14 +424,12 @@ static bool xennet_tx_buf_gc(struct netfront_queue *queue)
 			queue->tx_link[id] = TX_LINK_NONE;
 			skb = queue->tx_skbs[id];
 			queue->tx_skbs[id] = NULL;
-			if (unlikely(gnttab_query_foreign_access(
-				queue->grant_tx_ref[id]) != 0)) {
+			if (unlikely(!gnttab_end_foreign_access_ref(
+				queue->grant_tx_ref[id], GNTMAP_readonly))) {
 				dev_alert(dev,
 					  "Grant still in use by backend domain\n");
 				goto err;
 			}
-			gnttab_end_foreign_access_ref(
-				queue->grant_tx_ref[id], GNTMAP_readonly);
 			gnttab_release_grant_reference(
 				&queue->gref_tx_head, queue->grant_tx_ref[id]);
 			queue->grant_tx_ref[id] = GRANT_INVALID_REF;
@@ -990,7 +988,6 @@ static int xennet_get_responses(struct netfront_queue *queue,
 	struct device *dev = &queue->info->netdev->dev;
 	struct bpf_prog *xdp_prog;
 	struct xdp_buff xdp;
-	unsigned long ret;
 	int slots = 1;
 	int err = 0;
 	u32 verdict;
@@ -1032,8 +1029,13 @@ static int xennet_get_responses(struct netfront_queue *queue,
 			goto next;
 		}
-		ret = gnttab_end_foreign_access_ref(ref, 0);
-		BUG_ON(!ret);
+		if (!gnttab_end_foreign_access_ref(ref, 0)) {
+			dev_alert(dev,
+				  "Grant still in use by backend domain\n");
+			queue->info->broken = true;
+			dev_alert(dev, "Disabled for further use\n");
+			return -EINVAL;
+		}
 		gnttab_release_grant_reference(&queue->gref_rx_head, ref);
@@ -1254,6 +1256,10 @@ static int xennet_poll(struct napi_struct *napi, int budget)
 					   &need_xdp_flush);
 		if (unlikely(err)) {
+			if (queue->info->broken) {
+				spin_unlock(&queue->rx_lock);
+				return 0;
+			}
 err:
 			while ((skb = __skb_dequeue(&tmpq)))
 				__skb_queue_tail(&errq, skb);
@@ -1918,7 +1924,7 @@ static int setup_netfront(struct xenbus_device *dev,
 			struct netfront_queue *queue, unsigned int feature_split_evtchn)
 {
 	struct xen_netif_tx_sring *txs;
-	struct xen_netif_rx_sring *rxs;
+	struct xen_netif_rx_sring *rxs = NULL;
 	grant_ref_t gref;
 	int err;
@@ -1938,21 +1944,21 @@ static int setup_netfront(struct xenbus_device *dev,
 	err = xenbus_grant_ring(dev, txs, 1, &gref);
 	if (err < 0)
-		goto grant_tx_ring_fail;
+		goto fail;
 	queue->tx_ring_ref = gref;
 	rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
 	if (!rxs) {
 		err = -ENOMEM;
 		xenbus_dev_fatal(dev, err, "allocating rx ring page");
-		goto alloc_rx_ring_fail;
+		goto fail;
 	}
 	SHARED_RING_INIT(rxs);
 	FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
 	err = xenbus_grant_ring(dev, rxs, 1, &gref);
 	if (err < 0)
-		goto grant_rx_ring_fail;
+		goto fail;
 	queue->rx_ring_ref = gref;
 	if (feature_split_evtchn)
@@ -1965,22 +1971,28 @@ static int setup_netfront(struct xenbus_device *dev,
 		err = setup_netfront_single(queue);
 	if (err)
-		goto alloc_evtchn_fail;
+		goto fail;
 	return 0;
-	/* If we fail to setup netfront, it is safe to just revoke access to
-	 * granted pages because backend is not accessing it at this point.
-	 */
- alloc_evtchn_fail:
-	gnttab_end_foreign_access_ref(queue->rx_ring_ref, 0);
- grant_rx_ring_fail:
-	free_page((unsigned long)rxs);
- alloc_rx_ring_fail:
-	gnttab_end_foreign_access_ref(queue->tx_ring_ref, 0);
- grant_tx_ring_fail:
-	free_page((unsigned long)txs);
- fail:
+ fail:
+	if (queue->rx_ring_ref != GRANT_INVALID_REF) {
+		gnttab_end_foreign_access(queue->rx_ring_ref, 0,
+					  (unsigned long)rxs);
+		queue->rx_ring_ref = GRANT_INVALID_REF;
+	} else {
+		free_page((unsigned long)rxs);
+	}
+	if (queue->tx_ring_ref != GRANT_INVALID_REF) {
+		gnttab_end_foreign_access(queue->tx_ring_ref, 0,
+					  (unsigned long)txs);
+		queue->tx_ring_ref = GRANT_INVALID_REF;
+	} else {
+		free_page((unsigned long)txs);
+	}
 	return err;
 }
--- a/drivers/scsi/xen-scsifront.c
+++ b/drivers/scsi/xen-scsifront.c
@@ -233,12 +233,11 @@ static void scsifront_gnttab_done(struct vscsifrnt_info *info,
 		return;
 	for (i = 0; i < shadow->nr_grants; i++) {
-		if (unlikely(gnttab_query_foreign_access(shadow->gref[i]))) {
+		if (unlikely(!gnttab_try_end_foreign_access(shadow->gref[i]))) {
 			shost_printk(KERN_ALERT, info->host, KBUILD_MODNAME
 				     "grant still in use by backend\n");
 			BUG();
 		}
-		gnttab_end_foreign_access(shadow->gref[i], 0, 0UL);
 	}
 	kfree(shadow->sg);
--- a/drivers/usb/host/xen-hcd.c
+++ b/drivers/usb/host/xen-hcd.c
@@ -716,8 +716,9 @@ static int xenhcd_map_urb_for_request(struct xenhcd_info *info, struct urb *urb,
 	return 0;
 }
-static void xenhcd_gnttab_done(struct usb_shadow *shadow)
+static void xenhcd_gnttab_done(struct xenhcd_info *info, unsigned int id)
 {
+	struct usb_shadow *shadow = info->shadow + id;
 	int nr_segs = 0;
 	int i;
@@ -726,8 +727,10 @@ static void xenhcd_gnttab_done(struct usb_shadow *shadow)
 	if (xenusb_pipeisoc(shadow->req.pipe))
 		nr_segs += shadow->req.u.isoc.nr_frame_desc_segs;
-	for (i = 0; i < nr_segs; i++)
-		gnttab_end_foreign_access(shadow->req.seg[i].gref, 0, 0UL);
+	for (i = 0; i < nr_segs; i++) {
+		if (!gnttab_try_end_foreign_access(shadow->req.seg[i].gref))
+			xenhcd_set_error(info, "backend didn't release grant");
+	}
 	shadow->req.nr_buffer_segs = 0;
 	shadow->req.u.isoc.nr_frame_desc_segs = 0;
@@ -841,7 +844,9 @@ static void xenhcd_cancel_all_enqueued_urbs(struct xenhcd_info *info)
 	list_for_each_entry_safe(urbp, tmp, &info->in_progress_list, list) {
 		req_id = urbp->req_id;
 		if (!urbp->unlinked) {
-			xenhcd_gnttab_done(&info->shadow[req_id]);
+			xenhcd_gnttab_done(info, req_id);
+			if (info->error)
+				return;
 			if (urbp->urb->status == -EINPROGRESS)
 				/* not dequeued */
 				xenhcd_giveback_urb(info, urbp->urb,
@@ -942,8 +947,7 @@ static int xenhcd_urb_request_done(struct xenhcd_info *info)
 	rp = info->urb_ring.sring->rsp_prod;
 	if (RING_RESPONSE_PROD_OVERFLOW(&info->urb_ring, rp)) {
 		xenhcd_set_error(info, "Illegal index on urb-ring");
-		spin_unlock_irqrestore(&info->lock, flags);
-		return 0;
+		goto err;
 	}
 	rmb(); /* ensure we see queued responses up to "rp" */
@@ -952,11 +956,13 @@ static int xenhcd_urb_request_done(struct xenhcd_info *info)
 		id = res.id;
 		if (id >= XENUSB_URB_RING_SIZE) {
 			xenhcd_set_error(info, "Illegal data on urb-ring");
-			continue;
+			goto err;
 		}
 		if (likely(xenusb_pipesubmit(info->shadow[id].req.pipe))) {
-			xenhcd_gnttab_done(&info->shadow[id]);
+			xenhcd_gnttab_done(info, id);
+			if (info->error)
+				goto err;
 			urb = info->shadow[id].urb;
 			if (likely(urb)) {
 				urb->actual_length = res.actual_length;
@@ -978,6 +984,10 @@ static int xenhcd_urb_request_done(struct xenhcd_info *info)
 	spin_unlock_irqrestore(&info->lock, flags);
 	return more_to_do;
+
+ err:
+	spin_unlock_irqrestore(&info->lock, flags);
+	return 0;
 }
 static int xenhcd_conn_notify(struct xenhcd_info *info)
--- a/drivers/xen/gntalloc.c
+++ b/drivers/xen/gntalloc.c
@@ -169,20 +169,14 @@ static int add_grefs(struct ioctl_gntalloc_alloc_gref *op,
 		__del_gref(gref);
 	}
-	/* It's possible for the target domain to map the just-allocated grant
-	 * references by blindly guessing their IDs; if this is done, then
-	 * __del_gref will leave them in the queue_gref list. They need to be
-	 * added to the global list so that we can free them when they are no
-	 * longer referenced.
-	 */
-	if (unlikely(!list_empty(&queue_gref)))
-		list_splice_tail(&queue_gref, &gref_list);
 	mutex_unlock(&gref_mutex);
 	return rc;
 }
 static void __del_gref(struct gntalloc_gref *gref)
 {
+	unsigned long addr;
+
 	if (gref->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
 		uint8_t *tmp = kmap(gref->page);
 		tmp[gref->notify.pgoff] = 0;
@@ -196,21 +190,16 @@ static void __del_gref(struct gntalloc_gref *gref)
 	gref->notify.flags = 0;
 	if (gref->gref_id) {
-		if (gnttab_query_foreign_access(gref->gref_id))
-			return;
-
-		if (!gnttab_end_foreign_access_ref(gref->gref_id, 0))
-			return;
-
-		gnttab_free_grant_reference(gref->gref_id);
+		if (gref->page) {
+			addr = (unsigned long)page_to_virt(gref->page);
+			gnttab_end_foreign_access(gref->gref_id, 0, addr);
+		} else
+			gnttab_free_grant_reference(gref->gref_id);
 	}
 	gref_size--;
 	list_del(&gref->next_gref);
-	if (gref->page)
-		__free_page(gref->page);
-
 	kfree(gref);
 }
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -134,12 +134,9 @@ struct gnttab_ops {
 	 */
 	unsigned long (*end_foreign_transfer_ref)(grant_ref_t ref);
 	/*
-	 * Query the status of a grant entry. Ref parameter is reference of
-	 * queried grant entry, return value is the status of queried entry.
-	 * Detailed status(writing/reading) can be gotten from the return value
-	 * by bit operations.
+	 * Read the frame number related to a given grant reference.
 	 */
-	int (*query_foreign_access)(grant_ref_t ref);
+	unsigned long (*read_frame)(grant_ref_t ref);
 };
 struct unmap_refs_callback_data {
@@ -284,22 +281,6 @@ int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
 }
 EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);
-static int gnttab_query_foreign_access_v1(grant_ref_t ref)
-{
-	return gnttab_shared.v1[ref].flags & (GTF_reading|GTF_writing);
-}
-
-static int gnttab_query_foreign_access_v2(grant_ref_t ref)
-{
-	return grstatus[ref] & (GTF_reading|GTF_writing);
-}
-
-int gnttab_query_foreign_access(grant_ref_t ref)
-{
-	return gnttab_interface->query_foreign_access(ref);
-}
-EXPORT_SYMBOL_GPL(gnttab_query_foreign_access);
-
 static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly)
 {
 	u16 flags, nflags;
@@ -353,6 +334,16 @@ int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
 }
 EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);
+static unsigned long gnttab_read_frame_v1(grant_ref_t ref)
+{
+	return gnttab_shared.v1[ref].frame;
+}
+
+static unsigned long gnttab_read_frame_v2(grant_ref_t ref)
+{
+	return gnttab_shared.v2[ref].full_page.frame;
+}
+
 struct deferred_entry {
 	struct list_head list;
 	grant_ref_t ref;
@@ -382,12 +373,9 @@ static void gnttab_handle_deferred(struct timer_list *unused)
 		spin_unlock_irqrestore(&gnttab_list_lock, flags);
 		if (_gnttab_end_foreign_access_ref(entry->ref, entry->ro)) {
 			put_free_entry(entry->ref);
-			if (entry->page) {
-				pr_debug("freeing g.e. %#x (pfn %#lx)\n",
-					 entry->ref, page_to_pfn(entry->page));
-				put_page(entry->page);
-			} else
-				pr_info("freeing g.e. %#x\n", entry->ref);
+			pr_debug("freeing g.e. %#x (pfn %#lx)\n",
+				 entry->ref, page_to_pfn(entry->page));
+			put_page(entry->page);
 			kfree(entry);
 			entry = NULL;
 		} else {
@@ -412,9 +400,18 @@ static void gnttab_handle_deferred(struct timer_list *unused)
 static void gnttab_add_deferred(grant_ref_t ref, bool readonly,
 				struct page *page)
 {
-	struct deferred_entry *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
+	struct deferred_entry *entry;
+	gfp_t gfp = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
 	const char *what = KERN_WARNING "leaking";
+	entry = kmalloc(sizeof(*entry), gfp);
+	if (!page) {
+		unsigned long gfn = gnttab_interface->read_frame(ref);
+
+		page = pfn_to_page(gfn_to_pfn(gfn));
+		get_page(page);
+	}
+
 	if (entry) {
 		unsigned long flags;
@@ -435,11 +432,21 @@ static void gnttab_add_deferred(grant_ref_t ref, bool readonly,
 	       what, ref, page ? page_to_pfn(page) : -1);
 }
+int gnttab_try_end_foreign_access(grant_ref_t ref)
+{
+	int ret = _gnttab_end_foreign_access_ref(ref, 0);
+
+	if (ret)
+		put_free_entry(ref);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(gnttab_try_end_foreign_access);
+
 void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
 			       unsigned long page)
 {
-	if (gnttab_end_foreign_access_ref(ref, readonly)) {
-		put_free_entry(ref);
+	if (gnttab_try_end_foreign_access(ref)) {
 		if (page != 0)
 			put_page(virt_to_page(page));
 	} else
@@ -1417,7 +1424,7 @@ static const struct gnttab_ops gnttab_v1_ops = {
 	.update_entry = gnttab_update_entry_v1,
 	.end_foreign_access_ref = gnttab_end_foreign_access_ref_v1,
 	.end_foreign_transfer_ref = gnttab_end_foreign_transfer_ref_v1,
-	.query_foreign_access = gnttab_query_foreign_access_v1,
+	.read_frame = gnttab_read_frame_v1,
 };
 static const struct gnttab_ops gnttab_v2_ops = {
@@ -1429,7 +1436,7 @@ static const struct gnttab_ops gnttab_v2_ops = {
 	.update_entry = gnttab_update_entry_v2,
 	.end_foreign_access_ref = gnttab_end_foreign_access_ref_v2,
 	.end_foreign_transfer_ref = gnttab_end_foreign_transfer_ref_v2,
-	.query_foreign_access = gnttab_query_foreign_access_v2,
+	.read_frame = gnttab_read_frame_v2,
 };
 static bool gnttab_need_v2(void)
--- a/drivers/xen/pvcalls-front.c
+++ b/drivers/xen/pvcalls-front.c
@@ -337,8 +337,8 @@ static void free_active_ring(struct sock_mapping *map)
 	if (!map->active.ring)
 		return;
-	free_pages((unsigned long)map->active.data.in,
-			map->active.ring->ring_order);
+	free_pages_exact(map->active.data.in,
+			 PAGE_SIZE << map->active.ring->ring_order);
 	free_page((unsigned long)map->active.ring);
 }
@@ -352,8 +352,8 @@ static int alloc_active_ring(struct sock_mapping *map)
 		goto out;
 	map->active.ring->ring_order = PVCALLS_RING_ORDER;
-	bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
-					 PVCALLS_RING_ORDER);
+	bytes = alloc_pages_exact(PAGE_SIZE << PVCALLS_RING_ORDER,
+				  GFP_KERNEL | __GFP_ZERO);
 	if (!bytes)
 		goto out;
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -379,7 +379,14 @@ int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr,
 		      unsigned int nr_pages, grant_ref_t *grefs)
 {
 	int err;
-	int i, j;
+	unsigned int i;
+	grant_ref_t gref_head;
+
+	err = gnttab_alloc_grant_references(nr_pages, &gref_head);
+	if (err) {
+		xenbus_dev_fatal(dev, err, "granting access to ring page");
+		return err;
+	}
 	for (i = 0; i < nr_pages; i++) {
 		unsigned long gfn;
@@ -389,23 +396,14 @@ int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr,
 		else
 			gfn = virt_to_gfn(vaddr);
-		err = gnttab_grant_foreign_access(dev->otherend_id, gfn, 0);
-		if (err < 0) {
-			xenbus_dev_fatal(dev, err,
-					 "granting access to ring page");
-			goto fail;
-		}
-		grefs[i] = err;
+		grefs[i] = gnttab_claim_grant_reference(&gref_head);
+		gnttab_grant_foreign_access_ref(grefs[i], dev->otherend_id,
+						gfn, 0);
 		vaddr = vaddr + XEN_PAGE_SIZE;
 	}
 	return 0;
-
- fail:
-	for (j = 0; j < i; j++)
-		gnttab_end_foreign_access_ref(grefs[j], 0);
-	return err;
 }
 EXPORT_SYMBOL_GPL(xenbus_grant_ring);
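
[Editorial note: the xenbus_grant_ring() rewrite above can drop its
error-unwinding path because all grant references are reserved up
front; after that point granting can no longer fail midway. A
standalone sketch of the same reserved-batch pattern (illustrative
names, not part of the commit):

	static int example_grant_batch(domid_t otherend,
				       const unsigned long *gfns,
				       unsigned int n, grant_ref_t *refs)
	{
		grant_ref_t head;
		unsigned int i;
		int err = gnttab_alloc_grant_references(n, &head);

		if (err)
			return err;	/* nothing granted, nothing to undo */

		for (i = 0; i < n; i++) {
			refs[i] = gnttab_claim_grant_reference(&head);
			gnttab_grant_foreign_access_ref(refs[i], otherend,
							gfns[i], 0);
		}
		return 0;
	}
]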
--- a/include/xen/grant_table.h
+++ b/include/xen/grant_table.h
@@ -104,17 +104,32 @@ int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly);
  * access has been ended, free the given page too.  Access will be ended
  * immediately iff the grant entry is not in use, otherwise it will happen
  * some time later.  page may be 0, in which case no freeing will occur.
+ * Note that the granted page might still be accessed (read or write) by the
+ * other side after gnttab_end_foreign_access() returns, so even if page was
+ * specified as 0 it is not allowed to just reuse the page for other
+ * purposes immediately. gnttab_end_foreign_access() will take an additional
+ * reference to the granted page in this case, which is dropped only after
+ * the grant is no longer in use.
+ * This requires that multi page allocations for areas subject to
+ * gnttab_end_foreign_access() are done via alloc_pages_exact() (and freeing
+ * via free_pages_exact()) in order to avoid high order pages.
  */
 void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
 			       unsigned long page);
+/*
+ * End access through the given grant reference, iff the grant entry is
+ * no longer in use.  In case of success ending foreign access, the
+ * grant reference is deallocated.
+ * Return 1 if the grant entry was freed, 0 if it is still in use.
+ */
+int gnttab_try_end_foreign_access(grant_ref_t ref);
+
 int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn);
 unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref);
 unsigned long gnttab_end_foreign_transfer(grant_ref_t ref);
-int gnttab_query_foreign_access(grant_ref_t ref);
-
 /*
  * operations on reserved batches of grant references
  */
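
[Editorial note: the new header comment above mandates
alloc_pages_exact() for multi-page granted areas. A sketch of an
allocation compatible with the deferred put_page() (illustrative
helper, not part of the commit):

	static void *example_alloc_granted_area(unsigned int nr_pages)
	{
		/* alloc_pages_exact() returns a run of order-0 pages,
		 * each with its own refcount, so the grant-table core
		 * can hold and later free every page individually.  A
		 * single high-order allocation from __get_free_pages()
		 * is freed as one block and cannot be released page by
		 * page. */
		return alloc_pages_exact(nr_pages * XEN_PAGE_SIZE,
					 GFP_KERNEL | __GFP_ZERO);
	}
]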
--- a/net/9p/trans_xen.c
+++ b/net/9p/trans_xen.c
@@ -281,9 +281,9 @@ static void xen_9pfs_front_free(struct xen_9pfs_front_priv *priv)
 				ref = priv->rings[i].intf->ref[j];
 				gnttab_end_foreign_access(ref, 0, 0);
 			}
-			free_pages((unsigned long)priv->rings[i].data.in,
-				   priv->rings[i].intf->ring_order -
-				   (PAGE_SHIFT - XEN_PAGE_SHIFT));
+			free_pages_exact(priv->rings[i].data.in,
+					 1UL << (priv->rings[i].intf->ring_order +
+						 XEN_PAGE_SHIFT));
 		}
 		gnttab_end_foreign_access(priv->rings[i].ref, 0, 0);
 		free_page((unsigned long)priv->rings[i].intf);
@@ -322,8 +322,8 @@ static int xen_9pfs_front_alloc_dataring(struct xenbus_device *dev,
 	if (ret < 0)
 		goto out;
 	ring->ref = ret;
-	bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
-			order - (PAGE_SHIFT - XEN_PAGE_SHIFT));
+	bytes = alloc_pages_exact(1UL << (order + XEN_PAGE_SHIFT),
+				  GFP_KERNEL | __GFP_ZERO);
 	if (!bytes) {
 		ret = -ENOMEM;
 		goto out;
@@ -354,9 +354,7 @@ static int xen_9pfs_front_alloc_dataring(struct xenbus_device *dev,
 	if (bytes) {
 		for (i--; i >= 0; i--)
 			gnttab_end_foreign_access(ring->intf->ref[i], 0, 0);
-		free_pages((unsigned long)bytes,
-			   ring->intf->ring_order -
-			   (PAGE_SHIFT - XEN_PAGE_SHIFT));
+		free_pages_exact(bytes, 1UL << (order + XEN_PAGE_SHIFT));
 	}
 	gnttab_end_foreign_access(ring->ref, 0, 0);
 	free_page((unsigned long)ring->intf);