Commit 31552ee3 authored by Roger Pau Monne, committed by Konrad Rzeszutek Wilk

xen-blkback: expand map/unmap functions

Preparatory change for implementing indirect descriptors. Change
xen_blkbk_{map/unmap} so that they can map/unmap an arbitrary number
of grants (previously limited to BLKIF_MAX_SEGMENTS_PER_REQUEST).
Also, remove the usage of pending_req in the map/unmap functions, so
we can map/unmap grants without needing to pass a pending_req.
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: xen-devel@lists.xen.org
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
parent bf0720c4
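
Context for the diff below: xen_blkbk_map()/xen_blkbk_unmap() now take plain arrays
(grant refs, handles, pages, persistent grants) plus a count, and process them in
batches of BLKIF_MAX_SEGMENTS_PER_REQUEST, which is what allows an arbitrary number
of grants per call. The following is a minimal, self-contained sketch of that batching
pattern only; the types, the BATCH_SIZE value and the flush_batch() helper are
simplified stand-ins for the real Xen grant-table structures and hypercalls, not the
driver's actual API.

/*
 * Sketch (not the driver code): unmap an arbitrary number of grant
 * handles, flushing them to the hypervisor in batches of at most
 * BATCH_SIZE entries, the way the reworked xen_blkbk_unmap() batches
 * on BLKIF_MAX_SEGMENTS_PER_REQUEST.
 */
#include <stdio.h>

#define BATCH_SIZE     11   /* stand-in for BLKIF_MAX_SEGMENTS_PER_REQUEST */
#define INVALID_HANDLE -1   /* stand-in for BLKBACK_INVALID_HANDLE */

typedef int grant_handle_t; /* simplified; the real type comes from Xen headers */

/* Stand-in for gnttab_unmap_refs() + put_free_pages(): flush one batch. */
static void flush_batch(const grant_handle_t *batch, int count)
{
        printf("unmapping a batch of %d grants (first handle %d)\n",
               count, batch[0]);
}

/* Unmap 'num' grants, issuing at most BATCH_SIZE unmaps per flush. */
static void unmap_grants(grant_handle_t *handles, int num)
{
        grant_handle_t batch[BATCH_SIZE];
        int i, invcount = 0;

        for (i = 0; i < num; i++) {
                if (handles[i] == INVALID_HANDLE)
                        continue;               /* nothing mapped in this slot */
                batch[invcount] = handles[i];
                handles[i] = INVALID_HANDLE;    /* mark the slot as unmapped */
                if (++invcount == BATCH_SIZE) { /* batch is full: flush it */
                        flush_batch(batch, invcount);
                        invcount = 0;
                }
        }
        if (invcount)                           /* flush the final partial batch */
                flush_batch(batch, invcount);
}

int main(void)
{
        grant_handle_t handles[30];
        int i;

        for (i = 0; i < 30; i++)
                handles[i] = i;         /* pretend 30 grants are currently mapped */
        unmap_grants(handles, 30);      /* flushes batches of 11, 11 and 8 */
        return 0;
}

The map side of the patch uses the same idea: last_map/map_until record how far the
grefs[] array has been consumed, and the function loops back (the 'again' label in
the diff) until all num grants have been mapped.
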
@@ -163,10 +163,6 @@ static inline void shrink_free_pagepool(struct xen_blkif *blkif, int num)
 
 #define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))
 
-#define pending_handle(_req, _seg) \
-        (_req->grant_handles[_seg])
-
-
 static int do_block_io_op(struct xen_blkif *blkif);
 static int dispatch_rw_block_io(struct xen_blkif *blkif,
                                 struct blkif_request *req,
@@ -648,50 +644,57 @@ struct seg_buf {
  * Unmap the grant references, and also remove the M2P over-rides
  * used in the 'pending_req'.
  */
-static void xen_blkbk_unmap(struct pending_req *req)
+static void xen_blkbk_unmap(struct xen_blkif *blkif,
+                            grant_handle_t handles[],
+                            struct page *pages[],
+                            struct persistent_gnt *persistent_gnts[],
+                            int num)
 {
         struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-        struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+        struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
         unsigned int i, invcount = 0;
-        grant_handle_t handle;
-        struct xen_blkif *blkif = req->blkif;
         int ret;
 
-        for (i = 0; i < req->nr_pages; i++) {
-                if (req->persistent_gnts[i] != NULL) {
-                        put_persistent_gnt(blkif, req->persistent_gnts[i]);
+        for (i = 0; i < num; i++) {
+                if (persistent_gnts[i] != NULL) {
+                        put_persistent_gnt(blkif, persistent_gnts[i]);
                         continue;
                 }
-                handle = pending_handle(req, i);
-                pages[invcount] = req->pages[i];
-                if (handle == BLKBACK_INVALID_HANDLE)
+                if (handles[i] == BLKBACK_INVALID_HANDLE)
                         continue;
-                gnttab_set_unmap_op(&unmap[invcount], vaddr(pages[invcount]),
-                                    GNTMAP_host_map, handle);
-                pending_handle(req, i) = BLKBACK_INVALID_HANDLE;
-                invcount++;
+                unmap_pages[invcount] = pages[i];
+                gnttab_set_unmap_op(&unmap[invcount], vaddr(pages[i]),
+                                    GNTMAP_host_map, handles[i]);
+                handles[i] = BLKBACK_INVALID_HANDLE;
+                if (++invcount == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
+                        ret = gnttab_unmap_refs(unmap, NULL, unmap_pages,
+                                                invcount);
+                        BUG_ON(ret);
+                        put_free_pages(blkif, unmap_pages, invcount);
+                        invcount = 0;
+                }
+        }
+        if (invcount) {
+                ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
+                BUG_ON(ret);
+                put_free_pages(blkif, unmap_pages, invcount);
         }
-
-        ret = gnttab_unmap_refs(unmap, NULL, pages, invcount);
-        BUG_ON(ret);
-        put_free_pages(blkif, pages, invcount);
 }
 
-static int xen_blkbk_map(struct blkif_request *req,
-                         struct pending_req *pending_req,
-                         struct seg_buf seg[],
-                         struct page *pages[])
+static int xen_blkbk_map(struct xen_blkif *blkif, grant_ref_t grefs[],
+                         struct persistent_gnt *persistent_gnts[],
+                         grant_handle_t handles[],
+                         struct page *pages[],
+                         int num, bool ro)
 {
         struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
         struct page *pages_to_gnt[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-        struct persistent_gnt **persistent_gnts = pending_req->persistent_gnts;
         struct persistent_gnt *persistent_gnt = NULL;
-        struct xen_blkif *blkif = pending_req->blkif;
         phys_addr_t addr = 0;
         int i, seg_idx, new_map_idx;
-        int nseg = req->u.rw.nr_segments;
         int segs_to_map = 0;
         int ret = 0;
+        int last_map = 0, map_until = 0;
         int use_persistent_gnts;
 
         use_persistent_gnts = (blkif->vbd.feature_gnt_persistent);
@@ -701,13 +704,14 @@ static int xen_blkbk_map(struct blkif_request *req,
          * assign map[..] with the PFN of the page in our domain with the
          * corresponding grant reference for each page.
          */
-        for (i = 0; i < nseg; i++) {
+again:
+        for (i = map_until; i < num; i++) {
                 uint32_t flags;
 
                 if (use_persistent_gnts)
                         persistent_gnt = get_persistent_gnt(
                                 blkif,
-                                req->u.rw.seg[i].gref);
+                                grefs[i]);
 
                 if (persistent_gnt) {
                         /*
@@ -723,13 +727,15 @@ static int xen_blkbk_map(struct blkif_request *req,
                         pages_to_gnt[segs_to_map] = pages[i];
                         persistent_gnts[i] = NULL;
                         flags = GNTMAP_host_map;
-                        if (!use_persistent_gnts &&
-                            (pending_req->operation != BLKIF_OP_READ))
+                        if (!use_persistent_gnts && ro)
                                 flags |= GNTMAP_readonly;
                         gnttab_set_map_op(&map[segs_to_map++], addr,
-                                          flags, req->u.rw.seg[i].gref,
+                                          flags, grefs[i],
                                           blkif->domid);
                 }
+                map_until = i + 1;
+                if (segs_to_map == BLKIF_MAX_SEGMENTS_PER_REQUEST)
+                        break;
         }
 
         if (segs_to_map) {
@@ -742,26 +748,19 @@ static int xen_blkbk_map(struct blkif_request *req,
          * so that when we access vaddr(pending_req,i) it has the contents of
          * the page from the other domain.
          */
-        for (seg_idx = 0, new_map_idx = 0; seg_idx < nseg; seg_idx++) {
+        for (seg_idx = last_map, new_map_idx = 0; seg_idx < map_until; seg_idx++) {
                 if (!persistent_gnts[seg_idx]) {
                         /* This is a newly mapped grant */
                         BUG_ON(new_map_idx >= segs_to_map);
                         if (unlikely(map[new_map_idx].status != 0)) {
                                 pr_debug(DRV_PFX "invalid buffer -- could not remap it\n");
-                                pending_handle(pending_req, seg_idx) = BLKBACK_INVALID_HANDLE;
+                                handles[seg_idx] = BLKBACK_INVALID_HANDLE;
                                 ret |= 1;
-                                new_map_idx++;
-                                /*
-                                 * No need to set unmap_seg bit, since
-                                 * we can not unmap this grant because
-                                 * the handle is invalid.
-                                 */
-                                continue;
+                                goto next;
                         }
-                        pending_handle(pending_req, seg_idx) = map[new_map_idx].handle;
+                        handles[seg_idx] = map[new_map_idx].handle;
                 } else {
-                        /* This grant is persistent and already mapped */
-                        goto next;
+                        continue;
                 }
                 if (use_persistent_gnts &&
                     blkif->persistent_gnt_c < xen_blkif_max_pgrants) {
@@ -777,7 +776,7 @@ static int xen_blkbk_map(struct blkif_request *req,
                          * allocate the persistent_gnt struct
                          * map this grant non-persistenly
                          */
-                        goto next_unmap;
+                        goto next;
                 }
                 persistent_gnt->gnt = map[new_map_idx].ref;
                 persistent_gnt->handle = map[new_map_idx].handle;
@@ -786,13 +785,12 @@ static int xen_blkbk_map(struct blkif_request *req,
                                      persistent_gnt)) {
                         kfree(persistent_gnt);
                         persistent_gnt = NULL;
-                        goto next_unmap;
+                        goto next;
                 }
                 persistent_gnts[seg_idx] = persistent_gnt;
                 pr_debug(DRV_PFX " grant %u added to the tree of persistent grants, using %u/%u\n",
                          persistent_gnt->gnt, blkif->persistent_gnt_c,
                          xen_blkif_max_pgrants);
-                new_map_idx++;
                 goto next;
         }
         if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) {
@@ -800,15 +798,18 @@ static int xen_blkbk_map(struct blkif_request *req,
                 pr_debug(DRV_PFX " domain %u, device %#x is using maximum number of persistent grants\n",
                          blkif->domid, blkif->vbd.handle);
         }
-next_unmap:
         /*
          * We could not map this grant persistently, so use it as
          * a non-persistent grant.
          */
-        new_map_idx++;
 next:
-        seg[seg_idx].offset = (req->u.rw.seg[seg_idx].first_sect << 9);
+        new_map_idx++;
         }
+        segs_to_map = 0;
+        last_map = map_until;
+        if (map_until != num)
+                goto again;
+
         return ret;
 
 out_of_memory:
@@ -817,6 +818,31 @@ static int xen_blkbk_map(struct blkif_request *req,
         return -ENOMEM;
 }
 
+static int xen_blkbk_map_seg(struct blkif_request *req,
+                             struct pending_req *pending_req,
+                             struct seg_buf seg[],
+                             struct page *pages[])
+{
+        int i, rc;
+        grant_ref_t grefs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+
+        for (i = 0; i < req->u.rw.nr_segments; i++)
+                grefs[i] = req->u.rw.seg[i].gref;
+
+        rc = xen_blkbk_map(pending_req->blkif, grefs,
+                           pending_req->persistent_gnts,
+                           pending_req->grant_handles, pending_req->pages,
+                           req->u.rw.nr_segments,
+                           (pending_req->operation != BLKIF_OP_READ));
+        if (rc)
+                return rc;
+
+        for (i = 0; i < req->u.rw.nr_segments; i++)
+                seg[i].offset = (req->u.rw.seg[i].first_sect << 9);
+
+        return 0;
+}
+
 static int dispatch_discard_io(struct xen_blkif *blkif,
                                 struct blkif_request *req)
 {
@@ -903,7 +929,10 @@ static void __end_block_io_op(struct pending_req *pending_req, int error)
          * the proper response on the ring.
          */
         if (atomic_dec_and_test(&pending_req->pendcnt)) {
-                xen_blkbk_unmap(pending_req);
+                xen_blkbk_unmap(pending_req->blkif, pending_req->grant_handles,
+                                pending_req->pages,
+                                pending_req->persistent_gnts,
+                                pending_req->nr_pages);
                 make_response(pending_req->blkif, pending_req->id,
                               pending_req->operation, pending_req->status);
                 xen_blkif_put(pending_req->blkif);
@@ -1125,7 +1154,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
          * the hypercall to unmap the grants - that is all done in
          * xen_blkbk_unmap.
          */
-        if (xen_blkbk_map(req, pending_req, seg, pages))
+        if (xen_blkbk_map_seg(req, pending_req, seg, pages))
                 goto fail_flush;
 
         /*
@@ -1186,7 +1215,9 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
         return 0;
 
  fail_flush:
-        xen_blkbk_unmap(pending_req);
+        xen_blkbk_unmap(blkif, pending_req->grant_handles,
+                        pending_req->pages, pending_req->persistent_gnts,
+                        pending_req->nr_pages);
  fail_response:
         /* Haven't submitted any bio's yet. */
         make_response(blkif, req->u.rw.id, req->operation, BLKIF_RSP_ERROR);