Commit 402b27f9 authored by Roger Pau Monne, committed by Konrad Rzeszutek Wilk

xen-block: implement indirect descriptors

Indirect descriptors introduce a new block operation
(BLKIF_OP_INDIRECT) that passes grant references instead of segments
in the request. These grant references are filled with arrays of
blkif_request_segment_aligned; this way we can send more segments in a
request.

The proposed implementation sets the maximum number of indirect
segments (blkif_request_segment_aligned entries, packed into indirect
frames) to 256 in the backend and 32 in the frontend. The value in the frontend has been
chosen experimentally, and the backend value has been set to a sane
value that allows expanding the maximum number of indirect descriptors
in the frontend if needed.
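
As a rough illustration (not part of the patch), the number of indirect pages
needed for a given segment count is a ceiling division by how many 8-byte
blkif_request_segment_aligned entries fit in one page; a minimal standalone
sketch, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SIZE 4096
#define SEG_ENTRY_SIZE 8	/* sizeof(struct blkif_request_segment_aligned) */
#define SEGS_PER_INDIRECT_FRAME (PAGE_SIZE / SEG_ENTRY_SIZE)	/* 512 */

/* Same ceiling division the INDIRECT_PAGES()/INDIRECT_GREFS() macros perform */
static unsigned int indirect_pages(unsigned int segs)
{
	return (segs + SEGS_PER_INDIRECT_FRAME - 1) / SEGS_PER_INDIRECT_FRAME;
}

int main(void)
{
	printf("32 segments  -> %u indirect page(s)\n", indirect_pages(32));
	printf("256 segments -> %u indirect page(s)\n", indirect_pages(256));
	printf("513 segments -> %u indirect page(s)\n", indirect_pages(513));
	return 0;
}

So both the frontend default (32) and the backend maximum (256) fit in a single
indirect page, leaving room to grow before a second page is required.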

The migration code has changed from the previous implementation, in
which we simply remapped the segments on the shared ring. Now the
maximum number of segments allowed in a request can change depending
on the backend, so we have to requeue all the requests in the ring and
in the queue, splitting the bios they contain when they carry more
segments than the new maximum.
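
A purely illustrative sketch (not taken from the patch) of the arithmetic used
when a requeued bio has more segments than the new limit: the bio is split into
ceil(segments / segs) clones, and the i-th clone starts (i * segs * PAGE_SIZE) >> 9
sectors into the original bio. Assuming full-page segments and 4 KiB pages:

#include <stdio.h>

#define PAGE_SIZE 4096
#define SECTOR_SHIFT 9

/*
 * Print the (offset, size) in sectors of each piece a bio would be split
 * into when the per-request segment limit drops to 'segs'.
 */
static void split_plan(unsigned int bio_segs, unsigned int bio_sectors,
		       unsigned int segs)
{
	unsigned int pending = (bio_segs + segs - 1) / segs;
	unsigned int max_piece = (segs * PAGE_SIZE) >> SECTOR_SHIFT;
	unsigned int i;

	for (i = 0; i < pending; i++) {
		unsigned int offset = i * max_piece;
		unsigned int size = bio_sectors - offset;

		if (size > max_piece)
			size = max_piece;
		printf("piece %u: offset %u, size %u (sectors)\n", i, offset, size);
	}
}

int main(void)
{
	/* e.g. a 32-segment bio requeued against a backend limited to 11 segments */
	split_plan(32, 32 * (PAGE_SIZE >> SECTOR_SHIFT), 11);
	return 0;
}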

[v2: Fixed minor comments by Konrad.]
[v1: Added padding to make the indirect request 64bit aligned.
 Added some BUGs, comments; fixed number of indirect pages in
 blkif_get_x86_{32/64}_req. Added description about the indirect operation
 in blkif.h]
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
[v3: Fixed spaces and tabs mix ups]
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
parent 31552ee3
@@ -59,7 +59,7 @@
* IO workloads. * IO workloads.
*/ */
static int xen_blkif_max_buffer_pages = 704; static int xen_blkif_max_buffer_pages = 1024;
module_param_named(max_buffer_pages, xen_blkif_max_buffer_pages, int, 0644); module_param_named(max_buffer_pages, xen_blkif_max_buffer_pages, int, 0644);
MODULE_PARM_DESC(max_buffer_pages, MODULE_PARM_DESC(max_buffer_pages,
"Maximum number of free pages to keep in each block backend buffer"); "Maximum number of free pages to keep in each block backend buffer");
@@ -75,7 +75,7 @@ MODULE_PARM_DESC(max_buffer_pages,
* algorithm. * algorithm.
*/ */
static int xen_blkif_max_pgrants = 352; static int xen_blkif_max_pgrants = 1056;
module_param_named(max_persistent_grants, xen_blkif_max_pgrants, int, 0644); module_param_named(max_persistent_grants, xen_blkif_max_pgrants, int, 0644);
MODULE_PARM_DESC(max_persistent_grants, MODULE_PARM_DESC(max_persistent_grants,
"Maximum number of grants to map persistently"); "Maximum number of grants to map persistently");
@@ -636,10 +636,6 @@ int xen_blkif_schedule(void *arg)
return 0; return 0;
} }
struct seg_buf {
unsigned int offset;
unsigned int nsec;
};
/* /*
* Unmap the grant references, and also remove the M2P over-rides * Unmap the grant references, and also remove the M2P over-rides
* used in the 'pending_req'. * used in the 'pending_req'.
@@ -818,29 +814,69 @@ static int xen_blkbk_map(struct xen_blkif *blkif, grant_ref_t grefs[],
return -ENOMEM; return -ENOMEM;
} }
static int xen_blkbk_map_seg(struct blkif_request *req, static int xen_blkbk_map_seg(struct pending_req *pending_req,
struct pending_req *pending_req,
struct seg_buf seg[], struct seg_buf seg[],
struct page *pages[]) struct page *pages[])
{ {
int i, rc; int rc;
grant_ref_t grefs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
for (i = 0; i < req->u.rw.nr_segments; i++)
grefs[i] = req->u.rw.seg[i].gref;
rc = xen_blkbk_map(pending_req->blkif, grefs, rc = xen_blkbk_map(pending_req->blkif, pending_req->grefs,
pending_req->persistent_gnts, pending_req->persistent_gnts,
pending_req->grant_handles, pending_req->pages, pending_req->grant_handles, pending_req->pages,
req->u.rw.nr_segments, pending_req->nr_pages,
(pending_req->operation != BLKIF_OP_READ)); (pending_req->operation != BLKIF_OP_READ));
if (rc)
return rc; return rc;
}
for (i = 0; i < req->u.rw.nr_segments; i++) static int xen_blkbk_parse_indirect(struct blkif_request *req,
seg[i].offset = (req->u.rw.seg[i].first_sect << 9); struct pending_req *pending_req,
struct seg_buf seg[],
struct phys_req *preq)
{
struct persistent_gnt **persistent =
pending_req->indirect_persistent_gnts;
struct page **pages = pending_req->indirect_pages;
struct xen_blkif *blkif = pending_req->blkif;
int indirect_grefs, rc, n, nseg, i;
struct blkif_request_segment_aligned *segments = NULL;
nseg = pending_req->nr_pages;
indirect_grefs = INDIRECT_PAGES(nseg);
BUG_ON(indirect_grefs > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);
rc = xen_blkbk_map(blkif, req->u.indirect.indirect_grefs,
persistent, pending_req->indirect_handles,
pages, indirect_grefs, true);
if (rc)
goto unmap;
for (n = 0, i = 0; n < nseg; n++) {
if ((n % SEGS_PER_INDIRECT_FRAME) == 0) {
/* Map indirect segments */
if (segments)
kunmap_atomic(segments);
segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]);
}
i = n % SEGS_PER_INDIRECT_FRAME;
pending_req->grefs[n] = segments[i].gref;
seg[n].nsec = segments[i].last_sect -
segments[i].first_sect + 1;
seg[n].offset = (segments[i].first_sect << 9);
if ((segments[i].last_sect >= (PAGE_SIZE >> 9)) ||
(segments[i].last_sect < segments[i].first_sect)) {
rc = -EINVAL;
goto unmap;
}
preq->nr_sects += seg[n].nsec;
}
return 0; unmap:
if (segments)
kunmap_atomic(segments);
xen_blkbk_unmap(blkif, pending_req->indirect_handles,
pages, persistent, indirect_grefs);
return rc;
} }
static int dispatch_discard_io(struct xen_blkif *blkif, static int dispatch_discard_io(struct xen_blkif *blkif,
@@ -1013,6 +1049,7 @@ __do_block_io_op(struct xen_blkif *blkif)
case BLKIF_OP_WRITE: case BLKIF_OP_WRITE:
case BLKIF_OP_WRITE_BARRIER: case BLKIF_OP_WRITE_BARRIER:
case BLKIF_OP_FLUSH_DISKCACHE: case BLKIF_OP_FLUSH_DISKCACHE:
case BLKIF_OP_INDIRECT:
if (dispatch_rw_block_io(blkif, &req, pending_req)) if (dispatch_rw_block_io(blkif, &req, pending_req))
goto done; goto done;
break; break;
@@ -1059,17 +1096,28 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
struct pending_req *pending_req) struct pending_req *pending_req)
{ {
struct phys_req preq; struct phys_req preq;
struct seg_buf seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; struct seg_buf *seg = pending_req->seg;
unsigned int nseg; unsigned int nseg;
struct bio *bio = NULL; struct bio *bio = NULL;
struct bio *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST]; struct bio **biolist = pending_req->biolist;
int i, nbio = 0; int i, nbio = 0;
int operation; int operation;
struct blk_plug plug; struct blk_plug plug;
bool drain = false; bool drain = false;
struct page **pages = pending_req->pages; struct page **pages = pending_req->pages;
unsigned short req_operation;
req_operation = req->operation == BLKIF_OP_INDIRECT ?
req->u.indirect.indirect_op : req->operation;
if ((req->operation == BLKIF_OP_INDIRECT) &&
(req_operation != BLKIF_OP_READ) &&
(req_operation != BLKIF_OP_WRITE)) {
pr_debug(DRV_PFX "Invalid indirect operation (%u)\n",
req_operation);
goto fail_response;
}
switch (req->operation) { switch (req_operation) {
case BLKIF_OP_READ: case BLKIF_OP_READ:
blkif->st_rd_req++; blkif->st_rd_req++;
operation = READ; operation = READ;
@@ -1091,33 +1139,47 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
} }
/* Check that the number of segments is sane. */ /* Check that the number of segments is sane. */
nseg = req->u.rw.nr_segments; nseg = req->operation == BLKIF_OP_INDIRECT ?
req->u.indirect.nr_segments : req->u.rw.nr_segments;
if (unlikely(nseg == 0 && operation != WRITE_FLUSH) || if (unlikely(nseg == 0 && operation != WRITE_FLUSH) ||
unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) { unlikely((req->operation != BLKIF_OP_INDIRECT) &&
(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) ||
unlikely((req->operation == BLKIF_OP_INDIRECT) &&
(nseg > MAX_INDIRECT_SEGMENTS))) {
pr_debug(DRV_PFX "Bad number of segments in request (%d)\n", pr_debug(DRV_PFX "Bad number of segments in request (%d)\n",
nseg); nseg);
/* Haven't submitted any bio's yet. */ /* Haven't submitted any bio's yet. */
goto fail_response; goto fail_response;
} }
preq.sector_number = req->u.rw.sector_number;
preq.nr_sects = 0; preq.nr_sects = 0;
pending_req->blkif = blkif; pending_req->blkif = blkif;
pending_req->id = req->u.rw.id; pending_req->id = req->u.rw.id;
pending_req->operation = req->operation; pending_req->operation = req_operation;
pending_req->status = BLKIF_RSP_OKAY; pending_req->status = BLKIF_RSP_OKAY;
pending_req->nr_pages = nseg; pending_req->nr_pages = nseg;
if (req->operation != BLKIF_OP_INDIRECT) {
preq.dev = req->u.rw.handle;
preq.sector_number = req->u.rw.sector_number;
for (i = 0; i < nseg; i++) { for (i = 0; i < nseg; i++) {
pending_req->grefs[i] = req->u.rw.seg[i].gref;
seg[i].nsec = req->u.rw.seg[i].last_sect - seg[i].nsec = req->u.rw.seg[i].last_sect -
req->u.rw.seg[i].first_sect + 1; req->u.rw.seg[i].first_sect + 1;
seg[i].offset = (req->u.rw.seg[i].first_sect << 9);
if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) || if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
(req->u.rw.seg[i].last_sect < req->u.rw.seg[i].first_sect)) (req->u.rw.seg[i].last_sect <
req->u.rw.seg[i].first_sect))
goto fail_response; goto fail_response;
preq.nr_sects += seg[i].nsec; preq.nr_sects += seg[i].nsec;
}
} else {
preq.dev = req->u.indirect.handle;
preq.sector_number = req->u.indirect.sector_number;
if (xen_blkbk_parse_indirect(req, pending_req, seg, &preq))
goto fail_response;
} }
if (xen_vbd_translate(&preq, blkif, operation) != 0) { if (xen_vbd_translate(&preq, blkif, operation) != 0) {
@@ -1154,7 +1216,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
* the hypercall to unmap the grants - that is all done in * the hypercall to unmap the grants - that is all done in
* xen_blkbk_unmap. * xen_blkbk_unmap.
*/ */
if (xen_blkbk_map_seg(req, pending_req, seg, pages)) if (xen_blkbk_map_seg(pending_req, seg, pages))
goto fail_flush; goto fail_flush;
/* /*
@@ -1220,7 +1282,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
pending_req->nr_pages); pending_req->nr_pages);
fail_response: fail_response:
/* Haven't submitted any bio's yet. */ /* Haven't submitted any bio's yet. */
make_response(blkif, req->u.rw.id, req->operation, BLKIF_RSP_ERROR); make_response(blkif, req->u.rw.id, req_operation, BLKIF_RSP_ERROR);
free_req(blkif, pending_req); free_req(blkif, pending_req);
msleep(1); /* back off a bit */ msleep(1); /* back off a bit */
return -EIO; return -EIO;
......
@@ -50,6 +50,19 @@
__func__, __LINE__, ##args) __func__, __LINE__, ##args)
/*
* This is the maximum number of segments that would be allowed in indirect
* requests. This value will also be passed to the frontend.
*/
#define MAX_INDIRECT_SEGMENTS 256
#define SEGS_PER_INDIRECT_FRAME \
(PAGE_SIZE/sizeof(struct blkif_request_segment_aligned))
#define MAX_INDIRECT_PAGES \
((MAX_INDIRECT_SEGMENTS + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
#define INDIRECT_PAGES(_segs) \
((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
/* Not a real protocol. Used to generate ring structs which contain /* Not a real protocol. Used to generate ring structs which contain
* the elements common to all protocols only. This way we get a * the elements common to all protocols only. This way we get a
* compiler-checkable way to use common struct elements, so we can * compiler-checkable way to use common struct elements, so we can
@@ -83,12 +96,31 @@ struct blkif_x86_32_request_other {
uint64_t id; /* private guest value, echoed in resp */ uint64_t id; /* private guest value, echoed in resp */
} __attribute__((__packed__)); } __attribute__((__packed__));
struct blkif_x86_32_request_indirect {
uint8_t indirect_op;
uint16_t nr_segments;
uint64_t id;
blkif_sector_t sector_number;
blkif_vdev_t handle;
uint16_t _pad1;
grant_ref_t indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
/*
* The maximum number of indirect segments (and pages) that will
* be used is determined by MAX_INDIRECT_SEGMENTS, this value
* is also exported to the guest (via xenstore
* feature-max-indirect-segments entry), so the frontend knows how
* many indirect segments the backend supports.
*/
uint64_t _pad2; /* make it 64 byte aligned */
} __attribute__((__packed__));
struct blkif_x86_32_request { struct blkif_x86_32_request {
uint8_t operation; /* BLKIF_OP_??? */ uint8_t operation; /* BLKIF_OP_??? */
union { union {
struct blkif_x86_32_request_rw rw; struct blkif_x86_32_request_rw rw;
struct blkif_x86_32_request_discard discard; struct blkif_x86_32_request_discard discard;
struct blkif_x86_32_request_other other; struct blkif_x86_32_request_other other;
struct blkif_x86_32_request_indirect indirect;
} u; } u;
} __attribute__((__packed__)); } __attribute__((__packed__));
@@ -127,12 +159,32 @@ struct blkif_x86_64_request_other {
uint64_t id; /* private guest value, echoed in resp */ uint64_t id; /* private guest value, echoed in resp */
} __attribute__((__packed__)); } __attribute__((__packed__));
struct blkif_x86_64_request_indirect {
uint8_t indirect_op;
uint16_t nr_segments;
uint32_t _pad1; /* offsetof(blkif_..,u.indirect.id)==8 */
uint64_t id;
blkif_sector_t sector_number;
blkif_vdev_t handle;
uint16_t _pad2;
grant_ref_t indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
/*
* The maximum number of indirect segments (and pages) that will
* be used is determined by MAX_INDIRECT_SEGMENTS, this value
* is also exported to the guest (via xenstore
* feature-max-indirect-segments entry), so the frontend knows how
* many indirect segments the backend supports.
*/
uint32_t _pad3; /* make it 64 byte aligned */
} __attribute__((__packed__));
struct blkif_x86_64_request { struct blkif_x86_64_request {
uint8_t operation; /* BLKIF_OP_??? */ uint8_t operation; /* BLKIF_OP_??? */
union { union {
struct blkif_x86_64_request_rw rw; struct blkif_x86_64_request_rw rw;
struct blkif_x86_64_request_discard discard; struct blkif_x86_64_request_discard discard;
struct blkif_x86_64_request_other other; struct blkif_x86_64_request_other other;
struct blkif_x86_64_request_indirect indirect;
} u; } u;
} __attribute__((__packed__)); } __attribute__((__packed__));
@@ -266,6 +318,11 @@ struct xen_blkif {
wait_queue_head_t waiting_to_free; wait_queue_head_t waiting_to_free;
}; };
struct seg_buf {
unsigned long offset;
unsigned int nsec;
};
/* /*
* Each outstanding request that we've passed to the lower device layers has a * Each outstanding request that we've passed to the lower device layers has a
* 'pending_req' allocated to it. Each buffer_head that completes decrements * 'pending_req' allocated to it. Each buffer_head that completes decrements
@@ -280,9 +337,16 @@ struct pending_req {
unsigned short operation; unsigned short operation;
int status; int status;
struct list_head free_list; struct list_head free_list;
struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; struct page *pages[MAX_INDIRECT_SEGMENTS];
struct persistent_gnt *persistent_gnts[BLKIF_MAX_SEGMENTS_PER_REQUEST]; struct persistent_gnt *persistent_gnts[MAX_INDIRECT_SEGMENTS];
grant_handle_t grant_handles[BLKIF_MAX_SEGMENTS_PER_REQUEST]; grant_handle_t grant_handles[MAX_INDIRECT_SEGMENTS];
grant_ref_t grefs[MAX_INDIRECT_SEGMENTS];
/* Indirect descriptors */
struct persistent_gnt *indirect_persistent_gnts[MAX_INDIRECT_PAGES];
struct page *indirect_pages[MAX_INDIRECT_PAGES];
grant_handle_t indirect_handles[MAX_INDIRECT_PAGES];
struct seg_buf seg[MAX_INDIRECT_SEGMENTS];
struct bio *biolist[MAX_INDIRECT_SEGMENTS];
}; };
@@ -321,7 +385,7 @@ struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be);
static inline void blkif_get_x86_32_req(struct blkif_request *dst, static inline void blkif_get_x86_32_req(struct blkif_request *dst,
struct blkif_x86_32_request *src) struct blkif_x86_32_request *src)
{ {
int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST; int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
dst->operation = src->operation; dst->operation = src->operation;
switch (src->operation) { switch (src->operation) {
case BLKIF_OP_READ: case BLKIF_OP_READ:
@@ -344,6 +408,18 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst,
dst->u.discard.sector_number = src->u.discard.sector_number; dst->u.discard.sector_number = src->u.discard.sector_number;
dst->u.discard.nr_sectors = src->u.discard.nr_sectors; dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
break; break;
case BLKIF_OP_INDIRECT:
dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
dst->u.indirect.nr_segments = src->u.indirect.nr_segments;
dst->u.indirect.handle = src->u.indirect.handle;
dst->u.indirect.id = src->u.indirect.id;
dst->u.indirect.sector_number = src->u.indirect.sector_number;
barrier();
j = min(MAX_INDIRECT_PAGES, INDIRECT_PAGES(dst->u.indirect.nr_segments));
for (i = 0; i < j; i++)
dst->u.indirect.indirect_grefs[i] =
src->u.indirect.indirect_grefs[i];
break;
default: default:
/* /*
* Don't know how to translate this op. Only get the * Don't know how to translate this op. Only get the
@@ -357,7 +433,7 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst,
static inline void blkif_get_x86_64_req(struct blkif_request *dst, static inline void blkif_get_x86_64_req(struct blkif_request *dst,
struct blkif_x86_64_request *src) struct blkif_x86_64_request *src)
{ {
int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST; int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
dst->operation = src->operation; dst->operation = src->operation;
switch (src->operation) { switch (src->operation) {
case BLKIF_OP_READ: case BLKIF_OP_READ:
@@ -380,6 +456,18 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst,
dst->u.discard.sector_number = src->u.discard.sector_number; dst->u.discard.sector_number = src->u.discard.sector_number;
dst->u.discard.nr_sectors = src->u.discard.nr_sectors; dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
break; break;
case BLKIF_OP_INDIRECT:
dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
dst->u.indirect.nr_segments = src->u.indirect.nr_segments;
dst->u.indirect.handle = src->u.indirect.handle;
dst->u.indirect.id = src->u.indirect.id;
dst->u.indirect.sector_number = src->u.indirect.sector_number;
barrier();
j = min(MAX_INDIRECT_PAGES, INDIRECT_PAGES(dst->u.indirect.nr_segments));
for (i = 0; i < j; i++)
dst->u.indirect.indirect_grefs[i] =
src->u.indirect.indirect_grefs[i];
break;
default: default:
/* /*
* Don't know how to translate this op. Only get the * Don't know how to translate this op. Only get the
......
@@ -107,6 +107,8 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid)
struct xen_blkif *blkif; struct xen_blkif *blkif;
int i; int i;
BUILD_BUG_ON(MAX_INDIRECT_PAGES > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);
blkif = kmem_cache_zalloc(xen_blkif_cachep, GFP_KERNEL); blkif = kmem_cache_zalloc(xen_blkif_cachep, GFP_KERNEL);
if (!blkif) if (!blkif)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
@@ -709,6 +711,11 @@ static void connect(struct backend_info *be)
dev->nodename); dev->nodename);
goto abort; goto abort;
} }
err = xenbus_printf(xbt, dev->nodename, "feature-max-indirect-segments", "%u",
MAX_INDIRECT_SEGMENTS);
if (err)
dev_warn(&dev->dev, "writing %s/feature-max-indirect-segments (%d)",
dev->nodename, err);
err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu", err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
(unsigned long long)vbd_sz(&be->blkif->vbd)); (unsigned long long)vbd_sz(&be->blkif->vbd));
......
@@ -74,12 +74,27 @@ struct grant {
struct blk_shadow { struct blk_shadow {
struct blkif_request req; struct blkif_request req;
struct request *request; struct request *request;
struct grant *grants_used[BLKIF_MAX_SEGMENTS_PER_REQUEST]; struct grant **grants_used;
struct grant **indirect_grants;
};
struct split_bio {
struct bio *bio;
atomic_t pending;
int err;
}; };
static DEFINE_MUTEX(blkfront_mutex); static DEFINE_MUTEX(blkfront_mutex);
static const struct block_device_operations xlvbd_block_fops; static const struct block_device_operations xlvbd_block_fops;
/*
* Maximum number of segments in indirect requests, the actual value used by
* the frontend driver is the minimum of this value and the value provided
* by the backend driver.
*/
static unsigned int xen_blkif_max_segments = 32;
#define BLK_RING_SIZE __CONST_RING_SIZE(blkif, PAGE_SIZE) #define BLK_RING_SIZE __CONST_RING_SIZE(blkif, PAGE_SIZE)
/* /*
@@ -98,7 +113,7 @@ struct blkfront_info
enum blkif_state connected; enum blkif_state connected;
int ring_ref; int ring_ref;
struct blkif_front_ring ring; struct blkif_front_ring ring;
struct scatterlist sg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; struct scatterlist *sg;
unsigned int evtchn, irq; unsigned int evtchn, irq;
struct request_queue *rq; struct request_queue *rq;
struct work_struct work; struct work_struct work;
@@ -114,6 +129,7 @@ struct blkfront_info
unsigned int discard_granularity; unsigned int discard_granularity;
unsigned int discard_alignment; unsigned int discard_alignment;
unsigned int feature_persistent:1; unsigned int feature_persistent:1;
unsigned int max_indirect_segments;
int is_ready; int is_ready;
}; };
@@ -142,6 +158,13 @@ static DEFINE_SPINLOCK(minor_lock);
#define DEV_NAME "xvd" /* name in /dev */ #define DEV_NAME "xvd" /* name in /dev */
#define SEGS_PER_INDIRECT_FRAME \
(PAGE_SIZE/sizeof(struct blkif_request_segment_aligned))
#define INDIRECT_GREFS(_segs) \
((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
static int blkfront_setup_indirect(struct blkfront_info *info);
static int get_id_from_freelist(struct blkfront_info *info) static int get_id_from_freelist(struct blkfront_info *info)
{ {
unsigned long free = info->shadow_free; unsigned long free = info->shadow_free;
@@ -358,7 +381,8 @@ static int blkif_queue_request(struct request *req)
struct blkif_request *ring_req; struct blkif_request *ring_req;
unsigned long id; unsigned long id;
unsigned int fsect, lsect; unsigned int fsect, lsect;
int i, ref; int i, ref, n;
struct blkif_request_segment_aligned *segments = NULL;
/* /*
* Used to store if we are able to queue the request by just using * Used to store if we are able to queue the request by just using
@@ -369,21 +393,27 @@ static int blkif_queue_request(struct request *req)
grant_ref_t gref_head; grant_ref_t gref_head;
struct grant *gnt_list_entry = NULL; struct grant *gnt_list_entry = NULL;
struct scatterlist *sg; struct scatterlist *sg;
int nseg, max_grefs;
if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
return 1; return 1;
/* Check if we have enought grants to allocate a requests */ max_grefs = info->max_indirect_segments ?
if (info->persistent_gnts_c < BLKIF_MAX_SEGMENTS_PER_REQUEST) { info->max_indirect_segments +
INDIRECT_GREFS(info->max_indirect_segments) :
BLKIF_MAX_SEGMENTS_PER_REQUEST;
/* Check if we have enough grants to allocate a requests */
if (info->persistent_gnts_c < max_grefs) {
new_persistent_gnts = 1; new_persistent_gnts = 1;
if (gnttab_alloc_grant_references( if (gnttab_alloc_grant_references(
BLKIF_MAX_SEGMENTS_PER_REQUEST - info->persistent_gnts_c, max_grefs - info->persistent_gnts_c,
&gref_head) < 0) { &gref_head) < 0) {
gnttab_request_free_callback( gnttab_request_free_callback(
&info->callback, &info->callback,
blkif_restart_queue_callback, blkif_restart_queue_callback,
info, info,
BLKIF_MAX_SEGMENTS_PER_REQUEST); max_grefs);
return 1; return 1;
} }
} else } else
@@ -394,13 +424,39 @@ static int blkif_queue_request(struct request *req)
id = get_id_from_freelist(info); id = get_id_from_freelist(info);
info->shadow[id].request = req; info->shadow[id].request = req;
if (unlikely(req->cmd_flags & (REQ_DISCARD | REQ_SECURE))) {
ring_req->operation = BLKIF_OP_DISCARD;
ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
ring_req->u.discard.id = id;
ring_req->u.discard.sector_number = (blkif_sector_t)blk_rq_pos(req);
if ((req->cmd_flags & REQ_SECURE) && info->feature_secdiscard)
ring_req->u.discard.flag = BLKIF_DISCARD_SECURE;
else
ring_req->u.discard.flag = 0;
} else {
BUG_ON(info->max_indirect_segments == 0 &&
req->nr_phys_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
BUG_ON(info->max_indirect_segments &&
req->nr_phys_segments > info->max_indirect_segments);
nseg = blk_rq_map_sg(req->q, req, info->sg);
ring_req->u.rw.id = id; ring_req->u.rw.id = id;
if (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST) {
/*
* The indirect operation can only be a BLKIF_OP_READ or
* BLKIF_OP_WRITE
*/
BUG_ON(req->cmd_flags & (REQ_FLUSH | REQ_FUA));
ring_req->operation = BLKIF_OP_INDIRECT;
ring_req->u.indirect.indirect_op = rq_data_dir(req) ?
BLKIF_OP_WRITE : BLKIF_OP_READ;
ring_req->u.indirect.sector_number = (blkif_sector_t)blk_rq_pos(req);
ring_req->u.indirect.handle = info->handle;
ring_req->u.indirect.nr_segments = nseg;
} else {
ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req); ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
ring_req->u.rw.handle = info->handle; ring_req->u.rw.handle = info->handle;
ring_req->operation = rq_data_dir(req) ? ring_req->operation = rq_data_dir(req) ?
BLKIF_OP_WRITE : BLKIF_OP_READ; BLKIF_OP_WRITE : BLKIF_OP_READ;
if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) { if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
/* /*
* Ideally we can do an unordered flush-to-disk. In case the * Ideally we can do an unordered flush-to-disk. In case the
@@ -411,25 +467,24 @@ static int blkif_queue_request(struct request *req)
*/ */
ring_req->operation = info->flush_op; ring_req->operation = info->flush_op;
} }
ring_req->u.rw.nr_segments = nseg;
if (unlikely(req->cmd_flags & (REQ_DISCARD | REQ_SECURE))) { }
/* id, sector_number and handle are set above. */ for_each_sg(info->sg, sg, nseg, i) {
ring_req->operation = BLKIF_OP_DISCARD;
ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
if ((req->cmd_flags & REQ_SECURE) && info->feature_secdiscard)
ring_req->u.discard.flag = BLKIF_DISCARD_SECURE;
else
ring_req->u.discard.flag = 0;
} else {
ring_req->u.rw.nr_segments = blk_rq_map_sg(req->q, req,
info->sg);
BUG_ON(ring_req->u.rw.nr_segments >
BLKIF_MAX_SEGMENTS_PER_REQUEST);
for_each_sg(info->sg, sg, ring_req->u.rw.nr_segments, i) {
fsect = sg->offset >> 9; fsect = sg->offset >> 9;
lsect = fsect + (sg->length >> 9) - 1; lsect = fsect + (sg->length >> 9) - 1;
if ((ring_req->operation == BLKIF_OP_INDIRECT) &&
(i % SEGS_PER_INDIRECT_FRAME == 0)) {
if (segments)
kunmap_atomic(segments);
n = i / SEGS_PER_INDIRECT_FRAME;
gnt_list_entry = get_grant(&gref_head, info);
info->shadow[id].indirect_grants[n] = gnt_list_entry;
segments = kmap_atomic(pfn_to_page(gnt_list_entry->pfn));
ring_req->u.indirect.indirect_grefs[n] = gnt_list_entry->gref;
}
gnt_list_entry = get_grant(&gref_head, info); gnt_list_entry = get_grant(&gref_head, info);
ref = gnt_list_entry->gref; ref = gnt_list_entry->gref;
@@ -441,8 +496,7 @@ static int blkif_queue_request(struct request *req)
BUG_ON(sg->offset + sg->length > PAGE_SIZE); BUG_ON(sg->offset + sg->length > PAGE_SIZE);
shared_data = kmap_atomic( shared_data = kmap_atomic(pfn_to_page(gnt_list_entry->pfn));
pfn_to_page(gnt_list_entry->pfn));
bvec_data = kmap_atomic(sg_page(sg)); bvec_data = kmap_atomic(sg_page(sg));
/* /*
@@ -461,14 +515,24 @@ static int blkif_queue_request(struct request *req)
kunmap_atomic(bvec_data); kunmap_atomic(bvec_data);
kunmap_atomic(shared_data); kunmap_atomic(shared_data);
} }
if (ring_req->operation != BLKIF_OP_INDIRECT) {
ring_req->u.rw.seg[i] = ring_req->u.rw.seg[i] =
(struct blkif_request_segment) { (struct blkif_request_segment) {
.gref = ref, .gref = ref,
.first_sect = fsect, .first_sect = fsect,
.last_sect = lsect }; .last_sect = lsect };
} else {
n = i % SEGS_PER_INDIRECT_FRAME;
segments[n] =
(struct blkif_request_segment_aligned) {
.gref = ref,
.first_sect = fsect,
.last_sect = lsect };
} }
} }
if (segments)
kunmap_atomic(segments);
}
info->ring.req_prod_pvt++; info->ring.req_prod_pvt++;
@@ -542,7 +606,8 @@ static void do_blkif_request(struct request_queue *rq)
flush_requests(info); flush_requests(info);
} }
static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size) static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
unsigned int segments)
{ {
struct request_queue *rq; struct request_queue *rq;
struct blkfront_info *info = gd->private_data; struct blkfront_info *info = gd->private_data;
@@ -571,7 +636,7 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
blk_queue_max_segment_size(rq, PAGE_SIZE); blk_queue_max_segment_size(rq, PAGE_SIZE);
/* Ensure a merged request will fit in a single I/O ring slot. */ /* Ensure a merged request will fit in a single I/O ring slot. */
blk_queue_max_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST); blk_queue_max_segments(rq, segments);
/* Make sure buffer addresses are sector-aligned. */ /* Make sure buffer addresses are sector-aligned. */
blk_queue_dma_alignment(rq, 511); blk_queue_dma_alignment(rq, 511);
@@ -588,13 +653,16 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
static void xlvbd_flush(struct blkfront_info *info) static void xlvbd_flush(struct blkfront_info *info)
{ {
blk_queue_flush(info->rq, info->feature_flush); blk_queue_flush(info->rq, info->feature_flush);
printk(KERN_INFO "blkfront: %s: %s: %s %s\n", printk(KERN_INFO "blkfront: %s: %s: %s %s %s %s %s\n",
info->gd->disk_name, info->gd->disk_name,
info->flush_op == BLKIF_OP_WRITE_BARRIER ? info->flush_op == BLKIF_OP_WRITE_BARRIER ?
"barrier" : (info->flush_op == BLKIF_OP_FLUSH_DISKCACHE ? "barrier" : (info->flush_op == BLKIF_OP_FLUSH_DISKCACHE ?
"flush diskcache" : "barrier or flush"), "flush diskcache" : "barrier or flush"),
info->feature_flush ? "enabled" : "disabled", info->feature_flush ? "enabled;" : "disabled;",
info->feature_persistent ? "using persistent grants" : ""); "persistent grants:",
info->feature_persistent ? "enabled;" : "disabled;",
"indirect descriptors:",
info->max_indirect_segments ? "enabled;" : "disabled;");
} }
static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset) static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
@@ -734,7 +802,9 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
gd->driverfs_dev = &(info->xbdev->dev); gd->driverfs_dev = &(info->xbdev->dev);
set_capacity(gd, capacity); set_capacity(gd, capacity);
if (xlvbd_init_blk_queue(gd, sector_size)) { if (xlvbd_init_blk_queue(gd, sector_size,
info->max_indirect_segments ? :
BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
del_gendisk(gd); del_gendisk(gd);
goto release; goto release;
} }
@@ -818,6 +888,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
{ {
struct grant *persistent_gnt; struct grant *persistent_gnt;
struct grant *n; struct grant *n;
int i, j, segs;
/* Prevent new requests being issued until we fix things up. */ /* Prevent new requests being issued until we fix things up. */
spin_lock_irq(&info->io_lock); spin_lock_irq(&info->io_lock);
@@ -843,6 +914,47 @@ static void blkif_free(struct blkfront_info *info, int suspend)
} }
BUG_ON(info->persistent_gnts_c != 0); BUG_ON(info->persistent_gnts_c != 0);
kfree(info->sg);
info->sg = NULL;
for (i = 0; i < BLK_RING_SIZE; i++) {
/*
* Clear persistent grants present in requests already
* on the shared ring
*/
if (!info->shadow[i].request)
goto free_shadow;
segs = info->shadow[i].req.operation == BLKIF_OP_INDIRECT ?
info->shadow[i].req.u.indirect.nr_segments :
info->shadow[i].req.u.rw.nr_segments;
for (j = 0; j < segs; j++) {
persistent_gnt = info->shadow[i].grants_used[j];
gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
__free_page(pfn_to_page(persistent_gnt->pfn));
kfree(persistent_gnt);
}
if (info->shadow[i].req.operation != BLKIF_OP_INDIRECT)
/*
* If this is not an indirect operation don't try to
* free indirect segments
*/
goto free_shadow;
for (j = 0; j < INDIRECT_GREFS(segs); j++) {
persistent_gnt = info->shadow[i].indirect_grants[j];
gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
__free_page(pfn_to_page(persistent_gnt->pfn));
kfree(persistent_gnt);
}
free_shadow:
kfree(info->shadow[i].grants_used);
info->shadow[i].grants_used = NULL;
kfree(info->shadow[i].indirect_grants);
info->shadow[i].indirect_grants = NULL;
}
/* No more gnttab callback work. */ /* No more gnttab callback work. */
gnttab_cancel_free_callback(&info->callback); gnttab_cancel_free_callback(&info->callback);
spin_unlock_irq(&info->io_lock); spin_unlock_irq(&info->io_lock);
@@ -873,6 +985,10 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
char *bvec_data; char *bvec_data;
void *shared_data; void *shared_data;
unsigned int offset = 0; unsigned int offset = 0;
int nseg;
nseg = s->req.operation == BLKIF_OP_INDIRECT ?
s->req.u.indirect.nr_segments : s->req.u.rw.nr_segments;
if (bret->operation == BLKIF_OP_READ) { if (bret->operation == BLKIF_OP_READ) {
/* /*
@@ -885,7 +1001,7 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
BUG_ON((bvec->bv_offset + bvec->bv_len) > PAGE_SIZE); BUG_ON((bvec->bv_offset + bvec->bv_len) > PAGE_SIZE);
if (bvec->bv_offset < offset) if (bvec->bv_offset < offset)
i++; i++;
BUG_ON(i >= s->req.u.rw.nr_segments); BUG_ON(i >= nseg);
shared_data = kmap_atomic( shared_data = kmap_atomic(
pfn_to_page(s->grants_used[i]->pfn)); pfn_to_page(s->grants_used[i]->pfn));
bvec_data = bvec_kmap_irq(bvec, &flags); bvec_data = bvec_kmap_irq(bvec, &flags);
@@ -897,10 +1013,16 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
} }
} }
/* Add the persistent grant into the list of free grants */ /* Add the persistent grant into the list of free grants */
for (i = 0; i < s->req.u.rw.nr_segments; i++) { for (i = 0; i < nseg; i++) {
list_add(&s->grants_used[i]->node, &info->persistent_gnts); list_add(&s->grants_used[i]->node, &info->persistent_gnts);
info->persistent_gnts_c++; info->persistent_gnts_c++;
} }
if (s->req.operation == BLKIF_OP_INDIRECT) {
for (i = 0; i < INDIRECT_GREFS(nseg); i++) {
list_add(&s->indirect_grants[i]->node, &info->persistent_gnts);
info->persistent_gnts_c++;
}
}
} }
static irqreturn_t blkif_interrupt(int irq, void *dev_id) static irqreturn_t blkif_interrupt(int irq, void *dev_id)
@@ -1034,14 +1156,6 @@ static int setup_blkring(struct xenbus_device *dev,
SHARED_RING_INIT(sring); SHARED_RING_INIT(sring);
FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE); FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
sg_init_table(info->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST);
/* Allocate memory for grants */
err = fill_grant_buffer(info, BLK_RING_SIZE *
BLKIF_MAX_SEGMENTS_PER_REQUEST);
if (err)
goto fail;
err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring)); err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
if (err < 0) { if (err < 0) {
free_page((unsigned long)sring); free_page((unsigned long)sring);
@@ -1223,13 +1337,84 @@ static int blkfront_probe(struct xenbus_device *dev,
return 0; return 0;
} }
/*
* This is a clone of md_trim_bio, used to split a bio into smaller ones
*/
static void trim_bio(struct bio *bio, int offset, int size)
{
/* 'bio' is a cloned bio which we need to trim to match
* the given offset and size.
* This requires adjusting bi_sector, bi_size, and bi_io_vec
*/
int i;
struct bio_vec *bvec;
int sofar = 0;
size <<= 9;
if (offset == 0 && size == bio->bi_size)
return;
bio->bi_sector += offset;
bio->bi_size = size;
offset <<= 9;
clear_bit(BIO_SEG_VALID, &bio->bi_flags);
while (bio->bi_idx < bio->bi_vcnt &&
bio->bi_io_vec[bio->bi_idx].bv_len <= offset) {
/* remove this whole bio_vec */
offset -= bio->bi_io_vec[bio->bi_idx].bv_len;
bio->bi_idx++;
}
if (bio->bi_idx < bio->bi_vcnt) {
bio->bi_io_vec[bio->bi_idx].bv_offset += offset;
bio->bi_io_vec[bio->bi_idx].bv_len -= offset;
}
/* avoid any complications with bi_idx being non-zero*/
if (bio->bi_idx) {
memmove(bio->bi_io_vec, bio->bi_io_vec+bio->bi_idx,
(bio->bi_vcnt - bio->bi_idx) * sizeof(struct bio_vec));
bio->bi_vcnt -= bio->bi_idx;
bio->bi_idx = 0;
}
/* Make sure vcnt and last bv are not too big */
bio_for_each_segment(bvec, bio, i) {
if (sofar + bvec->bv_len > size)
bvec->bv_len = size - sofar;
if (bvec->bv_len == 0) {
bio->bi_vcnt = i;
break;
}
sofar += bvec->bv_len;
}
}
static void split_bio_end(struct bio *bio, int error)
{
struct split_bio *split_bio = bio->bi_private;
if (error)
split_bio->err = error;
if (atomic_dec_and_test(&split_bio->pending)) {
split_bio->bio->bi_phys_segments = 0;
bio_endio(split_bio->bio, split_bio->err);
kfree(split_bio);
}
bio_put(bio);
}
static int blkif_recover(struct blkfront_info *info) static int blkif_recover(struct blkfront_info *info)
{ {
int i; int i;
struct blkif_request *req; struct request *req, *n;
struct blk_shadow *copy; struct blk_shadow *copy;
int j; int rc;
struct bio *bio, *cloned_bio;
struct bio_list bio_list, merge_bio;
unsigned int segs, offset;
int pending, size;
struct split_bio *split_bio;
struct list_head requests;
/* Stage 1: Make a safe copy of the shadow state. */ /* Stage 1: Make a safe copy of the shadow state. */
copy = kmemdup(info->shadow, sizeof(info->shadow), copy = kmemdup(info->shadow, sizeof(info->shadow),
@@ -1244,36 +1429,64 @@ static int blkif_recover(struct blkfront_info *info)
info->shadow_free = info->ring.req_prod_pvt; info->shadow_free = info->ring.req_prod_pvt;
info->shadow[BLK_RING_SIZE-1].req.u.rw.id = 0x0fffffff; info->shadow[BLK_RING_SIZE-1].req.u.rw.id = 0x0fffffff;
/* Stage 3: Find pending requests and requeue them. */ rc = blkfront_setup_indirect(info);
if (rc) {
kfree(copy);
return rc;
}
segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST;
blk_queue_max_segments(info->rq, segs);
bio_list_init(&bio_list);
INIT_LIST_HEAD(&requests);
for (i = 0; i < BLK_RING_SIZE; i++) { for (i = 0; i < BLK_RING_SIZE; i++) {
/* Not in use? */ /* Not in use? */
if (!copy[i].request) if (!copy[i].request)
continue; continue;
/* Grab a request slot and copy shadow state into it. */ /*
req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt); * Get the bios in the request so we can re-queue them.
*req = copy[i].req; */
if (copy[i].request->cmd_flags &
/* We get a new request id, and must reset the shadow state. */ (REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) {
req->u.rw.id = get_id_from_freelist(info); /*
memcpy(&info->shadow[req->u.rw.id], &copy[i], sizeof(copy[i])); * Flush operations don't contain bios, so
* we need to requeue the whole request
if (req->operation != BLKIF_OP_DISCARD) { */
/* Rewrite any grant references invalidated by susp/resume. */ list_add(&copy[i].request->queuelist, &requests);
for (j = 0; j < req->u.rw.nr_segments; j++) continue;
gnttab_grant_foreign_access_ref(
req->u.rw.seg[j].gref,
info->xbdev->otherend_id,
pfn_to_mfn(copy[i].grants_used[j]->pfn),
0);
} }
info->shadow[req->u.rw.id].req = *req; merge_bio.head = copy[i].request->bio;
merge_bio.tail = copy[i].request->biotail;
info->ring.req_prod_pvt++; bio_list_merge(&bio_list, &merge_bio);
copy[i].request->bio = NULL;
blk_put_request(copy[i].request);
} }
kfree(copy); kfree(copy);
/*
* Empty the queue, this is important because we might have
* requests in the queue with more segments than what we
* can handle now.
*/
spin_lock_irq(&info->io_lock);
while ((req = blk_fetch_request(info->rq)) != NULL) {
if (req->cmd_flags &
(REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) {
list_add(&req->queuelist, &requests);
continue;
}
merge_bio.head = req->bio;
merge_bio.tail = req->biotail;
bio_list_merge(&bio_list, &merge_bio);
req->bio = NULL;
if (req->cmd_flags & (REQ_FLUSH | REQ_FUA))
pr_alert("diskcache flush request found!\n");
__blk_put_request(info->rq, req);
}
spin_unlock_irq(&info->io_lock);
xenbus_switch_state(info->xbdev, XenbusStateConnected); xenbus_switch_state(info->xbdev, XenbusStateConnected);
spin_lock_irq(&info->io_lock); spin_lock_irq(&info->io_lock);
@@ -1281,14 +1494,50 @@ static int blkif_recover(struct blkfront_info *info)
/* Now safe for us to use the shared ring */ /* Now safe for us to use the shared ring */
info->connected = BLKIF_STATE_CONNECTED; info->connected = BLKIF_STATE_CONNECTED;
/* Send off requeued requests */
flush_requests(info);
/* Kick any other new requests queued since we resumed */ /* Kick any other new requests queued since we resumed */
kick_pending_request_queues(info); kick_pending_request_queues(info);
list_for_each_entry_safe(req, n, &requests, queuelist) {
/* Requeue pending requests (flush or discard) */
list_del_init(&req->queuelist);
BUG_ON(req->nr_phys_segments > segs);
blk_requeue_request(info->rq, req);
}
spin_unlock_irq(&info->io_lock); spin_unlock_irq(&info->io_lock);
while ((bio = bio_list_pop(&bio_list)) != NULL) {
/* Traverse the list of pending bios and re-queue them */
if (bio_segments(bio) > segs) {
/*
* This bio has more segments than what we can
* handle, we have to split it.
*/
pending = (bio_segments(bio) + segs - 1) / segs;
split_bio = kzalloc(sizeof(*split_bio), GFP_NOIO);
BUG_ON(split_bio == NULL);
atomic_set(&split_bio->pending, pending);
split_bio->bio = bio;
for (i = 0; i < pending; i++) {
offset = (i * segs * PAGE_SIZE) >> 9;
size = min((unsigned int)(segs * PAGE_SIZE) >> 9,
(unsigned int)(bio->bi_size >> 9) - offset);
cloned_bio = bio_clone(bio, GFP_NOIO);
BUG_ON(cloned_bio == NULL);
trim_bio(cloned_bio, offset, size);
cloned_bio->bi_private = split_bio;
cloned_bio->bi_end_io = split_bio_end;
submit_bio(cloned_bio->bi_rw, cloned_bio);
}
/*
* Now we have to wait for all those smaller bios to
* end, so we can also end the "parent" bio.
*/
continue;
}
/* We don't need to split this bio */
submit_bio(bio->bi_rw, bio);
}
return 0; return 0;
} }
@@ -1308,8 +1557,12 @@ static int blkfront_resume(struct xenbus_device *dev)
blkif_free(info, info->connected == BLKIF_STATE_CONNECTED); blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
err = talk_to_blkback(dev, info); err = talk_to_blkback(dev, info);
if (info->connected == BLKIF_STATE_SUSPENDED && !err)
err = blkif_recover(info); /*
* We have to wait for the backend to switch to
* connected state, since we want to read which
* features it supports.
*/
return err; return err;
} }
@@ -1387,6 +1640,61 @@ static void blkfront_setup_discard(struct blkfront_info *info)
kfree(type); kfree(type);
} }
static int blkfront_setup_indirect(struct blkfront_info *info)
{
unsigned int indirect_segments, segs;
int err, i;
err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
"feature-max-indirect-segments", "%u", &indirect_segments,
NULL);
if (err) {
info->max_indirect_segments = 0;
segs = BLKIF_MAX_SEGMENTS_PER_REQUEST;
} else {
info->max_indirect_segments = min(indirect_segments,
xen_blkif_max_segments);
segs = info->max_indirect_segments;
}
info->sg = kzalloc(sizeof(info->sg[0]) * segs, GFP_KERNEL);
if (info->sg == NULL)
goto out_of_memory;
sg_init_table(info->sg, segs);
err = fill_grant_buffer(info, (segs + INDIRECT_GREFS(segs)) * BLK_RING_SIZE);
if (err)
goto out_of_memory;
for (i = 0; i < BLK_RING_SIZE; i++) {
info->shadow[i].grants_used = kzalloc(
sizeof(info->shadow[i].grants_used[0]) * segs,
GFP_NOIO);
if (info->max_indirect_segments)
info->shadow[i].indirect_grants = kzalloc(
sizeof(info->shadow[i].indirect_grants[0]) *
INDIRECT_GREFS(segs),
GFP_NOIO);
if ((info->shadow[i].grants_used == NULL) ||
(info->max_indirect_segments &&
(info->shadow[i].indirect_grants == NULL)))
goto out_of_memory;
}
return 0;
out_of_memory:
kfree(info->sg);
info->sg = NULL;
for (i = 0; i < BLK_RING_SIZE; i++) {
kfree(info->shadow[i].grants_used);
info->shadow[i].grants_used = NULL;
kfree(info->shadow[i].indirect_grants);
info->shadow[i].indirect_grants = NULL;
}
return -ENOMEM;
}
/* /*
* Invoked when the backend is finally 'ready' (and has told produced * Invoked when the backend is finally 'ready' (and has told produced
* the details about the physical device - #sectors, size, etc). * the details about the physical device - #sectors, size, etc).
@@ -1414,8 +1722,15 @@ static void blkfront_connect(struct blkfront_info *info)
set_capacity(info->gd, sectors); set_capacity(info->gd, sectors);
revalidate_disk(info->gd); revalidate_disk(info->gd);
/* fall through */ return;
case BLKIF_STATE_SUSPENDED: case BLKIF_STATE_SUSPENDED:
/*
* If we are recovering from suspension, we need to wait
* for the backend to announce it's features before
* reconnecting, at least we need to know if the backend
* supports indirect descriptors, and how many.
*/
blkif_recover(info);
return; return;
default: default:
@@ -1483,6 +1798,13 @@ static void blkfront_connect(struct blkfront_info *info)
else else
info->feature_persistent = persistent; info->feature_persistent = persistent;
err = blkfront_setup_indirect(info);
if (err) {
xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s",
info->xbdev->otherend);
return;
}
err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size); err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size);
if (err) { if (err) {
xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s", xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
......
@@ -102,6 +102,30 @@ typedef uint64_t blkif_sector_t;
*/ */
#define BLKIF_OP_DISCARD 5 #define BLKIF_OP_DISCARD 5
/*
* Recognized if "feature-max-indirect-segments" is present in the backend
* xenbus info. The "feature-max-indirect-segments" node contains the maximum
* number of segments allowed by the backend per request. If the node is
* present, the frontend might use blkif_request_indirect structs in order to
* issue requests with more than BLKIF_MAX_SEGMENTS_PER_REQUEST (11). The
* maximum number of indirect segments is fixed by the backend, but the
* frontend can issue requests with any number of indirect segments as long as
* it's less than the number provided by the backend. The indirect_grefs field
* in blkif_request_indirect should be filled by the frontend with the
* grant references of the pages that are holding the indirect segments.
* These pages are filled with an array of blkif_request_segment_aligned
* that hold the information about the segments. The number of indirect
* pages to use is determined by the maximum number of segments
* an indirect request contains. Every indirect page can contain a maximum
* of 512 segments (PAGE_SIZE/sizeof(blkif_request_segment_aligned)),
* so to calculate the number of indirect pages to use we have to do
* ceil(indirect_segments/512).
*
* If a backend does not recognize BLKIF_OP_INDIRECT, it should *not*
* create the "feature-max-indirect-segments" node!
*/
#define BLKIF_OP_INDIRECT 6
/* /*
* Maximum scatter/gather segments per request. * Maximum scatter/gather segments per request.
* This is carefully chosen so that sizeof(struct blkif_ring) <= PAGE_SIZE. * This is carefully chosen so that sizeof(struct blkif_ring) <= PAGE_SIZE.
@@ -109,6 +133,16 @@ typedef uint64_t blkif_sector_t;
*/ */
#define BLKIF_MAX_SEGMENTS_PER_REQUEST 11 #define BLKIF_MAX_SEGMENTS_PER_REQUEST 11
#define BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST 8
struct blkif_request_segment_aligned {
grant_ref_t gref; /* reference to I/O buffer frame */
/* @first_sect: first sector in frame to transfer (inclusive). */
/* @last_sect: last sector in frame to transfer (inclusive). */
uint8_t first_sect, last_sect;
uint16_t _pad; /* padding to make it 8 bytes, so it's cache-aligned */
} __attribute__((__packed__));
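As a quick standalone sanity check (not part of the patch), the aligned segment
entry is intended to be exactly 8 bytes so that 512 of them fill a 4 KiB page; a
hypothetical compile-time check, assuming grant_ref_t is a 32-bit value as in the
Xen public headers:

#include <stdint.h>

typedef uint32_t grant_ref_t;	/* assumption: 32-bit grant reference */

struct blkif_request_segment_aligned {
	grant_ref_t gref;		/* reference to I/O buffer frame */
	uint8_t first_sect, last_sect;	/* first/last sector in frame (inclusive) */
	uint16_t _pad;			/* pad the entry to 8 bytes */
} __attribute__((__packed__));

/* 4096 / 8 = 512 entries per indirect page */
_Static_assert(sizeof(struct blkif_request_segment_aligned) == 8,
	       "segment entry must stay 8 bytes");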
struct blkif_request_rw { struct blkif_request_rw {
uint8_t nr_segments; /* number of segments */ uint8_t nr_segments; /* number of segments */
blkif_vdev_t handle; /* only for read/write requests */ blkif_vdev_t handle; /* only for read/write requests */
@@ -147,12 +181,31 @@ struct blkif_request_other {
uint64_t id; /* private guest value, echoed in resp */ uint64_t id; /* private guest value, echoed in resp */
} __attribute__((__packed__)); } __attribute__((__packed__));
struct blkif_request_indirect {
uint8_t indirect_op;
uint16_t nr_segments;
#ifdef CONFIG_X86_64
uint32_t _pad1; /* offsetof(blkif_...,u.indirect.id) == 8 */
#endif
uint64_t id;
blkif_sector_t sector_number;
blkif_vdev_t handle;
uint16_t _pad2;
grant_ref_t indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
#ifdef CONFIG_X86_64
uint32_t _pad3; /* make it 64 byte aligned */
#else
uint64_t _pad3; /* make it 64 byte aligned */
#endif
} __attribute__((__packed__));
struct blkif_request { struct blkif_request {
uint8_t operation; /* BLKIF_OP_??? */ uint8_t operation; /* BLKIF_OP_??? */
union { union {
struct blkif_request_rw rw; struct blkif_request_rw rw;
struct blkif_request_discard discard; struct blkif_request_discard discard;
struct blkif_request_other other; struct blkif_request_other other;
struct blkif_request_indirect indirect;
} u; } u;
} __attribute__((__packed__)); } __attribute__((__packed__));
......