Commit 33204663 authored by Julien Grall, committed by David Vrabel

block/xen-blkfront: Split blkif_queue_request in 2

Currently, blkif_queue_request has 2 distinct execution paths:
    - Send a discard request
    - Send a read/write request

The function also allocates grants to use for generating the
request, although these are only needed for read/write requests.

Rather than having one function with 2 distinct execution paths,
split it in 2. This also removes one level of indentation.
Signed-off-by: Julien Grall <julien.grall@citrix.com>
Reviewed-by: Roger Pau Monné <roger.pau@citrix.com>
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
parent 3922f32c
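In outline, the patch turns blkif_queue_request into a thin dispatcher over the two new helpers. A condensed sketch of the new entry point, taken from the hunk added near the end of the diff below (helper bodies elided):

static int blkif_queue_request(struct request *req)
{
        struct blkfront_info *info = req->rq_disk->private_data;

        /* Nothing can be queued until the backend connection is up. */
        if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
                return 1;

        /*
         * Discard (and secure discard) requests take the dedicated
         * path; everything else goes through the read/write path.
         */
        if (unlikely(req->cmd_flags & (REQ_DISCARD | REQ_SECURE)))
                return blkif_queue_discard_req(req);
        else
                return blkif_queue_rw_req(req);
}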
@@ -394,13 +394,35 @@ static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
return 0;
}
/*
* Generate a Xen blkfront IO request from a blk layer request. Reads
* and writes are handled as expected.
*
* @req: a request struct
*/
static int blkif_queue_request(struct request *req)
static int blkif_queue_discard_req(struct request *req)
{
struct blkfront_info *info = req->rq_disk->private_data;
struct blkif_request *ring_req;
unsigned long id;
/* Fill out a communications ring structure. */
ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
id = get_id_from_freelist(info);
info->shadow[id].request = req;
ring_req->operation = BLKIF_OP_DISCARD;
ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
ring_req->u.discard.id = id;
ring_req->u.discard.sector_number = (blkif_sector_t)blk_rq_pos(req);
if ((req->cmd_flags & REQ_SECURE) && info->feature_secdiscard)
ring_req->u.discard.flag = BLKIF_DISCARD_SECURE;
else
ring_req->u.discard.flag = 0;
info->ring.req_prod_pvt++;
/* Keep a private copy so we can reissue requests when recovering. */
info->shadow[id].req = *ring_req;
return 0;
}
static int blkif_queue_rw_req(struct request *req)
{
struct blkfront_info *info = req->rq_disk->private_data;
struct blkif_request *ring_req;
@@ -420,9 +442,6 @@ static int blkif_queue_request(struct request *req)
struct scatterlist *sg;
int nseg, max_grefs;
if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
return 1;
max_grefs = req->nr_phys_segments;
if (max_grefs > BLKIF_MAX_SEGMENTS_PER_REQUEST)
/*
@@ -452,16 +471,6 @@ static int blkif_queue_request(struct request *req)
id = get_id_from_freelist(info);
info->shadow[id].request = req;
if (unlikely(req->cmd_flags & (REQ_DISCARD | REQ_SECURE))) {
ring_req->operation = BLKIF_OP_DISCARD;
ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
ring_req->u.discard.id = id;
ring_req->u.discard.sector_number = (blkif_sector_t)blk_rq_pos(req);
if ((req->cmd_flags & REQ_SECURE) && info->feature_secdiscard)
ring_req->u.discard.flag = BLKIF_DISCARD_SECURE;
else
ring_req->u.discard.flag = 0;
} else {
BUG_ON(info->max_indirect_segments == 0 &&
req->nr_phys_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
BUG_ON(info->max_indirect_segments &&
@@ -487,11 +496,11 @@ static int blkif_queue_request(struct request *req)
BLKIF_OP_WRITE : BLKIF_OP_READ;
if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
/*
* Ideally we can do an unordered flush-to-disk. In case the
* backend onlysupports barriers, use that. A barrier request
* a superset of FUA, so we can implement it the same
* way. (It's also a FLUSH+FUA, since it is
* guaranteed ordered WRT previous writes.)
* Ideally we can do an unordered flush-to-disk.
* In case the backend onlysupports barriers, use that.
* A barrier request a superset of FUA, so we can
* implement it the same way. (It's also a FLUSH+FUA,
* since it is guaranteed ordered WRT previous writes.)
*/
switch (info->feature_flush &
((REQ_FLUSH|REQ_FUA))) {
@@ -524,7 +533,10 @@ static int blkif_queue_request(struct request *req)
if (!info->feature_persistent) {
struct page *indirect_page;
/* Fetch a pre-allocated page to use for indirect grefs */
/*
* Fetch a pre-allocated page to use for
* indirect grefs
*/
BUG_ON(list_empty(&info->indirect_pages));
indirect_page = list_first_entry(&info->indirect_pages,
struct page, lru);
@@ -584,7 +596,6 @@ static int blkif_queue_request(struct request *req)
}
if (segments)
kunmap_atomic(segments);
}
info->ring.req_prod_pvt++;
@@ -597,6 +608,24 @@ static int blkif_queue_request(struct request *req)
return 0;
}
/*
* Generate a Xen blkfront IO request from a blk layer request. Reads
* and writes are handled as expected.
*
* @req: a request struct
*/
static int blkif_queue_request(struct request *req)
{
struct blkfront_info *info = req->rq_disk->private_data;
if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
return 1;
if (unlikely(req->cmd_flags & (REQ_DISCARD | REQ_SECURE)))
return blkif_queue_discard_req(req);
else
return blkif_queue_rw_req(req);
}
static inline void flush_requests(struct blkfront_info *info)
{