Commit 6684fa1c authored by Julien Grall, committed by David Vrabel

block/xen-blkback: s/nr_pages/nr_segs/

Make the code less confusing to read now that Linux may not have the
same page size as Xen.
Signed-off-by: Julien Grall <julien.grall@citrix.com>
Acked-by: Roger Pau Monné <roger.pau@citrix.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
parent ee4b7179
...@@ -729,7 +729,7 @@ static void xen_blkbk_unmap_and_respond(struct pending_req *req) ...@@ -729,7 +729,7 @@ static void xen_blkbk_unmap_and_respond(struct pending_req *req)
struct grant_page **pages = req->segments; struct grant_page **pages = req->segments;
unsigned int invcount; unsigned int invcount;
invcount = xen_blkbk_unmap_prepare(blkif, pages, req->nr_pages, invcount = xen_blkbk_unmap_prepare(blkif, pages, req->nr_segs,
req->unmap, req->unmap_pages); req->unmap, req->unmap_pages);
work->data = req; work->data = req;
...@@ -915,7 +915,7 @@ static int xen_blkbk_map_seg(struct pending_req *pending_req) ...@@ -915,7 +915,7 @@ static int xen_blkbk_map_seg(struct pending_req *pending_req)
int rc; int rc;
rc = xen_blkbk_map(pending_req->blkif, pending_req->segments, rc = xen_blkbk_map(pending_req->blkif, pending_req->segments,
pending_req->nr_pages, pending_req->nr_segs,
(pending_req->operation != BLKIF_OP_READ)); (pending_req->operation != BLKIF_OP_READ));
return rc; return rc;
...@@ -931,7 +931,7 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req, ...@@ -931,7 +931,7 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
int indirect_grefs, rc, n, nseg, i; int indirect_grefs, rc, n, nseg, i;
struct blkif_request_segment *segments = NULL; struct blkif_request_segment *segments = NULL;
nseg = pending_req->nr_pages; nseg = pending_req->nr_segs;
indirect_grefs = INDIRECT_PAGES(nseg); indirect_grefs = INDIRECT_PAGES(nseg);
BUG_ON(indirect_grefs > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST); BUG_ON(indirect_grefs > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);
...@@ -1251,7 +1251,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif, ...@@ -1251,7 +1251,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
pending_req->id = req->u.rw.id; pending_req->id = req->u.rw.id;
pending_req->operation = req_operation; pending_req->operation = req_operation;
pending_req->status = BLKIF_RSP_OKAY; pending_req->status = BLKIF_RSP_OKAY;
pending_req->nr_pages = nseg; pending_req->nr_segs = nseg;
if (req->operation != BLKIF_OP_INDIRECT) { if (req->operation != BLKIF_OP_INDIRECT) {
preq.dev = req->u.rw.handle; preq.dev = req->u.rw.handle;
...@@ -1372,7 +1372,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif, ...@@ -1372,7 +1372,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
fail_flush: fail_flush:
xen_blkbk_unmap(blkif, pending_req->segments, xen_blkbk_unmap(blkif, pending_req->segments,
pending_req->nr_pages); pending_req->nr_segs);
fail_response: fail_response:
/* Haven't submitted any bio's yet. */ /* Haven't submitted any bio's yet. */
make_response(blkif, req->u.rw.id, req_operation, BLKIF_RSP_ERROR); make_response(blkif, req->u.rw.id, req_operation, BLKIF_RSP_ERROR);
......
...@@ -343,7 +343,7 @@ struct grant_page { ...@@ -343,7 +343,7 @@ struct grant_page {
struct pending_req { struct pending_req {
struct xen_blkif *blkif; struct xen_blkif *blkif;
u64 id; u64 id;
int nr_pages; int nr_segs;
atomic_t pendcnt; atomic_t pendcnt;
unsigned short operation; unsigned short operation;
int status; int status;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment