Commit 8f5a695d authored by Juergen Gross

xen/blkfront: don't take local copy of a request from the ring page

In order to avoid a malicious backend being able to influence the local
copy of a request, build the request locally first and then copy it to
the ring page, instead of doing it the other way round as today.
Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Roger Pau Monné <roger.pau@citrix.com>
Link: https://lore.kernel.org/r/20210730103854.12681-3-jgross@suse.com
Signed-off-by: Juergen Gross <jgross@suse.com>
parent 71b66243
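Every request-queuing path in the patch follows the same pattern: the request is now assembled in the driver's private shadow copy (rinfo->shadow[id].req) and copied to the shared ring page only as the last step, instead of being built in the ring page and copied back into the shadow afterwards. Below is a minimal stand-alone C sketch of that ordering change; the toy blkif_request_t type, the names ring_slot/shadow_req and the queue_req_old()/queue_req_new() helpers are illustrative stand-ins, not code from the driver.

    /*
     * Minimal sketch of the ordering change, not the real driver code:
     * blkif_request_t, ring_slot and shadow_req are simplified stand-ins
     * for struct blkif_request, the request slot in the shared ring page
     * and rinfo->shadow[id].req.
     */
    #include <stdio.h>

    typedef struct {
            unsigned char operation;
            unsigned long id;
            unsigned long nr_sectors;
    } blkif_request_t;              /* toy stand-in for struct blkif_request */

    /* Old flow: the request was assembled directly in the shared ring slot
     * and only afterwards copied into the private shadow.  A backend writing
     * to the slot between those steps could corrupt the local copy. */
    static void queue_req_old(blkif_request_t *ring_slot, blkif_request_t *shadow_req)
    {
            ring_slot->operation = 1;       /* built in shared memory ... */
            ring_slot->id = 42;
            ring_slot->nr_sectors = 8;
            *shadow_req = *ring_slot;       /* ... then copied back: too late */
    }

    /* New flow (this commit): the request is assembled in the private shadow
     * first and copied to the ring slot in one final assignment, so the local
     * copy is never exposed to the backend while it is being built. */
    static void queue_req_new(blkif_request_t *ring_slot, blkif_request_t *shadow_req)
    {
            shadow_req->operation = 1;      /* built in frontend-private memory */
            shadow_req->id = 42;
            shadow_req->nr_sectors = 8;
            *ring_slot = *shadow_req;       /* single copy into the shared ring */
    }

    int main(void)
    {
            blkif_request_t ring_slot = {0}, shadow = {0};

            queue_req_old(&ring_slot, &shadow);     /* pre-patch ordering */
            queue_req_new(&ring_slot, &shadow);     /* post-patch ordering */
            printf("shadow copy: id=%lu nr_sectors=%lu\n", shadow.id, shadow.nr_sectors);
            return 0;
    }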
@@ -533,7 +533,7 @@ static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo,
 	rinfo->shadow[id].status = REQ_WAITING;
 	rinfo->shadow[id].associated_id = NO_ASSOCIATED_ID;
 
-	(*ring_req)->u.rw.id = id;
+	rinfo->shadow[id].req.u.rw.id = id;
 
 	return id;
 }
@@ -541,11 +541,12 @@ static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo,
 static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_info *rinfo)
 {
 	struct blkfront_info *info = rinfo->dev_info;
-	struct blkif_request *ring_req;
+	struct blkif_request *ring_req, *final_ring_req;
 	unsigned long id;
 
 	/* Fill out a communications ring structure. */
-	id = blkif_ring_get_request(rinfo, req, &ring_req);
+	id = blkif_ring_get_request(rinfo, req, &final_ring_req);
+	ring_req = &rinfo->shadow[id].req;
 
 	ring_req->operation = BLKIF_OP_DISCARD;
 	ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
@@ -556,8 +557,8 @@ static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_info *rinfo)
 	else
 		ring_req->u.discard.flag = 0;
 
-	/* Keep a private copy so we can reissue requests when recovering. */
-	rinfo->shadow[id].req = *ring_req;
+	/* Copy the request to the ring page. */
+	*final_ring_req = *ring_req;
 
 	return 0;
 }
@@ -690,6 +691,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *rinfo)
 {
 	struct blkfront_info *info = rinfo->dev_info;
 	struct blkif_request *ring_req, *extra_ring_req = NULL;
+	struct blkif_request *final_ring_req, *final_extra_ring_req = NULL;
 	unsigned long id, extra_id = NO_ASSOCIATED_ID;
 	bool require_extra_req = false;
 	int i;
@@ -734,7 +736,8 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *rinfo)
 	}
 
 	/* Fill out a communications ring structure. */
-	id = blkif_ring_get_request(rinfo, req, &ring_req);
+	id = blkif_ring_get_request(rinfo, req, &final_ring_req);
+	ring_req = &rinfo->shadow[id].req;
 
 	num_sg = blk_rq_map_sg(req->q, req, rinfo->shadow[id].sg);
 	num_grant = 0;
@@ -785,7 +788,9 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *rinfo)
 		ring_req->u.rw.nr_segments = num_grant;
 		if (unlikely(require_extra_req)) {
 			extra_id = blkif_ring_get_request(rinfo, req,
-							  &extra_ring_req);
+							  &final_extra_ring_req);
+			extra_ring_req = &rinfo->shadow[extra_id].req;
+
 			/*
 			 * Only the first request contains the scatter-gather
 			 * list.
@@ -827,10 +832,10 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *rinfo)
 	if (setup.segments)
 		kunmap_atomic(setup.segments);
 
-	/* Keep a private copy so we can reissue requests when recovering. */
-	rinfo->shadow[id].req = *ring_req;
+	/* Copy request(s) to the ring page. */
+	*final_ring_req = *ring_req;
 	if (unlikely(require_extra_req))
-		rinfo->shadow[extra_id].req = *extra_ring_req;
+		*final_extra_ring_req = *extra_ring_req;
 
 	if (new_persistent_gnts)
 		gnttab_free_grant_references(setup.gref_head);