Commit 0e367ae4 authored by David Vrabel, committed by Konrad Rzeszutek Wilk

xen/blkback: correctly respond to unknown, non-native requests

If the frontend is using a non-native protocol (e.g., a 64-bit
frontend with a 32-bit backend) and it sent an unrecognized request,
the request was not translated and the response would have the
incorrect ID.  This may cause the frontend driver to behave
incorrectly or crash.

Since the ID field in the request is always in the same place,
regardless of the request type we can get the correct ID and make a
valid response (which will report BLKIF_RSP_EOPNOTSUPP).

This bug affected 64-bit SLES 11 guests when using a 32-bit backend.
This guest does a BLKIF_OP_RESERVED_1 (BLKIF_OP_PACKET in the SLES
source) and would crash in blkif_int() as the ID in the response would
be invalid.
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
Cc: stable@vger.kernel.org
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
parent a72d9002
...@@ -679,6 +679,16 @@ static int dispatch_discard_io(struct xen_blkif *blkif, ...@@ -679,6 +679,16 @@ static int dispatch_discard_io(struct xen_blkif *blkif,
return err; return err;
} }
/*
 * Handle a request whose operation code this backend does not
 * recognize (e.g. a frontend-specific op such as SLES's
 * BLKIF_OP_PACKET).  Responds to the frontend with
 * BLKIF_RSP_EOPNOTSUPP using the request's ID, which is readable via
 * u.other.id regardless of the (unknown) request type.
 *
 * Takes ownership of @pending_req and frees it.  Returns -EIO to tell
 * the caller the request was not dispatched.
 */
static int dispatch_other_io(struct xen_blkif *blkif,
struct blkif_request *req,
struct pending_req *pending_req)
{
/* No I/O will be issued, so release the pending slot first. */
free_req(pending_req);
/* Echo the guest-supplied ID so the frontend can match the response. */
make_response(blkif, req->u.other.id, req->operation,
BLKIF_RSP_EOPNOTSUPP);
return -EIO;
}
static void xen_blk_drain_io(struct xen_blkif *blkif) static void xen_blk_drain_io(struct xen_blkif *blkif)
{ {
atomic_set(&blkif->drain, 1); atomic_set(&blkif->drain, 1);
...@@ -800,17 +810,30 @@ __do_block_io_op(struct xen_blkif *blkif) ...@@ -800,17 +810,30 @@ __do_block_io_op(struct xen_blkif *blkif)
/* Apply all sanity checks to /private copy/ of request. */ /* Apply all sanity checks to /private copy/ of request. */
barrier(); barrier();
if (unlikely(req.operation == BLKIF_OP_DISCARD)) {
switch (req.operation) {
case BLKIF_OP_READ:
case BLKIF_OP_WRITE:
case BLKIF_OP_WRITE_BARRIER:
case BLKIF_OP_FLUSH_DISKCACHE:
if (dispatch_rw_block_io(blkif, &req, pending_req))
goto done;
break;
case BLKIF_OP_DISCARD:
free_req(pending_req); free_req(pending_req);
if (dispatch_discard_io(blkif, &req)) if (dispatch_discard_io(blkif, &req))
goto done;
break; break;
} else if (dispatch_rw_block_io(blkif, &req, pending_req)) default:
if (dispatch_other_io(blkif, &req, pending_req))
goto done;
break; break;
}
/* Yield point for this unbounded loop. */ /* Yield point for this unbounded loop. */
cond_resched(); cond_resched();
} }
done:
return more_to_do; return more_to_do;
} }
......
...@@ -77,11 +77,18 @@ struct blkif_x86_32_request_discard { ...@@ -77,11 +77,18 @@ struct blkif_x86_32_request_discard {
uint64_t nr_sectors; uint64_t nr_sectors;
} __attribute__((__packed__)); } __attribute__((__packed__));
/*
 * Catch-all view of a 32-bit frontend request whose operation is not
 * understood by the backend.  The pad fields mirror the leading fields
 * of the other request variants so that 'id' lands at the same byte
 * offset as the id field of every known request type, letting the
 * backend fetch a valid ID for an EOPNOTSUPP response.
 */
struct blkif_x86_32_request_other {
uint8_t _pad1;
blkif_vdev_t _pad2;
uint64_t id; /* private guest value, echoed in resp */
} __attribute__((__packed__));
struct blkif_x86_32_request { struct blkif_x86_32_request {
uint8_t operation; /* BLKIF_OP_??? */ uint8_t operation; /* BLKIF_OP_??? */
union { union {
struct blkif_x86_32_request_rw rw; struct blkif_x86_32_request_rw rw;
struct blkif_x86_32_request_discard discard; struct blkif_x86_32_request_discard discard;
struct blkif_x86_32_request_other other;
} u; } u;
} __attribute__((__packed__)); } __attribute__((__packed__));
...@@ -113,11 +120,19 @@ struct blkif_x86_64_request_discard { ...@@ -113,11 +120,19 @@ struct blkif_x86_64_request_discard {
uint64_t nr_sectors; uint64_t nr_sectors;
} __attribute__((__packed__)); } __attribute__((__packed__));
/*
 * Catch-all view of a 64-bit frontend request whose operation is not
 * understood by the backend.  The pad fields reproduce the 64-bit ABI
 * layout so that 'id' sits at the same offset as in the known request
 * variants (offset 8 within the union, per the _pad3 note), letting
 * the backend fetch a valid ID for an EOPNOTSUPP response.
 */
struct blkif_x86_64_request_other {
uint8_t _pad1;
blkif_vdev_t _pad2;
uint32_t _pad3; /* offsetof(blkif_..,u.discard.id)==8 */
uint64_t id; /* private guest value, echoed in resp */
} __attribute__((__packed__));
struct blkif_x86_64_request { struct blkif_x86_64_request {
uint8_t operation; /* BLKIF_OP_??? */ uint8_t operation; /* BLKIF_OP_??? */
union { union {
struct blkif_x86_64_request_rw rw; struct blkif_x86_64_request_rw rw;
struct blkif_x86_64_request_discard discard; struct blkif_x86_64_request_discard discard;
struct blkif_x86_64_request_other other;
} u; } u;
} __attribute__((__packed__)); } __attribute__((__packed__));
...@@ -278,6 +293,11 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst, ...@@ -278,6 +293,11 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst,
dst->u.discard.nr_sectors = src->u.discard.nr_sectors; dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
break; break;
default: default:
/*
* Don't know how to translate this op. Only get the
* ID so failure can be reported to the frontend.
*/
dst->u.other.id = src->u.other.id;
break; break;
} }
} }
...@@ -309,6 +329,11 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst, ...@@ -309,6 +329,11 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst,
dst->u.discard.nr_sectors = src->u.discard.nr_sectors; dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
break; break;
default: default:
/*
* Don't know how to translate this op. Only get the
* ID so failure can be reported to the frontend.
*/
dst->u.other.id = src->u.other.id;
break; break;
} }
} }
......
...@@ -138,11 +138,21 @@ struct blkif_request_discard { ...@@ -138,11 +138,21 @@ struct blkif_request_discard {
uint8_t _pad3; uint8_t _pad3;
} __attribute__((__packed__)); } __attribute__((__packed__));
/*
 * Catch-all view of a native-protocol request whose operation is not
 * understood.  The pad fields mirror the common leading fields of the
 * other request variants so that 'id' is at the same offset in every
 * case; on x86-64 an extra pad word keeps id at offset 8 to match the
 * 64-bit ABI.  This lets the backend echo a valid ID when reporting
 * BLKIF_RSP_EOPNOTSUPP.
 */
struct blkif_request_other {
uint8_t _pad1;
blkif_vdev_t _pad2; /* only for read/write requests */
#ifdef CONFIG_X86_64
uint32_t _pad3; /* offsetof(blkif_req..,u.other.id)==8*/
#endif
uint64_t id; /* private guest value, echoed in resp */
} __attribute__((__packed__));
struct blkif_request { struct blkif_request {
uint8_t operation; /* BLKIF_OP_??? */ uint8_t operation; /* BLKIF_OP_??? */
union { union {
struct blkif_request_rw rw; struct blkif_request_rw rw;
struct blkif_request_discard discard; struct blkif_request_discard discard;
struct blkif_request_other other;
} u; } u;
} __attribute__((__packed__)); } __attribute__((__packed__));
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment