Commit 089bc014 authored by Jan Beulich, committed by Konrad Rzeszutek Wilk

xen-blkback: don't leak stack data via response ring

Rather than constructing a local structure instance on the stack, fill
the fields directly on the shared ring, just like other backends do.
Build on the fact that all response structure flavors are actually
identical (the old code did make this assumption too).

This is XSA-216.

Cc: stable@vger.kernel.org
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
parent a24fa22c
...@@ -1433,34 +1433,35 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring, ...@@ -1433,34 +1433,35 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
static void make_response(struct xen_blkif_ring *ring, u64 id, static void make_response(struct xen_blkif_ring *ring, u64 id,
unsigned short op, int st) unsigned short op, int st)
{ {
struct blkif_response resp; struct blkif_response *resp;
unsigned long flags; unsigned long flags;
union blkif_back_rings *blk_rings; union blkif_back_rings *blk_rings;
int notify; int notify;
resp.id = id;
resp.operation = op;
resp.status = st;
spin_lock_irqsave(&ring->blk_ring_lock, flags); spin_lock_irqsave(&ring->blk_ring_lock, flags);
blk_rings = &ring->blk_rings; blk_rings = &ring->blk_rings;
/* Place on the response ring for the relevant domain. */ /* Place on the response ring for the relevant domain. */
switch (ring->blkif->blk_protocol) { switch (ring->blkif->blk_protocol) {
case BLKIF_PROTOCOL_NATIVE: case BLKIF_PROTOCOL_NATIVE:
memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt), resp = RING_GET_RESPONSE(&blk_rings->native,
&resp, sizeof(resp)); blk_rings->native.rsp_prod_pvt);
break; break;
case BLKIF_PROTOCOL_X86_32: case BLKIF_PROTOCOL_X86_32:
memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt), resp = RING_GET_RESPONSE(&blk_rings->x86_32,
&resp, sizeof(resp)); blk_rings->x86_32.rsp_prod_pvt);
break; break;
case BLKIF_PROTOCOL_X86_64: case BLKIF_PROTOCOL_X86_64:
memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt), resp = RING_GET_RESPONSE(&blk_rings->x86_64,
&resp, sizeof(resp)); blk_rings->x86_64.rsp_prod_pvt);
break; break;
default: default:
BUG(); BUG();
} }
resp->id = id;
resp->operation = op;
resp->status = st;
blk_rings->common.rsp_prod_pvt++; blk_rings->common.rsp_prod_pvt++;
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify); RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
spin_unlock_irqrestore(&ring->blk_ring_lock, flags); spin_unlock_irqrestore(&ring->blk_ring_lock, flags);
......
...@@ -75,9 +75,8 @@ extern unsigned int xenblk_max_queues; ...@@ -75,9 +75,8 @@ extern unsigned int xenblk_max_queues;
struct blkif_common_request { struct blkif_common_request {
char dummy; char dummy;
}; };
struct blkif_common_response {
char dummy; /* i386 protocol version */
};
struct blkif_x86_32_request_rw { struct blkif_x86_32_request_rw {
uint8_t nr_segments; /* number of segments */ uint8_t nr_segments; /* number of segments */
...@@ -129,14 +128,6 @@ struct blkif_x86_32_request { ...@@ -129,14 +128,6 @@ struct blkif_x86_32_request {
} u; } u;
} __attribute__((__packed__)); } __attribute__((__packed__));
/* i386 protocol version */
#pragma pack(push, 4)
struct blkif_x86_32_response {
uint64_t id; /* copied from request */
uint8_t operation; /* copied from request */
int16_t status; /* BLKIF_RSP_??? */
};
#pragma pack(pop)
/* x86_64 protocol version */ /* x86_64 protocol version */
struct blkif_x86_64_request_rw { struct blkif_x86_64_request_rw {
...@@ -193,18 +184,12 @@ struct blkif_x86_64_request { ...@@ -193,18 +184,12 @@ struct blkif_x86_64_request {
} u; } u;
} __attribute__((__packed__)); } __attribute__((__packed__));
struct blkif_x86_64_response {
uint64_t __attribute__((__aligned__(8))) id;
uint8_t operation; /* copied from request */
int16_t status; /* BLKIF_RSP_??? */
};
DEFINE_RING_TYPES(blkif_common, struct blkif_common_request, DEFINE_RING_TYPES(blkif_common, struct blkif_common_request,
struct blkif_common_response); struct blkif_response);
DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request, DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request,
struct blkif_x86_32_response); struct blkif_response __packed);
DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request, DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request,
struct blkif_x86_64_response); struct blkif_response);
union blkif_back_rings { union blkif_back_rings {
struct blkif_back_ring native; struct blkif_back_ring native;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.