Commit 2a6f71ad authored by Bob Liu, committed by Konrad Rzeszutek Wilk

xen-blkfront: fix resume issues after a migration

After migrating to another host (which may not have multiqueue
support), the number of rings (block hardware queues) may change,
and the ring info structure will also be reallocated.

This patch fixes two related bugs:
 * Call blk_mq_update_nr_hw_queues() so that the block core knows
   the number of hardware queues has changed.
 * Don't store the rinfo pointer in hctx->driver_data, because rinfo
   may be reallocated; use hctx->queue_num to look up the rinfo
   structure instead (a sketch of this hazard follows below).
Signed-off-by: Bob Liu <bob.liu@oracle.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
parent efd15352
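
For illustration, here is a minimal stand-alone sketch (user-space C, not
blkfront code; all names are invented for the example) of the bug class the
second fix addresses: a cached pointer into an array that is reallocated on
resume dangles, while an index-based lookup through a stable parent
structure stays valid.

/*
 * Stand-alone sketch (names invented): caching a pointer into a
 * reallocatable array vs. looking it up by index each time.
 */
#include <stdio.h>
#include <stdlib.h>

struct ring_info { int id; };

struct dev_info {
	unsigned int nr_rings;
	struct ring_info *rinfo;	/* reallocated on every "resume" */
};

/* Mimics the resume path: the ring array is rebuilt, possibly with a
 * different number of entries (e.g. after migrating to a host without
 * multiqueue support). */
static void fake_resume(struct dev_info *info, unsigned int nr)
{
	free(info->rinfo);
	info->nr_rings = nr;
	info->rinfo = calloc(nr, sizeof(*info->rinfo));
	for (unsigned int i = 0; i < nr; i++)
		info->rinfo[i].id = (int)i;
}

int main(void)
{
	struct dev_info info = { 0, NULL };
	unsigned int qid = 0;				/* like hctx->queue_num */

	fake_resume(&info, 4);
	struct ring_info *cached = &info.rinfo[qid];	/* like hctx->driver_data */

	fake_resume(&info, 1);		/* migration changed the ring count */

	/* 'cached' now points into freed memory; dereferencing it would be
	 * a use-after-free.  The index-based lookup is still correct: */
	printf("ring %d of %u\n", info.rinfo[qid].id, info.nr_rings);
	(void)cached;
	free(info.rinfo);
	return 0;
}
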
@@ -874,8 +874,12 @@ static int blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
 			  const struct blk_mq_queue_data *qd)
 {
 	unsigned long flags;
-	struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)hctx->driver_data;
+	int qid = hctx->queue_num;
+	struct blkfront_info *info = hctx->queue->queuedata;
+	struct blkfront_ring_info *rinfo = NULL;

+	BUG_ON(info->nr_rings <= qid);
+	rinfo = &info->rinfo[qid];
 	blk_mq_start_request(qd->rq);
 	spin_lock_irqsave(&rinfo->ring_lock, flags);
 	if (RING_FULL(&rinfo->ring))
@@ -901,20 +905,9 @@ static int blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
 	return BLK_MQ_RQ_QUEUE_BUSY;
 }

-static int blk_mq_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
-			    unsigned int index)
-{
-	struct blkfront_info *info = (struct blkfront_info *)data;
-
-	BUG_ON(info->nr_rings <= index);
-	hctx->driver_data = &info->rinfo[index];
-	return 0;
-}
-
 static struct blk_mq_ops blkfront_mq_ops = {
 	.queue_rq = blkif_queue_rq,
 	.map_queue = blk_mq_map_queue,
-	.init_hctx = blk_mq_init_hctx,
 };

 static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
@@ -950,6 +943,7 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
 		return PTR_ERR(rq);
 	}

+	rq->queuedata = info;
 	queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);

 	if (info->feature_discard) {
@@ -2149,6 +2143,8 @@ static int blkfront_resume(struct xenbus_device *dev)
 		return err;

 	err = talk_to_blkback(dev, info);
+	if (!err)
+		blk_mq_update_nr_hw_queues(&info->tag_set, info->nr_rings);

 	/*
 	 * We have to wait for the backend to switch to
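
The blkfront_resume() hunk above is the core of the first fix. Condensed
into a sketch (not the full function; ring teardown, error paths, and the
surrounding xenbus comment are elided), the flow after this patch is:

/* Simplified sketch of blkfront_resume() after this patch: once
 * renegotiation with the backend succeeds, blk-mq is told the
 * (possibly changed) ring count so it can remap its hardware queues. */
static int blkfront_resume(struct xenbus_device *dev)
{
	struct blkfront_info *info = dev_get_drvdata(&dev->dev);
	int err;

	/* ... free the old rings and reallocate info->rinfo ... */

	err = talk_to_blkback(dev, info);
	if (!err)
		blk_mq_update_nr_hw_queues(&info->tag_set, info->nr_rings);

	/* ... the backend's switch to Connected finishes the resume ... */
	return err;
}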