Commit 2609587c authored by Christoph Hellwig, committed by Jens Axboe

xen-blkfront: don't use req->errors

xen-blkfront is the last user of req->errors for passing errors back to
blk-mq, and I'd like to get rid of that.  In the longer run the driver
should move more of the completion processing into .complete, but this
is the minimal change to move forward for now.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Roger Pau Monné <roger.pau@citrix.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 4dda4735
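
For context before the diff: the patch replaces rq->errors with a
driver-private status kept in the blk-mq per-request payload ("pdu").
blk-mq allocates cmd_size extra bytes behind every struct request when
the tag set is created, and blk_mq_rq_to_pdu() returns a pointer to that
area.  A minimal sketch of the pattern, assuming the blk-mq API of this
kernel generation; the sketch_* names are hypothetical, not part of
xen-blkfront:

#include <linux/blk-mq.h>

/* driver-private per-request state, standing in for rq->errors */
struct sketch_pdu {
	int error;
};

static inline struct sketch_pdu *sketch_pdu(struct request *rq)
{
	/* the pdu lives directly behind struct request */
	return blk_mq_rq_to_pdu(rq);
}

static int sketch_init_tag_set(struct blk_mq_tag_set *set)
{
	/* ask blk-mq to reserve room for the pdu in every request */
	set->cmd_size = sizeof(struct sketch_pdu);
	return blk_mq_alloc_tag_set(set);
}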
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -115,6 +115,15 @@ struct split_bio {
 	atomic_t pending;
 };
 
+struct blkif_req {
+	int error;
+};
+
+static inline struct blkif_req *blkif_req(struct request *rq)
+{
+	return blk_mq_rq_to_pdu(rq);
+}
+
 static DEFINE_MUTEX(blkfront_mutex);
 static const struct block_device_operations xlvbd_block_fops;
@@ -907,8 +916,14 @@ static int blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
 	return BLK_MQ_RQ_QUEUE_BUSY;
 }
 
+static void blkif_complete_rq(struct request *rq)
+{
+	blk_mq_end_request(rq, blkif_req(rq)->error);
+}
+
 static const struct blk_mq_ops blkfront_mq_ops = {
 	.queue_rq = blkif_queue_rq,
+	.complete = blkif_complete_rq,
 };
 
 static void blkif_set_queue_limits(struct blkfront_info *info)
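
The hunk above wires up a .complete handler; together with the
interrupt-handler changes further down, completion becomes a two-stage
affair.  A hedged sketch of that flow, reusing the hypothetical
sketch_pdu from the earlier sketch (blk_mq_complete_request() and
blk_mq_end_request() are the real calls, in the two-argument forms this
diff uses):

/*
 * Interrupt context: record the outcome in the pdu and signal blk-mq.
 * The heavy lifting is deferred to the .complete callback, which runs
 * later (typically in softirq context) and ends the request with the
 * stored status.
 */
static void sketch_irq_done(struct request *rq, int status)
{
	sketch_pdu(rq)->error = status;
	blk_mq_complete_request(rq, 0);
}

/* .complete callback: consume the stored status */
static void sketch_complete_rq(struct request *rq)
{
	blk_mq_end_request(rq, sketch_pdu(rq)->error);
}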
@@ -969,7 +984,7 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
 	info->tag_set.queue_depth = BLK_RING_SIZE(info);
 	info->tag_set.numa_node = NUMA_NO_NODE;
 	info->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
-	info->tag_set.cmd_size = 0;
+	info->tag_set.cmd_size = sizeof(struct blkif_req);
 	info->tag_set.driver_data = info;
 
 	if (blk_mq_alloc_tag_set(&info->tag_set))
@@ -1543,7 +1558,6 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 	unsigned long flags;
 	struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)dev_id;
 	struct blkfront_info *info = rinfo->dev_info;
-	int error;
 
 	if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
 		return IRQ_HANDLED;
@@ -1587,37 +1601,36 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 			continue;
 		}
 
-		error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
+		blkif_req(req)->error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
 		switch (bret->operation) {
 		case BLKIF_OP_DISCARD:
 			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
 				struct request_queue *rq = info->rq;
 				printk(KERN_WARNING "blkfront: %s: %s op failed\n",
 				       info->gd->disk_name, op_name(bret->operation));
-				error = -EOPNOTSUPP;
+				blkif_req(req)->error = -EOPNOTSUPP;
 				info->feature_discard = 0;
 				info->feature_secdiscard = 0;
 				queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
 				queue_flag_clear(QUEUE_FLAG_SECERASE, rq);
 			}
-			blk_mq_complete_request(req, error);
 			break;
 		case BLKIF_OP_FLUSH_DISKCACHE:
 		case BLKIF_OP_WRITE_BARRIER:
 			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
 				printk(KERN_WARNING "blkfront: %s: %s op failed\n",
 				       info->gd->disk_name, op_name(bret->operation));
-				error = -EOPNOTSUPP;
+				blkif_req(req)->error = -EOPNOTSUPP;
 			}
 			if (unlikely(bret->status == BLKIF_RSP_ERROR &&
 				     rinfo->shadow[id].req.u.rw.nr_segments == 0)) {
 				printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
 				       info->gd->disk_name, op_name(bret->operation));
-				error = -EOPNOTSUPP;
+				blkif_req(req)->error = -EOPNOTSUPP;
 			}
-			if (unlikely(error)) {
-				if (error == -EOPNOTSUPP)
-					error = 0;
+			if (unlikely(blkif_req(req)->error)) {
+				if (blkif_req(req)->error == -EOPNOTSUPP)
+					blkif_req(req)->error = 0;
 				info->feature_fua = 0;
 				info->feature_flush = 0;
 				xlvbd_flush(info);
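
Worth noting in the flush/barrier branch above: -EOPNOTSUPP is
deliberately downgraded to success, since a cache flush the backend
cannot execute is not a data error; the feature flags are cleared and
xlvbd_flush() reconfigures the queue so blk-mq stops issuing such
requests.  A simplified sketch of the idiom, again with hypothetical
sketch_* names:

static void sketch_handle_unsupported_flush(struct request *rq)
{
	struct sketch_pdu *pdu = sketch_pdu(rq);

	if (unlikely(pdu->error == -EOPNOTSUPP)) {
		pdu->error = 0;	/* unsupported, not failed */
		/* xen-blkfront clears feature_fua/feature_flush here
		 * and calls xlvbd_flush() to reconfigure the queue */
	}
}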
@@ -1629,11 +1642,12 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 			dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
 				"request: %x\n", bret->status);
 
-			blk_mq_complete_request(req, error);
 			break;
 		default:
 			BUG();
 		}
+
+		blk_mq_complete_request(req, 0);
 	}
 
 	rinfo->ring.rsp_cons = i;
...