Commit 522a15db authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block layer fixes from Jens Axboe:
 "A smaller collection of fixes that have come up since the initial
  merge window pull request.  This contains:

   - error handling cleanup and support for larger than 16 byte cdbs in
     sg_io() from Christoph.  The latter just matches what bsg and
     friends support, sg_io() got left out in the merge.

   - an option for brd to expose partitions in /proc/partitions.  They
     are hidden by default for compat reasons.  From Dmitry Monakhov.

   - a few blk-mq fixes from me - killing a dead/unused flag, fix for
     merging happening even if turned off, and correction of a few
     comments.

   - removal of unnecessary ->owner setting in systemace.  From Michal
     Simek.

   - two related fixes for a problem with nesting freezing of queues in
     blk-mq.  One from Ming Lei removing an unnecessary freeze operation,
     and another from Tejun fixing the nesting regression introduced in
     the merge window.

   - fix for a BUG_ON() at bio_endio time when protection info is
     attached and the IO has an error.  From Sagi Grimberg.

   - two scsi_ioctl bug fixes for regressions with scsi-mq from Tony
     Battersby.

   - a cfq weight update fix and subsequent comment update from Toshiaki
     Makita"

* 'for-linus' of git://git.kernel.dk/linux-block:
  cfq-iosched: Add comments on update timing of weight
  cfq-iosched: Fix wrong children_weight calculation
  block: fix error handling in sg_io
  fix regression in SCSI_IOCTL_SEND_COMMAND
  scsi-mq: fix requests that use a separate CDB buffer
  block: support > 16 byte CDBs for SG_IO
  block: cleanup error handling in sg_io
  brd: add ram disk visibility option
  block: systemace: Remove .owner field for driver
  blk-mq: blk_mq_freeze_queue() should allow nesting
  blk-mq: correct a few wrong/bad comments
  block: Fix BUG_ON when pi errors occur
  blk-mq: don't allow merges if turned off for the queue
  blk-mq: get rid of unused BLK_MQ_F_SHOULD_SORT flag
  blk-mq: fix WARNING "percpu_ref_kill() called more than once!"
parents 9e36c633 7b5af5cf
...@@ -520,7 +520,7 @@ void bio_integrity_endio(struct bio *bio, int error) ...@@ -520,7 +520,7 @@ void bio_integrity_endio(struct bio *bio, int error)
*/ */
if (error) { if (error) {
bio->bi_end_io = bip->bip_end_io; bio->bi_end_io = bip->bip_end_io;
bio_endio(bio, error); bio_endio_nodec(bio, error);
return; return;
} }
......
...@@ -1252,7 +1252,6 @@ void blk_rq_set_block_pc(struct request *rq) ...@@ -1252,7 +1252,6 @@ void blk_rq_set_block_pc(struct request *rq)
rq->__sector = (sector_t) -1; rq->__sector = (sector_t) -1;
rq->bio = rq->biotail = NULL; rq->bio = rq->biotail = NULL;
memset(rq->__cmd, 0, sizeof(rq->__cmd)); memset(rq->__cmd, 0, sizeof(rq->__cmd));
rq->cmd = rq->__cmd;
} }
EXPORT_SYMBOL(blk_rq_set_block_pc); EXPORT_SYMBOL(blk_rq_set_block_pc);
......
...@@ -112,18 +112,22 @@ static void blk_mq_usage_counter_release(struct percpu_ref *ref) ...@@ -112,18 +112,22 @@ static void blk_mq_usage_counter_release(struct percpu_ref *ref)
*/ */
void blk_mq_freeze_queue(struct request_queue *q) void blk_mq_freeze_queue(struct request_queue *q)
{ {
bool freeze;
spin_lock_irq(q->queue_lock); spin_lock_irq(q->queue_lock);
q->mq_freeze_depth++; freeze = !q->mq_freeze_depth++;
spin_unlock_irq(q->queue_lock); spin_unlock_irq(q->queue_lock);
percpu_ref_kill(&q->mq_usage_counter); if (freeze) {
blk_mq_run_queues(q, false); percpu_ref_kill(&q->mq_usage_counter);
blk_mq_run_queues(q, false);
}
wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter)); wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
} }
static void blk_mq_unfreeze_queue(struct request_queue *q) static void blk_mq_unfreeze_queue(struct request_queue *q)
{ {
bool wake = false; bool wake;
spin_lock_irq(q->queue_lock); spin_lock_irq(q->queue_lock);
wake = !--q->mq_freeze_depth; wake = !--q->mq_freeze_depth;
...@@ -172,6 +176,8 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx, ...@@ -172,6 +176,8 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
/* tag was already set */ /* tag was already set */
rq->errors = 0; rq->errors = 0;
rq->cmd = rq->__cmd;
rq->extra_len = 0; rq->extra_len = 0;
rq->sense_len = 0; rq->sense_len = 0;
rq->resid_len = 0; rq->resid_len = 0;
...@@ -1068,13 +1074,17 @@ static void blk_mq_bio_to_request(struct request *rq, struct bio *bio) ...@@ -1068,13 +1074,17 @@ static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
blk_account_io_start(rq, 1); blk_account_io_start(rq, 1);
} }
static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
{
return (hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
!blk_queue_nomerges(hctx->queue);
}
static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx, static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
struct blk_mq_ctx *ctx, struct blk_mq_ctx *ctx,
struct request *rq, struct bio *bio) struct request *rq, struct bio *bio)
{ {
struct request_queue *q = hctx->queue; if (!hctx_allow_merges(hctx)) {
if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE)) {
blk_mq_bio_to_request(rq, bio); blk_mq_bio_to_request(rq, bio);
spin_lock(&ctx->lock); spin_lock(&ctx->lock);
insert_rq: insert_rq:
...@@ -1082,6 +1092,8 @@ static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx, ...@@ -1082,6 +1092,8 @@ static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
spin_unlock(&ctx->lock); spin_unlock(&ctx->lock);
return false; return false;
} else { } else {
struct request_queue *q = hctx->queue;
spin_lock(&ctx->lock); spin_lock(&ctx->lock);
if (!blk_mq_attempt_merge(q, ctx, bio)) { if (!blk_mq_attempt_merge(q, ctx, bio)) {
blk_mq_bio_to_request(rq, bio); blk_mq_bio_to_request(rq, bio);
...@@ -1574,7 +1586,7 @@ static int blk_mq_init_hw_queues(struct request_queue *q, ...@@ -1574,7 +1586,7 @@ static int blk_mq_init_hw_queues(struct request_queue *q,
hctx->tags = set->tags[i]; hctx->tags = set->tags[i];
/* /*
* Allocate space for all possible cpus to avoid allocation in * Allocate space for all possible cpus to avoid allocation at
* runtime * runtime
*/ */
hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *), hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
...@@ -1662,8 +1674,8 @@ static void blk_mq_map_swqueue(struct request_queue *q) ...@@ -1662,8 +1674,8 @@ static void blk_mq_map_swqueue(struct request_queue *q)
queue_for_each_hw_ctx(q, hctx, i) { queue_for_each_hw_ctx(q, hctx, i) {
/* /*
* If not software queues are mapped to this hardware queue, * If no software queues are mapped to this hardware queue,
* disable it and free the request entries * disable it and free the request entries.
*/ */
if (!hctx->nr_ctx) { if (!hctx->nr_ctx) {
struct blk_mq_tag_set *set = q->tag_set; struct blk_mq_tag_set *set = q->tag_set;
...@@ -1713,14 +1725,10 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q) ...@@ -1713,14 +1725,10 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)
{ {
struct blk_mq_tag_set *set = q->tag_set; struct blk_mq_tag_set *set = q->tag_set;
blk_mq_freeze_queue(q);
mutex_lock(&set->tag_list_lock); mutex_lock(&set->tag_list_lock);
list_del_init(&q->tag_set_list); list_del_init(&q->tag_set_list);
blk_mq_update_tag_set_depth(set); blk_mq_update_tag_set_depth(set);
mutex_unlock(&set->tag_list_lock); mutex_unlock(&set->tag_list_lock);
blk_mq_unfreeze_queue(q);
} }
static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set, static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
......
...@@ -1272,15 +1272,22 @@ __cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg) ...@@ -1272,15 +1272,22 @@ __cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
rb_insert_color(&cfqg->rb_node, &st->rb); rb_insert_color(&cfqg->rb_node, &st->rb);
} }
/*
* This has to be called only on activation of cfqg
*/
static void static void
cfq_update_group_weight(struct cfq_group *cfqg) cfq_update_group_weight(struct cfq_group *cfqg)
{ {
BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
if (cfqg->new_weight) { if (cfqg->new_weight) {
cfqg->weight = cfqg->new_weight; cfqg->weight = cfqg->new_weight;
cfqg->new_weight = 0; cfqg->new_weight = 0;
} }
}
static void
cfq_update_group_leaf_weight(struct cfq_group *cfqg)
{
BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
if (cfqg->new_leaf_weight) { if (cfqg->new_leaf_weight) {
cfqg->leaf_weight = cfqg->new_leaf_weight; cfqg->leaf_weight = cfqg->new_leaf_weight;
...@@ -1299,7 +1306,12 @@ cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg) ...@@ -1299,7 +1306,12 @@ cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
/* add to the service tree */ /* add to the service tree */
BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node)); BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
cfq_update_group_weight(cfqg); /*
* Update leaf_weight. We cannot update weight at this point
* because cfqg might already have been activated and is
* contributing its current weight to the parent's child_weight.
*/
cfq_update_group_leaf_weight(cfqg);
__cfq_group_service_tree_add(st, cfqg); __cfq_group_service_tree_add(st, cfqg);
/* /*
...@@ -1323,6 +1335,7 @@ cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg) ...@@ -1323,6 +1335,7 @@ cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
*/ */
while ((parent = cfqg_parent(pos))) { while ((parent = cfqg_parent(pos))) {
if (propagate) { if (propagate) {
cfq_update_group_weight(pos);
propagate = !parent->nr_active++; propagate = !parent->nr_active++;
parent->children_weight += pos->weight; parent->children_weight += pos->weight;
} }
......
...@@ -279,7 +279,6 @@ static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr, ...@@ -279,7 +279,6 @@ static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
r = blk_rq_unmap_user(bio); r = blk_rq_unmap_user(bio);
if (!ret) if (!ret)
ret = r; ret = r;
blk_put_request(rq);
return ret; return ret;
} }
...@@ -297,8 +296,6 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk, ...@@ -297,8 +296,6 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
if (hdr->interface_id != 'S') if (hdr->interface_id != 'S')
return -EINVAL; return -EINVAL;
if (hdr->cmd_len > BLK_MAX_CDB)
return -EINVAL;
if (hdr->dxfer_len > (queue_max_hw_sectors(q) << 9)) if (hdr->dxfer_len > (queue_max_hw_sectors(q) << 9))
return -EIO; return -EIO;
...@@ -317,16 +314,23 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk, ...@@ -317,16 +314,23 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
if (hdr->flags & SG_FLAG_Q_AT_HEAD) if (hdr->flags & SG_FLAG_Q_AT_HEAD)
at_head = 1; at_head = 1;
ret = -ENOMEM;
rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL); rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL);
if (!rq) if (!rq)
return -ENOMEM; goto out;
blk_rq_set_block_pc(rq); blk_rq_set_block_pc(rq);
if (blk_fill_sghdr_rq(q, rq, hdr, mode)) { if (hdr->cmd_len > BLK_MAX_CDB) {
blk_put_request(rq); rq->cmd = kzalloc(hdr->cmd_len, GFP_KERNEL);
return -EFAULT; if (!rq->cmd)
goto out_put_request;
} }
ret = -EFAULT;
if (blk_fill_sghdr_rq(q, rq, hdr, mode))
goto out_free_cdb;
ret = 0;
if (hdr->iovec_count) { if (hdr->iovec_count) {
size_t iov_data_len; size_t iov_data_len;
struct iovec *iov = NULL; struct iovec *iov = NULL;
...@@ -335,7 +339,7 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk, ...@@ -335,7 +339,7 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
0, NULL, &iov); 0, NULL, &iov);
if (ret < 0) { if (ret < 0) {
kfree(iov); kfree(iov);
goto out; goto out_free_cdb;
} }
iov_data_len = ret; iov_data_len = ret;
...@@ -358,7 +362,7 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk, ...@@ -358,7 +362,7 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
GFP_KERNEL); GFP_KERNEL);
if (ret) if (ret)
goto out; goto out_free_cdb;
bio = rq->bio; bio = rq->bio;
memset(sense, 0, sizeof(sense)); memset(sense, 0, sizeof(sense));
...@@ -376,9 +380,14 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk, ...@@ -376,9 +380,14 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
hdr->duration = jiffies_to_msecs(jiffies - start_time); hdr->duration = jiffies_to_msecs(jiffies - start_time);
return blk_complete_sghdr_rq(rq, hdr, bio); ret = blk_complete_sghdr_rq(rq, hdr, bio);
out:
out_free_cdb:
if (rq->cmd != rq->__cmd)
kfree(rq->cmd);
out_put_request:
blk_put_request(rq); blk_put_request(rq);
out:
return ret; return ret;
} }
...@@ -448,6 +457,11 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode, ...@@ -448,6 +457,11 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
} }
rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_WAIT); rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_WAIT);
if (!rq) {
err = -ENOMEM;
goto error;
}
blk_rq_set_block_pc(rq);
cmdlen = COMMAND_SIZE(opcode); cmdlen = COMMAND_SIZE(opcode);
...@@ -501,7 +515,6 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode, ...@@ -501,7 +515,6 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
memset(sense, 0, sizeof(sense)); memset(sense, 0, sizeof(sense));
rq->sense = sense; rq->sense = sense;
rq->sense_len = 0; rq->sense_len = 0;
blk_rq_set_block_pc(rq);
blk_execute_rq(q, disk, rq, 0); blk_execute_rq(q, disk, rq, 0);
...@@ -521,7 +534,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode, ...@@ -521,7 +534,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
error: error:
kfree(buffer); kfree(buffer);
blk_put_request(rq); if (rq)
blk_put_request(rq);
return err; return err;
} }
EXPORT_SYMBOL_GPL(sg_scsi_ioctl); EXPORT_SYMBOL_GPL(sg_scsi_ioctl);
......
...@@ -442,12 +442,15 @@ static int rd_nr; ...@@ -442,12 +442,15 @@ static int rd_nr;
int rd_size = CONFIG_BLK_DEV_RAM_SIZE; int rd_size = CONFIG_BLK_DEV_RAM_SIZE;
static int max_part; static int max_part;
static int part_shift; static int part_shift;
static int part_show = 0;
module_param(rd_nr, int, S_IRUGO); module_param(rd_nr, int, S_IRUGO);
MODULE_PARM_DESC(rd_nr, "Maximum number of brd devices"); MODULE_PARM_DESC(rd_nr, "Maximum number of brd devices");
module_param(rd_size, int, S_IRUGO); module_param(rd_size, int, S_IRUGO);
MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes."); MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes.");
module_param(max_part, int, S_IRUGO); module_param(max_part, int, S_IRUGO);
MODULE_PARM_DESC(max_part, "Maximum number of partitions per RAM disk"); MODULE_PARM_DESC(max_part, "Maximum number of partitions per RAM disk");
module_param(part_show, int, S_IRUGO);
MODULE_PARM_DESC(part_show, "Control RAM disk visibility in /proc/partitions");
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(RAMDISK_MAJOR); MODULE_ALIAS_BLOCKDEV_MAJOR(RAMDISK_MAJOR);
MODULE_ALIAS("rd"); MODULE_ALIAS("rd");
...@@ -501,7 +504,8 @@ static struct brd_device *brd_alloc(int i) ...@@ -501,7 +504,8 @@ static struct brd_device *brd_alloc(int i)
disk->fops = &brd_fops; disk->fops = &brd_fops;
disk->private_data = brd; disk->private_data = brd;
disk->queue = brd->brd_queue; disk->queue = brd->brd_queue;
disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO; if (!part_show)
disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO;
sprintf(disk->disk_name, "ram%d", i); sprintf(disk->disk_name, "ram%d", i);
set_capacity(disk, rd_size * 2); set_capacity(disk, rd_size * 2);
......
...@@ -1203,7 +1203,6 @@ static struct platform_driver ace_platform_driver = { ...@@ -1203,7 +1203,6 @@ static struct platform_driver ace_platform_driver = {
.probe = ace_probe, .probe = ace_probe,
.remove = ace_remove, .remove = ace_remove,
.driver = { .driver = {
.owner = THIS_MODULE,
.name = "xsysace", .name = "xsysace",
.of_match_table = ace_of_match, .of_match_table = ace_of_match,
}, },
......
...@@ -1808,7 +1808,6 @@ static int scsi_mq_prep_fn(struct request *req) ...@@ -1808,7 +1808,6 @@ static int scsi_mq_prep_fn(struct request *req)
cmd->tag = req->tag; cmd->tag = req->tag;
req->cmd = req->__cmd;
cmd->cmnd = req->cmd; cmd->cmnd = req->cmd;
cmd->prot_op = SCSI_PROT_NORMAL; cmd->prot_op = SCSI_PROT_NORMAL;
......
...@@ -127,10 +127,9 @@ enum { ...@@ -127,10 +127,9 @@ enum {
BLK_MQ_RQ_QUEUE_ERROR = 2, /* end IO with error */ BLK_MQ_RQ_QUEUE_ERROR = 2, /* end IO with error */
BLK_MQ_F_SHOULD_MERGE = 1 << 0, BLK_MQ_F_SHOULD_MERGE = 1 << 0,
BLK_MQ_F_SHOULD_SORT = 1 << 1, BLK_MQ_F_TAG_SHARED = 1 << 1,
BLK_MQ_F_TAG_SHARED = 1 << 2, BLK_MQ_F_SG_MERGE = 1 << 2,
BLK_MQ_F_SG_MERGE = 1 << 3, BLK_MQ_F_SYSFS_UP = 1 << 3,
BLK_MQ_F_SYSFS_UP = 1 << 4,
BLK_MQ_S_STOPPED = 0, BLK_MQ_S_STOPPED = 0,
BLK_MQ_S_TAG_ACTIVE = 1, BLK_MQ_S_TAG_ACTIVE = 1,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment