Commit 68cf8d0c authored by Linus Torvalds

Merge branch 'for-3.12/core' of git://git.kernel.dk/linux-block

Pull block IO fixes from Jens Axboe:
 "After merge window, no new stuff this time only a collection of neatly
  confined and simple fixes"

* 'for-3.12/core' of git://git.kernel.dk/linux-block:
  cfq: explicitly use 64bit divide operation for 64bit arguments
  block: Add nr_bios to block_rq_remap tracepoint
  If the queue is dying then we only call the rq->end_io callout. This leaves bios setup on the request, because the caller assumes when the blk_execute_rq_nowait/blk_execute_rq call has completed that the rq->bios have been cleaned up.
  bio-integrity: Fix use of bs->bio_integrity_pool after free
  blkcg: relocate root_blkg setting and clearing
  block: Convert kmalloc_node(...GFP_ZERO...) to kzalloc_node(...)
  block: trace all devices plug operation
parents 0fbf2cc9 f3cff25f
@@ -235,8 +235,13 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
 	blkg->online = true;
 	spin_unlock(&blkcg->lock);
 
-	if (!ret)
+	if (!ret) {
+		if (blkcg == &blkcg_root) {
+			q->root_blkg = blkg;
+			q->root_rl.blkg = blkg;
+		}
 		return blkg;
+	}
 
 	/* @blkg failed fully initialized, use the usual release path */
 	blkg_put(blkg);
@@ -334,6 +339,15 @@ static void blkg_destroy(struct blkcg_gq *blkg)
 	if (rcu_dereference_raw(blkcg->blkg_hint) == blkg)
 		rcu_assign_pointer(blkcg->blkg_hint, NULL);
 
+	/*
+	 * If root blkg is destroyed.  Just clear the pointer since root_rl
+	 * does not take reference on root blkg.
+	 */
+	if (blkcg == &blkcg_root) {
+		blkg->q->root_blkg = NULL;
+		blkg->q->root_rl.blkg = NULL;
+	}
+
 	/*
 	 * Put the reference taken at the time of creation so that when all
 	 * queues are gone, group can be destroyed.
@@ -360,13 +374,6 @@ static void blkg_destroy_all(struct request_queue *q)
 		blkg_destroy(blkg);
 		spin_unlock(&blkcg->lock);
 	}
-
-	/*
-	 * root blkg is destroyed.  Just clear the pointer since
-	 * root_rl does not take reference on root blkg.
-	 */
-	q->root_blkg = NULL;
-	q->root_rl.blkg = NULL;
 }
 
 /*
@@ -970,8 +977,6 @@ int blkcg_activate_policy(struct request_queue *q,
 		ret = PTR_ERR(blkg);
 		goto out_unlock;
 	}
-	q->root_blkg = blkg;
-	q->root_rl.blkg = blkg;
 
 	list_for_each_entry(blkg, &q->blkg_list, q_node)
 		cnt++;
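Context for the three blkcg hunks above (commentary, not part of the commit): q->root_rl caches a pointer to the root blkg without holding a reference on it, so the cache has to be set right where the root group comes online in blkg_create() and cleared in blkg_destroy() before the group's last reference can drop, rather than in the callers. A minimal userspace model of this non-refcounted back-pointer pattern, with all names invented for illustration:

```c
#include <stdio.h>
#include <stdlib.h>

/* Refcounted group; "owner" keeps a weak (non-counted) cache pointer. */
struct group {
	int refcnt;
};

struct owner {
	struct group *cached;	/* weak: does not hold a reference */
};

static struct group *group_create(struct owner *o)
{
	struct group *g = calloc(1, sizeof(*g));

	g->refcnt = 1;		/* creation reference */
	o->cached = g;		/* set the cache while the ref is held */
	return g;
}

static void group_put(struct owner *o, struct group *g)
{
	if (--g->refcnt == 0) {
		/* Clear the weak cache before the object goes away,
		 * mirroring the root_blkg/root_rl.blkg clearing above. */
		if (o->cached == g)
			o->cached = NULL;
		free(g);
	}
}

int main(void)
{
	struct owner o = { 0 };
	struct group *g = group_create(&o);

	group_put(&o, g);
	printf("cache after destroy: %p\n", (void *)o.cached);	/* (nil) */
	return 0;
}
```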
...
@@ -1549,11 +1549,9 @@ void blk_queue_bio(struct request_queue *q, struct bio *bio)
 	if (plug) {
 		/*
 		 * If this is the first request added after a plug, fire
-		 * of a plug trace. If others have been added before, check
-		 * if we have multiple devices in this plug. If so, make a
-		 * note to sort the list before dispatch.
+		 * of a plug trace.
 		 */
-		if (list_empty(&plug->list))
+		if (!request_count)
 			trace_block_plug(q);
 		else {
 			if (request_count >= BLK_MAX_REQUEST_COUNT) {
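A side note on this hunk ("block: trace all devices plug operation"): list_empty(&plug->list) is a property of the whole plug, so with several devices sharing one plug only the very first request ever fired a plug trace. request_count counts only the plugged requests belonging to this queue, so testing !request_count fires one trace per device. A rough userspace sketch of that per-device test, with invented types:

```c
#include <stdio.h>

struct req { int qid; };	/* qid stands in for rq->q */

/* Count plugged requests that belong to queue `qid`, like the
 * per-queue request_count gathered during the plug merge attempt. */
static int count_for_queue(const struct req *plugged, int n, int qid)
{
	int cnt = 0;

	for (int i = 0; i < n; i++)
		if (plugged[i].qid == qid)
			cnt++;
	return cnt;
}

int main(void)
{
	struct req plugged[] = { { .qid = 1 }, { .qid = 1 } };

	/* Queue 2 has nothing plugged yet, so it should get its own
	 * plug trace even though the plug list is not empty. */
	if (!count_for_queue(plugged, 2, 2))
		printf("trace_block_plug(q2)\n");
	return 0;
}
```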
...
@@ -68,9 +68,9 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 	spin_lock_irq(q->queue_lock);
 
 	if (unlikely(blk_queue_dying(q))) {
+		rq->cmd_flags |= REQ_QUIET;
 		rq->errors = -ENXIO;
-		if (rq->end_io)
-			rq->end_io(rq, rq->errors);
+		__blk_end_request_all(rq, rq->errors);
 		spin_unlock_irq(q->queue_lock);
 		return;
 	}
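As the (subject-less) commit message in the shortlog above explains, calling only rq->end_io on a dying queue left the bios attached to the request, while callers of blk_execute_rq_nowait()/blk_execute_rq() assume the bios are cleaned up by the time the call returns. __blk_end_request_all() completes the whole request, ending every bio and still invoking rq->end_io if one is set. A userspace model of that completion contract, with all names invented:

```c
#include <stdio.h>
#include <stdlib.h>

struct bio { struct bio *next; };

struct request {
	struct bio *bio;			/* chain of bios */
	void (*end_io)(struct request *, int);
};

/* Model of __blk_end_request_all(): finish every bio, then run the
 * completion callback if the submitter installed one. */
static void end_request_all(struct request *rq, int error)
{
	while (rq->bio) {
		struct bio *next = rq->bio->next;

		free(rq->bio);			/* "complete" the bio */
		rq->bio = next;
	}
	if (rq->end_io)
		rq->end_io(rq, error);
}

static void done(struct request *rq, int error)
{
	printf("end_io: error=%d, bios left=%s\n",
	       error, rq->bio ? "yes" : "none");
}

int main(void)
{
	struct bio *b = calloc(1, sizeof(*b));
	struct request rq = { .bio = b, .end_io = done };

	end_request_all(&rq, -6 /* like -ENXIO */);
	return 0;
}
```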
...
@@ -1803,7 +1803,7 @@ static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf,
 	if (samples) {
 		v = blkg_stat_read(&cfqg->stats.avg_queue_size_sum);
-		do_div(v, samples);
+		v = div64_u64(v, samples);
 	}
 	__blkg_prfill_u64(sf, pd, v);
 	return 0;
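The reason for this one-liner: do_div() takes a 32-bit divisor, so a 64-bit samples value is silently truncated before the division, while div64_u64() divides two full 64-bit values. A small userspace demonstration of the truncation effect, using plain C integer operations to stand in for the kernel helpers (the values are made up to make the truncation visible):

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t sum = 1000;
	uint64_t samples = 0x100000001ULL;	/* needs more than 32 bits */

	/* What a do_div()-style call effectively does: the divisor is
	 * narrowed to 32 bits first, here becoming 1. */
	uint32_t truncated = (uint32_t)samples;
	printf("truncated divide: %llu\n",
	       (unsigned long long)(sum / truncated));	/* 1000: wrong */

	/* What div64_u64() does: a full 64-by-64-bit division. */
	printf("full 64-bit divide: %llu\n",
	       (unsigned long long)(sum / samples));	/* 0: correct */
	return 0;
}
```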
@@ -4358,7 +4358,7 @@ static int cfq_init_queue(struct request_queue *q, struct elevator_type *e)
 	if (!eq)
 		return -ENOMEM;
 
-	cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
+	cfqd = kzalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node);
 	if (!cfqd) {
 		kobject_put(&eq->kobj);
 		return -ENOMEM;
...
@@ -346,7 +346,7 @@ static int deadline_init_queue(struct request_queue *q, struct elevator_type *e)
 	if (!eq)
 		return -ENOMEM;
 
-	dd = kmalloc_node(sizeof(*dd), GFP_KERNEL | __GFP_ZERO, q->node);
+	dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
 	if (!dd) {
 		kobject_put(&eq->kobj);
 		return -ENOMEM;
...
@@ -155,7 +155,7 @@ struct elevator_queue *elevator_alloc(struct request_queue *q,
 {
 	struct elevator_queue *eq;
 
-	eq = kmalloc_node(sizeof(*eq), GFP_KERNEL | __GFP_ZERO, q->node);
+	eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
 	if (unlikely(!eq))
 		goto err;
...
@@ -1252,8 +1252,7 @@ struct gendisk *alloc_disk_node(int minors, int node_id)
 {
 	struct gendisk *disk;
 
-	disk = kmalloc_node(sizeof(struct gendisk),
-				GFP_KERNEL | __GFP_ZERO, node_id);
+	disk = kzalloc_node(sizeof(struct gendisk), GFP_KERNEL, node_id);
 	if (disk) {
 		if (!init_part_stats(&disk->part0)) {
 			kfree(disk);
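The last four hunks are the same mechanical cleanup: kzalloc_node(size, flags, node) is exactly kmalloc_node(size, flags | __GFP_ZERO, node), so spelling it directly is shorter and more idiomatic. The userspace analogue, for illustration only:

```c
#include <stdlib.h>
#include <string.h>

/* calloc() is to malloc()+memset() roughly what kzalloc_node() is to
 * kmalloc_node(... | __GFP_ZERO ...): allocate and zero in one call. */
static void *zalloc_long(size_t size)
{
	void *p = malloc(size);

	if (p)
		memset(p, 0, size);
	return p;
}

static void *zalloc_short(size_t size)
{
	return calloc(1, size);		/* same result, one call */
}

int main(void)
{
	free(zalloc_long(64));
	free(zalloc_short(64));
	return 0;
}
```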
...
@@ -735,7 +735,7 @@ void bioset_integrity_free(struct bio_set *bs)
 		mempool_destroy(bs->bio_integrity_pool);
 
 	if (bs->bvec_integrity_pool)
-		mempool_destroy(bs->bio_integrity_pool);
+		mempool_destroy(bs->bvec_integrity_pool);
 }
 EXPORT_SYMBOL(bioset_integrity_free);
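The bug fixed here is a classic copy-paste slip: the second branch destroyed bs->bio_integrity_pool again, a few lines after it had already been destroyed, instead of bs->bvec_integrity_pool, giving a use-after-free of one pool and a leak of the other. The generic shape of the mistake and the fix, in plain C with invented names:

```c
#include <stdlib.h>

struct pools {
	void *a;	/* stands in for bio_integrity_pool */
	void *b;	/* stands in for bvec_integrity_pool */
};

static void pools_free(struct pools *p)
{
	if (p->a)
		free(p->a);
	if (p->b)
		free(p->b);	/* the buggy version freed p->a here again */
}

int main(void)
{
	struct pools p = { .a = malloc(8), .b = malloc(8) };

	pools_free(&p);
	return 0;
}
```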
...
@@ -862,6 +862,17 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
 	return blk_queue_get_max_sectors(q, rq->cmd_flags);
 }
 
+static inline unsigned int blk_rq_count_bios(struct request *rq)
+{
+	unsigned int nr_bios = 0;
+	struct bio *bio;
+
+	__rq_for_each_bio(bio, rq)
+		nr_bios++;
+
+	return nr_bios;
+}
+
 /*
  * Request issue related functions.
  */
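blk_rq_count_bios() just walks the request's singly linked bio chain: __rq_for_each_bio() is a loop starting at rq->bio and following each bio's next pointer. A standalone userspace equivalent, with simplified struct definitions:

```c
#include <stdio.h>

struct bio { struct bio *bi_next; };
struct request { struct bio *bio; };

/* Same walk as __rq_for_each_bio(): follow bi_next from rq->bio. */
static unsigned int count_bios(const struct request *rq)
{
	unsigned int nr_bios = 0;

	for (const struct bio *b = rq->bio; b; b = b->bi_next)
		nr_bios++;
	return nr_bios;
}

int main(void)
{
	struct bio b2 = { .bi_next = NULL };
	struct bio b1 = { .bi_next = &b2 };
	struct request rq = { .bio = &b1 };

	printf("nr_bios = %u\n", count_bios(&rq));	/* prints 2 */
	return 0;
}
```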
...
@@ -618,6 +618,7 @@ TRACE_EVENT(block_rq_remap,
 		__field( unsigned int,	nr_sector	)
 		__field( dev_t,		old_dev		)
 		__field( sector_t,	old_sector	)
+		__field( unsigned int,	nr_bios		)
 		__array( char,		rwbs,	RWBS_LEN)
 	),
@@ -627,15 +628,16 @@ TRACE_EVENT(block_rq_remap,
 		__entry->nr_sector	= blk_rq_sectors(rq);
 		__entry->old_dev	= dev;
 		__entry->old_sector	= from;
+		__entry->nr_bios	= blk_rq_count_bios(rq);
 		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
 	),
 
-	TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
+	TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu %u",
 		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
 		  (unsigned long long)__entry->sector,
 		  __entry->nr_sector,
 		  MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
-		  (unsigned long long)__entry->old_sector)
+		  (unsigned long long)__entry->old_sector, __entry->nr_bios)
 );
 
 #endif /* _TRACE_BLOCK_H */
...