Commit e37459b8 authored by Jens Axboe

Merge branch 'blk-mq/core' into for-3.13/core

Signed-off-by: Jens Axboe <axboe@kernel.dk>

Conflicts:
	block/blk-timeout.c
parents c7d1ba41 e7e24500
@@ -5,8 +5,9 @@
obj-$(CONFIG_BLOCK) := elevator.o blk-core.o blk-tag.o blk-sysfs.o \ obj-$(CONFIG_BLOCK) := elevator.o blk-core.o blk-tag.o blk-sysfs.o \
blk-flush.o blk-settings.o blk-ioc.o blk-map.o \ blk-flush.o blk-settings.o blk-ioc.o blk-map.o \
blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \ blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \
blk-iopoll.o blk-lib.o ioctl.o genhd.o scsi_ioctl.o \ blk-iopoll.o blk-lib.o blk-mq.o blk-mq-tag.o \
partition-generic.o partitions/ blk-mq-sysfs.o blk-mq-cpu.o blk-mq-cpumap.o ioctl.o \
genhd.o scsi_ioctl.o partition-generic.o partitions/
obj-$(CONFIG_BLK_DEV_BSG) += bsg.o obj-$(CONFIG_BLK_DEV_BSG) += bsg.o
obj-$(CONFIG_BLK_DEV_BSGLIB) += bsg-lib.o obj-$(CONFIG_BLK_DEV_BSGLIB) += bsg-lib.o
...
@@ -16,6 +16,7 @@
#include <linux/backing-dev.h> #include <linux/backing-dev.h>
#include <linux/bio.h> #include <linux/bio.h>
#include <linux/blkdev.h> #include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/highmem.h> #include <linux/highmem.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/kernel_stat.h> #include <linux/kernel_stat.h>
@@ -48,7 +49,7 @@ DEFINE_IDA(blk_queue_ida);
/* /*
* For the allocated request tables * For the allocated request tables
*/ */
static struct kmem_cache *request_cachep; struct kmem_cache *request_cachep = NULL;
/* /*
* For queue allocation * For queue allocation
@@ -60,42 +61,6 @@ struct kmem_cache *blk_requestq_cachep;
*/ */
static struct workqueue_struct *kblockd_workqueue; static struct workqueue_struct *kblockd_workqueue;
static void drive_stat_acct(struct request *rq, int new_io)
{
struct hd_struct *part;
int rw = rq_data_dir(rq);
int cpu;
if (!blk_do_io_stat(rq))
return;
cpu = part_stat_lock();
if (!new_io) {
part = rq->part;
part_stat_inc(cpu, part, merges[rw]);
} else {
part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
if (!hd_struct_try_get(part)) {
/*
* The partition is already being removed,
* the request will be accounted on the disk only
*
* We take a reference on disk->part0 although that
* partition will never be deleted, so we can treat
* it as any other partition.
*/
part = &rq->rq_disk->part0;
hd_struct_get(part);
}
part_round_stats(cpu, part);
part_inc_in_flight(part, rw);
rq->part = part;
}
part_stat_unlock();
}
void blk_queue_congestion_threshold(struct request_queue *q) void blk_queue_congestion_threshold(struct request_queue *q)
{ {
int nr; int nr;
@@ -145,7 +110,6 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
rq->cmd = rq->__cmd; rq->cmd = rq->__cmd;
rq->cmd_len = BLK_MAX_CDB; rq->cmd_len = BLK_MAX_CDB;
rq->tag = -1; rq->tag = -1;
rq->ref_count = 1;
rq->start_time = jiffies; rq->start_time = jiffies;
set_start_time_ns(rq); set_start_time_ns(rq);
rq->part = NULL; rq->part = NULL;
@@ -174,9 +138,9 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
{ {
int bit; int bit;
printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg, printk(KERN_INFO "%s: dev %s: type=%x, flags=%llx\n", msg,
rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type, rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
rq->cmd_flags); (unsigned long long) rq->cmd_flags);
printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n", printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n",
(unsigned long long)blk_rq_pos(rq), (unsigned long long)blk_rq_pos(rq),
@@ -595,9 +559,12 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
if (!q) if (!q)
return NULL; return NULL;
if (percpu_counter_init(&q->mq_usage_counter, 0))
goto fail_q;
q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask); q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
if (q->id < 0) if (q->id < 0)
goto fail_q; goto fail_c;
q->backing_dev_info.ra_pages = q->backing_dev_info.ra_pages =
(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
@@ -644,6 +611,8 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
q->bypass_depth = 1; q->bypass_depth = 1;
__set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags); __set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
init_waitqueue_head(&q->mq_freeze_wq);
if (blkcg_init_queue(q)) if (blkcg_init_queue(q))
goto fail_bdi; goto fail_bdi;
@@ -653,6 +622,8 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
bdi_destroy(&q->backing_dev_info); bdi_destroy(&q->backing_dev_info);
fail_id: fail_id:
ida_simple_remove(&blk_queue_ida, q->id); ida_simple_remove(&blk_queue_ida, q->id);
fail_c:
percpu_counter_destroy(&q->mq_usage_counter);
fail_q: fail_q:
kmem_cache_free(blk_requestq_cachep, q); kmem_cache_free(blk_requestq_cachep, q);
return NULL; return NULL;
@@ -1119,7 +1090,8 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
goto retry; goto retry;
} }
struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask) static struct request *blk_old_get_request(struct request_queue *q, int rw,
gfp_t gfp_mask)
{ {
struct request *rq; struct request *rq;
@@ -1136,6 +1108,14 @@ struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
return rq; return rq;
} }
struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
{
if (q->mq_ops)
return blk_mq_alloc_request(q, rw, gfp_mask, false);
else
return blk_old_get_request(q, rw, gfp_mask);
}
EXPORT_SYMBOL(blk_get_request); EXPORT_SYMBOL(blk_get_request);
/** /**
@@ -1221,7 +1201,7 @@ EXPORT_SYMBOL(blk_requeue_request);
static void add_acct_request(struct request_queue *q, struct request *rq, static void add_acct_request(struct request_queue *q, struct request *rq,
int where) int where)
{ {
drive_stat_acct(rq, 1); blk_account_io_start(rq, true);
__elv_add_request(q, rq, where); __elv_add_request(q, rq, where);
} }
@@ -1282,8 +1262,6 @@ void __blk_put_request(struct request_queue *q, struct request *req)
{ {
if (unlikely(!q)) if (unlikely(!q))
return; return;
if (unlikely(--req->ref_count))
return;
blk_pm_put_request(req); blk_pm_put_request(req);
@@ -1312,12 +1290,17 @@ EXPORT_SYMBOL_GPL(__blk_put_request);
void blk_put_request(struct request *req) void blk_put_request(struct request *req)
{ {
unsigned long flags;
struct request_queue *q = req->q; struct request_queue *q = req->q;
spin_lock_irqsave(q->queue_lock, flags); if (q->mq_ops)
__blk_put_request(q, req); blk_mq_free_request(req);
spin_unlock_irqrestore(q->queue_lock, flags); else {
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
__blk_put_request(q, req);
spin_unlock_irqrestore(q->queue_lock, flags);
}
} }
EXPORT_SYMBOL(blk_put_request); EXPORT_SYMBOL(blk_put_request);
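Both wrappers above branch on q->mq_ops, so drivers keep a single allocate/free API whether the queue is blk-mq or the legacy request_fn path. A minimal, hypothetical caller sketch (the function name is made up; the NULL-on-failure check follows blk_old_get_request() in this kernel version):

#include <linux/blkdev.h>

static int submit_test_rq(struct request_queue *q)
{
	struct request *rq;

	rq = blk_get_request(q, READ, GFP_KERNEL);	/* mq queue: blk_mq_alloc_request() */
	if (!rq)
		return -ENOMEM;

	/* ... set up cmd_type, map data, end_io, etc. here ... */

	blk_put_request(rq);				/* mq queue: blk_mq_free_request() */
	return 0;
}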
@@ -1353,8 +1336,8 @@ void blk_add_request_payload(struct request *rq, struct page *page,
} }
EXPORT_SYMBOL_GPL(blk_add_request_payload); EXPORT_SYMBOL_GPL(blk_add_request_payload);
static bool bio_attempt_back_merge(struct request_queue *q, struct request *req, bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
struct bio *bio) struct bio *bio)
{ {
const int ff = bio->bi_rw & REQ_FAILFAST_MASK; const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
@@ -1371,12 +1354,12 @@ static bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
req->__data_len += bio->bi_size; req->__data_len += bio->bi_size;
req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
drive_stat_acct(req, 0); blk_account_io_start(req, false);
return true; return true;
} }
static bool bio_attempt_front_merge(struct request_queue *q, bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
struct request *req, struct bio *bio) struct bio *bio)
{ {
const int ff = bio->bi_rw & REQ_FAILFAST_MASK; const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
@@ -1401,12 +1384,12 @@ static bool bio_attempt_front_merge(struct request_queue *q,
req->__data_len += bio->bi_size; req->__data_len += bio->bi_size;
req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
drive_stat_acct(req, 0); blk_account_io_start(req, false);
return true; return true;
} }
/** /**
* attempt_plug_merge - try to merge with %current's plugged list * blk_attempt_plug_merge - try to merge with %current's plugged list
* @q: request_queue new bio is being queued at * @q: request_queue new bio is being queued at
* @bio: new bio being queued * @bio: new bio being queued
* @request_count: out parameter for number of traversed plugged requests * @request_count: out parameter for number of traversed plugged requests
@@ -1422,12 +1405,13 @@ static bool bio_attempt_front_merge(struct request_queue *q,
* reliable access to the elevator outside queue lock. Only check basic * reliable access to the elevator outside queue lock. Only check basic
* merging parameters without querying the elevator. * merging parameters without querying the elevator.
*/ */
static bool attempt_plug_merge(struct request_queue *q, struct bio *bio, bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
unsigned int *request_count) unsigned int *request_count)
{ {
struct blk_plug *plug; struct blk_plug *plug;
struct request *rq; struct request *rq;
bool ret = false; bool ret = false;
struct list_head *plug_list;
if (blk_queue_nomerges(q)) if (blk_queue_nomerges(q))
goto out; goto out;
@@ -1437,7 +1421,12 @@ static bool attempt_plug_merge(struct request_queue *q, struct bio *bio,
goto out; goto out;
*request_count = 0; *request_count = 0;
list_for_each_entry_reverse(rq, &plug->list, queuelist) { if (q->mq_ops)
plug_list = &plug->mq_list;
else
plug_list = &plug->list;
list_for_each_entry_reverse(rq, plug_list, queuelist) {
int el_ret; int el_ret;
if (rq->q == q) if (rq->q == q)
@@ -1505,7 +1494,7 @@ void blk_queue_bio(struct request_queue *q, struct bio *bio)
* Check if we can merge with the plugged list before grabbing * Check if we can merge with the plugged list before grabbing
* any locks. * any locks.
*/ */
if (attempt_plug_merge(q, bio, &request_count)) if (blk_attempt_plug_merge(q, bio, &request_count))
return; return;
spin_lock_irq(q->queue_lock); spin_lock_irq(q->queue_lock);
@@ -1573,7 +1562,7 @@ void blk_queue_bio(struct request_queue *q, struct bio *bio)
} }
} }
list_add_tail(&req->queuelist, &plug->list); list_add_tail(&req->queuelist, &plug->list);
drive_stat_acct(req, 1); blk_account_io_start(req, true);
} else { } else {
spin_lock_irq(q->queue_lock); spin_lock_irq(q->queue_lock);
add_acct_request(q, req, where); add_acct_request(q, req, where);
@@ -2027,7 +2016,7 @@ unsigned int blk_rq_err_bytes(const struct request *rq)
} }
EXPORT_SYMBOL_GPL(blk_rq_err_bytes); EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
static void blk_account_io_completion(struct request *req, unsigned int bytes) void blk_account_io_completion(struct request *req, unsigned int bytes)
{ {
if (blk_do_io_stat(req)) { if (blk_do_io_stat(req)) {
const int rw = rq_data_dir(req); const int rw = rq_data_dir(req);
@@ -2041,7 +2030,7 @@ static void blk_account_io_completion(struct request *req, unsigned int bytes)
} }
} }
static void blk_account_io_done(struct request *req) void blk_account_io_done(struct request *req)
{ {
/* /*
* Account IO completion. flush_rq isn't accounted as a * Account IO completion. flush_rq isn't accounted as a
@@ -2089,6 +2078,42 @@ static inline struct request *blk_pm_peek_request(struct request_queue *q,
} }
#endif #endif
void blk_account_io_start(struct request *rq, bool new_io)
{
struct hd_struct *part;
int rw = rq_data_dir(rq);
int cpu;
if (!blk_do_io_stat(rq))
return;
cpu = part_stat_lock();
if (!new_io) {
part = rq->part;
part_stat_inc(cpu, part, merges[rw]);
} else {
part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
if (!hd_struct_try_get(part)) {
/*
* The partition is already being removed,
* the request will be accounted on the disk only
*
* We take a reference on disk->part0 although that
* partition will never be deleted, so we can treat
* it as any other partition.
*/
part = &rq->rq_disk->part0;
hd_struct_get(part);
}
part_round_stats(cpu, part);
part_inc_in_flight(part, rw);
rq->part = part;
}
part_stat_unlock();
}
/** /**
* blk_peek_request - peek at the top of a request queue * blk_peek_request - peek at the top of a request queue
* @q: request queue to peek at * @q: request queue to peek at
@@ -2465,7 +2490,6 @@ static void blk_finish_request(struct request *req, int error)
if (req->cmd_flags & REQ_DONTPREP) if (req->cmd_flags & REQ_DONTPREP)
blk_unprep_request(req); blk_unprep_request(req);
blk_account_io_done(req); blk_account_io_done(req);
if (req->end_io) if (req->end_io)
@@ -2887,6 +2911,7 @@ void blk_start_plug(struct blk_plug *plug)
plug->magic = PLUG_MAGIC; plug->magic = PLUG_MAGIC;
INIT_LIST_HEAD(&plug->list); INIT_LIST_HEAD(&plug->list);
INIT_LIST_HEAD(&plug->mq_list);
INIT_LIST_HEAD(&plug->cb_list); INIT_LIST_HEAD(&plug->cb_list);
/* /*
@@ -2984,6 +3009,10 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
BUG_ON(plug->magic != PLUG_MAGIC); BUG_ON(plug->magic != PLUG_MAGIC);
flush_plug_callbacks(plug, from_schedule); flush_plug_callbacks(plug, from_schedule);
if (!list_empty(&plug->mq_list))
blk_mq_flush_plug_list(plug, from_schedule);
if (list_empty(&plug->list)) if (list_empty(&plug->list))
return; return;
...
@@ -5,6 +5,7 @@
#include <linux/module.h> #include <linux/module.h>
#include <linux/bio.h> #include <linux/bio.h>
#include <linux/blkdev.h> #include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/sched/sysctl.h> #include <linux/sched/sysctl.h>
#include "blk.h" #include "blk.h"
@@ -24,7 +25,6 @@ static void blk_end_sync_rq(struct request *rq, int error)
struct completion *waiting = rq->end_io_data; struct completion *waiting = rq->end_io_data;
rq->end_io_data = NULL; rq->end_io_data = NULL;
__blk_put_request(rq->q, rq);
/* /*
* complete last, if this is a stack request the process (and thus * complete last, if this is a stack request the process (and thus
@@ -59,6 +59,12 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
rq->rq_disk = bd_disk; rq->rq_disk = bd_disk;
rq->end_io = done; rq->end_io = done;
if (q->mq_ops) {
blk_mq_insert_request(q, rq, true);
return;
}
/* /*
* need to check this before __blk_run_queue(), because rq can * need to check this before __blk_run_queue(), because rq can
* be freed before that returns. * be freed before that returns.
@@ -103,12 +109,6 @@ int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
int err = 0; int err = 0;
unsigned long hang_check; unsigned long hang_check;
/*
* we need an extra reference to the request, so we can look at
* it after io completion
*/
rq->ref_count++;
if (!rq->sense) { if (!rq->sense) {
memset(sense, 0, sizeof(sense)); memset(sense, 0, sizeof(sense));
rq->sense = sense; rq->sense = sense;
...
@@ -69,8 +69,10 @@
#include <linux/bio.h> #include <linux/bio.h>
#include <linux/blkdev.h> #include <linux/blkdev.h>
#include <linux/gfp.h> #include <linux/gfp.h>
#include <linux/blk-mq.h>
#include "blk.h" #include "blk.h"
#include "blk-mq.h"
/* FLUSH/FUA sequences */ /* FLUSH/FUA sequences */
enum { enum {
@@ -124,6 +126,24 @@ static void blk_flush_restore_request(struct request *rq)
/* make @rq a normal request */ /* make @rq a normal request */
rq->cmd_flags &= ~REQ_FLUSH_SEQ; rq->cmd_flags &= ~REQ_FLUSH_SEQ;
rq->end_io = rq->flush.saved_end_io; rq->end_io = rq->flush.saved_end_io;
blk_clear_rq_complete(rq);
}
static void mq_flush_data_run(struct work_struct *work)
{
struct request *rq;
rq = container_of(work, struct request, mq_flush_data);
memset(&rq->csd, 0, sizeof(rq->csd));
blk_mq_run_request(rq, true, false);
}
static void blk_mq_flush_data_insert(struct request *rq)
{
INIT_WORK(&rq->mq_flush_data, mq_flush_data_run);
kblockd_schedule_work(rq->q, &rq->mq_flush_data);
} }
/** /**
@@ -136,7 +156,7 @@ static void blk_flush_restore_request(struct request *rq)
* completion and trigger the next step. * completion and trigger the next step.
* *
* CONTEXT: * CONTEXT:
* spin_lock_irq(q->queue_lock) * spin_lock_irq(q->queue_lock or q->mq_flush_lock)
* *
* RETURNS: * RETURNS:
* %true if requests were added to the dispatch queue, %false otherwise. * %true if requests were added to the dispatch queue, %false otherwise.
@@ -146,7 +166,7 @@ static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
{ {
struct request_queue *q = rq->q; struct request_queue *q = rq->q;
struct list_head *pending = &q->flush_queue[q->flush_pending_idx]; struct list_head *pending = &q->flush_queue[q->flush_pending_idx];
bool queued = false; bool queued = false, kicked;
BUG_ON(rq->flush.seq & seq); BUG_ON(rq->flush.seq & seq);
rq->flush.seq |= seq; rq->flush.seq |= seq;
@@ -167,8 +187,12 @@ static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
case REQ_FSEQ_DATA: case REQ_FSEQ_DATA:
list_move_tail(&rq->flush.list, &q->flush_data_in_flight); list_move_tail(&rq->flush.list, &q->flush_data_in_flight);
list_add(&rq->queuelist, &q->queue_head); if (q->mq_ops)
queued = true; blk_mq_flush_data_insert(rq);
else {
list_add(&rq->queuelist, &q->queue_head);
queued = true;
}
break; break;
case REQ_FSEQ_DONE: case REQ_FSEQ_DONE:
@@ -181,28 +205,43 @@ static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
BUG_ON(!list_empty(&rq->queuelist)); BUG_ON(!list_empty(&rq->queuelist));
list_del_init(&rq->flush.list); list_del_init(&rq->flush.list);
blk_flush_restore_request(rq); blk_flush_restore_request(rq);
__blk_end_request_all(rq, error); if (q->mq_ops)
blk_mq_end_io(rq, error);
else
__blk_end_request_all(rq, error);
break; break;
default: default:
BUG(); BUG();
} }
return blk_kick_flush(q) | queued; kicked = blk_kick_flush(q);
/* blk_mq_run_flush will run queue */
if (q->mq_ops)
return queued;
return kicked | queued;
} }
static void flush_end_io(struct request *flush_rq, int error) static void flush_end_io(struct request *flush_rq, int error)
{ {
struct request_queue *q = flush_rq->q; struct request_queue *q = flush_rq->q;
struct list_head *running = &q->flush_queue[q->flush_running_idx]; struct list_head *running;
bool queued = false; bool queued = false;
struct request *rq, *n; struct request *rq, *n;
unsigned long flags = 0;
if (q->mq_ops) {
blk_mq_free_request(flush_rq);
spin_lock_irqsave(&q->mq_flush_lock, flags);
}
running = &q->flush_queue[q->flush_running_idx];
BUG_ON(q->flush_pending_idx == q->flush_running_idx); BUG_ON(q->flush_pending_idx == q->flush_running_idx);
/* account completion of the flush request */ /* account completion of the flush request */
q->flush_running_idx ^= 1; q->flush_running_idx ^= 1;
elv_completed_request(q, flush_rq);
if (!q->mq_ops)
elv_completed_request(q, flush_rq);
/* and push the waiting requests to the next stage */ /* and push the waiting requests to the next stage */
list_for_each_entry_safe(rq, n, running, flush.list) { list_for_each_entry_safe(rq, n, running, flush.list) {
@@ -223,9 +262,48 @@ static void flush_end_io(struct request *flush_rq, int error)
* directly into request_fn may confuse the driver. Always use * directly into request_fn may confuse the driver. Always use
* kblockd. * kblockd.
*/ */
if (queued || q->flush_queue_delayed) if (queued || q->flush_queue_delayed) {
blk_run_queue_async(q); if (!q->mq_ops)
blk_run_queue_async(q);
else
/*
* This can be optimized to only run queues with requests
* queued if necessary.
*/
blk_mq_run_queues(q, true);
}
q->flush_queue_delayed = 0; q->flush_queue_delayed = 0;
if (q->mq_ops)
spin_unlock_irqrestore(&q->mq_flush_lock, flags);
}
static void mq_flush_work(struct work_struct *work)
{
struct request_queue *q;
struct request *rq;
q = container_of(work, struct request_queue, mq_flush_work);
/* We don't need to set REQ_FLUSH_SEQ; it's for consistency */
rq = blk_mq_alloc_request(q, WRITE_FLUSH|REQ_FLUSH_SEQ,
__GFP_WAIT|GFP_ATOMIC, true);
rq->cmd_type = REQ_TYPE_FS;
rq->end_io = flush_end_io;
blk_mq_run_request(rq, true, false);
}
/*
* We can't directly use q->flush_rq, because it doesn't have a tag and is not in
* hctx->rqs[], so we must allocate a new request. Since we can't sleep here,
* offload the work to a workqueue.
*
* Note: we assume a flush request finished in any hardware queue will flush
* the whole disk cache.
*/
static void mq_run_flush(struct request_queue *q)
{
kblockd_schedule_work(q, &q->mq_flush_work);
} }
/** /**
@@ -236,7 +314,7 @@ static void flush_end_io(struct request *flush_rq, int error)
* Please read the comment at the top of this file for more info. * Please read the comment at the top of this file for more info.
* *
* CONTEXT: * CONTEXT:
* spin_lock_irq(q->queue_lock) * spin_lock_irq(q->queue_lock or q->mq_flush_lock)
* *
* RETURNS: * RETURNS:
* %true if flush was issued, %false otherwise. * %true if flush was issued, %false otherwise.
@@ -261,13 +339,18 @@ static bool blk_kick_flush(struct request_queue *q)
* Issue flush and toggle pending_idx. This makes pending_idx * Issue flush and toggle pending_idx. This makes pending_idx
* different from running_idx, which means flush is in flight. * different from running_idx, which means flush is in flight.
*/ */
q->flush_pending_idx ^= 1;
if (q->mq_ops) {
mq_run_flush(q);
return true;
}
blk_rq_init(q, &q->flush_rq); blk_rq_init(q, &q->flush_rq);
q->flush_rq.cmd_type = REQ_TYPE_FS; q->flush_rq.cmd_type = REQ_TYPE_FS;
q->flush_rq.cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ; q->flush_rq.cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
q->flush_rq.rq_disk = first_rq->rq_disk; q->flush_rq.rq_disk = first_rq->rq_disk;
q->flush_rq.end_io = flush_end_io; q->flush_rq.end_io = flush_end_io;
q->flush_pending_idx ^= 1;
list_add_tail(&q->flush_rq.queuelist, &q->queue_head); list_add_tail(&q->flush_rq.queuelist, &q->queue_head);
return true; return true;
} }
@@ -284,16 +367,37 @@ static void flush_data_end_io(struct request *rq, int error)
blk_run_queue_async(q); blk_run_queue_async(q);
} }
static void mq_flush_data_end_io(struct request *rq, int error)
{
struct request_queue *q = rq->q;
struct blk_mq_hw_ctx *hctx;
struct blk_mq_ctx *ctx;
unsigned long flags;
ctx = rq->mq_ctx;
hctx = q->mq_ops->map_queue(q, ctx->cpu);
/*
* After populating an empty queue, kick it to avoid stall. Read
* the comment in flush_end_io().
*/
spin_lock_irqsave(&q->mq_flush_lock, flags);
if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error))
blk_mq_run_hw_queue(hctx, true);
spin_unlock_irqrestore(&q->mq_flush_lock, flags);
}
/** /**
* blk_insert_flush - insert a new FLUSH/FUA request * blk_insert_flush - insert a new FLUSH/FUA request
* @rq: request to insert * @rq: request to insert
* *
* To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH insertions. * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH insertions.
* or __blk_mq_run_hw_queue() to dispatch request.
* @rq is being submitted. Analyze what needs to be done and put it on the * @rq is being submitted. Analyze what needs to be done and put it on the
* right queue. * right queue.
* *
* CONTEXT: * CONTEXT:
* spin_lock_irq(q->queue_lock) * spin_lock_irq(q->queue_lock) in !mq case
*/ */
void blk_insert_flush(struct request *rq) void blk_insert_flush(struct request *rq)
{ {
@@ -316,7 +420,10 @@ void blk_insert_flush(struct request *rq)
* complete the request. * complete the request.
*/ */
if (!policy) { if (!policy) {
__blk_end_bidi_request(rq, 0, 0, 0); if (q->mq_ops)
blk_mq_end_io(rq, 0);
else
__blk_end_bidi_request(rq, 0, 0, 0);
return; return;
} }
@@ -329,7 +436,10 @@ void blk_insert_flush(struct request *rq)
*/ */
if ((policy & REQ_FSEQ_DATA) && if ((policy & REQ_FSEQ_DATA) &&
!(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) { !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
list_add_tail(&rq->queuelist, &q->queue_head); if (q->mq_ops) {
blk_mq_run_request(rq, false, true);
} else
list_add_tail(&rq->queuelist, &q->queue_head);
return; return;
} }
@@ -341,6 +451,14 @@ void blk_insert_flush(struct request *rq)
INIT_LIST_HEAD(&rq->flush.list); INIT_LIST_HEAD(&rq->flush.list);
rq->cmd_flags |= REQ_FLUSH_SEQ; rq->cmd_flags |= REQ_FLUSH_SEQ;
rq->flush.saved_end_io = rq->end_io; /* Usually NULL */ rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
if (q->mq_ops) {
rq->end_io = mq_flush_data_end_io;
spin_lock_irq(&q->mq_flush_lock);
blk_flush_complete_seq(rq, REQ_FSEQ_ACTIONS & ~policy, 0);
spin_unlock_irq(&q->mq_flush_lock);
return;
}
rq->end_io = flush_data_end_io; rq->end_io = flush_data_end_io;
blk_flush_complete_seq(rq, REQ_FSEQ_ACTIONS & ~policy, 0); blk_flush_complete_seq(rq, REQ_FSEQ_ACTIONS & ~policy, 0);
@@ -453,3 +571,9 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
return ret; return ret;
} }
EXPORT_SYMBOL(blkdev_issue_flush); EXPORT_SYMBOL(blkdev_issue_flush);
void blk_mq_init_flush(struct request_queue *q)
{
spin_lock_init(&q->mq_flush_lock);
INIT_WORK(&q->mq_flush_work, mq_flush_work);
}
@@ -308,6 +308,17 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
return ll_new_hw_segment(q, req, bio); return ll_new_hw_segment(q, req, bio);
} }
/*
* blk-mq uses req->special to carry normal driver per-request payload; it
* does not indicate a prepared command that we cannot merge with.
*/
static bool req_no_special_merge(struct request *req)
{
struct request_queue *q = req->q;
return !q->mq_ops && req->special;
}
static int ll_merge_requests_fn(struct request_queue *q, struct request *req, static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
struct request *next) struct request *next)
{ {
@@ -319,7 +330,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
* First check if the either of the requests are re-queued * First check if the either of the requests are re-queued
* requests. Can't merge them if they are. * requests. Can't merge them if they are.
*/ */
if (req->special || next->special) if (req_no_special_merge(req) || req_no_special_merge(next))
return 0; return 0;
/* /*
@@ -416,7 +427,7 @@ static int attempt_merge(struct request_queue *q, struct request *req,
if (rq_data_dir(req) != rq_data_dir(next) if (rq_data_dir(req) != rq_data_dir(next)
|| req->rq_disk != next->rq_disk || req->rq_disk != next->rq_disk
|| next->special) || req_no_special_merge(next))
return 0; return 0;
if (req->cmd_flags & REQ_WRITE_SAME && if (req->cmd_flags & REQ_WRITE_SAME &&
@@ -515,7 +526,7 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
return false; return false;
/* must be same device and not a special request */ /* must be same device and not a special request */
if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special) if (rq->rq_disk != bio->bi_bdev->bd_disk || req_no_special_merge(rq))
return false; return false;
/* only merge integrity protected bio into ditto rq */ /* only merge integrity protected bio into ditto rq */
...
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/blk-mq.h>
#include "blk-mq.h"
static LIST_HEAD(blk_mq_cpu_notify_list);
static DEFINE_SPINLOCK(blk_mq_cpu_notify_lock);
static int __cpuinit blk_mq_main_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long) hcpu;
struct blk_mq_cpu_notifier *notify;
spin_lock(&blk_mq_cpu_notify_lock);
list_for_each_entry(notify, &blk_mq_cpu_notify_list, list)
notify->notify(notify->data, action, cpu);
spin_unlock(&blk_mq_cpu_notify_lock);
return NOTIFY_OK;
}
static void __cpuinit blk_mq_cpu_notify(void *data, unsigned long action,
unsigned int cpu)
{
if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
/*
* If the CPU goes away, ensure that we run any pending
* completions.
*/
struct llist_node *node;
struct request *rq;
local_irq_disable();
node = llist_del_all(&per_cpu(ipi_lists, cpu));
while (node) {
struct llist_node *next = node->next;
rq = llist_entry(node, struct request, ll_list);
__blk_mq_end_io(rq, rq->errors);
node = next;
}
local_irq_enable();
}
}
static struct notifier_block __cpuinitdata blk_mq_main_cpu_notifier = {
.notifier_call = blk_mq_main_cpu_notify,
};
void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
{
BUG_ON(!notifier->notify);
spin_lock(&blk_mq_cpu_notify_lock);
list_add_tail(&notifier->list, &blk_mq_cpu_notify_list);
spin_unlock(&blk_mq_cpu_notify_lock);
}
void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
{
spin_lock(&blk_mq_cpu_notify_lock);
list_del(&notifier->list);
spin_unlock(&blk_mq_cpu_notify_lock);
}
void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
void (*fn)(void *, unsigned long, unsigned int),
void *data)
{
notifier->notify = fn;
notifier->data = data;
}
static struct blk_mq_cpu_notifier __cpuinitdata cpu_notifier = {
.notify = blk_mq_cpu_notify,
};
void __init blk_mq_cpu_init(void)
{
register_hotcpu_notifier(&blk_mq_main_cpu_notifier);
blk_mq_register_cpu_notifier(&cpu_notifier);
}
#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
static void show_map(unsigned int *map, unsigned int nr)
{
int i;
pr_info("blk-mq: CPU -> queue map\n");
for_each_online_cpu(i)
pr_info(" CPU%2u -> Queue %u\n", i, map[i]);
}
static int cpu_to_queue_index(unsigned int nr_cpus, unsigned int nr_queues,
const int cpu)
{
return cpu / ((nr_cpus + nr_queues - 1) / nr_queues);
}
static int get_first_sibling(unsigned int cpu)
{
unsigned int ret;
ret = cpumask_first(topology_thread_cpumask(cpu));
if (ret < nr_cpu_ids)
return ret;
return cpu;
}
int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues)
{
unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling;
cpumask_var_t cpus;
if (!alloc_cpumask_var(&cpus, GFP_ATOMIC))
return 1;
cpumask_clear(cpus);
nr_cpus = nr_uniq_cpus = 0;
for_each_online_cpu(i) {
nr_cpus++;
first_sibling = get_first_sibling(i);
if (!cpumask_test_cpu(first_sibling, cpus))
nr_uniq_cpus++;
cpumask_set_cpu(i, cpus);
}
queue = 0;
for_each_possible_cpu(i) {
if (!cpu_online(i)) {
map[i] = 0;
continue;
}
/*
* Easy case - we have equal or more hardware queues. Or
* there are no thread siblings to take into account. Do
* 1:1 if enough, or sequential mapping if less.
*/
if (nr_queues >= nr_cpus || nr_cpus == nr_uniq_cpus) {
map[i] = cpu_to_queue_index(nr_cpus, nr_queues, queue);
queue++;
continue;
}
/*
* Less than nr_cpus queues, and we have some number of
* threads per core. Map sibling threads to the same
* queue.
*/
first_sibling = get_first_sibling(i);
if (first_sibling == i) {
map[i] = cpu_to_queue_index(nr_uniq_cpus, nr_queues,
queue);
queue++;
} else
map[i] = map[first_sibling];
}
show_map(map, nr_cpus);
free_cpumask_var(cpus);
return 0;
}
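To make the sibling-aware mapping above concrete, here is a standalone userspace sketch of the same arithmetic, assuming 8 online CPUs where CPU i and CPU i+4 are hyperthread siblings and 2 hardware queues (all of these numbers are illustrative, not from the commit):

#include <stdio.h>

/* Same grouping arithmetic as cpu_to_queue_index() above. */
static unsigned int cpu_to_queue_index(unsigned int nr_cpus,
				       unsigned int nr_queues, unsigned int cpu)
{
	return cpu / ((nr_cpus + nr_queues - 1) / nr_queues);
}

int main(void)
{
	unsigned int nr_queues = 2, nr_uniq_cpus = 4;	/* 4 cores x 2 threads */
	unsigned int map[8], queue = 0, i;

	for (i = 0; i < 8; i++) {
		unsigned int first_sibling = i % 4;	/* stand-in for get_first_sibling() */

		if (first_sibling == i)			/* first thread of a core */
			map[i] = cpu_to_queue_index(nr_uniq_cpus, nr_queues, queue++);
		else					/* other thread shares its sibling's queue */
			map[i] = map[first_sibling];
		printf("CPU%2u -> Queue %u\n", i, map[i]);
	}
	return 0;	/* prints 0 0 1 1 0 0 1 1 */
}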
unsigned int *blk_mq_make_queue_map(struct blk_mq_reg *reg)
{
unsigned int *map;
/* If cpus are offline, map them to first hctx */
map = kzalloc_node(sizeof(*map) * num_possible_cpus(), GFP_KERNEL,
reg->numa_node);
if (!map)
return NULL;
if (!blk_mq_update_queue_map(map, reg->nr_hw_queues))
return map;
kfree(map);
return NULL;
}
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/blk-mq.h>
#include "blk-mq.h"
#include "blk-mq-tag.h"
static void blk_mq_sysfs_release(struct kobject *kobj)
{
}
struct blk_mq_ctx_sysfs_entry {
struct attribute attr;
ssize_t (*show)(struct blk_mq_ctx *, char *);
ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t);
};
struct blk_mq_hw_ctx_sysfs_entry {
struct attribute attr;
ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
};
static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr,
char *page)
{
struct blk_mq_ctx_sysfs_entry *entry;
struct blk_mq_ctx *ctx;
struct request_queue *q;
ssize_t res;
entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
ctx = container_of(kobj, struct blk_mq_ctx, kobj);
q = ctx->queue;
if (!entry->show)
return -EIO;
res = -ENOENT;
mutex_lock(&q->sysfs_lock);
if (!blk_queue_dying(q))
res = entry->show(ctx, page);
mutex_unlock(&q->sysfs_lock);
return res;
}
static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr,
const char *page, size_t length)
{
struct blk_mq_ctx_sysfs_entry *entry;
struct blk_mq_ctx *ctx;
struct request_queue *q;
ssize_t res;
entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
ctx = container_of(kobj, struct blk_mq_ctx, kobj);
q = ctx->queue;
if (!entry->store)
return -EIO;
res = -ENOENT;
mutex_lock(&q->sysfs_lock);
if (!blk_queue_dying(q))
res = entry->store(ctx, page, length);
mutex_unlock(&q->sysfs_lock);
return res;
}
static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
struct attribute *attr, char *page)
{
struct blk_mq_hw_ctx_sysfs_entry *entry;
struct blk_mq_hw_ctx *hctx;
struct request_queue *q;
ssize_t res;
entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
q = hctx->queue;
if (!entry->show)
return -EIO;
res = -ENOENT;
mutex_lock(&q->sysfs_lock);
if (!blk_queue_dying(q))
res = entry->show(hctx, page);
mutex_unlock(&q->sysfs_lock);
return res;
}
static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
struct attribute *attr, const char *page,
size_t length)
{
struct blk_mq_hw_ctx_sysfs_entry *entry;
struct blk_mq_hw_ctx *hctx;
struct request_queue *q;
ssize_t res;
entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
q = hctx->queue;
if (!entry->store)
return -EIO;
res = -ENOENT;
mutex_lock(&q->sysfs_lock);
if (!blk_queue_dying(q))
res = entry->store(hctx, page, length);
mutex_unlock(&q->sysfs_lock);
return res;
}
static ssize_t blk_mq_sysfs_dispatched_show(struct blk_mq_ctx *ctx, char *page)
{
return sprintf(page, "%lu %lu\n", ctx->rq_dispatched[1],
ctx->rq_dispatched[0]);
}
static ssize_t blk_mq_sysfs_merged_show(struct blk_mq_ctx *ctx, char *page)
{
return sprintf(page, "%lu\n", ctx->rq_merged);
}
static ssize_t blk_mq_sysfs_completed_show(struct blk_mq_ctx *ctx, char *page)
{
return sprintf(page, "%lu %lu\n", ctx->rq_completed[1],
ctx->rq_completed[0]);
}
static ssize_t sysfs_list_show(char *page, struct list_head *list, char *msg)
{
char *start_page = page;
struct request *rq;
page += sprintf(page, "%s:\n", msg);
list_for_each_entry(rq, list, queuelist)
page += sprintf(page, "\t%p\n", rq);
return page - start_page;
}
static ssize_t blk_mq_sysfs_rq_list_show(struct blk_mq_ctx *ctx, char *page)
{
ssize_t ret;
spin_lock(&ctx->lock);
ret = sysfs_list_show(page, &ctx->rq_list, "CTX pending");
spin_unlock(&ctx->lock);
return ret;
}
static ssize_t blk_mq_hw_sysfs_queued_show(struct blk_mq_hw_ctx *hctx,
char *page)
{
return sprintf(page, "%lu\n", hctx->queued);
}
static ssize_t blk_mq_hw_sysfs_run_show(struct blk_mq_hw_ctx *hctx, char *page)
{
return sprintf(page, "%lu\n", hctx->run);
}
static ssize_t blk_mq_hw_sysfs_dispatched_show(struct blk_mq_hw_ctx *hctx,
char *page)
{
char *start_page = page;
int i;
page += sprintf(page, "%8u\t%lu\n", 0U, hctx->dispatched[0]);
for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER; i++) {
unsigned long d = 1U << (i - 1);
page += sprintf(page, "%8lu\t%lu\n", d, hctx->dispatched[i]);
}
return page - start_page;
}
static ssize_t blk_mq_hw_sysfs_rq_list_show(struct blk_mq_hw_ctx *hctx,
char *page)
{
ssize_t ret;
spin_lock(&hctx->lock);
ret = sysfs_list_show(page, &hctx->dispatch, "HCTX pending");
spin_unlock(&hctx->lock);
return ret;
}
static ssize_t blk_mq_hw_sysfs_ipi_show(struct blk_mq_hw_ctx *hctx, char *page)
{
ssize_t ret;
spin_lock(&hctx->lock);
ret = sprintf(page, "%u\n", !!(hctx->flags & BLK_MQ_F_SHOULD_IPI));
spin_unlock(&hctx->lock);
return ret;
}
static ssize_t blk_mq_hw_sysfs_ipi_store(struct blk_mq_hw_ctx *hctx,
const char *page, size_t len)
{
struct blk_mq_ctx *ctx;
unsigned long ret;
unsigned int i;
if (kstrtoul(page, 10, &ret)) {
pr_err("blk-mq-sysfs: invalid input '%s'\n", page);
return -EINVAL;
}
spin_lock(&hctx->lock);
if (ret)
hctx->flags |= BLK_MQ_F_SHOULD_IPI;
else
hctx->flags &= ~BLK_MQ_F_SHOULD_IPI;
spin_unlock(&hctx->lock);
hctx_for_each_ctx(hctx, ctx, i)
ctx->ipi_redirect = !!ret;
return len;
}
static ssize_t blk_mq_hw_sysfs_tags_show(struct blk_mq_hw_ctx *hctx, char *page)
{
return blk_mq_tag_sysfs_show(hctx->tags, page);
}
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_dispatched = {
.attr = {.name = "dispatched", .mode = S_IRUGO },
.show = blk_mq_sysfs_dispatched_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_merged = {
.attr = {.name = "merged", .mode = S_IRUGO },
.show = blk_mq_sysfs_merged_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_completed = {
.attr = {.name = "completed", .mode = S_IRUGO },
.show = blk_mq_sysfs_completed_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_rq_list = {
.attr = {.name = "rq_list", .mode = S_IRUGO },
.show = blk_mq_sysfs_rq_list_show,
};
static struct attribute *default_ctx_attrs[] = {
&blk_mq_sysfs_dispatched.attr,
&blk_mq_sysfs_merged.attr,
&blk_mq_sysfs_completed.attr,
&blk_mq_sysfs_rq_list.attr,
NULL,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_queued = {
.attr = {.name = "queued", .mode = S_IRUGO },
.show = blk_mq_hw_sysfs_queued_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_run = {
.attr = {.name = "run", .mode = S_IRUGO },
.show = blk_mq_hw_sysfs_run_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_dispatched = {
.attr = {.name = "dispatched", .mode = S_IRUGO },
.show = blk_mq_hw_sysfs_dispatched_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_pending = {
.attr = {.name = "pending", .mode = S_IRUGO },
.show = blk_mq_hw_sysfs_rq_list_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_ipi = {
.attr = {.name = "ipi_redirect", .mode = S_IRUGO | S_IWUSR},
.show = blk_mq_hw_sysfs_ipi_show,
.store = blk_mq_hw_sysfs_ipi_store,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_tags = {
.attr = {.name = "tags", .mode = S_IRUGO },
.show = blk_mq_hw_sysfs_tags_show,
};
static struct attribute *default_hw_ctx_attrs[] = {
&blk_mq_hw_sysfs_queued.attr,
&blk_mq_hw_sysfs_run.attr,
&blk_mq_hw_sysfs_dispatched.attr,
&blk_mq_hw_sysfs_pending.attr,
&blk_mq_hw_sysfs_ipi.attr,
&blk_mq_hw_sysfs_tags.attr,
NULL,
};
static const struct sysfs_ops blk_mq_sysfs_ops = {
.show = blk_mq_sysfs_show,
.store = blk_mq_sysfs_store,
};
static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
.show = blk_mq_hw_sysfs_show,
.store = blk_mq_hw_sysfs_store,
};
static struct kobj_type blk_mq_ktype = {
.sysfs_ops = &blk_mq_sysfs_ops,
.release = blk_mq_sysfs_release,
};
static struct kobj_type blk_mq_ctx_ktype = {
.sysfs_ops = &blk_mq_sysfs_ops,
.default_attrs = default_ctx_attrs,
.release = blk_mq_sysfs_release,
};
static struct kobj_type blk_mq_hw_ktype = {
.sysfs_ops = &blk_mq_hw_sysfs_ops,
.default_attrs = default_hw_ctx_attrs,
.release = blk_mq_sysfs_release,
};
void blk_mq_unregister_disk(struct gendisk *disk)
{
struct request_queue *q = disk->queue;
kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
kobject_del(&q->mq_kobj);
kobject_put(&disk_to_dev(disk)->kobj);
}
int blk_mq_register_disk(struct gendisk *disk)
{
struct device *dev = disk_to_dev(disk);
struct request_queue *q = disk->queue;
struct blk_mq_hw_ctx *hctx;
struct blk_mq_ctx *ctx;
int ret, i, j;
kobject_init(&q->mq_kobj, &blk_mq_ktype);
ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
if (ret < 0)
return ret;
kobject_uevent(&q->mq_kobj, KOBJ_ADD);
queue_for_each_hw_ctx(q, hctx, i) {
kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", i);
if (ret)
break;
if (!hctx->nr_ctx)
continue;
hctx_for_each_ctx(hctx, ctx, j) {
kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
if (ret)
break;
}
}
if (ret) {
blk_mq_unregister_disk(disk);
return ret;
}
return 0;
}
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu_ida.h>
#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
/*
* Per tagged queue (tag address space) map
*/
struct blk_mq_tags {
unsigned int nr_tags;
unsigned int nr_reserved_tags;
unsigned int nr_batch_move;
unsigned int nr_max_cache;
struct percpu_ida free_tags;
struct percpu_ida reserved_tags;
};
void blk_mq_wait_for_tags(struct blk_mq_tags *tags)
{
int tag = blk_mq_get_tag(tags, __GFP_WAIT, false);
blk_mq_put_tag(tags, tag);
}
bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
return !tags ||
percpu_ida_free_tags(&tags->free_tags, nr_cpu_ids) != 0;
}
static unsigned int __blk_mq_get_tag(struct blk_mq_tags *tags, gfp_t gfp)
{
int tag;
tag = percpu_ida_alloc(&tags->free_tags, gfp);
if (tag < 0)
return BLK_MQ_TAG_FAIL;
return tag + tags->nr_reserved_tags;
}
static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_tags *tags,
gfp_t gfp)
{
int tag;
if (unlikely(!tags->nr_reserved_tags)) {
WARN_ON_ONCE(1);
return BLK_MQ_TAG_FAIL;
}
tag = percpu_ida_alloc(&tags->reserved_tags, gfp);
if (tag < 0)
return BLK_MQ_TAG_FAIL;
return tag;
}
unsigned int blk_mq_get_tag(struct blk_mq_tags *tags, gfp_t gfp, bool reserved)
{
if (!reserved)
return __blk_mq_get_tag(tags, gfp);
return __blk_mq_get_reserved_tag(tags, gfp);
}
static void __blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag)
{
BUG_ON(tag >= tags->nr_tags);
percpu_ida_free(&tags->free_tags, tag - tags->nr_reserved_tags);
}
static void __blk_mq_put_reserved_tag(struct blk_mq_tags *tags,
unsigned int tag)
{
BUG_ON(tag >= tags->nr_reserved_tags);
percpu_ida_free(&tags->reserved_tags, tag);
}
void blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag)
{
if (tag >= tags->nr_reserved_tags)
__blk_mq_put_tag(tags, tag);
else
__blk_mq_put_reserved_tag(tags, tag);
}
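The reserved/normal split above only offsets the externally visible tag number: percpu_ida values from the free pool are shifted up by nr_reserved_tags on the way out and shifted back on free. A tiny standalone sketch of that offset arithmetic, assuming a single reserved tag (the constant is illustrative):

#include <assert.h>

#define NR_RESERVED_TAGS 1u	/* assumed value for illustration */

static unsigned int visible_tag(unsigned int ida_tag)	/* mirrors __blk_mq_get_tag() */
{
	return ida_tag + NR_RESERVED_TAGS;
}

static unsigned int pool_tag(unsigned int visible)	/* mirrors __blk_mq_put_tag() */
{
	return visible - NR_RESERVED_TAGS;
}

int main(void)
{
	assert(visible_tag(0) == 1);		/* first normal tag handed out is 1, not 0 */
	assert(pool_tag(visible_tag(42)) == 42);
	/* A visible tag below NR_RESERVED_TAGS is routed to the reserved pool
	 * by blk_mq_put_tag() instead. */
	return 0;
}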
static int __blk_mq_tag_iter(unsigned id, void *data)
{
unsigned long *tag_map = data;
__set_bit(id, tag_map);
return 0;
}
void blk_mq_tag_busy_iter(struct blk_mq_tags *tags,
void (*fn)(void *, unsigned long *), void *data)
{
unsigned long *tag_map;
size_t map_size;
map_size = ALIGN(tags->nr_tags, BITS_PER_LONG) / BITS_PER_LONG;
tag_map = kzalloc(map_size * sizeof(unsigned long), GFP_ATOMIC);
if (!tag_map)
return;
percpu_ida_for_each_free(&tags->free_tags, __blk_mq_tag_iter, tag_map);
if (tags->nr_reserved_tags)
percpu_ida_for_each_free(&tags->reserved_tags, __blk_mq_tag_iter,
tag_map);
fn(data, tag_map);
kfree(tag_map);
}
struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
unsigned int reserved_tags, int node)
{
unsigned int nr_tags, nr_cache;
struct blk_mq_tags *tags;
int ret;
if (total_tags > BLK_MQ_TAG_MAX) {
pr_err("blk-mq: tag depth too large\n");
return NULL;
}
tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
if (!tags)
return NULL;
nr_tags = total_tags - reserved_tags;
nr_cache = nr_tags / num_possible_cpus();
if (nr_cache < BLK_MQ_TAG_CACHE_MIN)
nr_cache = BLK_MQ_TAG_CACHE_MIN;
else if (nr_cache > BLK_MQ_TAG_CACHE_MAX)
nr_cache = BLK_MQ_TAG_CACHE_MAX;
tags->nr_tags = total_tags;
tags->nr_reserved_tags = reserved_tags;
tags->nr_max_cache = nr_cache;
tags->nr_batch_move = max(1u, nr_cache / 2);
ret = __percpu_ida_init(&tags->free_tags, tags->nr_tags -
tags->nr_reserved_tags,
tags->nr_max_cache,
tags->nr_batch_move);
if (ret)
goto err_free_tags;
if (reserved_tags) {
/*
* With max_cache and batch set to 1, the allocator falls back to
* no caching. It's fine if reserved tag allocation is slow.
*/
ret = __percpu_ida_init(&tags->reserved_tags, reserved_tags,
1, 1);
if (ret)
goto err_reserved_tags;
}
return tags;
err_reserved_tags:
percpu_ida_destroy(&tags->free_tags);
err_free_tags:
kfree(tags);
return NULL;
}
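The per-CPU cache sizing in blk_mq_init_tags() is plain integer arithmetic; a standalone sketch with assumed values (128 total tags, 1 reserved, 8 possible CPUs, none taken from the commit):

#include <stdio.h>

int main(void)
{
	unsigned int total_tags = 128, reserved_tags = 1, possible_cpus = 8;
	unsigned int nr_tags = total_tags - reserved_tags;	/* 127 */
	unsigned int nr_cache = nr_tags / possible_cpus;	/* 127 / 8 = 15 */
	unsigned int batch;

	/* Clamp to BLK_MQ_TAG_CACHE_MIN (1) .. BLK_MQ_TAG_CACHE_MAX (64). */
	if (nr_cache < 1)
		nr_cache = 1;
	else if (nr_cache > 64)
		nr_cache = 64;

	batch = nr_cache / 2 > 1 ? nr_cache / 2 : 1;	/* nr_batch_move = max(1, nr_cache / 2) */

	printf("nr_max_cache=%u, nr_batch_move=%u\n", nr_cache, batch);	/* 15, 7 */
	return 0;
}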
void blk_mq_free_tags(struct blk_mq_tags *tags)
{
percpu_ida_destroy(&tags->free_tags);
percpu_ida_destroy(&tags->reserved_tags);
kfree(tags);
}
ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
{
char *orig_page = page;
int cpu;
if (!tags)
return 0;
page += sprintf(page, "nr_tags=%u, reserved_tags=%u, batch_move=%u,"
" max_cache=%u\n", tags->nr_tags, tags->nr_reserved_tags,
tags->nr_batch_move, tags->nr_max_cache);
page += sprintf(page, "nr_free=%u, nr_reserved=%u\n",
percpu_ida_free_tags(&tags->free_tags, nr_cpu_ids),
percpu_ida_free_tags(&tags->reserved_tags, nr_cpu_ids));
for_each_possible_cpu(cpu) {
page += sprintf(page, " cpu%02u: nr_free=%u\n", cpu,
percpu_ida_free_tags(&tags->free_tags, cpu));
}
return page - orig_page;
}
#ifndef INT_BLK_MQ_TAG_H
#define INT_BLK_MQ_TAG_H
struct blk_mq_tags;
extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int reserved_tags, int node);
extern void blk_mq_free_tags(struct blk_mq_tags *tags);
extern unsigned int blk_mq_get_tag(struct blk_mq_tags *tags, gfp_t gfp, bool reserved);
extern void blk_mq_wait_for_tags(struct blk_mq_tags *tags);
extern void blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag);
extern void blk_mq_tag_busy_iter(struct blk_mq_tags *tags, void (*fn)(void *data, unsigned long *), void *data);
extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags);
extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page);
enum {
BLK_MQ_TAG_CACHE_MIN = 1,
BLK_MQ_TAG_CACHE_MAX = 64,
};
enum {
BLK_MQ_TAG_FAIL = -1U,
BLK_MQ_TAG_MIN = BLK_MQ_TAG_CACHE_MIN,
BLK_MQ_TAG_MAX = BLK_MQ_TAG_FAIL - 1,
};
#endif
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H
struct blk_mq_ctx {
struct {
spinlock_t lock;
struct list_head rq_list;
} ____cacheline_aligned_in_smp;
unsigned int cpu;
unsigned int index_hw;
unsigned int ipi_redirect;
/* incremented at dispatch time */
unsigned long rq_dispatched[2];
unsigned long rq_merged;
/* incremented at completion time */
unsigned long ____cacheline_aligned_in_smp rq_completed[2];
struct request_queue *queue;
struct kobject kobj;
};
void __blk_mq_end_io(struct request *rq, int error);
void blk_mq_complete_request(struct request *rq, int error);
void blk_mq_run_request(struct request *rq, bool run_queue, bool async);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_init_flush(struct request_queue *q);
/*
* CPU hotplug helpers
*/
struct blk_mq_cpu_notifier;
void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
void (*fn)(void *, unsigned long, unsigned int),
void *data);
void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
void blk_mq_cpu_init(void);
DECLARE_PER_CPU(struct llist_head, ipi_lists);
/*
* CPU -> queue mappings
*/
struct blk_mq_reg;
extern unsigned int *blk_mq_make_queue_map(struct blk_mq_reg *reg);
extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues);
void blk_mq_add_timer(struct request *rq);
#endif
@@ -7,6 +7,7 @@
#include <linux/bio.h> #include <linux/bio.h>
#include <linux/blkdev.h> #include <linux/blkdev.h>
#include <linux/blktrace_api.h> #include <linux/blktrace_api.h>
#include <linux/blk-mq.h>
#include "blk.h" #include "blk.h"
#include "blk-cgroup.h" #include "blk-cgroup.h"
@@ -542,6 +543,11 @@ static void blk_release_queue(struct kobject *kobj)
if (q->queue_tags) if (q->queue_tags)
__blk_queue_free_tags(q); __blk_queue_free_tags(q);
percpu_counter_destroy(&q->mq_usage_counter);
if (q->mq_ops)
blk_mq_free_queue(q);
blk_trace_shutdown(q); blk_trace_shutdown(q);
bdi_destroy(&q->backing_dev_info); bdi_destroy(&q->backing_dev_info);
@@ -575,6 +581,7 @@ int blk_register_queue(struct gendisk *disk)
* bypass from queue allocation. * bypass from queue allocation.
*/ */
blk_queue_bypass_end(q); blk_queue_bypass_end(q);
queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
ret = blk_trace_init_sysfs(dev); ret = blk_trace_init_sysfs(dev);
if (ret) if (ret)
@@ -588,6 +595,9 @@ int blk_register_queue(struct gendisk *disk)
kobject_uevent(&q->kobj, KOBJ_ADD); kobject_uevent(&q->kobj, KOBJ_ADD);
if (q->mq_ops)
blk_mq_register_disk(disk);
if (!q->request_fn) if (!q->request_fn)
return 0; return 0;
@@ -610,6 +620,9 @@ void blk_unregister_queue(struct gendisk *disk)
if (WARN_ON(!q)) if (WARN_ON(!q))
return; return;
if (q->mq_ops)
blk_mq_unregister_disk(disk);
if (q->request_fn) if (q->request_fn)
elv_unregister_queue(q); elv_unregister_queue(q);
...
@@ -7,6 +7,7 @@
#include <linux/fault-inject.h> #include <linux/fault-inject.h>
#include "blk.h" #include "blk.h"
#include "blk-mq.h"
#ifdef CONFIG_FAIL_IO_TIMEOUT #ifdef CONFIG_FAIL_IO_TIMEOUT
@@ -88,10 +89,18 @@ static void blk_rq_timed_out(struct request *req)
ret = q->rq_timed_out_fn(req); ret = q->rq_timed_out_fn(req);
switch (ret) { switch (ret) {
case BLK_EH_HANDLED: case BLK_EH_HANDLED:
__blk_complete_request(req); /* Can we use req->errors here? */
if (q->mq_ops)
blk_mq_complete_request(req, req->errors);
else
__blk_complete_request(req);
break; break;
case BLK_EH_RESET_TIMER: case BLK_EH_RESET_TIMER:
blk_add_timer(req); if (q->mq_ops)
blk_mq_add_timer(req);
else
blk_add_timer(req);
blk_clear_rq_complete(req); blk_clear_rq_complete(req);
break; break;
case BLK_EH_NOT_HANDLED: case BLK_EH_NOT_HANDLED:
@@ -108,6 +117,23 @@ static void blk_rq_timed_out(struct request *req)
} }
} }
void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout,
unsigned int *next_set)
{
if (time_after_eq(jiffies, rq->deadline)) {
list_del_init(&rq->timeout_list);
/*
* Check if we raced with end io completion
*/
if (!blk_mark_rq_complete(rq))
blk_rq_timed_out(rq);
} else if (!*next_set || time_after(*next_timeout, rq->deadline)) {
*next_timeout = rq->deadline;
*next_set = 1;
}
}
void blk_rq_timed_out_timer(unsigned long data) void blk_rq_timed_out_timer(unsigned long data)
{ {
struct request_queue *q = (struct request_queue *) data; struct request_queue *q = (struct request_queue *) data;
@@ -117,21 +143,8 @@ void blk_rq_timed_out_timer(unsigned long data)
spin_lock_irqsave(q->queue_lock, flags); spin_lock_irqsave(q->queue_lock, flags);
list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list) { list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list)
if (time_after_eq(jiffies, rq->deadline)) { blk_rq_check_expired(rq, &next, &next_set);
list_del_init(&rq->timeout_list);
/*
* Check if we raced with end io completion
*/
if (blk_mark_rq_complete(rq))
continue;
blk_rq_timed_out(rq);
} else if (!next_set || time_after(next, rq->deadline)) {
next = rq->deadline;
next_set = 1;
}
}
if (next_set) if (next_set)
mod_timer(&q->timeout, round_jiffies_up(next)); mod_timer(&q->timeout, round_jiffies_up(next));
@@ -157,15 +170,7 @@ void blk_abort_request(struct request *req)
} }
EXPORT_SYMBOL_GPL(blk_abort_request); EXPORT_SYMBOL_GPL(blk_abort_request);
/** void __blk_add_timer(struct request *req, struct list_head *timeout_list)
* blk_add_timer - Start timeout timer for a single request
* @req: request that is about to start running.
*
* Notes:
* Each request has its own timer, and as it is added to the queue, we
* set up the timer. When the request completes, we cancel the timer.
*/
void blk_add_timer(struct request *req)
{ {
struct request_queue *q = req->q; struct request_queue *q = req->q;
unsigned long expiry; unsigned long expiry;
@@ -183,7 +188,8 @@ void blk_add_timer(struct request *req)
req->timeout = q->rq_timeout; req->timeout = q->rq_timeout;
req->deadline = jiffies + req->timeout; req->deadline = jiffies + req->timeout;
list_add_tail(&req->timeout_list, &q->timeout_list); if (timeout_list)
list_add_tail(&req->timeout_list, timeout_list);
/* /*
* If the timer isn't already pending or this timeout is earlier * If the timer isn't already pending or this timeout is earlier
@@ -195,5 +201,19 @@ void blk_add_timer(struct request *req)
if (!timer_pending(&q->timeout) || if (!timer_pending(&q->timeout) ||
time_before(expiry, q->timeout.expires)) time_before(expiry, q->timeout.expires))
mod_timer(&q->timeout, expiry); mod_timer(&q->timeout, expiry);
}
/**
* blk_add_timer - Start timeout timer for a single request
* @req: request that is about to start running.
*
* Notes:
* Each request has its own timer, and as it is added to the queue, we
* set up the timer. When the request completes, we cancel the timer.
*/
void blk_add_timer(struct request *req)
{
__blk_add_timer(req, &req->q->timeout_list);
} }
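The hunk above splits the body of blk_add_timer() into __blk_add_timer() so that the caller decides whether the request gets linked onto q->timeout_list. A minimal sketch of the two call patterns follows; the helper name is invented, and the NULL case is presumably what the blk-mq path (blk_mq_add_timer) relies on rather than anything shown verbatim in this commit.

#include <linux/blkdev.h>
#include "blk.h"	/* __blk_add_timer(), blk_add_timer() */

static void example_start_request_timer(struct request *rq, bool singlequeue)
{
	if (singlequeue)
		__blk_add_timer(rq, &rq->q->timeout_list);	/* legacy: track on the queue's list */
	else
		__blk_add_timer(rq, NULL);			/* arm the deadline only, no list linkage */
}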
...@@ -10,6 +10,7 @@ ...@@ -10,6 +10,7 @@
#define BLK_BATCH_REQ 32 #define BLK_BATCH_REQ 32
extern struct kmem_cache *blk_requestq_cachep; extern struct kmem_cache *blk_requestq_cachep;
extern struct kmem_cache *request_cachep;
extern struct kobj_type blk_queue_ktype; extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida; extern struct ida blk_queue_ida;
...@@ -34,14 +35,30 @@ bool __blk_end_bidi_request(struct request *rq, int error, ...@@ -34,14 +35,30 @@ bool __blk_end_bidi_request(struct request *rq, int error,
unsigned int nr_bytes, unsigned int bidi_bytes); unsigned int nr_bytes, unsigned int bidi_bytes);
void blk_rq_timed_out_timer(unsigned long data); void blk_rq_timed_out_timer(unsigned long data);
void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout,
unsigned int *next_set);
void __blk_add_timer(struct request *req, struct list_head *timeout_list);
void blk_delete_timer(struct request *); void blk_delete_timer(struct request *);
void blk_add_timer(struct request *); void blk_add_timer(struct request *);
bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
struct bio *bio);
bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
struct bio *bio);
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
unsigned int *request_count);
void blk_account_io_start(struct request *req, bool new_io);
void blk_account_io_completion(struct request *req, unsigned int bytes);
void blk_account_io_done(struct request *req);
/* /*
* Internal atomic flags for request handling * Internal atomic flags for request handling
*/ */
enum rq_atomic_flags { enum rq_atomic_flags {
REQ_ATOM_COMPLETE = 0, REQ_ATOM_COMPLETE = 0,
REQ_ATOM_STARTED,
}; };
/* /*
......
...@@ -15,6 +15,9 @@ menuconfig BLK_DEV ...@@ -15,6 +15,9 @@ menuconfig BLK_DEV
if BLK_DEV if BLK_DEV
config BLK_DEV_NULL_BLK
tristate "Null test block driver"
config BLK_DEV_FD config BLK_DEV_FD
tristate "Normal floppy disk support" tristate "Normal floppy disk support"
depends on ARCH_MAY_HAVE_PC_FDC depends on ARCH_MAY_HAVE_PC_FDC
......
...@@ -41,6 +41,7 @@ obj-$(CONFIG_BLK_DEV_RBD) += rbd.o ...@@ -41,6 +41,7 @@ obj-$(CONFIG_BLK_DEV_RBD) += rbd.o
obj-$(CONFIG_BLK_DEV_PCIESSD_MTIP32XX) += mtip32xx/ obj-$(CONFIG_BLK_DEV_PCIESSD_MTIP32XX) += mtip32xx/
obj-$(CONFIG_BLK_DEV_RSXX) += rsxx/ obj-$(CONFIG_BLK_DEV_RSXX) += rsxx/
obj-$(CONFIG_BLK_DEV_NULL_BLK) += null_blk.o
nvme-y := nvme-core.o nvme-scsi.o nvme-y := nvme-core.o nvme-scsi.o
swim_mod-y := swim.o swim_asm.o swim_mod-y := swim.o swim_asm.o
...@@ -2886,9 +2886,9 @@ static void do_fd_request(struct request_queue *q) ...@@ -2886,9 +2886,9 @@ static void do_fd_request(struct request_queue *q)
return; return;
if (WARN(atomic_read(&usage_count) == 0, if (WARN(atomic_read(&usage_count) == 0,
"warning: usage count=0, current_req=%p sect=%ld type=%x flags=%x\n", "warning: usage count=0, current_req=%p sect=%ld type=%x flags=%llx\n",
current_req, (long)blk_rq_pos(current_req), current_req->cmd_type, current_req, (long)blk_rq_pos(current_req), current_req->cmd_type,
current_req->cmd_flags)) (unsigned long long) current_req->cmd_flags))
return; return;
if (test_and_set_bit(0, &fdc_busy)) { if (test_and_set_bit(0, &fdc_busy)) {
......
...@@ -1002,7 +1002,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq) ...@@ -1002,7 +1002,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
SCpnt->cmnd[0] = READ_6; SCpnt->cmnd[0] = READ_6;
SCpnt->sc_data_direction = DMA_FROM_DEVICE; SCpnt->sc_data_direction = DMA_FROM_DEVICE;
} else { } else {
scmd_printk(KERN_ERR, SCpnt, "Unknown command %x\n", rq->cmd_flags); scmd_printk(KERN_ERR, SCpnt, "Unknown command %llx\n", (unsigned long long) rq->cmd_flags);
goto out; goto out;
} }
......
...@@ -420,6 +420,8 @@ static inline void bio_list_init(struct bio_list *bl) ...@@ -420,6 +420,8 @@ static inline void bio_list_init(struct bio_list *bl)
bl->head = bl->tail = NULL; bl->head = bl->tail = NULL;
} }
#define BIO_EMPTY_LIST { NULL, NULL }
#define bio_list_for_each(bio, bl) \ #define bio_list_for_each(bio, bl) \
for (bio = (bl)->head; bio; bio = bio->bi_next) for (bio = (bl)->head; bio; bio = bio->bi_next)
......
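The new BIO_EMPTY_LIST initializer lets a bio_list be set up at compile time instead of through bio_list_init(); a one-line sketch (the variable name is invented):

#include <linux/bio.h>

/* Equivalent to calling bio_list_init(&deferred_bios) at runtime */
static struct bio_list deferred_bios = BIO_EMPTY_LIST;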
#ifndef BLK_MQ_H
#define BLK_MQ_H
#include <linux/blkdev.h>
struct blk_mq_tags;
struct blk_mq_cpu_notifier {
struct list_head list;
void *data;
void (*notify)(void *data, unsigned long action, unsigned int cpu);
};
struct blk_mq_hw_ctx {
struct {
spinlock_t lock;
struct list_head dispatch;
} ____cacheline_aligned_in_smp;
unsigned long state; /* BLK_MQ_S_* flags */
struct delayed_work delayed_work;
unsigned long flags; /* BLK_MQ_F_* flags */
struct request_queue *queue;
unsigned int queue_num;
void *driver_data;
unsigned int nr_ctx;
struct blk_mq_ctx **ctxs;
unsigned int nr_ctx_map;
unsigned long *ctx_map;
struct request **rqs;
struct list_head page_list;
struct blk_mq_tags *tags;
unsigned long queued;
unsigned long run;
#define BLK_MQ_MAX_DISPATCH_ORDER 10
unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER];
unsigned int queue_depth;
unsigned int numa_node;
unsigned int cmd_size; /* per-request extra data */
struct blk_mq_cpu_notifier cpu_notifier;
struct kobject kobj;
};
struct blk_mq_reg {
struct blk_mq_ops *ops;
unsigned int nr_hw_queues;
unsigned int queue_depth;
unsigned int reserved_tags;
unsigned int cmd_size; /* per-request extra data */
int numa_node;
unsigned int timeout;
unsigned int flags; /* BLK_MQ_F_* */
};
typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, struct request *);
typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int);
typedef struct blk_mq_hw_ctx *(alloc_hctx_fn)(struct blk_mq_reg *, unsigned int);
typedef void (free_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
struct blk_mq_ops {
/*
* Queue request
*/
queue_rq_fn *queue_rq;
/*
* Map to specific hardware queue
*/
map_queue_fn *map_queue;
/*
* Called on request timeout
*/
rq_timed_out_fn *timeout;
/*
* Override for hctx allocations (should probably go)
*/
alloc_hctx_fn *alloc_hctx;
free_hctx_fn *free_hctx;
/*
* Called when the block layer side of a hardware queue has been
* set up, allowing the driver to allocate/init matching structures.
* Ditto for exit/teardown.
*/
init_hctx_fn *init_hctx;
exit_hctx_fn *exit_hctx;
};
enum {
BLK_MQ_RQ_QUEUE_OK = 0, /* queued fine */
BLK_MQ_RQ_QUEUE_BUSY = 1, /* requeue IO for later */
BLK_MQ_RQ_QUEUE_ERROR = 2, /* end IO with error */
BLK_MQ_F_SHOULD_MERGE = 1 << 0,
BLK_MQ_F_SHOULD_SORT = 1 << 1,
BLK_MQ_F_SHOULD_IPI = 1 << 2,
BLK_MQ_S_STOPPED = 1 << 0,
BLK_MQ_MAX_DEPTH = 2048,
};
struct request_queue *blk_mq_init_queue(struct blk_mq_reg *, void *);
void blk_mq_free_queue(struct request_queue *);
int blk_mq_register_disk(struct gendisk *);
void blk_mq_unregister_disk(struct gendisk *);
void blk_mq_init_commands(struct request_queue *, void (*init)(void *data, struct blk_mq_hw_ctx *, struct request *, unsigned int), void *data);
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
void blk_mq_insert_request(struct request_queue *, struct request *, bool);
void blk_mq_run_queues(struct request_queue *q, bool async);
void blk_mq_free_request(struct request *rq);
bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp, bool reserved);
struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw, gfp_t gfp);
struct request *blk_mq_rq_from_tag(struct request_queue *q, unsigned int tag);
struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_reg *, unsigned int);
void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *, unsigned int);
void blk_mq_end_io(struct request *rq, int error);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queues(struct request_queue *q);
/*
* Driver command data is immediately after the request. So subtract request
* size to get back to the original request.
*/
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
return pdu - sizeof(struct request);
}
static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
return (void *) rq + sizeof(*rq);
}
static inline struct request *blk_mq_tag_to_rq(struct blk_mq_hw_ctx *hctx,
unsigned int tag)
{
return hctx->rqs[tag];
}
#define queue_for_each_hw_ctx(q, hctx, i) \
for ((i) = 0, hctx = (q)->queue_hw_ctx[0]; \
(i) < (q)->nr_hw_queues; (i)++, hctx = (q)->queue_hw_ctx[i])
#define queue_for_each_ctx(q, ctx, i) \
for ((i) = 0, ctx = per_cpu_ptr((q)->queue_ctx, 0); \
(i) < (q)->nr_queues; (i)++, ctx = per_cpu_ptr(q->queue_ctx, (i)))
#define hctx_for_each_ctx(hctx, ctx, i) \
for ((i) = 0, ctx = (hctx)->ctxs[0]; \
(i) < (hctx)->nr_ctx; (i)++, ctx = (hctx)->ctxs[(i)])
#define blk_ctx_sum(q, sum) \
({ \
struct blk_mq_ctx *__x; \
unsigned int __ret = 0, __i; \
\
queue_for_each_ctx((q), __x, __i) \
__ret += sum; \
__ret; \
})
#endif
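The header above is the whole driver-facing blk-mq contract introduced here. For illustration only (not code from this series), a minimal driver might wire it up roughly as below; everything prefixed example_ is invented, and the assumption that blk_mq_init_queue() reports failure via ERR_PTR is exactly that, an assumption.

#include <linux/blk-mq.h>
#include <linux/blkdev.h>
#include <linux/numa.h>
#include <linux/err.h>

struct example_cmd {			/* lives in the per-request PDU (cmd_size bytes) */
	int status;
};

static int example_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
	struct example_cmd *cmd = blk_mq_rq_to_pdu(rq);

	cmd->status = 0;
	/* ...hand rq to hardware; the completion path would call blk_mq_end_io()... */
	blk_mq_end_io(rq, cmd->status);
	return BLK_MQ_RQ_QUEUE_OK;
}

static struct blk_mq_ops example_mq_ops = {
	.queue_rq	= example_queue_rq,
	.map_queue	= blk_mq_map_queue,	/* default software->hardware queue mapping */
};

static struct blk_mq_reg example_mq_reg = {
	.ops		= &example_mq_ops,
	.nr_hw_queues	= 1,
	.queue_depth	= 64,
	.cmd_size	= sizeof(struct example_cmd),
	.numa_node	= NUMA_NO_NODE,
	.flags		= BLK_MQ_F_SHOULD_MERGE,
};

static struct request_queue *example_setup_queue(void *driver_data)
{
	struct request_queue *q = blk_mq_init_queue(&example_mq_reg, driver_data);

	if (IS_ERR(q))			/* assumed error convention */
		return NULL;
	return q;
}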
...@@ -178,19 +178,20 @@ enum rq_flag_bits { ...@@ -178,19 +178,20 @@ enum rq_flag_bits {
__REQ_MIXED_MERGE, /* merge of different types, fail separately */ __REQ_MIXED_MERGE, /* merge of different types, fail separately */
__REQ_KERNEL, /* direct IO to kernel pages */ __REQ_KERNEL, /* direct IO to kernel pages */
__REQ_PM, /* runtime pm request */ __REQ_PM, /* runtime pm request */
__REQ_END, /* last of chain of requests */
__REQ_NR_BITS, /* stops here */ __REQ_NR_BITS, /* stops here */
}; };
#define REQ_WRITE (1 << __REQ_WRITE) #define REQ_WRITE (1ULL << __REQ_WRITE)
#define REQ_FAILFAST_DEV (1 << __REQ_FAILFAST_DEV) #define REQ_FAILFAST_DEV (1ULL << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT (1 << __REQ_FAILFAST_TRANSPORT) #define REQ_FAILFAST_TRANSPORT (1ULL << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER (1 << __REQ_FAILFAST_DRIVER) #define REQ_FAILFAST_DRIVER (1ULL << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC (1 << __REQ_SYNC) #define REQ_SYNC (1ULL << __REQ_SYNC)
#define REQ_META (1 << __REQ_META) #define REQ_META (1ULL << __REQ_META)
#define REQ_PRIO (1 << __REQ_PRIO) #define REQ_PRIO (1ULL << __REQ_PRIO)
#define REQ_DISCARD (1 << __REQ_DISCARD) #define REQ_DISCARD (1ULL << __REQ_DISCARD)
#define REQ_WRITE_SAME (1 << __REQ_WRITE_SAME) #define REQ_WRITE_SAME (1ULL << __REQ_WRITE_SAME)
#define REQ_NOIDLE (1 << __REQ_NOIDLE) #define REQ_NOIDLE (1ULL << __REQ_NOIDLE)
#define REQ_FAILFAST_MASK \ #define REQ_FAILFAST_MASK \
(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER) (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
...@@ -206,28 +207,29 @@ enum rq_flag_bits { ...@@ -206,28 +207,29 @@ enum rq_flag_bits {
#define REQ_NOMERGE_FLAGS \ #define REQ_NOMERGE_FLAGS \
(REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA) (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA)
#define REQ_RAHEAD (1 << __REQ_RAHEAD) #define REQ_RAHEAD (1ULL << __REQ_RAHEAD)
#define REQ_THROTTLED (1 << __REQ_THROTTLED) #define REQ_THROTTLED (1ULL << __REQ_THROTTLED)
#define REQ_SORTED (1 << __REQ_SORTED) #define REQ_SORTED (1ULL << __REQ_SORTED)
#define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER) #define REQ_SOFTBARRIER (1ULL << __REQ_SOFTBARRIER)
#define REQ_FUA (1 << __REQ_FUA) #define REQ_FUA (1ULL << __REQ_FUA)
#define REQ_NOMERGE (1 << __REQ_NOMERGE) #define REQ_NOMERGE (1ULL << __REQ_NOMERGE)
#define REQ_STARTED (1 << __REQ_STARTED) #define REQ_STARTED (1ULL << __REQ_STARTED)
#define REQ_DONTPREP (1 << __REQ_DONTPREP) #define REQ_DONTPREP (1ULL << __REQ_DONTPREP)
#define REQ_QUEUED (1 << __REQ_QUEUED) #define REQ_QUEUED (1ULL << __REQ_QUEUED)
#define REQ_ELVPRIV (1 << __REQ_ELVPRIV) #define REQ_ELVPRIV (1ULL << __REQ_ELVPRIV)
#define REQ_FAILED (1 << __REQ_FAILED) #define REQ_FAILED (1ULL << __REQ_FAILED)
#define REQ_QUIET (1 << __REQ_QUIET) #define REQ_QUIET (1ULL << __REQ_QUIET)
#define REQ_PREEMPT (1 << __REQ_PREEMPT) #define REQ_PREEMPT (1ULL << __REQ_PREEMPT)
#define REQ_ALLOCED (1 << __REQ_ALLOCED) #define REQ_ALLOCED (1ULL << __REQ_ALLOCED)
#define REQ_COPY_USER (1 << __REQ_COPY_USER) #define REQ_COPY_USER (1ULL << __REQ_COPY_USER)
#define REQ_FLUSH (1 << __REQ_FLUSH) #define REQ_FLUSH (1ULL << __REQ_FLUSH)
#define REQ_FLUSH_SEQ (1 << __REQ_FLUSH_SEQ) #define REQ_FLUSH_SEQ (1ULL << __REQ_FLUSH_SEQ)
#define REQ_IO_STAT (1 << __REQ_IO_STAT) #define REQ_IO_STAT (1ULL << __REQ_IO_STAT)
#define REQ_MIXED_MERGE (1 << __REQ_MIXED_MERGE) #define REQ_MIXED_MERGE (1ULL << __REQ_MIXED_MERGE)
#define REQ_SECURE (1 << __REQ_SECURE) #define REQ_SECURE (1ULL << __REQ_SECURE)
#define REQ_KERNEL (1 << __REQ_KERNEL) #define REQ_KERNEL (1ULL << __REQ_KERNEL)
#define REQ_PM (1 << __REQ_PM) #define REQ_PM (1ULL << __REQ_PM)
#define REQ_END (1ULL << __REQ_END)
#endif /* __LINUX_BLK_TYPES_H */ #endif /* __LINUX_BLK_TYPES_H */
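The switch from "1 <<" to "1ULL <<" goes hand in hand with cmd_flags becoming a u64 in the request definition below, and with the %llx format fixes in the floppy and sd hunks earlier. A hedged reminder of what that implies for code touching the flags (function name invented):

#include <linux/blk_types.h>
#include <linux/printk.h>

static void example_dump_cmd_flags(u64 cmd_flags)
{
	/* Masks must be 64-bit wide; an int-sized shift would truncate high bits. */
	if (cmd_flags & REQ_END)
		pr_debug("last request of a chain\n");

	/* printk needs %llx plus a cast, as the driver hunks above now do. */
	pr_debug("cmd_flags=0x%llx\n", (unsigned long long)cmd_flags);
}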
...@@ -8,6 +8,7 @@ ...@@ -8,6 +8,7 @@
#include <linux/major.h> #include <linux/major.h>
#include <linux/genhd.h> #include <linux/genhd.h>
#include <linux/list.h> #include <linux/list.h>
#include <linux/llist.h>
#include <linux/timer.h> #include <linux/timer.h>
#include <linux/workqueue.h> #include <linux/workqueue.h>
#include <linux/pagemap.h> #include <linux/pagemap.h>
...@@ -94,12 +95,19 @@ enum rq_cmd_type_bits { ...@@ -94,12 +95,19 @@ enum rq_cmd_type_bits {
* as well! * as well!
*/ */
struct request { struct request {
struct list_head queuelist; union {
struct call_single_data csd; struct list_head queuelist;
struct llist_node ll_list;
};
union {
struct call_single_data csd;
struct work_struct mq_flush_data;
};
struct request_queue *q; struct request_queue *q;
struct blk_mq_ctx *mq_ctx;
unsigned int cmd_flags; u64 cmd_flags;
enum rq_cmd_type_bits cmd_type; enum rq_cmd_type_bits cmd_type;
unsigned long atomic_flags; unsigned long atomic_flags;
...@@ -160,8 +168,6 @@ struct request { ...@@ -160,8 +168,6 @@ struct request {
unsigned short ioprio; unsigned short ioprio;
int ref_count;
void *special; /* opaque pointer available for LLD use */ void *special; /* opaque pointer available for LLD use */
char *buffer; /* kaddr of the current segment if available */ char *buffer; /* kaddr of the current segment if available */
...@@ -215,6 +221,8 @@ struct request_pm_state ...@@ -215,6 +221,8 @@ struct request_pm_state
#include <linux/elevator.h> #include <linux/elevator.h>
struct blk_queue_ctx;
typedef void (request_fn_proc) (struct request_queue *q); typedef void (request_fn_proc) (struct request_queue *q);
typedef void (make_request_fn) (struct request_queue *q, struct bio *bio); typedef void (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef int (prep_rq_fn) (struct request_queue *, struct request *); typedef int (prep_rq_fn) (struct request_queue *, struct request *);
...@@ -313,6 +321,18 @@ struct request_queue { ...@@ -313,6 +321,18 @@ struct request_queue {
dma_drain_needed_fn *dma_drain_needed; dma_drain_needed_fn *dma_drain_needed;
lld_busy_fn *lld_busy_fn; lld_busy_fn *lld_busy_fn;
struct blk_mq_ops *mq_ops;
unsigned int *mq_map;
/* sw queues */
struct blk_mq_ctx *queue_ctx;
unsigned int nr_queues;
/* hw dispatch queues */
struct blk_mq_hw_ctx **queue_hw_ctx;
unsigned int nr_hw_queues;
/* /*
* Dispatch queue sorting * Dispatch queue sorting
*/ */
...@@ -361,6 +381,11 @@ struct request_queue { ...@@ -361,6 +381,11 @@ struct request_queue {
*/ */
struct kobject kobj; struct kobject kobj;
/*
* mq queue kobject
*/
struct kobject mq_kobj;
#ifdef CONFIG_PM_RUNTIME #ifdef CONFIG_PM_RUNTIME
struct device *dev; struct device *dev;
int rpm_status; int rpm_status;
...@@ -425,7 +450,13 @@ struct request_queue { ...@@ -425,7 +450,13 @@ struct request_queue {
unsigned long flush_pending_since; unsigned long flush_pending_since;
struct list_head flush_queue[2]; struct list_head flush_queue[2];
struct list_head flush_data_in_flight; struct list_head flush_data_in_flight;
struct request flush_rq; union {
struct request flush_rq;
struct {
spinlock_t mq_flush_lock;
struct work_struct mq_flush_work;
};
};
struct mutex sysfs_lock; struct mutex sysfs_lock;
...@@ -437,14 +468,14 @@ struct request_queue { ...@@ -437,14 +468,14 @@ struct request_queue {
struct bsg_class_device bsg_dev; struct bsg_class_device bsg_dev;
#endif #endif
#ifdef CONFIG_BLK_CGROUP
struct list_head all_q_node;
#endif
#ifdef CONFIG_BLK_DEV_THROTTLING #ifdef CONFIG_BLK_DEV_THROTTLING
/* Throttle data */ /* Throttle data */
struct throtl_data *td; struct throtl_data *td;
#endif #endif
struct rcu_head rcu_head; struct rcu_head rcu_head;
wait_queue_head_t mq_freeze_wq;
struct percpu_counter mq_usage_counter;
struct list_head all_q_node;
}; };
#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ #define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */
...@@ -467,6 +498,7 @@ struct request_queue { ...@@ -467,6 +498,7 @@ struct request_queue {
#define QUEUE_FLAG_SECDISCARD 17 /* supports SECDISCARD */ #define QUEUE_FLAG_SECDISCARD 17 /* supports SECDISCARD */
#define QUEUE_FLAG_SAME_FORCE 18 /* force complete on same CPU */ #define QUEUE_FLAG_SAME_FORCE 18 /* force complete on same CPU */
#define QUEUE_FLAG_DEAD 19 /* queue tear-down finished */ #define QUEUE_FLAG_DEAD 19 /* queue tear-down finished */
#define QUEUE_FLAG_INIT_DONE 20 /* queue is initialized */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_STACKABLE) | \ (1 << QUEUE_FLAG_STACKABLE) | \
...@@ -539,6 +571,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) ...@@ -539,6 +571,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
#define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags) #define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
#define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags) #define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
#define blk_queue_bypass(q) test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags) #define blk_queue_bypass(q) test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
#define blk_queue_init_done(q) test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags) #define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q) \ #define blk_queue_noxmerges(q) \
test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags) test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
...@@ -570,7 +603,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) ...@@ -570,7 +603,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist) #define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)
#define rq_data_dir(rq) ((rq)->cmd_flags & 1) #define rq_data_dir(rq) (((rq)->cmd_flags & 1) != 0)
static inline unsigned int blk_queue_cluster(struct request_queue *q) static inline unsigned int blk_queue_cluster(struct request_queue *q)
{ {
...@@ -1013,6 +1046,7 @@ static inline void blk_post_runtime_resume(struct request_queue *q, int err) {} ...@@ -1013,6 +1046,7 @@ static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
struct blk_plug { struct blk_plug {
unsigned long magic; /* detect uninitialized use-cases */ unsigned long magic; /* detect uninitialized use-cases */
struct list_head list; /* requests */ struct list_head list; /* requests */
struct list_head mq_list; /* blk-mq requests */
struct list_head cb_list; /* md requires an unplug callback */ struct list_head cb_list; /* md requires an unplug callback */
}; };
#define BLK_MAX_REQUEST_COUNT 16 #define BLK_MAX_REQUEST_COUNT 16
...@@ -1050,7 +1084,10 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk) ...@@ -1050,7 +1084,10 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{ {
struct blk_plug *plug = tsk->plug; struct blk_plug *plug = tsk->plug;
return plug && (!list_empty(&plug->list) || !list_empty(&plug->cb_list)); return plug &&
(!list_empty(&plug->list) ||
!list_empty(&plug->mq_list) ||
!list_empty(&plug->cb_list));
} }
/* /*
...@@ -1325,6 +1362,7 @@ static inline void put_dev_sector(Sector p) ...@@ -1325,6 +1362,7 @@ static inline void put_dev_sector(Sector p)
struct work_struct; struct work_struct;
int kblockd_schedule_work(struct request_queue *q, struct work_struct *work); int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
int kblockd_schedule_delayed_work(struct request_queue *q, struct delayed_work *dwork, unsigned long delay);
#ifdef CONFIG_BLK_CGROUP #ifdef CONFIG_BLK_CGROUP
/* /*
......
...@@ -16,6 +16,8 @@ struct percpu_ida { ...@@ -16,6 +16,8 @@ struct percpu_ida {
* percpu_ida_init() * percpu_ida_init()
*/ */
unsigned nr_tags; unsigned nr_tags;
unsigned percpu_max_size;
unsigned percpu_batch_size;
struct percpu_ida_cpu __percpu *tag_cpu; struct percpu_ida_cpu __percpu *tag_cpu;
...@@ -51,10 +53,29 @@ struct percpu_ida { ...@@ -51,10 +53,29 @@ struct percpu_ida {
} ____cacheline_aligned_in_smp; } ____cacheline_aligned_in_smp;
}; };
/*
* Number of tags we move between the percpu freelist and the global freelist at
* a time
*/
#define IDA_DEFAULT_PCPU_BATCH_MOVE 32U
/* Max size of percpu freelist */
#define IDA_DEFAULT_PCPU_SIZE ((IDA_DEFAULT_PCPU_BATCH_MOVE * 3) / 2)
int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp); int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp);
void percpu_ida_free(struct percpu_ida *pool, unsigned tag); void percpu_ida_free(struct percpu_ida *pool, unsigned tag);
void percpu_ida_destroy(struct percpu_ida *pool); void percpu_ida_destroy(struct percpu_ida *pool);
int percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags); int __percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags,
unsigned long max_size, unsigned long batch_size);
static inline int percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags)
{
return __percpu_ida_init(pool, nr_tags, IDA_DEFAULT_PCPU_SIZE,
IDA_DEFAULT_PCPU_BATCH_MOVE);
}
typedef int (*percpu_ida_cb)(unsigned, void *);
int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn,
void *data);
unsigned percpu_ida_free_tags(struct percpu_ida *pool, int cpu);
#endif /* __PERCPU_IDA_H__ */ #endif /* __PERCPU_IDA_H__ */
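The header now exposes __percpu_ida_init() with explicit per-cpu cache and batch sizes, while percpu_ida_init() keeps the old defaults through the inline wrapper. A hedged sketch of the new call style (pool name and sizes are illustrative only):

#include <linux/percpu_ida.h>

static struct percpu_ida example_tags;

static int example_tags_setup(unsigned long nr_tags)
{
	/*
	 * percpu_ida_init(&example_tags, nr_tags) would keep the previous
	 * defaults; here the per-cpu cache and batch size are tuned instead.
	 */
	return __percpu_ida_init(&example_tags, nr_tags,
				 64,	/* percpu_max_size */
				 16);	/* percpu_batch_size */
}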
...@@ -18,6 +18,7 @@ ...@@ -18,6 +18,7 @@
#ifdef CONFIG_USE_GENERIC_SMP_HELPERS #ifdef CONFIG_USE_GENERIC_SMP_HELPERS
enum { enum {
CSD_FLAG_LOCK = 0x01, CSD_FLAG_LOCK = 0x01,
CSD_FLAG_WAIT = 0x02,
}; };
struct call_function_data { struct call_function_data {
...@@ -124,7 +125,7 @@ static void csd_lock(struct call_single_data *csd) ...@@ -124,7 +125,7 @@ static void csd_lock(struct call_single_data *csd)
static void csd_unlock(struct call_single_data *csd) static void csd_unlock(struct call_single_data *csd)
{ {
WARN_ON(!(csd->flags & CSD_FLAG_LOCK)); WARN_ON((csd->flags & CSD_FLAG_WAIT) && !(csd->flags & CSD_FLAG_LOCK));
/* /*
* ensure we're all done before releasing data: * ensure we're all done before releasing data:
...@@ -146,6 +147,9 @@ void generic_exec_single(int cpu, struct call_single_data *csd, int wait) ...@@ -146,6 +147,9 @@ void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
unsigned long flags; unsigned long flags;
int ipi; int ipi;
if (wait)
csd->flags |= CSD_FLAG_WAIT;
raw_spin_lock_irqsave(&dst->lock, flags); raw_spin_lock_irqsave(&dst->lock, flags);
ipi = list_empty(&dst->list); ipi = list_empty(&dst->list);
list_add_tail(&csd->list, &dst->list); list_add_tail(&csd->list, &dst->list);
...@@ -340,6 +344,7 @@ void __smp_call_function_single(int cpu, struct call_single_data *csd, ...@@ -340,6 +344,7 @@ void __smp_call_function_single(int cpu, struct call_single_data *csd,
} }
put_cpu(); put_cpu();
} }
EXPORT_SYMBOL_GPL(__smp_call_function_single);
/** /**
* smp_call_function_many(): Run a function on a set of other CPUs. * smp_call_function_many(): Run a function on a set of other CPUs.
......
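With __smp_call_function_single() now exported, a module (the blk-mq IPI completion path is the obvious customer) can push a function call to a specific CPU through its own call_single_data. A hedged sketch, with invented names and the return value ignored since its contract is not shown here:

#include <linux/smp.h>

static void example_on_target_cpu(void *info)
{
	/* runs on the chosen CPU, typically from IPI context */
}

static struct call_single_data example_csd = {
	.func = example_on_target_cpu,
};

static void example_kick_cpu(int cpu)
{
	/*
	 * wait == 0 is fire-and-forget; the csd must not be reused until the
	 * previous call has completed. CSD_FLAG_WAIT is only set when waiting.
	 */
	__smp_call_function_single(cpu, &example_csd, 0);
}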
...@@ -60,14 +60,15 @@ static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc) ...@@ -60,14 +60,15 @@ static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
void percpu_counter_set(struct percpu_counter *fbc, s64 amount) void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{ {
int cpu; int cpu;
unsigned long flags;
raw_spin_lock(&fbc->lock); raw_spin_lock_irqsave(&fbc->lock, flags);
for_each_possible_cpu(cpu) { for_each_possible_cpu(cpu) {
s32 *pcount = per_cpu_ptr(fbc->counters, cpu); s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
*pcount = 0; *pcount = 0;
} }
fbc->count = amount; fbc->count = amount;
raw_spin_unlock(&fbc->lock); raw_spin_unlock_irqrestore(&fbc->lock, flags);
} }
EXPORT_SYMBOL(percpu_counter_set); EXPORT_SYMBOL(percpu_counter_set);
...@@ -78,9 +79,10 @@ void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch) ...@@ -78,9 +79,10 @@ void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
preempt_disable(); preempt_disable();
count = __this_cpu_read(*fbc->counters) + amount; count = __this_cpu_read(*fbc->counters) + amount;
if (count >= batch || count <= -batch) { if (count >= batch || count <= -batch) {
raw_spin_lock(&fbc->lock); unsigned long flags;
raw_spin_lock_irqsave(&fbc->lock, flags);
fbc->count += count; fbc->count += count;
raw_spin_unlock(&fbc->lock); raw_spin_unlock_irqrestore(&fbc->lock, flags);
__this_cpu_write(*fbc->counters, 0); __this_cpu_write(*fbc->counters, 0);
} else { } else {
__this_cpu_write(*fbc->counters, count); __this_cpu_write(*fbc->counters, count);
...@@ -97,14 +99,15 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc) ...@@ -97,14 +99,15 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc)
{ {
s64 ret; s64 ret;
int cpu; int cpu;
unsigned long flags;
raw_spin_lock(&fbc->lock); raw_spin_lock_irqsave(&fbc->lock, flags);
ret = fbc->count; ret = fbc->count;
for_each_online_cpu(cpu) { for_each_online_cpu(cpu) {
s32 *pcount = per_cpu_ptr(fbc->counters, cpu); s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
ret += *pcount; ret += *pcount;
} }
raw_spin_unlock(&fbc->lock); raw_spin_unlock_irqrestore(&fbc->lock, flags);
return ret; return ret;
} }
EXPORT_SYMBOL(__percpu_counter_sum); EXPORT_SYMBOL(__percpu_counter_sum);
......
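The irqsave conversion above makes the counter's internal spinlock safe to take with interrupts disabled, which is presumably what lets blk-mq drive its mq_usage_counter from completion contexts. A hedged illustration with invented names:

#include <linux/percpu_counter.h>

static struct percpu_counter example_inflight;	/* percpu_counter_init() done elsewhere */

static void example_issue(void)
{
	percpu_counter_inc(&example_inflight);
}

/*
 * With the lock now taken via raw_spin_lock_irqsave(), this is also safe when
 * called with interrupts off, e.g. from an I/O completion handler.
 */
static void example_complete(void)
{
	percpu_counter_dec(&example_inflight);
}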
...@@ -30,15 +30,6 @@ ...@@ -30,15 +30,6 @@
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/percpu_ida.h> #include <linux/percpu_ida.h>
/*
* Number of tags we move between the percpu freelist and the global freelist at
* a time
*/
#define IDA_PCPU_BATCH_MOVE 32U
/* Max size of percpu freelist, */
#define IDA_PCPU_SIZE ((IDA_PCPU_BATCH_MOVE * 3) / 2)
struct percpu_ida_cpu { struct percpu_ida_cpu {
/* /*
* Even though this is percpu, we need a lock for tag stealing by remote * Even though this is percpu, we need a lock for tag stealing by remote
...@@ -78,7 +69,7 @@ static inline void steal_tags(struct percpu_ida *pool, ...@@ -78,7 +69,7 @@ static inline void steal_tags(struct percpu_ida *pool,
struct percpu_ida_cpu *remote; struct percpu_ida_cpu *remote;
for (cpus_have_tags = cpumask_weight(&pool->cpus_have_tags); for (cpus_have_tags = cpumask_weight(&pool->cpus_have_tags);
cpus_have_tags * IDA_PCPU_SIZE > pool->nr_tags / 2; cpus_have_tags * pool->percpu_max_size > pool->nr_tags / 2;
cpus_have_tags--) { cpus_have_tags--) {
cpu = cpumask_next(cpu, &pool->cpus_have_tags); cpu = cpumask_next(cpu, &pool->cpus_have_tags);
...@@ -123,7 +114,7 @@ static inline void alloc_global_tags(struct percpu_ida *pool, ...@@ -123,7 +114,7 @@ static inline void alloc_global_tags(struct percpu_ida *pool,
{ {
move_tags(tags->freelist, &tags->nr_free, move_tags(tags->freelist, &tags->nr_free,
pool->freelist, &pool->nr_free, pool->freelist, &pool->nr_free,
min(pool->nr_free, IDA_PCPU_BATCH_MOVE)); min(pool->nr_free, pool->percpu_batch_size));
} }
static inline unsigned alloc_local_tag(struct percpu_ida *pool, static inline unsigned alloc_local_tag(struct percpu_ida *pool,
...@@ -245,17 +236,17 @@ void percpu_ida_free(struct percpu_ida *pool, unsigned tag) ...@@ -245,17 +236,17 @@ void percpu_ida_free(struct percpu_ida *pool, unsigned tag)
wake_up(&pool->wait); wake_up(&pool->wait);
} }
if (nr_free == IDA_PCPU_SIZE) { if (nr_free == pool->percpu_max_size) {
spin_lock(&pool->lock); spin_lock(&pool->lock);
/* /*
* Global lock held and irqs disabled, don't need percpu * Global lock held and irqs disabled, don't need percpu
* lock * lock
*/ */
if (tags->nr_free == IDA_PCPU_SIZE) { if (tags->nr_free == pool->percpu_max_size) {
move_tags(pool->freelist, &pool->nr_free, move_tags(pool->freelist, &pool->nr_free,
tags->freelist, &tags->nr_free, tags->freelist, &tags->nr_free,
IDA_PCPU_BATCH_MOVE); pool->percpu_batch_size);
wake_up(&pool->wait); wake_up(&pool->wait);
} }
...@@ -292,7 +283,8 @@ EXPORT_SYMBOL_GPL(percpu_ida_destroy); ...@@ -292,7 +283,8 @@ EXPORT_SYMBOL_GPL(percpu_ida_destroy);
* Allocation is percpu, but sharding is limited by nr_tags - for best * Allocation is percpu, but sharding is limited by nr_tags - for best
* performance, the workload should not span more cpus than nr_tags / 128. * performance, the workload should not span more cpus than nr_tags / 128.
*/ */
int percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags) int __percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags,
unsigned long max_size, unsigned long batch_size)
{ {
unsigned i, cpu, order; unsigned i, cpu, order;
...@@ -301,6 +293,8 @@ int percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags) ...@@ -301,6 +293,8 @@ int percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags)
init_waitqueue_head(&pool->wait); init_waitqueue_head(&pool->wait);
spin_lock_init(&pool->lock); spin_lock_init(&pool->lock);
pool->nr_tags = nr_tags; pool->nr_tags = nr_tags;
pool->percpu_max_size = max_size;
pool->percpu_batch_size = batch_size;
/* Guard against overflow */ /* Guard against overflow */
if (nr_tags > (unsigned) INT_MAX + 1) { if (nr_tags > (unsigned) INT_MAX + 1) {
...@@ -319,7 +313,7 @@ int percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags) ...@@ -319,7 +313,7 @@ int percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags)
pool->nr_free = nr_tags; pool->nr_free = nr_tags;
pool->tag_cpu = __alloc_percpu(sizeof(struct percpu_ida_cpu) + pool->tag_cpu = __alloc_percpu(sizeof(struct percpu_ida_cpu) +
IDA_PCPU_SIZE * sizeof(unsigned), pool->percpu_max_size * sizeof(unsigned),
sizeof(unsigned)); sizeof(unsigned));
if (!pool->tag_cpu) if (!pool->tag_cpu)
goto err; goto err;
...@@ -332,4 +326,65 @@ int percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags) ...@@ -332,4 +326,65 @@ int percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags)
percpu_ida_destroy(pool); percpu_ida_destroy(pool);
return -ENOMEM; return -ENOMEM;
} }
EXPORT_SYMBOL_GPL(percpu_ida_init); EXPORT_SYMBOL_GPL(__percpu_ida_init);
/**
* percpu_ida_for_each_free - iterate free ids of a pool
* @pool: pool to iterate
* @fn: iteration callback function
* @data: parameter for @fn
*
* Note: this does not guarantee a strict iteration over all free ids. Some
* free ids might be missed, some might be visited more than once, and some
* of the ids visited may no longer be free by the time the callback runs.
*/
int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn,
void *data)
{
unsigned long flags;
struct percpu_ida_cpu *remote;
unsigned cpu, i, err = 0;
local_irq_save(flags);
for_each_possible_cpu(cpu) {
remote = per_cpu_ptr(pool->tag_cpu, cpu);
spin_lock(&remote->lock);
for (i = 0; i < remote->nr_free; i++) {
err = fn(remote->freelist[i], data);
if (err)
break;
}
spin_unlock(&remote->lock);
if (err)
goto out;
}
spin_lock(&pool->lock);
for (i = 0; i < pool->nr_free; i++) {
err = fn(pool->freelist[i], data);
if (err)
break;
}
spin_unlock(&pool->lock);
out:
local_irq_restore(flags);
return err;
}
EXPORT_SYMBOL_GPL(percpu_ida_for_each_free);
/**
* percpu_ida_free_tags - return the number of free tags of a specific cpu or of the global pool
* @pool: pool related
* @cpu: specific cpu or global pool if @cpu == nr_cpu_ids
*
* Note: this just returns a snapshot of the number of free tags.
*/
unsigned percpu_ida_free_tags(struct percpu_ida *pool, int cpu)
{
struct percpu_ida_cpu *remote;
if (cpu == nr_cpu_ids)
return pool->nr_free;
remote = per_cpu_ptr(pool->tag_cpu, cpu);
return remote->nr_free;
}
EXPORT_SYMBOL_GPL(percpu_ida_free_tags);
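The iterator and the free-tag counter added above are diagnostic aids; a hedged usage sketch with an invented callback:

#include <linux/percpu_ida.h>
#include <linux/printk.h>

static int example_show_free_tag(unsigned tag, void *data)
{
	pr_info("%s: tag %u looked free\n", (const char *)data, tag);
	return 0;			/* non-zero would stop the walk early */
}

static void example_dump_pool(struct percpu_ida *pool)
{
	percpu_ida_for_each_free(pool, example_show_free_tag, (void *)"tags");
	pr_info("global free tags: %u\n", percpu_ida_free_tags(pool, nr_cpu_ids));
}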