Commit 6a15674d authored by Bart Van Assche, committed by Jens Axboe

block: Introduce blk_get_request_flags()

A side effect of this patch is that the GFP mask that is passed to
several allocation functions in the legacy block layer is changed
from GFP_KERNEL to __GFP_DIRECT_RECLAIM.
Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Tested-by: Martin Steigerwald <martin@lichtvoll.de>
Tested-by: Oleksandr Natalenko <oleksandr@natalenko.name>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Ming Lei <ming.lei@redhat.com>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 055f6e18
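
The API change is mechanical from a caller's point of view: the GFP mask argument becomes a BLK_MQ_REQ_* flags argument, with the translation shown in the hunks below. A minimal caller-side sketch of that mapping; the helper name example_alloc_rq() is hypothetical, and nothing beyond the flag/mask pairs visible in this diff is assumed:

#include <linux/blkdev.h>

/* Hypothetical helper: how an old-style caller translates to the new API. */
static struct request *example_alloc_rq(struct request_queue *q,
                                        unsigned int op, bool may_sleep)
{
        /* Before: rq = blk_get_request(q, op, may_sleep ? GFP_KERNEL : GFP_ATOMIC);
         * After: a BLK_MQ_REQ_* flag expresses the same choice directly. */
        return blk_get_request_flags(q, op, may_sleep ? 0 : BLK_MQ_REQ_NOWAIT);
}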
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1160,7 +1160,7 @@ int blk_update_nr_requests(struct request_queue *q, unsigned int nr)
  * @rl: request list to allocate from
  * @op: operation and flags
  * @bio: bio to allocate request for (can be %NULL)
- * @gfp_mask: allocation mask
+ * @flags: BLK_MQ_REQ_* flags
  *
  * Get a free request from @q.  This function may fail under memory
  * pressure or if @q is dead.
@@ -1170,7 +1170,7 @@ int blk_update_nr_requests(struct request_queue *q, unsigned int nr)
  * Returns request pointer on success, with @q->queue_lock *not held*.
  */
 static struct request *__get_request(struct request_list *rl, unsigned int op,
-                                     struct bio *bio, gfp_t gfp_mask)
+                                     struct bio *bio, unsigned int flags)
 {
         struct request_queue *q = rl->q;
         struct request *rq;
@@ -1179,6 +1179,8 @@ static struct request *__get_request(struct request_list *rl, unsigned int op,
         struct io_cq *icq = NULL;
         const bool is_sync = op_is_sync(op);
         int may_queue;
+        gfp_t gfp_mask = flags & BLK_MQ_REQ_NOWAIT ? GFP_ATOMIC :
+                         __GFP_DIRECT_RECLAIM;
         req_flags_t rq_flags = RQF_ALLOCED;

         lockdep_assert_held(q->queue_lock);
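
This hunk is where the side effect mentioned in the commit message arises: a blocking caller now hands bare __GFP_DIRECT_RECLAIM (without the __GFP_IO/__GFP_FS/kswapd-reclaim bits that GFP_KERNEL carries) to the legacy allocation path. The translation in isolation, as a hedged sketch; example_flags_to_gfp() is a hypothetical name:

#include <linux/blkdev.h>

static inline gfp_t example_flags_to_gfp(unsigned int flags)
{
        /* Same translation __get_request() now performs internally:
         * NOWAIT callers get GFP_ATOMIC, everyone else gets bare
         * __GFP_DIRECT_RECLAIM rather than the old GFP_KERNEL. */
        return (flags & BLK_MQ_REQ_NOWAIT) ? GFP_ATOMIC : __GFP_DIRECT_RECLAIM;
}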
@@ -1339,7 +1341,7 @@ static struct request *__get_request(struct request_list *rl, unsigned int op,
  * @q: request_queue to allocate request from
  * @op: operation and flags
  * @bio: bio to allocate request for (can be %NULL)
- * @gfp_mask: allocation mask
+ * @flags: BLK_MQ_REQ_* flags.
  *
  * Get a free request from @q.  If %__GFP_DIRECT_RECLAIM is set in @gfp_mask,
  * this function keeps retrying under memory pressure and fails iff @q is dead.
@@ -1349,7 +1351,7 @@ static struct request *__get_request(struct request_list *rl, unsigned int op,
  * Returns request pointer on success, with @q->queue_lock *not held*.
  */
 static struct request *get_request(struct request_queue *q, unsigned int op,
-                                   struct bio *bio, gfp_t gfp_mask)
+                                   struct bio *bio, unsigned int flags)
 {
         const bool is_sync = op_is_sync(op);
         DEFINE_WAIT(wait);
@@ -1361,7 +1363,7 @@ static struct request *get_request(struct request_queue *q, unsigned int op,
         rl = blk_get_rl(q, bio);        /* transferred to @rq on success */
 retry:
-        rq = __get_request(rl, op, bio, gfp_mask);
+        rq = __get_request(rl, op, bio, flags);
         if (!IS_ERR(rq))
                 return rq;
@@ -1370,7 +1372,7 @@ static struct request *get_request(struct request_queue *q, unsigned int op,
                 return ERR_PTR(-EAGAIN);
         }

-        if (!gfpflags_allow_blocking(gfp_mask) || unlikely(blk_queue_dying(q))) {
+        if ((flags & BLK_MQ_REQ_NOWAIT) || unlikely(blk_queue_dying(q))) {
                 blk_put_rl(rl);
                 return rq;
         }
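
After this hunk the fail-fast decision is keyed off BLK_MQ_REQ_NOWAIT instead of gfpflags_allow_blocking(). A hedged caller-side sketch of the resulting contract; the function name example_try_alloc() and the REQ_OP_DRV_IN opcode choice are illustrative:

#include <linux/blkdev.h>

/* Illustrative only: allocate a request without sleeping; on failure the
 * legacy path above returns an ERR_PTR (e.g. -ENOMEM) instead of waiting. */
static int example_try_alloc(struct request_queue *q)
{
        struct request *rq;

        rq = blk_get_request_flags(q, REQ_OP_DRV_IN, BLK_MQ_REQ_NOWAIT);
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        blk_put_request(rq);
        return 0;
}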
@@ -1397,10 +1399,13 @@ static struct request *get_request(struct request_queue *q, unsigned int op,
         goto retry;
 }

+/* flags: BLK_MQ_REQ_PREEMPT and/or BLK_MQ_REQ_NOWAIT. */
 static struct request *blk_old_get_request(struct request_queue *q,
-                                           unsigned int op, gfp_t gfp_mask)
+                                           unsigned int op, unsigned int flags)
 {
         struct request *rq;
+        gfp_t gfp_mask = flags & BLK_MQ_REQ_NOWAIT ? GFP_ATOMIC :
+                         __GFP_DIRECT_RECLAIM;
         int ret = 0;

         WARN_ON_ONCE(q->mq_ops);
@@ -1413,7 +1418,7 @@ static struct request *blk_old_get_request(struct request_queue *q,
         if (ret)
                 return ERR_PTR(ret);
         spin_lock_irq(q->queue_lock);
-        rq = get_request(q, op, NULL, gfp_mask);
+        rq = get_request(q, op, NULL, flags);
         if (IS_ERR(rq)) {
                 spin_unlock_irq(q->queue_lock);
                 blk_queue_exit(q);
@@ -1427,25 +1432,40 @@ static struct request *blk_old_get_request(struct request_queue *q,
         return rq;
 }

-struct request *blk_get_request(struct request_queue *q, unsigned int op,
-                                gfp_t gfp_mask)
+/**
+ * blk_get_request_flags - allocate a request
+ * @q: request queue to allocate a request for
+ * @op: operation (REQ_OP_*) and REQ_* flags, e.g. REQ_SYNC.
+ * @flags: BLK_MQ_REQ_* flags, e.g. BLK_MQ_REQ_NOWAIT.
+ */
+struct request *blk_get_request_flags(struct request_queue *q, unsigned int op,
+                                      unsigned int flags)
 {
         struct request *req;

+        WARN_ON_ONCE(op & REQ_NOWAIT);
+        WARN_ON_ONCE(flags & ~BLK_MQ_REQ_NOWAIT);
+
         if (q->mq_ops) {
-                req = blk_mq_alloc_request(q, op,
-                        (gfp_mask & __GFP_DIRECT_RECLAIM) ?
-                        0 : BLK_MQ_REQ_NOWAIT);
+                req = blk_mq_alloc_request(q, op, flags);
                 if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
                         q->mq_ops->initialize_rq_fn(req);
         } else {
-                req = blk_old_get_request(q, op, gfp_mask);
+                req = blk_old_get_request(q, op, flags);
                 if (!IS_ERR(req) && q->initialize_rq_fn)
                         q->initialize_rq_fn(req);
         }

         return req;
 }
+EXPORT_SYMBOL(blk_get_request_flags);
+
+struct request *blk_get_request(struct request_queue *q, unsigned int op,
+                                gfp_t gfp_mask)
+{
+        return blk_get_request_flags(q, op, gfp_mask & __GFP_DIRECT_RECLAIM ?
+                                     0 : BLK_MQ_REQ_NOWAIT);
+}
 EXPORT_SYMBOL(blk_get_request);

 /**
@@ -1871,7 +1891,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
          * Returns with the queue unlocked.
          */
         blk_queue_enter_live(q);
-        req = get_request(q, bio->bi_opf, bio, GFP_NOIO);
+        req = get_request(q, bio->bi_opf, bio, 0);
         if (IS_ERR(req)) {
                 blk_queue_exit(q);
                 __wbt_done(q->rq_wb, wb_acct);
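
blk_queue_bio() used to pass GFP_NOIO here; since GFP_NOIO includes __GFP_DIRECT_RECLAIM, passing flags == 0 (a blocking allocation) preserves the old behavior. A compile-time restatement of that assumption, as a sketch; the function name is hypothetical:

#include <linux/build_bug.h>
#include <linux/gfp.h>

static inline void example_noio_still_blocks(void)
{
        /* GFP_NOIO allows direct reclaim, so the old GFP_NOIO call could
         * sleep; flags == 0 keeps this call site blocking as before. */
        BUILD_BUG_ON(!(GFP_NOIO & __GFP_DIRECT_RECLAIM));
}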
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -927,6 +927,9 @@ extern void blk_rq_init(struct request_queue *q, struct request *rq);
 extern void blk_init_request_from_bio(struct request *req, struct bio *bio);
 extern void blk_put_request(struct request *);
 extern void __blk_put_request(struct request_queue *, struct request *);
+extern struct request *blk_get_request_flags(struct request_queue *,
+                                             unsigned int op,
+                                             unsigned int flags);
 extern struct request *blk_get_request(struct request_queue *, unsigned int op,
                                        gfp_t gfp_mask);
 extern void blk_requeue_request(struct request_queue *, struct request *);