Commit 1c4bc3ab authored by Christoph Hellwig, committed by Jens Axboe

block: remove the queue_bounce_pfn helper

Only used inside the bounce code, and opencoding it makes it more obvious
what is going on.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 3bce016a
...@@ -203,7 +203,7 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig, ...@@ -203,7 +203,7 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
bio_for_each_segment(from, *bio_orig, iter) { bio_for_each_segment(from, *bio_orig, iter) {
if (i++ < BIO_MAX_PAGES) if (i++ < BIO_MAX_PAGES)
sectors += from.bv_len >> 9; sectors += from.bv_len >> 9;
if (page_to_pfn(from.bv_page) > queue_bounce_pfn(q)) if (page_to_pfn(from.bv_page) > q->limits.bounce_pfn)
bounce = true; bounce = true;
} }
if (!bounce) if (!bounce)
...@@ -220,7 +220,7 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig, ...@@ -220,7 +220,7 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
bio_for_each_segment_all(to, bio, i) { bio_for_each_segment_all(to, bio, i) {
struct page *page = to->bv_page; struct page *page = to->bv_page;
if (page_to_pfn(page) <= queue_bounce_pfn(q)) if (page_to_pfn(page) <= q->limits.bounce_pfn)
continue; continue;
to->bv_page = mempool_alloc(pool, q->bounce_gfp); to->bv_page = mempool_alloc(pool, q->bounce_gfp);
...@@ -272,7 +272,7 @@ void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig) ...@@ -272,7 +272,7 @@ void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
* don't waste time iterating over bio segments * don't waste time iterating over bio segments
*/ */
if (!(q->bounce_gfp & GFP_DMA)) { if (!(q->bounce_gfp & GFP_DMA)) {
if (queue_bounce_pfn(q) >= blk_max_pfn) if (q->limits.bounce_pfn >= blk_max_pfn)
return; return;
pool = page_pool; pool = page_pool;
} else { } else {
......
...@@ -1385,11 +1385,6 @@ enum blk_default_limits { ...@@ -1385,11 +1385,6 @@ enum blk_default_limits {
#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist) #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
/*
 * Return the highest page-frame number this queue can address directly;
 * pages above it must be bounced.  Thin accessor for q->limits.bounce_pfn
 * (removed by this commit in favor of open-coding the field access).
 */
static inline unsigned long queue_bounce_pfn(struct request_queue *q)
{
return q->limits.bounce_pfn;
}
static inline unsigned long queue_segment_boundary(struct request_queue *q) static inline unsigned long queue_segment_boundary(struct request_queue *q)
{ {
return q->limits.seg_boundary_mask; return q->limits.seg_boundary_mask;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment