Commit b99182c5 authored by Pavel Begunkov, committed by Jens Axboe

bio: add pcpu caching for non-polling bio_put

This patch extends REQ_ALLOC_CACHE to IRQ completions, whereas currently
it is limited to iopoll. Instead of guarding the list with irq toggling
on alloc, which is expensive, it keeps an additional irq-safe list from
which bios are spliced in batches to amortise the overhead. On the put
side it toggles irqs, but in many cases they are already disabled, so
that is cheap.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/c2306de96b900ab9264f4428ec37768ddcf0da36.1667384020.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent f25cf75a
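The commit message above describes a two-list per-CPU cache: completions push freed bios onto an irq-safe list, and the allocation path splices that list into the cheap task-context list in batches. Below is a minimal user-space sketch of that batching idea, not kernel code: demo_bio, demo_cache, cache_put_irq, cache_get and the fake_irq_save()/fake_irq_restore() stubs are illustrative stand-ins for the real bio, bio_alloc_cache and local_irq_save()/local_irq_restore() machinery in block/bio.c.

/*
 * Minimal user-space sketch of the two-list, batched-splice pattern.
 * demo_bio, demo_cache and the fake_irq_*() stubs are illustrative
 * stand-ins, not kernel APIs; the real code lives in block/bio.c and
 * uses local_irq_save()/local_irq_restore() around the irq-safe list.
 */
#include <stdio.h>

#define CACHE_THRESHOLD 16	/* splice the irq list once it grows this big */

struct demo_bio {
	struct demo_bio *next;
	int id;
};

struct demo_cache {
	struct demo_bio *free_list;	/* touched only in task context */
	struct demo_bio *free_list_irq;	/* filled from (simulated) IRQ context */
	unsigned int nr;
	unsigned int nr_irq;
};

/* Stand-ins for disabling/re-enabling local interrupts. */
static void fake_irq_save(void)    { }
static void fake_irq_restore(void) { }

/* Put side: push onto the irq-safe list with "interrupts disabled". */
static void cache_put_irq(struct demo_cache *cache, struct demo_bio *bio)
{
	fake_irq_save();
	bio->next = cache->free_list_irq;
	cache->free_list_irq = bio;
	cache->nr_irq++;
	fake_irq_restore();
}

/* Move the whole irq list over to the task-context list in one go. */
static void cache_splice(struct demo_cache *cache)
{
	fake_irq_save();
	cache->free_list = cache->free_list_irq;
	cache->free_list_irq = NULL;
	cache->nr += cache->nr_irq;
	cache->nr_irq = 0;
	fake_irq_restore();
}

/* Alloc side: only pay for a splice when the main list runs dry. */
static struct demo_bio *cache_get(struct demo_cache *cache)
{
	struct demo_bio *bio;

	if (!cache->free_list && cache->nr_irq >= CACHE_THRESHOLD)
		cache_splice(cache);
	bio = cache->free_list;
	if (bio) {
		cache->free_list = bio->next;
		cache->nr--;
	}
	return bio;
}

int main(void)
{
	struct demo_cache cache = { 0 };
	struct demo_bio bios[32];
	int i;

	for (i = 0; i < 32; i++) {
		bios[i].id = i;
		cache_put_irq(&cache, &bios[i]);	/* completions land here */
	}
	/* First allocation splices all 32 bios with a single irq toggle. */
	printf("got bio %d, %u still cached\n",
	       cache_get(&cache)->id, cache.nr);
	return 0;
}

In this sketch every put pays one (simulated) irq toggle, mirroring the new free_list_irq path, while the alloc side toggles interrupts only once per threshold-sized batch; that is where the amortisation described in the commit message comes from.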
@@ -25,9 +25,15 @@
 #include "blk-rq-qos.h"
 #include "blk-cgroup.h"
 
+#define ALLOC_CACHE_THRESHOLD	16
+#define ALLOC_CACHE_SLACK	64
+#define ALLOC_CACHE_MAX		512
+
 struct bio_alloc_cache {
 	struct bio		*free_list;
+	struct bio		*free_list_irq;
 	unsigned int		nr;
+	unsigned int		nr_irq;
 };
 
 static struct biovec_slab {
@@ -408,6 +414,22 @@ static void punt_bios_to_rescuer(struct bio_set *bs)
 	queue_work(bs->rescue_workqueue, &bs->rescue_work);
 }
 
+static void bio_alloc_irq_cache_splice(struct bio_alloc_cache *cache)
+{
+	unsigned long flags;
+
+	/* cache->free_list must be empty */
+	if (WARN_ON_ONCE(cache->free_list))
+		return;
+
+	local_irq_save(flags);
+	cache->free_list = cache->free_list_irq;
+	cache->free_list_irq = NULL;
+	cache->nr += cache->nr_irq;
+	cache->nr_irq = 0;
+	local_irq_restore(flags);
+}
+
 static struct bio *bio_alloc_percpu_cache(struct block_device *bdev,
 		unsigned short nr_vecs, blk_opf_t opf, gfp_t gfp,
 		struct bio_set *bs)
@@ -417,8 +439,12 @@ static struct bio *bio_alloc_percpu_cache(struct block_device *bdev,
 
 	cache = per_cpu_ptr(bs->cache, get_cpu());
 	if (!cache->free_list) {
-		put_cpu();
-		return NULL;
+		if (READ_ONCE(cache->nr_irq) >= ALLOC_CACHE_THRESHOLD)
+			bio_alloc_irq_cache_splice(cache);
+		if (!cache->free_list) {
+			put_cpu();
+			return NULL;
+		}
 	}
 	bio = cache->free_list;
 	cache->free_list = bio->bi_next;
@@ -462,9 +488,6 @@ static struct bio *bio_alloc_percpu_cache(struct block_device *bdev,
  * submit_bio_noacct() should be avoided - instead, use bio_set's front_pad
  * for per bio allocations.
  *
- * If REQ_ALLOC_CACHE is set, the final put of the bio MUST be done from process
- * context, not hard/soft IRQ.
- *
  * Returns: Pointer to new bio on success, NULL on failure.
  */
 struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
@@ -678,11 +701,8 @@ void guard_bio_eod(struct bio *bio)
 	bio_truncate(bio, maxsector << 9);
 }
 
-#define ALLOC_CACHE_MAX		512
-#define ALLOC_CACHE_SLACK	64
-
-static void bio_alloc_cache_prune(struct bio_alloc_cache *cache,
-				  unsigned int nr)
+static int __bio_alloc_cache_prune(struct bio_alloc_cache *cache,
+				   unsigned int nr)
 {
 	unsigned int i = 0;
 	struct bio *bio;
@@ -694,6 +714,17 @@ static void bio_alloc_cache_prune(struct bio_alloc_cache *cache,
 		if (++i == nr)
 			break;
 	}
+	return i;
+}
+
+static void bio_alloc_cache_prune(struct bio_alloc_cache *cache,
+				  unsigned int nr)
+{
+	nr -= __bio_alloc_cache_prune(cache, nr);
+	if (!READ_ONCE(cache->free_list)) {
+		bio_alloc_irq_cache_splice(cache);
+		__bio_alloc_cache_prune(cache, nr);
+	}
 }
 
 static int bio_cpu_dead(unsigned int cpu, struct hlist_node *node)
@@ -732,6 +763,12 @@ static inline void bio_put_percpu_cache(struct bio *bio)
 	struct bio_alloc_cache *cache;
 
 	cache = per_cpu_ptr(bio->bi_pool->cache, get_cpu());
+	if (READ_ONCE(cache->nr_irq) + cache->nr > ALLOC_CACHE_MAX) {
+		put_cpu();
+		bio_free(bio);
+		return;
+	}
+
 	bio_uninit(bio);
 
 	if ((bio->bi_opf & REQ_POLLED) && !WARN_ON_ONCE(in_interrupt())) {
@@ -739,13 +776,14 @@ static inline void bio_put_percpu_cache(struct bio *bio)
 		cache->free_list = bio;
 		cache->nr++;
 	} else {
-		put_cpu();
-		bio_free(bio);
-		return;
-	}
+		unsigned long flags;
 
-	if (cache->nr > ALLOC_CACHE_MAX + ALLOC_CACHE_SLACK)
-		bio_alloc_cache_prune(cache, ALLOC_CACHE_SLACK);
+		local_irq_save(flags);
+		bio->bi_next = cache->free_list_irq;
+		cache->free_list_irq = bio;
+		cache->nr_irq++;
+		local_irq_restore(flags);
+	}
 	put_cpu();
 }
 