Commit 4cea422a authored by David Sterba

btrfs: use shrinker for compression page pool

The pages are now allocated and freed centrally, so we can extend the
logic to manage their lifetime. The main idea is to keep a few recently
used pages and hand them out to all writers. Ideally we won't have to
go to the allocator at all (a slight performance gain) and we also
raise the chance that the pages will be available (slightly increased
reliability).

In order to avoid gathering too many pages, a shrinker is attached to
the cache so we can free them when MM demands it. The first
implementation simply drains the whole cache; this can later be refined
to keep some minimal number of pages for emergency purposes. The
ultimate goal is to avoid memory allocation failures on the writeout
path from compression.
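
As a rough illustration of the refinement mentioned above (not part of
this patch), a scan callback that keeps a small emergency reserve might
look like the sketch below; the reserve size and the function name are
made up for the example.

static unsigned long example_compr_pool_scan(struct shrinker *sh, struct shrink_control *sc)
{
	const int reserve = 4;		/* hypothetical emergency reserve */
	LIST_HEAD(remove);
	unsigned long freed = 0;

	spin_lock(&compr_pool.lock);
	while (compr_pool.count > reserve) {
		struct page *page = list_first_entry(&compr_pool.list,
						     struct page, lru);

		/* Collect pages beyond the reserve on a private list. */
		list_move(&page->lru, &remove);
		compr_pool.count--;
		freed++;
	}
	spin_unlock(&compr_pool.lock);

	/* Free the collected pages outside of the lock, as the real scan does. */
	while (!list_empty(&remove)) {
		struct page *page = list_first_entry(&remove, struct page, lru);

		list_del_init(&page->lru);
		put_page(page);
	}

	return freed ? freed : SHRINK_STOP;
}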

The pool threshold is set to cover a full BTRFS_MAX_COMPRESSED / PAGE_SIZE
worth of pages for the minimal thread pool size, which is 8
(btrfs_init_fs_info()). This is at most 128K / 4K * 8 = 256 pages, i.e. 1MiB.

The cache is shared by all currently mounted filesystems; with heavy
compression IO the allocator is still needed. The cache helps with
short IO bursts.
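
For context, a minimal sketch of how a writer might consume the new
wrappers; the helper name and error handling below are illustrative and
not taken from the patch, only btrfs_alloc_compr_page() and
btrfs_free_compr_page() come from this change.

static int example_get_compressed_pages(struct page **pages, unsigned int nr)
{
	unsigned int i;

	/* Pull destination pages from the pool (or fall back to the allocator). */
	for (i = 0; i < nr; i++) {
		pages[i] = btrfs_alloc_compr_page();
		if (!pages[i])
			goto fail;
	}
	return 0;

fail:
	/* Return whatever was obtained back to the pool. */
	while (i--)
		btrfs_free_compr_page(pages[i]);
	return -ENOMEM;
}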
Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 9ba965dc
@@ -20,6 +20,7 @@
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/log2.h>
#include <linux/shrinker.h>
#include <crypto/hash.h>
#include "misc.h"
#include "ctree.h"
@@ -169,16 +170,96 @@ static void btrfs_free_compressed_pages(struct compressed_bio *cb)
static int btrfs_decompress_bio(struct compressed_bio *cb);

/*
 * Global cache of last unused pages for compression/decompression.
 */
static struct btrfs_compr_pool {
	struct shrinker *shrinker;
	spinlock_t lock;
	struct list_head list;
	int count;
	int thresh;
} compr_pool;

static unsigned long btrfs_compr_pool_count(struct shrinker *sh, struct shrink_control *sc)
{
	int ret;

	/*
	 * We must not read the values more than once if 'ret' gets expanded in
	 * the return statement so we don't accidentally return a negative
	 * number, even if the first condition finds it positive.
	 */
	ret = READ_ONCE(compr_pool.count) - READ_ONCE(compr_pool.thresh);

	return ret > 0 ? ret : 0;
}

static unsigned long btrfs_compr_pool_scan(struct shrinker *sh, struct shrink_control *sc)
{
	struct list_head remove;
	struct list_head *tmp, *next;
	int freed;

	if (compr_pool.count == 0)
		return SHRINK_STOP;

	INIT_LIST_HEAD(&remove);

	/* For now, just simply drain the whole list. */
	spin_lock(&compr_pool.lock);
	list_splice_init(&compr_pool.list, &remove);
	freed = compr_pool.count;
	compr_pool.count = 0;
	spin_unlock(&compr_pool.lock);

	list_for_each_safe(tmp, next, &remove) {
		struct page *page = list_entry(tmp, struct page, lru);

		ASSERT(page_ref_count(page) == 1);
		put_page(page);
	}

	return freed;
}

/*
 * Common wrappers for page allocation from compression wrappers
 */
struct page *btrfs_alloc_compr_page(void)
{
	struct page *page = NULL;

	spin_lock(&compr_pool.lock);
	if (compr_pool.count > 0) {
		page = list_first_entry(&compr_pool.list, struct page, lru);
		list_del_init(&page->lru);
		compr_pool.count--;
	}
	spin_unlock(&compr_pool.lock);

	if (page)
		return page;

	return alloc_page(GFP_NOFS);
}

void btrfs_free_compr_page(struct page *page)
{
	bool do_free = false;

	spin_lock(&compr_pool.lock);
	if (compr_pool.count > compr_pool.thresh) {
		do_free = true;
	} else {
		list_add(&page->lru, &compr_pool.list);
		compr_pool.count++;
	}
	spin_unlock(&compr_pool.lock);

	if (!do_free)
		return;

	ASSERT(page_ref_count(page) == 1);
	put_page(page);
}
@@ -974,15 +1055,36 @@ int __init btrfs_init_compress(void)
			offsetof(struct compressed_bio, bbio.bio),
			BIOSET_NEED_BVECS))
		return -ENOMEM;

	compr_pool.shrinker = shrinker_alloc(SHRINKER_NONSLAB, "btrfs-compr-pages");
	if (!compr_pool.shrinker)
		return -ENOMEM;

	btrfs_init_workspace_manager(BTRFS_COMPRESS_NONE);
	btrfs_init_workspace_manager(BTRFS_COMPRESS_ZLIB);
	btrfs_init_workspace_manager(BTRFS_COMPRESS_LZO);
	zstd_init_workspace_manager();

	spin_lock_init(&compr_pool.lock);
	INIT_LIST_HEAD(&compr_pool.list);
	compr_pool.count = 0;
	/* 128K / 4K = 32, for 8 threads is 256 pages. */
	compr_pool.thresh = BTRFS_MAX_COMPRESSED / PAGE_SIZE * 8;
	compr_pool.shrinker->count_objects = btrfs_compr_pool_count;
	compr_pool.shrinker->scan_objects = btrfs_compr_pool_scan;
	compr_pool.shrinker->batch = 32;
	compr_pool.shrinker->seeks = DEFAULT_SEEKS;
	shrinker_register(compr_pool.shrinker);

	return 0;
}

void __cold btrfs_exit_compress(void)
{
	/* For now scan drains all pages and does not touch the parameters. */
	btrfs_compr_pool_scan(NULL, NULL);
	shrinker_free(compr_pool.shrinker);

	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_NONE);
	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_ZLIB);
	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_LZO);
...