Commit 19c304be authored by Kent Overstreet

bcachefs: GFP_NOIO -> GFP_NOFS

GFP_NOIO dates from the bcache days, when we operated under the block
layer. Now, GFP_NOFS is more appropriate, so switch all GFP_NOIO uses to
GFP_NOFS.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent e1d29c5f
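
For context, here is a minimal sketch (not part of the commit; helper names are illustrative) of what the two masks permit: GFP_NOIO forbids the allocator from initiating any I/O while reclaiming memory, whereas GFP_NOFS only forbids recursing into filesystem code, so block I/O for reclaim remains allowed. Since bcachefs now sits above the block layer rather than below it, the weaker GFP_NOFS constraint suffices. The scoped memalloc_nofs_save()/memalloc_nofs_restore() API, visible in btree_bounce_alloc() below, expresses the same constraint for a whole region of code:

	/* Sketch only -- illustrative helpers, not bcachefs code. */
	#include <linux/gfp.h>
	#include <linux/sched/mm.h>
	#include <linux/slab.h>

	static void *fs_safe_alloc(size_t size)
	{
		/* Reclaim may do block I/O, but may not re-enter the FS: */
		return kmalloc(size, GFP_NOFS);
	}

	static void *fs_safe_alloc_scoped(size_t size)
	{
		/*
		 * Equivalent scoped form: mark the whole region as
		 * fs-reclaim-unsafe, then use plain GFP_KERNEL inside it.
		 */
		unsigned int flags = memalloc_nofs_save();
		void *p = kmalloc(size, GFP_KERNEL);

		memalloc_nofs_restore(flags);
		return p;
	}
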
@@ -117,7 +117,7 @@ static void *btree_bounce_alloc(struct bch_fs *c, size_t size,
 	p = vpmalloc(size, __GFP_NOWARN|GFP_NOWAIT);
 	if (!p) {
 		*used_mempool = true;
-		p = mempool_alloc(&c->btree_bounce_pool, GFP_NOIO);
+		p = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
 	}
 	memalloc_nofs_restore(flags);
 	return p;
@@ -937,7 +937,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
 	/* We might get called multiple times on read retry: */
 	b->written = 0;
 
-	iter = mempool_alloc(&c->fill_iter, GFP_NOIO);
+	iter = mempool_alloc(&c->fill_iter, GFP_NOFS);
 	sort_iter_init(iter, b);
 	iter->size = (btree_blocks(c) + 1) * 2;
@@ -1580,7 +1580,7 @@ void bch2_btree_node_read(struct bch_fs *c, struct btree *b,
 	bio = bio_alloc_bioset(NULL,
			       buf_pages(b->data, btree_bytes(c)),
			       REQ_OP_READ|REQ_SYNC|REQ_META,
-			       GFP_NOIO,
+			       GFP_NOFS,
			       &c->btree_bio);
 	rb = container_of(bio, struct btree_read_bio, bio);
 	rb->c = c;
@@ -2077,7 +2077,7 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags)
 	wbio = container_of(bio_alloc_bioset(NULL,
				buf_pages(data, sectors_to_write << 9),
				REQ_OP_WRITE|REQ_META,
-				GFP_NOIO,
+				GFP_NOFS,
				&c->btree_bio),
			    struct btree_write_bio, wbio.bio);
 	wbio_init(&wbio->wbio.bio);
...
@@ -1092,7 +1092,7 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
 		}
 	}
 
-	as = mempool_alloc(&c->btree_interior_update_pool, GFP_NOIO);
+	as = mempool_alloc(&c->btree_interior_update_pool, GFP_NOFS);
 	memset(as, 0, sizeof(*as));
 	closure_init(&as->cl, NULL);
 	as->c = c;
...
@@ -433,12 +433,12 @@ replicas_deltas_realloc(struct btree_trans *trans, unsigned more)
 	WARN_ON_ONCE(alloc_size > REPLICAS_DELTA_LIST_MAX);
 
 	if (!d || d->used + more > d->size) {
-		d = krealloc(d, alloc_size, GFP_NOIO|__GFP_ZERO);
+		d = krealloc(d, alloc_size, GFP_NOFS|__GFP_ZERO);
 
 		BUG_ON(!d && alloc_size > REPLICAS_DELTA_LIST_MAX);
 
 		if (!d) {
-			d = mempool_alloc(&trans->c->replicas_delta_pool, GFP_NOIO);
+			d = mempool_alloc(&trans->c->replicas_delta_pool, GFP_NOFS);
 			memset(d, 0, REPLICAS_DELTA_LIST_MAX);
 
 			if (trans->fs_usage_deltas)
...
@@ -28,11 +28,11 @@ static struct bbuf __bounce_alloc(struct bch_fs *c, unsigned size, int rw)
 	BUG_ON(size > c->opts.encoded_extent_max);
 
-	b = kmalloc(size, GFP_NOIO|__GFP_NOWARN);
+	b = kmalloc(size, GFP_NOFS|__GFP_NOWARN);
 	if (b)
 		return (struct bbuf) { .b = b, .type = BB_KMALLOC, .rw = rw };
 
-	b = mempool_alloc(&c->compression_bounce[rw], GFP_NOIO);
+	b = mempool_alloc(&c->compression_bounce[rw], GFP_NOFS);
 	if (b)
 		return (struct bbuf) { .b = b, .type = BB_MEMPOOL, .rw = rw };
@@ -94,7 +94,7 @@ static struct bbuf __bio_map_or_bounce(struct bch_fs *c, struct bio *bio,
 	BUG_ON(DIV_ROUND_UP(start.bi_size, PAGE_SIZE) > nr_pages);
 
 	pages = nr_pages > ARRAY_SIZE(stack_pages)
-		? kmalloc_array(nr_pages, sizeof(struct page *), GFP_NOIO)
+		? kmalloc_array(nr_pages, sizeof(struct page *), GFP_NOFS)
		: stack_pages;
 	if (!pages)
 		goto bounce;
@@ -177,7 +177,7 @@ static int __bio_uncompress(struct bch_fs *c, struct bio *src,
 			.avail_out = dst_len,
 		};
 
-		workspace = mempool_alloc(&c->decompress_workspace, GFP_NOIO);
+		workspace = mempool_alloc(&c->decompress_workspace, GFP_NOFS);
 
 		zlib_set_workspace(&strm, workspace);
 		zlib_inflateInit2(&strm, -MAX_WBITS);
@@ -196,7 +196,7 @@ static int __bio_uncompress(struct bch_fs *c, struct bio *src,
 		if (real_src_len > src_len - 4)
 			goto err;
 
-		workspace = mempool_alloc(&c->decompress_workspace, GFP_NOIO);
+		workspace = mempool_alloc(&c->decompress_workspace, GFP_NOFS);
 
 		ctx = zstd_init_dctx(workspace, zstd_dctx_workspace_bound());
 		ret = zstd_decompress_dctx(ctx,
@@ -382,7 +382,7 @@ static unsigned __bio_compress(struct bch_fs *c,
 	dst_data = bio_map_or_bounce(c, dst, WRITE);
 	src_data = bio_map_or_bounce(c, src, READ);
 
-	workspace = mempool_alloc(&c->compress_workspace[compression_type], GFP_NOIO);
+	workspace = mempool_alloc(&c->compress_workspace[compression_type], GFP_NOFS);
 
 	*src_len = src->bi_iter.bi_size;
 	*dst_len = dst->bi_iter.bi_size;
...
@@ -47,7 +47,7 @@ static bool bch2_btree_verify_replica(struct bch_fs *c, struct btree *b,
 	bio = bio_alloc_bioset(ca->disk_sb.bdev,
			       buf_pages(n_sorted, btree_bytes(c)),
			       REQ_OP_READ|REQ_META,
-			       GFP_NOIO,
+			       GFP_NOFS,
			       &c->btree_bio);
 	bio->bi_iter.bi_sector = pick.ptr.offset;
 	bch2_bio_map(bio, n_sorted, btree_bytes(c));
@@ -211,7 +211,7 @@ void bch2_btree_node_ondisk_to_text(struct printbuf *out, struct bch_fs *c,
 	bio = bio_alloc_bioset(ca->disk_sb.bdev,
			       buf_pages(n_ondisk, btree_bytes(c)),
			       REQ_OP_READ|REQ_META,
-			       GFP_NOIO,
+			       GFP_NOFS,
			       &c->btree_bio);
 	bio->bi_iter.bi_sector = pick.ptr.offset;
 	bch2_bio_map(bio, n_ondisk, btree_bytes(c));
...
@@ -485,7 +485,7 @@ int bch2_ec_read_extent(struct bch_fs *c, struct bch_read_bio *rbio)
 	BUG_ON(!rbio->pick.has_ec);
 
-	buf = kzalloc(sizeof(*buf), GFP_NOIO);
+	buf = kzalloc(sizeof(*buf), GFP_NOFS);
 	if (!buf)
 		return -BCH_ERR_ENOMEM_ec_read_extent;
...
@@ -163,7 +163,7 @@ static struct page *__bio_alloc_page_pool(struct bch_fs *c, bool *using_mempool)
 	struct page *page;
 
 	if (likely(!*using_mempool)) {
-		page = alloc_page(GFP_NOIO);
+		page = alloc_page(GFP_NOFS);
 		if (unlikely(!page)) {
 			mutex_lock(&c->bio_bounce_pages_lock);
 			*using_mempool = true;
@@ -172,7 +172,7 @@ static struct page *__bio_alloc_page_pool(struct bch_fs *c, bool *using_mempool)
 		}
 	} else {
 pool_alloc:
-		page = mempool_alloc(&c->bio_bounce_pages, GFP_NOIO);
+		page = mempool_alloc(&c->bio_bounce_pages, GFP_NOFS);
 	}
 
 	return page;
@@ -660,7 +660,7 @@ void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
 		if (to_entry(ptr + 1) < ptrs.end) {
 			n = to_wbio(bio_alloc_clone(NULL, &wbio->bio,
-						    GFP_NOIO, &ca->replica_set));
+						    GFP_NOFS, &ca->replica_set));
 
 			n->bio.bi_end_io = wbio->bio.bi_end_io;
 			n->bio.bi_private = wbio->bio.bi_private;
@@ -976,7 +976,7 @@ static struct bio *bch2_write_bio_alloc(struct bch_fs *c,
 	pages = min(pages, BIO_MAX_VECS);
 
 	bio = bio_alloc_bioset(NULL, pages, 0,
-			       GFP_NOIO, &c->bio_write);
+			       GFP_NOFS, &c->bio_write);
 	wbio = wbio_init(bio);
 	wbio->put_bio = true;
 	/* copy WRITE_SYNC flag */
@@ -1314,7 +1314,7 @@ static int bch2_write_extent(struct bch_write_op *op, struct write_point *wp,
 	BUG_ON(total_output != total_input);
 
 	dst = bio_split(src, total_input >> 9,
-			GFP_NOIO, &c->bio_write);
+			GFP_NOFS, &c->bio_write);
 	wbio_init(dst)->put_bio = true;
 	/* copy WRITE_SYNC flag */
 	dst->bi_opf = src->bi_opf;
@@ -2013,7 +2013,7 @@ static struct promote_op *__promote_alloc(struct btree_trans *trans,
 	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_promote))
 		return NULL;
 
-	op = kzalloc(sizeof(*op) + sizeof(struct bio_vec) * pages, GFP_NOIO);
+	op = kzalloc(sizeof(*op) + sizeof(struct bio_vec) * pages, GFP_NOFS);
 	if (!op)
 		goto err;
@@ -2026,7 +2026,7 @@ static struct promote_op *__promote_alloc(struct btree_trans *trans,
 	 */
 	*rbio = kzalloc(sizeof(struct bch_read_bio) +
 			sizeof(struct bio_vec) * pages,
-			GFP_NOIO);
+			GFP_NOFS);
 	if (!*rbio)
 		goto err;
@@ -2034,7 +2034,7 @@ static struct promote_op *__promote_alloc(struct btree_trans *trans,
 	bio_init(&(*rbio)->bio, NULL, (*rbio)->bio.bi_inline_vecs, pages, 0);
 
 	if (bch2_bio_alloc_pages(&(*rbio)->bio, sectors << 9,
-				 GFP_NOIO))
+				 GFP_NOFS))
 		goto err;
 
 	(*rbio)->bounce = true;
@@ -2746,7 +2746,7 @@ int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig,
 		rbio = rbio_init(bio_alloc_bioset(NULL,
						  DIV_ROUND_UP(sectors, PAGE_SECTORS),
						  0,
-						  GFP_NOIO,
+						  GFP_NOFS,
						  &c->bio_read_split),
				 orig->opts);
@@ -2762,7 +2762,7 @@ int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig,
 	 * from the whole bio, in which case we don't want to retry and
 	 * lose the error)
 	 */
-	rbio = rbio_init(bio_alloc_clone(NULL, &orig->bio, GFP_NOIO,
+	rbio = rbio_init(bio_alloc_clone(NULL, &orig->bio, GFP_NOFS,
					 &c->bio_read_split),
			 orig->opts);
 	rbio->bio.bi_iter = iter;
...
@@ -1438,7 +1438,7 @@ static void journal_buf_realloc(struct journal *j, struct journal_buf *buf)
 	if (buf->buf_size >= new_size)
 		return;
 
-	new_buf = kvpmalloc(new_size, GFP_NOIO|__GFP_NOWARN);
+	new_buf = kvpmalloc(new_size, GFP_NOFS|__GFP_NOWARN);
 	if (!new_buf)
 		return;
...
@@ -271,7 +271,7 @@ void bch2_journal_do_discards(struct journal *j)
 			blkdev_issue_discard(ca->disk_sb.bdev,
					     bucket_to_sector(ca,
						ja->buckets[ja->discard_idx]),
-					     ca->mi.bucket_size, GFP_NOIO);
+					     ca->mi.bucket_size, GFP_NOFS);
 
 			spin_lock(&j->lock);
 			ja->discard_idx = (ja->discard_idx + 1) % ja->nr;
...
@@ -18,7 +18,7 @@ int bch2_keylist_realloc(struct keylist *l, u64 *inline_u64s,
 	    (old_buf && roundup_pow_of_two(oldsize) == newsize))
 		return 0;
 
-	new_keys = krealloc(old_buf, sizeof(u64) * newsize, GFP_NOIO);
+	new_keys = krealloc(old_buf, sizeof(u64) * newsize, GFP_NOFS);
 	if (!new_keys)
 		return -ENOMEM;
...
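
A pattern that recurs throughout this patch is a cheap allocation with a mempool fallback. Here is a sketch under illustrative names (not bcachefs code): because GFP_NOFS includes __GFP_DIRECT_RECLAIM, mempool_alloc() can sleep until a preallocated element is returned rather than failing, so the fallback path should not return NULL in process context.

	#include <linux/gfp.h>
	#include <linux/mempool.h>
	#include <linux/slab.h>

	static void *bounce_alloc_sketch(mempool_t *pool, size_t size,
					 bool *used_mempool)
	{
		/* Fast path: plain kmalloc, failure tolerated silently: */
		void *p = kmalloc(size, GFP_NOFS|__GFP_NOWARN);

		if (p) {
			*used_mempool = false;
			return p;
		}

		/* Slow path: a blocking mempool_alloc() waits for an element: */
		*used_mempool = true;
		return mempool_alloc(pool, GFP_NOFS);
	}
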