Commit 77b5a084 authored by Jens Axboe

bcache: don't embed 'return' statements in closure macros

This is horribly confusing; it breaks the flow of the code without
it being apparent in the caller.
Signed-off-by: Jens Axboe <axboe@fb.com>
Acked-by: Christoph Hellwig <hch@lst.de>
parent 06b285bd
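
For context, a minimal standalone sketch of the problem the commit message describes (this is not the bcache source; continue_at_old, continue_at_new, do_work and the simplified struct closure / set_closure_fn below are stand-ins invented for illustration): with the old-style macro, calling it silently returns from the enclosing function, so code after the call site never runs and nothing at the call site shows it. After this change the caller writes the return explicitly, as the hunks below show.

/* Minimal sketch, not the real bcache code: the macros and helpers
 * here are simplified stand-ins for the closure machinery. */
#include <stdio.h>

struct closure { int deferred; };

static void set_closure_fn(struct closure *cl) { (void)cl; }

/* Old style: the macro itself returns from the calling function. */
#define continue_at_old(cl)			\
do {						\
	set_closure_fn(cl);			\
	return;	/* hidden control flow */	\
} while (0)

/* New style (what this commit switches to): no embedded 'return'. */
#define continue_at_new(cl)			\
do {						\
	set_closure_fn(cl);			\
} while (0)

static void do_work(struct closure *cl)
{
	if (cl->deferred) {
		continue_at_new(cl);
		return;	/* the early exit is now visible at the call site */
	}

	/* With continue_at_old() this line would be silently skipped
	 * whenever the branch above was taken, with nothing at the
	 * call site to show it. */
	printf("work finished inline\n");
}

int main(void)
{
	struct closure cl = { .deferred = 0 };

	do_work(&cl);
	return 0;
}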
@@ -320,7 +320,6 @@ static inline void closure_wake_up(struct closure_waitlist *list)
 do {									\
 	set_closure_fn(_cl, _fn, _wq);					\
 	closure_sub(_cl, CLOSURE_RUNNING + 1);				\
-	return;								\
 } while (0)
 
 /**
@@ -349,7 +348,6 @@ do {									\
 do {									\
 	set_closure_fn(_cl, _fn, _wq);					\
 	closure_queue(_cl);						\
-	return;								\
 } while (0)
 
 /**
@@ -365,7 +363,6 @@ do {									\
 do {									\
 	set_closure_fn(_cl, _destructor, NULL);				\
 	closure_sub(_cl, CLOSURE_RUNNING - CLOSURE_DESTRUCTOR + 1);	\
-	return;								\
 } while (0)
 
 /**
...
@@ -105,6 +105,7 @@ void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p)
 	} while (n != bio);
 
 	continue_at(&s->cl, bch_bio_submit_split_done, NULL);
+	return;
 submit:
 	generic_make_request(bio);
 }
...
@@ -592,12 +592,14 @@ static void journal_write_unlocked(struct closure *cl)
 	if (!w->need_write) {
 		closure_return_with_destructor(cl, journal_write_unlock);
+		return;
 	} else if (journal_full(&c->journal)) {
 		journal_reclaim(c);
 		spin_unlock(&c->journal.lock);
 		btree_flush_write(c);
 		continue_at(cl, journal_write, system_wq);
+		return;
 	}
 
 	c->journal.blocks_free -= set_blocks(w->data, block_bytes(c));
...
@@ -88,8 +88,10 @@ static void bch_data_insert_keys(struct closure *cl)
 	if (journal_ref)
 		atomic_dec_bug(journal_ref);
 
-	if (!op->insert_data_done)
+	if (!op->insert_data_done) {
 		continue_at(cl, bch_data_insert_start, op->wq);
+		return;
+	}
 
 	bch_keylist_free(&op->insert_keys);
 	closure_return(cl);
@@ -216,8 +218,10 @@ static void bch_data_insert_start(struct closure *cl)
 		/* 1 for the device pointer and 1 for the chksum */
 		if (bch_keylist_realloc(&op->insert_keys,
 					3 + (op->csum ? 1 : 0),
-					op->c))
+					op->c)) {
 			continue_at(cl, bch_data_insert_keys, op->wq);
+			return;
+		}
 
 		k = op->insert_keys.top;
 		bkey_init(k);
@@ -255,6 +259,7 @@ static void bch_data_insert_start(struct closure *cl)
 	op->insert_data_done = true;
 	continue_at(cl, bch_data_insert_keys, op->wq);
+	return;
 err:
 	/* bch_alloc_sectors() blocks if s->writeback = true */
 	BUG_ON(op->writeback);
@@ -576,8 +581,10 @@ static void cache_lookup(struct closure *cl)
 	ret = bch_btree_map_keys(&s->op, s->iop.c,
 				 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
 				 cache_lookup_fn, MAP_END_KEY);
-	if (ret == -EAGAIN)
+	if (ret == -EAGAIN) {
 		continue_at(cl, cache_lookup, bcache_wq);
+		return;
+	}
 
 	closure_return(cl);
 }
@@ -1085,6 +1092,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
 		continue_at_nobarrier(&s->cl,
 				      flash_dev_nodata,
 				      bcache_wq);
+		return;
 	} else if (rw) {
 		bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
 					&KEY(d->id, bio->bi_iter.bi_sector, 0),
...