Commit 17e4aed8 authored by Coly Li, committed by Jens Axboe

bcache: remove 'int n' from parameter list of bch_bucket_alloc_set()

The parameter 'int n' of bch_bucket_alloc_set() is not clearly
defined. From the code comments, n is the number of buckets to allocate,
but from the code itself 'n' is the maximum number of caches to iterate
over. Indeed, at all locations where bch_bucket_alloc_set() is called,
'n' is always 1.

This patch removes the confusing and unnecessary 'int n' from the
parameter list of bch_bucket_alloc_set(), and explicitly allocates only
1 bucket for its caller.
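
The refactoring pattern is simple: a loop whose bound is always 1
collapses into straight-line code for the first element. Below is a
minimal, self-contained C sketch of that pattern, for illustration
only; the demo_-prefixed names are hypothetical stand-ins, not the
bcache API.

#include <stdio.h>

#define DEMO_NCACHE 4

/* hypothetical stand-in for bch_bucket_alloc(); -1 means no bucket free */
static long demo_bucket_alloc(int cache_idx)
{
        return 100 + cache_idx;
}

/* before: 'n' reads like a bucket count but really bounds the cache walk */
static int demo_alloc_set_old(long *ptrs, int n)
{
        int i;

        for (i = 0; i < n; i++) {       /* every caller passed n == 1 */
                long b = demo_bucket_alloc(i);

                if (b == -1)
                        return -1;
                ptrs[i] = b;
        }
        return 0;
}

/* after: allocate exactly one bucket from the first cache, no 'n' */
static int demo_alloc_set_new(long *ptrs)
{
        long b = demo_bucket_alloc(0);

        if (b == -1)
                return -1;
        ptrs[0] = b;
        return 0;
}

int main(void)
{
        long ptrs[DEMO_NCACHE];

        /* both forms allocate the same single bucket */
        if (demo_alloc_set_old(ptrs, 1) == 0)
                printf("old API: bucket %ld\n", ptrs[0]);
        if (demo_alloc_set_new(ptrs) == 0)
                printf("new API: bucket %ld\n", ptrs[0]);
        return 0;
}

With only one bucket ever requested, the BUG_ON() range check on 'n'
also loses its purpose, so the patch drops it together with the loop.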
Signed-off-by: Coly Li <colyli@suse.de>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 84e5d136
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -49,7 +49,7 @@
  *
  * bch_bucket_alloc() allocates a single bucket from a specific cache.
  *
- * bch_bucket_alloc_set() allocates one or more buckets from different caches
+ * bch_bucket_alloc_set() allocates one bucket from different caches
  * out of a cache set.
  *
  * free_some_buckets() drives all the processes described above. It's called
@@ -488,34 +488,29 @@ void bch_bucket_free(struct cache_set *c, struct bkey *k)
 }
 
 int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
-                           struct bkey *k, int n, bool wait)
+                           struct bkey *k, bool wait)
 {
-        int i;
+        struct cache *ca;
+        long b;
 
         /* No allocation if CACHE_SET_IO_DISABLE bit is set */
         if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags)))
                 return -1;
 
         lockdep_assert_held(&c->bucket_lock);
-        BUG_ON(!n || n > c->caches_loaded || n > MAX_CACHES_PER_SET);
 
         bkey_init(k);
 
-        /* sort by free space/prio of oldest data in caches */
-
-        for (i = 0; i < n; i++) {
-                struct cache *ca = c->cache_by_alloc[i];
-                long b = bch_bucket_alloc(ca, reserve, wait);
-
-                if (b == -1)
-                        goto err;
-
-                k->ptr[i] = MAKE_PTR(ca->buckets[b].gen,
-                                     bucket_to_sector(c, b),
-                                     ca->sb.nr_this_dev);
-
-                SET_KEY_PTRS(k, i + 1);
-        }
+        ca = c->cache_by_alloc[0];
+        b = bch_bucket_alloc(ca, reserve, wait);
+        if (b == -1)
+                goto err;
+
+        k->ptr[0] = MAKE_PTR(ca->buckets[b].gen,
+                             bucket_to_sector(c, b),
+                             ca->sb.nr_this_dev);
+
+        SET_KEY_PTRS(k, 1);
 
         return 0;
 err:
@@ -525,12 +520,12 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
 }
 
 int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
-                         struct bkey *k, int n, bool wait)
+                         struct bkey *k, bool wait)
 {
         int ret;
 
         mutex_lock(&c->bucket_lock);
-        ret = __bch_bucket_alloc_set(c, reserve, k, n, wait);
+        ret = __bch_bucket_alloc_set(c, reserve, k, wait);
         mutex_unlock(&c->bucket_lock);
         return ret;
 }
@@ -638,7 +633,7 @@ bool bch_alloc_sectors(struct cache_set *c,
 
                 spin_unlock(&c->data_bucket_lock);
 
-                if (bch_bucket_alloc_set(c, watermark, &alloc.key, 1, wait))
+                if (bch_bucket_alloc_set(c, watermark, &alloc.key, wait))
                         return false;
 
                 spin_lock(&c->data_bucket_lock);
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -994,9 +994,9 @@ void bch_bucket_free(struct cache_set *c, struct bkey *k);
 
 long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait);
 int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
-                           struct bkey *k, int n, bool wait);
+                           struct bkey *k, bool wait);
 int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
-                         struct bkey *k, int n, bool wait);
+                         struct bkey *k, bool wait);
 bool bch_alloc_sectors(struct cache_set *c, struct bkey *k,
                        unsigned int sectors, unsigned int write_point,
                        unsigned int write_prio, bool wait);
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -1091,7 +1091,7 @@ struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
 
         mutex_lock(&c->bucket_lock);
 retry:
-        if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, wait))
+        if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, wait))
                 goto err;
 
         bkey_put(c, &k.key);
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -486,7 +486,7 @@ static int __uuid_write(struct cache_set *c)
         closure_init_stack(&cl);
         lockdep_assert_held(&bch_register_lock);
 
-        if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, true))
+        if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, true))
                 return 1;
 
         size = meta_bucket_pages(&c->sb) * PAGE_SECTORS;