Commit 697e2349 authored by Coly Li's avatar Coly Li Committed by Jens Axboe

bcache: explicitly make cache_set only have single cache

Currently the bcache code has a framework for multiple caches in a
cache set, but the multiple-cache support was never completed and users
use md raid1 for multiple copies of the cached data.

This patch does the following change in struct cache_set, to explicitly
make a cache_set only have a single cache,
- Change pointer array "*cache[MAX_CACHES_PER_SET]" to a single pointer
  "*cache".
- Remove pointer array "*cache_by_alloc[MAX_CACHES_PER_SET]".
- Remove "caches_loaded".

Now the code reflects exactly what it does in practice: only one cache
is used in the cache set.
Signed-off-by: default avatarColy Li <colyli@suse.de>
Signed-off-by: default avatarJens Axboe <axboe@kernel.dk>
parent 17e4aed8
......@@ -501,7 +501,7 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
bkey_init(k);
ca = c->cache_by_alloc[0];
ca = c->cache;
b = bch_bucket_alloc(ca, reserve, wait);
if (b == -1)
goto err;
......
......@@ -519,9 +519,7 @@ struct cache_set {
struct cache_sb sb;
struct cache *cache[MAX_CACHES_PER_SET];
struct cache *cache_by_alloc[MAX_CACHES_PER_SET];
int caches_loaded;
struct cache *cache;
struct bcache_device **devices;
unsigned int devices_max_used;
......@@ -808,7 +806,7 @@ static inline struct cache *PTR_CACHE(struct cache_set *c,
const struct bkey *k,
unsigned int ptr)
{
return c->cache[PTR_DEV(k, ptr)];
return c->cache;
}
static inline size_t PTR_BUCKET_NR(struct cache_set *c,
......@@ -890,7 +888,7 @@ do { \
/* Looping macros */
#define for_each_cache(ca, cs, iter) \
for (iter = 0; ca = cs->cache[iter], iter < (cs)->sb.nr_in_set; iter++)
for (iter = 0; ca = cs->cache, iter < 1; iter++)
#define for_each_bucket(b, ca) \
for (b = (ca)->buckets + (ca)->sb.first_bucket; \
......
......@@ -1674,7 +1674,7 @@ static void cache_set_free(struct closure *cl)
for_each_cache(ca, c, i)
if (ca) {
ca->set = NULL;
c->cache[ca->sb.nr_this_dev] = NULL;
c->cache = NULL;
kobject_put(&ca->kobj);
}
......@@ -2165,7 +2165,7 @@ static const char *register_cache_set(struct cache *ca)
list_for_each_entry(c, &bch_cache_sets, list)
if (!memcmp(c->sb.set_uuid, ca->sb.set_uuid, 16)) {
if (c->cache[ca->sb.nr_this_dev])
if (c->cache)
return "duplicate cache set member";
if (!can_attach_cache(ca, c))
......@@ -2215,14 +2215,11 @@ static const char *register_cache_set(struct cache *ca)
kobject_get(&ca->kobj);
ca->set = c;
ca->set->cache[ca->sb.nr_this_dev] = ca;
c->cache_by_alloc[c->caches_loaded++] = ca;
ca->set->cache = ca;
if (c->caches_loaded == c->sb.nr_in_set) {
err = "failed to run cache set";
if (run_cache_set(c) < 0)
goto err;
}
err = "failed to run cache set";
if (run_cache_set(c) < 0)
goto err;
return NULL;
err:
......@@ -2239,8 +2236,8 @@ void bch_cache_release(struct kobject *kobj)
unsigned int i;
if (ca->set) {
BUG_ON(ca->set->cache[ca->sb.nr_this_dev] != ca);
ca->set->cache[ca->sb.nr_this_dev] = NULL;
BUG_ON(ca->set->cache != ca);
ca->set->cache = NULL;
}
free_pages((unsigned long) ca->disk_buckets, ilog2(meta_bucket_pages(&ca->sb)));
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment