Commit 2d5b569b authored by NeilBrown

md/raid5: avoid races when changing cache size.

Cache size can grow or shrink due to various pressures at
any time.  So when we resize the cache as part of a 'grow'
operation (i.e. change the size to allow more devices) we need
to block that automatic growing/shrinking.

So introduce a mutex.  Automatic grow/shrink uses mutex_trylock()
and simply skips the adjustment if the mutex is already held.
Resizing the whole cache holds the mutex to ensure that
the correct number of new stripes is allocated.

This bug can result in some stripes not being freed when an
array is stopped.  This leads to the kmem_cache not being
freed and a subsequent array can try to use the same kmem_cache
and get confused.

Fixes: edbe83ab ("md/raid5: allow the stripe_cache to grow and shrink.")
Cc: stable@vger.kernel.org (4.1 - please delay until 2 weeks after release of 4.2)
Signed-off-by: NeilBrown <neilb@suse.com>
parent 6aaf0da8
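
The locking pattern applied here is: the deliberate resize path takes the mutex
unconditionally so it sees an exact stripe count, while the opportunistic auto
grow/shrink paths only trylock and skip their adjustment if a resize is in
flight.  Below is a minimal userspace sketch of that pattern using pthreads;
the names (stripe_cache, try_shrink_one, etc.) are hypothetical and this is
not the raid5 code itself.

#include <pthread.h>
#include <stdio.h>

/* Hypothetical stripe cache guarded by a size mutex,
 * playing the role of conf->cache_size_mutex. */
struct stripe_cache {
	pthread_mutex_t size_mutex;
	int nr_stripes;
};

/* Deliberate resize: hold the mutex so the final count is exact. */
static void resize_cache(struct stripe_cache *c, int new_nr)
{
	pthread_mutex_lock(&c->size_mutex);
	c->nr_stripes = new_nr;
	pthread_mutex_unlock(&c->size_mutex);
}

/* Opportunistic shrink under memory pressure:
 * trylock and simply skip if a resize is in progress. */
static void try_shrink_one(struct stripe_cache *c)
{
	if (pthread_mutex_trylock(&c->size_mutex) != 0)
		return;			/* blocked; don't bother */
	if (c->nr_stripes > 0)
		c->nr_stripes--;
	pthread_mutex_unlock(&c->size_mutex);
}

int main(void)
{
	struct stripe_cache c = {
		.size_mutex = PTHREAD_MUTEX_INITIALIZER,
		.nr_stripes = 256,
	};

	resize_cache(&c, 512);	/* exact: always takes the lock */
	try_shrink_one(&c);	/* best effort: may be skipped */
	printf("%d stripes\n", c.nr_stripes);
	return 0;
}

In the kernel patch the same split shows up as mutex_lock() in resize_stripes()
and raid5_set_cache_size(), and mutex_trylock() in raid5d() and
raid5_cache_scan().
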
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2162,6 +2162,9 @@ static int resize_stripes(struct r5conf *conf, int newsize)
 	if (!sc)
 		return -ENOMEM;
 
+	/* Need to ensure auto-resizing doesn't interfere */
+	mutex_lock(&conf->cache_size_mutex);
+
 	for (i = conf->max_nr_stripes; i; i--) {
 		nsh = alloc_stripe(sc, GFP_KERNEL);
 		if (!nsh)
@@ -2178,6 +2181,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
 			kmem_cache_free(sc, nsh);
 		}
 		kmem_cache_destroy(sc);
+		mutex_unlock(&conf->cache_size_mutex);
 		return -ENOMEM;
 	}
 	/* Step 2 - Must use GFP_NOIO now.
@@ -2224,6 +2228,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
 	} else
 		err = -ENOMEM;
 
+	mutex_unlock(&conf->cache_size_mutex);
 	/* Step 4, return new stripes to service */
 	while(!list_empty(&newstripes)) {
 		nsh = list_entry(newstripes.next, struct stripe_head, lru);
@@ -5857,12 +5862,14 @@ static void raid5d(struct md_thread *thread)
 	pr_debug("%d stripes handled\n", handled);
 
 	spin_unlock_irq(&conf->device_lock);
-	if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state)) {
+	if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state) &&
+	    mutex_trylock(&conf->cache_size_mutex)) {
 		grow_one_stripe(conf, __GFP_NOWARN);
 		/* Set flag even if allocation failed.  This helps
 		 * slow down allocation requests when mem is short
 		 */
 		set_bit(R5_DID_ALLOC, &conf->cache_state);
+		mutex_unlock(&conf->cache_size_mutex);
 	}
 
 	async_tx_issue_pending_all();
@@ -5894,18 +5901,22 @@ raid5_set_cache_size(struct mddev *mddev, int size)
 		return -EINVAL;
 
 	conf->min_nr_stripes = size;
+	mutex_lock(&conf->cache_size_mutex);
 	while (size < conf->max_nr_stripes &&
 	       drop_one_stripe(conf))
 		;
+	mutex_unlock(&conf->cache_size_mutex);
 
 	err = md_allow_write(mddev);
 	if (err)
 		return err;
 
+	mutex_lock(&conf->cache_size_mutex);
 	while (size > conf->max_nr_stripes)
 		if (!grow_one_stripe(conf, GFP_KERNEL))
 			break;
+	mutex_unlock(&conf->cache_size_mutex);
 
 	return 0;
 }
@@ -6371,11 +6382,18 @@ static unsigned long raid5_cache_scan(struct shrinker *shrink,
 				      struct shrink_control *sc)
 {
 	struct r5conf *conf = container_of(shrink, struct r5conf, shrinker);
-	int ret = 0;
-	while (ret < sc->nr_to_scan) {
-		if (drop_one_stripe(conf) == 0)
-			return SHRINK_STOP;
-		ret++;
+	unsigned long ret = SHRINK_STOP;
+
+	if (mutex_trylock(&conf->cache_size_mutex)) {
+		ret= 0;
+		while (ret < sc->nr_to_scan) {
+			if (drop_one_stripe(conf) == 0) {
+				ret = SHRINK_STOP;
+				break;
+			}
+			ret++;
+		}
+		mutex_unlock(&conf->cache_size_mutex);
 	}
 	return ret;
 }
@@ -6444,6 +6462,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
 		goto abort;
 	spin_lock_init(&conf->device_lock);
 	seqcount_init(&conf->gen_lock);
+	mutex_init(&conf->cache_size_mutex);
 	init_waitqueue_head(&conf->wait_for_quiescent);
 	for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) {
 		init_waitqueue_head(&conf->wait_for_stripe[i]);
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -482,7 +482,8 @@ struct r5conf {
 	 */
 	int			active_name;
 	char			cache_name[2][32];
 	struct kmem_cache	*slab_cache; /* for allocating stripes */
+	struct mutex		cache_size_mutex; /* Protect changes to cache size */
 
 	int			seq_flush, seq_write;
 	int			quiesce;