Commit 4e6b6ee2 authored by Linus Torvalds

Merge tag 'md/4.2-rc5-fixes' of git://neil.brown.name/md

Pull md fixes from Neil Brown:
 "Three more fixes for md in 4.2

  Mostly corner-case stuff.

  One of these patches is for a CVE: CVE-2015-5697

  I'm not convinced it is serious (data leak from CAP_SYS_ADMIN ioctl)
  but as people seem to want to back-port it, I've included a minimal
  version here.  The remainder of that patch from Benjamin is
  code-cleanup and will arrive in the 4.3 merge window"

* tag 'md/4.2-rc5-fixes' of git://neil.brown.name/md:
  md/raid5: don't let shrink_slab shrink too far.
  md: use kzalloc() when bitmap is disabled
  md/raid1: extend spinlock to protect raid1_end_read_request against inconsistencies
parents 9e91edcd 49895bcc
@@ -5759,7 +5759,7 @@ static int get_bitmap_file(struct mddev *mddev, void __user * arg)
 	char *ptr;
 	int err;
 
-	file = kmalloc(sizeof(*file), GFP_NOIO);
+	file = kzalloc(sizeof(*file), GFP_NOIO);
 	if (!file)
 		return -ENOMEM;
 
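For context on the CVE-2015-5697 hunk above: the ioctl path copies the whole mdu_bitmap_file_t back to user space, but when no bitmap file is configured only the first byte of the pathname is written, so an unzeroed allocation lets stale kernel heap bytes reach the caller. A minimal user-space sketch of that pattern, not the kernel code itself; the record type and copy_out() stand-in for copy_to_user() are illustrative:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for mdu_bitmap_file_t: a fixed-size record that is
 * copied out wholesale even when only part of it has been filled in. */
struct bitmap_file_info {
	char pathname[4096];
};

/* Stand-in for copy_to_user(): the caller receives every byte of *src. */
static void copy_out(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);
}

int main(void)
{
	char user_buf[sizeof(struct bitmap_file_info)];

	/* Leaky pattern: malloc() leaves the buffer holding whatever was on
	 * the heap; writing only pathname[0] and copying the whole struct
	 * exposes the remaining stale bytes to the caller. */
	struct bitmap_file_info *leaky = malloc(sizeof(*leaky));
	if (!leaky)
		return 1;
	leaky->pathname[0] = '\0';          /* "bitmap disabled" case */
	copy_out(user_buf, leaky, sizeof(*leaky));
	free(leaky);

	/* Fixed pattern, mirroring kmalloc() -> kzalloc(): calloc() zeroes
	 * the allocation, so untouched bytes copied out are just zeroes. */
	struct bitmap_file_info *safe = calloc(1, sizeof(*safe));
	if (!safe)
		return 1;
	safe->pathname[0] = '\0';
	copy_out(user_buf, safe, sizeof(*safe));
	free(safe);

	puts("only the calloc() path guarantees no stale bytes are copied out");
	return 0;
}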
@@ -1476,6 +1476,7 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
 {
 	char b[BDEVNAME_SIZE];
 	struct r1conf *conf = mddev->private;
+	unsigned long flags;
 
 	/*
 	 * If it is not operational, then we have already marked it as dead
@@ -1495,14 +1496,13 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
 		return;
 	}
 	set_bit(Blocked, &rdev->flags);
+	spin_lock_irqsave(&conf->device_lock, flags);
 	if (test_and_clear_bit(In_sync, &rdev->flags)) {
-		unsigned long flags;
-		spin_lock_irqsave(&conf->device_lock, flags);
 		mddev->degraded++;
 		set_bit(Faulty, &rdev->flags);
-		spin_unlock_irqrestore(&conf->device_lock, flags);
 	} else
 		set_bit(Faulty, &rdev->flags);
+	spin_unlock_irqrestore(&conf->device_lock, flags);
 	/*
 	 * if recovery is running, make sure it aborts.
 	 */
@@ -1568,7 +1568,10 @@ static int raid1_spare_active(struct mddev *mddev)
 	 * Find all failed disks within the RAID1 configuration
 	 * and mark them readable.
 	 * Called under mddev lock, so rcu protection not needed.
+	 * device_lock used to avoid races with raid1_end_read_request
+	 * which expects 'In_sync' flags and ->degraded to be consistent.
 	 */
+	spin_lock_irqsave(&conf->device_lock, flags);
 	for (i = 0; i < conf->raid_disks; i++) {
 		struct md_rdev *rdev = conf->mirrors[i].rdev;
 		struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
@@ -1599,7 +1602,6 @@ static int raid1_spare_active(struct mddev *mddev)
 			sysfs_notify_dirent_safe(rdev->sysfs_state);
 		}
 	}
-	spin_lock_irqsave(&conf->device_lock, flags);
 	mddev->degraded -= count;
 	spin_unlock_irqrestore(&conf->device_lock, flags);
 
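The raid1 hunks above are about keeping two pieces of state consistent for readers such as raid1_end_read_request(): the per-device In_sync flags and mddev->degraded. By taking device_lock before the branch in error() and around the whole loop in raid1_spare_active(), a reader holding the lock can never observe the flag changed but the counter not yet adjusted. A user-space pthreads sketch of that invariant, with illustrative names (in_sync, degraded, device_lock here are local variables, not the kernel's):

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define NDISKS 2

static pthread_mutex_t device_lock = PTHREAD_MUTEX_INITIALIZER;
static bool in_sync[NDISKS] = { true, true };
static int degraded;                 /* number of disks with !in_sync */

/* Writer: mark a disk faulty.  Both updates happen under device_lock,
 * mirroring how error() now takes the lock before testing In_sync. */
static void mark_faulty(int d)
{
	pthread_mutex_lock(&device_lock);
	if (in_sync[d]) {
		in_sync[d] = false;
		degraded++;
	}
	pthread_mutex_unlock(&device_lock);
}

/* Reader: like raid1_end_read_request(), it relies on the pair being
 * consistent; with the lock held, this assertion cannot fire. */
static void *reader(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		pthread_mutex_lock(&device_lock);
		int not_in_sync = 0;
		for (int d = 0; d < NDISKS; d++)
			not_in_sync += !in_sync[d];
		assert(not_in_sync == degraded);
		pthread_mutex_unlock(&device_lock);
	}
	return NULL;
}

int main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, reader, NULL);
	mark_faulty(0);
	mark_faulty(1);
	pthread_join(t, NULL);
	printf("degraded=%d, invariant held\n", degraded);
	return 0;
}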
@@ -2256,7 +2256,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
 static int drop_one_stripe(struct r5conf *conf)
 {
 	struct stripe_head *sh;
-	int hash = (conf->max_nr_stripes - 1) % NR_STRIPE_HASH_LOCKS;
+	int hash = (conf->max_nr_stripes - 1) & STRIPE_HASH_LOCKS_MASK;
 
 	spin_lock_irq(conf->hash_locks + hash);
 	sh = get_free_stripe(conf, hash);
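The hash change matters once max_nr_stripes can be driven all the way down: with a signed int, (0 - 1) % NR_STRIPE_HASH_LOCKS is -1 in C, an out-of-range index, while (0 - 1) & STRIPE_HASH_LOCKS_MASK wraps to the last valid bucket, so the hash stays usable even if max_nr_stripes reaches zero. A small stand-alone check; the constants below are local copies mirroring the kernel's 8 hash locks and mask of 7:

#include <stdio.h>

/* Local copies of the kernel constants: 8 hash locks, mask = 7. */
#define NR_STRIPE_HASH_LOCKS	8
#define STRIPE_HASH_LOCKS_MASK	(NR_STRIPE_HASH_LOCKS - 1)

int main(void)
{
	int max_nr_stripes = 0;	/* the pathological case the patch guards */

	/* Old expression: C's % keeps the sign of the dividend, so this is
	 * -1, which would index one element before conf->hash_locks. */
	int hash_mod  = (max_nr_stripes - 1) % NR_STRIPE_HASH_LOCKS;

	/* New expression: masking the two's-complement pattern of -1 yields
	 * 7, the last valid bucket, so the index stays in range. */
	int hash_mask = (max_nr_stripes - 1) & STRIPE_HASH_LOCKS_MASK;

	printf("mod: %d  mask: %d\n", hash_mod, hash_mask);	/* mod: -1  mask: 7 */
	return 0;
}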
@@ -6388,7 +6388,8 @@ static unsigned long raid5_cache_scan(struct shrinker *shrink,
 
 	if (mutex_trylock(&conf->cache_size_mutex)) {
 		ret= 0;
-		while (ret < sc->nr_to_scan) {
+		while (ret < sc->nr_to_scan &&
+		       conf->max_nr_stripes > conf->min_nr_stripes) {
 			if (drop_one_stripe(conf) == 0) {
 				ret = SHRINK_STOP;
 				break;
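Together with the hash change, the new loop condition is what actually keeps shrink_slab from shrinking too far: the scan gives up once max_nr_stripes has dropped to min_nr_stripes instead of pressing on until drop_one_stripe() fails. A simplified user-space model of that bound; cache_size, min_size and nr_to_scan are illustrative stand-ins for the conf and shrink_control fields, and the mutex is omitted:

#include <stdio.h>

#define SHRINK_STOP (~0UL)

static unsigned long cache_size = 256;	/* stands in for conf->max_nr_stripes */
static unsigned long min_size   = 16;	/* stands in for conf->min_nr_stripes */

/* Frees one cached object; returns 0 when nothing could be freed. */
static int drop_one(void)
{
	if (cache_size == 0)
		return 0;
	cache_size--;
	return 1;
}

/* Modelled on raid5_cache_scan(): scan at most nr_to_scan objects, but
 * stop at the configured floor so the cache is never shrunk below it. */
static unsigned long cache_scan(unsigned long nr_to_scan)
{
	unsigned long ret = 0;

	while (ret < nr_to_scan && cache_size > min_size) {
		if (drop_one() == 0) {
			ret = SHRINK_STOP;
			break;
		}
		ret++;
	}
	return ret;
}

int main(void)
{
	/* Ask for far more than the cache holds: only 240 objects are freed,
	 * leaving cache_size == min_size rather than 0. */
	unsigned long freed = cache_scan(100000);
	printf("freed=%lu remaining=%lu\n", freed, cache_size);
	return 0;
}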