Commit 88ed806a authored by Linus Torvalds

Merge tag 'md/3.18' of git://neil.brown.name/md

Pull md updates from Neil Brown:
 - a few minor bug fixes
 - quite a lot of code tidy-up and simplification
 - remove PRINT_RAID_DEBUG ioctl.  I'm fairly sure it is unused, and it
   isn't particularly useful.

* tag 'md/3.18' of git://neil.brown.name/md: (21 commits)
  lib/raid6: Add log level to printks
  md: move EXPORT_SYMBOL to after function in md.c
  md: discard PRINT_RAID_DEBUG ioctl
  md: remove MD_BUG()
  md: clean up 'exit' labels in md_ioctl().
  md: remove unnecessary test for MD_MAJOR in md_ioctl()
  md: don't allow "-sync" to be set for device in an active array.
  md: remove unwanted white space from md.c
  md: don't start resync thread directly from md thread.
  md: Just use RCU when checking for overlap between arrays.
  md: avoid potential long delay under pers_lock
  md: simplify export_array()
  md: discard find_rdev_nr in favour of find_rdev_nr_rcu
  md: use wait_event() to simplify md_super_wait()
  md: be more relaxed about stopping an array which isn't started.
  md/raid1: process_checks doesn't use its return value.
  md/raid5: fix init_stripe() inconsistencies
  md/raid10: another memory leak due to reshape.
  md: use set_bit/clear_bit instead of shift/mask for bi_flags changes.
  md/raid1: minor typos and reformatting.
  ...
parents e56d9fcc b395f75e
......@@ -879,7 +879,6 @@ void bitmap_unplug(struct bitmap *bitmap)
{
unsigned long i;
int dirty, need_write;
-int wait = 0;
if (!bitmap || !bitmap->storage.filemap ||
test_bit(BITMAP_STALE, &bitmap->flags))
......@@ -897,16 +896,13 @@ void bitmap_unplug(struct bitmap *bitmap)
clear_page_attr(bitmap, i, BITMAP_PAGE_PENDING);
write_page(bitmap, bitmap->storage.filemap[i], 0);
}
-if (dirty)
-wait = 1;
}
-if (wait) { /* if any writes were performed, we need to wait on them */
-if (bitmap->storage.file)
-wait_event(bitmap->write_wait,
-atomic_read(&bitmap->pending_writes)==0);
-else
-md_super_wait(bitmap->mddev);
-}
+if (bitmap->storage.file)
+wait_event(bitmap->write_wait,
+atomic_read(&bitmap->pending_writes)==0);
+else
+md_super_wait(bitmap->mddev);
if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
bitmap_file_kick(bitmap);
}
......
......@@ -10,10 +10,10 @@
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
You should have received a copy of the GNU General Public License
(for example /usr/src/linux/COPYING); if not, write to the Free
Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/blkdev.h>
......@@ -25,7 +25,7 @@
#include "linear.h"
/*
* find which device holds a particular offset
*/
static inline struct dev_info *which_dev(struct mddev *mddev, sector_t sector)
{
......@@ -355,7 +355,6 @@ static void linear_status (struct seq_file *seq, struct mddev *mddev)
seq_printf(seq, " %dk rounding", mddev->chunk_sectors / 2);
}
static struct md_personality linear_personality =
{
.name = "linear",
......@@ -379,7 +378,6 @@ static void linear_exit (void)
unregister_md_personality (&linear_personality);
}
module_init(linear_init);
module_exit(linear_exit);
MODULE_LICENSE("GPL");
......
/*
md.c : Multiple Devices driver for Linux
Copyright (C) 1998, 1999, 2000 Ingo Molnar
completely rewritten, based on the MD driver code from Marc Zyngier
......@@ -66,8 +66,6 @@ static void autostart_arrays(int part);
static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);
-static void md_print_devices(void);
static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
static struct workqueue_struct *md_wq;
static struct workqueue_struct *md_misc_wq;
......@@ -75,8 +73,6 @@ static struct workqueue_struct *md_misc_wq;
static int remove_and_add_spares(struct mddev *mddev,
struct md_rdev *this);
-#define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }
/*
* Default number of read corrections we'll attempt on an rdev
* before ejecting it from the array. We divide the read error
......@@ -218,7 +214,6 @@ static void md_new_event_inintr(struct mddev *mddev)
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);
/*
* iterates through all used mddevs in the system.
* We take care to grab the all_mddevs_lock whenever navigating
......@@ -228,7 +223,7 @@ static DEFINE_SPINLOCK(all_mddevs_lock);
*/
#define for_each_mddev(_mddev,_tmp) \
\
for (({ spin_lock(&all_mddevs_lock); \
_tmp = all_mddevs.next; \
_mddev = NULL;}); \
({ if (_tmp != &all_mddevs) \
......@@ -241,7 +236,6 @@ static DEFINE_SPINLOCK(all_mddevs_lock);
_tmp = _tmp->next;}) \
)
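For readers tripping over the iterator above: it takes all_mddevs_lock around each step and swaps mddev_get()/mddev_put() references as it advances, so the loop body always runs with the current mddev pinned. An illustrative use, consistent with the macro's two-variable signature (the loop body is hypothetical):

/* Illustrative only; the printk body is hypothetical. */
struct mddev *mddev;
struct list_head *tmp;

for_each_mddev(mddev, tmp)
	printk(KERN_INFO "md: visiting %s\n", mdname(mddev));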
/* Rather than calling directly into the personality make_request function,
* IO requests come here first so that we can check if the device is
* being suspended pending a reconfiguration.
......@@ -488,7 +482,7 @@ void mddev_init(struct mddev *mddev)
}
EXPORT_SYMBOL_GPL(mddev_init);
-static struct mddev * mddev_find(dev_t unit)
+static struct mddev *mddev_find(dev_t unit)
{
struct mddev *mddev, *new = NULL;
......@@ -530,7 +524,7 @@ static struct mddev * mddev_find(dev_t unit)
kfree(new);
return NULL;
}
is_free = 1;
list_for_each_entry(mddev, &all_mddevs, all_mddevs)
if (mddev->unit == dev) {
......@@ -562,7 +556,7 @@ static struct mddev * mddev_find(dev_t unit)
goto retry;
}
-static inline int __must_check mddev_lock(struct mddev * mddev)
+static inline int __must_check mddev_lock(struct mddev *mddev)
{
return mutex_lock_interruptible(&mddev->reconfig_mutex);
}
......@@ -570,7 +564,7 @@ static inline int __must_check mddev_lock(struct mddev * mddev)
/* Sometimes we need to take the lock in a situation where
* failure due to interrupts is not acceptable.
*/
-static inline void mddev_lock_nointr(struct mddev * mddev)
+static inline void mddev_lock_nointr(struct mddev *mddev)
{
mutex_lock(&mddev->reconfig_mutex);
}
......@@ -580,14 +574,14 @@ static inline int mddev_is_locked(struct mddev *mddev)
return mutex_is_locked(&mddev->reconfig_mutex);
}
-static inline int mddev_trylock(struct mddev * mddev)
+static inline int mddev_trylock(struct mddev *mddev)
{
return mutex_trylock(&mddev->reconfig_mutex);
}
static struct attribute_group md_redundancy_group;
-static void mddev_unlock(struct mddev * mddev)
+static void mddev_unlock(struct mddev *mddev)
{
if (mddev->to_remove) {
/* These cannot be removed under reconfig_mutex as
......@@ -630,17 +624,6 @@ static void mddev_unlock(struct mddev * mddev)
spin_unlock(&pers_lock);
}
-static struct md_rdev * find_rdev_nr(struct mddev *mddev, int nr)
-{
-struct md_rdev *rdev;
-rdev_for_each(rdev, mddev)
-if (rdev->desc_nr == nr)
-return rdev;
-return NULL;
-}
static struct md_rdev *find_rdev_nr_rcu(struct mddev *mddev, int nr)
{
struct md_rdev *rdev;
......@@ -693,11 +676,8 @@ static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
return MD_NEW_SIZE_SECTORS(num_sectors);
}
-static int alloc_disk_sb(struct md_rdev * rdev)
+static int alloc_disk_sb(struct md_rdev *rdev)
{
-if (rdev->sb_page)
-MD_BUG();
rdev->sb_page = alloc_page(GFP_KERNEL);
if (!rdev->sb_page) {
printk(KERN_ALERT "md: out of memory.\n");
......@@ -766,14 +746,7 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
void md_super_wait(struct mddev *mddev)
{
/* wait for all superblock writes that were scheduled to complete */
-DEFINE_WAIT(wq);
-for(;;) {
-prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE);
-if (atomic_read(&mddev->pending_writes)==0)
-break;
-schedule();
-}
-finish_wait(&mddev->sb_wait, &wq);
+wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
}
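The removed open-coded loop is, in essence, exactly what wait_event() expands to, which is why the one-line replacement is safe. A simplified sketch of that expansion (the real macro in include/linux/wait.h has more moving parts):

/* Roughly what wait_event(wq, condition) does; simplified sketch. */
#define my_wait_event(wq, condition)					\
do {									\
	DEFINE_WAIT(__wait);						\
	for (;;) {							\
		prepare_to_wait(&(wq), &__wait, TASK_UNINTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		schedule();						\
	}								\
	finish_wait(&(wq), &__wait);					\
} while (0)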
int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
......@@ -801,17 +774,13 @@ int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
}
EXPORT_SYMBOL_GPL(sync_page_io);
-static int read_disk_sb(struct md_rdev * rdev, int size)
+static int read_disk_sb(struct md_rdev *rdev, int size)
{
char b[BDEVNAME_SIZE];
-if (!rdev->sb_page) {
-MD_BUG();
-return -EINVAL;
-}
if (rdev->sb_loaded)
return 0;
if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, true))
goto fail;
rdev->sb_loaded = 1;
......@@ -825,7 +794,7 @@ static int read_disk_sb(struct md_rdev * rdev, int size)
static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
return sb1->set_uuid0 == sb2->set_uuid0 &&
sb1->set_uuid1 == sb2->set_uuid1 &&
sb1->set_uuid2 == sb2->set_uuid2 &&
sb1->set_uuid3 == sb2->set_uuid3;
......@@ -861,14 +830,13 @@ static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
return ret;
}
static u32 md_csum_fold(u32 csum)
{
csum = (csum & 0xffff) + (csum >> 16);
return (csum & 0xffff) + (csum >> 16);
}
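md_csum_fold() is an end-around-carry reduction: adding the two 16-bit halves twice guarantees any carry out of the first addition is folded back in, so the result fits in 16 bits. A stand-alone illustration (user-space; wrapper names mine):

#include <stdint.h>
#include <stdio.h>

/* Same fold as md_csum_fold(): halves added twice for end-around carry. */
static uint32_t csum_fold(uint32_t csum)
{
	csum = (csum & 0xffff) + (csum >> 16);	/* may carry into bit 16 */
	return (csum & 0xffff) + (csum >> 16);	/* fold that carry back */
}

int main(void)
{
	/* 0xffff + 0xffff = 0x1fffe -> 0xfffe + 0x1 = 0xffff */
	printf("%#x\n", (unsigned)csum_fold(0xffffffffu));
	return 0;
}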
-static unsigned int calc_sb_csum(mdp_super_t * sb)
+static unsigned int calc_sb_csum(mdp_super_t *sb)
{
u64 newcsum = 0;
u32 *sb32 = (u32*)sb;
......@@ -882,7 +850,6 @@ static unsigned int calc_sb_csum(mdp_super_t * sb)
newcsum += sb32[i];
csum = (newcsum & 0xffffffff) + (newcsum>>32);
#ifdef CONFIG_ALPHA
/* This used to use csum_partial, which was wrong for several
* reasons including that different results are returned on
......@@ -899,7 +866,6 @@ static unsigned int calc_sb_csum(mdp_super_t * sb)
return csum;
}
/*
* Handle superblock details.
* We want to be able to handle multiple superblock formats
......@@ -965,7 +931,7 @@ int md_check_no_bitmap(struct mddev *mddev)
EXPORT_SYMBOL(md_check_no_bitmap);
/*
* load_super for 0.90.0
*/
static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
{
......@@ -1044,7 +1010,7 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
ev2 = md_event(refsb);
if (ev1 > ev2)
ret = 1;
else
ret = 0;
}
rdev->sectors = rdev->sb_start;
......@@ -1118,7 +1084,7 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
if (sb->state & (1<<MD_SB_CLEAN))
mddev->recovery_cp = MaxSector;
else {
if (sb->events_hi == sb->cp_events_hi &&
sb->events_lo == sb->cp_events_lo) {
mddev->recovery_cp = sb->recovery_cp;
} else
......@@ -1146,7 +1112,7 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
++ev1;
if (sb->disks[rdev->desc_nr].state & (
(1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
if (ev1 < mddev->events)
return -EINVAL;
} else if (mddev->bitmap) {
/* if adding to array with a bitmap, then we can accept an
......@@ -1197,7 +1163,6 @@ static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
struct md_rdev *rdev2;
int next_spare = mddev->raid_disks;
/* make rdev->sb match mddev data..
*
* 1/ zero out disks
......@@ -1366,7 +1331,7 @@ super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset)
* version 1 superblock
*/
-static __le32 calc_sb_1_csum(struct mdp_superblock_1 * sb)
+static __le32 calc_sb_1_csum(struct mdp_superblock_1 *sb)
{
__le32 disk_csum;
u32 csum;
......@@ -1430,7 +1395,6 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
ret = read_disk_sb(rdev, 4096);
if (ret) return ret;
sb = page_address(rdev->sb_page);
if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
......@@ -1817,7 +1781,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
for (i=0; i<max_dev;i++)
sb->dev_roles[i] = cpu_to_le16(0xfffe);
rdev_for_each(rdev2, mddev) {
i = rdev2->desc_nr;
if (test_bit(Faulty, &rdev2->flags))
......@@ -2033,18 +1997,13 @@ void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
}
EXPORT_SYMBOL(md_integrity_add_rdev);
-static int bind_rdev_to_array(struct md_rdev * rdev, struct mddev * mddev)
+static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
{
char b[BDEVNAME_SIZE];
struct kobject *ko;
char *s;
int err;
-if (rdev->mddev) {
-MD_BUG();
-return -EINVAL;
-}
/* prevent duplicates */
if (find_rdev(mddev, rdev->bdev->bd_dev))
return -EEXIST;
......@@ -2067,16 +2026,21 @@ static int bind_rdev_to_array(struct md_rdev * rdev, struct mddev * mddev)
* If it is -1, assign a free number, else
* check number is not in use
*/
+rcu_read_lock();
if (rdev->desc_nr < 0) {
int choice = 0;
-if (mddev->pers) choice = mddev->raid_disks;
-while (find_rdev_nr(mddev, choice))
+if (mddev->pers)
+choice = mddev->raid_disks;
+while (find_rdev_nr_rcu(mddev, choice))
choice++;
rdev->desc_nr = choice;
} else {
-if (find_rdev_nr(mddev, rdev->desc_nr))
+if (find_rdev_nr_rcu(mddev, rdev->desc_nr)) {
+rcu_read_unlock();
return -EBUSY;
+}
}
+rcu_read_unlock();
if (mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
printk(KERN_WARNING "md: %s: array is limited to %d devices\n",
mdname(mddev), mddev->max_disks);
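The body of find_rdev_nr_rcu() is elided by the hunk further up. Presumably it mirrors the removed find_rdev_nr() but uses the RCU-safe list walk from md.h, which is what makes the rcu_read_lock()/rcu_read_unlock() bracketing here sufficient. A hedged reconstruction:

/* Hedged reconstruction; rdev_for_each_rcu is the RCU iterator in md.h. */
static struct md_rdev *find_rdev_nr_rcu(struct mddev *mddev, int nr)
{
	struct md_rdev *rdev;

	rdev_for_each_rcu(rdev, mddev)
		if (rdev->desc_nr == nr)
			return rdev;
	return NULL;
}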
......@@ -2118,13 +2082,10 @@ static void md_delayed_delete(struct work_struct *ws)
kobject_put(&rdev->kobj);
}
-static void unbind_rdev_from_array(struct md_rdev * rdev)
+static void unbind_rdev_from_array(struct md_rdev *rdev)
{
char b[BDEVNAME_SIZE];
-if (!rdev->mddev) {
-MD_BUG();
-return;
-}
bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
list_del_rcu(&rdev->same_set);
printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
......@@ -2169,20 +2130,17 @@ static void unlock_rdev(struct md_rdev *rdev)
{
struct block_device *bdev = rdev->bdev;
rdev->bdev = NULL;
-if (!bdev)
-MD_BUG();
blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}
void md_autodetect_dev(dev_t dev);
-static void export_rdev(struct md_rdev * rdev)
+static void export_rdev(struct md_rdev *rdev)
{
char b[BDEVNAME_SIZE];
printk(KERN_INFO "md: export_rdev(%s)\n",
bdevname(rdev->bdev,b));
-if (rdev->mddev)
-MD_BUG();
md_rdev_clear(rdev);
#ifndef MODULE
if (test_bit(AutoDetected, &rdev->flags))
......@@ -2192,7 +2150,7 @@ static void export_rdev(struct md_rdev * rdev)
kobject_put(&rdev->kobj);
}
-static void kick_rdev_from_array(struct md_rdev * rdev)
+static void kick_rdev_from_array(struct md_rdev *rdev)
{
unbind_rdev_from_array(rdev);
export_rdev(rdev);
......@@ -2200,153 +2158,18 @@ static void kick_rdev_from_array(struct md_rdev * rdev)
static void export_array(struct mddev *mddev)
{
-struct md_rdev *rdev, *tmp;
+struct md_rdev *rdev;
-rdev_for_each_safe(rdev, tmp, mddev) {
-if (!rdev->mddev) {
-MD_BUG();
-continue;
-}
+while (!list_empty(&mddev->disks)) {
+rdev = list_first_entry(&mddev->disks, struct md_rdev,
+same_set);
kick_rdev_from_array(rdev);
}
-if (!list_empty(&mddev->disks))
-MD_BUG();
mddev->raid_disks = 0;
mddev->major_version = 0;
}
static void print_desc(mdp_disk_t *desc)
{
printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number,
desc->major,desc->minor,desc->raid_disk,desc->state);
}
static void print_sb_90(mdp_super_t *sb)
{
int i;
printk(KERN_INFO
"md: SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
sb->major_version, sb->minor_version, sb->patch_version,
sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3,
sb->ctime);
printk(KERN_INFO "md: L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n",
sb->level, sb->size, sb->nr_disks, sb->raid_disks,
sb->md_minor, sb->layout, sb->chunk_size);
printk(KERN_INFO "md: UT:%08x ST:%d AD:%d WD:%d"
" FD:%d SD:%d CSUM:%08x E:%08lx\n",
sb->utime, sb->state, sb->active_disks, sb->working_disks,
sb->failed_disks, sb->spare_disks,
sb->sb_csum, (unsigned long)sb->events_lo);
printk(KERN_INFO);
for (i = 0; i < MD_SB_DISKS; i++) {
mdp_disk_t *desc;
desc = sb->disks + i;
if (desc->number || desc->major || desc->minor ||
desc->raid_disk || (desc->state && (desc->state != 4))) {
printk(" D %2d: ", i);
print_desc(desc);
}
}
printk(KERN_INFO "md: THIS: ");
print_desc(&sb->this_disk);
}
static void print_sb_1(struct mdp_superblock_1 *sb)
{
__u8 *uuid;
uuid = sb->set_uuid;
printk(KERN_INFO
"md: SB: (V:%u) (F:0x%08x) Array-ID:<%pU>\n"
"md: Name: \"%s\" CT:%llu\n",
le32_to_cpu(sb->major_version),
le32_to_cpu(sb->feature_map),
uuid,
sb->set_name,
(unsigned long long)le64_to_cpu(sb->ctime)
& MD_SUPERBLOCK_1_TIME_SEC_MASK);
uuid = sb->device_uuid;
printk(KERN_INFO
"md: L%u SZ%llu RD:%u LO:%u CS:%u DO:%llu DS:%llu SO:%llu"
" RO:%llu\n"
"md: Dev:%08x UUID: %pU\n"
"md: (F:0x%08x) UT:%llu Events:%llu ResyncOffset:%llu CSUM:0x%08x\n"
"md: (MaxDev:%u) \n",
le32_to_cpu(sb->level),
(unsigned long long)le64_to_cpu(sb->size),
le32_to_cpu(sb->raid_disks),
le32_to_cpu(sb->layout),
le32_to_cpu(sb->chunksize),
(unsigned long long)le64_to_cpu(sb->data_offset),
(unsigned long long)le64_to_cpu(sb->data_size),
(unsigned long long)le64_to_cpu(sb->super_offset),
(unsigned long long)le64_to_cpu(sb->recovery_offset),
le32_to_cpu(sb->dev_number),
uuid,
sb->devflags,
(unsigned long long)le64_to_cpu(sb->utime) & MD_SUPERBLOCK_1_TIME_SEC_MASK,
(unsigned long long)le64_to_cpu(sb->events),
(unsigned long long)le64_to_cpu(sb->resync_offset),
le32_to_cpu(sb->sb_csum),
le32_to_cpu(sb->max_dev)
);
}
static void print_rdev(struct md_rdev *rdev, int major_version)
{
char b[BDEVNAME_SIZE];
printk(KERN_INFO "md: rdev %s, Sect:%08llu F:%d S:%d DN:%u\n",
bdevname(rdev->bdev, b), (unsigned long long)rdev->sectors,
test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags),
rdev->desc_nr);
if (rdev->sb_loaded) {
printk(KERN_INFO "md: rdev superblock (MJ:%d):\n", major_version);
switch (major_version) {
case 0:
print_sb_90(page_address(rdev->sb_page));
break;
case 1:
print_sb_1(page_address(rdev->sb_page));
break;
}
} else
printk(KERN_INFO "md: no rdev superblock!\n");
}
static void md_print_devices(void)
{
struct list_head *tmp;
struct md_rdev *rdev;
struct mddev *mddev;
char b[BDEVNAME_SIZE];
printk("\n");
printk("md: **********************************\n");
printk("md: * <COMPLETE RAID STATE PRINTOUT> *\n");
printk("md: **********************************\n");
for_each_mddev(mddev, tmp) {
if (mddev->bitmap)
bitmap_print_sb(mddev->bitmap);
else
printk("%s: ", mdname(mddev));
rdev_for_each(rdev, mddev)
printk("<%s>", bdevname(rdev->bdev,b));
printk("\n");
rdev_for_each(rdev, mddev)
print_rdev(rdev, mddev->major_version);
}
printk("md: **********************************\n");
printk("\n");
}
-static void sync_sbs(struct mddev * mddev, int nospares)
+static void sync_sbs(struct mddev *mddev, int nospares)
{
/* Update each superblock (in-memory image), but
* if we are allowed to, skip spares which already
......@@ -2369,7 +2192,7 @@ static void sync_sbs(struct mddev * mddev, int nospares)
}
}
-static void md_update_sb(struct mddev * mddev, int force_change)
+static void md_update_sb(struct mddev *mddev, int force_change)
{
struct md_rdev *rdev;
int sync_req;
......@@ -2390,7 +2213,7 @@ static void md_update_sb(struct mddev * mddev, int force_change)
mddev->curr_resync_completed > rdev->recovery_offset)
rdev->recovery_offset = mddev->curr_resync_completed;
}
}
if (!mddev->persistent) {
clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
clear_bit(MD_CHANGE_DEVS, &mddev->flags);
......@@ -2453,15 +2276,12 @@ static void md_update_sb(struct mddev * mddev, int force_change)
mddev->can_decrease_events = nospares;
}
-if (!mddev->events) {
-/*
-* oops, this 64-bit counter should never wrap.
-* Either we are in around ~1 trillion A.C., assuming
-* 1 reboot per second, or we have a bug:
-*/
-MD_BUG();
-mddev->events --;
-}
+/*
+* This 64-bit counter should never wrap.
+* Either we are in around ~1 trillion A.C., assuming
+* 1 reboot per second, or we have a bug...
+*/
+WARN_ON(mddev->events == 0);
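The comment's figure is the right order of magnitude: 2^64 events at one per second is roughly 5.8e11 years. A throwaway user-space check:

#include <stdio.h>

int main(void)
{
	/* 2^64 seconds expressed in years (365-day years) */
	double years = 18446744073709551616.0 / (3600.0 * 24 * 365);
	printf("%.2e years\n", years);	/* prints about 5.85e+11 */
	return 0;
}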
rdev_for_each(rdev, mddev) {
if (rdev->badblocks.changed)
......@@ -2668,10 +2488,12 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
set_bit(In_sync, &rdev->flags);
err = 0;
} else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0) {
-clear_bit(In_sync, &rdev->flags);
-rdev->saved_raid_disk = rdev->raid_disk;
-rdev->raid_disk = -1;
-err = 0;
+if (rdev->mddev->pers == NULL) {
+clear_bit(In_sync, &rdev->flags);
+rdev->saved_raid_disk = rdev->raid_disk;
+rdev->raid_disk = -1;
+err = 0;
+}
} else if (cmd_match(buf, "write_error")) {
set_bit(WriteErrorSeen, &rdev->flags);
err = 0;
......@@ -2829,7 +2651,6 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len)
return len;
}
static struct rdev_sysfs_entry rdev_slot =
__ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
......@@ -2980,20 +2801,20 @@ rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
rdev->sectors = sectors;
if (sectors > oldsectors && my_mddev->external) {
-/* need to check that all other rdevs with the same ->bdev
-* do not overlap. We need to unlock the mddev to avoid
-* a deadlock. We have already changed rdev->sectors, and if
-* we have to change it back, we will have the lock again.
+/* Need to check that all other rdevs with the same
+* ->bdev do not overlap. 'rcu' is sufficient to walk
+* the rdev lists safely.
+* This check does not provide a hard guarantee, it
+* just helps avoid dangerous mistakes.
*/
struct mddev *mddev;
int overlap = 0;
struct list_head *tmp;
-mddev_unlock(my_mddev);
+rcu_read_lock();
for_each_mddev(mddev, tmp) {
struct md_rdev *rdev2;
-mddev_lock_nointr(mddev);
rdev_for_each(rdev2, mddev)
if (rdev->bdev == rdev2->bdev &&
rdev != rdev2 &&
......@@ -3003,13 +2824,12 @@ rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
overlap = 1;
break;
}
-mddev_unlock(mddev);
if (overlap) {
mddev_put(mddev);
break;
}
}
-mddev_lock_nointr(my_mddev);
+rcu_read_unlock();
if (overlap) {
/* Someone else could have slipped in a size
* change here, but doing so is just silly.
......@@ -3027,7 +2847,6 @@ rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
static struct rdev_sysfs_entry rdev_size =
__ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
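The loop's overlap condition above is plain interval intersection on the shared block device; md.c wraps it in a small helper, which behaves like this sketch (parameter names mine):

/* Does [s1, s1+l1) intersect [s2, s2+l2)? Sketch of the md.c helper. */
static bool ranges_overlap(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
{
	if (s1 + l1 <= s2)	/* first range ends before second starts */
		return false;
	if (s2 + l2 <= s1)	/* second range ends before first starts */
		return false;
	return true;
}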
static ssize_t recovery_start_show(struct md_rdev *rdev, char *page)
{
unsigned long long recovery_start = rdev->recovery_offset;
......@@ -3063,7 +2882,6 @@ static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_
static struct rdev_sysfs_entry rdev_recovery_start =
__ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);
static ssize_t
badblocks_show(struct badblocks *bb, char *page, int unack);
static ssize_t
......@@ -3084,7 +2902,6 @@ static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len)
static struct rdev_sysfs_entry rdev_bad_blocks =
__ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store);
static ssize_t ubb_show(struct md_rdev *rdev, char *page)
{
return badblocks_show(&rdev->badblocks, page, 1);
......@@ -3241,7 +3058,7 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS;
if (!size) {
printk(KERN_WARNING
"md: %s has zero or unknown size, marking faulty!\n",
bdevname(rdev->bdev,b));
err = -EINVAL;
......@@ -3260,7 +3077,7 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
goto abort_free;
}
if (err < 0) {
printk(KERN_WARNING
"md: could not read %s's sb, not importing!\n",
bdevname(rdev->bdev,b));
goto abort_free;
......@@ -3281,8 +3098,7 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
* Check a full RAID array for plausibility
*/
-static void analyze_sbs(struct mddev * mddev)
+static void analyze_sbs(struct mddev *mddev)
{
int i;
struct md_rdev *rdev, *freshest, *tmp;
......@@ -3300,12 +3116,11 @@ static void analyze_sbs(struct mddev * mddev)
default:
printk( KERN_ERR \
"md: fatal superblock inconsistency in %s"
" -- removing from array\n",
" -- removing from array\n",
bdevname(rdev->bdev,b));
kick_rdev_from_array(rdev);
}
super_types[mddev->major_version].
validate_super(mddev, freshest);
......@@ -3344,7 +3159,7 @@ static void analyze_sbs(struct mddev * mddev)
/* Read a fixed-point number.
* Numbers in sysfs attributes should be in "standard" units where
* possible, so time should be in seconds.
* However we internally use a much smaller unit such as
* milliseconds or jiffies.
* This function takes a decimal number with a possible fractional
* component, and produces an integer which is the result of
......@@ -3381,7 +3196,6 @@ int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
return 0;
}
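The description above boils down to: accumulate digits, remember where the decimal point fell, keep at most 'scale' fractional digits, then pad with zeros to exactly 'scale'. A stand-alone sketch of that algorithm (user-space; not the kernel's exact code, and overflow checking is omitted):

#include <ctype.h>
#include <errno.h>

/* Parse "1.53" with scale=3 into 1530; a sketch, not the kernel code. */
static int parse_scaled(const char *cp, unsigned long *res, int scale)
{
	unsigned long result = 0;
	int decimals = -1;		/* -1 until a '.' is seen */

	while (*cp && *cp != '\n') {
		if (*cp == '.' && decimals < 0)
			decimals = 0;	/* start counting fraction digits */
		else if (!isdigit((unsigned char)*cp))
			return -EINVAL;
		else if (decimals < scale) {
			result = result * 10 + (*cp - '0');
			if (decimals >= 0)
				decimals++;
		}
		cp++;
	}
	if (decimals < 0)
		decimals = 0;
	while (decimals++ < scale)	/* pad to exactly 'scale' digits */
		result *= 10;
	*res = result;
	return 0;
}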
static void md_safemode_timeout(unsigned long data);
static ssize_t
......@@ -3524,7 +3338,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
/* Looks like we have a winner */
mddev_suspend(mddev);
mddev->pers->stop(mddev);
if (mddev->pers->sync_request == NULL &&
pers->sync_request != NULL) {
/* need to add the md_redundancy_group */
......@@ -3533,7 +3347,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
"md: cannot register extra attributes for %s\n",
mdname(mddev));
mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
}
if (mddev->pers->sync_request != NULL &&
pers->sync_request == NULL) {
/* need to remove the md_redundancy_group */
......@@ -3611,7 +3425,6 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
static struct md_sysfs_entry md_level =
__ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
static ssize_t
layout_show(struct mddev *mddev, char *page)
{
......@@ -3654,7 +3467,6 @@ layout_store(struct mddev *mddev, const char *buf, size_t len)
static struct md_sysfs_entry md_layout =
__ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
static ssize_t
raid_disks_show(struct mddev *mddev, char *page)
{
......@@ -3859,9 +3671,9 @@ array_state_show(struct mddev *mddev, char *page)
return sprintf(page, "%s\n", array_states[st]);
}
-static int do_md_stop(struct mddev * mddev, int ro, struct block_device *bdev);
-static int md_set_readonly(struct mddev * mddev, struct block_device *bdev);
-static int do_md_run(struct mddev * mddev);
+static int do_md_stop(struct mddev *mddev, int ro, struct block_device *bdev);
+static int md_set_readonly(struct mddev *mddev, struct block_device *bdev);
+static int do_md_run(struct mddev *mddev);
static int restart_array(struct mddev *mddev);
static ssize_t
......@@ -4012,7 +3824,6 @@ new_dev_store(struct mddev *mddev, const char *buf, size_t len)
minor != MINOR(dev))
return -EOVERFLOW;
if (mddev->persistent) {
rdev = md_import_device(dev, mddev->major_version,
mddev->minor_version);
......@@ -4108,7 +3919,6 @@ size_store(struct mddev *mddev, const char *buf, size_t len)
static struct md_sysfs_entry md_size =
__ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
/* Metadata version.
* This is one of
* 'none' for arrays with no metadata (good luck...)
......@@ -4490,7 +4300,7 @@ suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
unsigned long long new = simple_strtoull(buf, &e, 10);
unsigned long long old = mddev->suspend_lo;
if (mddev->pers == NULL ||
mddev->pers->quiesce == NULL)
return -EINVAL;
if (buf == e || (*e && *e != '\n'))
......@@ -4510,7 +4320,6 @@ suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
static struct md_sysfs_entry md_suspend_lo =
__ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
static ssize_t
suspend_hi_show(struct mddev *mddev, char *page)
{
......@@ -4698,7 +4507,6 @@ static struct attribute_group md_redundancy_group = {
.attrs = md_redundancy_attrs,
};
static ssize_t
md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
......@@ -5111,7 +4919,7 @@ int md_run(struct mddev *mddev)
} else if (mddev->ro == 2) /* auto-readonly not meaningful */
mddev->ro = 0;
atomic_set(&mddev->writes_pending,0);
atomic_set(&mddev->max_corr_read_errors,
MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
mddev->safemode = 0;
......@@ -5125,9 +4933,9 @@ int md_run(struct mddev *mddev)
if (rdev->raid_disk >= 0)
if (sysfs_link_rdev(mddev, rdev))
/* failure here is OK */;
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
if (mddev->flags & MD_UPDATE_SB_FLAGS)
md_update_sb(mddev, 0);
......@@ -5307,7 +5115,7 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
mddev_lock_nointr(mddev);
mutex_lock(&mddev->open_mutex);
-if (atomic_read(&mddev->openers) > !!bdev ||
+if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
mddev->sync_thread ||
(bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) {
printk("md: %s still in use.\n",mdname(mddev));
......@@ -5339,7 +5147,7 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
* 0 - completely stop and dis-assemble array
* 2 - stop but do not disassemble array
*/
-static int do_md_stop(struct mddev * mddev, int mode,
+static int do_md_stop(struct mddev *mddev, int mode,
struct block_device *bdev)
{
struct gendisk *disk = mddev->gendisk;
......@@ -5362,7 +5170,7 @@ static int do_md_stop(struct mddev * mddev, int mode,
mddev_lock_nointr(mddev);
mutex_lock(&mddev->open_mutex);
-if (atomic_read(&mddev->openers) > !!bdev ||
+if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
mddev->sysfs_active ||
mddev->sync_thread ||
(bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) {
......@@ -5512,12 +5320,12 @@ static void autorun_devices(int part)
"md: cannot allocate memory for md drive.\n");
break;
}
if (mddev_lock(mddev))
printk(KERN_WARNING "md: %s locked, cannot run\n",
mdname(mddev));
else if (mddev->raid_disks || mddev->major_version
|| !list_empty(&mddev->disks)) {
printk(KERN_WARNING
"md: %s already running, cannot run %s\n",
mdname(mddev), bdevname(rdev0->bdev,b));
mddev_unlock(mddev);
......@@ -5545,7 +5353,7 @@ static void autorun_devices(int part)
}
#endif /* !MODULE */
-static int get_version(void __user * arg)
+static int get_version(void __user *arg)
{
mdu_version_t ver;
......@@ -5559,7 +5367,7 @@ static int get_version(void __user * arg)
return 0;
}
-static int get_array_info(struct mddev * mddev, void __user * arg)
+static int get_array_info(struct mddev *mddev, void __user *arg)
{
mdu_array_info_t info;
int nr,working,insync,failed,spare;
......@@ -5574,7 +5382,7 @@ static int get_array_info(struct mddev * mddev, void __user * arg)
else {
working++;
if (test_bit(In_sync, &rdev->flags))
insync++;
else
spare++;
}
......@@ -5614,7 +5422,7 @@ static int get_array_info(struct mddev * mddev, void __user * arg)
return 0;
}
-static int get_bitmap_file(struct mddev * mddev, void __user * arg)
+static int get_bitmap_file(struct mddev *mddev, void __user * arg)
{
mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
char *ptr, *buf = NULL;
......@@ -5652,7 +5460,7 @@ static int get_bitmap_file(struct mddev * mddev, void __user * arg)
return err;
}
-static int get_disk_info(struct mddev * mddev, void __user * arg)
+static int get_disk_info(struct mddev *mddev, void __user * arg)
{
mdu_disk_info_t info;
struct md_rdev *rdev;
......@@ -5688,7 +5496,7 @@ static int get_disk_info(struct mddev * mddev, void __user * arg)
return 0;
}
-static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info)
+static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
{
char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
struct md_rdev *rdev;
......@@ -5702,7 +5510,7 @@ static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info)
/* expecting a device which has a superblock */
rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
if (IS_ERR(rdev)) {
printk(KERN_WARNING
"md: md_import_device returned %ld\n",
PTR_ERR(rdev));
return PTR_ERR(rdev);
......@@ -5714,9 +5522,9 @@ static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info)
err = super_types[mddev->major_version]
.load_super(rdev, rdev0, mddev->minor_version);
if (err < 0) {
printk(KERN_WARNING
"md: %s has different UUID to %s\n",
bdevname(rdev->bdev,b),
bdevname(rdev0->bdev,b2));
export_rdev(rdev);
return -EINVAL;
......@@ -5736,7 +5544,7 @@ static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info)
if (mddev->pers) {
int err;
if (!mddev->pers->hot_add_disk) {
printk(KERN_WARNING
"%s: personality does not support diskops!\n",
mdname(mddev));
return -EINVAL;
......@@ -5747,7 +5555,7 @@ static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info)
else
rdev = md_import_device(dev, -1, -1);
if (IS_ERR(rdev)) {
printk(KERN_WARNING
"md: md_import_device returned %ld\n",
PTR_ERR(rdev));
return PTR_ERR(rdev);
......@@ -5821,7 +5629,7 @@ static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info)
int err;
rdev = md_import_device(dev, -1, 0);
if (IS_ERR(rdev)) {
printk(KERN_WARNING
"md: error, md_import_device() returned %ld\n",
PTR_ERR(rdev));
return PTR_ERR(rdev);
......@@ -5856,7 +5664,7 @@ static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info)
return 0;
}
-static int hot_remove_disk(struct mddev * mddev, dev_t dev)
+static int hot_remove_disk(struct mddev *mddev, dev_t dev)
{
char b[BDEVNAME_SIZE];
struct md_rdev *rdev;
......@@ -5882,7 +5690,7 @@ static int hot_remove_disk(struct mddev * mddev, dev_t dev)
return -EBUSY;
}
-static int hot_add_disk(struct mddev * mddev, dev_t dev)
+static int hot_add_disk(struct mddev *mddev, dev_t dev)
{
char b[BDEVNAME_SIZE];
int err;
......@@ -5898,7 +5706,7 @@ static int hot_add_disk(struct mddev * mddev, dev_t dev)
return -EINVAL;
}
if (!mddev->pers->hot_add_disk) {
printk(KERN_WARNING
"%s: personality does not support diskops!\n",
mdname(mddev));
return -EINVAL;
......@@ -5906,7 +5714,7 @@ static int hot_add_disk(struct mddev * mddev, dev_t dev)
rdev = md_import_device(dev, -1, 0);
if (IS_ERR(rdev)) {
printk(KERN_WARNING
"md: error, md_import_device() returned %ld\n",
PTR_ERR(rdev));
return -EINVAL;
......@@ -5920,7 +5728,7 @@ static int hot_add_disk(struct mddev * mddev, dev_t dev)
rdev->sectors = rdev->sb_start;
if (test_bit(Faulty, &rdev->flags)) {
printk(KERN_WARNING
"md: can not hot-add faulty %s disk to %s!\n",
bdevname(rdev->bdev,b), mdname(mddev));
err = -EINVAL;
......@@ -5968,7 +5776,6 @@ static int set_bitmap_file(struct mddev *mddev, int fd)
/* we should be able to change the bitmap.. */
}
if (fd >= 0) {
struct inode *inode;
if (mddev->bitmap)
......@@ -6039,7 +5846,7 @@ static int set_bitmap_file(struct mddev *mddev, int fd)
* The minor and patch _version numbers are also kept in case the
* super_block handler wishes to interpret them.
*/
-static int set_array_info(struct mddev * mddev, mdu_array_info_t *info)
+static int set_array_info(struct mddev *mddev, mdu_array_info_t *info)
{
if (info->raid_disks == 0) {
......@@ -6048,7 +5855,7 @@ static int set_array_info(struct mddev * mddev, mdu_array_info_t *info)
info->major_version >= ARRAY_SIZE(super_types) ||
super_types[info->major_version].name == NULL) {
/* maybe try to auto-load a module? */
printk(KERN_INFO
"md: superblock version %d not known\n",
info->major_version);
return -EINVAL;
......@@ -6196,7 +6003,6 @@ static int update_raid_disks(struct mddev *mddev, int raid_disks)
return rv;
}
/*
* update_array_info is used to change the configuration of an
* on-line array.
......@@ -6347,7 +6153,6 @@ static inline bool md_ioctl_valid(unsigned int cmd)
case GET_DISK_INFO:
case HOT_ADD_DISK:
case HOT_REMOVE_DISK:
-case PRINT_RAID_DEBUG:
case RAID_AUTORUN:
case RAID_VERSION:
case RESTART_ARRAY_RW:
......@@ -6391,18 +6196,13 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
switch (cmd) {
case RAID_VERSION:
err = get_version(argp);
-goto done;
-case PRINT_RAID_DEBUG:
-err = 0;
-md_print_devices();
-goto done;
+goto out;
#ifndef MODULE
case RAID_AUTORUN:
err = 0;
autostart_arrays(arg);
-goto done;
+goto out;
#endif
default:;
}
......@@ -6415,7 +6215,7 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
if (!mddev) {
BUG();
-goto abort;
+goto out;
}
/* Some actions do not require the mutex */
......@@ -6425,18 +6225,18 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
err = -ENODEV;
else
err = get_array_info(mddev, argp);
-goto abort;
+goto out;
case GET_DISK_INFO:
if (!mddev->raid_disks && !mddev->external)
err = -ENODEV;
else
err = get_disk_info(mddev, argp);
-goto abort;
+goto out;
case SET_DISK_FAULTY:
err = set_disk_faulty(mddev, new_decode_dev(arg));
-goto abort;
+goto out;
}
if (cmd == ADD_NEW_DISK)
......@@ -6454,10 +6254,10 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
* and writes
*/
mutex_lock(&mddev->open_mutex);
-if (atomic_read(&mddev->openers) > 1) {
+if (mddev->pers && atomic_read(&mddev->openers) > 1) {
mutex_unlock(&mddev->open_mutex);
err = -EBUSY;
-goto abort;
+goto out;
}
set_bit(MD_STILL_CLOSED, &mddev->flags);
mutex_unlock(&mddev->open_mutex);
......@@ -6465,10 +6265,10 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
}
err = mddev_lock(mddev);
if (err) {
printk(KERN_INFO
"md: ioctl lock interrupted, reason %d, cmd %d\n",
err, cmd);
-goto abort;
+goto out;
}
if (cmd == SET_ARRAY_INFO) {
......@@ -6477,38 +6277,38 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
memset(&info, 0, sizeof(info));
else if (copy_from_user(&info, argp, sizeof(info))) {
err = -EFAULT;
-goto abort_unlock;
+goto unlock;
}
if (mddev->pers) {
err = update_array_info(mddev, &info);
if (err) {
printk(KERN_WARNING "md: couldn't update"
" array info. %d\n", err);
-goto abort_unlock;
+goto unlock;
}
-goto done_unlock;
+goto unlock;
}
if (!list_empty(&mddev->disks)) {
printk(KERN_WARNING
"md: array %s already has disks!\n",
mdname(mddev));
err = -EBUSY;
-goto abort_unlock;
+goto unlock;
}
if (mddev->raid_disks) {
printk(KERN_WARNING
"md: array %s already initialised!\n",
mdname(mddev));
err = -EBUSY;
-goto abort_unlock;
+goto unlock;
}
err = set_array_info(mddev, &info);
if (err) {
printk(KERN_WARNING "md: couldn't set"
" array info. %d\n", err);
-goto abort_unlock;
+goto unlock;
}
-goto done_unlock;
+goto unlock;
}
/*
......@@ -6521,7 +6321,7 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
&& cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
&& cmd != GET_BITMAP_FILE) {
err = -ENODEV;
-goto abort_unlock;
+goto unlock;
}
/*
......@@ -6530,23 +6330,23 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
switch (cmd) {
case GET_BITMAP_FILE:
err = get_bitmap_file(mddev, argp);
-goto done_unlock;
+goto unlock;
case RESTART_ARRAY_RW:
err = restart_array(mddev);
-goto done_unlock;
+goto unlock;
case STOP_ARRAY:
err = do_md_stop(mddev, 0, bdev);
-goto done_unlock;
+goto unlock;
case STOP_ARRAY_RO:
err = md_set_readonly(mddev, bdev);
-goto done_unlock;
+goto unlock;
case HOT_REMOVE_DISK:
err = hot_remove_disk(mddev, new_decode_dev(arg));
-goto done_unlock;
+goto unlock;
case ADD_NEW_DISK:
/* We can support ADD_NEW_DISK on read-only arrays
......@@ -6562,14 +6362,14 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
break;
else
err = add_new_disk(mddev, &info);
-goto done_unlock;
+goto unlock;
}
break;
case BLKROSET:
if (get_user(ro, (int __user *)(arg))) {
err = -EFAULT;
-goto done_unlock;
+goto unlock;
}
err = -EINVAL;
......@@ -6577,11 +6377,11 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
* does not matter, no writes are coming
*/
if (ro)
-goto done_unlock;
+goto unlock;
/* are we are already prepared for writes? */
if (mddev->ro != 1)
-goto done_unlock;
+goto unlock;
/* transitioning to readauto need only happen for
* arrays that call md_write_start
......@@ -6593,17 +6393,14 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
set_disk_ro(mddev->gendisk, 0);
}
}
-goto done_unlock;
+goto unlock;
}
/*
* The remaining ioctls are changing the state of the
* superblock, so we do not allow them on read-only arrays.
-* However non-MD ioctls (e.g. get-size) will still come through
-* here and hit the 'default' below, so only disallow
-* 'md' ioctls, and switch to rw mode if started auto-readonly.
*/
-if (_IOC_TYPE(cmd) == MD_MAJOR && mddev->ro && mddev->pers) {
+if (mddev->ro && mddev->pers) {
if (mddev->ro == 2) {
mddev->ro = 0;
sysfs_notify_dirent_safe(mddev->sysfs_state);
......@@ -6621,7 +6418,7 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
}
} else {
err = -EROFS;
-goto abort_unlock;
+goto unlock;
}
}
......@@ -6633,38 +6430,32 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
err = -EFAULT;
else
err = add_new_disk(mddev, &info);
-goto done_unlock;
+goto unlock;
}
case HOT_ADD_DISK:
err = hot_add_disk(mddev, new_decode_dev(arg));
-goto done_unlock;
+goto unlock;
case RUN_ARRAY:
err = do_md_run(mddev);
-goto done_unlock;
+goto unlock;
case SET_BITMAP_FILE:
err = set_bitmap_file(mddev, (int)arg);
-goto done_unlock;
+goto unlock;
default:
err = -EINVAL;
-goto abort_unlock;
+goto unlock;
}
-done_unlock:
-abort_unlock:
+unlock:
if (mddev->hold_active == UNTIL_IOCTL &&
err != -EINVAL)
mddev->hold_active = 0;
mddev_unlock(mddev);
-return err;
-done:
-if (err)
-MD_BUG();
-abort:
+out:
return err;
}
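The label cleanup replaces four exits (done, abort, done_unlock, abort_unlock) with two: 'unlock' for paths that hold reconfig_mutex, 'out' for everything else. The shape of the idiom, in miniature (hypothetical function and helper, only the label structure mirrors md_ioctl()):

static int do_work(int arg)
{
	return arg > 0 ? 0 : -ENODEV;	/* stand-in for the real work */
}

static int example_op(struct mutex *lock, int arg)
{
	int err;

	if (arg < 0) {
		err = -EINVAL;
		goto out;		/* nothing locked yet: skip unlock */
	}
	mutex_lock(lock);
	err = do_work(arg);
	if (err)
		goto unlock;
	err = do_work(arg + 1);
unlock:
	mutex_unlock(lock);		/* every locked path exits here */
out:
	return err;
}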
#ifdef CONFIG_COMPAT
......@@ -6726,7 +6517,7 @@ static int md_open(struct block_device *bdev, fmode_t mode)
static void md_release(struct gendisk *disk, fmode_t mode)
{
struct mddev *mddev = disk->private_data;
BUG_ON(!mddev);
atomic_dec(&mddev->openers);
......@@ -6761,7 +6552,7 @@ static const struct block_device_operations md_fops =
.revalidate_disk= md_revalidate,
};
-static int md_thread(void * arg)
+static int md_thread(void *arg)
{
struct md_thread *thread = arg;
......@@ -6810,6 +6601,7 @@ void md_wakeup_thread(struct md_thread *thread)
wake_up(&thread->wqueue);
}
}
+EXPORT_SYMBOL(md_wakeup_thread);
struct md_thread *md_register_thread(void (*run) (struct md_thread *),
struct mddev *mddev, const char *name)
......@@ -6835,6 +6627,7 @@ struct md_thread *md_register_thread(void (*run) (struct md_thread *),
}
return thread;
}
+EXPORT_SYMBOL(md_register_thread);
void md_unregister_thread(struct md_thread **threadp)
{
......@@ -6852,14 +6645,10 @@ void md_unregister_thread(struct md_thread **threadp)
kthread_stop(thread->tsk);
kfree(thread);
}
+EXPORT_SYMBOL(md_unregister_thread);
void md_error(struct mddev *mddev, struct md_rdev *rdev)
{
-if (!mddev) {
-MD_BUG();
-return;
-}
if (!rdev || test_bit(Faulty, &rdev->flags))
return;
......@@ -6876,6 +6665,7 @@ void md_error(struct mddev *mddev, struct md_rdev *rdev)
queue_work(md_misc_wq, &mddev->event_work);
md_new_event_inintr(mddev);
}
+EXPORT_SYMBOL(md_error);
/* seq_file implementation /proc/mdstat */
......@@ -6898,8 +6688,7 @@ static void status_unused(struct seq_file *seq)
seq_printf(seq, "\n");
}
-static void status_resync(struct seq_file *seq, struct mddev * mddev)
+static void status_resync(struct seq_file *seq, struct mddev *mddev)
{
sector_t max_sectors, resync, res;
unsigned long dt, db;
......@@ -6919,13 +6708,7 @@ static void status_resync(struct seq_file *seq, struct mddev * mddev)
else
max_sectors = mddev->dev_sectors;
-/*
-* Should not happen.
-*/
-if (!max_sectors) {
-MD_BUG();
-return;
-}
+WARN_ON(max_sectors == 0);
/* Pick 'scale' such that (resync>>scale)*1000 will fit
* in a sector_t, and (max_sectors>>scale) will fit in a
* u32, as those are the requirements for sector_div.
......@@ -7021,7 +6804,7 @@ static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct list_head *tmp;
struct mddev *next_mddev, *mddev = v;
++*pos;
if (v == (void*)2)
return NULL;
......@@ -7036,7 +6819,7 @@ static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
else {
next_mddev = (void*)2;
*pos = 0x10000;
}
spin_unlock(&all_mddevs_lock);
if (v != (void*)1)
......@@ -7132,7 +6915,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
if (mddev->pers) {
mddev->pers->status(seq, mddev);
seq_printf(seq, "\n ");
if (mddev->pers->sync_request) {
if (mddev->curr_resync > 2) {
status_resync(seq, mddev);
......@@ -7150,7 +6933,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, "\n");
}
mddev_unlock(mddev);
return 0;
}
......@@ -7204,12 +6987,14 @@ static const struct file_operations md_seq_fops = {
int register_md_personality(struct md_personality *p)
{
printk(KERN_INFO "md: %s personality registered for level %d\n",
p->name, p->level);
spin_lock(&pers_lock);
list_add_tail(&p->list, &pers_list);
printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level);
spin_unlock(&pers_lock);
return 0;
}
+EXPORT_SYMBOL(register_md_personality);
int unregister_md_personality(struct md_personality *p)
{
......@@ -7219,10 +7004,11 @@ int unregister_md_personality(struct md_personality *p)
spin_unlock(&pers_lock);
return 0;
}
+EXPORT_SYMBOL(unregister_md_personality);
static int is_mddev_idle(struct mddev *mddev, int init)
{
-struct md_rdev * rdev;
+struct md_rdev *rdev;
int idle;
int curr_events;
......@@ -7276,7 +7062,7 @@ void md_done_sync(struct mddev *mddev, int blocks, int ok)
// stop recovery, signal do_sync ....
}
}
+EXPORT_SYMBOL(md_done_sync);
/* md_write_start(mddev, bi)
* If we need to update some array metadata (e.g. 'active' flag
......@@ -7317,6 +7103,7 @@ void md_write_start(struct mddev *mddev, struct bio *bi)
wait_event(mddev->sb_wait,
!test_bit(MD_CHANGE_PENDING, &mddev->flags));
}
+EXPORT_SYMBOL(md_write_start);
void md_write_end(struct mddev *mddev)
{
......@@ -7327,6 +7114,7 @@ void md_write_end(struct mddev *mddev)
mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
}
}
+EXPORT_SYMBOL(md_write_end);
/* md_allow_write(mddev)
* Calling this ensures that the array is marked 'active' so that writes
......@@ -7784,6 +7572,33 @@ static int remove_and_add_spares(struct mddev *mddev,
return spares;
}
+static void md_start_sync(struct work_struct *ws)
+{
+struct mddev *mddev = container_of(ws, struct mddev, del_work);
+mddev->sync_thread = md_register_thread(md_do_sync,
+mddev,
+"resync");
+if (!mddev->sync_thread) {
+printk(KERN_ERR "%s: could not start resync"
+" thread...\n",
+mdname(mddev));
+/* leave the spares where they are, it shouldn't hurt */
+clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
+clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
+clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
+clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
+clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
+if (test_and_clear_bit(MD_RECOVERY_RECOVER,
+&mddev->recovery))
+if (mddev->sysfs_action)
+sysfs_notify_dirent_safe(mddev->sysfs_action);
+} else
+md_wakeup_thread(mddev->sync_thread);
+sysfs_notify_dirent_safe(mddev->sysfs_action);
+md_new_event(mddev);
+}
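md_start_sync() is new code: rather than the md thread calling md_register_thread() directly, the start-up is deferred to md_misc_wq via del_work (per 'md: don't start resync thread directly from md thread' in the tag above). The general deferral pattern, reduced to a skeleton (hypothetical names; md uses its own md_misc_wq rather than system_wq):

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_ctx {
	struct work_struct work;
	/* ... state the deferred step needs ... */
};

static void my_deferred_start(struct work_struct *ws)
{
	struct my_ctx *ctx = container_of(ws, struct my_ctx, work);

	/* runs later, in workqueue context, outside the caller's locks */
	(void)ctx;
}

static void kick_off(struct my_ctx *ctx)
{
	INIT_WORK(&ctx->work, my_deferred_start);
	queue_work(system_wq, &ctx->work);
}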
/*
* This routine is regularly called by all per-raid-array threads to
* deal with generic issues like resync and super-block update.
......@@ -7900,7 +7715,7 @@ void md_check_recovery(struct mddev *mddev)
if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
-goto unlock;
+goto not_running;
/* no recovery is running.
* remove any failed drives, then
* add spares if possible.
......@@ -7912,7 +7727,7 @@ void md_check_recovery(struct mddev *mddev)
if (mddev->pers->check_reshape == NULL ||
mddev->pers->check_reshape(mddev) != 0)
/* Cannot proceed */
-goto unlock;
+goto not_running;
set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
} else if ((spares = remove_and_add_spares(mddev, NULL))) {
......@@ -7925,7 +7740,7 @@ void md_check_recovery(struct mddev *mddev)
clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
} else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
/* nothing to be done ... */
-goto unlock;
+goto not_running;
if (mddev->pers->sync_request) {
if (spares) {
......@@ -7935,27 +7750,11 @@ void md_check_recovery(struct mddev *mddev)
*/
bitmap_write_all(mddev->bitmap);
}
-mddev->sync_thread = md_register_thread(md_do_sync,
-mddev,
-"resync");
-if (!mddev->sync_thread) {
-printk(KERN_ERR "%s: could not start resync"
-" thread...\n",
-mdname(mddev));
-/* leave the spares where they are, it shouldn't hurt */
-clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
-clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
-clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
-clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
-clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
-} else
-md_wakeup_thread(mddev->sync_thread);
-sysfs_notify_dirent_safe(mddev->sysfs_action);
-md_new_event(mddev);
+INIT_WORK(&mddev->del_work, md_start_sync);
+queue_work(md_misc_wq, &mddev->del_work);
+goto unlock;
}
-unlock:
-wake_up(&mddev->sb_wait);
+not_running:
if (!mddev->sync_thread) {
clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
if (test_and_clear_bit(MD_RECOVERY_RECOVER,
......@@ -7963,9 +7762,12 @@ void md_check_recovery(struct mddev *mddev)
if (mddev->sysfs_action)
sysfs_notify_dirent_safe(mddev->sysfs_action);
}
+unlock:
+wake_up(&mddev->sb_wait);
mddev_unlock(mddev);
}
}
+EXPORT_SYMBOL(md_check_recovery);
void md_reap_sync_thread(struct mddev *mddev)
{
......@@ -8008,6 +7810,7 @@ void md_reap_sync_thread(struct mddev *mddev)
if (mddev->event_work.func)
queue_work(md_misc_wq, &mddev->event_work);
}
+EXPORT_SYMBOL(md_reap_sync_thread);
void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev)
{
......@@ -8641,7 +8444,6 @@ void md_autodetect_dev(dev_t dev)
}
}
static void autostart_arrays(int part)
{
struct md_rdev *rdev;
......@@ -8665,10 +8467,9 @@ static void autostart_arrays(int part)
if (IS_ERR(rdev))
continue;
-if (test_bit(Faulty, &rdev->flags)) {
-MD_BUG();
+if (test_bit(Faulty, &rdev->flags))
continue;
-}
set_bit(AutoDetected, &rdev->flags);
list_add(&rdev->same_set, &pending_raid_disks);
i_passed++;
......@@ -8736,20 +8537,8 @@ static int set_ro(const char *val, struct kernel_param *kp)
module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);
-EXPORT_SYMBOL(register_md_personality);
-EXPORT_SYMBOL(unregister_md_personality);
-EXPORT_SYMBOL(md_error);
-EXPORT_SYMBOL(md_done_sync);
-EXPORT_SYMBOL(md_write_start);
-EXPORT_SYMBOL(md_write_end);
-EXPORT_SYMBOL(md_register_thread);
-EXPORT_SYMBOL(md_unregister_thread);
-EXPORT_SYMBOL(md_wakeup_thread);
-EXPORT_SYMBOL(md_check_recovery);
-EXPORT_SYMBOL(md_reap_sync_thread);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MD RAID framework");
MODULE_ALIAS("md");
......
/*
md.h : kernel internal structure of the Linux MD driver
Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
You should have received a copy of the GNU General Public License
(for example /usr/src/linux/COPYING); if not, write to the Free
Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _MD_MD_H
......@@ -56,7 +56,7 @@ struct md_rdev {
__u64 sb_events;
sector_t data_offset; /* start of data in array */
sector_t new_data_offset;/* only relevant while reshaping */
sector_t sb_start; /* offset of the super block (in 512byte sectors) */
int sb_size; /* bytes in the superblock */
int preferred_minor; /* autorun support */
......@@ -239,7 +239,7 @@ struct mddev {
minor_version,
patch_version;
int persistent;
int external; /* metadata is
* managed externally */
char metadata_type[17]; /* externally set*/
int chunk_sectors;
......@@ -248,7 +248,7 @@ struct mddev {
char clevel[16];
int raid_disks;
int max_disks;
sector_t dev_sectors; /* used size of
* component devices */
sector_t array_sectors; /* exported array size */
int external_size; /* size managed
......@@ -312,7 +312,7 @@ struct mddev {
int parallel_resync;
int ok_start_degraded;
/* recovery/resync flags
* NEEDED: we might need to start a resync/recover
* RUNNING: a thread is running, or about to be started
* SYNC: actually doing a resync, not a recovery
......@@ -392,20 +392,20 @@ struct mddev {
unsigned int safemode; /* if set, update "clean" superblock
* when no writes pending.
*/
unsigned int safemode_delay;
struct timer_list safemode_timer;
atomic_t writes_pending;
struct request_queue *queue; /* for plugging ... */
struct bitmap *bitmap; /* the bitmap for the device */
struct {
struct file *file; /* the bitmap file */
loff_t offset; /* offset from superblock of
* start of bitmap. May be
* negative, but not '0'
* For external metadata, offset
* from start of device.
*/
unsigned long space; /* space available at this offset */
loff_t default_offset; /* this is the offset to use when
......@@ -421,7 +421,7 @@ struct mddev {
int external;
} bitmap_info;
atomic_t max_corr_read_errors; /* max read retries */
struct list_head all_mddevs;
struct attribute_group *to_remove;
......@@ -439,7 +439,6 @@ struct mddev {
void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
};
static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
{
int faulty = test_bit(Faulty, &rdev->flags);
......@@ -449,7 +448,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
{
atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
}
struct md_personality
......@@ -463,7 +462,7 @@ struct md_personality
int (*stop)(struct mddev *mddev);
void (*status)(struct seq_file *seq, struct mddev *mddev);
/* error_handler must set ->faulty and clear ->in_sync
* if appropriate, and should abort recovery if needed
*/
void (*error_handler)(struct mddev *mddev, struct md_rdev *rdev);
int (*hot_add_disk) (struct mddev *mddev, struct md_rdev *rdev);
......@@ -493,7 +492,6 @@ struct md_personality
void *(*takeover) (struct mddev *mddev);
};
struct md_sysfs_entry {
struct attribute attr;
ssize_t (*show)(struct mddev *, char *);
......@@ -560,7 +558,7 @@ struct md_thread {
void (*run) (struct md_thread *thread);
struct mddev *mddev;
wait_queue_head_t wqueue;
unsigned long flags;
struct task_struct *tsk;
unsigned long timeout;
void *private;
......@@ -594,7 +592,7 @@ extern void md_flush_request(struct mddev *mddev, struct bio *bio);
extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
sector_t sector, int size, struct page *page);
extern void md_super_wait(struct mddev *mddev);
extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
struct page *page, int rw, bool metadata_op);
extern void md_do_sync(struct md_thread *thread);
extern void md_new_event(struct mddev *mddev);
......
......@@ -31,13 +31,12 @@
#define NR_RESERVED_BUFS 32
static int multipath_map (struct mpconf *conf)
{
int i, disks = conf->raid_disks;
/*
* Later we do read balancing on the read side
* now we use the first available disk.
*/
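The remainder of multipath_map() is elided by the hunk; presumably it scans for the first working path under RCU and pins it, along these lines (hedged reconstruction, not verbatim):

	/* Hedged reconstruction of the elided remainder: */
	rcu_read_lock();
	for (i = 0; i < disks; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->multipaths[i].rdev);

		if (rdev && test_bit(In_sync, &rdev->flags)) {
			atomic_inc(&rdev->nr_pending);	/* pin the path */
			rcu_read_unlock();
			return i;
		}
	}
	rcu_read_unlock();
	return (-1);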
......@@ -68,7 +67,6 @@ static void multipath_reschedule_retry (struct multipath_bh *mp_bh)
md_wakeup_thread(mddev->thread);
}
/*
* multipath_end_bh_io() is called when we have finished servicing a multipathed
* operation and are ready to return a success/failure code to the buffer
......@@ -98,8 +96,8 @@ static void multipath_end_request(struct bio *bio, int error)
*/
char b[BDEVNAME_SIZE];
md_error (mp_bh->mddev, rdev);
printk(KERN_ERR "multipath: %s: rescheduling sector %llu\n",
bdevname(rdev->bdev,b),
printk(KERN_ERR "multipath: %s: rescheduling sector %llu\n",
bdevname(rdev->bdev,b),
(unsigned long long)bio->bi_iter.bi_sector);
multipath_reschedule_retry(mp_bh);
} else
......@@ -145,12 +143,12 @@ static void multipath_status (struct seq_file *seq, struct mddev *mddev)
{
struct mpconf *conf = mddev->private;
int i;
seq_printf (seq, " [%d/%d] [", conf->raid_disks,
conf->raid_disks - mddev->degraded);
for (i = 0; i < conf->raid_disks; i++)
seq_printf (seq, "%s",
conf->multipaths[i].rdev &&
test_bit(In_sync, &conf->multipaths[i].rdev->flags) ? "U" : "_");
seq_printf (seq, "]");
}
......@@ -195,7 +193,7 @@ static void multipath_error (struct mddev *mddev, struct md_rdev *rdev)
* first check if this is a queued request for a device
* which has just failed.
*/
printk(KERN_ALERT
"multipath: only one IO path left and IO error.\n");
/* leave it active... it's all we have */
return;
......@@ -242,7 +240,6 @@ static void print_multipath_conf (struct mpconf *conf)
}
}
static int multipath_add_disk(struct mddev *mddev, struct md_rdev *rdev)
{
struct mpconf *conf = mddev->private;
......@@ -325,8 +322,6 @@ static int multipath_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
return err;
}
/*
* This is a kernel thread which:
*
......@@ -356,7 +351,7 @@ static void multipathd(struct md_thread *thread)
bio = &mp_bh->bio;
bio->bi_iter.bi_sector = mp_bh->master_bio->bi_iter.bi_sector;
if ((mp_bh->path = multipath_map (conf))<0) {
printk(KERN_ALERT "multipath: %s: unrecoverable IO read"
" error for block %llu\n",
......@@ -414,7 +409,7 @@ static int multipath_run (struct mddev *mddev)
conf = kzalloc(sizeof(struct mpconf), GFP_KERNEL);
mddev->private = conf;
if (!conf) {
printk(KERN_ERR
printk(KERN_ERR
"multipath: couldn't allocate memory for %s\n",
mdname(mddev));
goto out;
......@@ -423,7 +418,7 @@ static int multipath_run (struct mddev *mddev)
conf->multipaths = kzalloc(sizeof(struct multipath_info)*mddev->raid_disks,
GFP_KERNEL);
if (!conf->multipaths) {
printk(KERN_ERR
printk(KERN_ERR
"multipath: couldn't allocate memory for %s\n",
mdname(mddev));
goto out_free_conf;
......@@ -469,7 +464,7 @@ static int multipath_run (struct mddev *mddev)
conf->pool = mempool_create_kmalloc_pool(NR_RESERVED_BUFS,
sizeof(struct multipath_bh));
if (conf->pool == NULL) {
printk(KERN_ERR
printk(KERN_ERR
"multipath: couldn't allocate memory for %s\n",
mdname(mddev));
goto out_free_conf;
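The pool created above exists to guarantee forward progress under memory pressure. A minimal usage sketch with the names from this hunk; mempool_alloc() with a blocking gfp mask falls back to one of the NR_RESERVED_BUFS preallocated buffers rather than failing:

    struct multipath_bh *mp_bh;

    mp_bh = mempool_alloc(conf->pool, GFP_NOIO);
    /* ... fill in mp_bh and issue the request ... */
    mempool_free(mp_bh, conf->pool);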
......@@ -485,7 +480,7 @@ static int multipath_run (struct mddev *mddev)
}
}
printk(KERN_INFO
printk(KERN_INFO
"multipath: array %s active with %d out of %d IO paths\n",
mdname(mddev), conf->raid_disks - mddev->degraded,
mddev->raid_disks);
......@@ -512,7 +507,6 @@ static int multipath_run (struct mddev *mddev)
return -EIO;
}
static int multipath_stop (struct mddev *mddev)
{
struct mpconf *conf = mddev->private;
......
/*
raid0.c : Multiple Devices driver for Linux
Copyright (C) 1994-96 Marc ZYNGIER
Copyright (C) 1994-96 Marc ZYNGIER
<zyngier@ufr-info-p7.ibp.fr> or
<maz@gloups.fdn.fr>
Copyright (C) 1999, 2000 Ingo Molnar, Red Hat
Copyright (C) 1999, 2000 Ingo Molnar, Red Hat
RAID-0 management functions.
......@@ -12,10 +11,10 @@
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
You should have received a copy of the GNU General Public License
(for example /usr/src/linux/COPYING); if not, write to the Free
Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/blkdev.h>
......
......@@ -494,7 +494,6 @@ static void raid1_end_write_request(struct bio *bio, int error)
bio_put(to_put);
}
/*
* This routine returns the disk from which the requested read should
* be done. There is a per-array 'next expected sequential IO' sector
......@@ -901,18 +900,18 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
* However if there are already pending
* requests (preventing the barrier from
* rising completely), and the
* pre-process bio queue isn't empty,
* per-process bio queue isn't empty,
* then don't wait, as we need to empty
* that queue to get the nr_pending
* count down.
* that queue to allow conf->start_next_window
* to increase.
*/
wait_event_lock_irq(conf->wait_barrier,
!conf->array_frozen &&
(!conf->barrier ||
((conf->start_next_window <
conf->next_resync + RESYNC_SECTORS) &&
current->bio_list &&
!bio_list_empty(current->bio_list))),
((conf->start_next_window <
conf->next_resync + RESYNC_SECTORS) &&
current->bio_list &&
!bio_list_empty(current->bio_list))),
conf->resync_lock);
conf->nr_waiting--;
}
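Read together, the reworked comment and condition say: a writer may ignore ->barrier only while resync has not yet claimed the window the write would land in and the caller still has bios queued that must drain first. As a hedged restatement of the predicate (can_proceed() is a hypothetical helper, not a function in this file):

    static bool can_proceed(struct r1conf *conf)
    {
    	return !conf->array_frozen &&
    	       (!conf->barrier ||
    		(conf->start_next_window <
    			conf->next_resync + RESYNC_SECTORS &&
    		 current->bio_list &&
    		 !bio_list_empty(current->bio_list)));
    }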
......@@ -1001,8 +1000,7 @@ static void unfreeze_array(struct r1conf *conf)
spin_unlock_irq(&conf->resync_lock);
}
/* duplicate the data pages for behind I/O
/* duplicate the data pages for behind I/O
*/
static void alloc_behind_pages(struct bio *bio, struct r1bio *r1_bio)
{
......@@ -1471,7 +1469,6 @@ static void status(struct seq_file *seq, struct mddev *mddev)
seq_printf(seq, "]");
}
static void error(struct mddev *mddev, struct md_rdev *rdev)
{
char b[BDEVNAME_SIZE];
......@@ -1565,7 +1562,7 @@ static int raid1_spare_active(struct mddev *mddev)
unsigned long flags;
/*
* Find all failed disks within the RAID1 configuration
* Find all failed disks within the RAID1 configuration
* and mark them readable.
* Called under mddev lock, so rcu protection not needed.
*/
......@@ -1606,7 +1603,6 @@ static int raid1_spare_active(struct mddev *mddev)
return count;
}
static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
{
struct r1conf *conf = mddev->private;
......@@ -1735,7 +1731,6 @@ static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
return err;
}
static void end_sync_read(struct bio *bio, int error)
{
struct r1bio *r1_bio = bio->bi_private;
......@@ -1947,7 +1942,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
return 1;
}
static int process_checks(struct r1bio *r1_bio)
static void process_checks(struct r1bio *r1_bio)
{
/* We have read all readable devices. If we haven't
* got the block, then there is no hope left.
......@@ -2039,7 +2034,6 @@ static int process_checks(struct r1bio *r1_bio)
bio_copy_data(sbio, pbio);
}
return 0;
}
static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
......@@ -2057,8 +2051,8 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
return;
if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
if (process_checks(r1_bio) < 0)
return;
process_checks(r1_bio);
/*
* schedule writes
*/
......@@ -2458,7 +2452,6 @@ static void raid1d(struct md_thread *thread)
blk_finish_plug(&plug);
}
static int init_resync(struct r1conf *conf)
{
int buffs;
......@@ -2722,7 +2715,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
/* remove last page from this bio */
bio->bi_vcnt--;
bio->bi_iter.bi_size -= len;
bio->bi_flags &= ~(1<< BIO_SEG_VALID);
__clear_bit(BIO_SEG_VALID, &bio->bi_flags);
}
goto bio_full;
}
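This hunk is the first of several below making the same substitution. __clear_bit() (and __set_bit() further down) are the non-atomic bit helpers, safe here because the caller owns the bio, and they avoid open-coding the shift and mask. Simplified from the generic implementation in include/asm-generic/bitops/non-atomic.h:

    static inline void __clear_bit(int nr, volatile unsigned long *addr)
    {
    	unsigned long mask = BIT_MASK(nr);
    	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

    	*p &= ~mask;
    }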
......@@ -2947,9 +2940,9 @@ static int run(struct mddev *mddev)
printk(KERN_NOTICE "md/raid1:%s: not clean"
" -- starting background reconstruction\n",
mdname(mddev));
printk(KERN_INFO
printk(KERN_INFO
"md/raid1:%s: active with %d out of %d mirrors\n",
mdname(mddev), mddev->raid_disks - mddev->degraded,
mdname(mddev), mddev->raid_disks - mddev->degraded,
mddev->raid_disks);
/*
......
......@@ -90,7 +90,6 @@ struct r1conf {
*/
int recovery_disabled;
/* poolinfo contains information about the content of the
* mempools - it changes when the array grows or shrinks
*/
......@@ -103,7 +102,6 @@ struct r1conf {
*/
struct page *tmppage;
/* When taking over an array from a different personality, we store
* the new thread here until we fully activate the array.
*/
......
......@@ -366,7 +366,6 @@ static void raid10_end_read_request(struct bio *bio, int error)
struct md_rdev *rdev;
struct r10conf *conf = r10_bio->mddev->private;
slot = r10_bio->read_slot;
dev = r10_bio->devs[slot].devnum;
rdev = r10_bio->devs[slot].rdev;
......@@ -1559,7 +1558,6 @@ static void make_request(struct mddev *mddev, struct bio *bio)
md_write_start(mddev, bio);
do {
/*
......@@ -1782,7 +1780,6 @@ static int raid10_spare_active(struct mddev *mddev)
return count;
}
static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
{
struct r10conf *conf = mddev->private;
......@@ -1929,7 +1926,6 @@ static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
return err;
}
static void end_sync_read(struct bio *bio, int error)
{
struct r10bio *r10_bio = bio->bi_private;
......@@ -2295,7 +2291,6 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
}
}
/*
* Used by fix_read_error() to decay the per rdev read_errors.
* We halve the read error count for every hour that has elapsed
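The decay rule the comment describes is a per-hour halving of the counter. A sketch of that arithmetic (decay_read_errors() is a hypothetical name; the real logic lives beside fix_read_error() in raid10.c):

    static void decay_read_errors(struct md_rdev *rdev, unsigned long hours)
    {
    	unsigned int errors = atomic_read(&rdev->read_errors);

    	if (hours >= 8 * sizeof(errors))	/* 32+ halvings: nothing left */
    		errors = 0;
    	else
    		errors >>= hours;
    	atomic_set(&rdev->read_errors, errors);
    }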
......@@ -2852,7 +2847,6 @@ static void raid10d(struct md_thread *thread)
blk_finish_plug(&plug);
}
static int init_resync(struct r10conf *conf)
{
int buffs;
......@@ -3388,7 +3382,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
/* remove last page from this bio */
bio2->bi_vcnt--;
bio2->bi_iter.bi_size -= len;
bio2->bi_flags &= ~(1<< BIO_SEG_VALID);
__clear_bit(BIO_SEG_VALID, &bio2->bi_flags);
}
goto bio_full;
}
......@@ -3776,7 +3770,6 @@ static int run(struct mddev *mddev)
blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);
}
if (md_integrity_register(mddev))
goto out_free_conf;
......@@ -3834,6 +3827,8 @@ static int stop(struct mddev *mddev)
mempool_destroy(conf->r10bio_pool);
safe_put_page(conf->tmppage);
kfree(conf->mirrors);
kfree(conf->mirrors_old);
kfree(conf->mirrors_new);
kfree(conf);
mddev->private = NULL;
return 0;
......@@ -4121,7 +4116,7 @@ static int raid10_start_reshape(struct mddev *mddev)
memcpy(conf->mirrors_new, conf->mirrors,
sizeof(struct raid10_info)*conf->prev.raid_disks);
smp_mb();
kfree(conf->mirrors_old); /* FIXME and elsewhere */
kfree(conf->mirrors_old);
conf->mirrors_old = conf->mirrors;
conf->mirrors = conf->mirrors_new;
conf->mirrors_new = NULL;
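This swap is why the two kfree() calls were added to stop() above: the table being replaced is parked in ->mirrors_old, and an interrupted reshape can leave ->mirrors_new allocated, so both pointers must be freed on teardown or the allocations leak. Annotating the swap itself:

    kfree(conf->mirrors_old);		/* drop any table parked earlier */
    conf->mirrors_old = conf->mirrors;	/* park the table being replaced */
    conf->mirrors = conf->mirrors_new;	/* install the resized table */
    conf->mirrors_new = NULL;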
......@@ -4416,7 +4411,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
read_bio->bi_end_io = end_sync_read;
read_bio->bi_rw = READ;
read_bio->bi_flags &= (~0UL << BIO_RESET_BITS);
read_bio->bi_flags |= 1 << BIO_UPTODATE;
__set_bit(BIO_UPTODATE, &read_bio->bi_flags);
read_bio->bi_vcnt = 0;
read_bio->bi_iter.bi_size = 0;
r10_bio->master_bio = read_bio;
......@@ -4473,7 +4468,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
/* Remove last page from this bio */
bio2->bi_vcnt--;
bio2->bi_iter.bi_size -= len;
bio2->bi_flags &= ~(1<<BIO_SEG_VALID);
__clear_bit(BIO_SEG_VALID, &bio2->bi_flags);
}
goto bio_full;
}
......@@ -4575,7 +4570,6 @@ static void end_reshape(struct r10conf *conf)
conf->fullsync = 0;
}
static int handle_reshape_read_error(struct mddev *mddev,
struct r10bio *r10_bio)
{
......
......@@ -463,7 +463,6 @@ static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh)
hlist_add_head(&sh->hash, hp);
}
/* find an idle stripe, make sure it is unhashed, and return it. */
static struct stripe_head *get_free_stripe(struct r5conf *conf, int hash)
{
......@@ -531,9 +530,7 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
BUG_ON(stripe_operations_active(sh));
pr_debug("init_stripe called, stripe %llu\n",
(unsigned long long)sh->sector);
remove_hash(sh);
(unsigned long long)sector);
retry:
seq = read_seqcount_begin(&conf->gen_lock);
sh->generation = conf->generation - previous;
......@@ -542,7 +539,6 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
stripe_set_idx(sector, conf, previous, sh);
sh->state = 0;
for (i = sh->disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
......@@ -1350,7 +1346,6 @@ ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
}
}
static void ops_complete_prexor(void *stripe_head_ref)
{
struct stripe_head *sh = stripe_head_ref;
......@@ -2419,7 +2414,6 @@ static sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
return new_sector;
}
static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
{
struct r5conf *conf = sh->raid_conf;
......@@ -2437,7 +2431,6 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
sector_t r_sector;
struct stripe_head sh2;
chunk_offset = sector_div(new_sector, sectors_per_chunk);
stripe = new_sector;
......@@ -2541,7 +2534,6 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
return r_sector;
}
static void
schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
int rcw, int expand)
......@@ -3013,7 +3005,6 @@ static void handle_stripe_fill(struct stripe_head *sh,
set_bit(STRIPE_HANDLE, &sh->state);
}
/* handle_stripe_clean_event
* any written block on an uptodate or failed drive can be returned.
* Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
......@@ -3304,7 +3295,6 @@ static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh,
}
}
static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
struct stripe_head_state *s,
int disks)
......@@ -3939,7 +3929,6 @@ static void handle_stripe(struct stripe_head *sh)
}
}
/* Finish reconstruct operations initiated by the expansion process */
if (sh->reconstruct_state == reconstruct_state_result) {
struct stripe_head *sh_src
......@@ -4137,7 +4126,6 @@ static int raid5_mergeable_bvec(struct request_queue *q,
return max;
}
static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
{
sector_t sector = bio->bi_iter.bi_sector + get_start_sect(bio->bi_bdev);
......@@ -4167,7 +4155,6 @@ static void add_bio_to_retry(struct bio *bi,struct r5conf *conf)
md_wakeup_thread(conf->mddev->thread);
}
static struct bio *remove_bio_from_retry(struct r5conf *conf)
{
struct bio *bi;
......@@ -4191,7 +4178,6 @@ static struct bio *remove_bio_from_retry(struct r5conf *conf)
return bi;
}
/*
* The "raid5_align_endio" should check if the read succeeded and if it
* did, call bio_endio on the original bio (having bio_put the new bio
......@@ -4224,7 +4210,6 @@ static void raid5_align_endio(struct bio *bi, int error)
return;
}
pr_debug("raid5_align_endio : io error...handing IO for a retry\n");
add_bio_to_retry(raid_bi, conf);
......@@ -4249,7 +4234,6 @@ static int bio_fits_rdev(struct bio *bi)
return 1;
}
static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
{
struct r5conf *conf = mddev->private;
......@@ -4301,7 +4285,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
rcu_read_unlock();
raid_bio->bi_next = (void*)rdev;
align_bi->bi_bdev = rdev->bdev;
align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
__clear_bit(BIO_SEG_VALID, &align_bi->bi_flags);
if (!bio_fits_rdev(align_bi) ||
is_badblock(rdev, align_bi->bi_iter.bi_sector,
......@@ -5446,7 +5430,6 @@ raid5_skip_copy = __ATTR(skip_copy, S_IRUGO | S_IWUSR,
raid5_show_skip_copy,
raid5_store_skip_copy);
static ssize_t
stripe_cache_active_show(struct mddev *mddev, char *page)
{
......@@ -5898,7 +5881,6 @@ static struct r5conf *setup_conf(struct mddev *mddev)
return ERR_PTR(-ENOMEM);
}
static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded)
{
switch (algo) {
......@@ -5911,7 +5893,7 @@ static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded
return 1;
break;
case ALGORITHM_PARITY_0_6:
if (raid_disk == 0 ||
if (raid_disk == 0 ||
raid_disk == raid_disks - 1)
return 1;
break;
......@@ -6165,7 +6147,6 @@ static int run(struct mddev *mddev)
"reshape");
}
/* Ok, everything is just fine now */
if (mddev->to_remove == &raid5_attrs_group)
mddev->to_remove = NULL;
......@@ -6814,7 +6795,6 @@ static void raid5_quiesce(struct mddev *mddev, int state)
}
}
static void *raid45_takeover_raid0(struct mddev *mddev, int level)
{
struct r0conf *raid0_conf = mddev->private;
......@@ -6841,7 +6821,6 @@ static void *raid45_takeover_raid0(struct mddev *mddev, int level)
return setup_conf(mddev);
}
static void *raid5_takeover_raid1(struct mddev *mddev)
{
int chunksect;
......@@ -6902,7 +6881,6 @@ static void *raid5_takeover_raid6(struct mddev *mddev)
return setup_conf(mddev);
}
static int raid5_check_reshape(struct mddev *mddev)
{
/* For a 2-drive array, the layout and chunk size can be changed
......@@ -7051,7 +7029,6 @@ static void *raid6_takeover(struct mddev *mddev)
return setup_conf(mddev);
}
static struct md_personality raid6_personality =
{
.name = "raid6",
......
......@@ -155,7 +155,7 @@
*/
/*
* Operations state - intermediate states that are visible outside of
* Operations state - intermediate states that are visible outside of
* STRIPE_ACTIVE.
* In general _idle indicates nothing is running, _run indicates a data
* processing operation is active, and _result means the data processing result
......@@ -364,7 +364,6 @@ enum {
* HANDLE gets cleared if stripe_handle leaves nothing locked.
*/
struct disk_info {
struct md_rdev *rdev, *replacement;
};
......@@ -528,7 +527,6 @@ struct r5conf {
#define ALGORITHM_ROTATING_N_RESTART 9 /* DDF PRL=6 RLQ=2 */
#define ALGORITHM_ROTATING_N_CONTINUE 10 /*DDF PRL=6 RLQ=3 */
/* For every RAID5 algorithm we define a RAID6 algorithm
* with exactly the same layout for data and parity, and
* with the Q block always on the last device (N-1).
......
......@@ -39,7 +39,6 @@
#define RAID_VERSION _IOR (MD_MAJOR, 0x10, mdu_version_t)
#define GET_ARRAY_INFO _IOR (MD_MAJOR, 0x11, mdu_array_info_t)
#define GET_DISK_INFO _IOR (MD_MAJOR, 0x12, mdu_disk_info_t)
#define PRINT_RAID_DEBUG _IO (MD_MAJOR, 0x13)
#define RAID_AUTORUN _IO (MD_MAJOR, 0x14)
#define GET_BITMAP_FILE _IOR (MD_MAJOR, 0x15, mdu_bitmap_file_t)
......
......@@ -121,9 +121,9 @@ static inline const struct raid6_recov_calls *raid6_choose_recov(void)
raid6_2data_recov = best->data2;
raid6_datap_recov = best->datap;
printk("raid6: using %s recovery algorithm\n", best->name);
pr_info("raid6: using %s recovery algorithm\n", best->name);
} else
printk("raid6: Yikes! No recovery algorithm found!\n");
pr_err("raid6: Yikes! No recovery algorithm found!\n");
return best;
}
......@@ -157,18 +157,18 @@ static inline const struct raid6_calls *raid6_choose_gen(
bestperf = perf;
best = *algo;
}
printk("raid6: %-8s %5ld MB/s\n", (*algo)->name,
pr_info("raid6: %-8s %5ld MB/s\n", (*algo)->name,
(perf*HZ) >> (20-16+RAID6_TIME_JIFFIES_LG2));
}
}
if (best) {
printk("raid6: using algorithm %s (%ld MB/s)\n",
pr_info("raid6: using algorithm %s (%ld MB/s)\n",
best->name,
(bestperf*HZ) >> (20-16+RAID6_TIME_JIFFIES_LG2));
raid6_call = *best;
} else
printk("raid6: Yikes! No algorithm found!\n");
pr_err("raid6: Yikes! No algorithm found!\n");
return best;
}
......@@ -194,7 +194,7 @@ int __init raid6_select_algo(void)
syndromes = (void *) __get_free_pages(GFP_KERNEL, 1);
if (!syndromes) {
printk("raid6: Yikes! No memory available.\n");
pr_err("raid6: Yikes! No memory available.\n");
return -ENOMEM;
}
......
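The conversions in this file give each raid6 message an explicit log level instead of the implicit default. The pr_* helpers are thin printk() wrappers; simplified from include/linux/printk.h:

    #define pr_info(fmt, ...) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
    #define pr_err(fmt, ...)  printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)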