Commit 6179fd18 authored by Andrew Morton, committed by Patrick Mochel

[PATCH] 64-bit sector_t - driver changes

From Peter Chubb

Compaq Smart array sector_t cleanup: prepare for possible 64-bit sector_t

Clean up loop device to allow huge backing files.

MD transition to 64-bit sector_t.
 - Hold sizes and offsets as sector_t, not int;
 - use 64-bit arithmetic where necessary to map a block-in-raid to its zone
   and block-in-zone (see the sketch below).
parent be48ef9e
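For illustration only, here is a minimal userspace sketch of the zone-mapping idea that the linear.c hunk below implements with the kernel's sector_div() helper (which divides a sector_t in place and returns the remainder). The stand-in helper, the example block number and the zone size are assumptions for the sketch, not part of the patch:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;		/* assumes a 64-bit sector_t build */

/* Stand-in for the kernel's sector_div(): divide in place, return remainder. */
static uint32_t sector_div_sketch(sector_t *n, uint32_t base)
{
	uint32_t rem = (uint32_t)(*n % base);
	*n /= base;
	return rem;
}

int main(void)
{
	sector_t block = 6442451000ULL;	/* hypothetical block on a large array */
	uint32_t zone_size = 262144;	/* hypothetical blocks per zone */

	sector_t zone = block;		/* divided in place below */
	uint32_t block_in_zone = sector_div_sketch(&zone, zone_size);

	printf("zone=%llu block-in-zone=%u\n",
	       (unsigned long long)zone, block_in_zone);
	return 0;
}

Keeping the 64-bit divide behind a helper matters on 32-bit hosts: an open-coded 64-bit '/' or '%' pulls in libgcc's __udivdi3/__umoddi3, which is why the md.c hunks below end up exporting those symbols for the raid0 path.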
@@ -175,8 +175,8 @@ static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
 		drv = &h->drv[i];
 		if (drv->block_size == 0)
 			continue;
-		size = sprintf(buffer+len, "cciss/c%dd%d: blksz=%d nr_blocks=%d\n",
-				ctlr, i, drv->block_size, drv->nr_blocks);
+		size = sprintf(buffer+len, "cciss/c%dd%d: blksz=%d nr_blocks=%llu\n",
+				ctlr, i, drv->block_size, (unsigned long long)drv->nr_blocks);
 		pos += size; len += size;
 	}
@@ -405,7 +405,7 @@ static int cciss_ioctl(struct inode *inode, struct file *filep,
 		} else {
 			driver_geo.heads = 0xff;
 			driver_geo.sectors = 0x3f;
-			driver_geo.cylinders = hba[ctlr]->drv[dsk].nr_blocks / (0xff*0x3f);
+			driver_geo.cylinders = (int)hba[ctlr]->drv[dsk].nr_blocks / (0xff*0x3f);
 		}
 		driver_geo.start= get_start_sect(inode->i_bdev);
 		if (copy_to_user((void *) arg, &driver_geo,
@@ -1179,7 +1179,7 @@ static int register_new_disk(int ctlr)
 		total_size = 0;
 		block_size = BLOCK_SIZE;
 	}
-	printk(KERN_INFO " blocks= %d block_size= %d\n",
+	printk(KERN_INFO " blocks= %u block_size= %d\n",
 		total_size, block_size);
 	/* Execute the command to read the disk geometry */
 	memset(inq_buff, 0, sizeof(InquiryData_struct));
......
@@ -29,7 +29,7 @@ typedef struct _drive_info_struct
 {
 	__u32 LunID;
 	int usage_count;
-	int nr_blocks;
+	sector_t nr_blocks;
 	int block_size;
 	int heads;
 	int sectors;
......
@@ -154,16 +154,25 @@ struct loop_func_table *xfer_funcs[MAX_LO_CRYPT] = {
 	&xor_funcs
 };

-#define MAX_DISK_SIZE 1024*1024*1024
-
-static void figure_loop_size(struct loop_device *lo)
+static int figure_loop_size(struct loop_device *lo)
 {
-	loff_t size = lo->lo_backing_file->f_dentry->d_inode->i_size;
-	set_capacity(disks[lo->lo_number], (size - lo->lo_offset) >> 9);
+	loff_t size = lo->lo_backing_file->f_dentry->d_inode->i_mapping->host->i_size;
+	sector_t x;
+
+	/*
+	 * Unfortunately, if we want to do I/O on the device,
+	 * the number of 512-byte sectors has to fit into a sector_t.
+	 */
+	size = (size - lo->lo_offset) >> 9;
+	x = (sector_t)size;
+	if ((loff_t)x != size)
+		return -EFBIG;
+
+	set_capacity(disks[lo->lo_number], size);
+	return 0;
 }

 static inline int lo_do_transfer(struct loop_device *lo, int cmd, char *rbuf,
-				 char *lbuf, int size, int rblock)
+				 char *lbuf, int size, sector_t rblock)
 {
 	if (!lo->transfer)
 		return 0;
......
@@ -72,10 +72,12 @@ static int linear_run (mddev_t *mddev)
 		goto out;
 	}

-	nb_zone = conf->nr_zones =
-		md_size[mdidx(mddev)] / conf->smallest->size +
-		((md_size[mdidx(mddev)] % conf->smallest->size) ? 1 : 0);
+	{
+		sector_t sz = md_size[mdidx(mddev)];
+		unsigned round = sector_div(sz, conf->smallest->size);
+		nb_zone = conf->nr_zones = sz + (round ? 1 : 0);
+	}

 	conf->hash_table = kmalloc (sizeof (struct linear_hash) * nb_zone,
 					GFP_KERNEL);
 	if (!conf->hash_table)
......
@@ -103,7 +103,7 @@ static ctl_table raid_root_table[] = {
 static void md_recover_arrays(void);
 static mdk_thread_t *md_recovery_thread;

-int md_size[MAX_MD_DEVS];
+sector_t md_size[MAX_MD_DEVS];

 static struct block_device_operations md_fops;
 static devfs_handle_t devfs_handle;
@@ -243,35 +243,35 @@ static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev)
 	return NULL;
 }

-static unsigned int calc_dev_sboffset(struct block_device *bdev)
+static sector_t calc_dev_sboffset(struct block_device *bdev)
 {
-	unsigned int size = bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
+	sector_t size = bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
 	return MD_NEW_SIZE_BLOCKS(size);
 }

-static unsigned int calc_dev_size(struct block_device *bdev, mddev_t *mddev)
+static sector_t calc_dev_size(struct block_device *bdev, mddev_t *mddev)
 {
-	unsigned int size;
+	sector_t size;

 	if (mddev->persistent)
 		size = calc_dev_sboffset(bdev);
 	else
 		size = bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
 	if (mddev->chunk_size)
-		size &= ~(mddev->chunk_size/1024 - 1);
+		size &= ~((sector_t)mddev->chunk_size/1024 - 1);
 	return size;
 }

-static unsigned int zoned_raid_size(mddev_t *mddev)
+static sector_t zoned_raid_size(mddev_t *mddev)
 {
-	unsigned int mask;
+	sector_t mask;
 	mdk_rdev_t * rdev;
 	struct list_head *tmp;

 	/*
 	 * do size and offset calculations.
 	 */
-	mask = ~(mddev->chunk_size/1024 - 1);
+	mask = ~((sector_t)mddev->chunk_size/1024 - 1);
 	ITERATE_RDEV(mddev,rdev,tmp) {
 		rdev->size &= mask;
@@ -362,7 +362,7 @@ static int sync_page_io(struct block_device *bdev, sector_t sector, int size,

 static int read_disk_sb(mdk_rdev_t * rdev)
 {
-	unsigned long sb_offset;
+	sector_t sb_offset;

 	if (!rdev->sb) {
 		MD_BUG();
@@ -614,9 +614,9 @@ static void print_sb(mdp_super_t *sb)

 static void print_rdev(mdk_rdev_t *rdev)
 {
-	printk(KERN_INFO "md: rdev %s, SZ:%08ld F:%d S:%d DN:%d ",
+	printk(KERN_INFO "md: rdev %s, SZ:%08llu F:%d S:%d DN:%d ",
 		bdev_partition_name(rdev->bdev),
-		rdev->size, rdev->faulty, rdev->in_sync, rdev->desc_nr);
+		(unsigned long long)rdev->size, rdev->faulty, rdev->in_sync, rdev->desc_nr);
 	if (rdev->sb) {
 		printk(KERN_INFO "md: rdev superblock:\n");
 		print_sb(rdev->sb);
@@ -698,7 +698,8 @@ static int uuid_equal(mdk_rdev_t *rdev1, mdk_rdev_t *rdev2)

 static int write_disk_sb(mdk_rdev_t * rdev)
 {
-	unsigned long sb_offset, size;
+	sector_t sb_offset;
+	sector_t size;

 	if (!rdev->sb) {
 		MD_BUG();
@@ -715,8 +716,10 @@ static int write_disk_sb(mdk_rdev_t * rdev)
 	sb_offset = calc_dev_sboffset(rdev->bdev);
 	if (rdev->sb_offset != sb_offset) {
-		printk(KERN_INFO "%s's sb offset has changed from %ld to %ld, skipping\n",
-			bdev_partition_name(rdev->bdev), rdev->sb_offset, sb_offset);
+		printk(KERN_INFO "%s's sb offset has changed from %llu to %llu, skipping\n",
+			bdev_partition_name(rdev->bdev),
+			(unsigned long long)rdev->sb_offset,
+			(unsigned long long)sb_offset);
 		goto skip;
 	}
 	/*
@@ -726,12 +729,14 @@ static int write_disk_sb(mdk_rdev_t * rdev)
 	 */
 	size = calc_dev_size(rdev->bdev, rdev->mddev);
 	if (size != rdev->size) {
-		printk(KERN_INFO "%s's size has changed from %ld to %ld since import, skipping\n",
-			bdev_partition_name(rdev->bdev), rdev->size, size);
+		printk(KERN_INFO "%s's size has changed from %llu to %llu since import, skipping\n",
+			bdev_partition_name(rdev->bdev),
+			(unsigned long long)rdev->size,
+			(unsigned long long)size);
 		goto skip;
 	}

-	printk(KERN_INFO "(write) %s's sb offset: %ld\n", bdev_partition_name(rdev->bdev), sb_offset);
+	printk(KERN_INFO "(write) %s's sb offset: %llu\n", bdev_partition_name(rdev->bdev), (unsigned long long)sb_offset);
 	if (!sync_page_io(rdev->bdev, sb_offset<<1, MD_SB_BYTES, rdev->sb_page, WRITE))
 		goto fail;
@@ -929,7 +934,7 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int on_disk)
 {
 	int err;
 	mdk_rdev_t *rdev;
-	unsigned int size;
+	sector_t size;

 	rdev = (mdk_rdev_t *) kmalloc(sizeof(*rdev), GFP_KERNEL);
 	if (!rdev) {
@@ -1209,9 +1214,9 @@ static int device_size_calculation(mddev_t * mddev)
 		rdev->size = calc_dev_size(rdev->bdev, mddev);
 		if (rdev->size < mddev->chunk_size / 1024) {
 			printk(KERN_WARNING
-				"md: Dev %s smaller than chunk_size: %ldk < %dk\n",
+				"md: Dev %s smaller than chunk_size: %lluk < %dk\n",
 				bdev_partition_name(rdev->bdev),
-				rdev->size, mddev->chunk_size / 1024);
+				(unsigned long long)rdev->size, mddev->chunk_size / 1024);
 			return -EINVAL;
 		}
 	}
@@ -1401,6 +1406,12 @@ static int do_md_run(mddev_t * mddev)
 	mddev->pers = pers[pnum];

 	blk_queue_make_request(&mddev->queue, mddev->pers->make_request);
+	printk("%s: setting max_sectors to %d, segment boundary to %d\n",
+		disk->disk_name,
+		chunk_size >> 9,
+		(chunk_size>>1)-1);
+	blk_queue_max_sectors(&mddev->queue, chunk_size >> 9);
+	blk_queue_segment_boundary(&mddev->queue, (chunk_size>>1) - 1);
 	mddev->queue.queuedata = mddev;
 	err = mddev->pers->run(mddev);
@@ -1872,7 +1883,7 @@ static int get_disk_info(mddev_t * mddev, void * arg)

 static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
 {
-	int size;
+	sector_t size;
 	mdk_rdev_t *rdev;
 	dev_t dev;
 	dev = MKDEV(info->major,info->minor);
@@ -2025,8 +2036,9 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev)
 	size = calc_dev_size(rdev->bdev, mddev);

 	if (size < mddev->size) {
-		printk(KERN_WARNING "md%d: disk size %d blocks < array size %ld\n",
-			mdidx(mddev), size, mddev->size);
+		printk(KERN_WARNING "md%d: disk size %llu blocks < array size %llu\n",
+			mdidx(mddev), (unsigned long long)size,
+			(unsigned long long)mddev->size);
 		err = -ENOSPC;
 		goto abort_export;
 	}
@@ -2628,7 +2640,8 @@ static int status_resync(char * page, mddev_t * mddev)
 static int md_status_read_proc(char *page, char **start, off_t off,
 			int count, int *eof, void *data)
 {
-	int sz = 0, j, size;
+	int sz = 0, j;
+	sector_t size;
 	struct list_head *tmp, *tmp2;
 	mdk_rdev_t *rdev;
 	mddev_t *mddev;
@@ -2662,10 +2675,10 @@ static int md_status_read_proc(char *page, char **start, off_t off,
 		if (!list_empty(&mddev->disks)) {
 			if (mddev->pers)
-				sz += sprintf(page + sz, "\n %d blocks",
-					md_size[mdidx(mddev)]);
+				sz += sprintf(page + sz, "\n %llu blocks",
+					(unsigned long long)md_size[mdidx(mddev)]);
 			else
-				sz += sprintf(page + sz, "\n %d blocks", size);
+				sz += sprintf(page + sz, "\n %llu blocks", (unsigned long long)size);
 		}

 		if (!mddev->pers) {
@@ -3466,6 +3479,8 @@ void cleanup_module(void)
 }
 #endif

+extern u64 __udivdi3(u64, u64);
+extern u64 __umoddi3(u64, u64);
 EXPORT_SYMBOL(md_size);
 EXPORT_SYMBOL(register_md_personality);
 EXPORT_SYMBOL(unregister_md_personality);
@@ -3477,4 +3492,6 @@ EXPORT_SYMBOL(md_unregister_thread);
 EXPORT_SYMBOL(md_wakeup_thread);
 EXPORT_SYMBOL(md_print_devices);
 EXPORT_SYMBOL(md_interrupt_thread);
+EXPORT_SYMBOL(__udivdi3);
+EXPORT_SYMBOL(__umoddi3);
 MODULE_LICENSE("GPL");
@@ -130,8 +130,8 @@ int multipath_end_request(struct bio *bio, unsigned int bytes_done, int error)
 		 * oops, IO error:
 		 */
 		md_error (mp_bh->mddev, rdev);
-		printk(KERN_ERR "multipath: %s: rescheduling sector %lu\n",
-			bdev_partition_name(rdev->bdev), bio->bi_sector);
+		printk(KERN_ERR "multipath: %s: rescheduling sector %llu\n",
+			bdev_partition_name(rdev->bdev), (unsigned long long)bio->bi_sector);
 		multipath_reschedule_retry(mp_bh);
 	}
 	atomic_dec(&rdev->nr_pending);
@@ -320,10 +320,10 @@ static int multipath_remove_disk(mddev_t *mddev, int number)
 }

 #define IO_ERROR KERN_ALERT \
-"multipath: %s: unrecoverable IO read error for block %lu\n"
+"multipath: %s: unrecoverable IO read error for block %llu\n"

 #define REDIRECT_SECTOR KERN_ERR \
-"multipath: %s: redirecting sector %lu to another IO path\n"
+"multipath: %s: redirecting sector %llu to another IO path\n"

 /*
  * This is a kernel thread which:
@@ -356,11 +356,11 @@ static void multipathd (void *data)
 		rdev = NULL;
 		if (multipath_map (mddev, &rdev)<0) {
 			printk(IO_ERROR,
-				bdev_partition_name(bio->bi_bdev), bio->bi_sector);
+				bdev_partition_name(bio->bi_bdev), (unsigned long long)bio->bi_sector);
 			multipath_end_bh_io(mp_bh, 0);
 		} else {
 			printk(REDIRECT_SECTOR,
-				bdev_partition_name(bio->bi_bdev), bio->bi_sector);
+				bdev_partition_name(bio->bi_bdev), (unsigned long long)bio->bi_sector);
 			bio->bi_bdev = rdev->bdev;
 			generic_make_request(bio);
 		}
......
@@ -30,7 +30,7 @@
 static int create_strip_zones (mddev_t *mddev)
 {
 	int i, c, j;
-	unsigned long current_offset, curr_zone_offset;
+	sector_t current_offset, curr_zone_offset;
 	raid0_conf_t *conf = mddev_to_conf(mddev);
 	mdk_rdev_t *smallest, *rdev1, *rdev2, *rdev;
 	struct list_head *tmp1, *tmp2;
@@ -46,9 +46,9 @@ static int create_strip_zones (mddev_t *mddev)
 		printk("raid0: looking at %s\n", bdev_partition_name(rdev1->bdev));
 		c = 0;
 		ITERATE_RDEV(mddev,rdev2,tmp2) {
-			printk("raid0: comparing %s(%ld) with %s(%ld)\n",
-				bdev_partition_name(rdev1->bdev), rdev1->size,
-				bdev_partition_name(rdev2->bdev), rdev2->size);
+			printk("raid0: comparing %s(%llu) with %s(%llu)\n",
+				bdev_partition_name(rdev1->bdev), (unsigned long long)rdev1->size,
+				bdev_partition_name(rdev2->bdev), (unsigned long long)rdev2->size);
 			if (rdev2 == rdev1) {
 				printk("raid0: END\n");
 				break;
@@ -135,7 +135,8 @@ static int create_strip_zones (mddev_t *mddev)
 				c++;
 				if (!smallest || (rdev->size <smallest->size)) {
 					smallest = rdev;
-					printk(" (%ld) is smallest!.\n", rdev->size);
+					printk(" (%llu) is smallest!.\n",
+						(unsigned long long)rdev->size);
 				}
 			} else
 				printk(" nope.\n");
@@ -152,7 +153,7 @@ static int create_strip_zones (mddev_t *mddev)
 		curr_zone_offset += zone->size;

 		current_offset = smallest->size;
-		printk("raid0: current zone offset: %ld\n", current_offset);
+		printk("raid0: current zone offset: %llu\n", (unsigned long long)current_offset);
 	}
 	printk("raid0: done.\n");
 	return 0;
@@ -163,7 +164,9 @@ static int create_strip_zones (mddev_t *mddev)
 static int raid0_run (mddev_t *mddev)
 {
-	unsigned long cur=0, i=0, size, zone0_size, nb_zone;
+	unsigned cur=0, i=0, nb_zone;
+	sector_t zone0_size;
+	s64 size;
 	raid0_conf_t *conf;
 	MOD_INC_USE_COUNT;
@@ -176,16 +179,15 @@ static int raid0_run (mddev_t *mddev)
 	if (create_strip_zones (mddev))
 		goto out_free_conf;

-	printk("raid0 : md_size is %d blocks.\n", md_size[mdidx(mddev)]);
+	printk("raid0 : md_size is %llu blocks.\n", (unsigned long long)md_size[mdidx(mddev)]);
 	printk("raid0 : conf->smallest->size is %ld blocks.\n", conf->smallest->size);
 	nb_zone = md_size[mdidx(mddev)]/conf->smallest->size +
 		(md_size[mdidx(mddev)] % conf->smallest->size ? 1 : 0);
-	printk("raid0 : nb_zone is %ld.\n", nb_zone);
+	printk("raid0 : nb_zone is %d.\n", nb_zone);
 	conf->nr_zones = nb_zone;

-	printk("raid0 : Allocating %ld bytes for hash.\n",
+	printk("raid0 : Allocating %d bytes for hash.\n",
 		nb_zone*sizeof(struct raid0_hash));
 	conf->hash_table = vmalloc (sizeof (struct raid0_hash)*nb_zone);
 	if (!conf->hash_table)
 		goto out_free_zone_conf;
@@ -269,7 +271,8 @@ static int raid0_make_request (request_queue_t *q, struct bio *bio)
 	struct raid0_hash *hash;
 	struct strip_zone *zone;
 	mdk_rdev_t *tmp_dev;
-	unsigned long chunk, block, rsect;
+	unsigned long chunk;
+	sector_t block, rsect;

 	chunk_size = mddev->chunk_size >> 10;
 	chunksize_bits = ffz(~chunk_size);
@@ -312,16 +315,16 @@ static int raid0_make_request (request_queue_t *q, struct bio *bio)
 	return 1;

 bad_map:
-	printk ("raid0_make_request bug: can't convert block across chunks or bigger than %dk %ld %d\n", chunk_size, bio->bi_sector, bio->bi_size >> 10);
+	printk ("raid0_make_request bug: can't convert block across chunks or bigger than %dk %llu %d\n", chunk_size, (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
 	goto outerr;
 bad_hash:
-	printk("raid0_make_request bug: hash==NULL for block %ld\n", block);
+	printk("raid0_make_request bug: hash==NULL for block %llu\n", (unsigned long long)block);
 	goto outerr;
 bad_zone0:
-	printk ("raid0_make_request bug: hash->zone0==NULL for block %ld\n", block);
+	printk ("raid0_make_request bug: hash->zone0==NULL for block %llu\n", (unsigned long long)block);
 	goto outerr;
 bad_zone1:
-	printk ("raid0_make_request bug: hash->zone1==NULL for block %ld\n", block);
+	printk ("raid0_make_request bug: hash->zone1==NULL for block %llu\n", (unsigned long long)block);
 outerr:
 	bio_io_error(bio, bio->bi_size);
 	return 0;
......
@@ -298,8 +298,8 @@ static int end_request(struct bio *bio, unsigned int bytes_done, int error)
 		/*
 		 * oops, read error:
 		 */
-		printk(KERN_ERR "raid1: %s: rescheduling sector %lu\n",
-			bdev_partition_name(conf->mirrors[mirror].rdev->bdev), r1_bio->sector);
+		printk(KERN_ERR "raid1: %s: rescheduling sector %llu\n",
+			bdev_partition_name(conf->mirrors[mirror].rdev->bdev), (unsigned long long)r1_bio->sector);
 		reschedule_retry(r1_bio);
 	}
 } else {
@@ -747,10 +747,10 @@ static int raid1_remove_disk(mddev_t *mddev, int number)
 }

 #define IO_ERROR KERN_ALERT \
-"raid1: %s: unrecoverable I/O read error for block %lu\n"
+"raid1: %s: unrecoverable I/O read error for block %llu\n"

 #define REDIRECT_SECTOR KERN_ERR \
-"raid1: %s: redirecting sector %lu to another mirror\n"
+"raid1: %s: redirecting sector %llu to another mirror\n"

 static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error)
 {
@@ -827,7 +827,7 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
 		 * There is no point trying a read-for-reconstruct as
 		 * reconstruct is about to be aborted
 		 */
-		printk(IO_ERROR, bdev_partition_name(bio->bi_bdev), r1_bio->sector);
+		printk(IO_ERROR, bdev_partition_name(bio->bi_bdev), (unsigned long long)r1_bio->sector);
 		md_done_sync(mddev, r1_bio->master_bio->bi_size >> 9, 0);
 		resume_device(conf);
 		put_buf(r1_bio);
@@ -878,7 +878,8 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
 		 * Nowhere to write this to... I guess we
 		 * must be done
 		 */
-		printk(KERN_ALERT "raid1: sync aborting as there is nowhere to write sector %lu\n", r1_bio->sector);
+		printk(KERN_ALERT "raid1: sync aborting as there is nowhere to write sector %llu\n",
+			(unsigned long long)r1_bio->sector);
 		md_done_sync(mddev, r1_bio->master_bio->bi_size >> 9, 0);
 		resume_device(conf);
 		put_buf(r1_bio);
@@ -931,12 +932,12 @@ static void raid1d(void *data)
 		case READ:
 		case READA:
 			if (map(mddev, &rdev) == -1) {
-				printk(IO_ERROR, bdev_partition_name(bio->bi_bdev), r1_bio->sector);
+				printk(IO_ERROR, bdev_partition_name(bio->bi_bdev), (unsigned long long)r1_bio->sector);
 				raid_end_bio_io(r1_bio, 0);
 				break;
 			}
 			printk(REDIRECT_SECTOR,
-				bdev_partition_name(rdev->bdev), r1_bio->sector);
+				bdev_partition_name(rdev->bdev), (unsigned long long)r1_bio->sector);
 			bio->bi_bdev = rdev->bdev;
 			bio->bi_sector = r1_bio->sector;
 			bio->bi_rw = r1_bio->cmd;
......
@@ -96,7 +96,7 @@ static void release_stripe(struct stripe_head *sh)

 static void remove_hash(struct stripe_head *sh)
 {
-	PRINTK("remove_hash(), stripe %lu\n", sh->sector);
+	PRINTK("remove_hash(), stripe %llu\n", (unsigned long long)sh->sector);

 	if (sh->hash_pprev) {
 		if (sh->hash_next)
@@ -110,7 +110,7 @@ static __inline__ void insert_hash(raid5_conf_t *conf, struct stripe_head *sh)
 {
 	struct stripe_head **shp = &stripe_hash(conf, sh->sector);

-	PRINTK("insert_hash(), stripe %lu\n",sh->sector);
+	PRINTK("insert_hash(), stripe %llu\n", (unsigned long long)sh->sector);

 	CHECK_DEVLOCK();
 	if ((sh->hash_next = *shp) != NULL)
@@ -180,7 +180,7 @@ static inline void init_stripe(struct stripe_head *sh, unsigned long sector, int
 		BUG();

 	CHECK_DEVLOCK();
-	PRINTK("init_stripe called, stripe %lu\n", sh->sector);
+	PRINTK("init_stripe called, stripe %llu\n", (unsigned long long)sh->sector);

 	remove_hash(sh);
@@ -193,8 +193,8 @@ static inline void init_stripe(struct stripe_head *sh, unsigned long sector, int
 		if (dev->toread || dev->towrite || dev->written ||
 		    test_bit(R5_LOCKED, &dev->flags)) {
-			printk("sector=%lx i=%d %p %p %p %d\n",
-				sh->sector, i, dev->toread,
+			printk("sector=%llx i=%d %p %p %p %d\n",
+				(unsigned long long)sh->sector, i, dev->toread,
 				dev->towrite, dev->written,
 				test_bit(R5_LOCKED, &dev->flags));
 			BUG();
@@ -336,7 +336,7 @@ static int raid5_end_read_request (struct bio * bi, unsigned int bytes_done,
 		if (bi == &sh->dev[i].req)
 			break;

-	PRINTK("end_read_request %lu/%d, count: %d, uptodate %d.\n", sh->sector, i, atomic_read(&sh->count), uptodate);
+	PRINTK("end_read_request %llu/%d, count: %d, uptodate %d.\n", (unsigned long long)sh->sector, i, atomic_read(&sh->count), uptodate);
 	if (i == disks) {
 		BUG();
 		return 0;
@@ -407,7 +407,7 @@ static int raid5_end_write_request (struct bio *bi, unsigned int bytes_done,
 		if (bi == &sh->dev[i].req)
 			break;

-	PRINTK("end_write_request %lu/%d, count %d, uptodate: %d.\n", sh->sector, i, atomic_read(&sh->count), uptodate);
+	PRINTK("end_write_request %llu/%d, count %d, uptodate: %d.\n", (unsigned long long)sh->sector, i, atomic_read(&sh->count), uptodate);
 	if (i == disks) {
 		BUG();
 		return 0;
@@ -427,7 +427,7 @@ static int raid5_end_write_request (struct bio *bi, unsigned int bytes_done,
 }

-static unsigned long compute_blocknr(struct stripe_head *sh, int i);
+static sector_t compute_blocknr(struct stripe_head *sh, int i);

 static void raid5_build_block (struct stripe_head *sh, int i)
 {
@@ -648,7 +648,7 @@ static void compute_block(struct stripe_head *sh, int dd_idx)
 	int i, count, disks = conf->raid_disks;
 	void *ptr[MAX_XOR_BLOCKS], *p;

-	PRINTK("compute_block, stripe %lu, idx %d\n", sh->sector, dd_idx);
+	PRINTK("compute_block, stripe %llu, idx %d\n", (unsigned long long)sh->sector, dd_idx);

 	ptr[0] = page_address(sh->dev[dd_idx].page);
 	memset(ptr[0], 0, STRIPE_SIZE);
@@ -660,7 +660,7 @@ static void compute_block(struct stripe_head *sh, int dd_idx)
 		if (test_bit(R5_UPTODATE, &sh->dev[i].flags))
 			ptr[count++] = p;
 		else
-			printk("compute_block() %d, stripe %lu, %d not present\n", dd_idx, sh->sector, i);
+			printk("compute_block() %d, stripe %llu, %d not present\n", dd_idx, (unsigned long long)sh->sector, i);

 		check_xor();
 	}
@@ -676,7 +676,7 @@ static void compute_parity(struct stripe_head *sh, int method)
 	void *ptr[MAX_XOR_BLOCKS];
 	struct bio *chosen[MD_SB_DISKS];

-	PRINTK("compute_parity, stripe %lu, method %d\n", sh->sector, method);
+	PRINTK("compute_parity, stripe %llu, method %d\n", (unsigned long long)sh->sector, method);
 	memset(chosen, 0, sizeof(chosen));
 	count = 1;
@@ -762,7 +762,7 @@ static void add_stripe_bio (struct stripe_head *sh, struct bio *bi, int dd_idx,
 	struct bio **bip;
 	raid5_conf_t *conf = sh->raid_conf;

-	PRINTK("adding bh b#%lu to stripe s#%lu\n", bi->bi_sector, sh->sector);
+	PRINTK("adding bh b#%llu to stripe s#%llu\n", (unsigned long long)bi->bi_sector, (unsigned long long)sh->sector);

 	spin_lock(&sh->lock);
@@ -783,7 +783,7 @@ static void add_stripe_bio (struct stripe_head *sh, struct bio *bi, int dd_idx,
 	spin_unlock_irq(&conf->device_lock);
 	spin_unlock(&sh->lock);

-	PRINTK("added bi b#%lu to stripe s#%lu, disk %d.\n", bi->bi_sector, sh->sector, dd_idx);
+	PRINTK("added bi b#%llu to stripe s#%llu, disk %d.\n", (unsigned long long)bi->bi_sector, (unsigned long long)sh->sector, dd_idx);

 	if (forwrite) {
 		/* check if page is coverred */
@@ -831,7 +831,7 @@ static void handle_stripe(struct stripe_head *sh)
 	int failed_num=0;
 	struct r5dev *dev;

-	PRINTK("handling stripe %ld, cnt=%d, pd_idx=%d\n", sh->sector, atomic_read(&sh->count), sh->pd_idx);
+	PRINTK("handling stripe %llu, cnt=%d, pd_idx=%d\n", (unsigned long long)sh->sector, atomic_read(&sh->count), sh->pd_idx);
 	spin_lock(&sh->lock);
 	clear_bit(STRIPE_HANDLE, &sh->state);
@@ -1035,7 +1035,7 @@ static void handle_stripe(struct stripe_head *sh)
 			else rcw += 2*disks;
 		}
 	}
-	PRINTK("for sector %ld, rmw=%d rcw=%d\n", sh->sector, rmw, rcw);
+	PRINTK("for sector %llu, rmw=%d rcw=%d\n", (unsigned long long)sh->sector, rmw, rcw);
 	set_bit(STRIPE_HANDLE, &sh->state);
 	if (rmw < rcw && rmw > 0)
 		/* prefer read-modify-write, but need to get some data */
@@ -1178,7 +1178,7 @@ static void handle_stripe(struct stripe_head *sh)
 			md_sync_acct(rdev, STRIPE_SECTORS);

 			bi->bi_bdev = rdev->bdev;
-			PRINTK("for %ld schedule op %ld on disc %d\n", sh->sector, bi->bi_rw, i);
+			PRINTK("for %llu schedule op %ld on disc %d\n", (unsigned long long)sh->sector, bi->bi_rw, i);
 			atomic_inc(&sh->count);
 			bi->bi_sector = sh->sector;
 			bi->bi_flags = 1 << BIO_UPTODATE;
@@ -1189,7 +1189,7 @@ static void handle_stripe(struct stripe_head *sh)
 			bi->bi_next = NULL;
 			generic_make_request(bi);
 		} else {
-			PRINTK("skip op %ld on disc %d for sector %ld\n", bi->bi_rw, i, sh->sector);
+			PRINTK("skip op %ld on disc %d for sector %llu\n", bi->bi_rw, i, (unsigned long long)sh->sector);
 			clear_bit(R5_LOCKED, &dev->flags);
 			set_bit(STRIPE_HANDLE, &sh->state);
 		}
@@ -1510,9 +1510,9 @@ static void print_sh (struct stripe_head *sh)
 {
 	int i;

-	printk("sh %lu, pd_idx %d, state %ld.\n", sh->sector, sh->pd_idx, sh->state);
-	printk("sh %lu, count %d.\n", sh->sector, atomic_read(&sh->count));
-	printk("sh %lu, ", sh->sector);
+	printk("sh %llu, pd_idx %d, state %ld.\n", (unsigned long long)sh->sector, sh->pd_idx, sh->state);
+	printk("sh %llu, count %d.\n", (unsigned long long)sh->sector, atomic_read(&sh->count));
+	printk("sh %llu, ", (unsigned long long)sh->sector);
 	for (i = 0; i < sh->raid_conf->raid_disks; i++) {
 		printk("(cache%d: %p %ld) ", i, sh->dev[i].page, sh->dev[i].flags);
 	}
......
@@ -33,7 +33,7 @@ struct loop_device {
 	int lo_flags;
 	int (*transfer)(struct loop_device *, int cmd,
 			char *raw_buf, char *loop_buf, int size,
-			int real_block);
+			sector_t real_block);
 	char lo_name[LO_NAME_SIZE];
 	char lo_encrypt_key[LO_KEY_SIZE];
 	__u32 lo_init[2];
@@ -123,7 +123,7 @@ struct loop_info {
 struct loop_func_table {
 	int number;	/* filter type */
 	int (*transfer)(struct loop_device *lo, int cmd, char *raw_buf,
-			char *loop_buf, int size, int real_block);
+			char *loop_buf, int size, sector_t real_block);
 	int (*init)(struct loop_device *, struct loop_info *);
 	/* release is called from loop_unregister_transfer or clr_fd */
 	int (*release)(struct loop_device *);
......
@@ -60,7 +60,7 @@
 #define MD_MINOR_VERSION 90
 #define MD_PATCHLEVEL_VERSION 0

-extern int md_size[MAX_MD_DEVS];
+extern sector_t md_size[MAX_MD_DEVS];

 extern inline char * bdev_partition_name (struct block_device *bdev)
 {
......
@@ -144,7 +144,7 @@ struct mdk_rdev_s
 {
 	struct list_head same_set;	/* RAID devices within the same set */

-	unsigned long size;		/* Device size (in blocks) */
+	sector_t size;			/* Device size (in blocks) */
 	mddev_t *mddev;			/* RAID array if running */
 	unsigned long last_events;	/* IO event timestamp */
@@ -152,7 +152,7 @@ struct mdk_rdev_s
 	struct page *sb_page;
 	mdp_super_t *sb;
-	unsigned long sb_offset;
+	sector_t sb_offset;

 	/* A device can be in one of three states based on two flags:
 	 * Not working: faulty==1 in_sync==0
@@ -350,5 +350,5 @@ do { \
 	__wait_disk_event(wq, condition); \
 } while (0)

 #endif
@@ -5,9 +5,9 @@
 struct strip_zone
 {
-	unsigned long zone_offset;	/* Zone offset in md_dev */
-	unsigned long dev_offset;	/* Zone offset in real dev */
-	unsigned long size;		/* Zone size */
+	sector_t zone_offset;		/* Zone offset in md_dev */
+	sector_t dev_offset;		/* Zone offset in real dev */
+	sector_t size;			/* Zone size */
 	int nb_dev;			/* # of devices attached to the zone */
 	mdk_rdev_t *dev[MD_SB_DISKS];	/* Devices attached to the zone */
 };
......