Commit dd696ac1 authored by Alexander Viro's avatar Alexander Viro Committed by Linus Torvalds

[PATCH] brw_kiovec() converted to struct block_device *

brw_kiovec() and ll_rw_kiovec() switched to struct block_device *.
parent fa65ca01
......@@ -322,7 +322,7 @@ ssize_t rw_raw_dev(int rw, struct file *filp, char *buf,
if (err)
break;
err = brw_kiovec(rw, 1, &iobuf, dev, &blocknr, sector_size);
err = brw_kiovec(rw, 1, &iobuf, raw_devices[minor].binding, &blocknr, sector_size);
if (rw == READ && err > 0)
mark_dirty_kiobuf(iobuf, err);
......
......@@ -302,6 +302,7 @@ int lvm_snapshot_COW(kdev_t org_phys_dev,
{
const char * reason;
kdev_t snap_phys_dev;
struct block_device *org_bdev, *snap_bdev;
unsigned long org_start, snap_start, virt_start, pe_off;
int idx = lv_snap->u.lv_remap_ptr, chunk_size = lv_snap->u.lv_chunk_size;
struct kiobuf * iobuf;
......@@ -321,6 +322,15 @@ int lvm_snapshot_COW(kdev_t org_phys_dev,
snap_phys_dev = lv_snap->u.lv_block_exception[idx].rdev_new;
snap_start = lv_snap->u.lv_block_exception[idx].rsector_new;
org_bdev = bdget(kdev_t_to_nr(org_phys_dev));
if (!org_bdev)
goto fail_enomem;
snap_bdev = bdget(kdev_t_to_nr(snap_phys_dev));
if (!snap_bdev) {
bdput(org_bdev);
goto fail_enomem;
}
#ifdef DEBUG_SNAPSHOT
printk(KERN_INFO
"%s -- COW: "
......@@ -356,7 +366,7 @@ int lvm_snapshot_COW(kdev_t org_phys_dev,
nr_sectors, blksize_org))
goto fail_prepare;
if (brw_kiovec(READ, 1, &iobuf, org_phys_dev,
if (brw_kiovec(READ, 1, &iobuf, org_bdev,
lv_snap->blocks, blksize_org) != (nr_sectors<<9))
goto fail_raw_read;
......@@ -364,7 +374,7 @@ int lvm_snapshot_COW(kdev_t org_phys_dev,
nr_sectors, blksize_snap))
goto fail_prepare;
if (brw_kiovec(WRITE, 1, &iobuf, snap_phys_dev,
if (brw_kiovec(WRITE, 1, &iobuf, snap_bdev,
lv_snap->blocks, blksize_snap) !=(nr_sectors<<9))
goto fail_raw_write;
}
......@@ -387,16 +397,21 @@ int lvm_snapshot_COW(kdev_t org_phys_dev,
if (lv_snap->u.lv_remap_ptr * 100 / lv_snap->u.lv_remap_end >= lv_snap->lv_snapshot_use_rate)
wake_up_interruptible(&lv_snap->lv_snapshot_wait);
}
bdput(snap_bdev);
bdput(org_bdev);
return 0;
/* slow path */
out:
bdput(snap_bdev);
bdput(org_bdev);
out1:
lvm_drop_snapshot(vg, lv_snap, reason);
return 1;
fail_out_of_space:
reason = "out of space";
goto out;
goto out1;
fail_raw_read:
reason = "read error";
goto out;
......@@ -405,7 +420,9 @@ int lvm_snapshot_COW(kdev_t org_phys_dev,
goto out;
fail_blksize:
reason = "blocksize error";
goto out;
fail_enomem:
reason = "out of memory";
goto out1;
fail_prepare:
reason = "couldn't prepare kiovec blocks "
......@@ -569,6 +586,7 @@ static int _write_COW_table_block(vg_t *vg, lv_t *lv_snap,
COW_entries_per_pe, COW_chunks_per_pe, COW_entries_per_block;
ulong blocks[1];
kdev_t snap_phys_dev;
struct block_device *bdev;
lv_block_exception_t *be;
struct kiobuf * COW_table_iobuf = lv_snap->lv_COW_table_iobuf;
lv_COW_table_disk_t * lv_COW_table =
......@@ -581,6 +599,8 @@ static int _write_COW_table_block(vg_t *vg, lv_t *lv_snap,
snap_phys_dev = lv_snap->u.lv_block_exception[idx].rdev_new;
snap_pe_start = lv_snap->u.lv_block_exception[idx - (idx % COW_entries_per_pe)].rsector_new - lv_snap->u.lv_chunk_size;
bdev = bdget(kdev_t_to_nr(snap_phys_dev));
blksize_snap = block_size(snap_phys_dev);
COW_entries_per_block = blksize_snap / sizeof(lv_COW_table_disk_t);
......@@ -611,7 +631,7 @@ static int _write_COW_table_block(vg_t *vg, lv_t *lv_snap,
COW_table_iobuf->length = blksize_snap;
if (brw_kiovec(WRITE, 1, &COW_table_iobuf, snap_phys_dev,
if (brw_kiovec(WRITE, 1, &COW_table_iobuf, bdev,
blocks, blksize_snap) != blksize_snap)
goto fail_raw_write;
......@@ -629,25 +649,30 @@ static int _write_COW_table_block(vg_t *vg, lv_t *lv_snap,
idx++;
snap_phys_dev = lv_snap->u.lv_block_exception[idx].rdev_new;
snap_pe_start = lv_snap->u.lv_block_exception[idx - (idx % COW_entries_per_pe)].rsector_new - lv_snap->u.lv_chunk_size;
bdput(bdev);
bdev = bdget(kdev_t_to_nr(snap_phys_dev));
blksize_snap = block_size(snap_phys_dev);
blocks[0] = snap_pe_start >> (blksize_snap >> 10);
} else blocks[0]++;
if (brw_kiovec(WRITE, 1, &COW_table_iobuf, snap_phys_dev,
if (brw_kiovec(WRITE, 1, &COW_table_iobuf, bdev,
blocks, blksize_snap) !=
blksize_snap)
goto fail_raw_write;
}
out:
bdput(bdev);
return 0;
fail_raw_write:
*reason = "write error";
bdput(bdev);
return 1;
fail_pv_get_number:
*reason = "_pv_get_number failed";
bdput(bdev);
return 1;
}
......
......@@ -264,7 +264,7 @@ static int blkmtd_readpage(mtd_raw_dev_data_t *rawdevice, struct page *page)
DEBUG(3, "bklmtd: readpage: starting brw_kiovec\n");
err = brw_kiovec(READ, 1, &iobuf, dev, blocks, rawdevice->sector_size);
err = brw_kiovec(READ, 1, &iobuf, rawdevice->binding, blocks, rawdevice->sector_size);
DEBUG(3, "blkmtd: readpage: finished, err = %d\n", err);
iobuf->locked = 0;
free_kiovec(1, &iobuf);
......@@ -401,7 +401,7 @@ static int write_queue_task(void *data)
iobuf->nr_pages = cpagecnt;
iobuf->length = cursectors << item->rawdevice->sector_bits;
DEBUG(3, "blkmtd: write_task: about to kiovec\n");
err = brw_kiovec(WRITE, 1, &iobuf, dev, blocks, item->rawdevice->sector_size);
err = brw_kiovec(WRITE, 1, &iobuf, item->rawdevice->binding, blocks, item->rawdevice->sector_size);
DEBUG(3, "bklmtd: write_task: done, err = %d\n", err);
if(err != (cursectors << item->rawdevice->sector_bits)) {
/* if an error occurred - set this to exit the loop */
......
......@@ -328,7 +328,7 @@ static void bio_end_io_kio(struct bio *bio)
* ll_rw_kio - submit a &struct kiobuf for I/O
* @rw: %READ or %WRITE
* @kio: the kiobuf to do I/O on
* @dev: target device
* @bdev: target device
* @sector: start location on disk
*
* Description:
......@@ -336,11 +336,12 @@ static void bio_end_io_kio(struct bio *bio)
* &struct bio and queue them for I/O. The kiobuf given must describe
* a continuous range of data, and must be fully prepared for I/O.
**/
void ll_rw_kio(int rw, struct kiobuf *kio, kdev_t dev, sector_t sector)
void ll_rw_kio(int rw, struct kiobuf *kio, struct block_device *bdev, sector_t sector)
{
int i, offset, size, err, map_i, total_nr_pages, nr_pages;
struct bio_vec *bvec;
struct bio *bio;
kdev_t dev = to_kdev_t(bdev->bd_dev);
err = 0;
if ((rw & WRITE) && is_read_only(dev)) {
......
......@@ -2122,7 +2122,7 @@ int generic_direct_IO(int rw, struct inode * inode, struct kiobuf * iobuf, unsig
}
/* This does not understand multi-device filesystems currently */
retval = brw_kiovec(rw, 1, &iobuf, inode->i_dev, blocks, blocksize);
retval = brw_kiovec(rw, 1, &iobuf, inode->i_sb->s_bdev, blocks, blocksize);
out:
return retval;
......@@ -2138,8 +2138,8 @@ int generic_direct_IO(int rw, struct inode * inode, struct kiobuf * iobuf, unsig
* It is up to the caller to make sure that there are enough blocks
* passed in to completely map the iobufs to disk.
*/
int brw_kiovec(int rw, int nr, struct kiobuf *iovec[], kdev_t dev, sector_t b[],
int size)
int brw_kiovec(int rw, int nr, struct kiobuf *iovec[],
struct block_device *bdev, sector_t b[], int size)
{
int transferred;
int i;
......@@ -2167,7 +2167,7 @@ int brw_kiovec(int rw, int nr, struct kiobuf *iovec[], kdev_t dev, sector_t b[],
iobuf = iovec[i];
iobuf->errno = 0;
ll_rw_kio(rw, iobuf, dev, b[i] * (size >> 9));
ll_rw_kio(rw, iobuf, bdev, b[i] * (size >> 9));
}
/*
......
......@@ -80,9 +80,9 @@ extern void free_kiobuf_bhs(struct kiobuf *);
/* fs/buffer.c */
int brw_kiovec(int rw, int nr, struct kiobuf *iovec[],
kdev_t dev, sector_t [], int size);
struct block_device *bdev, sector_t [], int size);
/* fs/bio.c */
void ll_rw_kio(int rw, struct kiobuf *kio, kdev_t dev, sector_t block);
void ll_rw_kio(int rw, struct kiobuf *kio, struct block_device *bdev, sector_t block);
#endif /* __LINUX_IOBUF_H */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment