Commit 2cd8916b authored by Alexander Viro, committed by Linus Torvalds

[PATCH] (6/6) blksize_size[] removal

 - switch blk_get_ra_pages() to struct block_device *, move its use
   from bdget() to do_open().
parent de628566
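
Note on the new interface: blk_get_ra_pages() keeps its old contract (it returns a pointer to the queue's ra_pages field, or NULL when no request queue can be located) but now takes the struct block_device * instead of a kdev_t. A minimal caller-side sketch, mirroring the NULL handling the BLKFRAGET path uses below; the helper name bdev_ra_sectors() is invented purely for illustration and assumes <linux/blkdev.h> and <linux/pagemap.h>:

        /* Illustrative sketch only: report a device's current readahead
         * window in 512-byte sectors via the bdev-based interface. */
        static long bdev_ra_sectors(struct block_device *bdev)
        {
                unsigned long *ra_pages = blk_get_ra_pages(bdev);

                if (ra_pages == NULL)   /* no request queue behind this bdev */
                        return -ENOTTY;
                return (*ra_pages * PAGE_CACHE_SIZE) / 512;
        }
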
@@ -241,7 +241,7 @@ int blk_ioctl(struct block_device *bdev, unsigned int cmd, unsigned long arg)
         case BLKFRASET:
                 if(!capable(CAP_SYS_ADMIN))
                         return -EACCES;
-                ra_pages = blk_get_ra_pages(dev);
+                ra_pages = blk_get_ra_pages(bdev);
                 if (ra_pages == NULL)
                         return -ENOTTY;
                 *ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
@@ -251,7 +251,7 @@ int blk_ioctl(struct block_device *bdev, unsigned int cmd, unsigned long arg)
         case BLKFRAGET:
                 if (!arg)
                         return -EINVAL;
-                ra_pages = blk_get_ra_pages(dev);
+                ra_pages = blk_get_ra_pages(bdev);
                 if (ra_pages == NULL)
                         return -ENOTTY;
                 return put_user((*ra_pages * PAGE_CACHE_SIZE) / 512,
......
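
The BLKFRASET/BLKFRAGET arithmetic above is untouched by this patch; only the queue lookup changes. Worked through with assumed numbers on a 4K-page configuration (PAGE_CACHE_SIZE == PAGE_SIZE == 4096), purely for illustration:

        /* Illustration only: the ioctls speak 512-byte sectors, while the
         * queue stores the readahead window in page-cache pages. */
        unsigned long arg = 256;                               /* 256 sectors = 128KB requested */
        unsigned long pages = (arg * 512) / PAGE_CACHE_SIZE;   /* 131072 / 4096 = 32 pages */
        unsigned long back = (pages * PAGE_CACHE_SIZE) / 512;  /* BLKFRAGET reports 256 again */
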
@@ -108,10 +108,10 @@ inline request_queue_t *blk_get_queue(kdev_t dev)
  *
  * Will return NULL if the request queue cannot be located.
  */
-unsigned long *blk_get_ra_pages(kdev_t dev)
+unsigned long *blk_get_ra_pages(struct block_device *bdev)
 {
         unsigned long *ret = NULL;
-        request_queue_t *q = blk_get_queue(dev);
+        request_queue_t *q = blk_get_queue(to_kdev_t(bdev->bd_dev));
         if (q)
                 ret = &q->ra_pages;
......
@@ -1578,7 +1578,7 @@ static int device_size_calculation(mddev_t * mddev)
         md_size[mdidx(mddev)] = sb->size * data_disks;
         readahead = (VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
-        ra_pages = blk_get_ra_pages(rdev->dev);
+        ra_pages = blk_get_ra_pages(rdev->bdev);
         if (ra_pages)
                 readahead = (*ra_pages * PAGE_CACHE_SIZE) / PAGE_SIZE;
         if (!sb->level || (sb->level == 4) || (sb->level == 5)) {
......
@@ -319,7 +319,6 @@ struct block_device *bdget(dev_t dev)
         struct inode *inode = new_inode(bd_mnt->mnt_sb);
         if (inode) {
                 kdev_t kdev = to_kdev_t(dev);
-                unsigned long *ra_pages;
                 atomic_set(&new_bdev->bd_count,1);
                 new_bdev->bd_dev = dev;
@@ -332,10 +331,7 @@ struct block_device *bdget(dev_t dev)
                 inode->i_bdev = new_bdev;
                 inode->i_data.a_ops = &def_blk_aops;
                 inode->i_data.gfp_mask = GFP_USER;
-                ra_pages = blk_get_ra_pages(kdev);
-                if (ra_pages == NULL)
-                        ra_pages = &default_ra_pages;
-                inode->i_data.ra_pages = ra_pages;
+                inode->i_data.ra_pages = &default_ra_pages;
                 spin_lock(&bdev_lock);
                 bdev = bdfind(dev, head);
                 if (!bdev) {
@@ -598,6 +594,12 @@ static int do_open(struct block_device *bdev, struct inode *inode, struct file *
                         }
                 }
         }
+        if (bdev->bd_inode->i_data.ra_pages == &default_ra_pages) {
+                unsigned long *ra_pages = blk_get_ra_pages(bdev);
+                if (ra_pages == NULL)
+                        ra_pages = &default_ra_pages;
+                inode->i_data.ra_pages = ra_pages;
+        }
         if (bdev->bd_op->open) {
                 ret = bdev->bd_op->open(inode, file);
                 if (ret)
@@ -622,6 +624,7 @@ static int do_open(struct block_device *bdev, struct inode *inode, struct file *
 out2:
         if (!bdev->bd_openers) {
                 bdev->bd_op = NULL;
+                bdev->bd_inode->i_data.ra_pages = &default_ra_pages;
                 if (bdev != bdev->bd_contains) {
                         blkdev_put(bdev->bd_contains, BDEV_RAW);
                         bdev->bd_contains = NULL;
@@ -695,6 +698,7 @@ int blkdev_put(struct block_device *bdev, int kind)
         __MOD_DEC_USE_COUNT(bdev->bd_op->owner);
         if (!bdev->bd_openers) {
                 bdev->bd_op = NULL;
+                bdev->bd_inode->i_data.ra_pages = &default_ra_pages;
                 if (bdev != bdev->bd_contains) {
                         blkdev_put(bdev->bd_contains, BDEV_RAW);
                         bdev->bd_contains = NULL;
......
@@ -310,7 +310,7 @@ extern void blk_queue_hardsect_size(request_queue_t *q, unsigned short);
 extern void blk_queue_segment_boundary(request_queue_t *q, unsigned long);
 extern void blk_queue_assign_lock(request_queue_t *q, spinlock_t *);
 extern void blk_queue_prep_rq(request_queue_t *q, prep_rq_fn *pfn);
-extern unsigned long *blk_get_ra_pages(kdev_t kdev);
+extern unsigned long *blk_get_ra_pages(struct block_device *bdev);
 extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);
 extern void blk_dump_rq_flags(struct request *, char *);
......
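
Taken together, the bdget()/do_open()/blkdev_put() hunks give the mapping's ra_pages pointer a simple lifecycle: bdget() starts it at &default_ra_pages, an open that still finds the default swaps in the queue's value through blk_get_ra_pages(bdev), and the last close (the out2 path in do_open() and blkdev_put()) points it back at the default. Since the function returns &q->ra_pages, writing through the pointer updates the queue directly; a sketch, assuming the device actually has a request queue:

        /* Sketch only: an update through the returned pointer is
         * immediately visible to the readahead code; there is no
         * separate "set" entry point. */
        unsigned long *ra_pages = blk_get_ra_pages(bdev);

        if (ra_pages)
                *ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;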