Commit df87ea9f authored by Alexander Viro, committed by Linus Torvalds

[PATCH] kdev_t -> bdev cleanups [1/2]

 - kill the last caller of get_hardsect_size() (switching it to
   bdev_hardsect_size()).

 - kill blk_get_queue(), switching callers to bdev_get_queue().
parent 777e760f
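
The conversion applied at each call site below follows one pattern: code that previously built a kdev_t from a struct block_device just to look up the request queue or the hardware sector size now passes the block_device pointer straight through. Here is a minimal before/after sketch; the helper functions are hypothetical and not part of the patch, and the only APIs used are the 2.5-era declarations touched in the hunks below (blk_get_queue/get_hardsect_size before, bdev_get_queue/bdev_hardsect_size after):

/*
 * Illustration only: hypothetical callers built from the interfaces this
 * patch touches. Assumes the 2.5-era block-layer headers that declare
 * request_queue_t, blk_get_queue()/bdev_get_queue(), get_hardsect_size()
 * and bdev_hardsect_size().
 */

/* Before: detour through kdev_t. */
static int check_blocksize_old(struct block_device *bdev, int size)
{
	kdev_t dev = to_kdev_t(bdev->bd_dev);
	request_queue_t *q = blk_get_queue(dev);	/* kdev_t-based queue lookup */

	if (!q)
		return -ENXIO;
	if (size < get_hardsect_size(dev))		/* kdev_t-based sector size */
		return -EINVAL;
	return 0;
}

/* After: the block_device is passed directly. */
static int check_blocksize_new(struct block_device *bdev, int size)
{
	request_queue_t *q = bdev_get_queue(bdev);	/* bdev-based queue lookup */

	if (!q)
		return -ENXIO;
	if (size < bdev_hardsect_size(bdev))		/* bdev-based sector size */
		return -EINVAL;
	return 0;
}
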
@@ -260,7 +260,7 @@ int blk_ioctl(struct block_device *bdev, unsigned int cmd, unsigned long arg)
 					(long *)arg);
 	case BLKSECTGET:
-		if ((q = blk_get_queue(dev)) == NULL)
+		if ((q = bdev_get_queue(bdev)) == NULL)
 			return -EINVAL;
 		usval = q->max_sectors;
...
@@ -56,7 +56,7 @@ int block_ioctl(struct block_device *bdev, unsigned int cmd, unsigned long arg)
 	struct request *rq;
 	int close = 0, err;

-	q = blk_get_queue(to_kdev_t(bdev->bd_dev));
+	q = bdev_get_queue(bdev);
 	if (!q)
 		return -ENXIO;
...
@@ -80,8 +80,8 @@ unsigned long blk_max_low_pfn, blk_max_pfn;
 int blk_nohighio = 0;

 /**
- * blk_get_queue: - return the queue that matches the given device
- * @dev: device
+ * bdev_get_queue: - return the queue that matches the given device
+ * @bdev: device
  *
  * Description:
  *     Given a specific device, return the queue that will hold I/O
@@ -90,12 +90,12 @@ int blk_nohighio = 0;
  *     stored in the same location.
  *
  **/
-inline request_queue_t *blk_get_queue(kdev_t dev)
+inline request_queue_t *bdev_get_queue(struct block_device *bdev)
 {
-	struct blk_dev_struct *bdev = blk_dev + major(dev);
+	kdev_t dev = to_kdev_t(bdev->bd_dev);
+	struct blk_dev_struct *p = blk_dev + major(dev);

-	if (bdev->queue)
-		return bdev->queue(dev);
+	if (p->queue)
+		return p->queue(dev);
 	else
 		return &blk_dev[major(dev)].request_queue;
 }
...
@@ -112,7 +112,7 @@ inline request_queue_t *blk_get_queue(kdev_t dev)
 struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
 {
 	struct backing_dev_info *ret = NULL;
-	request_queue_t *q = blk_get_queue(to_kdev_t(bdev->bd_dev));
+	request_queue_t *q = bdev_get_queue(bdev);

 	if (q)
 		ret = &q->backing_dev_info;
...
@@ -1482,7 +1482,7 @@ void generic_make_request(struct bio *bio)
 	 * Stacking drivers are expected to know what they are doing.
 	 */
 	do {
-		q = blk_get_queue(to_kdev_t(bio->bi_bdev->bd_dev));
+		q = bdev_get_queue(bio->bi_bdev);
 		if (!q) {
 			printk(KERN_ERR
 			       "generic_make_request: Trying to access nonexistent block-device %s (%Lu)\n",
...
@@ -1885,7 +1885,7 @@ int __init blk_dev_init(void)
 EXPORT_SYMBOL(end_that_request_first);
 EXPORT_SYMBOL(end_that_request_last);
 EXPORT_SYMBOL(blk_init_queue);
-EXPORT_SYMBOL(blk_get_queue);
+EXPORT_SYMBOL(bdev_get_queue);
 EXPORT_SYMBOL(blk_cleanup_queue);
 EXPORT_SYMBOL(blk_queue_make_request);
 EXPORT_SYMBOL(blk_queue_bounce_limit);
...
@@ -2285,7 +2285,7 @@ static int hot_generate_error(mddev_t * mddev, kdev_t dev)
 	if (!disk_active(disk))
 		return -ENODEV;

-	q = blk_get_queue(rdev->dev);
+	q = bdev_get_queue(rdev->bdev);
 	if (!q) {
 		MD_BUG();
 		return -ENODEV;
...
@@ -1241,11 +1241,6 @@ static int sd_init()
 		sd_max_sectors[k] = MAX_PHYS_SEGMENTS*8;
 	}

-	for (k = 0; k < N_USED_SD_MAJORS; k++) {
-		request_queue_t *q = blk_get_queue(mk_kdev(SD_MAJOR(k), 0));
-		blk_queue_hardsect_size(q, 512);
-	}
-
 	for (k = 0; k < N_USED_SD_MAJORS; k++) {
 		int N = SCSI_DISKS_PER_MAJOR;
...
@@ -56,14 +56,13 @@ static void kill_bdev(struct block_device *bdev)
 int set_blocksize(struct block_device *bdev, int size)
 {
 	int oldsize;
-	kdev_t dev = to_kdev_t(bdev->bd_dev);

 	/* Size must be a power of two, and between 512 and PAGE_SIZE */
 	if (size > PAGE_SIZE || size < 512 || (size & (size-1)))
 		return -EINVAL;

 	/* Size cannot be smaller than the size supported by the device */
-	if (size < get_hardsect_size(dev))
+	if (size < bdev_hardsect_size(bdev))
 		return -EINVAL;

 	oldsize = bdev->bd_block_size;
...
@@ -293,7 +293,7 @@ extern void grok_partitions(kdev_t dev, long size);
 extern int wipe_partitions(kdev_t dev);
 extern void register_disk(struct gendisk *dev, kdev_t first, unsigned minors, struct block_device_operations *ops, long size);
 extern void generic_make_request(struct bio *bio);
-extern inline request_queue_t *blk_get_queue(kdev_t dev);
+extern inline request_queue_t *bdev_get_queue(struct block_device *bdev);
 extern void blkdev_release_request(struct request *);
 extern void blk_attempt_remerge(request_queue_t *, struct request *);
 extern struct request *blk_get_request(request_queue_t *, int, int);
...
@@ -373,14 +373,9 @@ extern inline int queue_hardsect_size(request_queue_t *q)
 	return retval;
 }

-extern inline int get_hardsect_size(kdev_t dev)
-{
-	return queue_hardsect_size(blk_get_queue(dev));
-}
-
 extern inline int bdev_hardsect_size(struct block_device *bdev)
 {
-	return queue_hardsect_size(blk_get_queue(to_kdev_t(bdev->bd_dev)));
+	return queue_hardsect_size(bdev_get_queue(bdev));
 }

 #define blk_finished_io(nsects)	do { } while (0)
...