Commit cc1d7840 authored by Martin Dalecki, committed by Linus Torvalds

[PATCH] 2.5.28 small REQ_SPECIAL abstraction

The attached patch does the following:

1. Remove blkdev_release_request(); blk_put_request() was just an
    unnecessary wrapper around it, likely some leftover from pre-BIO
    times. blk_put_request() now does the work itself.

2. Abstract the fine __scsi_insert_special() function out of the
    SCSI code.

    Now that I have finally managed to kill all those IDE-specific
    REQ_BLAH request types, we can do this final step, and it will
    soon be used by the ATA code as well. The goal is that
    scsi_request_fn and do_ide_request should start to look alike,
    like siblings.

    It's called blk_insert_request() now and is even documented in
    the code. (A short usage sketch follows below.)

3. Change some stuff over from extern inline to static inline in
    blkdev.h. (trivia...)

This patch doesn't change *any* functionality, so it's not exposing
SCSI to any danger :-).
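
For illustration only (not part of the patch): a minimal sketch of how a
driver that already owns a struct request, the way the SCSI midlayer owns
SCpnt->request, could feed a driver-private command through the new helper.
The my_cmd structure and my_insert_special_cmd() function are made-up names;
blk_insert_request() and the REQ_SPECIAL handling are the pieces this patch
actually provides.

#include <linux/blkdev.h>

/* hypothetical per-command bookkeeping kept by the driver */
struct my_cmd {
	struct request *request;	/* request owned by the driver */
	int opcode;			/* driver-private command code */
};

/*
 * Push a driver-private command through the request queue.  The helper
 * marks the request REQ_SPECIAL | REQ_BARRIER, stores cmd in rq->special,
 * inserts it (at the tail unless at_head is set) and kicks
 * q->request_fn().  The driver's request_fn later recognises it via
 * (rq->flags & REQ_SPECIAL) and picks cmd back up from rq->special.
 */
static int my_insert_special_cmd(request_queue_t *q, struct my_cmd *cmd,
				 int at_head)
{
	blk_insert_request(q, cmd->request, at_head, cmd);
	return 0;
}

As the new kerneldoc notes, the tail is the normal place for new ioctl-style
commands, while the head is reserved for things like a QUEUE_FULL retry that
must run before anything else.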
parent 22a22e0a
@@ -2884,7 +2884,7 @@ static boolean DAC960_ProcessRequest(DAC960_Controller_T *Controller,
 	Command->BufferHeader = Request->bio;
 	Command->RequestBuffer = Request->buffer;
 	blkdev_dequeue_request(Request);
-	blkdev_release_request(Request);
+	blk_put_request(Request);
 	DAC960_QueueReadWriteCommand(Command);
 	return true;
 }
...
@@ -1233,9 +1233,47 @@ struct request *__blk_get_request(request_queue_t *q, int rw)
 	return rq;
 }
 
-void blk_put_request(struct request *rq)
+/**
+ * blk_insert_request - insert a special request in to a request queue
+ * @q: request queue where request should be inserted
+ * @rq: request to be inserted
+ * @at_head: insert request at head or tail of queue
+ * @data: private data
+ *
+ * Description:
+ * Many block devices need to execute commands asynchronously, so they don't
+ * block the whole kernel from preemption during request execution. This is
+ * accomplished normally by inserting aritficial requests tagged as
+ * REQ_SPECIAL in to the corresponding request queue, and letting them be
+ * scheduled for actual execution by the request queue.
+ *
+ * We have the option of inserting the head or the tail of the queue.
+ * Typically we use the tail for new ioctls and so forth. We use the head
+ * of the queue for things like a QUEUE_FULL message from a device, or a
+ * host that is unable to accept a particular command.
+ */
+void blk_insert_request(request_queue_t *q, struct request *rq,
+			int at_head, void *data)
 {
-	blkdev_release_request(rq);
+	unsigned long flags;
+
+	/*
+	 * tell I/O scheduler that this isn't a regular read/write (ie it
+	 * must not attempt merges on this) and that it acts as a soft
+	 * barrier
+	 */
+	rq->flags &= REQ_QUEUED;
+	rq->flags |= REQ_SPECIAL | REQ_BARRIER;
+
+	rq->special = data;
+	spin_lock_irqsave(q->queue_lock, flags);
+
+	/* If command is tagged, release the tag */
+	if(blk_rq_tagged(rq))
+		blk_queue_end_tag(q, rq);
+
+	_elv_add_request(q, rq, !at_head, 0);
+	q->request_fn(q);
+	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
 /* RO fail safe mechanism */
@@ -1307,7 +1345,7 @@ static inline void add_request(request_queue_t * q, struct request * req,
 /*
  * Must be called with queue lock held and interrupts disabled
  */
-void blkdev_release_request(struct request *req)
+void blk_put_request(struct request *req)
 {
 	struct request_list *rl = req->rl;
 	request_queue_t *q = req->q;
@@ -1370,7 +1408,7 @@ static void attempt_merge(request_queue_t *q, struct request *req,
 	req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors;
-	blkdev_release_request(next);
+	blk_put_request(next);
 }
 }
@@ -1568,7 +1606,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 	add_request(q, req, insert_here);
 out:
 	if (freereq)
-		blkdev_release_request(freereq);
+		blk_put_request(freereq);
 	spin_unlock_irq(q->queue_lock);
 	return 0;
@@ -2003,7 +2041,7 @@ void end_that_request_last(struct request *req)
 	if (req->waiting)
 		complete(req->waiting);
-	blkdev_release_request(req);
+	blk_put_request(req);
 }
 #define MB(kb) ((kb) << 10)
@@ -2064,7 +2102,6 @@ EXPORT_SYMBOL(blk_cleanup_queue);
 EXPORT_SYMBOL(blk_queue_make_request);
 EXPORT_SYMBOL(blk_queue_bounce_limit);
 EXPORT_SYMBOL(generic_make_request);
-EXPORT_SYMBOL(blkdev_release_request);
 EXPORT_SYMBOL(generic_unplug_device);
 EXPORT_SYMBOL(blk_plug_device);
 EXPORT_SYMBOL(blk_remove_plug);
@@ -2088,6 +2125,7 @@ EXPORT_SYMBOL(blk_hw_contig_segment);
 EXPORT_SYMBOL(blk_get_request);
 EXPORT_SYMBOL(__blk_get_request);
 EXPORT_SYMBOL(blk_put_request);
+EXPORT_SYMBOL(blk_insert_request);
 EXPORT_SYMBOL(blk_queue_prep_rq);
...
@@ -50,53 +50,6 @@
  * This entire source file deals with the new queueing code.
  */
 
-/*
- * Function: __scsi_insert_special()
- *
- * Purpose: worker for scsi_insert_special_*()
- *
- * Arguments: q - request queue where request should be inserted
- *            rq - request to be inserted
- *            data - private data
- *            at_head - insert request at head or tail of queue
- *
- * Lock status: Assumed that queue lock is not held upon entry.
- *
- * Returns: Nothing
- */
-static void __scsi_insert_special(request_queue_t *q, struct request *rq,
-				  void *data, int at_head)
-{
-	unsigned long flags;
-
-	ASSERT_LOCK(q->queue_lock, 0);
-
-	/*
-	 * tell I/O scheduler that this isn't a regular read/write (ie it
-	 * must not attempt merges on this) and that it acts as a soft
-	 * barrier
-	 */
-	rq->flags &= REQ_QUEUED;
-	rq->flags |= REQ_SPECIAL | REQ_BARRIER;
-
-	rq->special = data;
-
-	/*
-	 * We have the option of inserting the head or the tail of the queue.
-	 * Typically we use the tail for new ioctls and so forth. We use the
-	 * head of the queue for things like a QUEUE_FULL message from a
-	 * device, or a host that is unable to accept a particular command.
-	 */
-	spin_lock_irqsave(q->queue_lock, flags);
-
-	/* If command is tagged, release the tag */
-	if(blk_rq_tagged(rq))
-		blk_queue_end_tag(q, rq);
-
-	_elv_add_request(q, rq, !at_head, 0);
-	q->request_fn(q);
-	spin_unlock_irqrestore(q->queue_lock, flags);
-}
-
 /*
  * Function: scsi_insert_special_cmd()
  *
@@ -121,7 +74,7 @@ int scsi_insert_special_cmd(Scsi_Cmnd * SCpnt, int at_head)
 {
 	request_queue_t *q = &SCpnt->device->request_queue;
 
-	__scsi_insert_special(q, SCpnt->request, SCpnt, at_head);
+	blk_insert_request(q, SCpnt->request, at_head, SCpnt);
 	return 0;
 }
@@ -149,7 +102,7 @@ int scsi_insert_special_req(Scsi_Request * SRpnt, int at_head)
 {
 	request_queue_t *q = &SRpnt->sr_device->request_queue;
 
-	__scsi_insert_special(q, SRpnt->sr_request, SRpnt, at_head);
+	blk_insert_request(q, SRpnt->sr_request, at_head, SRpnt);
 	return 0;
 }
...
@@ -281,12 +281,13 @@ extern int wipe_partitions(kdev_t dev);
 extern void register_disk(struct gendisk *dev, kdev_t first, unsigned minors, struct block_device_operations *ops, long size);
 extern void generic_make_request(struct bio *bio);
 extern inline request_queue_t *bdev_get_queue(struct block_device *bdev);
-extern void blkdev_release_request(struct request *);
+extern void blk_put_request(struct request *);
 extern void blk_attempt_remerge(request_queue_t *, struct request *);
 extern void __blk_attempt_remerge(request_queue_t *, struct request *);
 extern struct request *blk_get_request(request_queue_t *, int, int);
 extern struct request *__blk_get_request(request_queue_t *, int);
 extern void blk_put_request(struct request *);
+extern void blk_insert_request(request_queue_t *, struct request *, int, void *);
 extern void blk_plug_device(request_queue_t *);
 extern int blk_remove_plug(request_queue_t *);
 extern void blk_recount_segments(request_queue_t *, struct bio *);
@@ -309,20 +310,21 @@ extern int blk_init_queue(request_queue_t *, request_fn_proc *, spinlock_t *);
 extern void blk_cleanup_queue(request_queue_t *);
 extern void blk_queue_make_request(request_queue_t *, make_request_fn *);
 extern void blk_queue_bounce_limit(request_queue_t *, u64);
-extern void blk_queue_max_sectors(request_queue_t *q, unsigned short);
-extern void blk_queue_max_phys_segments(request_queue_t *q, unsigned short);
-extern void blk_queue_max_hw_segments(request_queue_t *q, unsigned short);
-extern void blk_queue_max_segment_size(request_queue_t *q, unsigned int);
-extern void blk_queue_hardsect_size(request_queue_t *q, unsigned short);
-extern void blk_queue_segment_boundary(request_queue_t *q, unsigned long);
-extern void blk_queue_assign_lock(request_queue_t *q, spinlock_t *);
-extern void blk_queue_prep_rq(request_queue_t *q, prep_rq_fn *pfn);
+extern void blk_queue_max_sectors(request_queue_t *, unsigned short);
+extern void blk_queue_max_phys_segments(request_queue_t *, unsigned short);
+extern void blk_queue_max_hw_segments(request_queue_t *, unsigned short);
+extern void blk_queue_max_segment_size(request_queue_t *, unsigned int);
+extern void blk_queue_hardsect_size(request_queue_t *, unsigned short);
+extern void blk_queue_segment_boundary(request_queue_t *, unsigned long);
+extern void blk_queue_assign_lock(request_queue_t *, spinlock_t *);
+extern void blk_queue_prep_rq(request_queue_t *, prep_rq_fn *pfn);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
 extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);
 extern void blk_dump_rq_flags(struct request *, char *);
 extern void generic_unplug_device(void *);
 
 /*
  * tag stuff
  */
@@ -348,15 +350,12 @@ extern int * blk_size[MAX_BLKDEV]; /* in units of 1024 bytes */
 extern void drive_stat_acct(struct request *, int, int);
 
-extern inline void blk_clear(int major)
+static inline void blk_clear(int major)
 {
 	blk_size[major] = NULL;
-#if 0
-	blk_size_in_bytes[major] = NULL;
-#endif
 }
 
-extern inline int queue_hardsect_size(request_queue_t *q)
+static inline int queue_hardsect_size(request_queue_t *q)
 {
 	int retval = 512;
@@ -366,7 +365,7 @@ extern inline int queue_hardsect_size(request_queue_t *q)
 	return retval;
 }
 
-extern inline int bdev_hardsect_size(struct block_device *bdev)
+static inline int bdev_hardsect_size(struct block_device *bdev)
 {
 	return queue_hardsect_size(bdev_get_queue(bdev));
 }
@@ -375,7 +374,7 @@ extern inline int bdev_hardsect_size(struct block_device *bdev)
 #define blk_started_io(nsects) do { } while (0)
 
 /* assumes size > 256 */
-extern inline unsigned int blksize_bits(unsigned int size)
+static inline unsigned int blksize_bits(unsigned int size)
 {
 	unsigned int bits = 8;
 	do {
...
@@ -61,7 +61,7 @@ nbd_end_request(struct request *req)
 		bio->bi_next = NULL;
 		bio_endio(bio, uptodate);
 	}
-	blkdev_release_request(req);
+	blk_put_request(req);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }