Commit 98e6f109 authored by Jens Axboe, committed by Linus Torvalds

[PATCH] kill old stuff

and fix the start/stop thing as well. I think this is all of them.
parent 20189bf5
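
In practical terms, the patch changes the locking contract of the queue start/stop API: blk_stop_queue() and blk_start_queue() must now be called with q->queue_lock held, __blk_stop_queue() is removed, and blk_queue_assign_lock() is gone. Since the block layer invokes a driver's request_fn with the queue lock already held, the driver can stop the queue directly from there. A minimal sketch of the resulting driver-side pattern (the mydev_* names and helpers are hypothetical, not part of this patch):

	/* Hypothetical driver request function under the new contract. */
	static void mydev_request_fn(request_queue_t *q)
	{
		struct mydev *h = q->queuedata;
		struct request *rq;

		/*
		 * request_fn runs with q->queue_lock held, so
		 * blk_stop_queue() can be called directly; the old
		 * blk_stop_queue()/__blk_stop_queue() split is gone.
		 */
		while ((rq = elv_next_request(q)) != NULL) {
			if (mydev_hw_busy(h)) {
				blk_stop_queue(q);	/* lock already held */
				break;
			}
			blkdev_dequeue_request(rq);
			mydev_submit(h, rq);
		}
	}
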
drivers/block/cciss.c
@@ -1961,7 +1961,7 @@ static void do_cciss_request(request_queue_t *q)
 		goto queue;
 startio:
-	__blk_stop_queue(q);
+	blk_stop_queue(q);
 	start_io(h);
 }
@@ -2021,8 +2021,8 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs)
 	/*
 	 * See if we can queue up some more IO
 	 */
-	spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
 	blk_start_queue(&h->queue);
+	spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
 	return IRQ_HANDLED;
 }

 /*
drivers/block/ll_rw_blk.c
@@ -391,12 +391,6 @@ void blk_queue_dma_alignment(request_queue_t *q, int mask)
 	q->dma_alignment = mask;
 }

-void blk_queue_assign_lock(request_queue_t *q, spinlock_t *lock)
-{
-	spin_lock_init(lock);
-	q->queue_lock = lock;
-}
-
 /**
  * blk_queue_find_tag - find a request by its tag and queue
  *
@@ -1076,30 +1070,12 @@ static void blk_unplug_timeout(unsigned long data)
  * blk_start_queue() will clear the stop flag on the queue, and call
  * the request_fn for the queue if it was in a stopped state when
  * entered. Also see blk_stop_queue(). Must not be called from driver
- * request function due to recursion issues.
+ * request function due to recursion issues. Queue lock must be held.
  **/
 void blk_start_queue(request_queue_t *q)
 {
-	if (test_and_clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags)) {
-		unsigned long flags;
-
-		spin_lock_irqsave(q->queue_lock, flags);
-		if (!elv_queue_empty(q))
-			q->request_fn(q);
-		spin_unlock_irqrestore(q->queue_lock, flags);
-	}
-}
-
-/**
- * __blk_stop_queue: see blk_stop_queue()
- *
- * Description:
- *  Like blk_stop_queue(), but queue_lock must be held
- **/
-void __blk_stop_queue(request_queue_t *q)
-{
-	blk_remove_plug(q);
-	set_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
+	if (test_and_clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags))
+		schedule_work(&q->unplug_work);
 }

 /**
@@ -1114,15 +1090,12 @@ void __blk_stop_queue(request_queue_t *q)
  * or if it simply chooses not to queue more I/O at one point, it can
  * call this function to prevent the request_fn from being called until
  * the driver has signalled it's ready to go again. This happens by calling
- * blk_start_queue() to restart queue operations.
+ * blk_start_queue() to restart queue operations. Queue lock must be held.
  **/
 void blk_stop_queue(request_queue_t *q)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(q->queue_lock, flags);
-	__blk_stop_queue(q);
-	spin_unlock_irqrestore(q->queue_lock, flags);
+	blk_remove_plug(q);
+	set_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
 }

 /**
@@ -2364,7 +2337,6 @@ EXPORT_SYMBOL(blk_rq_map_sg);
 EXPORT_SYMBOL(blk_nohighio);
 EXPORT_SYMBOL(blk_dump_rq_flags);
 EXPORT_SYMBOL(submit_bio);
-EXPORT_SYMBOL(blk_queue_assign_lock);
 EXPORT_SYMBOL(blk_phys_contig_segment);
 EXPORT_SYMBOL(blk_hw_contig_segment);
 EXPORT_SYMBOL(blk_get_request);
@@ -2383,7 +2355,6 @@ EXPORT_SYMBOL(blk_queue_invalidate_tags);
 EXPORT_SYMBOL(blk_start_queue);
 EXPORT_SYMBOL(blk_stop_queue);
-EXPORT_SYMBOL(__blk_stop_queue);
 EXPORT_SYMBOL(blk_run_queue);
 EXPORT_SYMBOL(blk_run_queues);
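
On the restart side, blk_start_queue() now only clears QUEUE_FLAG_STOPPED and defers the actual request_fn invocation to the queue's unplug work via schedule_work(), which is what removes the old recursion concern and makes it cheap to call from interrupt context. Because the caller must hold the queue lock, a completion path looks roughly like the cciss interrupt-handler change above. A sketch, again with hypothetical mydev_* names and a queue embedded in the driver structure:

	static irqreturn_t mydev_intr(int irq, void *dev_id, struct pt_regs *regs)
	{
		struct mydev *h = dev_id;
		unsigned long flags;

		spin_lock_irqsave(h->queue.queue_lock, flags);
		mydev_finish_completed(h);
		/*
		 * Restart the queue *before* dropping the lock:
		 * blk_start_queue() now requires q->queue_lock to be
		 * held. It merely clears the stopped flag and schedules
		 * the unplug work, so it is safe in interrupt context.
		 */
		blk_start_queue(&h->queue);
		spin_unlock_irqrestore(h->queue.queue_lock, flags);
		return IRQ_HANDLED;
	}
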
include/linux/blkdev.h
@@ -431,7 +431,6 @@ extern void blk_queue_max_hw_segments(request_queue_t *, unsigned short);
 extern void blk_queue_max_segment_size(request_queue_t *, unsigned int);
 extern void blk_queue_hardsect_size(request_queue_t *, unsigned short);
 extern void blk_queue_segment_boundary(request_queue_t *, unsigned long);
-extern void blk_queue_assign_lock(request_queue_t *, spinlock_t *);
 extern void blk_queue_prep_rq(request_queue_t *, prep_rq_fn *pfn);
 extern void blk_queue_merge_bvec(request_queue_t *, merge_bvec_fn *);
 extern void blk_queue_dma_alignment(request_queue_t *, int);