Commit b35ed010 authored by Stephen Lord, committed by Nathan Scott

[XFS] clean up use of run_task_queue in xfs

SGI Modid: 2.5.x-xfs:slinx:133254a
parent 895b494c
......@@ -1124,7 +1124,7 @@ _pagebuf_wait_unpin(
if (atomic_read(&PBP(pb)->pb_pin_count) == 0) {
break;
}
blk_run_queues();
pagebuf_run_task_queue(pb);
schedule();
}
remove_wait_queue(&PBP(pb)->pb_waiters, &wait);
......@@ -1162,7 +1162,8 @@ pagebuf_iodone_work(
void
pagebuf_iodone(
page_buf_t *pb)
page_buf_t *pb,
int schedule)
{
pb->pb_flags &= ~(PBF_READ | PBF_WRITE);
if (pb->pb_error == 0) {
......@@ -1172,8 +1173,12 @@ pagebuf_iodone(
PB_TRACE(pb, PB_TRACE_REC(done), pb->pb_iodone);
if ((pb->pb_iodone) || (pb->pb_flags & PBF_ASYNC)) {
INIT_WORK(&pb->pb_iodone_work, pagebuf_iodone_work, pb);
queue_work(pagebuf_workqueue, &pb->pb_iodone_work);
if (schedule) {
INIT_WORK(&pb->pb_iodone_work, pagebuf_iodone_work, pb);
queue_work(pagebuf_workqueue, &pb->pb_iodone_work);
} else {
pagebuf_iodone_work(pb);
}
} else {
up(&pb->pb_iodonesema);
}
......@@ -1291,9 +1296,9 @@ bio_end_io_pagebuf(
}
}
if (atomic_dec_and_test(&PBP(pb)->pb_io_remaining)) {
if (atomic_dec_and_test(&pb->pb_io_remaining) == 1) {
pb->pb_locked = 0;
pagebuf_iodone(pb);
pagebuf_iodone(pb, 1);
}
bio_put(bio);
......@@ -1345,7 +1350,7 @@ pagebuf_iorequest( /* start real I/O */
* completion callout which happens before we have started
* all the I/O from calling iodone too early
*/
atomic_set(&PBP(pb)->pb_io_remaining, 1);
atomic_set(&pb->pb_io_remaining, 1);
/* Special code path for reading a sub page size pagebuf in --
* we populate up the whole page, and hence the other metadata
......@@ -1369,7 +1374,7 @@ pagebuf_iorequest( /* start real I/O */
bvec->bv_len = PAGE_CACHE_SIZE;
bvec->bv_offset = 0;
atomic_inc(&PBP(pb)->pb_io_remaining);
atomic_inc(&pb->pb_io_remaining);
submit_bio(READ, bio);
goto io_submitted;
......@@ -1402,7 +1407,7 @@ pagebuf_iorequest( /* start real I/O */
map_i = 0;
next_chunk:
atomic_inc(&PBP(pb)->pb_io_remaining);
atomic_inc(&pb->pb_io_remaining);
nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
if (nr_pages > total_nr_pages)
nr_pages = total_nr_pages;
......@@ -1444,10 +1449,8 @@ pagebuf_iorequest( /* start real I/O */
io_submitted:
if (atomic_dec_and_test(&PBP(pb)->pb_io_remaining) == 1) {
pagebuf_iodone(pb);
} else if ((pb->pb_flags & (PBF_SYNC|PBF_ASYNC)) == PBF_SYNC) {
blk_run_queues();
if (atomic_dec_and_test(&pb->pb_io_remaining) == 1) {
pagebuf_iodone(pb, 0);
}
return status < 0 ? status : 0;
......@@ -1465,7 +1468,7 @@ pagebuf_iowait(
page_buf_t *pb)
{
PB_TRACE(pb, PB_TRACE_REC(iowait), 0);
blk_run_queues();
pagebuf_run_task_queue(pb);
down(&pb->pb_iodonesema);
PB_TRACE(pb, PB_TRACE_REC(iowaited), (int)pb->pb_error);
return pb->pb_error;
......@@ -1709,10 +1712,10 @@ pagebuf_daemon(
__pagebuf_iorequest(pb);
}
if (count)
blk_run_queues();
if (as_list_len > 0)
purge_addresses();
if (count)
pagebuf_run_task_queue(NULL);
force_flush = 0;
} while (pb_daemon->active == 1);
......@@ -1783,7 +1786,7 @@ pagebuf_delwri_flush(
spin_unlock(&pb_daemon->pb_delwrite_lock);
blk_run_queues();
pagebuf_run_task_queue(NULL);
if (pinptr)
*pinptr = pincount;
......
......@@ -207,6 +207,7 @@ typedef struct page_buf_s {
size_t pb_count_desired; /* desired transfer size */
void *pb_addr; /* virtual address of buffer */
struct work_struct pb_iodone_work;
atomic_t pb_io_remaining;/* #outstanding I/O requests */
page_buf_iodone_t pb_iodone; /* I/O completion function */
page_buf_relse_t pb_relse; /* releasing function */
page_buf_bdstrat_t pb_strat; /* pre-write function */
......@@ -306,7 +307,9 @@ static inline int pagebuf_geterror(page_buf_t *pb)
}
extern void pagebuf_iodone( /* mark buffer I/O complete */
page_buf_t *); /* buffer to mark */
page_buf_t *, /* buffer to mark */
int); /* run completion locally, or in
* a helper thread. */
extern void pagebuf_ioerror( /* mark buffer in error (or not) */
page_buf_t *, /* buffer to mark */
......@@ -375,6 +378,14 @@ static __inline__ int __pagebuf_iorequest(page_buf_t *pb)
return pagebuf_iorequest(pb);
}
/*
 * Kick the block-layer request queues, but skip the (expensive) call
 * when we are asked about a specific pagebuf that has no I/O
 * outstanding.  Passing a NULL pagebuf always runs the queues.
 */
static __inline__ void pagebuf_run_task_queue(page_buf_t *pb)
{
	if ((pb == NULL) || (atomic_read(&pb->pb_io_remaining) != 0))
		blk_run_queues();
}
extern struct workqueue_struct *pagebuf_workqueue;
#endif /* __PAGE_BUF_H__ */
......@@ -52,7 +52,6 @@ typedef struct page_buf_private_s {
page_buf_t pb_common; /* public part of structure */
struct semaphore pb_sema; /* semaphore for lockables */
unsigned long pb_flushtime; /* time to flush pagebuf */
atomic_t pb_io_remaining;/* #outstanding I/O requests */
atomic_t pb_pin_count; /* pin count */
wait_queue_head_t pb_waiters; /* unpin waiters */
#ifdef PAGEBUF_LOCK_TRACKING
......
......@@ -113,8 +113,7 @@ pagebuf_lock(
ASSERT(pb->pb_flags & _PBF_LOCKABLE);
PB_TRACE(pb, PB_TRACE_REC(lock), 0);
if (atomic_read(&PBP(pb)->pb_io_remaining))
blk_run_queues();
pagebuf_run_task_queue(pb);
down(&PBP(pb)->pb_sema);
PB_SET_OWNER(pb);
PB_TRACE(pb, PB_TRACE_REC(locked), 0);
......
......@@ -210,7 +210,7 @@ static inline int xfs_bawrite(void *mp, page_buf_t *bp)
bp->pb_strat = xfs_bdstrat_cb;
xfs_buf_undelay(bp);
if ((ret = pagebuf_iostart(bp, PBF_WRITE | PBF_ASYNC)) == 0)
blk_run_queues();
pagebuf_run_task_queue(bp);
return ret;
}
......@@ -236,7 +236,7 @@ static inline void xfs_buf_relse(page_buf_t *bp)
#define xfs_biodone(pb) \
pagebuf_iodone(pb)
pagebuf_iodone(pb, 0)
#define xfs_incore(buftarg,blkno,len,lockit) \
pagebuf_find(buftarg, blkno ,len, lockit)
......@@ -265,7 +265,7 @@ static inline int XFS_bwrite(page_buf_t *pb)
error = pagebuf_iowait(pb);
xfs_buf_relse(pb);
} else {
blk_run_queues();
pagebuf_run_task_queue(pb);
error = 0;
}
......
......@@ -1824,7 +1824,8 @@ kdbm_pb(int argc, const char **argv, const char **envp, struct pt_regs *regs)
bp.pb_common.pb_bn,
(unsigned long) bp.pb_common.pb_count_desired);
kdb_printf(" pb_io_remaining %d pb_error %u\n",
bp.pb_io_remaining.counter, bp.pb_common.pb_error);
bp.pb_common.pb_io_remaining.counter,
bp.pb_common.pb_error);
kdb_printf(" pb_page_count %u pb_offset 0x%x pb_pages 0x%p\n",
bp.pb_common.pb_page_count, bp.pb_common.pb_offset,
bp.pb_common.pb_pages);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment