Commit 33c66485 authored by Andrew Morton's avatar Andrew Morton Committed by Linus Torvalds

[PATCH] Create `kblockd' workqueue

keventd is inappropriate for running block request queues because keventd
itself can get blocked on disk I/O.  Via call_usermodehelper()'s vfork and,
presumably, GFP_KERNEL allocations.

So create a new gang of kernel threads whose mandate is running low-level
disk operations.  It must never block on disk IO, so any memory allocations
should be GFP_NOIO.

We mainly use it for running unplug operations from interrupt context.
parent 3abbd8ff
...@@ -8,6 +8,11 @@ ...@@ -8,6 +8,11 @@
# In the future, some of these should be built conditionally. # In the future, some of these should be built conditionally.
# #
#
# NOTE that ll_rw_blk.c must come early in linkage order - it starts the
# kblockd threads
#
obj-y := elevator.o ll_rw_blk.o ioctl.o genhd.o scsi_ioctl.o deadline-iosched.o obj-y := elevator.o ll_rw_blk.o ioctl.o genhd.o scsi_ioctl.o deadline-iosched.o
obj-$(CONFIG_MAC_FLOPPY) += swim3.o obj-$(CONFIG_MAC_FLOPPY) += swim3.o
......
...@@ -48,9 +48,15 @@ static spinlock_t blk_plug_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED; ...@@ -48,9 +48,15 @@ static spinlock_t blk_plug_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
*/ */
static int queue_nr_requests; static int queue_nr_requests;
unsigned long blk_max_low_pfn, blk_max_pfn;
static wait_queue_head_t congestion_wqh[2]; static wait_queue_head_t congestion_wqh[2];
/*
* Controlling structure to kblockd
*/
static struct workqueue_struct *kblockd_workqueue;
unsigned long blk_max_low_pfn, blk_max_pfn;
static inline int batch_requests(void) static inline int batch_requests(void)
{ {
return min(BLKDEV_MAX_RQ / 8, 8); return min(BLKDEV_MAX_RQ / 8, 8);
...@@ -2308,10 +2314,24 @@ void blk_rq_prep_restart(struct request *rq) ...@@ -2308,10 +2314,24 @@ void blk_rq_prep_restart(struct request *rq)
rq->current_nr_sectors = rq->hard_cur_sectors; rq->current_nr_sectors = rq->hard_cur_sectors;
} }
/*
 * kblockd_schedule_work - queue a work item on the kblockd workqueue.
 * @work: work item to run in kblockd thread context
 *
 * kblockd exists so block-layer work (e.g. queue unplugging scheduled
 * from interrupt context) never runs on keventd, which can itself block
 * on disk I/O.  Returns nonzero if @work was queued, 0 if it was
 * already pending (queue_work semantics).
 */
int kblockd_schedule_work(struct work_struct *work)
{
return queue_work(kblockd_workqueue, work);
}
/*
 * kblockd_flush - wait for all currently queued kblockd work to finish.
 *
 * Blocks until every work item queued on kblockd_workqueue before this
 * call has completed.  May sleep; must not be called from interrupt
 * context (flush_workqueue requirement).
 */
void kblockd_flush(void)
{
flush_workqueue(kblockd_workqueue);
}
int __init blk_dev_init(void) int __init blk_dev_init(void)
{ {
int i; int i;
kblockd_workqueue = create_workqueue("kblockd");
if (!kblockd_workqueue)
panic("Failed to create kblockd\n");
request_cachep = kmem_cache_create("blkdev_requests", request_cachep = kmem_cache_create("blkdev_requests",
sizeof(struct request), 0, 0, NULL, NULL); sizeof(struct request), 0, 0, NULL, NULL);
if (!request_cachep) if (!request_cachep)
...@@ -2331,7 +2351,7 @@ int __init blk_dev_init(void) ...@@ -2331,7 +2351,7 @@ int __init blk_dev_init(void)
for (i = 0; i < ARRAY_SIZE(congestion_wqh); i++) for (i = 0; i < ARRAY_SIZE(congestion_wqh); i++)
init_waitqueue_head(&congestion_wqh[i]); init_waitqueue_head(&congestion_wqh[i]);
return 0; return 0;
}; }
EXPORT_SYMBOL(process_that_request_first); EXPORT_SYMBOL(process_that_request_first);
EXPORT_SYMBOL(end_that_request_first); EXPORT_SYMBOL(end_that_request_first);
......
...@@ -561,6 +561,10 @@ static inline void put_dev_sector(Sector p) ...@@ -561,6 +561,10 @@ static inline void put_dev_sector(Sector p)
page_cache_release(p.v); page_cache_release(p.v);
} }
struct work_struct;
int kblockd_schedule_work(struct work_struct *work);
void kblockd_flush(void);
#ifdef CONFIG_LBD #ifdef CONFIG_LBD
# include <asm/div64.h> # include <asm/div64.h>
# define sector_div(a, b) do_div(a, b) # define sector_div(a, b) do_div(a, b)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment