Commit 03ba3782 authored by Jens Axboe

writeback: switch to per-bdi threads for flushing data

This gets rid of pdflush for bdi writeout and kupdated style cleaning.
pdflush writeout suffers from lack of locality and also requires more
threads to handle the same workload, since it has to work in a
non-blocking fashion against each queue. This also introduces lumpy
behaviour and potential request starvation, since pdflush can be starved
for queue access if others are accessing it. A sample ffsb workload that
does random writes to files is about 8% faster here on a simple SATA drive
during the benchmark phase. File layout also looks a LOT smoother in
vmstat:

 r  b   swpd   free   buff  cache   si   so    bi    bo   in    cs us sy id wa
 0  1      0 608848   2652 375372    0    0     0 71024  604    24  1 10 48 42
 0  1      0 549644   2712 433736    0    0     0 60692  505    27  1  8 48 44
 1  0      0 476928   2784 505192    0    0     4 29540  553    24  0  9 53 37
 0  1      0 457972   2808 524008    0    0     0 54876  331    16  0  4 38 58
 0  1      0 366128   2928 614284    0    0     4 92168  710    58  0 13 53 34
 0  1      0 295092   3000 684140    0    0     0 62924  572    23  0  9 53 37
 0  1      0 236592   3064 741704    0    0     4 58256  523    17  0  8 48 44
 0  1      0 165608   3132 811464    0    0     0 57460  560    21  0  8 54 38
 0  1      0 102952   3200 873164    0    0     4 74748  540    29  1 10 48 41
 0  1      0  48604   3252 926472    0    0     0 53248  469    29  0  7 47 45

where vanilla tends to fluctuate a lot in the creation phase:

 r  b   swpd   free   buff  cache   si   so    bi    bo   in    cs us sy id wa
 1  1      0 678716   5792 303380    0    0     0 74064  565    50  1 11 52 36
 1  0      0 662488   5864 319396    0    0     4   352  302   329  0  2 47 51
 0  1      0 599312   5924 381468    0    0     0 78164  516    55  0  9 51 40
 0  1      0 519952   6008 459516    0    0     4 78156  622    56  1 11 52 37
 1  1      0 436640   6092 541632    0    0     0 82244  622    54  0 11 48 41
 0  1      0 436640   6092 541660    0    0     0     8  152    39  0  0 51 49
 0  1      0 332224   6200 644252    0    0     4 102800  728    46  1 13 49 36
 1  0      0 274492   6260 701056    0    0     4 12328  459    49  0  7 50 43
 0  1      0 211220   6324 763356    0    0     0 106940  515    37  1 10 51 39
 1  0      0 160412   6376 813468    0    0     0  8224  415    43  0  6 49 45
 1  1      0  85980   6452 886556    0    0     4 113516  575    39  1 11 54 34
 0  2      0  85968   6452 886620    0    0     0  1640  158   211  0  0 46 54

A 10 disk test with btrfs performs 26% faster with per-bdi flushing. An
SSD based writeback test on XFS performs over 20% better as well, with
the throughput being very stable around 1GB/sec, whereas pdflush only
manages 750MB/sec and fluctuates wildly while doing so. Random buffered
writes to many files behave a lot better as well, as do random mmap'ed
writes.

A separate thread is added to sync the super blocks. In the long term,
adding sync_supers_bdi() functionality could get rid of this thread again.
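
For callers, the visible change is small: sites that used to dispatch work to
the shared pdflush pool now either wake the per-bdi flusher threads directly
or hand a writeback_control to a specific bdi. Below is a minimal sketch of
the two patterns as they appear in the hunks further down; the wrapper
function names are made up for illustration and are not part of this patch.

#include <linux/writeback.h>
#include <linux/backing-dev.h>

/* Illustrative wrapper: global kick, e.g. from memory reclaim or sync(2).
 * Replaces wakeup_pdflush(nr_pages), which returned -1 if every pdflush
 * thread was busy; wakeup_flusher_threads() returns void and has no such
 * failure mode, since each bdi owns its own flusher. */
static void kick_global_writeback(long nr_pages)
{
        wakeup_flusher_threads(nr_pages);
}

/* Illustrative wrapper: per-device kick, as balance_dirty_pages() now does.
 * The work is described by a writeback_control and queued on that bdi. */
static void kick_bdi_writeback(struct backing_dev_info *bdi, long nr_pages)
{
        struct writeback_control wbc = {
                .bdi            = bdi,
                .sync_mode      = WB_SYNC_NONE,
                .nr_to_write    = nr_pages,
        };

        bdi_start_writeback(&wbc);
}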
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
parent 66f3b8e2
@@ -281,7 +281,7 @@ static void free_more_memory(void)
        struct zone *zone;
        int nid;

-       wakeup_pdflush(1024);
+       wakeup_flusher_threads(1024);
        yield();

        for_each_online_node(nid) {
......
This diff is collapsed.
@@ -168,7 +168,7 @@ int __put_super_and_need_restart(struct super_block *sb)
  *     Drops a temporary reference, frees superblock if there's no
  *     references left.
  */
-static void put_super(struct super_block *sb)
+void put_super(struct super_block *sb)
 {
        spin_lock(&sb_lock);
        __put_super(sb);
......
@@ -120,7 +120,7 @@ static void sync_filesystems(int wait)
  */
 SYSCALL_DEFINE0(sync)
 {
-       wakeup_pdflush(0);
+       wakeup_flusher_threads(0);
        sync_filesystems(0);
        sync_filesystems(1);
        if (unlikely(laptop_mode))
......
@@ -13,6 +13,8 @@
 #include <linux/proportions.h>
 #include <linux/kernel.h>
 #include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/writeback.h>
 #include <asm/atomic.h>

 struct page;
@@ -23,7 +25,8 @@ struct dentry;
  * Bits in backing_dev_info.state
  */
 enum bdi_state {
-       BDI_pdflush,            /* A pdflush thread is working this device */
+       BDI_pending,            /* On its way to being activated */
+       BDI_wb_alloc,           /* Default embedded wb allocated */
        BDI_async_congested,    /* The async (write) queue is getting full */
        BDI_sync_congested,     /* The sync queue is getting full */
        BDI_unused,             /* Available bits start here */
@@ -39,9 +42,22 @@ enum bdi_stat_item {

 #define BDI_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))

+struct bdi_writeback {
+       struct list_head list;                  /* hangs off the bdi */
+
+       struct backing_dev_info *bdi;           /* our parent bdi */
+       unsigned int nr;
+
+       unsigned long last_old_flush;           /* last old data flush */
+
+       struct task_struct *task;               /* writeback task */
+       struct list_head b_dirty;               /* dirty inodes */
+       struct list_head b_io;                  /* parked for writeback */
+       struct list_head b_more_io;             /* parked for more writeback */
+};
+
 struct backing_dev_info {
        struct list_head bdi_list;
        unsigned long ra_pages; /* max readahead in PAGE_CACHE_SIZE units */
        unsigned long state;    /* Always use atomic bitops on this */
        unsigned int capabilities; /* Device capabilities */
@@ -58,11 +74,15 @@ struct backing_dev_info {
        unsigned int min_ratio;
        unsigned int max_ratio, max_prop_frac;

-       struct device *dev;
+       struct bdi_writeback wb;        /* default writeback info for this bdi */
+       spinlock_t wb_lock;             /* protects update side of wb_list */
+       struct list_head wb_list;       /* the flusher threads hanging off this bdi */
+       unsigned long wb_mask;          /* bitmask of registered tasks */
+       unsigned int wb_cnt;            /* number of registered tasks */

-       struct list_head b_dirty;       /* dirty inodes */
-       struct list_head b_io;          /* parked for writeback */
-       struct list_head b_more_io;     /* parked for more writeback */
+       struct list_head work_list;
+
+       struct device *dev;

 #ifdef CONFIG_DEBUG_FS
        struct dentry *debug_dir;
@@ -77,10 +97,20 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
                const char *fmt, ...);
 int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
 void bdi_unregister(struct backing_dev_info *bdi);
+void bdi_start_writeback(struct writeback_control *wbc);
+int bdi_writeback_task(struct bdi_writeback *wb);
+int bdi_has_dirty_io(struct backing_dev_info *bdi);

-extern struct mutex bdi_lock;
+extern spinlock_t bdi_lock;
 extern struct list_head bdi_list;

+static inline int wb_has_dirty_io(struct bdi_writeback *wb)
+{
+       return !list_empty(&wb->b_dirty) ||
+              !list_empty(&wb->b_io) ||
+              !list_empty(&wb->b_more_io);
+}
+
 static inline void __add_bdi_stat(struct backing_dev_info *bdi,
                enum bdi_stat_item item, s64 amount)
 {
@@ -270,6 +300,11 @@ static inline bool bdi_cap_swap_backed(struct backing_dev_info *bdi)
        return bdi->capabilities & BDI_CAP_SWAP_BACKED;
 }

+static inline bool bdi_cap_flush_forker(struct backing_dev_info *bdi)
+{
+       return bdi == &default_backing_dev_info;
+}
+
 static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
 {
        return bdi_cap_writeback_dirty(mapping->backing_dev_info);
@@ -285,4 +320,10 @@ static inline bool mapping_cap_swap_backed(struct address_space *mapping)
        return bdi_cap_swap_backed(mapping->backing_dev_info);
 }

+static inline int bdi_sched_wait(void *word)
+{
+       schedule();
+       return 0;
+}
+
 #endif /* _LINUX_BACKING_DEV_H */
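
Two of the new helpers above carry most of the design: wb_has_dirty_io()
is how a flusher decides whether its bdi_writeback still has inodes parked
on the b_dirty/b_io/b_more_io lists, and bdi_sched_wait() has the signature
of a wait_on_bit() action, so callers can sleep until a state bit such as
BDI_pending clears. A hedged sketch of that pairing follows; the function
itself is illustrative, and the wait_on_bit() usage is an assumption about
how the helper is meant to be used rather than something shown in this hunk.

#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/sched.h>

/* Illustrative only: wait for a bdi to finish activating, then report
 * whether its default writeback context still has dirty inodes queued. */
static int example_wait_then_check(struct backing_dev_info *bdi)
{
        /* bdi_sched_wait() just schedules and returns 0, which makes it a
         * valid wait_on_bit() action for an uninterruptible sleep. */
        wait_on_bit(&bdi->state, BDI_pending, bdi_sched_wait,
                    TASK_UNINTERRUPTIBLE);

        return wb_has_dirty_io(&bdi->wb);
}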
@@ -1786,6 +1786,7 @@ extern int get_sb_pseudo(struct file_system_type *, char *,
        struct vfsmount *mnt);
 extern void simple_set_mnt(struct vfsmount *mnt, struct super_block *sb);
 int __put_super_and_need_restart(struct super_block *sb);
+void put_super(struct super_block *sb);

 /* Alas, no aliases. Too much hassle with bringing module.h everywhere */
 #define fops_get(fops) \
@@ -2182,7 +2183,6 @@ extern int bdev_read_only(struct block_device *);
 extern int set_blocksize(struct block_device *, int);
 extern int sb_set_blocksize(struct super_block *, int);
 extern int sb_min_blocksize(struct super_block *, int);
-extern int sb_has_dirty_inodes(struct super_block *);

 extern int generic_file_mmap(struct file *, struct vm_area_struct *);
 extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *);
......
@@ -40,6 +40,8 @@ enum writeback_sync_modes {
 struct writeback_control {
        struct backing_dev_info *bdi;   /* If !NULL, only write back this
                                           queue */
+       struct super_block *sb;         /* if !NULL, only write inodes from
+                                          this super_block */
        enum writeback_sync_modes sync_mode;
        unsigned long *older_than_this; /* If !NULL, only write back inodes
                                           older than this */
@@ -76,10 +78,13 @@ struct writeback_control {
 /*
  * fs/fs-writeback.c
  */
-void writeback_inodes(struct writeback_control *wbc);
+struct bdi_writeback;
 int inode_wait(void *);
 long writeback_inodes_sb(struct super_block *);
 long sync_inodes_sb(struct super_block *);
+void writeback_inodes_wbc(struct writeback_control *wbc);
+long wb_do_writeback(struct bdi_writeback *wb, int force_wait);
+void wakeup_flusher_threads(long nr_pages);

 /* writeback.h requires fs.h; it, too, is not included from here. */
 static inline void wait_on_inode(struct inode *inode)
@@ -99,7 +104,6 @@ static inline void inode_sync_wait(struct inode *inode)
 /*
  * mm/page-writeback.c
  */
-int wakeup_pdflush(long nr_pages);
 void laptop_io_completion(void);
 void laptop_sync_completion(void);
 void throttle_vm_writeout(gfp_t gfp_mask);
......
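
The new sb field in struct writeback_control is what lets the shared per-bdi
paths honour requests that are scoped to a single filesystem, such as the
writeback_inodes_sb()/sync_inodes_sb() entry points visible in the header
above. A small sketch of how a filesystem-scoped caller looks under this
API; the wrapper name and the exact division of labour between the two calls
are assumptions for illustration, not part of the patch.

#include <linux/writeback.h>
#include <linux/fs.h>

/* Illustrative only: flush one filesystem. writeback_inodes_sb() starts
 * writeback of the superblock's dirty inodes; sync_inodes_sb() is assumed
 * here to be the data-integrity variant that also waits for completion. */
static void example_sync_one_fs(struct super_block *sb, int wait)
{
        if (!wait)
                writeback_inodes_sb(sb);
        else
                sync_inodes_sb(sb);
}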
This diff is collapsed.
@@ -35,15 +35,6 @@
 #include <linux/buffer_head.h>
 #include <linux/pagevec.h>

-/*
- * The maximum number of pages to writeout in a single bdflush/kupdate
- * operation.  We do this so we don't hold I_SYNC against an inode for
- * enormous amounts of time, which would block a userspace task which has
- * been forced to throttle against that inode. Also, the code reevaluates
- * the dirty each time it has written this many pages.
- */
-#define MAX_WRITEBACK_PAGES    1024
-
 /*
  * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
  * will look to see if it needs to force writeback or throttling.
@@ -117,8 +108,6 @@ EXPORT_SYMBOL(laptop_mode);

 /* End of sysctl-exported parameters */

-static void background_writeout(unsigned long _min_pages);
-
 /*
  * Scale the writeback cache size proportional to the relative writeout speeds.
  *
@@ -326,7 +315,7 @@ int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
 {
        int ret = 0;

-       mutex_lock(&bdi_lock);
+       spin_lock(&bdi_lock);
        if (min_ratio > bdi->max_ratio) {
                ret = -EINVAL;
        } else {
@@ -338,7 +327,7 @@ int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
                        ret = -EINVAL;
                }
        }
-       mutex_unlock(&bdi_lock);
+       spin_unlock(&bdi_lock);

        return ret;
 }
@@ -350,14 +339,14 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
        if (max_ratio > 100)
                return -EINVAL;

-       mutex_lock(&bdi_lock);
+       spin_lock(&bdi_lock);
        if (bdi->min_ratio > max_ratio) {
                ret = -EINVAL;
        } else {
                bdi->max_ratio = max_ratio;
                bdi->max_prop_frac = (PROP_FRAC_BASE * max_ratio) / 100;
        }
-       mutex_unlock(&bdi_lock);
+       spin_unlock(&bdi_lock);

        return ret;
 }
@@ -543,7 +532,7 @@ static void balance_dirty_pages(struct address_space *mapping)
                 * up.
                 */
                if (bdi_nr_reclaimable > bdi_thresh) {
-                       writeback_inodes(&wbc);
+                       writeback_inodes_wbc(&wbc);
                        pages_written += write_chunk - wbc.nr_to_write;
                        get_dirty_limits(&background_thresh, &dirty_thresh,
                                       &bdi_thresh, bdi);
@@ -572,7 +561,7 @@ static void balance_dirty_pages(struct address_space *mapping)
                if (pages_written >= write_chunk)
                        break;          /* We've done our duty */

-               congestion_wait(BLK_RW_ASYNC, HZ/10);
+               schedule_timeout(1);
        }

        if (bdi_nr_reclaimable + bdi_nr_writeback < bdi_thresh &&
@@ -591,10 +580,18 @@ static void balance_dirty_pages(struct address_space *mapping)
         * background_thresh, to keep the amount of dirty memory low.
         */
        if ((laptop_mode && pages_written) ||
-           (!laptop_mode && (global_page_state(NR_FILE_DIRTY)
-                             + global_page_state(NR_UNSTABLE_NFS)
-                             > background_thresh)))
-               pdflush_operation(background_writeout, 0);
+           (!laptop_mode && ((nr_writeback = global_page_state(NR_FILE_DIRTY)
+                                             + global_page_state(NR_UNSTABLE_NFS))
+                                             > background_thresh))) {
+               struct writeback_control wbc = {
+                       .bdi            = bdi,
+                       .sync_mode      = WB_SYNC_NONE,
+                       .nr_to_write    = nr_writeback,
+               };
+
+               bdi_start_writeback(&wbc);
+       }
 }

 void set_page_dirty_balance(struct page *page, int page_mkwrite)
@@ -678,124 +675,10 @@ void throttle_vm_writeout(gfp_t gfp_mask)
        }
 }

-/*
- * writeback at least _min_pages, and keep writing until the amount of dirty
- * memory is less than the background threshold, or until we're all clean.
- */
-static void background_writeout(unsigned long _min_pages)
-{
-       long min_pages = _min_pages;
-       struct writeback_control wbc = {
-               .bdi            = NULL,
-               .sync_mode      = WB_SYNC_NONE,
-               .older_than_this = NULL,
-               .nr_to_write    = 0,
-               .nonblocking    = 1,
-               .range_cyclic   = 1,
-       };
-
-       for ( ; ; ) {
-               unsigned long background_thresh;
-               unsigned long dirty_thresh;
-
-               get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);
-               if (global_page_state(NR_FILE_DIRTY) +
-                       global_page_state(NR_UNSTABLE_NFS) < background_thresh
-                               && min_pages <= 0)
-                       break;
-               wbc.more_io = 0;
-               wbc.encountered_congestion = 0;
-               wbc.nr_to_write = MAX_WRITEBACK_PAGES;
-               wbc.pages_skipped = 0;
-               writeback_inodes(&wbc);
-               min_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
-               if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
-                       /* Wrote less than expected */
-                       if (wbc.encountered_congestion || wbc.more_io)
-                               congestion_wait(BLK_RW_ASYNC, HZ/10);
-                       else
-                               break;
-               }
-       }
-}
-
-/*
- * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
- * the whole world.  Returns 0 if a pdflush thread was dispatched.  Returns
- * -1 if all pdflush threads were busy.
- */
-int wakeup_pdflush(long nr_pages)
-{
-       if (nr_pages == 0)
-               nr_pages = global_page_state(NR_FILE_DIRTY) +
-                               global_page_state(NR_UNSTABLE_NFS);
-       return pdflush_operation(background_writeout, nr_pages);
-}
-
-static void wb_timer_fn(unsigned long unused);
 static void laptop_timer_fn(unsigned long unused);

-static DEFINE_TIMER(wb_timer, wb_timer_fn, 0, 0);
 static DEFINE_TIMER(laptop_mode_wb_timer, laptop_timer_fn, 0, 0);

-/*
- * Periodic writeback of "old" data.
- *
- * Define "old": the first time one of an inode's pages is dirtied, we mark the
- * dirtying-time in the inode's address_space. So this periodic writeback code
- * just walks the superblock inode list, writing back any inodes which are
- * older than a specific point in time.
- *
- * Try to run once per dirty_writeback_interval. But if a writeback event
- * takes longer than a dirty_writeback_interval interval, then leave a
- * one-second gap.
- *
- * older_than_this takes precedence over nr_to_write. So we'll only write back
- * all dirty pages if they are all attached to "old" mappings.
- */
-static void wb_kupdate(unsigned long arg)
-{
-       unsigned long oldest_jif;
-       unsigned long start_jif;
-       unsigned long next_jif;
-       long nr_to_write;
-       struct writeback_control wbc = {
-               .bdi            = NULL,
-               .sync_mode      = WB_SYNC_NONE,
-               .older_than_this = &oldest_jif,
-               .nr_to_write    = 0,
-               .nonblocking    = 1,
-               .for_kupdate    = 1,
-               .range_cyclic   = 1,
-       };
-
-       sync_supers();
-
-       oldest_jif = jiffies - msecs_to_jiffies(dirty_expire_interval * 10);
-       start_jif = jiffies;
-       next_jif = start_jif + msecs_to_jiffies(dirty_writeback_interval * 10);
-       nr_to_write = global_page_state(NR_FILE_DIRTY) +
-                       global_page_state(NR_UNSTABLE_NFS) +
-                       (inodes_stat.nr_inodes - inodes_stat.nr_unused);
-       while (nr_to_write > 0) {
-               wbc.more_io = 0;
-               wbc.encountered_congestion = 0;
-               wbc.nr_to_write = MAX_WRITEBACK_PAGES;
-               writeback_inodes(&wbc);
-               if (wbc.nr_to_write > 0) {
-                       if (wbc.encountered_congestion || wbc.more_io)
-                               congestion_wait(BLK_RW_ASYNC, HZ/10);
-                       else
-                               break;  /* All the old data is written */
-               }
-               nr_to_write -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
-       }
-       if (time_before(next_jif, jiffies + HZ))
-               next_jif = jiffies + HZ;
-       if (dirty_writeback_interval)
-               mod_timer(&wb_timer, next_jif);
-}
-
 /*
  * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
  */
@@ -803,28 +686,24 @@ int dirty_writeback_centisecs_handler(ctl_table *table, int write,
        struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
 {
        proc_dointvec(table, write, file, buffer, length, ppos);
-       if (dirty_writeback_interval)
-               mod_timer(&wb_timer, jiffies +
-                       msecs_to_jiffies(dirty_writeback_interval * 10));
-       else
-               del_timer(&wb_timer);
        return 0;
 }

-static void wb_timer_fn(unsigned long unused)
-{
-       if (pdflush_operation(wb_kupdate, 0) < 0)
-               mod_timer(&wb_timer, jiffies + HZ); /* delay 1 second */
-}
-
-static void laptop_flush(unsigned long unused)
+static void do_laptop_sync(struct work_struct *work)
 {
-       sys_sync();
+       wakeup_flusher_threads(0);
+       kfree(work);
 }

 static void laptop_timer_fn(unsigned long unused)
 {
-       pdflush_operation(laptop_flush, 0);
+       struct work_struct *work;
+
+       work = kmalloc(sizeof(*work), GFP_ATOMIC);
+       if (work) {
+               INIT_WORK(work, do_laptop_sync);
+               schedule_work(work);
+       }
 }

 /*
@@ -907,8 +786,6 @@ void __init page_writeback_init(void)
 {
        int shift;

-       mod_timer(&wb_timer,
-                 jiffies + msecs_to_jiffies(dirty_writeback_interval * 10));
        writeback_set_ratelimit();
        register_cpu_notifier(&ratelimit_nb);
......
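
With background_writeout(), wakeup_pdflush(), wb_kupdate() and the wb_timer
all removed above, both on-demand and periodic ("kupdate") writeback now live
in the per-bdi flusher thread, built from the functions declared earlier:
bdi_writeback_task() runs the thread's main loop and wb_do_writeback()
performs one pass, with last_old_flush in struct bdi_writeback tracking when
old data was last pushed out. That loop is in the collapsed diffs not shown
on this page, so the sketch below is only a rough approximation of the
relationship, not the actual implementation.

#include <linux/backing-dev.h>

/* Rough sketch only: the thread function a bdi's flusher might run.
 * Signatures come from the header hunk earlier; the loop details are
 * assumptions, since the real code is in the collapsed diffs. */
static int example_flusher_thread(void *data)
{
        struct bdi_writeback *wb = data;

        /* bdi_writeback_task() is expected to loop, calling
         * wb_do_writeback(wb, 0) when there is work or the kupdate-style
         * interval expires, and to return when the thread should exit. */
        return bdi_writeback_task(wb);
}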
@@ -1720,7 +1720,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
                 */
                if (total_scanned > sc->swap_cluster_max +
                                        sc->swap_cluster_max / 2) {
-                       wakeup_pdflush(laptop_mode ? 0 : total_scanned);
+                       wakeup_flusher_threads(laptop_mode ? 0 : total_scanned);
                        sc->may_writepage = 1;
                }
......