Commit 8ba8ed54 authored by Linus Torvalds

Merge branch 'writeback-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/wfg/linux

* 'writeback-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/wfg/linux:
  writeback: remove vm_dirties and task->dirties
  writeback: hard throttle 1000+ dd on a slow USB stick
  mm: Make task in balance_dirty_pages() killable
parents bbbc4791 468e6a20
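
In short, this merge (a) drops the now-unused per-task dirty accounting (vm_dirties and task->dirties), (b) makes the throttle-loop exit test use the already-computed task_ratelimit so that hard-throttled tasks such as "1000+ dd on a slow USB stick" stay capped, and (c) makes the throttled sleep killable. The resulting tail of the loop in balance_dirty_pages() looks roughly like this (a simplified sketch of the post-merge code, not the complete function; pause and task_ratelimit are computed earlier in the loop, as in the mm/page-writeback.c hunks below):

	for (;;) {
		/* ... compute task_ratelimit and pause ... */

		__set_current_state(TASK_KILLABLE);	/* was TASK_UNINTERRUPTIBLE */
		io_schedule_timeout(pause);

		/*
		 * Typically equivalent to (nr_dirty < dirty_thresh), but it
		 * also stays false for hard-throttled tasks, keeping
		 * "1000+ dd on a slow USB stick" under control.
		 */
		if (task_ratelimit)
			break;

		/* a task hit by a fatal signal (e.g. SIGKILL) can now get out */
		if (fatal_signal_pending(current))
			break;
	}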
include/linux/init_task.h
@@ -184,7 +184,6 @@ extern struct cred init_cred;
 		[PIDTYPE_SID]  = INIT_PID_LINK(PIDTYPE_SID),		\
 	},								\
 	.thread_group	= LIST_HEAD_INIT(tsk.thread_group),		\
-	.dirties = INIT_PROP_LOCAL_SINGLE(dirties),			\
 	INIT_IDS							\
 	INIT_PERF_EVENTS(tsk)						\
 	INIT_TRACE_IRQFLAGS						\
include/linux/sched.h
@@ -1521,7 +1521,6 @@ struct task_struct {
 #ifdef CONFIG_FAULT_INJECTION
 	int make_it_fail;
 #endif
-	struct prop_local_single dirties;
 	/*
 	 * when (nr_dirtied >= nr_dirtied_pause), it's time to call
 	 * balance_dirty_pages() for some dirty throttling pause
kernel/fork.c
@@ -162,7 +162,6 @@ static void account_kernel_stack(struct thread_info *ti, int account)
 
 void free_task(struct task_struct *tsk)
 {
-	prop_local_destroy_single(&tsk->dirties);
 	account_kernel_stack(tsk->stack, -1);
 	free_thread_info(tsk->stack);
 	rt_mutex_debug_task_free(tsk);
@@ -274,10 +273,6 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
 
 	tsk->stack = ti;
 
-	err = prop_local_init_single(&tsk->dirties);
-	if (err)
-		goto out;
-
 	setup_thread_stack(tsk, orig);
 	clear_user_return_notifier(tsk);
 	clear_tsk_need_resched(tsk);
mm/page-writeback.c
@@ -128,7 +128,6 @@ unsigned long global_dirty_limit;
  *
  */
 static struct prop_descriptor vm_completions;
-static struct prop_descriptor vm_dirties;
 
 /*
  * couple the period to the dirty_ratio:
@@ -154,7 +153,6 @@ static void update_completion_period(void)
 {
	int shift = calc_period_shift();
	prop_change_shift(&vm_completions, shift);
-	prop_change_shift(&vm_dirties, shift);
	writeback_set_ratelimit();
 }
@@ -235,11 +233,6 @@ void bdi_writeout_inc(struct backing_dev_info *bdi)
 }
 EXPORT_SYMBOL_GPL(bdi_writeout_inc);
 
-void task_dirty_inc(struct task_struct *tsk)
-{
-	prop_inc_single(&vm_dirties, &tsk->dirties);
-}
-
 /*
  * Obtain an accurate fraction of the BDI's portion.
  */
@@ -1133,17 +1126,17 @@ static void balance_dirty_pages(struct address_space *mapping,
					  pages_dirtied,
					  pause,
					  start_time);
-		__set_current_state(TASK_UNINTERRUPTIBLE);
+		__set_current_state(TASK_KILLABLE);
		io_schedule_timeout(pause);
 
-		dirty_thresh = hard_dirty_limit(dirty_thresh);
		/*
-		 * max-pause area. If dirty exceeded but still within this
-		 * area, no need to sleep for more than 200ms: (a) 8 pages per
-		 * 200ms is typically more than enough to curb heavy dirtiers;
-		 * (b) the pause time limit makes the dirtiers more responsive.
+		 * This is typically equal to (nr_dirty < dirty_thresh) and can
+		 * also keep "1000+ dd on a slow USB stick" under control.
		 */
-		if (nr_dirty < dirty_thresh)
+		if (task_ratelimit)
+			break;
+
+		if (fatal_signal_pending(current))
			break;
	}
@@ -1395,7 +1388,6 @@ void __init page_writeback_init(void)
 
	shift = calc_period_shift();
	prop_descriptor_init(&vm_completions, shift);
-	prop_descriptor_init(&vm_dirties, shift);
 }
 
 /**
@@ -1724,7 +1716,6 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
		__inc_zone_page_state(page, NR_DIRTIED);
		__inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
		__inc_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED);
-		task_dirty_inc(current);
		task_io_account_write(PAGE_CACHE_SIZE);
	}
 }
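
For context: the lines deleted above were the last users of the single-value side of lib/proportions; their reader (the per-task dirty fraction behind the old task_dirty_limit()) had already gone away with the IO-less balance_dirty_pages() rework, leaving this accounting write-only. Pieced together from the hunks above, the removed pattern was roughly the following (a reconstruction of the pre-merge code for reference, assuming the <linux/proportions.h> API of this era; the task_dirties_init/task_dirties_free wrappers are illustrative only, the calls sat inline in dup_task_struct() and free_task()):

	#include <linux/proportions.h>
	#include <linux/sched.h>

	/* one global descriptor, aged with the same shift as vm_completions */
	static struct prop_descriptor vm_dirties;

	/* dup_task_struct(): set up the task-local counter (may fail) */
	static int task_dirties_init(struct task_struct *tsk)
	{
		return prop_local_init_single(&tsk->dirties);
	}

	/* account_page_dirtied(): credit each newly dirtied page to the task */
	void task_dirty_inc(struct task_struct *tsk)
	{
		prop_inc_single(&vm_dirties, &tsk->dirties);
	}

	/* free_task(): tear the counter down */
	static void task_dirties_free(struct task_struct *tsk)
	{
		prop_local_destroy_single(&tsk->dirties);
	}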