Commit b698f0a1 authored by Hugh Dickins, committed by Linus Torvalds

mm/fs: delete PF_SWAPWRITE

PF_SWAPWRITE has been redundant since v3.2 commit ee72886d ("mm:
vmscan: do not writeback filesystem pages in direct reclaim").

Coincidentally, NeilBrown's current patch "remove inode_congested()"
deletes may_write_to_inode(), which appeared to be the one function which
took notice of PF_SWAPWRITE.  But if you study the old logic, and the
conditions under which may_write_to_inode() was called, you discover that
flag and function have been pointless for a decade.
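
For context, may_write_to_inode() was the small mm/vmscan.c helper that pageout() consulted before issuing writeback from reclaim.  Below is a from-memory sketch of roughly what it looked like before NeilBrown's series removes it; it is not verbatim kernel source, and the congestion and backing_dev_info tests shown are approximate:

/*
 * Approximate, from-memory sketch of the old mm/vmscan.c helper;
 * not verbatim kernel source.
 */
static bool may_write_to_inode(struct inode *inode)
{
	if (current->flags & PF_SWAPWRITE)	/* the flag this patch deletes */
		return true;
	if (!inode_write_congested(inode))	/* congestion test going away too */
		return true;
	if (inode_to_bdi(inode) == current->backing_dev_info)
		return true;
	return false;
}

As noted above, this appeared to be the one place that took notice of PF_SWAPWRITE, and NeilBrown's series deletes it.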

Link: https://lkml.kernel.org/r/75e80e7-742d-e3bd-531-614db8961e4@google.com
Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: NeilBrown <neilb@suse.de>
Cc: Jan Kara <jack@suse.de>
Cc: "Darrick J. Wong" <djwong@kernel.org>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent d794103d
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -2197,7 +2197,6 @@ void wb_workfn(struct work_struct *work)
 	long pages_written;
 
 	set_worker_desc("flush-%s", bdi_dev_name(wb->bdi));
-	current->flags |= PF_SWAPWRITE;
 
 	if (likely(!current_is_workqueue_rescuer() ||
 		   !test_bit(WB_registered, &wb->state))) {
@@ -2226,8 +2225,6 @@ void wb_workfn(struct work_struct *work)
 		wb_wakeup(wb);
 	else if (wb_has_dirty_io(wb) && dirty_writeback_interval)
 		wb_wakeup_delayed(wb);
-
-	current->flags &= ~PF_SWAPWRITE;
 }
 
 /*
--- a/fs/xfs/libxfs/xfs_btree.c
+++ b/fs/xfs/libxfs/xfs_btree.c
@@ -2818,7 +2818,7 @@ xfs_btree_split_worker(
 	 * in any way.
 	 */
 	if (args->kswapd)
-		new_pflags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
+		new_pflags |= PF_MEMALLOC | PF_KSWAPD;
 
 	current_set_flags_nested(&pflags, new_pflags);
 	xfs_trans_set_context(args->cur->bc_tp);
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1689,7 +1689,6 @@ extern struct pid *cad_pid;
 						 * I am cleaning dirty pages from some other bdi. */
 #define PF_KTHREAD		0x00200000	/* I am a kernel thread */
 #define PF_RANDOMIZE		0x00400000	/* Randomize virtual address space */
-#define PF_SWAPWRITE		0x00800000	/* Allowed to write to swap */
 #define PF_NO_SETAFFINITY	0x04000000	/* Userland is not allowed to meddle with cpus_mask */
 #define PF_MCE_EARLY		0x08000000	/* Early kill for mce process policy */
 #define PF_MEMALLOC_PIN		0x10000000	/* Allocation context constrained to zones which allow long term pinning. */
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1350,7 +1350,6 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
 	bool is_thp = false;
 	struct page *page;
 	struct page *page2;
-	int swapwrite = current->flags & PF_SWAPWRITE;
 	int rc, nr_subpages;
 	LIST_HEAD(ret_pages);
 	LIST_HEAD(thp_split_pages);
@@ -1359,9 +1358,6 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
 
 	trace_mm_migrate_pages_start(mode, reason);
 
-	if (!swapwrite)
-		current->flags |= PF_SWAPWRITE;
-
 thp_subpage_migration:
 	for (pass = 0; pass < 10 && (retry || thp_retry); pass++) {
 		retry = 0;
@@ -1516,9 +1512,6 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
 	trace_mm_migrate_pages(nr_succeeded, nr_failed_pages, nr_thp_succeeded,
 			       nr_thp_failed, nr_thp_split, mode, reason);
 
-	if (!swapwrite)
-		current->flags &= ~PF_SWAPWRITE;
-
 	if (ret_succeeded)
 		*ret_succeeded = nr_succeeded;
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4457,7 +4457,7 @@ static int kswapd(void *p)
 	 * us from recursively trying to free more memory as we're
 	 * trying to free the first piece of memory in the first place).
 	 */
-	tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
+	tsk->flags |= PF_MEMALLOC | PF_KSWAPD;
 	set_freezable();
 
 	WRITE_ONCE(pgdat->kswapd_order, 0);
@@ -4508,7 +4508,7 @@ static int kswapd(void *p)
 		goto kswapd_try_sleep;
 	}
 
-	tsk->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD);
+	tsk->flags &= ~(PF_MEMALLOC | PF_KSWAPD);
 
 	return 0;
 }
@@ -4749,11 +4749,8 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in
 	fs_reclaim_acquire(sc.gfp_mask);
 	/*
 	 * We need to be able to allocate from the reserves for RECLAIM_UNMAP
-	 * and we also need to be able to write out pages for RECLAIM_WRITE
-	 * and RECLAIM_UNMAP.
 	 */
 	noreclaim_flag = memalloc_noreclaim_save();
-	p->flags |= PF_SWAPWRITE;
 	set_task_reclaim_state(p, &sc.reclaim_state);
 
 	if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages) {
@@ -4767,7 +4764,6 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in
 	}
 
 	set_task_reclaim_state(p, NULL);
-	current->flags &= ~PF_SWAPWRITE;
 	memalloc_noreclaim_restore(noreclaim_flag);
 	fs_reclaim_release(sc.gfp_mask);
 	psi_memstall_leave(&pflags);