Commit 361a2a22 authored by Minchan Kim, committed by Linus Torvalds

mm: replace migrate_[prep|finish] with lru_cache_[disable|enable]

Currently, migrate_[prep|finish] are merely wrappers around
lru_cache_[disable|enable].  There is not much to gain from the additional
abstraction.

Use lru_cache_[disable|enable] instead of migrate_[prep|finish]; the former
names are more descriptive.

Note: migrate_prep_local() in compaction.c is changed to lru_add_drain() to
keep the old behavior, avoiding the CPU-scheduling cost of involving many
other CPUs that an all-CPU drain would incur.
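
For illustration, every call site now brackets the migration work directly
between lru_cache_disable() and lru_cache_enable().  The following is a
hypothetical userspace analogy of those semantics, not kernel code: the
names (item_add, batch_disable, batch_drain, and so on) are invented
stand-ins for lru_cache_add(), lru_cache_disable() and lru_add_drain().
While the disable count is raised, new items skip the per-thread batch and
go straight to the shared list, so a concurrent scan observes everything;
this matters because pages still sitting in a per-CPU pagevec are invisible
to isolate_lru_page().

/* Hypothetical analogy only -- invented names, not the kernel API. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define BATCH 15	/* like PAGEVEC_SIZE */

static atomic_int disable_count;	/* like lru_disable_count */
static pthread_mutex_t shared_lock = PTHREAD_MUTEX_INITIALIZER;
static int shared_list[1024], shared_len;	/* like the LRU lists */

static _Thread_local int local_batch[BATCH];	/* like a CPU's pagevec */
static _Thread_local int local_len;

static void publish(const int *items, int n)
{
	pthread_mutex_lock(&shared_lock);
	for (int i = 0; i < n; i++)
		shared_list[shared_len++] = items[i];
	pthread_mutex_unlock(&shared_lock);
}

static void batch_drain(void)	/* like lru_add_drain(): local batch only */
{
	publish(local_batch, local_len);
	local_len = 0;
}

static void item_add(int item)	/* like lru_cache_add() */
{
	if (atomic_load(&disable_count)) {	/* batching disabled: bypass */
		publish(&item, 1);
		return;
	}
	local_batch[local_len++] = item;
	if (local_len == BATCH)
		batch_drain();
}

static void batch_disable(void)	/* like lru_cache_disable() */
{
	atomic_fetch_add(&disable_count, 1);
	batch_drain();	/* the kernel drains every CPU here, not just one */
}

static void batch_enable(void)	/* like lru_cache_enable() */
{
	atomic_fetch_sub(&disable_count, 1);
}

int main(void)
{
	for (int i = 0; i < 20; i++)
		item_add(i);	/* 15 published by a full batch, 5 still local */
	batch_disable();	/* drain + bypass: all 20 now visible */
	printf("visible items: %d\n", shared_len);
	batch_enable();
	return 0;
}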

Link: https://lkml.kernel.org/r/20210319175127.886124-2-minchan@kernel.org
Signed-off-by: Minchan Kim <minchan@kernel.org>
Acked-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Cc: Chris Goldsworthy <cgoldswo@codeaurora.org>
Cc: John Dias <joaodias@google.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Oliver Sang <oliver.sang@intel.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent d479960e
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -45,9 +45,6 @@ extern struct page *alloc_migration_target(struct page *page, unsigned long priv
 extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
 extern void putback_movable_page(struct page *page);
 
-extern void migrate_prep(void);
-extern void migrate_finish(void);
-extern void migrate_prep_local(void);
 extern void migrate_page_states(struct page *newpage, struct page *page);
 extern void migrate_page_copy(struct page *newpage, struct page *page);
 extern int migrate_huge_page_move_mapping(struct address_space *mapping,
@@ -67,10 +64,6 @@ static inline struct page *alloc_migration_target(struct page *page,
 static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
 	{ return -EBUSY; }
 
-static inline int migrate_prep(void) { return -ENOSYS; }
-static inline int migrate_finish(void) { return -ENOSYS; }
-static inline int migrate_prep_local(void) { return -ENOSYS; }
-
 static inline void migrate_page_states(struct page *newpage, struct page *page)
 {
 }
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -2354,7 +2354,8 @@ compact_zone(struct compact_control *cc, struct capture_control *capc)
 	trace_mm_compaction_begin(start_pfn, cc->migrate_pfn,
 				cc->free_pfn, end_pfn, sync);
 
-	migrate_prep_local();
+	/* lru_add_drain_all could be expensive with involving other CPUs */
+	lru_add_drain();
 
 	while ((ret = compact_finished(cc)) == COMPACT_CONTINUE) {
 		int err;
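
The comment added in this hunk is the crux of the compaction change:
lru_add_drain() flushes only the current CPU's pagevecs, whereas
lru_add_drain_all() (and lru_cache_disable(), which performs an all-CPU
drain up front) must schedule drain work on every CPU and wait for it to
run.  A rough, hypothetical sketch of that cost difference, with worker
threads standing in for CPUs and all names invented:

/* Hypothetical sketch -- invented names, not the kernel API. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NWORKERS 4	/* stand-ins for other CPUs */

static atomic_int drain_requested;
static atomic_int drained;

static void flush_local_batch(int id)
{
	/* stand-in for flushing one CPU's pagevecs onto the LRU lists */
	printf("context %d: batch flushed\n", id);
}

static void *worker(void *arg)
{
	while (!atomic_load(&drain_requested))
		;	/* each worker must get CPU time to notice the request */
	flush_local_batch((int)(long)arg);
	atomic_fetch_add(&drained, 1);
	return NULL;
}

static void drain_local(void)	/* like lru_add_drain(): cheap, local */
{
	flush_local_batch(-1);
}

static void drain_all(void)	/* like lru_add_drain_all(): waits on everyone */
{
	atomic_store(&drain_requested, 1);
	while (atomic_load(&drained) < NWORKERS)
		;	/* blocked until every worker has run the drain work */
}

int main(void)
{
	pthread_t tids[NWORKERS];

	for (long i = 0; i < NWORKERS; i++)
		pthread_create(&tids[i], NULL, worker, (void *)i);

	drain_local();	/* no cross-CPU coordination at all */
	drain_all();	/* cost scales with CPU count and scheduling latency */

	for (int i = 0; i < NWORKERS; i++)
		pthread_join(tids[i], NULL);
	return 0;
}

Since compact_zone() is called repeatedly, paying the all-CPU cost on each
invocation would have been a behavior change; the plain local drain
preserves exactly what migrate_prep_local() did.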
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1124,7 +1124,7 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
 	int err = 0;
 	nodemask_t tmp;
 
-	migrate_prep();
+	lru_cache_disable();
 
 	mmap_read_lock(mm);
@@ -1209,7 +1209,7 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
 	}
 	mmap_read_unlock(mm);
 
-	migrate_finish();
+	lru_cache_enable();
 	if (err < 0)
 		return err;
 	return busy;
@@ -1325,7 +1325,7 @@ static long do_mbind(unsigned long start, unsigned long len,
 
 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
 
-		migrate_prep();
+		lru_cache_disable();
 	}
 	{
 		NODEMASK_SCRATCH(scratch);
@@ -1374,7 +1374,7 @@ static long do_mbind(unsigned long start, unsigned long len,
 mpol_out:
 	mpol_put(new);
 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
-		migrate_finish();
+		lru_cache_enable();
 	return err;
 }
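
Note how in the do_mbind() hunks the disable and the enable are guarded by
the same flag test, so the LRU-cache disable count stays balanced whether
or not pages may be moved.  A minimal sketch of that pairing discipline
(stand-in names, not the kernel API):

/* Hypothetical sketch -- invented names, not the kernel API. */
#include <stdio.h>

#define MF_MOVE		0x1	/* stand-ins for MPOL_MF_MOVE[_ALL] */
#define MF_MOVE_ALL	0x2

static int disable_count;
static void cache_disable(void) { disable_count++; }
static void cache_enable(void)  { disable_count--; }

static int mbind_like(unsigned int flags)
{
	if (flags & (MF_MOVE | MF_MOVE_ALL))
		cache_disable();

	/* ... install the policy, queue and migrate pages ... */

	if (flags & (MF_MOVE | MF_MOVE_ALL))	/* mirror the disable test */
		cache_enable();
	return 0;
}

int main(void)
{
	mbind_like(0);			/* no move: never touches the count */
	mbind_like(MF_MOVE);
	printf("balanced: %s\n", disable_count == 0 ? "yes" : "no");
	return 0;
}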
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -57,30 +57,6 @@
 
 #include "internal.h"
 
-/*
- * migrate_prep() needs to be called before we start compiling a list of pages
- * to be migrated using isolate_lru_page(). If scheduling work on other CPUs is
- * undesirable, use migrate_prep_local()
- */
-void migrate_prep(void)
-{
-	/*
-	 * Clear the LRU lists so pages can be isolated.
-	 */
-	lru_cache_disable();
-}
-
-void migrate_finish(void)
-{
-	lru_cache_enable();
-}
-
-/* Do the necessary work of migrate_prep but not if it involves other CPUs */
-void migrate_prep_local(void)
-{
-	lru_add_drain();
-}
-
 int isolate_movable_page(struct page *page, isolate_mode_t mode)
 {
 	struct address_space *mapping;
@@ -1771,7 +1747,7 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
 	int start, i;
 	int err = 0, err1;
 
-	migrate_prep();
+	lru_cache_disable();
 
 	for (i = start = 0; i < nr_pages; i++) {
 		const void __user *p;
@@ -1840,7 +1816,7 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
 	if (err >= 0)
 		err = err1;
 out:
-	migrate_finish();
+	lru_cache_enable();
 	return err;
 }
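
do_pages_move() shows the other recurring shape: lru_cache_enable() sits at
the out: label, so every exit, including error returns from the copy-in
loop, restores batching.  A small self-contained sketch of the goto-out
pairing (stand-in names, not the kernel API):

/* Hypothetical sketch -- invented names, not the kernel API. */
#include <errno.h>
#include <stdio.h>

static int disable_count;
static void cache_disable(void) { disable_count++; }
static void cache_enable(void)  { disable_count--; }

static int move_pages_like(int bad_input)
{
	int err = 0;

	cache_disable();
	if (bad_input) {
		err = -EFAULT;	/* error path still reaches the enable */
		goto out;
	}
	/* ... migrate the requested pages ... */
out:
	cache_enable();
	return err;
}

int main(void)
{
	printf("ok=%d bad=%d balanced=%d\n",
	       move_pages_like(0), move_pages_like(1), disable_count == 0);
	return 0;
}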
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -8681,7 +8681,7 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
 		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
 	};
 
-	migrate_prep();
+	lru_cache_disable();
 
 	while (pfn < end || !list_empty(&cc->migratepages)) {
 		if (fatal_signal_pending(current)) {
@@ -8716,7 +8716,7 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
 			break;
 		}
 
-	migrate_finish();
+	lru_cache_enable();
 	if (ret < 0) {
 		alloc_contig_dump_pages(&cc->migratepages);
 		putback_movable_pages(&cc->migratepages);