Commit 1dfab5ab authored by Johannes Weiner, committed by Linus Torvalds

mm: memcontrol: fold move_anon() and move_file()

Turn the move type enum into flags and give the flags field a shorter
name.  Once that is done, move_anon() and move_file() are simple enough to
just fold them into the callsites.

[akpm@linux-foundation.org: tweak MOVE_MASK definition, per Michal]
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Michal Hocko <mhocko@suse.cz>
Reviewed-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Greg Thelen <gthelen@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 241994ed
...@@ -369,21 +369,18 @@ static bool memcg_kmem_is_active(struct mem_cgroup *memcg) ...@@ -369,21 +369,18 @@ static bool memcg_kmem_is_active(struct mem_cgroup *memcg)
/* Stuffs for move charges at task migration. */ /* Stuffs for move charges at task migration. */
/* /*
* Types of charges to be moved. "move_charge_at_immitgrate" and * Types of charges to be moved.
* "immigrate_flags" are treated as a left-shifted bitmap of these types.
*/ */
enum move_type { #define MOVE_ANON 0x1U
MOVE_CHARGE_TYPE_ANON, /* private anonymous page and swap of it */ #define MOVE_FILE 0x2U
MOVE_CHARGE_TYPE_FILE, /* file page(including tmpfs) and swap of it */ #define MOVE_MASK (MOVE_ANON | MOVE_FILE)
NR_MOVE_TYPE,
};
/* "mc" and its members are protected by cgroup_mutex */ /* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct { static struct move_charge_struct {
spinlock_t lock; /* for from, to */ spinlock_t lock; /* for from, to */
struct mem_cgroup *from; struct mem_cgroup *from;
struct mem_cgroup *to; struct mem_cgroup *to;
unsigned long immigrate_flags; unsigned long flags;
unsigned long precharge; unsigned long precharge;
unsigned long moved_charge; unsigned long moved_charge;
unsigned long moved_swap; unsigned long moved_swap;
...@@ -394,16 +391,6 @@ static struct move_charge_struct { ...@@ -394,16 +391,6 @@ static struct move_charge_struct {
.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq), .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
}; };
/*
 * Returns true if anonymous pages (and their swap) are selected for
 * charge moving, i.e. the MOVE_CHARGE_TYPE_ANON bit is set in the
 * immigrate_flags bitmap of the global move-charge state "mc".
 * (Removed by this commit in favor of an open-coded flags test.)
 */
static bool move_anon(void)
{
return test_bit(MOVE_CHARGE_TYPE_ANON, &mc.immigrate_flags);
}
/*
 * Returns true if file pages (including tmpfs) are selected for charge
 * moving, i.e. the MOVE_CHARGE_TYPE_FILE bit is set in the
 * immigrate_flags bitmap of the global move-charge state "mc".
 * (Removed by this commit in favor of an open-coded flags test.)
 */
static bool move_file(void)
{
return test_bit(MOVE_CHARGE_TYPE_FILE, &mc.immigrate_flags);
}
/* /*
* Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
* limit reclaim to prevent infinite loops, if they ever occur. * limit reclaim to prevent infinite loops, if they ever occur.
...@@ -3500,7 +3487,7 @@ static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css, ...@@ -3500,7 +3487,7 @@ static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
{ {
struct mem_cgroup *memcg = mem_cgroup_from_css(css); struct mem_cgroup *memcg = mem_cgroup_from_css(css);
if (val >= (1 << NR_MOVE_TYPE)) if (val & ~MOVE_MASK)
return -EINVAL; return -EINVAL;
/* /*
...@@ -4773,12 +4760,12 @@ static struct page *mc_handle_present_pte(struct vm_area_struct *vma, ...@@ -4773,12 +4760,12 @@ static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
if (!page || !page_mapped(page)) if (!page || !page_mapped(page))
return NULL; return NULL;
if (PageAnon(page)) { if (PageAnon(page)) {
/* we don't move shared anon */ if (!(mc.flags & MOVE_ANON))
if (!move_anon())
return NULL; return NULL;
} else if (!move_file()) } else {
/* we ignore mapcount for file pages */ if (!(mc.flags & MOVE_FILE))
return NULL; return NULL;
}
if (!get_page_unless_zero(page)) if (!get_page_unless_zero(page))
return NULL; return NULL;
...@@ -4792,7 +4779,7 @@ static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, ...@@ -4792,7 +4779,7 @@ static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
struct page *page = NULL; struct page *page = NULL;
swp_entry_t ent = pte_to_swp_entry(ptent); swp_entry_t ent = pte_to_swp_entry(ptent);
if (!move_anon() || non_swap_entry(ent)) if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent))
return NULL; return NULL;
/* /*
* Because lookup_swap_cache() updates some statistics counter, * Because lookup_swap_cache() updates some statistics counter,
...@@ -4821,7 +4808,7 @@ static struct page *mc_handle_file_pte(struct vm_area_struct *vma, ...@@ -4821,7 +4808,7 @@ static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
if (!vma->vm_file) /* anonymous vma */ if (!vma->vm_file) /* anonymous vma */
return NULL; return NULL;
if (!move_file()) if (!(mc.flags & MOVE_FILE))
return NULL; return NULL;
mapping = vma->vm_file->f_mapping; mapping = vma->vm_file->f_mapping;
...@@ -4900,7 +4887,7 @@ static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma, ...@@ -4900,7 +4887,7 @@ static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
page = pmd_page(pmd); page = pmd_page(pmd);
VM_BUG_ON_PAGE(!page || !PageHead(page), page); VM_BUG_ON_PAGE(!page || !PageHead(page), page);
if (!move_anon()) if (!(mc.flags & MOVE_ANON))
return ret; return ret;
if (page->mem_cgroup == mc.from) { if (page->mem_cgroup == mc.from) {
ret = MC_TARGET_PAGE; ret = MC_TARGET_PAGE;
...@@ -5042,15 +5029,15 @@ static int mem_cgroup_can_attach(struct cgroup_subsys_state *css, ...@@ -5042,15 +5029,15 @@ static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
struct task_struct *p = cgroup_taskset_first(tset); struct task_struct *p = cgroup_taskset_first(tset);
int ret = 0; int ret = 0;
struct mem_cgroup *memcg = mem_cgroup_from_css(css); struct mem_cgroup *memcg = mem_cgroup_from_css(css);
unsigned long move_charge_at_immigrate; unsigned long move_flags;
/* /*
* We are now commited to this value whatever it is. Changes in this * We are now commited to this value whatever it is. Changes in this
* tunable will only affect upcoming migrations, not the current one. * tunable will only affect upcoming migrations, not the current one.
* So we need to save it, and keep it going. * So we need to save it, and keep it going.
*/ */
move_charge_at_immigrate = memcg->move_charge_at_immigrate; move_flags = ACCESS_ONCE(memcg->move_charge_at_immigrate);
if (move_charge_at_immigrate) { if (move_flags) {
struct mm_struct *mm; struct mm_struct *mm;
struct mem_cgroup *from = mem_cgroup_from_task(p); struct mem_cgroup *from = mem_cgroup_from_task(p);
...@@ -5070,7 +5057,7 @@ static int mem_cgroup_can_attach(struct cgroup_subsys_state *css, ...@@ -5070,7 +5057,7 @@ static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
spin_lock(&mc.lock); spin_lock(&mc.lock);
mc.from = from; mc.from = from;
mc.to = memcg; mc.to = memcg;
mc.immigrate_flags = move_charge_at_immigrate; mc.flags = move_flags;
spin_unlock(&mc.lock); spin_unlock(&mc.lock);
/* We set mc.moving_task later */ /* We set mc.moving_task later */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment