Commit eb8e478e authored by Sahitya Tummala, committed by Sasha Levin

mm/list_lru.c: fix list_lru_count_node() to be race free

[ Upstream commit 2c80cd57 ]

list_lru_count_node() iterates over all memcgs to get the total number of
entries on the node but it can race with memcg_drain_all_list_lrus(),
which migrates the entries from a dead cgroup to another.  This can return
incorrect number of entries from list_lru_count_node().

Fix this by keeping track of entries per node and simply return it in
list_lru_count_node().

Link: http://lkml.kernel.org/r/1498707555-30525-1-git-send-email-stummala@codeaurora.org
Signed-off-by: Sahitya Tummala <stummala@codeaurora.org>
Acked-by: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Alexander Polakov <apolyakov@beget.ru>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
parent 9052d52e
@@ -44,6 +44,7 @@ struct list_lru_node {
 	/* for cgroup aware lrus points to per cgroup lists, otherwise NULL */
 	struct list_lru_memcg	*memcg_lrus;
 #endif
+	long nr_items;
 } ____cacheline_aligned_in_smp;

 struct list_lru {
...
@@ -103,6 +103,7 @@ bool list_lru_add(struct list_lru *lru, struct list_head *item)
 	if (list_empty(item)) {
 		list_add_tail(item, &l->list);
 		l->nr_items++;
+		nlru->nr_items++;
 		spin_unlock(&nlru->lock);
 		return true;
 	}
@@ -122,6 +123,7 @@ bool list_lru_del(struct list_lru *lru, struct list_head *item)
 	if (!list_empty(item)) {
 		list_del_init(item);
 		l->nr_items--;
+		nlru->nr_items--;
 		spin_unlock(&nlru->lock);
 		return true;
 	}
@@ -169,15 +171,10 @@ EXPORT_SYMBOL_GPL(list_lru_count_one);
unsigned long list_lru_count_node(struct list_lru *lru, int nid) unsigned long list_lru_count_node(struct list_lru *lru, int nid)
{ {
long count = 0; struct list_lru_node *nlru;
int memcg_idx;
count += __list_lru_count_one(lru, nid, -1); nlru = &lru->node[nid];
if (list_lru_memcg_aware(lru)) { return nlru->nr_items;
for_each_memcg_cache_index(memcg_idx)
count += __list_lru_count_one(lru, nid, memcg_idx);
}
return count;
} }
EXPORT_SYMBOL_GPL(list_lru_count_node); EXPORT_SYMBOL_GPL(list_lru_count_node);
@@ -212,6 +209,7 @@ __list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx,
 			assert_spin_locked(&nlru->lock);
 		case LRU_REMOVED:
 			isolated++;
+			nlru->nr_items--;
 			/*
 			 * If the lru lock has been dropped, our list
 			 * traversal is now invalid and so we have to
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment