Commit 8869b8f6 authored by Hugh Dickins, committed by Linus Torvalds

memcg: memcontrol whitespace cleanups

Sorry, before getting down to more important changes, I'd like to do some
cleanup in memcontrol.c.  This patch doesn't change the code generated, but
cleans up whitespace, moves up a double declaration, removes an unused enum,
removes void returns, removes misleading comments, that kind of thing.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hirokazu Takahashi <taka@valinux.co.jp>
Cc: YAMAMOTO Takashi <yamamoto@valinux.co.jp>
Cc: Paul Menage <menage@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 8289546e
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -137,6 +137,7 @@ struct mem_cgroup {
 	 */
 	struct mem_cgroup_stat stat;
 };
+static struct mem_cgroup init_mem_cgroup;
 
 /*
  * We use the lower bit of the page->page_cgroup pointer as a bit spin
@@ -177,20 +178,11 @@ static inline enum zone_type page_cgroup_zid(struct page_cgroup *pc)
 	return page_zonenum(pc->page);
 }
 
-enum {
-	MEM_CGROUP_TYPE_UNSPEC = 0,
-	MEM_CGROUP_TYPE_MAPPED,
-	MEM_CGROUP_TYPE_CACHED,
-	MEM_CGROUP_TYPE_ALL,
-	MEM_CGROUP_TYPE_MAX,
-};
-
 enum charge_type {
 	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
 	MEM_CGROUP_CHARGE_TYPE_MAPPED,
 };
 
 /*
  * Always modified under lru lock. Then, not necessary to preempt_disable()
  */
@@ -199,11 +191,10 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *mem, int flags,
 {
 	int val = (charge)? 1 : -1;
 	struct mem_cgroup_stat *stat = &mem->stat;
-	VM_BUG_ON(!irqs_disabled());
 
+	VM_BUG_ON(!irqs_disabled());
 	if (flags & PAGE_CGROUP_FLAG_CACHE)
-		__mem_cgroup_stat_add_safe(stat,
-				MEM_CGROUP_STAT_CACHE, val);
+		__mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_CACHE, val);
 	else
 		__mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_RSS, val);
 }
@@ -240,8 +231,6 @@ static unsigned long mem_cgroup_get_all_zonestat(struct mem_cgroup *mem,
 	return total;
 }
 
-static struct mem_cgroup init_mem_cgroup;
-
 static inline
 struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
 {
@@ -273,8 +262,7 @@ void mm_free_cgroup(struct mm_struct *mm)
 
 static inline int page_cgroup_locked(struct page *page)
 {
-	return bit_spin_is_locked(PAGE_CGROUP_LOCK_BIT,
-					&page->page_cgroup);
+	return bit_spin_is_locked(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
 }
 
 static void page_assign_page_cgroup(struct page *page, struct page_cgroup *pc)
@@ -285,8 +273,7 @@ static void page_assign_page_cgroup(struct page *page, struct page_cgroup *pc)
 
 struct page_cgroup *page_get_page_cgroup(struct page *page)
 {
-	return (struct page_cgroup *)
-		(page->page_cgroup & ~PAGE_CGROUP_LOCK);
+	return (struct page_cgroup *) (page->page_cgroup & ~PAGE_CGROUP_LOCK);
 }
 
 static void __always_inline lock_page_cgroup(struct page *page)
@@ -308,7 +295,6 @@ static void __always_inline unlock_page_cgroup(struct page *page)
  * A can can detect failure of clearing by following
  *  clear_page_cgroup(page, pc) == pc
  */
-
 static struct page_cgroup *clear_page_cgroup(struct page *page,
 						struct page_cgroup *pc)
 {
@@ -417,6 +403,7 @@ int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
 	rss = (long)mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
 	return (int)((rss * 100L) / total);
 }
+
 /*
  * This function is called from vmscan.c. In page reclaiming loop. balance
  * between active and inactive list is calculated. For memory controller
@@ -480,7 +467,6 @@ long mem_cgroup_calc_reclaim_inactive(struct mem_cgroup *mem,
 	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);
 
 	nr_inactive = MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE);
-
 	return (nr_inactive >> priority);
 }
 
@@ -601,16 +587,11 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
 	rcu_read_lock();
 	mem = rcu_dereference(mm->mem_cgroup);
 	/*
-	 * For every charge from the cgroup, increment reference
-	 * count
+	 * For every charge from the cgroup, increment reference count
 	 */
 	css_get(&mem->css);
 	rcu_read_unlock();
 
-	/*
-	 * If we created the page_cgroup, we should free it on exceeding
-	 * the cgroup limit.
-	 */
 	while (res_counter_charge(&mem->res, PAGE_SIZE)) {
 		if (!(gfp_mask & __GFP_WAIT))
 			goto out;
@@ -660,7 +641,6 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
 	mz = page_cgroup_zoneinfo(pc);
 
 	spin_lock_irqsave(&mz->lru_lock, flags);
-	/* Update statistics vector */
 	__mem_cgroup_add_list(pc);
 	spin_unlock_irqrestore(&mz->lru_lock, flags);
 
@@ -673,26 +653,19 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
 	return -ENOMEM;
 }
 
-int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
-			gfp_t gfp_mask)
+int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
 {
 	return mem_cgroup_charge_common(page, mm, gfp_mask,
 				MEM_CGROUP_CHARGE_TYPE_MAPPED);
 }
 
-/*
- * See if the cached pages should be charged at all?
- */
 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 				gfp_t gfp_mask)
 {
-	int ret = 0;
 	if (!mm)
 		mm = &init_mm;
 
-	ret = mem_cgroup_charge_common(page, mm, gfp_mask,
+	return mem_cgroup_charge_common(page, mm, gfp_mask,
 				MEM_CGROUP_CHARGE_TYPE_CACHE);
-
-	return ret;
 }
 
 /*
@@ -742,11 +715,11 @@ void mem_cgroup_uncharge_page(struct page *page)
  * Returns non-zero if a page (under migration) has valid page_cgroup member.
  * Refcnt of page_cgroup is incremented.
  */
-
 int mem_cgroup_prepare_migration(struct page *page)
 {
 	struct page_cgroup *pc;
 	int ret = 0;
+
 	lock_page_cgroup(page);
 	pc = page_get_page_cgroup(page);
 	if (pc && atomic_inc_not_zero(&pc->ref_cnt))
@@ -759,28 +732,30 @@ void mem_cgroup_end_migration(struct page *page)
 {
 	mem_cgroup_uncharge_page(page);
 }
 
 /*
- * We know both *page* and *newpage* are now not-on-LRU and Pg_locked.
+ * We know both *page* and *newpage* are now not-on-LRU and PG_locked.
  * And no race with uncharge() routines because page_cgroup for *page*
  * has extra one reference by mem_cgroup_prepare_migration.
  */
 void mem_cgroup_page_migration(struct page *page, struct page *newpage)
 {
 	struct page_cgroup *pc;
 	struct mem_cgroup *mem;
 	unsigned long flags;
 	struct mem_cgroup_per_zone *mz;
+
 retry:
 	pc = page_get_page_cgroup(page);
 	if (!pc)
 		return;
+
 	mem = pc->mem_cgroup;
 	mz = page_cgroup_zoneinfo(pc);
 	if (clear_page_cgroup(page, pc) != pc)
 		goto retry;
-	spin_lock_irqsave(&mz->lru_lock, flags);
 
+	spin_lock_irqsave(&mz->lru_lock, flags);
 	__mem_cgroup_remove_list(pc);
 	spin_unlock_irqrestore(&mz->lru_lock, flags);
@@ -793,7 +768,6 @@ void mem_cgroup_page_migration(struct page *page, struct page *newpage)
 	spin_lock_irqsave(&mz->lru_lock, flags);
 	__mem_cgroup_add_list(pc);
 	spin_unlock_irqrestore(&mz->lru_lock, flags);
-	return;
 }
 
 /*
@@ -802,8 +776,7 @@ void mem_cgroup_page_migration(struct page *page, struct page *newpage)
  * *And* this routine doesn't reclaim page itself, just removes page_cgroup.
  */
 #define FORCE_UNCHARGE_BATCH	(128)
-static void
-mem_cgroup_force_empty_list(struct mem_cgroup *mem,
+static void mem_cgroup_force_empty_list(struct mem_cgroup *mem,
 			struct mem_cgroup_per_zone *mz,
 			int active)
 {
@@ -837,27 +810,27 @@ mem_cgroup_force_empty_list(struct mem_cgroup *mem,
 		} else	/* being uncharged ? ...do relax */
 			break;
 	}
+
 	spin_unlock_irqrestore(&mz->lru_lock, flags);
 	if (!list_empty(list)) {
 		cond_resched();
 		goto retry;
 	}
-	return;
 }
 
 /*
  * make mem_cgroup's charge to be 0 if there is no task.
  * This enables deleting this mem_cgroup.
  */
 int mem_cgroup_force_empty(struct mem_cgroup *mem)
 {
 	int ret = -EBUSY;
 	int node, zid;
 
 	css_get(&mem->css);
 	/*
 	 * page reclaim code (kswapd etc..) will move pages between
 	 * active_list <-> inactive_list while we don't take a lock.
 	 * So, we have to do loop here until all lists are empty.
 	 */
 	while (mem->res.usage > 0) {
@@ -879,8 +852,6 @@ int mem_cgroup_force_empty(struct mem_cgroup *mem)
 	return ret;
 }
 
-
-
 int mem_cgroup_write_strategy(char *buf, unsigned long long *tmp)
 {
 	*tmp = memparse(buf, &buf);
@@ -918,8 +889,7 @@ static ssize_t mem_force_empty_write(struct cgroup *cont,
 				size_t nbytes, loff_t *ppos)
 {
 	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
-	int ret;
-	ret = mem_cgroup_force_empty(mem);
+	int ret = mem_cgroup_force_empty(mem);
 	if (!ret)
 		ret = nbytes;
 	return ret;
@@ -928,7 +898,6 @@ static ssize_t mem_force_empty_write(struct cgroup *cont,
 /*
  * Note: This should be removed if cgroup supports write-only file.
  */
-
 static ssize_t mem_force_empty_read(struct cgroup *cont,
 				struct cftype *cft,
 				struct file *file, char __user *userbuf,
@@ -937,7 +906,6 @@ static ssize_t mem_force_empty_read(struct cgroup *cont,
 	return -EINVAL;
 }
 
-
 static const struct mem_cgroup_stat_desc {
 	const char *msg;
 	u64 unit;
@@ -990,8 +958,6 @@ static int mem_control_stat_open(struct inode *unused, struct file *file)
 	return single_open(file, mem_control_stat_show, cont);
 }
 
-
-
 static struct cftype mem_cgroup_files[] = {
 	{
 		.name = "usage_in_bytes",
@@ -1057,9 +1023,6 @@ static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
 	kfree(mem->info.nodeinfo[node]);
 }
 
-static struct mem_cgroup init_mem_cgroup;
-
-
 static struct cgroup_subsys_state *
 mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
 {
@@ -1149,7 +1112,6 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss,
 out:
 	mmput(mm);
-	return;
 }
 
 struct cgroup_subsys mem_cgroup_subsys = {