Commit d5b69e38 authored by Hugh Dickins, committed by Linus Torvalds

memcg: memcontrol uninlined and static

More cleanup to memcontrol.c, this time changing some of the code generated.
Let the compiler decide what to inline (except for page_cgroup_locked which is
only used when CONFIG_DEBUG_VM): the __always_inline on lock_page_cgroup etc.
was quite a waste since bit_spin_lock etc.  are inlines in a header file; made
mem_cgroup_force_empty and mem_cgroup_write_strategy static.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hirokazu Takahashi <taka@valinux.co.jp>
Cc: YAMAMOTO Takashi <yamamoto@valinux.co.jp>
Cc: Paul Menage <menage@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 8869b8f6
@@ -168,12 +168,12 @@ struct page_cgroup {
 #define PAGE_CGROUP_FLAG_CACHE	(0x1)	/* charged as cache */
 #define PAGE_CGROUP_FLAG_ACTIVE (0x2)	/* page is active in this cgroup */
-static inline int page_cgroup_nid(struct page_cgroup *pc)
+static int page_cgroup_nid(struct page_cgroup *pc)
 {
 	return page_to_nid(pc->page);
 }
-static inline enum zone_type page_cgroup_zid(struct page_cgroup *pc)
+static enum zone_type page_cgroup_zid(struct page_cgroup *pc)
 {
 	return page_zonenum(pc->page);
 }
@@ -199,14 +199,13 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *mem, int flags,
 	__mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_RSS, val);
 }
-static inline struct mem_cgroup_per_zone *
+static struct mem_cgroup_per_zone *
 mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
 {
-	BUG_ON(!mem->info.nodeinfo[nid]);
 	return &mem->info.nodeinfo[nid]->zoneinfo[zid];
 }
-static inline struct mem_cgroup_per_zone *
+static struct mem_cgroup_per_zone *
 page_cgroup_zoneinfo(struct page_cgroup *pc)
 {
 	struct mem_cgroup *mem = pc->mem_cgroup;
@@ -231,16 +230,14 @@ static unsigned long mem_cgroup_get_all_zonestat(struct mem_cgroup *mem,
 	return total;
 }
-static inline
-struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
+static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
 {
 	return container_of(cgroup_subsys_state(cont,
 				mem_cgroup_subsys_id), struct mem_cgroup,
 				css);
 }
-static inline
-struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
+static struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
 {
 	return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
 				struct mem_cgroup, css);
@@ -276,13 +273,12 @@ struct page_cgroup *page_get_page_cgroup(struct page *page)
 	return (struct page_cgroup *) (page->page_cgroup & ~PAGE_CGROUP_LOCK);
 }
-static void __always_inline lock_page_cgroup(struct page *page)
+static void lock_page_cgroup(struct page *page)
 {
 	bit_spin_lock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
-	VM_BUG_ON(!page_cgroup_locked(page));
 }
-static void __always_inline unlock_page_cgroup(struct page *page)
+static void unlock_page_cgroup(struct page *page)
 {
 	bit_spin_unlock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
 }
@@ -741,16 +737,14 @@ void mem_cgroup_end_migration(struct page *page)
 void mem_cgroup_page_migration(struct page *page, struct page *newpage)
 {
 	struct page_cgroup *pc;
-	struct mem_cgroup *mem;
-	unsigned long flags;
 	struct mem_cgroup_per_zone *mz;
+	unsigned long flags;
 retry:
 	pc = page_get_page_cgroup(page);
 	if (!pc)
 		return;
-	mem = pc->mem_cgroup;
 	mz = page_cgroup_zoneinfo(pc);
 	if (clear_page_cgroup(page, pc) != pc)
 		goto retry;
@@ -822,7 +816,7 @@ static void mem_cgroup_force_empty_list(struct mem_cgroup *mem,
  * make mem_cgroup's charge to be 0 if there is no task.
  * This enables deleting this mem_cgroup.
  */
-int mem_cgroup_force_empty(struct mem_cgroup *mem)
+static int mem_cgroup_force_empty(struct mem_cgroup *mem)
 {
 	int ret = -EBUSY;
 	int node, zid;
@@ -852,7 +846,7 @@ int mem_cgroup_force_empty(struct mem_cgroup *mem)
 	return ret;
 }
-int mem_cgroup_write_strategy(char *buf, unsigned long long *tmp)
+static int mem_cgroup_write_strategy(char *buf, unsigned long long *tmp)
 {
 	*tmp = memparse(buf, &buf);
 	if (*buf != '\0')
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment