Commit dd6eecb9 authored by Oleg Nesterov, committed by Linus Torvalds

mempolicy: unexport get_vma_policy() and remove its "task" arg

- get_vma_policy(task) is not safe if task != current, remove this
  argument.

- get_vma_policy() no longer has callers outside of mempolicy.c,
  make it static.
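
A minimal sketch of the caller-side change, using the signatures from the diff below (illustrative snippet only, not compilable on its own):

	/* Before: callers passed a task explicitly, but anything other than
	 * "current" could race with that task changing or dropping its
	 * mempolicy unless task_lock(task) was held (see the comment
	 * removed below). */
	pol = get_vma_policy(current, vma, addr);

	/* After: the task argument is gone; the fallback is always
	 * current->mempolicy, and get_vma_policy() is static in mm/mempolicy.c. */
	pol = get_vma_policy(vma, addr);
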
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: David Rientjes <rientjes@google.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 2c7c3a7d
include/linux/mempolicy.h
@@ -137,8 +137,6 @@ struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
 struct mempolicy *get_task_policy(struct task_struct *p);
 struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
 		unsigned long addr);
-struct mempolicy *get_vma_policy(struct task_struct *tsk,
-		struct vm_area_struct *vma, unsigned long addr);
 bool vma_policy_mof(struct vm_area_struct *vma);
 
 extern void numa_default_policy(void);
mm/mempolicy.c
@@ -1616,27 +1616,24 @@ struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
 }
 
 /*
- * get_vma_policy(@task, @vma, @addr)
- * @task: task for fallback if vma policy == default
+ * get_vma_policy(@vma, @addr)
  * @vma: virtual memory area whose policy is sought
  * @addr: address in @vma for shared policy lookup
  *
  * Returns effective policy for a VMA at specified address.
- * Falls back to @task or system default policy, as necessary.
- * Current or other task's task mempolicy and non-shared vma policies must be
- * protected by task_lock(task) by the caller.
+ * Falls back to current->mempolicy or system default policy, as necessary.
  * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
  * count--added by the get_policy() vm_op, as appropriate--to protect against
  * freeing by another task. It is the caller's responsibility to free the
  * extra reference for shared policies.
  */
-struct mempolicy *get_vma_policy(struct task_struct *task,
-		struct vm_area_struct *vma, unsigned long addr)
+static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
+						unsigned long addr)
 {
 	struct mempolicy *pol = __get_vma_policy(vma, addr);
 
 	if (!pol)
-		pol = get_task_policy(task);
+		pol = get_task_policy(current);
 
 	return pol;
 }
@@ -1864,7 +1861,7 @@ struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
 {
 	struct zonelist *zl;
 
-	*mpol = get_vma_policy(current, vma, addr);
+	*mpol = get_vma_policy(vma, addr);
 	*nodemask = NULL;	/* assume !MPOL_BIND */
 
 	if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
@@ -2019,7 +2016,7 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
 	unsigned int cpuset_mems_cookie;
 
 retry_cpuset:
-	pol = get_vma_policy(current, vma, addr);
+	pol = get_vma_policy(vma, addr);
 	cpuset_mems_cookie = read_mems_allowed_begin();
 
 	if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
@@ -2285,7 +2282,7 @@ int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long
 
 	BUG_ON(!vma);
 
-	pol = get_vma_policy(current, vma, addr);
+	pol = get_vma_policy(vma, addr);
 	if (!(pol->flags & MPOL_F_MOF))
 		goto out;
 