Commit 1a08ae36 authored by Pavel Tatashin, committed by Linus Torvalds

mm cma: rename PF_MEMALLOC_NOCMA to PF_MEMALLOC_PIN

PF_MEMALLOC_NOCMA is used to guarantee that the allocator will not
return pages that might belong to CMA region.  This is currently used
for long term gup to make sure that such pins are not going to be done
on any CMA pages.

When PF_MEMALLOC_NOCMA was introduced, we hadn't realized that it
focuses on CMA pages too much and that there is a larger class of
pages that need the same treatment.  The MOVABLE zone cannot contain any
long term pins either, so it makes sense to reuse and redefine this flag
for that use case as well.  Rename the flag to PF_MEMALLOC_PIN, which
defines an allocation context which can only get pages suitable for
long-term pins.

Also rename: memalloc_nocma_save()/memalloc_nocma_restore() to
memalloc_pin_save()/memalloc_pin_restore() and make the new functions
common.

[rppt@linux.ibm.com: fix renaming of PF_MEMALLOC_NOCMA to PF_MEMALLOC_PIN]
  Link: https://lkml.kernel.org/r/20210331163816.11517-1-rppt@kernel.org

Link: https://lkml.kernel.org/r/20210215161349.246722-6-pasha.tatashin@soleen.com
Signed-off-by: Pavel Tatashin <pasha.tatashin@soleen.com>
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: James Morris <jmorris@namei.org>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sasha Levin <sashal@kernel.org>
Cc: Steven Rostedt (VMware) <rostedt@goodmis.org>
Cc: Tyler Hicks <tyhicks@linux.microsoft.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 6e7f34eb
...@@ -1583,7 +1583,7 @@ extern struct pid *cad_pid; ...@@ -1583,7 +1583,7 @@ extern struct pid *cad_pid;
#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ #define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_mask */ #define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_mask */
#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
#define PF_MEMALLOC_NOCMA 0x10000000 /* All allocation request will have _GFP_MOVABLE cleared */ #define PF_MEMALLOC_PIN 0x10000000 /* Allocation context constrained to zones which allow long term pinning. */
#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */ #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */
#define PF_SUSPEND_TASK 0x80000000 /* This thread called freeze_processes() and should not be frozen */ #define PF_SUSPEND_TASK 0x80000000 /* This thread called freeze_processes() and should not be frozen */
......
@@ -271,29 +271,18 @@ static inline void memalloc_noreclaim_restore(unsigned int flags)
 	current->flags = (current->flags & ~PF_MEMALLOC) | flags;
 }
 
-#ifdef CONFIG_CMA
-static inline unsigned int memalloc_nocma_save(void)
+static inline unsigned int memalloc_pin_save(void)
 {
-	unsigned int flags = current->flags & PF_MEMALLOC_NOCMA;
+	unsigned int flags = current->flags & PF_MEMALLOC_PIN;
 
-	current->flags |= PF_MEMALLOC_NOCMA;
+	current->flags |= PF_MEMALLOC_PIN;
 	return flags;
 }
 
-static inline void memalloc_nocma_restore(unsigned int flags)
+static inline void memalloc_pin_restore(unsigned int flags)
 {
-	current->flags = (current->flags & ~PF_MEMALLOC_NOCMA) | flags;
+	current->flags = (current->flags & ~PF_MEMALLOC_PIN) | flags;
 }
-#else
-static inline unsigned int memalloc_nocma_save(void)
-{
-	return 0;
-}
-
-static inline void memalloc_nocma_restore(unsigned int flags)
-{
-}
-#endif
 
 #ifdef CONFIG_MEMCG
 DECLARE_PER_CPU(struct mem_cgroup *, int_active_memcg);
......
@@ -1722,7 +1722,7 @@ static long __gup_longterm_locked(struct mm_struct *mm,
 	long rc;
 
 	if (gup_flags & FOLL_LONGTERM)
-		flags = memalloc_nocma_save();
+		flags = memalloc_pin_save();
 	rc = __get_user_pages_locked(mm, start, nr_pages, pages, vmas, NULL,
 				     gup_flags);
@@ -1731,7 +1731,7 @@ static long __gup_longterm_locked(struct mm_struct *mm,
 		if (rc > 0)
 			rc = check_and_migrate_cma_pages(mm, start, rc, pages,
 							 vmas, gup_flags);
-		memalloc_nocma_restore(flags);
+		memalloc_pin_restore(flags);
 	}
 	return rc;
 }
......
@@ -1079,11 +1079,11 @@ static void enqueue_huge_page(struct hstate *h, struct page *page)
 
 static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
 {
 	struct page *page;
-	bool nocma = !!(current->flags & PF_MEMALLOC_NOCMA);
+	bool pin = !!(current->flags & PF_MEMALLOC_PIN);
 
 	lockdep_assert_held(&hugetlb_lock);
 	list_for_each_entry(page, &h->hugepage_freelists[nid], lru) {
-		if (nocma && is_migrate_cma_page(page))
+		if (pin && is_migrate_cma_page(page))
 			continue;
 		if (PageHWPoison(page))
......
@@ -3865,7 +3865,7 @@ static inline unsigned int current_alloc_flags(gfp_t gfp_mask,
 #ifdef CONFIG_CMA
 	unsigned int pflags = current->flags;
 
-	if (!(pflags & PF_MEMALLOC_NOCMA) &&
+	if (!(pflags & PF_MEMALLOC_PIN) &&
 	    gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
 		alloc_flags |= ALLOC_CMA;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment