Commit b45e2da6 authored by Linus Torvalds

Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
 "10 patches.

  Subsystems affected by this patch series: MAINTAINERS and mm (slub,
  pagealloc, memcg, kasan, vmalloc, migration, hugetlb, memory-failure,
  and process_vm_access)"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm/process_vm_access.c: include compat.h
  mm,hwpoison: fix printing of page flags
  MAINTAINERS: add Vlastimil as slab allocators maintainer
  mm/hugetlb: fix potential missing huge page size info
  mm: migrate: initialize err in do_migrate_pages
  mm/vmalloc.c: fix potential memory leak
  arm/kasan: fix the array size of kasan_early_shadow_pte[]
  mm/memcontrol: fix warning in mem_cgroup_page_lruvec()
  mm/page_alloc: add a missing mm_page_alloc_zone_locked() tracepoint
  mm, slub: consider rest of partial list if acquire_slab() fails
parents 8cbe71e7 eb351d75
MAINTAINERS
@@ -16313,6 +16313,7 @@ M:	Pekka Enberg <penberg@kernel.org>
 M:	David Rientjes <rientjes@google.com>
 M:	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 M:	Andrew Morton <akpm@linux-foundation.org>
+M:	Vlastimil Babka <vbabka@suse.cz>
 L:	linux-mm@kvack.org
 S:	Maintained
 F:	include/linux/sl?b*.h
include/linux/kasan.h
@@ -35,8 +35,12 @@ struct kunit_kasan_expectation {
 #define KASAN_SHADOW_INIT 0
 #endif
 
+#ifndef PTE_HWTABLE_PTRS
+#define PTE_HWTABLE_PTRS 0
+#endif
+
 extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
-extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE];
+extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS];
 extern pmd_t kasan_early_shadow_pmd[PTRS_PER_PMD];
 extern pud_t kasan_early_shadow_pud[PTRS_PER_PUD];
 extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];
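
Background, not part of the patch: on 32-bit ARM each Linux PTE table is followed by the hardware PTE table it shadows, so a page-table page really holds PTRS_PER_PTE + PTE_HWTABLE_PTRS entries; the generic fallback of 0 leaves every other architecture unchanged. A sketch of the ARM definitions (arch/arm/include/asm/pgtable-2level.h, quoted from memory, illustrative only):

	#define PTRS_PER_PTE		512
	#define PTE_HWTABLE_PTRS	(PTRS_PER_PTE)	/* hardware PTEs follow the Linux PTEs */
	#define PTE_HWTABLE_OFF		(PTE_HWTABLE_PTRS * sizeof(pte_t))
	#define PTE_HWTABLE_SIZE	(PTRS_PER_PTE * sizeof(u32))
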
include/linux/memcontrol.h
@@ -665,7 +665,7 @@ static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
 {
 	struct mem_cgroup *memcg = page_memcg(page);
 
-	VM_WARN_ON_ONCE_PAGE(!memcg, page);
+	VM_WARN_ON_ONCE_PAGE(!memcg && !mem_cgroup_disabled(), page);
 	return mem_cgroup_lruvec(memcg, pgdat);
 }
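
For context: when the memory controller is compiled in but disabled at boot (cgroup_disable=memory), page_memcg() is NULL for every page, so the old assertion fired on essentially every call; the fix warns only when memcg is actually active. A minimal sketch of the helper being tested, assuming the 5.11-era definition in <linux/memcontrol.h>:

	/* sketch, from memory: true when the memory controller is not in use */
	static inline bool mem_cgroup_disabled(void)
	{
		return !cgroup_subsys_enabled(memory_cgrp_subsys);
	}
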
mm/hugetlb.c
@@ -4371,7 +4371,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
 		 * So we need to block hugepage fault by PG_hwpoison bit check.
 		 */
 		if (unlikely(PageHWPoison(page))) {
-			ret = VM_FAULT_HWPOISON |
+			ret = VM_FAULT_HWPOISON_LARGE |
 				VM_FAULT_SET_HINDEX(hstate_index(h));
 			goto backout_unlocked;
 		}
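
The _LARGE variant matters because the architecture fault handler uses it to choose the address granularity reported in the SIGBUS siginfo; plain VM_FAULT_HWPOISON makes a poisoned huge page look like a single base page to userspace. A sketch of how the x86 fault path decodes it (5.11-era do_sigbus(), quoted from memory):

	/* sketch: map the hwpoison fault code to the si_addr_lsb reported to userspace */
	if (fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) {
		unsigned int lsb = 0;

		if (fault & VM_FAULT_HWPOISON_LARGE)
			lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
		if (fault & VM_FAULT_HWPOISON)
			lsb = PAGE_SHIFT;
		force_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb);
	}
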
mm/kasan/init.c
@@ -64,7 +64,8 @@ static inline bool kasan_pmd_table(pud_t pud)
 	return false;
 }
 #endif
-pte_t kasan_early_shadow_pte[PTRS_PER_PTE] __page_aligned_bss;
+pte_t kasan_early_shadow_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS]
+	__page_aligned_bss;
 
 static inline bool kasan_pte_table(pmd_t pmd)
 {
mm/memory-failure.c
@@ -1940,7 +1940,7 @@ int soft_offline_page(unsigned long pfn, int flags)
 			goto retry;
 		}
 	} else if (ret == -EIO) {
-		pr_info("%s: %#lx: unknown page type: %lx (%pGP)\n",
+		pr_info("%s: %#lx: unknown page type: %lx (%pGp)\n",
 			__func__, pfn, page->flags, &page->flags);
 	}
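
%pGp (lower-case p) is the vsprintf extension that decodes page flags into their names; the upper-case variant is not a recognized specifier, so the flags were not being decoded. Hedged usage example, mirroring what dump_page() does:

	/* illustrative only: print the raw flags plus the decoded flag names */
	pr_info("page flags: %#lx (%pGp)\n", page->flags, &page->flags);
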
mm/mempolicy.c
@@ -1111,7 +1111,7 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
 		     const nodemask_t *to, int flags)
 {
 	int busy = 0;
-	int err;
+	int err = 0;
 	nodemask_t tmp;
 
 	migrate_prep();
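
Why err needs an initializer: the migration loop can break out before migrate_to_node() is ever called (nothing left to move on the first pass), and the return path still reads err. A condensed sketch of that shape, not the exact kernel code:

	/* sketch: err must start at 0 because the loop below may never assign it */
	int busy = 0;
	int err = 0;
	nodemask_t tmp = *from;

	while (!nodes_empty(tmp)) {
		int source = NUMA_NO_NODE, dest = 0;

		/* scan tmp/to for a (source, dest) pair worth moving; omitted here */
		if (source == NUMA_NO_NODE)
			break;			/* first-pass break: err never written */
		node_clear(source, tmp);
		err = migrate_to_node(mm, source, dest, flags);
		if (err > 0)
			busy += err;
		if (err < 0)
			break;
	}
	return err < 0 ? err : busy;
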
mm/page_alloc.c
@@ -2862,20 +2862,20 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
 {
 	struct page *page;
 
-#ifdef CONFIG_CMA
-	/*
-	 * Balance movable allocations between regular and CMA areas by
-	 * allocating from CMA when over half of the zone's free memory
-	 * is in the CMA area.
-	 */
-	if (alloc_flags & ALLOC_CMA &&
-	    zone_page_state(zone, NR_FREE_CMA_PAGES) >
-	    zone_page_state(zone, NR_FREE_PAGES) / 2) {
-		page = __rmqueue_cma_fallback(zone, order);
-		if (page)
-			return page;
+	if (IS_ENABLED(CONFIG_CMA)) {
+		/*
+		 * Balance movable allocations between regular and CMA areas by
+		 * allocating from CMA when over half of the zone's free memory
+		 * is in the CMA area.
+		 */
+		if (alloc_flags & ALLOC_CMA &&
+		    zone_page_state(zone, NR_FREE_CMA_PAGES) >
+		    zone_page_state(zone, NR_FREE_PAGES) / 2) {
+			page = __rmqueue_cma_fallback(zone, order);
+			if (page)
+				goto out;
+		}
 	}
-#endif
 retry:
 	page = __rmqueue_smallest(zone, order, migratetype);
 	if (unlikely(!page)) {
@@ -2886,8 +2886,9 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
 						alloc_flags))
 			goto retry;
 	}
-
-	trace_mm_page_alloc_zone_locked(page, order, migratetype);
+out:
+	if (page)
+		trace_mm_page_alloc_zone_locked(page, order, migratetype);
 	return page;
 }
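
Two independent fixes land here: the early return in the CMA branch used to skip the mm_page_alloc_zone_locked tracepoint, and replacing #ifdef with IS_ENABLED() keeps the CMA branch visible to the compiler (so it is always type-checked) while still being discarded when CONFIG_CMA=n, because IS_ENABLED() expands to a compile-time 0 or 1. A minimal illustration of the idiom (hypothetical helper function, real macro):

	#include <linux/kconfig.h>	/* IS_ENABLED() */

	/* IS_ENABLED(CONFIG_CMA) is a constant 1 when CMA is =y/=m and 0 otherwise,
	 * so the branch below is parsed and type-checked but compiled away if unset */
	static bool cma_first_allowed(unsigned int alloc_flags)
	{
		if (IS_ENABLED(CONFIG_CMA))
			return alloc_flags & ALLOC_CMA;
		return false;
	}
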
mm/process_vm_access.c
@@ -9,6 +9,7 @@
 #include <linux/mm.h>
 #include <linux/uio.h>
 #include <linux/sched.h>
+#include <linux/compat.h>
 #include <linux/sched/mm.h>
 #include <linux/highmem.h>
 #include <linux/ptrace.h>
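
The include is needed because process_vm_rw() calls in_compat_syscall(), which is declared in <linux/compat.h>; configurations that did not pull the header in transitively failed to build. Roughly where it is used (5.11-era call, quoted from memory, treat as a sketch):

	/* sketch: import the remote iovec honouring the caller's compat ABI */
	iov_r = iovec_from_user(rvec, riovcnt, UIO_FASTIOV, iovstack_r,
				in_compat_syscall());
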
mm/slub.c
@@ -1973,7 +1973,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
 		t = acquire_slab(s, n, page, object == NULL, &objects);
 		if (!t)
-			break;
+			continue; /* cmpxchg raced */
 
 		available += objects;
 		if (!object) {
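
acquire_slab() claims a slab off n->partial with a cmpxchg; when another CPU wins that race it returns NULL. The old break abandoned the rest of the partial list in that case, while continue simply tries the next slab. Simplified shape of the surrounding loop (a sketch, not the exact 5.11 code):

	/* sketch of get_partial_node(): keep walking the partial list when one
	 * acquire_slab() attempt loses the cmpxchg race */
	list_for_each_entry_safe(page, page2, &n->partial, slab_list) {
		void *t;

		if (!pfmemalloc_match(page, flags))
			continue;

		t = acquire_slab(s, n, page, object == NULL, &objects);
		if (!t)
			continue;	/* raced: move on instead of giving up */

		/* ... account the acquired objects, stop once enough are available ... */
	}
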
mm/vmalloc.c
@@ -2420,8 +2420,10 @@ void *vmap(struct page **pages, unsigned int count,
 		return NULL;
 	}
 
-	if (flags & VM_MAP_PUT_PAGES)
+	if (flags & VM_MAP_PUT_PAGES) {
 		area->pages = pages;
+		area->nr_pages = count;
+	}
 
 	return area->addr;
 }
 EXPORT_SYMBOL(vmap);
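
Without area->nr_pages being set, a later vfree() of a VM_MAP_PUT_PAGES mapping walks a zero-length pages array and never releases the pages, which is the leak being fixed. Roughly what the free side does (a sketch of the 5.11-era __vunmap(), from memory):

	/* sketch: vfree() -> __vunmap() can only put the pages it knows about */
	if (deallocate_pages) {
		unsigned int i;

		for (i = 0; i < area->nr_pages; i++)	/* zero iterations if nr_pages == 0 */
			__free_pages(area->pages[i], 0);
		kvfree(area->pages);
	}
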