Commit 1f1183c4 authored by Andrew Morton

merge mm-hotfixes-stable into mm-nonmm-stable to pick up stackdepot changes

parents 7d8cebb9 720da1e5
@@ -14111,6 +14111,17 @@ F:	mm/
 F:	tools/mm/
 F:	tools/testing/selftests/mm/
 
+MEMORY MAPPING
+M:	Andrew Morton <akpm@linux-foundation.org>
+R:	Liam R. Howlett <Liam.Howlett@oracle.com>
+R:	Vlastimil Babka <vbabka@suse.cz>
+R:	Lorenzo Stoakes <lstoakes@gmail.com>
+L:	linux-mm@kvack.org
+S:	Maintained
+W:	http://www.linux-mm.org
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
+F:	mm/mmap.c
+
 MEMORY TECHNOLOGY DEVICES (MTD)
 M:	Miquel Raynal <miquel.raynal@bootlin.com>
 M:	Richard Weinberger <richard@nod.at>
......
@@ -92,4 +92,7 @@
 /********** VFS **********/
 #define VFS_PTR_POISON		((void *)(0xF5 + POISON_POINTER_DELTA))
 
+/********** lib/stackdepot.c **********/
+#define STACK_DEPOT_POISON	((void *)(0xD390 + POISON_POINTER_DELTA))
+
 #endif
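The new constant follows the usual pattern for kernel POISON values: it is written into pointers of freed or no-longer-valid objects so that accidental use faults or can be caught by an explicit check. Below is a minimal user-space illustration of that pattern only; the struct, helpers, and the stand-in POISON_POINTER_DELTA are hypothetical and not taken from lib/stackdepot.c.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* User-space stand-ins for the kernel definitions, for illustration only. */
#define POISON_POINTER_DELTA	0
#define STACK_DEPOT_POISON	((void *)(uintptr_t)(0xD390 + POISON_POINTER_DELTA))

struct record {
	struct record *next;
};

/* Poison the link on free so any stale user trips the check below. */
static void record_mark_freed(struct record *r)
{
	r->next = STACK_DEPOT_POISON;
}

static int record_looks_freed(const struct record *r)
{
	return r->next == STACK_DEPOT_POISON;
}

int main(void)
{
	struct record r = { .next = NULL };

	record_mark_freed(&r);
	assert(record_looks_freed(&r));
	return 0;
}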
This diff is collapsed.
@@ -362,6 +362,12 @@ static void __init pud_advanced_tests(struct pgtable_debug_args *args)
 	vaddr &= HPAGE_PUD_MASK;
 
 	pud = pfn_pud(args->pud_pfn, args->page_prot);
+	/*
+	 * Some architectures have debug checks to make sure
+	 * huge pud mapping are only found with devmap entries
+	 * For now test with only devmap entries.
+	 */
+	pud = pud_mkdevmap(pud);
 	set_pud_at(args->mm, vaddr, args->pudp, pud);
 	flush_dcache_page(page);
 	pudp_set_wrprotect(args->mm, vaddr, args->pudp);
@@ -374,6 +380,7 @@ static void __init pud_advanced_tests(struct pgtable_debug_args *args)
 	WARN_ON(!pud_none(pud));
 #endif /* __PAGETABLE_PMD_FOLDED */
 	pud = pfn_pud(args->pud_pfn, args->page_prot);
+	pud = pud_mkdevmap(pud);
 	pud = pud_wrprotect(pud);
 	pud = pud_mkclean(pud);
 	set_pud_at(args->mm, vaddr, args->pudp, pud);
@@ -391,6 +398,7 @@ static void __init pud_advanced_tests(struct pgtable_debug_args *args)
 #endif /* __PAGETABLE_PMD_FOLDED */
 
 	pud = pfn_pud(args->pud_pfn, args->page_prot);
+	pud = pud_mkdevmap(pud);
 	pud = pud_mkyoung(pud);
 	set_pud_at(args->mm, vaddr, args->pudp, pud);
 	flush_dcache_page(page);
......
@@ -4111,28 +4111,40 @@ static void filemap_cachestat(struct address_space *mapping,
 	rcu_read_lock();
 	xas_for_each(&xas, folio, last_index) {
+		int order;
 		unsigned long nr_pages;
 		pgoff_t folio_first_index, folio_last_index;
 
+		/*
+		 * Don't deref the folio. It is not pinned, and might
+		 * get freed (and reused) underneath us.
+		 *
+		 * We *could* pin it, but that would be expensive for
+		 * what should be a fast and lightweight syscall.
+		 *
+		 * Instead, derive all information of interest from
+		 * the rcu-protected xarray.
+		 */
+
 		if (xas_retry(&xas, folio))
 			continue;
 
+		order = xa_get_order(xas.xa, xas.xa_index);
+		nr_pages = 1 << order;
+		folio_first_index = round_down(xas.xa_index, 1 << order);
+		folio_last_index = folio_first_index + nr_pages - 1;
+
+		/* Folios might straddle the range boundaries, only count covered pages */
+		if (folio_first_index < first_index)
+			nr_pages -= first_index - folio_first_index;
+		if (folio_last_index > last_index)
+			nr_pages -= folio_last_index - last_index;
+
 		if (xa_is_value(folio)) {
 			/* page is evicted */
 			void *shadow = (void *)folio;
 			bool workingset; /* not used */
-			int order = xa_get_order(xas.xa, xas.xa_index);
-
-			nr_pages = 1 << order;
-			folio_first_index = round_down(xas.xa_index, 1 << order);
-			folio_last_index = folio_first_index + nr_pages - 1;
-
-			/* Folios might straddle the range boundaries, only count covered pages */
-			if (folio_first_index < first_index)
-				nr_pages -= first_index - folio_first_index;
-			if (folio_last_index > last_index)
-				nr_pages -= folio_last_index - last_index;
 
 			cs->nr_evicted += nr_pages;
@@ -4150,24 +4162,13 @@ static void filemap_cachestat(struct address_space *mapping,
 			goto resched;
 		}
 
-		nr_pages = folio_nr_pages(folio);
-		folio_first_index = folio_pgoff(folio);
-		folio_last_index = folio_first_index + nr_pages - 1;
-
-		/* Folios might straddle the range boundaries, only count covered pages */
-		if (folio_first_index < first_index)
-			nr_pages -= first_index - folio_first_index;
-		if (folio_last_index > last_index)
-			nr_pages -= folio_last_index - last_index;
-
 		/* page is in cache */
 		cs->nr_cache += nr_pages;
 
-		if (folio_test_dirty(folio))
+		if (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY))
 			cs->nr_dirty += nr_pages;
 
-		if (folio_test_writeback(folio))
+		if (xas_get_mark(&xas, PAGECACHE_TAG_WRITEBACK))
 			cs->nr_writeback += nr_pages;
 
 resched:
......
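The comment block above carries the core reasoning of this hunk: the folio is not pinned, so the walk derives order, index, and page counts from the rcu-protected xarray entry instead of dereferencing the folio. The following stand-alone sketch shows only the clamping arithmetic that counts the pages of an entry falling inside [first_index, last_index]; the helper name and the example values are hypothetical, plain user-space C, not the kernel code.

#include <stdio.h>

/*
 * An entry of 2^order pages starts at its order-aligned index; pages that
 * fall outside [first_index, last_index] are subtracted from the count.
 */
static unsigned long covered_pages(unsigned long entry_index, int order,
				   unsigned long first_index,
				   unsigned long last_index)
{
	unsigned long nr_pages = 1UL << order;
	unsigned long first = entry_index & ~(nr_pages - 1);	/* round_down */
	unsigned long last = first + nr_pages - 1;

	if (first < first_index)
		nr_pages -= first_index - first;
	if (last > last_index)
		nr_pages -= last - last_index;
	return nr_pages;
}

int main(void)
{
	/*
	 * An order-4 (16-page) entry covering indices 32..47, queried for the
	 * range [40, 100]: only pages 40..47 are counted, so this prints 8.
	 */
	printf("%lu\n", covered_pages(44, 4, 40, 100));
	return 0;
}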
@@ -65,8 +65,7 @@ void kasan_save_track(struct kasan_track *track, gfp_t flags)
 {
 	depot_stack_handle_t stack;
 
-	stack = kasan_save_stack(flags,
-			STACK_DEPOT_FLAG_CAN_ALLOC | STACK_DEPOT_FLAG_GET);
+	stack = kasan_save_stack(flags, STACK_DEPOT_FLAG_CAN_ALLOC);
 	kasan_set_track(track, stack);
 }
 
@@ -266,10 +265,9 @@ bool __kasan_slab_free(struct kmem_cache *cache, void *object,
 		return true;
 
 	/*
-	 * If the object is not put into quarantine, it will likely be quickly
-	 * reallocated. Thus, release its metadata now.
+	 * Note: Keep per-object metadata to allow KASAN print stack traces for
+	 * use-after-free-before-realloc bugs.
 	 */
-	kasan_release_object_meta(cache, object);
 
 	/* Let slab put the object onto the freelist. */
 	return false;
......
@@ -485,16 +485,6 @@ void kasan_init_object_meta(struct kmem_cache *cache, const void *object)
 	if (alloc_meta) {
 		/* Zero out alloc meta to mark it as invalid. */
 		__memset(alloc_meta, 0, sizeof(*alloc_meta));
-
-		/*
-		 * Prepare the lock for saving auxiliary stack traces.
-		 * Temporarily disable KASAN bug reporting to allow instrumented
-		 * raw_spin_lock_init to access aux_lock, which resides inside
-		 * of a redzone.
-		 */
-		kasan_disable_current();
-		raw_spin_lock_init(&alloc_meta->aux_lock);
-		kasan_enable_current();
 	}
 
 	/*
@@ -506,18 +496,8 @@ void kasan_init_object_meta(struct kmem_cache *cache, const void *object)
 
 static void release_alloc_meta(struct kasan_alloc_meta *meta)
 {
-	/* Evict the stack traces from stack depot. */
-	stack_depot_put(meta->alloc_track.stack);
-	stack_depot_put(meta->aux_stack[0]);
-	stack_depot_put(meta->aux_stack[1]);
-
-	/*
-	 * Zero out alloc meta to mark it as invalid but keep aux_lock
-	 * initialized to avoid having to reinitialize it when another object
-	 * is allocated in the same slot.
-	 */
-	__memset(&meta->alloc_track, 0, sizeof(meta->alloc_track));
-	__memset(meta->aux_stack, 0, sizeof(meta->aux_stack));
+	/* Zero out alloc meta to mark it as invalid. */
+	__memset(meta, 0, sizeof(*meta));
 }
 
 static void release_free_meta(const void *object, struct kasan_free_meta *meta)
@@ -529,27 +509,10 @@ static void release_free_meta(const void *object, struct kasan_free_meta *meta)
 	if (*(u8 *)kasan_mem_to_shadow(object) != KASAN_SLAB_FREE_META)
 		return;
 
-	/* Evict the stack trace from the stack depot. */
-	stack_depot_put(meta->free_track.stack);
-
 	/* Mark free meta as invalid. */
 	*(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREE;
 }
 
-void kasan_release_object_meta(struct kmem_cache *cache, const void *object)
-{
-	struct kasan_alloc_meta *alloc_meta;
-	struct kasan_free_meta *free_meta;
-
-	alloc_meta = kasan_get_alloc_meta(cache, object);
-	if (alloc_meta)
-		release_alloc_meta(alloc_meta);
-
-	free_meta = kasan_get_free_meta(cache, object);
-	if (free_meta)
-		release_free_meta(object, free_meta);
-}
-
 size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object)
 {
 	struct kasan_cache *info = &cache->kasan_info;
@@ -574,8 +537,6 @@ static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags)
 	struct kmem_cache *cache;
 	struct kasan_alloc_meta *alloc_meta;
 	void *object;
-	depot_stack_handle_t new_handle, old_handle;
-	unsigned long flags;
 
 	if (is_kfence_address(addr) || !slab)
 		return;
@@ -586,33 +547,18 @@ static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags)
 	if (!alloc_meta)
 		return;
 
-	new_handle = kasan_save_stack(0, depot_flags);
-
-	/*
-	 * Temporarily disable KASAN bug reporting to allow instrumented
-	 * spinlock functions to access aux_lock, which resides inside of a
-	 * redzone.
-	 */
-	kasan_disable_current();
-	raw_spin_lock_irqsave(&alloc_meta->aux_lock, flags);
-	old_handle = alloc_meta->aux_stack[1];
 	alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0];
-	alloc_meta->aux_stack[0] = new_handle;
-	raw_spin_unlock_irqrestore(&alloc_meta->aux_lock, flags);
-	kasan_enable_current();
-
-	stack_depot_put(old_handle);
+	alloc_meta->aux_stack[0] = kasan_save_stack(0, depot_flags);
 }
 
 void kasan_record_aux_stack(void *addr)
 {
-	return __kasan_record_aux_stack(addr,
-			STACK_DEPOT_FLAG_CAN_ALLOC | STACK_DEPOT_FLAG_GET);
+	return __kasan_record_aux_stack(addr, STACK_DEPOT_FLAG_CAN_ALLOC);
 }
 
 void kasan_record_aux_stack_noalloc(void *addr)
 {
-	return __kasan_record_aux_stack(addr, STACK_DEPOT_FLAG_GET);
+	return __kasan_record_aux_stack(addr, 0);
 }
 
 void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
@@ -623,7 +569,7 @@ void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
 	if (!alloc_meta)
 		return;
 
-	/* Evict previous stack traces (might exist for krealloc or mempool). */
+	/* Invalidate previous stack traces (might exist for krealloc or mempool). */
 	release_alloc_meta(alloc_meta);
 
 	kasan_save_track(&alloc_meta->alloc_track, flags);
@@ -637,7 +583,7 @@ void kasan_save_free_info(struct kmem_cache *cache, void *object)
 	if (!free_meta)
 		return;
 
-	/* Evict previous stack trace (might exist for mempool). */
+	/* Invalidate previous stack trace (might exist for mempool). */
 	release_free_meta(object, free_meta);
 
 	kasan_save_track(&free_meta->free_track, 0);
......
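Taken together, these KASAN hunks reduce auxiliary stack recording to a lock-free two-slot rotation with no stack-depot reference counting: the older handle is simply overwritten rather than released with stack_depot_put. Below is a hedged, stand-alone sketch of that rotation only; the types, helper names, and counter-based stack handles are hypothetical, not the kernel implementation.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t stack_handle_t;	/* stands in for depot_stack_handle_t */

struct alloc_meta_sketch {
	stack_handle_t aux_stack[2];	/* two most recent auxiliary stack handles */
};

/* Hypothetical stand-in for kasan_save_stack(): just hands out increasing ids. */
static stack_handle_t save_stack_sketch(void)
{
	static stack_handle_t next = 1;
	return next++;
}

/*
 * Two-slot rotation: the oldest handle is dropped by overwriting it (no
 * stack_depot_put), and slot 0 always holds the newest stack.
 */
static void record_aux_stack_sketch(struct alloc_meta_sketch *meta)
{
	meta->aux_stack[1] = meta->aux_stack[0];
	meta->aux_stack[0] = save_stack_sketch();
}

int main(void)
{
	struct alloc_meta_sketch meta = { { 0, 0 } };

	record_aux_stack_sketch(&meta);
	record_aux_stack_sketch(&meta);
	record_aux_stack_sketch(&meta);
	/* Prints "newest=3 previous=2": only the two latest stacks survive. */
	printf("newest=%u previous=%u\n",
	       (unsigned)meta.aux_stack[0], (unsigned)meta.aux_stack[1]);
	return 0;
}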
@@ -6,7 +6,6 @@
 #include <linux/kasan.h>
 #include <linux/kasan-tags.h>
 #include <linux/kfence.h>
-#include <linux/spinlock.h>
 #include <linux/stackdepot.h>
 
 #if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
@@ -265,13 +264,6 @@ struct kasan_global {
 struct kasan_alloc_meta {
 	struct kasan_track alloc_track;
 	/* Free track is stored in kasan_free_meta. */
-	/*
-	 * aux_lock protects aux_stack from accesses from concurrent
-	 * kasan_record_aux_stack calls. It is a raw spinlock to avoid sleeping
-	 * on RT kernels, as kasan_record_aux_stack_noalloc can be called from
-	 * non-sleepable contexts.
-	 */
-	raw_spinlock_t aux_lock;
 	depot_stack_handle_t aux_stack[2];
 };
 
@@ -398,10 +390,8 @@ struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
 struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
 						const void *object);
 void kasan_init_object_meta(struct kmem_cache *cache, const void *object);
-void kasan_release_object_meta(struct kmem_cache *cache, const void *object);
 #else
 static inline void kasan_init_object_meta(struct kmem_cache *cache, const void *object) { }
-static inline void kasan_release_object_meta(struct kmem_cache *cache, const void *object) { }
 #endif
 
 depot_stack_handle_t kasan_save_stack(gfp_t flags, depot_flags_t depot_flags);
......
@@ -145,7 +145,10 @@ static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache)
 	void *object = qlink_to_object(qlink, cache);
 	struct kasan_free_meta *free_meta = kasan_get_free_meta(cache, object);
 
-	kasan_release_object_meta(cache, object);
+	/*
+	 * Note: Keep per-object metadata to allow KASAN print stack traces for
+	 * use-after-free-before-realloc bugs.
+	 */
 
 	/*
 	 * If init_on_free is enabled and KASAN's free metadata is stored in
......
@@ -2522,6 +2522,14 @@ static int numamigrate_isolate_folio(pg_data_t *pgdat, struct folio *folio)
 			if (managed_zone(pgdat->node_zones + z))
 				break;
 		}
+
+		/*
+		 * If there are no managed zones, it should not proceed
+		 * further.
+		 */
+		if (z < 0)
+			return 0;
+
 		wakeup_kswapd(pgdat->node_zones + z, 0,
 			      folio_order(folio), ZONE_MOVABLE);
 		return 0;
......
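The added guard covers the case where the downward scan over the node's zones finds no managed zone at all, leaving z negative; without it, node_zones + z would be indexed out of bounds. A minimal stand-alone sketch of the scan-then-guard pattern follows; the data, zone count, and helper names are hypothetical, not the kernel code.

#include <stdbool.h>
#include <stdio.h>

#define NR_ZONES_SKETCH 4

/* Hypothetical stand-in for managed_zone(): true if the zone has managed pages. */
static bool zone_is_managed(const unsigned long *managed_pages, int z)
{
	return managed_pages[z] != 0;
}

/* Scan from the highest zone downward; a negative result means "none managed". */
static int highest_managed_zone(const unsigned long *managed_pages, int nr_zones)
{
	int z;

	for (z = nr_zones - 1; z >= 0; z--) {
		if (zone_is_managed(managed_pages, z))
			break;
	}
	return z;	/* caller must not index zones[z] when this is negative */
}

int main(void)
{
	unsigned long node_with_memory[NR_ZONES_SKETCH] = { 10, 20, 0, 0 };
	unsigned long memoryless_node[NR_ZONES_SKETCH] = { 0, 0, 0, 0 };

	printf("%d\n", highest_managed_zone(node_with_memory, NR_ZONES_SKETCH));	/* 1 */
	printf("%d\n", highest_managed_zone(memoryless_node, NR_ZONES_SKETCH));	/* -1 */
	return 0;
}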