Commit 6e48a966 authored by Matthew Wilcox (Oracle), committed by Vlastimil Babka

mm/kasan: Convert to struct folio and struct slab

KASAN accesses some slab-related struct page fields, so we need to
convert it to use struct slab. Some places are a bit simplified thanks
to kasan_addr_to_slab() encapsulating the PageSlab flag check through
virt_to_slab(). When resolving an object address to either a real slab
or a large kmalloc allocation, use struct folio as the intermediate type
for testing the slab flag, to avoid an unnecessary implicit
compound_head().
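
The pattern in code, as a minimal sketch (cache_of() is a hypothetical
helper for illustration only, not part of this patch):

```c
static struct kmem_cache *cache_of(const void *ptr)
{
	struct folio *folio = virt_to_folio(ptr);	/* no compound_head() needed */

	/* A single flag test on the folio decides both cases. */
	if (folio_test_slab(folio))
		return folio_slab(folio)->slab_cache;	/* a real slab object */

	return NULL;	/* large kmalloc, backed directly by the page allocator */
}
```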

[ vbabka@suse.cz: use struct folio, adjust to differences in previous
  patches ]
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Andrey Konovalov <andreyknvl@gmail.com>
Reviewed-by: Roman Gushchin <guro@fb.com>
Tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Konovalov <andreyknvl@gmail.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: <kasan-dev@googlegroups.com>
parent 50757018
include/linux/kasan.h
@@ -9,6 +9,7 @@
 
 struct kmem_cache;
 struct page;
+struct slab;
 struct vm_struct;
 struct task_struct;
 
@@ -193,11 +194,11 @@ static __always_inline size_t kasan_metadata_size(struct kmem_cache *cache)
 	return 0;
 }
 
-void __kasan_poison_slab(struct page *page);
-static __always_inline void kasan_poison_slab(struct page *page)
+void __kasan_poison_slab(struct slab *slab);
+static __always_inline void kasan_poison_slab(struct slab *slab)
 {
 	if (kasan_enabled())
-		__kasan_poison_slab(page);
+		__kasan_poison_slab(slab);
 }
 
 void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
@@ -322,7 +323,7 @@ static inline void kasan_cache_create(struct kmem_cache *cache,
 					slab_flags_t *flags) {}
 static inline void kasan_cache_create_kmalloc(struct kmem_cache *cache) {}
 static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
-static inline void kasan_poison_slab(struct page *page) {}
+static inline void kasan_poison_slab(struct slab *slab) {}
 static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
 							void *object) {}
 static inline void kasan_poison_object_data(struct kmem_cache *cache,
mm/kasan/common.c
@@ -247,8 +247,9 @@ struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
 }
 #endif
 
-void __kasan_poison_slab(struct page *page)
+void __kasan_poison_slab(struct slab *slab)
 {
+	struct page *page = slab_page(slab);
 	unsigned long i;
 
 	for (i = 0; i < compound_nr(page); i++)
@@ -401,9 +402,9 @@ void __kasan_kfree_large(void *ptr, unsigned long ip)
 
 void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
 {
-	struct page *page;
+	struct folio *folio;
 
-	page = virt_to_head_page(ptr);
+	folio = virt_to_folio(ptr);
 
 	/*
 	 * Even though this function is only called for kmem_cache_alloc and
@@ -411,12 +412,14 @@ void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
 	 * !PageSlab() when the size provided to kmalloc is larger than
 	 * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc.
 	 */
-	if (unlikely(!PageSlab(page))) {
+	if (unlikely(!folio_test_slab(folio))) {
 		if (____kasan_kfree_large(ptr, ip))
 			return;
-		kasan_poison(ptr, page_size(page), KASAN_FREE_PAGE, false);
+		kasan_poison(ptr, folio_size(folio), KASAN_FREE_PAGE, false);
 	} else {
-		____kasan_slab_free(page->slab_cache, ptr, ip, false, false);
+		struct slab *slab = folio_slab(folio);
+
+		____kasan_slab_free(slab->slab_cache, ptr, ip, false, false);
 	}
 }
@@ -560,7 +563,7 @@ void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
 
 void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
 {
-	struct page *page;
+	struct slab *slab;
 
 	if (unlikely(object == ZERO_SIZE_PTR))
 		return (void *)object;
@@ -572,13 +575,13 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
 	 */
 	kasan_unpoison(object, size, false);
 
-	page = virt_to_head_page(object);
+	slab = virt_to_slab(object);
 
 	/* Piggy-back on kmalloc() instrumentation to poison the redzone. */
-	if (unlikely(!PageSlab(page)))
+	if (unlikely(!slab))
 		return __kasan_kmalloc_large(object, size, flags);
 	else
-		return ____kasan_kmalloc(page->slab_cache, object, size, flags);
+		return ____kasan_kmalloc(slab->slab_cache, object, size, flags);
 }
 
 bool __kasan_check_byte(const void *address, unsigned long ip)
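Note the `if (unlikely(!slab))` test above: virt_to_slab(), added
earlier in this series, folds the slab-flag check in and returns NULL
for non-slab memory, so no separate PageSlab() test is needed. Roughly
(paraphrasing the mm/slab.h helper from the preceding patches):

```c
static inline struct slab *virt_to_slab(const void *addr)
{
	struct folio *folio = virt_to_folio(addr);

	/* Not a slab folio: a large kmalloc or other page-allocator memory. */
	if (!folio_test_slab(folio))
		return NULL;

	return folio_slab(folio);
}
```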
mm/kasan/generic.c
@@ -330,16 +330,16 @@ DEFINE_ASAN_SET_SHADOW(f8);
 
 static void __kasan_record_aux_stack(void *addr, bool can_alloc)
 {
-	struct page *page = kasan_addr_to_page(addr);
+	struct slab *slab = kasan_addr_to_slab(addr);
 	struct kmem_cache *cache;
 	struct kasan_alloc_meta *alloc_meta;
 	void *object;
 
-	if (is_kfence_address(addr) || !(page && PageSlab(page)))
+	if (is_kfence_address(addr) || !slab)
 		return;
 
-	cache = page->slab_cache;
-	object = nearest_obj(cache, page_slab(page), addr);
+	cache = slab->slab_cache;
+	object = nearest_obj(cache, slab, addr);
 	alloc_meta = kasan_get_alloc_meta(cache, object);
 	if (!alloc_meta)
 		return;
mm/kasan/kasan.h
@@ -265,6 +265,7 @@ bool kasan_report(unsigned long addr, size_t size,
 void kasan_report_invalid_free(void *object, unsigned long ip);
 
 struct page *kasan_addr_to_page(const void *addr);
+struct slab *kasan_addr_to_slab(const void *addr);
 
 depot_stack_handle_t kasan_save_stack(gfp_t flags, bool can_alloc);
 void kasan_set_track(struct kasan_track *track, gfp_t flags);
mm/kasan/quarantine.c
@@ -117,7 +117,7 @@ static unsigned long quarantine_batch_size;
 
 static struct kmem_cache *qlink_to_cache(struct qlist_node *qlink)
 {
-	return virt_to_head_page(qlink)->slab_cache;
+	return virt_to_slab(qlink)->slab_cache;
 }
 
 static void *qlink_to_object(struct qlist_node *qlink, struct kmem_cache *cache)
mm/kasan/report.c
@@ -150,6 +150,14 @@ struct page *kasan_addr_to_page(const void *addr)
 	return NULL;
 }
 
+struct slab *kasan_addr_to_slab(const void *addr)
+{
+	if ((addr >= (void *)PAGE_OFFSET) &&
+			(addr < high_memory))
+		return virt_to_slab(addr);
+	return NULL;
+}
+
 static void describe_object_addr(struct kmem_cache *cache, void *object,
 				const void *addr)
 {
@@ -248,8 +256,9 @@ static void print_address_description(void *addr, u8 tag)
 	pr_err("\n");
 
 	if (page && PageSlab(page)) {
-		struct kmem_cache *cache = page->slab_cache;
-		void *object = nearest_obj(cache, page_slab(page), addr);
+		struct slab *slab = page_slab(page);
+		struct kmem_cache *cache = slab->slab_cache;
+		void *object = nearest_obj(cache, slab, addr);
 
 		describe_object(cache, object, addr, tag);
 	}
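The new kasan_addr_to_slab() mirrors the existing kasan_addr_to_page()
directly above it: the PAGE_OFFSET/high_memory bounds check confirms the
address lies in the kernel's linear map before virt_to_slab() is
applied, so vmalloc, module, and other non-linear-map addresses yield
NULL rather than a bogus struct slab. For comparison, the page variant
reads (pre-existing code, quoted from memory):

```c
struct page *kasan_addr_to_page(const void *addr)
{
	if ((addr >= (void *)PAGE_OFFSET) &&
			(addr < high_memory))
		return virt_to_page(addr);
	return NULL;
}
```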
mm/kasan/report_tags.c
@@ -12,7 +12,7 @@ const char *kasan_get_bug_type(struct kasan_access_info *info)
 #ifdef CONFIG_KASAN_TAGS_IDENTIFY
 	struct kasan_alloc_meta *alloc_meta;
 	struct kmem_cache *cache;
-	struct page *page;
+	struct slab *slab;
 	const void *addr;
 	void *object;
 	u8 tag;
@@ -20,10 +20,10 @@ const char *kasan_get_bug_type(struct kasan_access_info *info)
 	tag = get_tag(info->access_addr);
 	addr = kasan_reset_tag(info->access_addr);
 
-	page = kasan_addr_to_page(addr);
-	if (page && PageSlab(page)) {
-		cache = page->slab_cache;
-		object = nearest_obj(cache, page_slab(page), (void *)addr);
+	slab = kasan_addr_to_slab(addr);
+	if (slab) {
+		cache = slab->slab_cache;
+		object = nearest_obj(cache, slab, (void *)addr);
 		alloc_meta = kasan_get_alloc_meta(cache, object);
 		if (alloc_meta) {
mm/slab.c
@@ -2604,7 +2604,7 @@ static struct slab *cache_grow_begin(struct kmem_cache *cachep,
 	 * page_address() in the latter returns a non-tagged pointer,
 	 * as it should be for slab pages.
 	 */
-	kasan_poison_slab(slab_page(slab));
+	kasan_poison_slab(slab);
 
 	/* Get slab management. */
 	freelist = alloc_slabmgmt(cachep, slab, offset,
mm/slub.c
@@ -1961,7 +1961,7 @@ static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 
 	slab->slab_cache = s;
-	kasan_poison_slab(slab_page(slab));
+	kasan_poison_slab(slab);
 
 	start = slab_address(slab);