Commit 43c4f67c authored by Linus Torvalds

Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
 "7 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm: fix false-positive WARN_ON() in truncate/invalidate for hugetlb
  kasan: support use-after-scope detection
  kasan: update kasan_global for gcc 7
  lib/debugobjects: export for use in modules
  zram: fix unbalanced idr management at hot removal
  thp: fix corner case of munlock() of PTE-mapped THPs
  mm, thp: propagation of conditional compilation in khugepaged.c
parents f513581c 5cbc198a
@@ -1403,7 +1403,8 @@ static ssize_t hot_remove_store(struct class *class,
         zram = idr_find(&zram_index_idr, dev_id);
         if (zram) {
                 ret = zram_remove(zram);
-                idr_remove(&zram_index_idr, dev_id);
+                if (!ret)
+                        idr_remove(&zram_index_idr, dev_id);
         } else {
                 ret = -ENODEV;
         }
...
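The change drops the idr entry only when zram_remove() succeeds, so a device that is still busy keeps its id instead of losing it while staying alive. A minimal user-space sketch of the same pattern, assuming a hypothetical fixed-size device table and teardown helper rather than the kernel idr/zram API:

        /* Illustrative sketch only: a hypothetical table, not the kernel idr API. */
        #include <errno.h>
        #include <stddef.h>

        #define MAX_DEVS 16

        struct dev { int busy; };

        static struct dev *dev_table[MAX_DEVS];  /* id -> device, NULL when free */

        static int destroy_dev(struct dev *d)
        {
                return d->busy ? -EBUSY : 0;     /* teardown can fail */
        }

        static int hot_remove(int id)
        {
                struct dev *d = (id >= 0 && id < MAX_DEVS) ? dev_table[id] : NULL;
                int ret;

                if (!d)
                        return -ENODEV;

                ret = destroy_dev(d);
                if (!ret)                        /* release the id only on success; */
                        dev_table[id] = NULL;    /* a busy device keeps its slot    */
                return ret;
        }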
@@ -263,7 +263,9 @@
 #endif
 #endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP && !__CHECKER__ */
 
-#if GCC_VERSION >= 50000
+#if GCC_VERSION >= 70000
+#define KASAN_ABI_VERSION 5
+#elif GCC_VERSION >= 50000
 #define KASAN_ABI_VERSION 4
 #elif GCC_VERSION >= 40902
 #define KASAN_ABI_VERSION 3
...
@@ -374,16 +374,13 @@ static inline struct page *read_mapping_page(struct address_space *mapping,
 }
 
 /*
- * Get the offset in PAGE_SIZE.
- * (TODO: hugepage should have ->index in PAGE_SIZE)
+ * Get index of the page with in radix-tree
+ * (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE)
  */
-static inline pgoff_t page_to_pgoff(struct page *page)
+static inline pgoff_t page_to_index(struct page *page)
 {
         pgoff_t pgoff;
 
-        if (unlikely(PageHeadHuge(page)))
-                return page->index << compound_order(page);
-
         if (likely(!PageTransTail(page)))
                 return page->index;
 
@@ -396,6 +393,18 @@ static inline pgoff_t page_to_pgoff(struct page *page)
         return pgoff;
 }
 
+/*
+ * Get the offset in PAGE_SIZE.
+ * (TODO: hugepage should have ->index in PAGE_SIZE)
+ */
+static inline pgoff_t page_to_pgoff(struct page *page)
+{
+        if (unlikely(PageHeadHuge(page)))
+                return page->index << compound_order(page);
+
+        return page_to_index(page);
+}
+
 /*
  * Return byte-offset into filesystem object for page.
  */
...
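To make the page_to_index()/page_to_pgoff() split concrete, a worked example, assuming 4 KiB base pages and a 2 MiB hugetlb page cached at byte offset 4 MiB of its file:

        /*
         * page->index          == 2                hugetlbfs stores ->index in huge-page units
         * compound_order(page) == 9                2 MiB / 4 KiB == 512 == 1 << 9
         * page_to_index(page)  == 2                raw radix-tree index
         * page_to_pgoff(page)  == 2 << 9 == 1024   offset in PAGE_SIZE units
         *
         * The truncate/invalidate loops in mm/truncate.c (last hunks below) walk
         * radix-tree indices, which is why their WARN_ON()s switch to page_to_index().
         */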
@@ -362,6 +362,7 @@ void debug_object_init(void *addr, struct debug_obj_descr *descr)
 
         __debug_object_init(addr, descr, 0);
 }
+EXPORT_SYMBOL_GPL(debug_object_init);
 
 /**
  * debug_object_init_on_stack - debug checks when an object on stack is
@@ -376,6 +377,7 @@ void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
 
         __debug_object_init(addr, descr, 1);
 }
+EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
 
 /**
  * debug_object_activate - debug checks when an object is activated
@@ -449,6 +451,7 @@ int debug_object_activate(void *addr, struct debug_obj_descr *descr)
         }
         return 0;
 }
+EXPORT_SYMBOL_GPL(debug_object_activate);
 
 /**
  * debug_object_deactivate - debug checks when an object is deactivated
@@ -496,6 +499,7 @@ void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
 
         raw_spin_unlock_irqrestore(&db->lock, flags);
 }
+EXPORT_SYMBOL_GPL(debug_object_deactivate);
 
 /**
  * debug_object_destroy - debug checks when an object is destroyed
@@ -542,6 +546,7 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
 out_unlock:
         raw_spin_unlock_irqrestore(&db->lock, flags);
 }
+EXPORT_SYMBOL_GPL(debug_object_destroy);
 
 /**
  * debug_object_free - debug checks when an object is freed
@@ -582,6 +587,7 @@ void debug_object_free(void *addr, struct debug_obj_descr *descr)
 out_unlock:
         raw_spin_unlock_irqrestore(&db->lock, flags);
 }
+EXPORT_SYMBOL_GPL(debug_object_free);
 
 /**
  * debug_object_assert_init - debug checks when object should be init-ed
@@ -626,6 +632,7 @@ void debug_object_assert_init(void *addr, struct debug_obj_descr *descr)
 
         raw_spin_unlock_irqrestore(&db->lock, flags);
 }
+EXPORT_SYMBOL_GPL(debug_object_assert_init);
 
 /**
  * debug_object_active_state - debug checks object usage state machine
@@ -673,6 +680,7 @@ debug_object_active_state(void *addr, struct debug_obj_descr *descr,
 
         raw_spin_unlock_irqrestore(&db->lock, flags);
 }
+EXPORT_SYMBOL_GPL(debug_object_active_state);
 
 #ifdef CONFIG_DEBUG_OBJECTS_FREE
 static void __debug_check_no_obj_freed(const void *address, unsigned long size)
...
@@ -20,6 +20,11 @@
 #include <linux/uaccess.h>
 #include <linux/module.h>
 
+/*
+ * Note: test functions are marked noinline so that their names appear in
+ * reports.
+ */
+
 static noinline void __init kmalloc_oob_right(void)
 {
         char *ptr;
@@ -411,6 +416,29 @@ static noinline void __init copy_user_test(void)
         kfree(kmem);
 }
 
+static noinline void __init use_after_scope_test(void)
+{
+        volatile char *volatile p;
+
+        pr_info("use-after-scope on int\n");
+        {
+                int local = 0;
+
+                p = (char *)&local;
+        }
+        p[0] = 1;
+        p[3] = 1;
+
+        pr_info("use-after-scope on array\n");
+        {
+                char local[1024] = {0};
+
+                p = local;
+        }
+        p[0] = 1;
+        p[1023] = 1;
+}
+
 static int __init kmalloc_tests_init(void)
 {
         kmalloc_oob_right();
@@ -436,6 +464,7 @@ static int __init kmalloc_tests_init(void)
         kasan_global_oob();
         ksize_unpoisons_memory();
         copy_user_test();
+        use_after_scope_test();
         return -EAGAIN;
 }
...
@@ -764,6 +764,25 @@ EXPORT_SYMBOL(__asan_storeN_noabort);
 void __asan_handle_no_return(void) {}
 EXPORT_SYMBOL(__asan_handle_no_return);
 
+/* Emitted by compiler to poison large objects when they go out of scope. */
+void __asan_poison_stack_memory(const void *addr, size_t size)
+{
+        /*
+         * Addr is KASAN_SHADOW_SCALE_SIZE-aligned and the object is surrounded
+         * by redzones, so we simply round up size to simplify logic.
+         */
+        kasan_poison_shadow(addr, round_up(size, KASAN_SHADOW_SCALE_SIZE),
+                        KASAN_USE_AFTER_SCOPE);
+}
+EXPORT_SYMBOL(__asan_poison_stack_memory);
+
+/* Emitted by compiler to unpoison large objects when they go into scope. */
+void __asan_unpoison_stack_memory(const void *addr, size_t size)
+{
+        kasan_unpoison_shadow(addr, size);
+}
+EXPORT_SYMBOL(__asan_unpoison_stack_memory);
+
 #ifdef CONFIG_MEMORY_HOTPLUG
 static int kasan_mem_notifier(struct notifier_block *nb,
                         unsigned long action, void *data)
...
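The two hooks above, together with the KASAN_USE_AFTER_SCOPE shadow value and the report string added in the following hunks, form the use-after-scope machinery. A conceptual C sketch of where the instrumentation is assumed to land, based only on the comments in this hunk and mirroring use_after_scope_test() above; real compiler output will differ:

        static void scoped_access(void)
        {
                volatile char *p;

                {
                        char buf[64];

                        /* assumed: on entry to this block the compiler emits
                         *   __asan_unpoison_stack_memory(buf, sizeof(buf));
                         */
                        p = buf;
                        /* assumed: on exit from this block the compiler emits
                         *   __asan_poison_stack_memory(buf, sizeof(buf));
                         * marking buf's shadow bytes with KASAN_USE_AFTER_SCOPE
                         */
                }

                p[0] = 1;       /* hits poisoned shadow; mm/kasan/report.c (below)
                                 * classifies the access as "use-after-scope" */
        }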
@@ -21,6 +21,7 @@
 #define KASAN_STACK_MID         0xF2
 #define KASAN_STACK_RIGHT       0xF3
 #define KASAN_STACK_PARTIAL     0xF4
+#define KASAN_USE_AFTER_SCOPE   0xF8
 
 /* Don't break randconfig/all*config builds */
 #ifndef KASAN_ABI_VERSION
@@ -53,6 +54,9 @@ struct kasan_global {
 #if KASAN_ABI_VERSION >= 4
         struct kasan_source_location *location;
 #endif
+#if KASAN_ABI_VERSION >= 5
+        char *odr_indicator;
+#endif
 };
 
 /**
...
@@ -90,6 +90,9 @@ static void print_error_description(struct kasan_access_info *info)
         case KASAN_KMALLOC_FREE:
                 bug_type = "use-after-free";
                 break;
+        case KASAN_USE_AFTER_SCOPE:
+                bug_type = "use-after-scope";
+                break;
         }
 
         pr_err("BUG: KASAN: %s in %pS at addr %p\n",
...
@@ -103,6 +103,7 @@ static struct khugepaged_scan khugepaged_scan = {
         .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
 };
 
+#ifdef CONFIG_SYSFS
 static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
                                          struct kobj_attribute *attr,
                                          char *buf)
@@ -295,6 +296,7 @@ struct attribute_group khugepaged_attr_group = {
         .attrs = khugepaged_attr,
         .name = "khugepaged",
 };
+#endif /* CONFIG_SYSFS */
 
 #define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB)
...
@@ -190,10 +190,13 @@ unsigned int munlock_vma_page(struct page *page)
          */
         spin_lock_irq(zone_lru_lock(zone));
 
-        nr_pages = hpage_nr_pages(page);
-        if (!TestClearPageMlocked(page))
+        if (!TestClearPageMlocked(page)) {
+                /* Potentially, PTE-mapped THP: do not skip the rest PTEs */
+                nr_pages = 1;
                 goto unlock_out;
+        }
 
+        nr_pages = hpage_nr_pages(page);
         __mod_zone_page_state(zone, NR_MLOCK, -nr_pages);
 
         if (__munlock_isolate_lru_page(page, true)) {
...
@@ -283,7 +283,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
                         if (!trylock_page(page))
                                 continue;
-                        WARN_ON(page_to_pgoff(page) != index);
+                        WARN_ON(page_to_index(page) != index);
                         if (PageWriteback(page)) {
                                 unlock_page(page);
                                 continue;
@@ -371,7 +371,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
                         }
                         lock_page(page);
-                        WARN_ON(page_to_pgoff(page) != index);
+                        WARN_ON(page_to_index(page) != index);
                         wait_on_page_writeback(page);
                         truncate_inode_page(mapping, page);
                         unlock_page(page);
@@ -492,7 +492,7 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
                         if (!trylock_page(page))
                                 continue;
-                        WARN_ON(page_to_pgoff(page) != index);
+                        WARN_ON(page_to_index(page) != index);
 
                         /* Middle of THP: skip */
                         if (PageTransTail(page)) {
@@ -612,7 +612,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
                         }
                         lock_page(page);
-                        WARN_ON(page_to_pgoff(page) != index);
+                        WARN_ON(page_to_index(page) != index);
                         if (page->mapping != mapping) {
                                 unlock_page(page);
                                 continue;
...