Commit ed8ea815 authored by Michel Lespinasse, committed by Linus Torvalds

mm: add CONFIG_DEBUG_VM_RB build option

Add a CONFIG_DEBUG_VM_RB build option for the previously existing
DEBUG_MM_RB code.  Now that Andi Kleen has modified it to avoid
recursive algorithms, we can expose it a bit more.

Also extend this code to run validate_mm() after stack expansion, and
to check that a vma's start and last pgoffs have not changed since its
nodes were inserted into the anon vma interval tree (it is important
that the nodes be reindexed after each such update).
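
A minimal sketch (not from this commit) of the invariant that
anon_vma_interval_tree_verify() enforces: whenever a vma's vm_start,
vm_end or vm_pgoff change, every anon_vma_chain node indexing that vma
must be removed from the interval tree before the update and reinserted
afterwards.  The helper name below is hypothetical, and the caller is
assumed to already hold the relevant anon_vma lock:

static void example_update_vma_pgoff(struct vm_area_struct *vma,
				     unsigned long new_pgoff)
{
	struct anon_vma_chain *avc;

	/* unindex every node before the interval endpoints change ... */
	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
		anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);

	vma->vm_pgoff = new_pgoff;	/* endpoints change here */

	/* ... and reindex afterwards, re-caching the start/last pgoffs */
	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
		anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
}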
Signed-off-by: Michel Lespinasse <walken@google.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Daniel Santos <daniel.santos@pobox.com>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 86c2ad19
include/linux/mm.h
@@ -1386,6 +1386,9 @@ struct anon_vma_chain *anon_vma_interval_tree_iter_first(
 		struct rb_root *root, unsigned long start, unsigned long last);
 struct anon_vma_chain *anon_vma_interval_tree_iter_next(
 	struct anon_vma_chain *node, unsigned long start, unsigned long last);
+#ifdef CONFIG_DEBUG_VM_RB
+void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
+#endif
 
 #define anon_vma_interval_tree_foreach(avc, root, start, last) \
 	for (avc = anon_vma_interval_tree_iter_first(root, start, last); \
...
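A hedged usage sketch, modelled on the rmap walk in mm/rmap.c: the
foreach macro visits every anon_vma_chain whose [start, last] pgoff
interval overlaps the query range.  Variable names here are
illustrative:

	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct anon_vma_chain *avc;

	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
		struct vm_area_struct *vma = avc->vma;
		/* each vma seen here maps the page's offset somewhere */
	}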
include/linux/rmap.h
@@ -66,6 +66,9 @@ struct anon_vma_chain {
 	struct list_head same_vma;	/* locked by mmap_sem & page_table_lock */
 	struct rb_node rb;		/* locked by anon_vma->mutex */
 	unsigned long rb_subtree_last;
+#ifdef CONFIG_DEBUG_VM_RB
+	unsigned long cached_vma_start, cached_vma_last;
+#endif
 };
 
 #ifdef CONFIG_MMU
...
lib/Kconfig.debug
@@ -798,6 +798,15 @@ config DEBUG_VM
 
 	  If unsure, say N.
 
+config DEBUG_VM_RB
+	bool "Debug VM red-black trees"
+	depends on DEBUG_VM
+	help
+	  Enable this to turn on more extended checks in the virtual-memory
+	  system that may impact performance.
+
+	  If unsure, say N.
+
 config DEBUG_VIRTUAL
 	bool "Debug VM translations"
 	depends on DEBUG_KERNEL && X86
...
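To turn the new checks on, note that DEBUG_VM_RB is only visible once
its dependency chain is satisfied; a typical .config fragment would be:

CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_VM=y
CONFIG_DEBUG_VM_RB=y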
mm/interval_tree.c
@@ -70,4 +70,43 @@ static inline unsigned long avc_last_pgoff(struct anon_vma_chain *avc)
 }
 
 INTERVAL_TREE_DEFINE(struct anon_vma_chain, rb, unsigned long, rb_subtree_last,
-		     avc_start_pgoff, avc_last_pgoff,, anon_vma_interval_tree)
+		     avc_start_pgoff, avc_last_pgoff,
+		     static inline, __anon_vma_interval_tree)
+
+void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
+				   struct rb_root *root)
+{
+#ifdef CONFIG_DEBUG_VM_RB
+	node->cached_vma_start = avc_start_pgoff(node);
+	node->cached_vma_last = avc_last_pgoff(node);
+#endif
+	__anon_vma_interval_tree_insert(node, root);
+}
+
+void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
+				   struct rb_root *root)
+{
+	__anon_vma_interval_tree_remove(node, root);
+}
+
+struct anon_vma_chain *
+anon_vma_interval_tree_iter_first(struct rb_root *root,
+				  unsigned long first, unsigned long last)
+{
+	return __anon_vma_interval_tree_iter_first(root, first, last);
+}
+
+struct anon_vma_chain *
+anon_vma_interval_tree_iter_next(struct anon_vma_chain *node,
+				 unsigned long first, unsigned long last)
+{
+	return __anon_vma_interval_tree_iter_next(node, first, last);
+}
+
+#ifdef CONFIG_DEBUG_VM_RB
+void anon_vma_interval_tree_verify(struct anon_vma_chain *node)
+{
+	WARN_ON_ONCE(node->cached_vma_start != avc_start_pgoff(node));
+	WARN_ON_ONCE(node->cached_vma_last != avc_last_pgoff(node));
+}
+#endif
...
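For reference, INTERVAL_TREE_DEFINE() comes from
include/linux/interval_tree_generic.h, added earlier in this series;
its parameters are, in order:

	INTERVAL_TREE_DEFINE(ITSTRUCT, ITRB, ITTYPE, ITSUBTREE,
			     ITSTART, ITLAST, ITSTATIC, ITPREFIX)

Passing "static inline" as ITSTATIC and "__anon_vma_interval_tree" as
ITPREFIX keeps the generated functions file-local, which is what lets
the exported wrappers above hook the insert path to cache each node's
pgoffs for later verification.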
mm/mmap.c
@@ -51,12 +51,6 @@ static void unmap_region(struct mm_struct *mm,
 		struct vm_area_struct *vma, struct vm_area_struct *prev,
 		unsigned long start, unsigned long end);
 
-/*
- * WARNING: the debugging will use recursive algorithms so never enable this
- * unless you know what you are doing.
- */
-#undef DEBUG_MM_RB
-
 /* description of effects of mapping type and prot in current implementation.
  * this is due to the limited x86 page protection hardware.  The expected
  * behavior is in parens:
...
@@ -303,7 +297,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
 	return retval;
 }
 
-#ifdef DEBUG_MM_RB
+#ifdef CONFIG_DEBUG_VM_RB
 static int browse_rb(struct rb_root *root)
 {
 	int i = 0, j;
...
@@ -337,9 +331,12 @@ void validate_mm(struct mm_struct *mm)
 {
 	int bug = 0;
 	int i = 0;
-	struct vm_area_struct *tmp = mm->mmap;
-	while (tmp) {
-		tmp = tmp->vm_next;
+	struct vm_area_struct *vma = mm->mmap;
+	while (vma) {
+		struct anon_vma_chain *avc;
+		list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
+			anon_vma_interval_tree_verify(avc);
+		vma = vma->vm_next;
 		i++;
 	}
 	if (i != mm->map_count)
...
@@ -1790,6 +1787,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 	}
 	vma_unlock_anon_vma(vma);
 	khugepaged_enter_vma_merge(vma);
+	validate_mm(vma->vm_mm);
 	return error;
 }
 #endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
...
@@ -1843,6 +1841,7 @@ int expand_downwards(struct vm_area_struct *vma,
 	}
 	vma_unlock_anon_vma(vma);
 	khugepaged_enter_vma_merge(vma);
+	validate_mm(vma->vm_mm);
 	return error;
 }
...
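Outside the visible hunks, mm/mmap.c keeps the existing no-op stub for
the !CONFIG_DEBUG_VM_RB case, so the two new validate_mm() calls in
expand_upwards()/expand_downwards() compile away when the option is
off:

#else
#define validate_mm(mm) do { } while (0)
#endif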