Commit 02fdb25f authored by Liam R. Howlett, committed by Andrew Morton

mm/mmap: change detached vma locking scheme

Don't set the lock to the mm lock so that the detached VMA tree does not
complain about being unlocked when the mmap_lock is dropped prior to
freeing the tree.

Introduce mt_on_stack() for setting the external lock to NULL only when
LOCKDEP is used.

Move the destroying of the detached tree outside the mmap lock
altogether.

Link: https://lkml.kernel.org/r/20230719183142.ktgcmuj2pnlr3h3s@revolver
Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oliver Sang <oliver.sang@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 134d153c
@@ -187,10 +187,13 @@ typedef struct lockdep_map *lockdep_map_p;
 #define mt_set_external_lock(mt, lock) \
 	(mt)->ma_external_lock = &(lock)->dep_map
+#define mt_on_stack(mt)	(mt).ma_external_lock = NULL
 #else
 typedef struct { /* nothing */ } lockdep_map_p;
 #define mt_lock_is_held(mt)	1
 #define mt_set_external_lock(mt, lock)	do { } while (0)
+#define mt_on_stack(mt)	do { } while (0)
 #endif
 /*
...
@@ -2428,7 +2428,7 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
 	unsigned long locked_vm = 0;
 	MA_STATE(mas_detach, &mt_detach, 0, 0);
 	mt_init_flags(&mt_detach, vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
-	mt_set_external_lock(&mt_detach, &mm->mmap_lock);
+	mt_on_stack(mt_detach);
 	/*
 	 * If we need to split any vma, do it now to save pain later.
@@ -2546,11 +2546,11 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
 	/* Statistics and freeing VMAs */
 	mas_set(&mas_detach, start);
 	remove_mt(mm, &mas_detach);
-	__mt_destroy(&mt_detach);
 	validate_mm(mm);
 	if (unlock)
 		mmap_read_unlock(mm);
+	__mt_destroy(&mt_detach);
 	return 0;
 clear_tree_failed:
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment