Commit ec8832d0 authored by Alistair Popple, committed by Andrew Morton

mmu_notifiers: don't invalidate secondary TLBs as part of mmu_notifier_invalidate_range_end()

Secondary TLBs are now invalidated from the architecture-specific TLB
invalidation functions.  Therefore there is no need to explicitly notify
or invalidate secondary TLBs as part of the range-end functions.  This
means mmu_notifier_invalidate_range_only_end() and some of the
ptep_*_notify() functions can be removed.
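For context, below is a stubbed, user-space sketch of the call-flow change
(illustration only, not kernel code and not part of this commit; the *_stub
names are hypothetical stand-ins for the kernel APIs touched here).  Before
this series a call site paired a *_notify() helper with
mmu_notifier_invalidate_range_only_end() to avoid invalidating secondary
TLBs twice; now the plain helper is enough, because the architecture TLB
flush already takes care of secondary TLBs, and the plain
mmu_notifier_invalidate_range_end() is used everywhere.

    /* Illustrative sketch only: stand-in stubs, not the kernel implementation. */
    #include <stdio.h>

    /* After this series, the architecture TLB flush performed by e.g.
     * ptep_clear_flush() also invalidates secondary TLBs, so callers need
     * no separate notification step. */
    static void ptep_clear_flush_stub(void)
    {
            printf("clear PTE, flush CPU TLB, invalidate secondary TLBs\n");
    }

    /* With the "only_end" variant gone, range-end only runs the
     * ->invalidate_range_end() callbacks; it no longer also calls
     * ->invalidate_range(). */
    static void mmu_notifier_invalidate_range_end_stub(void)
    {
            printf("run ->invalidate_range_end() callbacks\n");
    }

    int main(void)
    {
            /* Old pattern: ptep_clear_flush_notify() followed by
             * mmu_notifier_invalidate_range_only_end().
             * New pattern, as in the diff below: */
            ptep_clear_flush_stub();
            mmu_notifier_invalidate_range_end_stub();
            return 0;
    }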

Link: https://lkml.kernel.org/r/90d749d03cbab256ca0edeb5287069599566d783.1690292440.git-series.apopple@nvidia.com
Signed-off-by: Alistair Popple <apopple@nvidia.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Cc: Andrew Donnellan <ajd@linux.ibm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Chaitanya Kumar Borah <chaitanya.kumar.borah@intel.com>
Cc: Frederic Barrat <fbarrat@linux.ibm.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Kevin Tian <kevin.tian@intel.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Nicolin Chen <nicolinc@nvidia.com>
Cc: Robin Murphy <robin.murphy@arm.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: SeongJae Park <sj@kernel.org>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Cc: Will Deacon <will@kernel.org>
Cc: Zhi Wang <zhi.wang.linux@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 6bbd42e2
@@ -395,8 +395,7 @@ extern int __mmu_notifier_test_young(struct mm_struct *mm,
 extern void __mmu_notifier_change_pte(struct mm_struct *mm,
                                       unsigned long address, pte_t pte);
 extern int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *r);
-extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r,
-                                  bool only_end);
+extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r);
 extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
                                   unsigned long start, unsigned long end);
 extern bool
@@ -481,14 +480,7 @@ mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
         might_sleep();
         if (mm_has_notifiers(range->mm))
-                __mmu_notifier_invalidate_range_end(range, false);
-}
-
-static inline void
-mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
-{
-        if (mm_has_notifiers(range->mm))
-                __mmu_notifier_invalidate_range_end(range, true);
+                __mmu_notifier_invalidate_range_end(range);
 }
 
 static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
@@ -582,45 +574,6 @@ static inline void mmu_notifier_range_init_owner(
         __young; \
 })
 
-#define ptep_clear_flush_notify(__vma, __address, __ptep) \
-({ \
-        unsigned long ___addr = __address & PAGE_MASK; \
-        struct mm_struct *___mm = (__vma)->vm_mm; \
-        pte_t ___pte; \
-        \
-        ___pte = ptep_clear_flush(__vma, __address, __ptep); \
-        mmu_notifier_invalidate_range(___mm, ___addr, \
-                                      ___addr + PAGE_SIZE); \
-        \
-        ___pte; \
-})
-
-#define pmdp_huge_clear_flush_notify(__vma, __haddr, __pmd) \
-({ \
-        unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \
-        struct mm_struct *___mm = (__vma)->vm_mm; \
-        pmd_t ___pmd; \
-        \
-        ___pmd = pmdp_huge_clear_flush(__vma, __haddr, __pmd); \
-        mmu_notifier_invalidate_range(___mm, ___haddr, \
-                                      ___haddr + HPAGE_PMD_SIZE); \
-        \
-        ___pmd; \
-})
-
-#define pudp_huge_clear_flush_notify(__vma, __haddr, __pud) \
-({ \
-        unsigned long ___haddr = __haddr & HPAGE_PUD_MASK; \
-        struct mm_struct *___mm = (__vma)->vm_mm; \
-        pud_t ___pud; \
-        \
-        ___pud = pudp_huge_clear_flush(__vma, __haddr, __pud); \
-        mmu_notifier_invalidate_range(___mm, ___haddr, \
-                                      ___haddr + HPAGE_PUD_SIZE); \
-        \
-        ___pud; \
-})
-
 /*
  * set_pte_at_notify() sets the pte _after_ running the notifier.
  * This is safe to start by updating the secondary MMUs, because the primary MMU
@@ -711,11 +664,6 @@ void mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
 {
 }
 
-static inline void
-mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
-{
-}
-
 static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
                                   unsigned long start, unsigned long end)
 {
......
@@ -193,7 +193,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
         }
 
         flush_cache_page(vma, addr, pte_pfn(ptep_get(pvmw.pte)));
-        ptep_clear_flush_notify(vma, addr, pvmw.pte);
+        ptep_clear_flush(vma, addr, pvmw.pte);
         if (new_page)
                 set_pte_at_notify(mm, addr, pvmw.pte,
                                   mk_pte(new_page, vma->vm_page_prot));
......
@@ -2003,7 +2003,7 @@ static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud,
         count_vm_event(THP_SPLIT_PUD);
 
-        pudp_huge_clear_flush_notify(vma, haddr, pud);
+        pudp_huge_clear_flush(vma, haddr, pud);
 }
 
 void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
@@ -2023,11 +2023,7 @@ void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
 out:
         spin_unlock(ptl);
-        /*
-         * No need to double call mmu_notifier->invalidate_range() callback as
-         * the above pudp_huge_clear_flush_notify() did already call it.
-         */
-        mmu_notifier_invalidate_range_only_end(&range);
+        mmu_notifier_invalidate_range_end(&range);
 }
 
 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
@@ -2094,7 +2090,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
         count_vm_event(THP_SPLIT_PMD);
 
         if (!vma_is_anonymous(vma)) {
-                old_pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
+                old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd);
                 /*
                  * We are going to unmap this huge page. So
                  * just go ahead and zap it
@@ -2304,20 +2300,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 out:
         spin_unlock(ptl);
-        /*
-         * No need to double call mmu_notifier->invalidate_range() callback.
-         * They are 3 cases to consider inside __split_huge_pmd_locked():
-         * 1) pmdp_huge_clear_flush_notify() call invalidate_range() obvious
-         * 2) __split_huge_zero_page_pmd() read only zero page and any write
-         *    fault will trigger a flush_notify before pointing to a new page
-         *    (it is fine if the secondary mmu keeps pointing to the old zero
-         *    page in the meantime)
-         * 3) Split a huge pmd into pte pointing to the same page. No need
-         *    to invalidate secondary tlb entry they are all still valid.
-         *    any further changes to individual pte will notify. So no need
-         *    to call mmu_notifier->invalidate_range()
-         */
-        mmu_notifier_invalidate_range_only_end(&range);
+        mmu_notifier_invalidate_range_end(&range);
 }
 
 void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
......
@@ -5688,7 +5688,6 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
                 /* Break COW or unshare */
                 huge_ptep_clear_flush(vma, haddr, ptep);
-                mmu_notifier_invalidate_range(mm, range.start, range.end);
                 page_remove_rmap(&old_folio->page, vma, true);
                 hugepage_add_new_anon_rmap(new_folio, vma, haddr);
                 if (huge_pte_uffd_wp(pte))
......
@@ -3155,7 +3155,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
                  * that left a window where the new PTE could be loaded into
                  * some TLBs while the old PTE remains in others.
                  */
-                ptep_clear_flush_notify(vma, vmf->address, vmf->pte);
+                ptep_clear_flush(vma, vmf->address, vmf->pte);
                 folio_add_new_anon_rmap(new_folio, vma, vmf->address);
                 folio_add_lru_vma(new_folio, vma);
                 /*
@@ -3201,11 +3201,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
                 pte_unmap_unlock(vmf->pte, vmf->ptl);
         }
-        /*
-         * No need to double call mmu_notifier->invalidate_range() callback as
-         * the above ptep_clear_flush_notify() did already call it.
-         */
-        mmu_notifier_invalidate_range_only_end(&range);
+        mmu_notifier_invalidate_range_end(&range);
 
         if (new_folio)
                 folio_put(new_folio);
......
@@ -658,7 +658,7 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
         if (flush) {
                 flush_cache_page(vma, addr, pte_pfn(orig_pte));
-                ptep_clear_flush_notify(vma, addr, ptep);
+                ptep_clear_flush(vma, addr, ptep);
                 set_pte_at_notify(mm, addr, ptep, entry);
                 update_mmu_cache(vma, addr, ptep);
         } else {
@@ -763,13 +763,8 @@ static void __migrate_device_pages(unsigned long *src_pfns,
                         src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
         }
 
-        /*
-         * No need to double call mmu_notifier->invalidate_range() callback as
-         * the above ptep_clear_flush_notify() inside migrate_vma_insert_page()
-         * did already call it.
-         */
         if (notified)
-                mmu_notifier_invalidate_range_only_end(&range);
+                mmu_notifier_invalidate_range_end(&range);
 }
 
 /**
......
@@ -551,7 +551,7 @@ int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
 
 static void
 mn_hlist_invalidate_end(struct mmu_notifier_subscriptions *subscriptions,
-                        struct mmu_notifier_range *range, bool only_end)
+                        struct mmu_notifier_range *range)
 {
         struct mmu_notifier *subscription;
         int id;
@@ -559,24 +559,6 @@ mn_hlist_invalidate_end(struct mmu_notifier_subscriptions *subscriptions,
         id = srcu_read_lock(&srcu);
         hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
                                  srcu_read_lock_held(&srcu)) {
-                /*
-                 * Call invalidate_range here too to avoid the need for the
-                 * subsystem of having to register an invalidate_range_end
-                 * call-back when there is invalidate_range already. Usually a
-                 * subsystem registers either invalidate_range_start()/end() or
-                 * invalidate_range(), so this will be no additional overhead
-                 * (besides the pointer check).
-                 *
-                 * We skip call to invalidate_range() if we know it is safe ie
-                 * call site use mmu_notifier_invalidate_range_only_end() which
-                 * is safe to do when we know that a call to invalidate_range()
-                 * already happen under page table lock.
-                 */
-                if (!only_end && subscription->ops->invalidate_range)
-                        subscription->ops->invalidate_range(subscription,
-                                                            range->mm,
-                                                            range->start,
-                                                            range->end);
                 if (subscription->ops->invalidate_range_end) {
                         if (!mmu_notifier_range_blockable(range))
                                 non_block_start();
@@ -589,8 +571,7 @@ mn_hlist_invalidate_end(struct mmu_notifier_subscriptions *subscriptions,
         srcu_read_unlock(&srcu, id);
 }
 
-void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range,
-                                         bool only_end)
+void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
 {
         struct mmu_notifier_subscriptions *subscriptions =
                 range->mm->notifier_subscriptions;
@@ -600,7 +581,7 @@ void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range,
         mn_itree_inv_end(subscriptions);
         if (!hlist_empty(&subscriptions->list))
-                mn_hlist_invalidate_end(subscriptions, range, only_end);
+                mn_hlist_invalidate_end(subscriptions, range);
         lock_map_release(&__mmu_notifier_invalidate_range_start_map);
 }
......
@@ -985,13 +985,6 @@ static int page_vma_mkclean_one(struct page_vma_mapped_walk *pvmw)
 #endif
                 }
 
-                /*
-                 * No need to call mmu_notifier_invalidate_range() as we are
-                 * downgrading page table protection not changing it to point
-                 * to a new page.
-                 *
-                 * See Documentation/mm/mmu_notifier.rst
-                 */
                 if (ret)
                         cleaned++;
         }
@@ -1549,8 +1542,6 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
                                         hugetlb_vma_unlock_write(vma);
                                         flush_tlb_range(vma,
                                                 range.start, range.end);
-                                        mmu_notifier_invalidate_range(mm,
-                                                range.start, range.end);
                                         /*
                                          * The ref count of the PMD page was
                                          * dropped which is part of the way map
@@ -1623,9 +1614,6 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
                          * copied pages.
                          */
                         dec_mm_counter(mm, mm_counter(&folio->page));
-                        /* We have to invalidate as we cleared the pte */
-                        mmu_notifier_invalidate_range(mm, address,
-                                                      address + PAGE_SIZE);
                 } else if (folio_test_anon(folio)) {
                         swp_entry_t entry = { .val = page_private(subpage) };
                         pte_t swp_pte;
@@ -1637,9 +1625,6 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
                                             folio_test_swapcache(folio))) {
                                 WARN_ON_ONCE(1);
                                 ret = false;
-                                /* We have to invalidate as we cleared the pte */
-                                mmu_notifier_invalidate_range(mm, address,
-                                                        address + PAGE_SIZE);
                                 page_vma_mapped_walk_done(&pvmw);
                                 break;
                         }
@@ -1670,9 +1655,6 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
                          */
                         if (ref_count == 1 + map_count &&
                             !folio_test_dirty(folio)) {
-                                /* Invalidate as we cleared the pte */
-                                mmu_notifier_invalidate_range(mm,
-                                        address, address + PAGE_SIZE);
                                 dec_mm_counter(mm, MM_ANONPAGES);
                                 goto discard;
                         }
@@ -1727,9 +1709,6 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
                         if (pte_uffd_wp(pteval))
                                 swp_pte = pte_swp_mkuffd_wp(swp_pte);
                         set_pte_at(mm, address, pvmw.pte, swp_pte);
-                        /* Invalidate as we cleared the pte */
-                        mmu_notifier_invalidate_range(mm, address,
-                                                      address + PAGE_SIZE);
                 } else {
                         /*
                          * This is a locked file-backed folio,
@@ -1745,13 +1724,6 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
                         dec_mm_counter(mm, mm_counter_file(&folio->page));
                 }
 discard:
-                /*
-                 * No need to call mmu_notifier_invalidate_range() it has be
-                 * done above for all cases requiring it to happen under page
-                 * table lock before mmu_notifier_invalidate_range_end()
-                 *
-                 * See Documentation/mm/mmu_notifier.rst
-                 */
                 page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
                 if (vma->vm_flags & VM_LOCKED)
                         mlock_drain_local();
@@ -1930,8 +1902,6 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
                                         hugetlb_vma_unlock_write(vma);
                                         flush_tlb_range(vma,
                                                 range.start, range.end);
-                                        mmu_notifier_invalidate_range(mm,
-                                                range.start, range.end);
                                         /*
                                          * The ref count of the PMD page was
@@ -2036,9 +2006,6 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
                          * copied pages.
                          */
                         dec_mm_counter(mm, mm_counter(&folio->page));
-                        /* We have to invalidate as we cleared the pte */
-                        mmu_notifier_invalidate_range(mm, address,
-                                                      address + PAGE_SIZE);
                 } else {
                         swp_entry_t entry;
                         pte_t swp_pte;
@@ -2102,13 +2069,6 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
                          */
                 }
 
-                /*
-                 * No need to call mmu_notifier_invalidate_range() it has be
-                 * done above for all cases requiring it to happen under page
-                 * table lock before mmu_notifier_invalidate_range_end()
-                 *
-                 * See Documentation/mm/mmu_notifier.rst
-                 */
                 page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
                 if (vma->vm_flags & VM_LOCKED)
                         mlock_drain_local();
......