Commit ecfbd733 authored by Mike Kravetz, committed by Andrew Morton

hugetlb: take hugetlb vma_lock when clearing vma_lock->vma pointer

hugetlb file truncation/hole punch code may need to back out and take
locks in the required order in the routine hugetlb_unmap_file_folio().
This code can race with vma freeing, as pointed out in [1], and end up
accessing a stale vma pointer.  To address this, take the vma_lock when
clearing the vma_lock->vma pointer.

[1] https://lore.kernel.org/linux-mm/01f10195-7088-4462-6def-909549c75ef4@huawei.com/
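
The fix works because, per the new comment in __hugetlb_vma_unlock_write_put(),
access to the vma_lock->vma field is synchronized by the lock's rw_sema:
clearing the pointer while holding the write side guarantees that a reader
(such as hugetlb_unmap_file_folio(), which re-checks the pointer after
acquiring rw_sema) sees either a valid vma or NULL, never a freed one.
Below is a minimal userspace sketch of that pattern, with a pthread rwlock
standing in for the kernel's rw_semaphore; all names here
(vma_unlock_write_put, vma_lock_get_vma, etc.) are illustrative, not the
kernel's API, and this is not kernel code:

	/* build: cc -pthread sketch.c */
	#include <pthread.h>
	#include <stdio.h>

	struct vma;                       /* stand-in for struct vm_area_struct */

	struct vma_lock {                 /* stand-in for struct hugetlb_vma_lock */
		pthread_rwlock_t rw_sema; /* kernel uses a struct rw_semaphore */
		struct vma *vma;          /* the back-pointer this patch protects */
	};

	struct vma {
		struct vma_lock *private; /* stand-in for vm_private_data */
	};

	/* Teardown: caller holds rw_sema for write, mirroring the down_write()
	 * the patch adds to hugetlb_vma_lock_free().  Both pointers are cleared
	 * before the semaphore is released, so readers never see a stale vma. */
	static void vma_unlock_write_put(struct vma_lock *lock)
	{
		struct vma *vma = lock->vma;

		lock->vma = NULL;
		vma->private = NULL;
		pthread_rwlock_unlock(&lock->rw_sema);
		/* the kernel version drops its kref on the lock here */
	}

	/* Lookup: readers serialize on rw_sema and re-check the back-pointer,
	 * observing either a live vma or NULL, never freed memory.  (In the
	 * kernel, the reader keeps using the vma while still holding rw_sema;
	 * this sketch only demonstrates the visibility guarantee.) */
	static struct vma *vma_lock_get_vma(struct vma_lock *lock)
	{
		struct vma *vma;

		pthread_rwlock_rdlock(&lock->rw_sema);
		vma = lock->vma;
		pthread_rwlock_unlock(&lock->rw_sema);
		return vma;
	}

	int main(void)
	{
		struct vma_lock lock;
		struct vma vma = { .private = &lock };

		pthread_rwlock_init(&lock.rw_sema, NULL);
		lock.vma = &vma;

		printf("before teardown: %p\n", (void *)vma_lock_get_vma(&lock));

		pthread_rwlock_wrlock(&lock.rw_sema);
		vma_unlock_write_put(&lock);  /* releases the write lock itself */

		printf("after teardown:  %p\n", (void *)vma_lock_get_vma(&lock));
		pthread_rwlock_destroy(&lock.rw_sema);
		return 0;
	}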

[mike.kravetz@oracle.com: address build issues]
  Link: https://lkml.kernel.org/r/Yz5L1uxQYR1VqFtJ@monkey
Link: https://lkml.kernel.org/r/20221005011707.514612-3-mike.kravetz@oracle.com
Fixes: "hugetlb: use new vma_lock for pmd sharing synchronization"
Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: James Houghton <jthoughton@google.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mina Almasry <almasrymina@google.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Naoya Horiguchi <naoya.horiguchi@linux.dev>
Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Prakash Sangappa <prakash.sangappa@oracle.com>
Cc: Sven Schnelle <svens@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 131a79b4
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -93,6 +93,7 @@ struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
 static int hugetlb_acct_memory(struct hstate *h, long delta);
 static void hugetlb_vma_lock_free(struct vm_area_struct *vma);
 static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma);
+static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma);
 
 static inline bool subpool_is_free(struct hugepage_subpool *spool)
 {
@@ -5188,8 +5189,7 @@ void __unmap_hugepage_range_final(struct mmu_gather *tlb,
 	 * be asynchronously deleted.  If the page tables are shared, there
 	 * will be issues when accessed by someone else.
 	 */
-	hugetlb_vma_unlock_write(vma);
-	hugetlb_vma_lock_free(vma);
+	__hugetlb_vma_unlock_write_free(vma);
 
 	i_mmap_unlock_write(vma->vm_file->f_mapping);
 }
@@ -6828,6 +6828,30 @@ void hugetlb_vma_lock_release(struct kref *kref)
 	kfree(vma_lock);
 }
 
+void __hugetlb_vma_unlock_write_put(struct hugetlb_vma_lock *vma_lock)
+{
+	struct vm_area_struct *vma = vma_lock->vma;
+
+	/*
+	 * vma_lock structure may or may not be released as a result of put,
+	 * it certainly will no longer be attached to vma so clear pointer.
+	 * Semaphore synchronizes access to vma_lock->vma field.
+	 */
+	vma_lock->vma = NULL;
+	vma->vm_private_data = NULL;
+	up_write(&vma_lock->rw_sema);
+	kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
+}
+
+static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
+{
+	if (__vma_shareable_flags_pmd(vma)) {
+		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+
+		__hugetlb_vma_unlock_write_put(vma_lock);
+	}
+}
+
 static void hugetlb_vma_lock_free(struct vm_area_struct *vma)
 {
 	/*
@@ -6839,14 +6863,8 @@ static void hugetlb_vma_lock_free(struct vm_area_struct *vma)
 	if (vma->vm_private_data) {
 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
 
-		/*
-		 * vma_lock structure may or not be released, but it
-		 * certainly will no longer be attached to vma so clear
-		 * pointer.
-		 */
-		vma_lock->vma = NULL;
-		kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
-		vma->vm_private_data = NULL;
+		down_write(&vma_lock->rw_sema);
+		__hugetlb_vma_unlock_write_put(vma_lock);
 	}
 }
 
@@ -6997,6 +7015,10 @@ void hugetlb_vma_lock_release(struct kref *kref)
 {
 }
 
+static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
+{
+}
+
 static void hugetlb_vma_lock_free(struct vm_area_struct *vma)
 {
 }
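
Two details of the new helpers are worth noting.  First,
__hugetlb_vma_unlock_write_put() releases rw_sema before dropping the kref:
if the put releases the last reference, hugetlb_vma_lock_release() kfree()s
the vma_lock that contains the semaphore, so an up_write() after the put
could touch freed memory.  Second, the empty __hugetlb_vma_unlock_write_free()
stub added in the last hunk keeps __unmap_hugepage_range_final() buildable in
configurations without pmd sharing, which appears to be the build issue
referenced in the changelog above.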