Commit 12710fd6 authored by Mike Kravetz, committed by Andrew Morton

hugetlb: rename vma_shareable() and refactor code

Rename the routine vma_shareable to vma_addr_pmd_shareable as it is
checking a specific address within the vma.  Refactor code to check if an
aligned range is shareable as this will be needed in a subsequent patch.

Link: https://lkml.kernel.org/r/20220914221810.95771-6-mike.kravetz@oracle.com
Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
Reviewed-by: Miaohe Lin <linmiaohe@huawei.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: James Houghton <jthoughton@google.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mina Almasry <almasrymina@google.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Naoya Horiguchi <naoya.horiguchi@linux.dev>
Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Prakash Sangappa <prakash.sangappa@oracle.com>
Cc: Sven Schnelle <svens@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent c8627228
...@@ -6640,26 +6640,33 @@ static unsigned long page_table_shareable(struct vm_area_struct *svma, ...@@ -6640,26 +6640,33 @@ static unsigned long page_table_shareable(struct vm_area_struct *svma,
return saddr; return saddr;
} }
static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr) static bool __vma_aligned_range_pmd_shareable(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{ {
unsigned long base = addr & PUD_MASK;
unsigned long end = base + PUD_SIZE;
/* /*
* check on proper vm_flags and page table alignment * check on proper vm_flags and page table alignment
*/ */
if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end)) if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, start, end))
return true; return true;
return false; return false;
} }
/*
 * vma_addr_pmd_shareable - check whether the PUD-sized, PUD-aligned region
 * containing @addr is shareable within @vma.
 *
 * Rounds @addr down to a PUD boundary and delegates the [start, start +
 * PUD_SIZE) range check to __vma_aligned_range_pmd_shareable().
 */
static bool vma_addr_pmd_shareable(struct vm_area_struct *vma,
						unsigned long addr)
{
	unsigned long range_start = addr & PUD_MASK;
	unsigned long range_end = range_start + PUD_SIZE;

	return __vma_aligned_range_pmd_shareable(vma, range_start, range_end);
}
/*
 * want_pmd_share - decide whether to attempt PMD page table sharing for the
 * huge page mapping at @addr in @vma.
 *
 * Sharing is refused outright when userfaultfd has disabled huge PMD sharing
 * for this vma (CONFIG_USERFAULTFD only); otherwise the decision falls to
 * vma_addr_pmd_shareable(), which checks VM_MAYSHARE and PUD alignment.
 *
 * NOTE(review): this span was a side-by-side diff with old/new columns fused
 * onto single lines; reconstructed here as the post-patch (new-side) code.
 */
bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
{
#ifdef CONFIG_USERFAULTFD
	if (uffd_disable_huge_pmd_share(vma))
		return false;
#endif
	return vma_addr_pmd_shareable(vma, addr);
}
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.