Commit ebe2e35e authored by David Hildenbrand, committed by Andrew Morton

mm/rmap: introduce and use hugetlb_try_dup_anon_rmap()

hugetlb rmap handling differs quite a lot from "ordinary" rmap code.  For
example, hugetlb currently only supports entire mappings, and treats any
mapping as mapped using a single "logical PTE".  Let's move it out of the
way so we can overhaul our "ordinary" rmap implementation/interface.
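
As a rough illustration of that "logical PTE" model (a sketch only, not
code from this patch; the one counter involved is the _entire_mapcount
that the rmap helpers below operate on):

	/*
	 * Sketch: a hugetlb folio is only ever mapped in its entirety,
	 * so the rmap side needs a single per-folio counter instead of
	 * per-page mapcounts.
	 */
	static inline void hugetlb_sketch_add_rmap(struct folio *folio)
	{
		atomic_inc(&folio->_entire_mapcount); /* one "logical PTE" */
	}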

So let's introduce and use hugetlb_try_dup_anon_rmap() to make all hugetlb
handling use dedicated hugetlb_* rmap functions.
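
The return-value semantics mirror page_try_dup_anon_rmap(): 0 means the
anon rmap was duplicated and parent and child may share the folio
COW-style; -EBUSY means the folio may be DMA-pinned and the caller has to
copy it instead.  A minimal caller sketch (the real fork-path caller is in
the mm/hugetlb.c hunk below):

	if (hugetlb_try_dup_anon_rmap(pte_folio, src_vma)) {
		/* -EBUSY: folio may be pinned, fall back to copying it */
		...
	}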

Add sanity checks that we end up with the right folios in the right
functions.

Note that is_device_private_page() does not apply to hugetlb.

Link: https://lkml.kernel.org/r/20231220224504.646757-5-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Yin Fengwei <fengwei.yin@intel.com>
Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
Reviewed-by: Muchun Song <songmuchun@bytedance.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Peter Xu <peterx@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 44887f39
include/linux/mm.h
@@ -1964,15 +1964,21 @@ static inline bool page_maybe_dma_pinned(struct page *page)
  *
  * The caller has to hold the PT lock and the vma->vm_mm->write_protect_seq.
  */
-static inline bool page_needs_cow_for_dma(struct vm_area_struct *vma,
-					  struct page *page)
+static inline bool folio_needs_cow_for_dma(struct vm_area_struct *vma,
+					   struct folio *folio)
 {
 	VM_BUG_ON(!(raw_read_seqcount(&vma->vm_mm->write_protect_seq) & 1));
 
 	if (!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags))
 		return false;
 
-	return page_maybe_dma_pinned(page);
+	return folio_maybe_dma_pinned(folio);
+}
+
+static inline bool page_needs_cow_for_dma(struct vm_area_struct *vma,
+					  struct page *page)
+{
+	return folio_needs_cow_for_dma(vma, page_folio(page));
 }
 
 /**
......
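
A side note on the hunk above: this follows the usual folio-conversion
pattern, where the folio variant carries the logic and the old page-based
entry point becomes a thin wrapper, so existing callers keep compiling
unchanged.  A hypothetical caller to show the intended use (sketch only;
the function name is made up for illustration):

	/*
	 * Illustration only: during fork, with write_protect_seq held,
	 * decide whether a mapped anon folio must be copied because it
	 * may be DMA-pinned.
	 */
	static bool fork_must_copy_folio(struct vm_area_struct *vma,
					 struct folio *folio)
	{
		return folio_needs_cow_for_dma(vma, folio);
	}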
include/linux/rmap.h
@@ -211,6 +211,22 @@ void hugetlb_add_anon_rmap(struct folio *, struct vm_area_struct *,
 void hugetlb_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
 		unsigned long address);
 
+/* See page_try_dup_anon_rmap() */
+static inline int hugetlb_try_dup_anon_rmap(struct folio *folio,
+		struct vm_area_struct *vma)
+{
+	VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
+	VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
+
+	if (PageAnonExclusive(&folio->page)) {
+		if (unlikely(folio_needs_cow_for_dma(vma, folio)))
+			return -EBUSY;
+		ClearPageAnonExclusive(&folio->page);
+	}
+	atomic_inc(&folio->_entire_mapcount);
+	return 0;
+}
+
 static inline void hugetlb_add_file_rmap(struct folio *folio)
 {
 	VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
@@ -228,6 +244,8 @@ static inline void hugetlb_remove_rmap(struct folio *folio)
 static inline void __page_dup_rmap(struct page *page, bool compound)
 {
+	VM_WARN_ON(folio_test_hugetlb(page_folio(page)));
+
 	if (compound) {
 		struct folio *folio = (struct folio *)page;
......
mm/hugetlb.c
@@ -5409,8 +5409,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 		 */
 		if (!folio_test_anon(pte_folio)) {
 			hugetlb_add_file_rmap(pte_folio);
-		} else if (page_try_dup_anon_rmap(&pte_folio->page,
-						  true, src_vma)) {
+		} else if (hugetlb_try_dup_anon_rmap(pte_folio, src_vma)) {
 			pte_t src_pte_old = entry;
 			struct folio *new_folio;
......
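
For context, a condensed sketch of the fallback that the hunk above feeds
into when hugetlb_try_dup_anon_rmap() returns -EBUSY (based on the
surrounding copy_hugetlb_page_range() code; locking, reservation and
error handling are elided):

	} else if (hugetlb_try_dup_anon_rmap(pte_folio, src_vma)) {
		/* Folio may be DMA-pinned: give the child its own copy. */
		pte_t src_pte_old = entry;
		struct folio *new_folio;

		spin_unlock(src_ptl);
		spin_unlock(dst_ptl);
		new_folio = alloc_hugetlb_folio(dst_vma, addr, 1);
		...
		ret = copy_user_large_folio(new_folio, pte_folio, addr, dst_vma);
		...
	}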