Commit 7de856ff authored by Baolin Wang, committed by Andrew Morton

mm: khugepaged: support shmem mTHP collapse

Shmem already supports the allocation of mTHP, but khugepaged does not yet
support collapsing mTHP folios.  Now khugepaged is ready to support mTHP,
and this patch enables the collapse of shmem mTHP.

Link: https://lkml.kernel.org/r/b9da76aab4276eb6e5d12c479af2b5eea5b4575d.1724140601.git.baolin.wang@linux.alibaba.com
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Barry Song <21cnbao@gmail.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Yang Shi <shy828301@gmail.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent dfa98f56
...@@ -1843,7 +1843,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr, ...@@ -1843,7 +1843,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
} }
} while (1); } while (1);
for (index = start; index < end; index++) { for (index = start; index < end;) {
xas_set(&xas, index); xas_set(&xas, index);
folio = xas_load(&xas); folio = xas_load(&xas);
...@@ -1862,6 +1862,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr, ...@@ -1862,6 +1862,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
} }
} }
nr_none++; nr_none++;
index++;
continue; continue;
} }
...@@ -1943,12 +1944,10 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr, ...@@ -1943,12 +1944,10 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
* we locked the first folio, then a THP might be there already. * we locked the first folio, then a THP might be there already.
* This will be discovered on the first iteration. * This will be discovered on the first iteration.
*/ */
if (folio_test_large(folio)) { if (folio_order(folio) == HPAGE_PMD_ORDER &&
result = folio_order(folio) == HPAGE_PMD_ORDER && folio->index == start) {
folio->index == start
/* Maybe PMD-mapped */ /* Maybe PMD-mapped */
? SCAN_PTE_MAPPED_HUGEPAGE result = SCAN_PTE_MAPPED_HUGEPAGE;
: SCAN_PAGE_COMPOUND;
goto out_unlock; goto out_unlock;
} }
...@@ -2009,6 +2008,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr, ...@@ -2009,6 +2008,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
* Accumulate the folios that are being collapsed. * Accumulate the folios that are being collapsed.
*/ */
list_add_tail(&folio->lru, &pagelist); list_add_tail(&folio->lru, &pagelist);
index += folio_nr_pages(folio);
continue; continue;
out_unlock: out_unlock:
folio_unlock(folio); folio_unlock(folio);
...@@ -2261,16 +2261,10 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr, ...@@ -2261,16 +2261,10 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
continue; continue;
} }
/* if (folio_order(folio) == HPAGE_PMD_ORDER &&
* TODO: khugepaged should compact smaller compound pages folio->index == start) {
* into a PMD sized page
*/
if (folio_test_large(folio)) {
result = folio_order(folio) == HPAGE_PMD_ORDER &&
folio->index == start
/* Maybe PMD-mapped */ /* Maybe PMD-mapped */
? SCAN_PTE_MAPPED_HUGEPAGE result = SCAN_PTE_MAPPED_HUGEPAGE;
: SCAN_PAGE_COMPOUND;
/* /*
* For SCAN_PTE_MAPPED_HUGEPAGE, further processing * For SCAN_PTE_MAPPED_HUGEPAGE, further processing
* by the caller won't touch the page cache, and so * by the caller won't touch the page cache, and so
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment