Commit 5c8525a3 authored by Kefeng Wang, committed by Andrew Morton

mm: migrate_device: convert to migrate_device_coherent_folio()

Patch series "mm: finish isolate/putback_lru_page()".

Convert to use more folios in migrate_device.c so that we can remove
isolate_lru_page() and putback_lru_page().

This patch (of 6):

Save a few calls to compound_head() and use folio throughout.
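
For context: most page-based helpers must first resolve the head page of a
potentially compound page via compound_head(), while a folio is by definition
already the head, so each converted call drops one such lookup. A minimal
sketch of the pattern, simplified from the pagemap helpers (illustrative, not
the exact kernel source):

	/* Legacy page API: resolves the folio (head page) on every call. */
	static inline void lock_page(struct page *page)
	{
		folio_lock(page_folio(page));	/* page_folio() uses compound_head() */
	}

	/* Folio API: the caller already holds the head; no lookup needed. */
	void folio_lock(struct folio *folio);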

Link: https://lkml.kernel.org/r/20240826065814.1336616-1-wangkefeng.wang@huawei.com
Link: https://lkml.kernel.org/r/20240826065814.1336616-2-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Reviewed-by: Alistair Popple <apopple@nvidia.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 97b76796
@@ -2335,7 +2335,7 @@ static int migrate_longterm_unpinnable_folios(
 			folio_get(folio);
 			gup_put_folio(folio, 1, FOLL_PIN);
 
-			if (migrate_device_coherent_page(&folio->page)) {
+			if (migrate_device_coherent_folio(folio)) {
 				ret = -EBUSY;
 				goto err;
 			}
@@ -1208,7 +1208,7 @@ int numa_migrate_check(struct folio *folio, struct vm_fault *vmf,
 		       int *last_cpupid);
 
 void free_zone_device_folio(struct folio *folio);
-int migrate_device_coherent_page(struct page *page);
+int migrate_device_coherent_folio(struct folio *folio);
 
 /*
  * mm/gup.c
@@ -708,7 +708,7 @@ static void __migrate_device_pages(unsigned long *src_pfns,
 			/*
 			 * The only time there is no vma is when called from
-			 * migrate_device_coherent_page(). However this isn't
+			 * migrate_device_coherent_folio(). However this isn't
 			 * called if the page could not be unmapped.
 			 */
 			VM_BUG_ON(!migrate);
@@ -921,38 +921,38 @@ int migrate_device_range(unsigned long *src_pfns, unsigned long start,
 EXPORT_SYMBOL(migrate_device_range);
 
 /*
- * Migrate a device coherent page back to normal memory. The caller should have
- * a reference on page which will be copied to the new page if migration is
+ * Migrate a device coherent folio back to normal memory. The caller should have
+ * a reference on folio which will be copied to the new folio if migration is
  * successful or dropped on failure.
  */
-int migrate_device_coherent_page(struct page *page)
+int migrate_device_coherent_folio(struct folio *folio)
 {
 	unsigned long src_pfn, dst_pfn = 0;
-	struct page *dpage;
+	struct folio *dfolio;
 
-	WARN_ON_ONCE(PageCompound(page));
+	WARN_ON_ONCE(folio_test_large(folio));
 
-	lock_page(page);
-	src_pfn = migrate_pfn(page_to_pfn(page)) | MIGRATE_PFN_MIGRATE;
+	folio_lock(folio);
+	src_pfn = migrate_pfn(folio_pfn(folio)) | MIGRATE_PFN_MIGRATE;
 
 	/*
 	 * We don't have a VMA and don't need to walk the page tables to find
-	 * the source page. So call migrate_vma_unmap() directly to unmap the
-	 * page as migrate_vma_setup() will fail if args.vma == NULL.
+	 * the source folio. So call migrate_vma_unmap() directly to unmap the
+	 * folio as migrate_vma_setup() will fail if args.vma == NULL.
 	 */
 	migrate_device_unmap(&src_pfn, 1, NULL);
 	if (!(src_pfn & MIGRATE_PFN_MIGRATE))
 		return -EBUSY;
 
-	dpage = alloc_page(GFP_USER | __GFP_NOWARN);
-	if (dpage) {
-		lock_page(dpage);
-		dst_pfn = migrate_pfn(page_to_pfn(dpage));
+	dfolio = folio_alloc(GFP_USER | __GFP_NOWARN, 0);
+	if (dfolio) {
+		folio_lock(dfolio);
+		dst_pfn = migrate_pfn(folio_pfn(dfolio));
 	}
 
 	migrate_device_pages(&src_pfn, &dst_pfn, 1);
 	if (src_pfn & MIGRATE_PFN_MIGRATE)
-		copy_highpage(dpage, page);
+		folio_copy(dfolio, folio);
 	migrate_device_finalize(&src_pfn, &dst_pfn, 1);
 
 	if (src_pfn & MIGRATE_PFN_MIGRATE)
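
Note on the allocation change above: folio_alloc() takes an explicit order,
and order 0 keeps the allocation the same size as the old alloc_page() call,
matching the single-page assumption enforced by the
WARN_ON_ONCE(folio_test_large(folio)) check. A sketch of the equivalence
(illustrative only, not part of the patch):

	/* Both allocate one PAGE_SIZE unit; only the handle type differs. */
	struct page *dpage = alloc_page(GFP_USER | __GFP_NOWARN);
	struct folio *dfolio = folio_alloc(GFP_USER | __GFP_NOWARN, 0); /* order-0 */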