Commit 89574945 authored by Christoph Hellwig's avatar Christoph Hellwig Committed by Matthew Wilcox (Oracle)

mm: simplify freeing of devmap managed pages

Make put_devmap_managed_page return whether it took charge of the page
or not, and remove the separate page_is_devmap_managed helper.

Link: https://lkml.kernel.org/r/20220210072828.2930359-6-hch@lst.de
Signed-off-by: default avatarChristoph Hellwig <hch@lst.de>
Reviewed-by: default avatarLogan Gunthorpe <logang@deltatee.com>
Reviewed-by: default avatarJason Gunthorpe <jgg@nvidia.com>
Reviewed-by: default avatarChaitanya Kulkarni <kch@nvidia.com>
Reviewed-by: default avatarDan Williams <dan.j.williams@intel.com>
Tested-by: default avatar"Sierra Guiza, Alejandro (Alex)" <alex.sierra@amd.com>

Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Ben Skeggs <bskeggs@redhat.com>
Cc: Christian König <christian.koenig@amd.com>
Cc: Felix Kuehling <Felix.Kuehling@amd.com>
Cc: Karol Herbst <kherbst@redhat.com>
Cc: Lyude Paul <lyude@redhat.com>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: "Pan, Xinhui" <Xinhui.Pan@amd.com>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarMatthew Wilcox (Oracle) <willy@infradead.org>
parent 75e55d8a
...@@ -1094,33 +1094,24 @@ static inline bool is_zone_movable_page(const struct page *page) ...@@ -1094,33 +1094,24 @@ static inline bool is_zone_movable_page(const struct page *page)
#ifdef CONFIG_DEV_PAGEMAP_OPS #ifdef CONFIG_DEV_PAGEMAP_OPS
DECLARE_STATIC_KEY_FALSE(devmap_managed_key); DECLARE_STATIC_KEY_FALSE(devmap_managed_key);
static inline bool page_is_devmap_managed(struct page *page) bool __put_devmap_managed_page(struct page *page);
static inline bool put_devmap_managed_page(struct page *page)
{ {
if (!static_branch_unlikely(&devmap_managed_key)) if (!static_branch_unlikely(&devmap_managed_key))
return false; return false;
if (!is_zone_device_page(page)) if (!is_zone_device_page(page))
return false; return false;
switch (page->pgmap->type) { if (page->pgmap->type != MEMORY_DEVICE_PRIVATE &&
case MEMORY_DEVICE_PRIVATE: page->pgmap->type != MEMORY_DEVICE_FS_DAX)
case MEMORY_DEVICE_FS_DAX: return false;
return true; return __put_devmap_managed_page(page);
default:
break;
}
return false;
} }
void put_devmap_managed_page(struct page *page);
#else /* CONFIG_DEV_PAGEMAP_OPS */ #else /* CONFIG_DEV_PAGEMAP_OPS */
static inline bool page_is_devmap_managed(struct page *page) static inline bool put_devmap_managed_page(struct page *page)
{ {
return false; return false;
} }
static inline void put_devmap_managed_page(struct page *page)
{
}
#endif /* CONFIG_DEV_PAGEMAP_OPS */ #endif /* CONFIG_DEV_PAGEMAP_OPS */
static inline bool is_device_private_page(const struct page *page) static inline bool is_device_private_page(const struct page *page)
...@@ -1220,16 +1211,11 @@ static inline void put_page(struct page *page) ...@@ -1220,16 +1211,11 @@ static inline void put_page(struct page *page)
struct folio *folio = page_folio(page); struct folio *folio = page_folio(page);
/* /*
* For devmap managed pages we need to catch refcount transition from * For some devmap managed pages we need to catch refcount transition
* 2 to 1, when refcount reach one it means the page is free and we * from 2 to 1:
* need to inform the device driver through callback. See
* include/linux/memremap.h and HMM for details.
*/ */
if (page_is_devmap_managed(&folio->page)) { if (put_devmap_managed_page(&folio->page))
put_devmap_managed_page(&folio->page);
return; return;
}
folio_put(folio); folio_put(folio);
} }
......
...@@ -502,24 +502,22 @@ void free_devmap_managed_page(struct page *page) ...@@ -502,24 +502,22 @@ void free_devmap_managed_page(struct page *page)
page->pgmap->ops->page_free(page); page->pgmap->ops->page_free(page);
} }
void put_devmap_managed_page(struct page *page) bool __put_devmap_managed_page(struct page *page)
{ {
int count;
if (WARN_ON_ONCE(!page_is_devmap_managed(page)))
return;
count = page_ref_dec_return(page);
/* /*
* devmap page refcounts are 1-based, rather than 0-based: if * devmap page refcounts are 1-based, rather than 0-based: if
* refcount is 1, then the page is free and the refcount is * refcount is 1, then the page is free and the refcount is
* stable because nobody holds a reference on the page. * stable because nobody holds a reference on the page.
*/ */
if (count == 1) switch (page_ref_dec_return(page)) {
case 1:
free_devmap_managed_page(page); free_devmap_managed_page(page);
else if (!count) break;
case 0:
__put_page(page); __put_page(page);
break;
}
return true;
} }
EXPORT_SYMBOL(put_devmap_managed_page); EXPORT_SYMBOL(__put_devmap_managed_page);
#endif /* CONFIG_DEV_PAGEMAP_OPS */ #endif /* CONFIG_DEV_PAGEMAP_OPS */
...@@ -930,16 +930,8 @@ void release_pages(struct page **pages, int nr) ...@@ -930,16 +930,8 @@ void release_pages(struct page **pages, int nr)
unlock_page_lruvec_irqrestore(lruvec, flags); unlock_page_lruvec_irqrestore(lruvec, flags);
lruvec = NULL; lruvec = NULL;
} }
/* if (put_devmap_managed_page(page))
* ZONE_DEVICE pages that return 'false' from
* page_is_devmap_managed() do not require special
* processing, and instead, expect a call to
* put_page_testzero().
*/
if (page_is_devmap_managed(page)) {
put_devmap_managed_page(page);
continue; continue;
}
if (put_page_testzero(page)) if (put_page_testzero(page))
put_dev_pagemap(page->pgmap); put_dev_pagemap(page->pgmap);
continue; continue;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment