mm: Remove __delete_from_page_cache()

This wrapper is no longer used.  Remove it and all references to it.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
parent fb5c2029
...@@ -604,7 +604,7 @@ void clear_inode(struct inode *inode) ...@@ -604,7 +604,7 @@ void clear_inode(struct inode *inode)
{ {
/* /*
* We have to cycle the i_pages lock here because reclaim can be in the * We have to cycle the i_pages lock here because reclaim can be in the
* process of removing the last page (in __delete_from_page_cache()) * process of removing the last page (in __filemap_remove_folio())
* and we must not free the mapping under it. * and we must not free the mapping under it.
*/ */
xa_lock_irq(&inode->i_data.i_pages); xa_lock_irq(&inode->i_data.i_pages);
......
...@@ -1107,10 +1107,6 @@ int filemap_add_folio(struct address_space *mapping, struct folio *folio, ...@@ -1107,10 +1107,6 @@ int filemap_add_folio(struct address_space *mapping, struct folio *folio,
void filemap_remove_folio(struct folio *folio); void filemap_remove_folio(struct folio *folio);
void delete_from_page_cache(struct page *page); void delete_from_page_cache(struct page *page);
void __filemap_remove_folio(struct folio *folio, void *shadow); void __filemap_remove_folio(struct folio *folio, void *shadow);
/*
 * Legacy page-based wrapper: converts the page to its folio and delegates
 * to __filemap_remove_folio().  This is the now-unused helper being removed
 * by this patch.
 */
static inline void __delete_from_page_cache(struct page *page, void *shadow)
{
__filemap_remove_folio(page_folio(page), shadow);
}
void replace_page_cache_page(struct page *old, struct page *new); void replace_page_cache_page(struct page *old, struct page *new);
void delete_from_page_cache_batch(struct address_space *mapping, void delete_from_page_cache_batch(struct address_space *mapping,
struct folio_batch *fbatch); struct folio_batch *fbatch);
......
...@@ -1935,7 +1935,7 @@ int memory_failure(unsigned long pfn, int flags) ...@@ -1935,7 +1935,7 @@ int memory_failure(unsigned long pfn, int flags)
/* /*
* Now take care of user space mappings. * Now take care of user space mappings.
* Abort on fail: __delete_from_page_cache() assumes unmapped page. * Abort on fail: __filemap_remove_folio() assumes unmapped page.
*/ */
if (!hwpoison_user_mappings(p, pfn, flags, p)) { if (!hwpoison_user_mappings(p, pfn, flags, p)) {
action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED); action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
......
...@@ -392,7 +392,7 @@ void shmem_uncharge(struct inode *inode, long pages) ...@@ -392,7 +392,7 @@ void shmem_uncharge(struct inode *inode, long pages)
struct shmem_inode_info *info = SHMEM_I(inode); struct shmem_inode_info *info = SHMEM_I(inode);
unsigned long flags; unsigned long flags;
/* nrpages adjustment done by __delete_from_page_cache() or caller */ /* nrpages adjustment done by __filemap_remove_folio() or caller */
spin_lock_irqsave(&info->lock, flags); spin_lock_irqsave(&info->lock, flags);
info->alloced -= pages; info->alloced -= pages;
......
...@@ -443,7 +443,7 @@ EXPORT_SYMBOL(truncate_inode_pages_range); ...@@ -443,7 +443,7 @@ EXPORT_SYMBOL(truncate_inode_pages_range);
* mapping->invalidate_lock. * mapping->invalidate_lock.
* *
* Note: When this function returns, there can be a page in the process of * Note: When this function returns, there can be a page in the process of
* deletion (inside __delete_from_page_cache()) in the specified range. Thus * deletion (inside __filemap_remove_folio()) in the specified range. Thus
* mapping->nrpages can be non-zero when this function returns even after * mapping->nrpages can be non-zero when this function returns even after
* truncation of the whole mapping. * truncation of the whole mapping.
*/ */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment