Commit c9edc242 authored by Matthew Wilcox (Oracle), committed by Andrew Morton

swap: add swap_cache_get_folio()

Convert lookup_swap_cache() into swap_cache_get_folio() and add a
lookup_swap_cache() wrapper around it.

[akpm@linux-foundation.org: add CONFIG_SWAP=n stub for swap_cache_get_folio()]
Link: https://lkml.kernel.org/r/20220902194653.1739778-20-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 0d698e25
...@@ -39,6 +39,8 @@ void __delete_from_swap_cache(struct folio *folio, ...@@ -39,6 +39,8 @@ void __delete_from_swap_cache(struct folio *folio,
void delete_from_swap_cache(struct folio *folio); void delete_from_swap_cache(struct folio *folio);
void clear_shadow_from_swap_cache(int type, unsigned long begin, void clear_shadow_from_swap_cache(int type, unsigned long begin,
unsigned long end); unsigned long end);
struct folio *swap_cache_get_folio(swp_entry_t entry,
struct vm_area_struct *vma, unsigned long addr);
struct page *lookup_swap_cache(swp_entry_t entry, struct page *lookup_swap_cache(swp_entry_t entry,
struct vm_area_struct *vma, struct vm_area_struct *vma,
unsigned long addr); unsigned long addr);
...@@ -99,6 +101,12 @@ static inline int swap_writepage(struct page *p, struct writeback_control *wbc) ...@@ -99,6 +101,12 @@ static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
return 0; return 0;
} }
/*
 * CONFIG_SWAP=n stub: with swap compiled out nothing can ever be in the
 * swap cache, so a lookup always misses.  Mirrors the real declaration in
 * the CONFIG_SWAP branch above so callers compile unchanged.
 */
static inline struct folio *swap_cache_get_folio(swp_entry_t entry,
		struct vm_area_struct *vma, unsigned long addr)
{
	return NULL;
}
static inline struct page *lookup_swap_cache(swp_entry_t swp, static inline struct page *lookup_swap_cache(swp_entry_t swp,
struct vm_area_struct *vma, struct vm_area_struct *vma,
unsigned long addr) unsigned long addr)
......
...@@ -317,24 +317,24 @@ static inline bool swap_use_vma_readahead(void) ...@@ -317,24 +317,24 @@ static inline bool swap_use_vma_readahead(void)
} }
/* /*
* Lookup a swap entry in the swap cache. A found page will be returned * Lookup a swap entry in the swap cache. A found folio will be returned
* unlocked and with its refcount incremented - we rely on the kernel * unlocked and with its refcount incremented - we rely on the kernel
* lock getting page table operations atomic even if we drop the page * lock getting page table operations atomic even if we drop the folio
* lock before returning. * lock before returning.
*/ */
struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma, struct folio *swap_cache_get_folio(swp_entry_t entry,
unsigned long addr) struct vm_area_struct *vma, unsigned long addr)
{ {
struct page *page; struct folio *folio;
struct swap_info_struct *si; struct swap_info_struct *si;
si = get_swap_device(entry); si = get_swap_device(entry);
if (!si) if (!si)
return NULL; return NULL;
page = find_get_page(swap_address_space(entry), swp_offset(entry)); folio = filemap_get_folio(swap_address_space(entry), swp_offset(entry));
put_swap_device(si); put_swap_device(si);
if (page) { if (folio) {
bool vma_ra = swap_use_vma_readahead(); bool vma_ra = swap_use_vma_readahead();
bool readahead; bool readahead;
...@@ -342,10 +342,10 @@ struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma, ...@@ -342,10 +342,10 @@ struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
* At the moment, we don't support PG_readahead for anon THP * At the moment, we don't support PG_readahead for anon THP
* so let's bail out rather than confusing the readahead stat. * so let's bail out rather than confusing the readahead stat.
*/ */
if (unlikely(PageTransCompound(page))) if (unlikely(folio_test_large(folio)))
return page; return folio;
readahead = TestClearPageReadahead(page); readahead = folio_test_clear_readahead(folio);
if (vma && vma_ra) { if (vma && vma_ra) {
unsigned long ra_val; unsigned long ra_val;
int win, hits; int win, hits;
...@@ -366,7 +366,17 @@ struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma, ...@@ -366,7 +366,17 @@ struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
} }
} }
return page; return folio;
}
/*
 * Page-based compatibility wrapper around swap_cache_get_folio().
 * Returns the precise page within the folio backing @entry, or NULL
 * on a swap-cache miss; refcount semantics are those of the folio
 * lookup (returned with an elevated refcount, unlocked).
 */
struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
		unsigned long addr)
{
	struct folio *folio;

	folio = swap_cache_get_folio(entry, vma, addr);
	if (!folio)
		return NULL;

	return folio_file_page(folio, swp_offset(entry));
}
/** /**
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment