Commit d4f9565a authored by Matthew Wilcox (Oracle), committed by Andrew Morton

mm: convert do_swap_page()'s swapcache variable to a folio

The 'swapcache' variable is used to track whether the page is from the
swapcache or not.  It can do this equally well by being the folio of the
page rather than the page itself, and this saves a number of calls to
compound_head().
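
For background, a minimal sketch (not part of this commit) of why holding the folio saves those calls: the page-based helpers re-derive the head page on every invocation, while the folio-based variants operate on the head the caller already resolved. The helpers named below are existing kernel APIs; the fragment itself is illustrative only.

	/* Illustrative only -- two equivalent ways to unlock and release a page. */

	/* Page-based helpers re-derive the head page on each call: */
	unlock_page(page);	/* folio_unlock(page_folio(page)) internally */
	put_page(page);		/* folio_put(page_folio(page)) internally */

	/* Folio-based helpers resolve compound_head() once, up front: */
	struct folio *folio = page_folio(page);
	folio_unlock(folio);
	folio_put(folio);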

Link: https://lkml.kernel.org/r/20220902194653.1739778-16-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 63ad4add
@@ -3724,8 +3724,8 @@ static vm_fault_t handle_pte_marker(struct vm_fault *vmf)
 vm_fault_t do_swap_page(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
-	struct folio *folio;
-	struct page *page = NULL, *swapcache;
+	struct folio *swapcache, *folio = NULL;
+	struct page *page;
 	struct swap_info_struct *si = NULL;
 	rmap_t rmap_flags = RMAP_NONE;
 	bool exclusive = false;
@@ -3768,11 +3768,11 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 		goto out;
 
 	page = lookup_swap_cache(entry, vma, vmf->address);
-	swapcache = page;
 	if (page)
 		folio = page_folio(page);
+	swapcache = folio;
 
-	if (!page) {
+	if (!folio) {
 		if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
 		    __swap_count(entry) == 1) {
 			/* skip swapcache */
@@ -3805,12 +3805,12 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 		} else {
 			page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
 						vmf);
-			swapcache = page;
 			if (page)
 				folio = page_folio(page);
+			swapcache = folio;
 		}
 
-		if (!page) {
+		if (!folio) {
 			/*
 			 * Back out if somebody else faulted in this pte
 			 * while we released the pte lock.
@@ -3862,7 +3862,6 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 		page = ksm_might_need_to_copy(page, vma, vmf->address);
 		if (unlikely(!page)) {
 			ret = VM_FAULT_OOM;
-			page = swapcache;
 			goto out_page;
 		}
 		folio = page_folio(page);
@@ -3873,7 +3872,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 		 * owner. Try removing the extra reference from the local LRU
 		 * pagevecs if required.
 		 */
-		if ((vmf->flags & FAULT_FLAG_WRITE) && page == swapcache &&
+		if ((vmf->flags & FAULT_FLAG_WRITE) && folio == swapcache &&
 		    !folio_test_ksm(folio) && !folio_test_lru(folio))
 			lru_add_drain();
 	}
@@ -3914,7 +3913,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 		 * without __HAVE_ARCH_PTE_SWP_EXCLUSIVE.
 		 */
 		exclusive = pte_swp_exclusive(vmf->orig_pte);
-		if (page != swapcache) {
+		if (folio != swapcache) {
 			/*
 			 * We have a fresh page that is not exposed to the
 			 * swapcache -> certainly exclusive.
@@ -3982,7 +3981,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 	vmf->orig_pte = pte;
 
 	/* ksm created a completely new copy */
-	if (unlikely(page != swapcache && swapcache)) {
+	if (unlikely(folio != swapcache && swapcache)) {
 		page_add_new_anon_rmap(page, vma, vmf->address);
 		folio_add_lru_vma(folio, vma);
 	} else {
@@ -3995,7 +3994,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 	arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte);
 
 	folio_unlock(folio);
-	if (page != swapcache && swapcache) {
+	if (folio != swapcache && swapcache) {
 		/*
 		 * Hold the lock to avoid the swap entry to be reused
 		 * until we take the PT lock for the pte_same() check
@@ -4004,8 +4003,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 		 * so that the swap count won't change under a
 		 * parallel locked swapcache.
 		 */
-		unlock_page(swapcache);
-		put_page(swapcache);
+		folio_unlock(swapcache);
+		folio_put(swapcache);
 	}
 
 	if (vmf->flags & FAULT_FLAG_WRITE) {
@@ -4029,9 +4028,9 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 	folio_unlock(folio);
out_release:
 	folio_put(folio);
-	if (page != swapcache && swapcache) {
-		unlock_page(swapcache);
-		put_page(swapcache);
+	if (folio != swapcache && swapcache) {
+		folio_unlock(swapcache);
+		folio_put(swapcache);
 	}
 	if (si)
 		put_swap_device(si);