Commit f00f4843 authored by Matthew Wilcox (Oracle), committed by Andrew Morton

mm: convert unuse_pte() to use a folio throughout

Saves about eight calls to compound_head().

Link: https://lkml.kernel.org/r/20231211162214.2146080-4-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 8d294a8c
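
Background for the "eight calls" claim: page-based helpers such as PageWriteback(), PageUptodate() and get_page() each hide a compound_head() lookup that resolves a possibly-tail page to its head page, whereas a struct folio is by definition never a tail page, so folio_test_writeback(), folio_test_uptodate() and folio_get() can skip that step. The sketch below is a simplified userspace model of that difference; the names mirror the kernel's, but the bodies are illustrative stand-ins, not the real definitions.

/*
 * Simplified model: tail pages store (head pointer | 1) in their
 * compound_head field, so page-flag tests must resolve the head page
 * first. A folio wraps a known head page and tests the bit directly.
 */
#include <stdbool.h>
#include <stdio.h>

struct page {
	unsigned long flags;
	unsigned long compound_head;	/* (head | 1) on tail pages */
};

struct folio {
	struct page page;		/* a folio is always a head page */
};

#define PG_writeback 0

/* Tail pages follow the encoded head pointer; heads return themselves. */
static struct page *compound_head(struct page *page)
{
	unsigned long head = page->compound_head;

	if (head & 1)
		return (struct page *)(head - 1);
	return page;
}

/* Page variant: every call pays for the compound_head() lookup. */
static bool PageWriteback(struct page *page)
{
	return compound_head(page)->flags & (1UL << PG_writeback);
}

/* Folio variant: the lookup was already done when the folio was made. */
static bool folio_test_writeback(struct folio *folio)
{
	return folio->page.flags & (1UL << PG_writeback);
}

int main(void)
{
	struct folio f = { .page = { .flags = 1UL << PG_writeback } };
	struct page tail = { .compound_head = (unsigned long)&f.page | 1 };

	/* Both report the head page's flag; the folio skips the lookup. */
	printf("%d %d\n", PageWriteback(&tail), folio_test_writeback(&f));
	return 0;
}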
mm/swapfile.c
@@ -1741,21 +1741,25 @@ static inline int pte_same_as_swp(pte_t pte, pte_t swp_pte)
 static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 		unsigned long addr, swp_entry_t entry, struct folio *folio)
 {
-	struct page *page = folio_file_page(folio, swp_offset(entry));
-	struct page *swapcache;
+	struct page *page;
+	struct folio *swapcache;
 	spinlock_t *ptl;
 	pte_t *pte, new_pte, old_pte;
-	bool hwpoisoned = PageHWPoison(page);
+	bool hwpoisoned = false;
 	int ret = 1;
 
-	swapcache = page;
+	swapcache = folio;
 	folio = ksm_might_need_to_copy(folio, vma, addr);
 	if (unlikely(!folio))
 		return -ENOMEM;
-	else if (unlikely(folio == ERR_PTR(-EHWPOISON)))
+	else if (unlikely(folio == ERR_PTR(-EHWPOISON))) {
+		hwpoisoned = true;
+		folio = swapcache;
+	}
+
+	page = folio_file_page(folio, swp_offset(entry));
+	if (PageHWPoison(page))
 		hwpoisoned = true;
-	else
-		page = folio_file_page(folio, swp_offset(entry));
 
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	if (unlikely(!pte || !pte_same_as_swp(ptep_get(pte),
@@ -1766,13 +1770,12 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 
 	old_pte = ptep_get(pte);
 
-	if (unlikely(hwpoisoned || !PageUptodate(page))) {
+	if (unlikely(hwpoisoned || !folio_test_uptodate(folio))) {
 		swp_entry_t swp_entry;
 
 		dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
 		if (hwpoisoned) {
-			swp_entry = make_hwpoison_entry(swapcache);
-			page = swapcache;
+			swp_entry = make_hwpoison_entry(page);
 		} else {
 			swp_entry = make_poisoned_swp_entry();
 		}
@@ -1786,27 +1789,27 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 	 * when reading from swap. This metadata may be indexed by swap entry
 	 * so this must be called before swap_free().
 	 */
-	arch_swap_restore(entry, page_folio(page));
+	arch_swap_restore(entry, folio);
 
 	dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
 	inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
-	get_page(page);
-	if (page == swapcache) {
+	folio_get(folio);
+	if (folio == swapcache) {
 		rmap_t rmap_flags = RMAP_NONE;
 
 		/*
-		 * See do_swap_page(): PageWriteback() would be problematic.
-		 * However, we do a wait_on_page_writeback() just before this
-		 * call and have the page locked.
+		 * See do_swap_page(): writeback would be problematic.
+		 * However, we do a folio_wait_writeback() just before this
+		 * call and have the folio locked.
 		 */
-		VM_BUG_ON_PAGE(PageWriteback(page), page);
+		VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);
		if (pte_swp_exclusive(old_pte))
 			rmap_flags |= RMAP_EXCLUSIVE;
 
 		page_add_anon_rmap(page, vma, addr, rmap_flags);
 	} else { /* ksm created a completely new copy */
-		page_add_new_anon_rmap(page, vma, addr);
-		lru_cache_add_inactive_or_unevictable(page, vma);
+		folio_add_new_anon_rmap(folio, vma, addr);
+		folio_add_lru_vma(folio, vma);
 	}
 	new_pte = pte_mkold(mk_pte(page, vma->vm_page_prot));
 	if (pte_swp_soft_dirty(old_pte))
@@ -1819,9 +1822,9 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 out:
 	if (pte)
 		pte_unmap_unlock(pte, ptl);
-	if (page != swapcache) {
-		unlock_page(page);
-		put_page(page);
+	if (folio != swapcache) {
+		folio_unlock(folio);
+		folio_put(folio);
 	}
 	return ret;
 }