mm/memcg: Add folio_lruvec_relock_irq() and folio_lruvec_relock_irqsave()

These are the folio equivalents of relock_page_lruvec_irq() and
relock_page_lruvec_irqsave().  Also convert page_matches_lruvec()
to folio_matches_lruvec().
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: David Howells <dhowells@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
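For readers new to these helpers: the relock variants let a caller that walks a batch of pages keep one lruvec lock held across consecutive folios, and only drop and re-take the lock when folio_matches_lruvec() says the next folio belongs to a different lruvec. A minimal caller sketch of that pattern follows (the walk_batch_example() function is hypothetical and only for illustration; the locking pattern mirrors the __pagevec_lru_add() and release_pages() hunks below):

/* Hypothetical illustration only, not part of this patch. */
static void walk_batch_example(struct pagevec *pvec)
{
        struct lruvec *lruvec = NULL;
        unsigned long flags = 0;
        int i;

        for (i = 0; i < pagevec_count(pvec); i++) {
                struct folio *folio = page_folio(pvec->pages[i]);

                /*
                 * Keeps the currently held lock when the folio already
                 * matches it; otherwise unlocks it and locks the folio's
                 * lruvec, saving the irq flags in @flags.
                 */
                lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);

                /* ... operate on the folio under the lruvec lock ... */
        }
        if (lruvec)
                unlock_page_lruvec_irqrestore(lruvec, flags);
}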
@@ -1568,19 +1568,19 @@ static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
 }
 
 /* Test requires a stable page->memcg binding, see page_memcg() */
-static inline bool page_matches_lruvec(struct page *page, struct lruvec *lruvec)
+static inline bool folio_matches_lruvec(struct folio *folio,
+                struct lruvec *lruvec)
 {
-        return lruvec_pgdat(lruvec) == page_pgdat(page) &&
-               lruvec_memcg(lruvec) == page_memcg(page);
+        return lruvec_pgdat(lruvec) == folio_pgdat(folio) &&
+               lruvec_memcg(lruvec) == folio_memcg(folio);
 }
 
 /* Don't lock again iff page's lruvec locked */
-static inline struct lruvec *relock_page_lruvec_irq(struct page *page,
+static inline struct lruvec *folio_lruvec_relock_irq(struct folio *folio,
                 struct lruvec *locked_lruvec)
 {
-        struct folio *folio = page_folio(page);
         if (locked_lruvec) {
-                if (page_matches_lruvec(page, locked_lruvec))
+                if (folio_matches_lruvec(folio, locked_lruvec))
                         return locked_lruvec;
 
                 unlock_page_lruvec_irq(locked_lruvec);
@@ -1590,12 +1590,11 @@ static inline struct lruvec *relock_page_lruvec_irq(struct page *page,
 }
 
 /* Don't lock again iff page's lruvec locked */
-static inline struct lruvec *relock_page_lruvec_irqsave(struct page *page,
+static inline struct lruvec *folio_lruvec_relock_irqsave(struct folio *folio,
                 struct lruvec *locked_lruvec, unsigned long *flags)
 {
-        struct folio *folio = page_folio(page);
         if (locked_lruvec) {
-                if (page_matches_lruvec(page, locked_lruvec))
+                if (folio_matches_lruvec(folio, locked_lruvec))
                         return locked_lruvec;
 
                 unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
...
@@ -271,6 +271,7 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
         /* Phase 1: page isolation */
         for (i = 0; i < nr; i++) {
                 struct page *page = pvec->pages[i];
+                struct folio *folio = page_folio(page);
 
                 if (TestClearPageMlocked(page)) {
                         /*
@@ -278,7 +279,7 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
                          * so we can spare the get_page() here.
                          */
                         if (TestClearPageLRU(page)) {
-                                lruvec = relock_page_lruvec_irq(page, lruvec);
+                                lruvec = folio_lruvec_relock_irq(folio, lruvec);
                                 del_page_from_lru_list(page, lruvec);
                                 continue;
                         } else
...
@@ -189,12 +189,13 @@ static void pagevec_lru_move_fn(struct pagevec *pvec,
 
         for (i = 0; i < pagevec_count(pvec); i++) {
                 struct page *page = pvec->pages[i];
+                struct folio *folio = page_folio(page);
 
                 /* block memcg migration during page moving between lru */
                 if (!TestClearPageLRU(page))
                         continue;
 
-                lruvec = relock_page_lruvec_irqsave(page, lruvec, &flags);
+                lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
                 (*move_fn)(page, lruvec);
 
                 SetPageLRU(page);
@@ -893,11 +894,12 @@ void release_pages(struct page **pages, int nr)
         int i;
         LIST_HEAD(pages_to_free);
         struct lruvec *lruvec = NULL;
-        unsigned long flags;
+        unsigned long flags = 0;
         unsigned int lock_batch;
 
         for (i = 0; i < nr; i++) {
                 struct page *page = pages[i];
+                struct folio *folio = page_folio(page);
 
                 /*
                  * Make sure the IRQ-safe lock-holding time does not get
@@ -909,7 +911,7 @@ void release_pages(struct page **pages, int nr)
                         lruvec = NULL;
                 }
 
-                page = compound_head(page);
+                page = &folio->page;
                 if (is_huge_zero_page(page))
                         continue;
 
@@ -948,7 +950,7 @@ void release_pages(struct page **pages, int nr)
                 if (PageLRU(page)) {
                         struct lruvec *prev_lruvec = lruvec;
 
-                        lruvec = relock_page_lruvec_irqsave(page, lruvec,
+                        lruvec = folio_lruvec_relock_irqsave(folio, lruvec,
                                                                         &flags);
                         if (prev_lruvec != lruvec)
                                 lock_batch = 0;
@@ -1052,8 +1054,9 @@ void __pagevec_lru_add(struct pagevec *pvec)
 
         for (i = 0; i < pagevec_count(pvec); i++) {
                 struct page *page = pvec->pages[i];
+                struct folio *folio = page_folio(page);
 
-                lruvec = relock_page_lruvec_irqsave(page, lruvec, &flags);
+                lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
                 __pagevec_lru_add_fn(page, lruvec);
         }
         if (lruvec)
...
@@ -2200,7 +2200,7 @@ static unsigned int move_pages_to_lru(struct lruvec *lruvec,
                  * All pages were isolated from the same lruvec (and isolation
                  * inhibits memcg migration).
                  */
-                VM_BUG_ON_PAGE(!page_matches_lruvec(page, lruvec), page);
+                VM_BUG_ON_PAGE(!folio_matches_lruvec(page_folio(page), lruvec), page);
                 add_page_to_lru_list(page, lruvec);
                 nr_pages = thp_nr_pages(page);
                 nr_moved += nr_pages;
@@ -4666,6 +4666,7 @@ void check_move_unevictable_pages(struct pagevec *pvec)
 
         for (i = 0; i < pvec->nr; i++) {
                 struct page *page = pvec->pages[i];
+                struct folio *folio = page_folio(page);
                 int nr_pages;
 
                 if (PageTransTail(page))
@@ -4678,7 +4679,7 @@ void check_move_unevictable_pages(struct pagevec *pvec)
                 if (!TestClearPageLRU(page))
                         continue;
 
-                lruvec = relock_page_lruvec_irq(page, lruvec);
+                lruvec = folio_lruvec_relock_irq(folio, lruvec);
                 if (page_evictable(page) && PageUnevictable(page)) {
                         del_page_from_lru_list(page, lruvec);
                         ClearPageUnevictable(page);
...