mm/gup: Turn compound_next() into gup_folio_next()

Convert both callers to work on folios instead of pages.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
parent 2d7919a2
...@@ -230,20 +230,19 @@ static inline struct page *compound_range_next(struct page *start, ...@@ -230,20 +230,19 @@ static inline struct page *compound_range_next(struct page *start,
return page; return page;
} }
/*
 * gup_folio_next - find the folio for list[i] and count how many
 * consecutive entries of @list belong to it.
 * @list:   array of pinned pages being released.
 * @npages: number of entries in @list.
 * @i:      index of the first entry to examine.
 * @ntails: out-parameter; set to the number of consecutive entries
 *          (starting at @i) whose pages all map to the same folio.
 *
 * Returns the folio containing list[i].  Callers advance by *ntails so
 * each folio's refcount is adjusted once per run instead of per page.
 */
static inline struct folio *gup_folio_next(struct page **list,
		unsigned long npages, unsigned long i, unsigned int *ntails)
{
	struct folio *folio = page_folio(list[i]);
	unsigned int nr;

	/* Scan forward while subsequent pages still belong to this folio. */
	for (nr = i + 1; nr < npages; nr++) {
		if (page_folio(list[nr]) != folio)
			break;
	}

	*ntails = nr - i;
	return folio;
}
/** /**
...@@ -271,17 +270,17 @@ static inline struct page *compound_next(struct page **list, ...@@ -271,17 +270,17 @@ static inline struct page *compound_next(struct page **list,
/*
 * unpin_user_pages_dirty_lock - release FOLL_PIN references on an array
 * of pages, optionally marking each page's folio dirty first.
 * @pages:      array of pinned pages.
 * @npages:     number of entries in @pages.
 * @make_dirty: when false this is equivalent to unpin_user_pages();
 *              when true, each not-yet-dirty folio is marked dirty
 *              under the folio lock before being unpinned.
 */
void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
				 bool make_dirty)
{
	unsigned long i;
	struct folio *folio;
	unsigned int nr;

	if (!make_dirty) {
		unpin_user_pages(pages, npages);
		return;
	}

	for (i = 0; i < npages; i += nr) {
		folio = gup_folio_next(pages, npages, i, &nr);
		/*
		 * Checking PageDirty at this point may race with
		 * clear_page_dirty_for_io(), but that's OK. Two key
		 * cases apply here.  (NOTE(review): the middle of this
		 * comment is elided in the diff view this file was
		 * reconstructed from.)  ... written back, so it gets
		 * written back again in the next writeback cycle. This
		 * is harmless.
		 */
		if (!folio_test_dirty(folio)) {
			folio_lock(folio);
			folio_mark_dirty(folio);
			folio_unlock(folio);
		}
		gup_put_folio(folio, nr, FOLL_PIN);
	}
}
EXPORT_SYMBOL(unpin_user_pages_dirty_lock);
...@@ -357,9 +359,9 @@ EXPORT_SYMBOL(unpin_user_page_range_dirty_lock); ...@@ -357,9 +359,9 @@ EXPORT_SYMBOL(unpin_user_page_range_dirty_lock);
*/ */
void unpin_user_pages(struct page **pages, unsigned long npages) void unpin_user_pages(struct page **pages, unsigned long npages)
{ {
unsigned long index; unsigned long i;
struct page *head; struct folio *folio;
unsigned int ntails; unsigned int nr;
/* /*
* If this WARN_ON() fires, then the system *might* be leaking pages (by * If this WARN_ON() fires, then the system *might* be leaking pages (by
...@@ -369,9 +371,9 @@ void unpin_user_pages(struct page **pages, unsigned long npages) ...@@ -369,9 +371,9 @@ void unpin_user_pages(struct page **pages, unsigned long npages)
if (WARN_ON(IS_ERR_VALUE(npages))) if (WARN_ON(IS_ERR_VALUE(npages)))
return; return;
for (index = 0; index < npages; index += ntails) { for (i = 0; i < npages; i += nr) {
head = compound_next(pages, npages, index, &ntails); folio = gup_folio_next(pages, npages, i, &nr);
put_compound_head(head, ntails, FOLL_PIN); gup_put_folio(folio, nr, FOLL_PIN);
} }
} }
EXPORT_SYMBOL(unpin_user_pages); EXPORT_SYMBOL(unpin_user_pages);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment