Commit 3291e09a authored by Matthew Wilcox (Oracle), committed by Andrew Morton

drm: convert drm_gem_put_pages() to use a folio_batch

Remove a few hidden compound_head() calls by converting the returned page
to a folio once and using the folio APIs.

Link: https://lkml.kernel.org/r/20230621164557.3510324-6-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 0b62af28
...@@ -496,13 +496,13 @@ int drm_gem_create_mmap_offset(struct drm_gem_object *obj) ...@@ -496,13 +496,13 @@ int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
EXPORT_SYMBOL(drm_gem_create_mmap_offset); EXPORT_SYMBOL(drm_gem_create_mmap_offset);
/*
 * Move folios to the appropriate LRU list and release the batch,
 * dropping the reference count taken on each folio.  Yields the CPU
 * between batches so long teardown loops stay preemptible.
 */
static void drm_gem_check_release_batch(struct folio_batch *fbatch)
{
	check_move_unevictable_folios(fbatch);
	__folio_batch_release(fbatch);
	cond_resched();
}
...@@ -534,10 +534,10 @@ static void drm_gem_check_release_pagevec(struct pagevec *pvec) ...@@ -534,10 +534,10 @@ static void drm_gem_check_release_pagevec(struct pagevec *pvec)
struct page **drm_gem_get_pages(struct drm_gem_object *obj) struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{ {
struct address_space *mapping; struct address_space *mapping;
struct page *p, **pages; struct page **pages;
struct pagevec pvec; struct folio *folio;
int i, npages; struct folio_batch fbatch;
int i, j, npages;
if (WARN_ON(!obj->filp)) if (WARN_ON(!obj->filp))
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
...@@ -559,11 +559,14 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj) ...@@ -559,11 +559,14 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
mapping_set_unevictable(mapping); mapping_set_unevictable(mapping);
for (i = 0; i < npages; i++) { i = 0;
p = shmem_read_mapping_page(mapping, i); while (i < npages) {
if (IS_ERR(p)) folio = shmem_read_folio_gfp(mapping, i,
mapping_gfp_mask(mapping));
if (IS_ERR(folio))
goto fail; goto fail;
pages[i] = p; for (j = 0; j < folio_nr_pages(folio); j++, i++)
pages[i] = folio_file_page(folio, i);
/* Make sure shmem keeps __GFP_DMA32 allocated pages in the /* Make sure shmem keeps __GFP_DMA32 allocated pages in the
* correct region during swapin. Note that this requires * correct region during swapin. Note that this requires
...@@ -571,23 +574,26 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj) ...@@ -571,23 +574,26 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
* so shmem can relocate pages during swapin if required. * so shmem can relocate pages during swapin if required.
*/ */
BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) && BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
(page_to_pfn(p) >= 0x00100000UL)); (folio_pfn(folio) >= 0x00100000UL));
} }
return pages; return pages;
fail: fail:
mapping_clear_unevictable(mapping); mapping_clear_unevictable(mapping);
pagevec_init(&pvec); folio_batch_init(&fbatch);
while (i--) { j = 0;
if (!pagevec_add(&pvec, pages[i])) while (j < i) {
drm_gem_check_release_pagevec(&pvec); struct folio *f = page_folio(pages[j]);
if (!folio_batch_add(&fbatch, f))
drm_gem_check_release_batch(&fbatch);
j += folio_nr_pages(f);
} }
if (pagevec_count(&pvec)) if (fbatch.nr)
drm_gem_check_release_pagevec(&pvec); drm_gem_check_release_batch(&fbatch);
kvfree(pages); kvfree(pages);
return ERR_CAST(p); return ERR_CAST(folio);
} }
EXPORT_SYMBOL(drm_gem_get_pages); EXPORT_SYMBOL(drm_gem_get_pages);
...@@ -603,7 +609,7 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages, ...@@ -603,7 +609,7 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
{ {
int i, npages; int i, npages;
struct address_space *mapping; struct address_space *mapping;
struct pagevec pvec; struct folio_batch fbatch;
mapping = file_inode(obj->filp)->i_mapping; mapping = file_inode(obj->filp)->i_mapping;
mapping_clear_unevictable(mapping); mapping_clear_unevictable(mapping);
...@@ -616,23 +622,27 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages, ...@@ -616,23 +622,27 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
npages = obj->size >> PAGE_SHIFT; npages = obj->size >> PAGE_SHIFT;
pagevec_init(&pvec); folio_batch_init(&fbatch);
for (i = 0; i < npages; i++) { for (i = 0; i < npages; i++) {
struct folio *folio;
if (!pages[i]) if (!pages[i])
continue; continue;
folio = page_folio(pages[i]);
if (dirty) if (dirty)
set_page_dirty(pages[i]); folio_mark_dirty(folio);
if (accessed) if (accessed)
mark_page_accessed(pages[i]); folio_mark_accessed(folio);
/* Undo the reference we took when populating the table */ /* Undo the reference we took when populating the table */
if (!pagevec_add(&pvec, pages[i])) if (!folio_batch_add(&fbatch, folio))
drm_gem_check_release_pagevec(&pvec); drm_gem_check_release_batch(&fbatch);
i += folio_nr_pages(folio) - 1;
} }
if (pagevec_count(&pvec)) if (folio_batch_count(&fbatch))
drm_gem_check_release_pagevec(&pvec); drm_gem_check_release_batch(&fbatch);
kvfree(pages); kvfree(pages);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment