Commit a32e7ea3 authored by Linus Torvalds

Merge tag 'folio-5.19a' of git://git.infradead.org/users/willy/pagecache

Pull folio fixes from Matthew Wilcox:
 "Four folio-related fixes:

   - Don't release a folio while it's still locked

   - Fix a use-after-free after dropping the mmap_lock

   - Fix a memory leak when splitting a page

   - Fix a kernel-doc warning for struct folio"

* tag 'folio-5.19a' of git://git.infradead.org/users/willy/pagecache:
  mm: Add kernel-doc for folio->mlock_count
  mm/huge_memory: Fix xarray node memory leak
  filemap: Cache the value of vm_flags
  filemap: Don't release a locked folio
parents aa3398fb 334f6f53
@@ -227,6 +227,7 @@ struct page {
  * struct folio - Represents a contiguous set of bytes.
  * @flags: Identical to the page flags.
  * @lru: Least Recently Used list; tracks how recently this folio was used.
+ * @mlock_count: Number of times this folio has been pinned by mlock().
  * @mapping: The file this page belongs to, or refers to the anon_vma for
  *    anonymous memory.
  * @index: Offset within the file, in units of pages. For anonymous memory,
@@ -255,10 +256,14 @@ struct folio {
 	unsigned long flags;
 	union {
 		struct list_head lru;
+		/* private: avoid cluttering the output */
 		struct {
 			void *__filler;
+			/* public: */
 			unsigned int mlock_count;
+			/* private: */
 		};
+		/* public: */
 	};
 	struct address_space *mapping;
 	pgoff_t index;
...
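The /* private: */ and /* public: */ markers in this include/linux/mm_types.h hunk exist for kernel-doc: they let @mlock_count be documented while the adjacent __filler padding stays out of the generated output. Structurally, mlock_count shares storage with the second word of the lru list_head, which is why a folio cannot sit on an LRU list and carry an mlock count at the same time. Below is a minimal userspace sketch of that overlay; struct folio_like and its simplified list_head are illustrative stand-ins, not the kernel's definitions.

#include <stdio.h>

struct list_head { void *next, *prev; };

/* Simplified stand-in for the union in struct folio above. */
struct folio_like {
	unsigned long flags;
	union {
		struct list_head lru;
		struct {
			void *__filler;			/* overlays lru.next */
			unsigned int mlock_count;	/* overlays lru.prev */
		};
	};
};

int main(void)
{
	struct folio_like f = {0};

	f.mlock_count = 3;
	/* Writing mlock_count clobbers lru.prev: the two uses are exclusive. */
	printf("mlock_count=%u lru.prev=%p\n", f.mlock_count, f.lru.prev);
	return 0;
}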
@@ -1508,6 +1508,7 @@ void *xas_find_marked(struct xa_state *, unsigned long max, xa_mark_t);
 void xas_init_marks(const struct xa_state *);
 
 bool xas_nomem(struct xa_state *, gfp_t);
+void xas_destroy(struct xa_state *);
 void xas_pause(struct xa_state *);
 
 void xas_create_range(struct xa_state *);
...
@@ -264,9 +264,10 @@ static void xa_node_free(struct xa_node *node)
  * xas_destroy() - Free any resources allocated during the XArray operation.
  * @xas: XArray operation state.
  *
- * This function is now internal-only.
+ * Most users will not need to call this function; it is called for you
+ * by xas_nomem().
  */
-static void xas_destroy(struct xa_state *xas)
+void xas_destroy(struct xa_state *xas)
 {
 	struct xa_node *next, *node = xas->xa_alloc;
 
...
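Making xas_destroy() non-static in lib/xarray.c restores the usual division of labour: in the common retry loop, xas_nomem() either allocates a node after an -ENOMEM and asks the caller to retry, or cleans up internally and returns false, so most callers never call xas_destroy() directly. A hedged sketch of that documented pattern follows; xa_store_retry() is a hypothetical helper, the XArray calls themselves are the standard API.

#include <linux/xarray.h>

/* Hypothetical helper: store an entry, retrying allocation as needed. */
static int xa_store_retry(struct xarray *xa, unsigned long index, void *entry)
{
	XA_STATE(xas, xa, index);

	do {
		xas_lock(&xas);
		xas_store(&xas, entry);
		xas_unlock(&xas);
		/* On -ENOMEM: allocate a node and loop again.
		 * Otherwise: xas_nomem() calls xas_destroy() for us. */
	} while (xas_nomem(&xas, GFP_KERNEL));

	return xas_error(&xas);
}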
@@ -2991,11 +2991,12 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
 	struct address_space *mapping = file->f_mapping;
 	DEFINE_READAHEAD(ractl, file, ra, mapping, vmf->pgoff);
 	struct file *fpin = NULL;
+	unsigned long vm_flags = vmf->vma->vm_flags;
 	unsigned int mmap_miss;
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	/* Use the readahead code, even if readahead is disabled */
-	if (vmf->vma->vm_flags & VM_HUGEPAGE) {
+	if (vm_flags & VM_HUGEPAGE) {
 		fpin = maybe_unlock_mmap_for_io(vmf, fpin);
 		ractl._index &= ~((unsigned long)HPAGE_PMD_NR - 1);
 		ra->size = HPAGE_PMD_NR;
@@ -3003,7 +3004,7 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
 		 * Fetch two PMD folios, so we get the chance to actually
 		 * readahead, unless we've been told not to.
 		 */
-		if (!(vmf->vma->vm_flags & VM_RAND_READ))
+		if (!(vm_flags & VM_RAND_READ))
 			ra->size *= 2;
 		ra->async_size = HPAGE_PMD_NR;
 		page_cache_ra_order(&ractl, ra, HPAGE_PMD_ORDER);
@@ -3012,12 +3013,12 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
 #endif
 
 	/* If we don't want any read-ahead, don't bother */
-	if (vmf->vma->vm_flags & VM_RAND_READ)
+	if (vm_flags & VM_RAND_READ)
 		return fpin;
 	if (!ra->ra_pages)
 		return fpin;
 
-	if (vmf->vma->vm_flags & VM_SEQ_READ) {
+	if (vm_flags & VM_SEQ_READ) {
 		fpin = maybe_unlock_mmap_for_io(vmf, fpin);
 		page_cache_sync_ra(&ractl, ra->ra_pages);
 		return fpin;
...
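This mm/filemap.c change is a use-after-free fix, not a cleanup: each call to maybe_unlock_mmap_for_io() may drop mmap_lock, after which vmf->vma can be unmapped and freed by a concurrent task, so any later vmf->vma->vm_flags dereference reads freed memory. Snapshotting the flags into a local while the lock is still held makes every subsequent test safe. A condensed, hypothetical illustration of the rule (fault_ra_sketch() is invented; maybe_unlock_mmap_for_io() is local to mm/filemap.c):

#include <linux/mm.h>

/* Hypothetical condensation of do_sync_mmap_readahead()'s locking rule. */
static struct file *fault_ra_sketch(struct vm_fault *vmf)
{
	/* Snapshot while mmap_lock is guaranteed held. */
	unsigned long vm_flags = vmf->vma->vm_flags;
	struct file *fpin = NULL;

	if (vm_flags & VM_SEQ_READ) {
		fpin = maybe_unlock_mmap_for_io(vmf, fpin);
		/* mmap_lock may be dropped here: vmf->vma is no longer
		 * safe to dereference, but the local vm_flags is. */
	}
	return fpin;
}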
@@ -2672,8 +2672,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 	if (mapping)
 		i_mmap_unlock_read(mapping);
 out:
-	/* Free any memory we didn't use */
-	xas_nomem(&xas, 0);
+	xas_destroy(&xas);
 	count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
 	return ret;
 }
...
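This mm/huge_memory.c hunk is where the node leak lived. If xas_split_alloc() fails partway through, it leaves the nodes it did manage to allocate chained on xas.xa_alloc and sets the state to -ENOMEM. The old cleanup, xas_nomem(&xas, 0), interprets an -ENOMEM state as "allocate more memory", not "free the leftovers", so the chain leaked; xas_destroy() frees it unconditionally on every exit path. A hedged, hypothetical condensation of that flow (split_sketch(), the order of 4, and the entry argument are all illustrative):

#include <linux/xarray.h>

/* Hypothetical condensation of the split path; not the real function. */
static int split_sketch(struct xarray *xa, unsigned long index, void *entry)
{
	XA_STATE(xas, xa, index);
	int ret = 0;

	/* May fail partway: the nodes it did allocate stay chained on
	 * xas.xa_alloc and the state is set to -ENOMEM. */
	xas_split_alloc(&xas, entry, 4 /* order */, GFP_KERNEL);
	if (xas_error(&xas))
		ret = -ENOMEM;

	/* ... attempt the split, possibly bailing out early ... */

	xas_destroy(&xas);	/* frees xas.xa_alloc regardless of state */
	return ret;
}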
@@ -164,12 +164,14 @@ static void read_pages(struct readahead_control *rac)
 		while ((folio = readahead_folio(rac)) != NULL) {
 			unsigned long nr = folio_nr_pages(folio);
 
+			folio_get(folio);
 			rac->ra->size -= nr;
 			if (rac->ra->async_size >= nr) {
 				rac->ra->async_size -= nr;
 				filemap_remove_folio(folio);
 			}
 			folio_unlock(folio);
+			folio_put(folio);
 		}
 	} else {
 		while ((folio = readahead_folio(rac)) != NULL)
...
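The mm/readahead.c hunk enforces a general folio lifetime rule: filemap_remove_folio() may drop what was the page cache's last reference, and a folio must never be freed while it is still locked, or the following folio_unlock() touches freed memory. Taking a private reference first pins the folio across both calls. A hedged sketch of the rule; remove_and_unlock() is a hypothetical helper, and filemap_remove_folio() is an mm-internal function.

#include <linux/pagemap.h>

/* Hypothetical helper showing the reference rule the hunk enforces. */
static void remove_and_unlock(struct folio *folio)
{
	folio_get(folio);		/* pin across the removal */
	filemap_remove_folio(folio);	/* may drop the cache's last ref */
	folio_unlock(folio);		/* safe: our reference is still held */
	folio_put(folio);		/* only now can the folio be freed */
}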