Commit 07d0f025 authored by Michal Hocko, committed by Luis Henriques

mm: get rid of radix tree gfp mask for pagecache_get_page

commit 45f87de5 upstream.

Commit 2457aec6 ("mm: non-atomically mark page accessed during page
cache allocation where possible") has added a separate parameter for
specifying gfp mask for radix tree allocations.

Not only is this less than optimal from an API point of view because
it is error prone, it is also currently buggy:
grab_cache_page_write_begin uses GFP_KERNEL for the radix tree, so if
fgp_flags does not contain FGP_NOFS (mostly controlled by the
filesystem via the AOP_FLAG_NOFS flag) while the mapping_gfp_mask has
__GFP_FS cleared, the radix tree allocation does not obey the
restriction and might recurse into the filesystem and cause deadlocks.
Unfortunately this is the case for most filesystems, because only ext4
and gfs2 use AOP_FLAG_NOFS.
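
For illustration, the pre-patch call in grab_cache_page_write_begin
(condensed from the removed lines in the diff below; not a complete
function) shows the problematic combination of masks:

	if (flags & AOP_FLAG_NOFS)	/* only ext4 and gfs2 set this */
		fgp_flags |= FGP_NOFS;

	/*
	 * The page cache page honours mapping_gfp_mask(), but the radix
	 * tree nodes are allocated with a hard-coded GFP_KERNEL (__GFP_FS
	 * set), so without FGP_NOFS the radix tree allocation may recurse
	 * into the filesystem even when the mapping's gfp mask has
	 * __GFP_FS cleared.
	 */
	page = pagecache_get_page(mapping, index, fgp_flags,
			mapping_gfp_mask(mapping),	/* page cache page */
			GFP_KERNEL);			/* radix tree nodes */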

Let's simply remove the radix_gfp_mask parameter because the
allocation context is the same for both the page cache and the radix
tree.  Just make sure that the radix tree gets only the sane subset of
the mask (e.g. do not pass __GFP_WRITE).
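
Concretely, a single mask then drives both allocations inside
pagecache_get_page(), and the radix tree path only sees the
reclaim-relevant subset of it (a condensed excerpt of the post-patch
code; see the diff below):

	if ((fgp_flags & FGP_WRITE) && mapping_cap_account_dirty(mapping))
		gfp_mask |= __GFP_WRITE;	/* page cache page only */
	if (fgp_flags & FGP_NOFS)
		gfp_mask &= ~__GFP_FS;		/* honoured by both allocations */

	page = __page_cache_alloc(gfp_mask);
	...
	/* the radix tree node allocation gets only the sane subset */
	err = add_to_page_cache_lru(page, mapping, offset,
			gfp_mask & GFP_RECLAIM_MASK);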

Long term it is preferable to convert the remaining users of
AOP_FLAG_NOFS to use mapping_gfp_mask instead and simplify this
interface even further.
Reported-by: Dave Chinner <david@fromorbit.com>
Signed-off-by: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Luis Henriques <luis.henriques@canonical.com>
parent 1387ef05
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -267,7 +267,7 @@ pgoff_t page_cache_prev_hole(struct address_space *mapping,
 #define FGP_NOWAIT		0x00000020
 
 struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
-		int fgp_flags, gfp_t cache_gfp_mask, gfp_t radix_gfp_mask);
+		int fgp_flags, gfp_t cache_gfp_mask);
 
 /**
  * find_get_page - find and get a page reference
@@ -282,13 +282,13 @@ struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
 static inline struct page *find_get_page(struct address_space *mapping,
 					pgoff_t offset)
 {
-	return pagecache_get_page(mapping, offset, 0, 0, 0);
+	return pagecache_get_page(mapping, offset, 0, 0);
 }
 
 static inline struct page *find_get_page_flags(struct address_space *mapping,
 					pgoff_t offset, int fgp_flags)
 {
-	return pagecache_get_page(mapping, offset, fgp_flags, 0, 0);
+	return pagecache_get_page(mapping, offset, fgp_flags, 0);
 }
 
 /**
@@ -308,7 +308,7 @@ static inline struct page *find_get_page_flags(struct address_space *mapping,
 static inline struct page *find_lock_page(struct address_space *mapping,
 					pgoff_t offset)
 {
-	return pagecache_get_page(mapping, offset, FGP_LOCK, 0, 0);
+	return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
 }
 
 /**
@@ -335,7 +335,7 @@ static inline struct page *find_or_create_page(struct address_space *mapping,
 {
 	return pagecache_get_page(mapping, offset,
 					FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
-					gfp_mask, gfp_mask & GFP_RECLAIM_MASK);
+					gfp_mask);
 }
 
 /**
@@ -356,8 +356,7 @@ static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
 {
 	return pagecache_get_page(mapping, index,
 			FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
-			mapping_gfp_mask(mapping),
-			GFP_NOFS);
+			mapping_gfp_mask(mapping));
 }
 
 struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1031,8 +1031,7 @@ EXPORT_SYMBOL(find_lock_entry);
  * @mapping: the address_space to search
  * @offset: the page index
  * @fgp_flags: PCG flags
- * @cache_gfp_mask: gfp mask to use for the page cache data page allocation
- * @radix_gfp_mask: gfp mask to use for radix tree node allocation
+ * @gfp_mask: gfp mask to use for the page cache data page allocation
  *
  * Looks up the page cache slot at @mapping & @offset.
  *
@@ -1041,11 +1040,9 @@ EXPORT_SYMBOL(find_lock_entry);
  * FGP_ACCESSED: the page will be marked accessed
  * FGP_LOCK: Page is return locked
  * FGP_CREAT: If page is not present then a new page is allocated using
- *		@cache_gfp_mask and added to the page cache and the VM's LRU
- *		list. If radix tree nodes are allocated during page cache
- *		insertion then @radix_gfp_mask is used. The page is returned
- *		locked and with an increased refcount. Otherwise, %NULL is
- *		returned.
+ *		@gfp_mask and added to the page cache and the VM's LRU
+ *		list. The page is returned locked and with an increased
+ *		refcount. Otherwise, %NULL is returned.
  *
  * If FGP_LOCK or FGP_CREAT are specified then the function may sleep even
  * if the GFP flags specified for FGP_CREAT are atomic.
@@ -1053,7 +1050,7 @@ EXPORT_SYMBOL(find_lock_entry);
  * If there is a page cache page, it is returned with an increased refcount.
  */
 struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
-	int fgp_flags, gfp_t cache_gfp_mask, gfp_t radix_gfp_mask)
+	int fgp_flags, gfp_t gfp_mask)
 {
 	struct page *page;
 
@@ -1090,13 +1087,11 @@ struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
 	if (!page && (fgp_flags & FGP_CREAT)) {
 		int err;
 		if ((fgp_flags & FGP_WRITE) && mapping_cap_account_dirty(mapping))
-			cache_gfp_mask |= __GFP_WRITE;
-		if (fgp_flags & FGP_NOFS) {
-			cache_gfp_mask &= ~__GFP_FS;
-			radix_gfp_mask &= ~__GFP_FS;
-		}
+			gfp_mask |= __GFP_WRITE;
+		if (fgp_flags & FGP_NOFS)
+			gfp_mask &= ~__GFP_FS;
 
-		page = __page_cache_alloc(cache_gfp_mask);
+		page = __page_cache_alloc(gfp_mask);
 		if (!page)
 			return NULL;
 
@@ -1107,7 +1102,8 @@ struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
 		if (fgp_flags & FGP_ACCESSED)
 			init_page_accessed(page);
 
-		err = add_to_page_cache_lru(page, mapping, offset, radix_gfp_mask);
+		err = add_to_page_cache_lru(page, mapping, offset,
+				gfp_mask & GFP_RECLAIM_MASK);
 		if (unlikely(err)) {
 			page_cache_release(page);
 			page = NULL;
@@ -2416,8 +2412,7 @@ struct page *grab_cache_page_write_begin(struct address_space *mapping,
 		fgp_flags |= FGP_NOFS;
 
 	page = pagecache_get_page(mapping, index, fgp_flags,
-			mapping_gfp_mask(mapping),
-			GFP_KERNEL);
+			mapping_gfp_mask(mapping));
 	if (page)
 		wait_for_stable_page(page);