Commit 5169b844 authored by NeilBrown, committed by akpm

mm: submit multipage reads for SWP_FS_OPS swap-space

swap_readpage() is given one page at a time, but may be called repeatedly
in succession.

For block-device swap-space, the blk_plug functionality allows multiple
pages to be combined together at lower layers.  That cannot be used for
SWP_FS_OPS as blk_plug may not exist - it is only active when
CONFIG_BLOCK=y.  Consequently all swap reads over NFS are single page
reads.

With this patch we pass in a pointer-to-pointer where swap_readpage() can
store state between calls - much like the effect of blk_plug.  After
calling swap_readpage() some number of times, the state will be passed to
swap_read_unplug(), which can submit the combined request.
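
As an illustration, the caller-side pattern looks roughly like this (a
minimal sketch, not part of the patch; the page[] array and nr bound are
hypothetical):

	/* hypothetical caller: read nr swapped-out pages as one batch */
	struct swap_iocb *splug = NULL;
	int i;

	for (i = 0; i < nr; i++)
		/*
		 * Each call appends to splug while the pages are
		 * contiguous in the swap file; otherwise the accumulated
		 * request is submitted and a new one is started.
		 */
		swap_readpage(page[i], false, &splug);

	/* submit whatever swap_readpage() left accumulated in splug */
	swap_read_unplug(splug);

Callers that read only a single page can pass NULL as the plug, in which
case swap_readpage() submits the I/O immediately.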

Link: https://lkml.kernel.org/r/164859778127.29473.14059420492644907783.stgit@noble.brown
Signed-off-by: NeilBrown <neilb@suse.de>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Tested-by: David Howells <dhowells@redhat.com>
Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
Cc: Hugh Dickins <hughd@google.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Trond Myklebust <trond.myklebust@hammerspace.com>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent cba738f6
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -198,6 +198,7 @@ static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
 	pte_t *orig_pte;
 	struct vm_area_struct *vma = walk->private;
 	unsigned long index;
+	struct swap_iocb *splug = NULL;
 
 	if (pmd_none_or_trans_huge_or_clear_bad(pmd))
 		return 0;
@@ -219,10 +220,11 @@ static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
 			continue;
 
 		page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
-					     vma, index, false);
+					     vma, index, false, &splug);
 		if (page)
 			put_page(page);
 	}
+	swap_read_unplug(splug);
 
 	return 0;
 }
@@ -238,6 +240,7 @@ static void force_shm_swapin_readahead(struct vm_area_struct *vma,
 	XA_STATE(xas, &mapping->i_pages, linear_page_index(vma, start));
 	pgoff_t end_index = linear_page_index(vma, end + PAGE_SIZE - 1);
 	struct page *page;
+	struct swap_iocb *splug = NULL;
 
 	rcu_read_lock();
 	xas_for_each(&xas, page, end_index) {
@@ -250,13 +253,14 @@ static void force_shm_swapin_readahead(struct vm_area_struct *vma,
 
 		swap = radix_to_swp_entry(page);
 		page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE,
-					     NULL, 0, false);
+					     NULL, 0, false, &splug);
 		if (page)
 			put_page(page);
 
 		rcu_read_lock();
 	}
 	rcu_read_unlock();
+	swap_read_unplug(splug);
 
 	lru_add_drain();	/* Push any new pages onto the LRU now */
 }
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3633,7 +3633,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 
 				/* To provide entry to swap_readpage() */
 				set_page_private(page, entry.val);
-				swap_readpage(page, true);
+				swap_readpage(page, true, NULL);
 				set_page_private(page, 0);
 			}
 		} else {
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -237,7 +237,8 @@ static void bio_associate_blkg_from_page(struct bio *bio, struct page *page)
 
 struct swap_iocb {
 	struct kiocb		iocb;
-	struct bio_vec		bvec;
+	struct bio_vec		bvec[SWAP_CLUSTER_MAX];
+	int			pages;
 };
 static mempool_t *sio_pool;
 
@@ -257,7 +258,7 @@ int sio_pool_init(void)
 static void sio_write_complete(struct kiocb *iocb, long ret)
 {
 	struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb);
-	struct page *page = sio->bvec.bv_page;
+	struct page *page = sio->bvec[0].bv_page;
 
 	if (ret != PAGE_SIZE) {
 		/*
@@ -295,10 +296,10 @@ static int swap_writepage_fs(struct page *page, struct writeback_control *wbc)
 	init_sync_kiocb(&sio->iocb, swap_file);
 	sio->iocb.ki_complete = sio_write_complete;
 	sio->iocb.ki_pos = page_file_offset(page);
-	sio->bvec.bv_page = page;
-	sio->bvec.bv_len = PAGE_SIZE;
-	sio->bvec.bv_offset = 0;
-	iov_iter_bvec(&from, WRITE, &sio->bvec, 1, PAGE_SIZE);
+	sio->bvec[0].bv_page = page;
+	sio->bvec[0].bv_len = PAGE_SIZE;
+	sio->bvec[0].bv_offset = 0;
+	iov_iter_bvec(&from, WRITE, &sio->bvec[0], 1, PAGE_SIZE);
 	ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
 	if (ret != -EIOCBQUEUED)
 		sio_write_complete(&sio->iocb, ret);
@@ -346,46 +347,66 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
 static void sio_read_complete(struct kiocb *iocb, long ret)
 {
 	struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb);
-	struct page *page = sio->bvec.bv_page;
+	int p;
 
-	if (ret != 0 && ret != PAGE_SIZE) {
-		SetPageError(page);
-		ClearPageUptodate(page);
-		pr_alert_ratelimited("Read-error on swap-device\n");
+	if (ret == PAGE_SIZE * sio->pages) {
+		for (p = 0; p < sio->pages; p++) {
+			struct page *page = sio->bvec[p].bv_page;
+
+			SetPageUptodate(page);
+			unlock_page(page);
+		}
+		count_vm_events(PSWPIN, sio->pages);
 	} else {
-		SetPageUptodate(page);
-		count_vm_event(PSWPIN);
+		for (p = 0; p < sio->pages; p++) {
+			struct page *page = sio->bvec[p].bv_page;
+
+			SetPageError(page);
+			ClearPageUptodate(page);
+			unlock_page(page);
+		}
+		pr_alert_ratelimited("Read-error on swap-device\n");
 	}
-	unlock_page(page);
 	mempool_free(sio, sio_pool);
 }
 
-static int swap_readpage_fs(struct page *page)
+static void swap_readpage_fs(struct page *page,
+			     struct swap_iocb **plug)
 {
 	struct swap_info_struct *sis = page_swap_info(page);
-	struct file *swap_file = sis->swap_file;
-	struct address_space *mapping = swap_file->f_mapping;
-	struct iov_iter from;
-	struct swap_iocb *sio;
+	struct swap_iocb *sio = NULL;
 	loff_t pos = page_file_offset(page);
-	int ret;
 
-	sio = mempool_alloc(sio_pool, GFP_KERNEL);
-	init_sync_kiocb(&sio->iocb, swap_file);
-	sio->iocb.ki_pos = pos;
-	sio->iocb.ki_complete = sio_read_complete;
-	sio->bvec.bv_page = page;
-	sio->bvec.bv_len = PAGE_SIZE;
-	sio->bvec.bv_offset = 0;
-
-	iov_iter_bvec(&from, READ, &sio->bvec, 1, PAGE_SIZE);
-	ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
-	if (ret != -EIOCBQUEUED)
-		sio_read_complete(&sio->iocb, ret);
-	return ret;
+	if (plug)
+		sio = *plug;
+	if (sio) {
+		if (sio->iocb.ki_filp != sis->swap_file ||
+		    sio->iocb.ki_pos + sio->pages * PAGE_SIZE != pos) {
+			swap_read_unplug(sio);
+			sio = NULL;
+		}
+	}
+	if (!sio) {
+		sio = mempool_alloc(sio_pool, GFP_KERNEL);
+		init_sync_kiocb(&sio->iocb, sis->swap_file);
+		sio->iocb.ki_pos = pos;
+		sio->iocb.ki_complete = sio_read_complete;
+		sio->pages = 0;
+	}
+	sio->bvec[sio->pages].bv_page = page;
+	sio->bvec[sio->pages].bv_len = PAGE_SIZE;
+	sio->bvec[sio->pages].bv_offset = 0;
+	sio->pages += 1;
+	if (sio->pages == ARRAY_SIZE(sio->bvec) || !plug) {
+		swap_read_unplug(sio);
+		sio = NULL;
+	}
+	if (plug)
+		*plug = sio;
 }
 
-int swap_readpage(struct page *page, bool synchronous)
+int swap_readpage(struct page *page, bool synchronous,
+		  struct swap_iocb **plug)
 {
 	struct bio *bio;
 	int ret = 0;
@@ -413,7 +434,7 @@ int swap_readpage(struct page *page, bool synchronous)
 	}
 
 	if (data_race(sis->flags & SWP_FS_OPS)) {
-		ret = swap_readpage_fs(page);
+		swap_readpage_fs(page, plug);
 		goto out;
 	}
@@ -459,3 +480,16 @@ int swap_readpage(struct page *page, bool synchronous)
 	delayacct_swapin_end();
 	return ret;
 }
+
+void __swap_read_unplug(struct swap_iocb *sio)
+{
+	struct iov_iter from;
+	struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
+	int ret;
+
+	iov_iter_bvec(&from, READ, sio->bvec, sio->pages,
+		      PAGE_SIZE * sio->pages);
+	ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
+	if (ret != -EIOCBQUEUED)
+		sio_read_complete(&sio->iocb, ret);
+}
--- a/mm/swap.h
+++ b/mm/swap.h
@@ -7,7 +7,15 @@
 
 /* linux/mm/page_io.c */
 int sio_pool_init(void);
-int swap_readpage(struct page *page, bool do_poll);
+struct swap_iocb;
+int swap_readpage(struct page *page, bool do_poll,
+		  struct swap_iocb **plug);
+void __swap_read_unplug(struct swap_iocb *plug);
+static inline void swap_read_unplug(struct swap_iocb *plug)
+{
+	if (unlikely(plug))
+		__swap_read_unplug(plug);
+}
 int swap_writepage(struct page *page, struct writeback_control *wbc);
 void end_swap_bio_write(struct bio *bio);
 int __swap_writepage(struct page *page, struct writeback_control *wbc,
@@ -41,7 +49,8 @@ struct page *find_get_incore_page(struct address_space *mapping, pgoff_t index);
 struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 				   struct vm_area_struct *vma,
 				   unsigned long addr,
-				   bool do_poll);
+				   bool do_poll,
+				   struct swap_iocb **plug);
 struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 				     struct vm_area_struct *vma,
 				     unsigned long addr,
@@ -56,7 +65,9 @@ static inline unsigned int page_swap_flags(struct page *page)
 	return page_swap_info(page)->flags;
 }
 #else /* CONFIG_SWAP */
-static inline int swap_readpage(struct page *page, bool do_poll)
+struct swap_iocb;
+static inline int swap_readpage(struct page *page, bool do_poll,
+				struct swap_iocb **plug)
 {
 	return 0;
 }
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -520,14 +520,16 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
  * the swap entry is no longer in use.
  */
 struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
-		struct vm_area_struct *vma, unsigned long addr, bool do_poll)
+				   struct vm_area_struct *vma,
+				   unsigned long addr, bool do_poll,
+				   struct swap_iocb **plug)
 {
 	bool page_was_allocated;
 	struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
 			vma, addr, &page_was_allocated);
 
 	if (page_was_allocated)
-		swap_readpage(retpage, do_poll);
+		swap_readpage(retpage, do_poll, plug);
 
 	return retpage;
 }
@@ -621,6 +623,7 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
 	unsigned long mask;
 	struct swap_info_struct *si = swp_swap_info(entry);
 	struct blk_plug plug;
+	struct swap_iocb *splug = NULL;
 	bool do_poll = true, page_allocated;
 	struct vm_area_struct *vma = vmf->vma;
 	unsigned long addr = vmf->address;
@@ -647,7 +650,7 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
 		if (!page)
 			continue;
 		if (page_allocated) {
-			swap_readpage(page, false);
+			swap_readpage(page, false, &splug);
 			if (offset != entry_offset) {
 				SetPageReadahead(page);
 				count_vm_event(SWAP_RA);
@@ -656,10 +659,12 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
 		put_page(page);
 	}
 	blk_finish_plug(&plug);
+	swap_read_unplug(splug);
 
 	lru_add_drain();	/* Push any new pages onto the LRU now */
 skip:
-	return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll);
+	/* The page was likely read above, so no need for plugging here */
+	return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll, NULL);
 }
 
 int init_swap_address_space(unsigned int type, unsigned long nr_pages)
@@ -790,6 +795,7 @@ static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
 			       struct vm_fault *vmf)
 {
 	struct blk_plug plug;
+	struct swap_iocb *splug = NULL;
 	struct vm_area_struct *vma = vmf->vma;
 	struct page *page;
 	pte_t *pte, pentry;
@@ -820,7 +826,7 @@ static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
 		if (!page)
 			continue;
 		if (page_allocated) {
-			swap_readpage(page, false);
+			swap_readpage(page, false, &splug);
 			if (i != ra_info.offset) {
 				SetPageReadahead(page);
 				count_vm_event(SWAP_RA);
@@ -829,10 +835,12 @@ static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
 		put_page(page);
 	}
 	blk_finish_plug(&plug);
+	swap_read_unplug(splug);
 	lru_add_drain();
 skip:
+	/* The page was likely read above, so no need for plugging here */
 	return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
-				     ra_info.win == 1);
+				     ra_info.win == 1, NULL);
 }
 
 /**