Commit 52650c8b authored by Jason Gunthorpe, committed by Linus Torvalds

mm/gup: remove the vma allocation from gup_longterm_locked()

Long ago there wasn't a FOLL_LONGTERM flag so this DAX check was done by
post-processing the VMA list.

These days it is trivial to just check each VMA to see if it is DAX before
processing it inside __get_user_pages() and return failure if a DAX VMA is
encountered with FOLL_LONGTERM.

Removing the allocation of the VMA list is a significant speed up for many
call sites.

Add an IS_ENABLED to vma_is_fsdax so that code generation is unchanged
when DAX is compiled out.

Remove the dummy version of __gup_longterm_locked() as !CONFIG_CMA already
makes memalloc_nocma_save(), check_and_migrate_cma_pages(), and
memalloc_nocma_restore() into a NOP.

Link: https://lkml.kernel.org/r/0-v1-5551df3ed12e+b8-gup_dax_speedup_jgg@nvidia.com
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Pavel Tatashin <pasha.tatashin@soleen.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 57efa1fe
...@@ -3230,7 +3230,7 @@ static inline bool vma_is_fsdax(struct vm_area_struct *vma) ...@@ -3230,7 +3230,7 @@ static inline bool vma_is_fsdax(struct vm_area_struct *vma)
{ {
struct inode *inode; struct inode *inode;
if (!vma->vm_file) if (!IS_ENABLED(CONFIG_FS_DAX) || !vma->vm_file)
return false; return false;
if (!vma_is_dax(vma)) if (!vma_is_dax(vma))
return false; return false;
......
...@@ -923,6 +923,9 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags) ...@@ -923,6 +923,9 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma)) if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma))
return -EFAULT; return -EFAULT;
if ((gup_flags & FOLL_LONGTERM) && vma_is_fsdax(vma))
return -EOPNOTSUPP;
if (write) { if (write) {
if (!(vm_flags & VM_WRITE)) { if (!(vm_flags & VM_WRITE)) {
if (!(gup_flags & FOLL_FORCE)) if (!(gup_flags & FOLL_FORCE))
...@@ -1060,10 +1063,14 @@ static long __get_user_pages(struct mm_struct *mm, ...@@ -1060,10 +1063,14 @@ static long __get_user_pages(struct mm_struct *mm,
goto next_page; goto next_page;
} }
if (!vma || check_vma_flags(vma, gup_flags)) { if (!vma) {
ret = -EFAULT; ret = -EFAULT;
goto out; goto out;
} }
ret = check_vma_flags(vma, gup_flags);
if (ret)
goto out;
if (is_vm_hugetlb_page(vma)) { if (is_vm_hugetlb_page(vma)) {
i = follow_hugetlb_page(mm, vma, pages, vmas, i = follow_hugetlb_page(mm, vma, pages, vmas,
&start, &nr_pages, i, &start, &nr_pages, i,
...@@ -1567,26 +1574,6 @@ struct page *get_dump_page(unsigned long addr) ...@@ -1567,26 +1574,6 @@ struct page *get_dump_page(unsigned long addr)
} }
#endif /* CONFIG_ELF_CORE */ #endif /* CONFIG_ELF_CORE */
#if defined(CONFIG_FS_DAX) || defined (CONFIG_CMA)
/*
 * check_dax_vmas() - report whether any VMA in the array is fsdax-backed.
 *
 * @vmas:     array of one VMA pointer per pinned page (may repeat)
 * @nr_pages: number of entries in @vmas
 *
 * Consecutive pages usually share a VMA, so runs of identical pointers
 * are collapsed and vma_is_fsdax() is consulted once per distinct run.
 * Returns true at the first filesystem-DAX VMA found, false otherwise.
 */
static bool check_dax_vmas(struct vm_area_struct **vmas, long nr_pages)
{
	struct vm_area_struct *prev = NULL;
	long idx;

	for (idx = 0; idx < nr_pages; idx++) {
		struct vm_area_struct *cur = vmas[idx];

		/* Skip duplicates: same VMA as the previous page. */
		if (cur == prev)
			continue;
		prev = cur;
		if (vma_is_fsdax(cur))
			return true;
	}
	return false;
}
#ifdef CONFIG_CMA #ifdef CONFIG_CMA
static long check_and_migrate_cma_pages(struct mm_struct *mm, static long check_and_migrate_cma_pages(struct mm_struct *mm,
unsigned long start, unsigned long start,
...@@ -1705,63 +1692,23 @@ static long __gup_longterm_locked(struct mm_struct *mm, ...@@ -1705,63 +1692,23 @@ static long __gup_longterm_locked(struct mm_struct *mm,
struct vm_area_struct **vmas, struct vm_area_struct **vmas,
unsigned int gup_flags) unsigned int gup_flags)
{ {
struct vm_area_struct **vmas_tmp = vmas;
unsigned long flags = 0; unsigned long flags = 0;
long rc, i; long rc;
if (gup_flags & FOLL_LONGTERM) { if (gup_flags & FOLL_LONGTERM)
if (!pages)
return -EINVAL;
if (!vmas_tmp) {
vmas_tmp = kcalloc(nr_pages,
sizeof(struct vm_area_struct *),
GFP_KERNEL);
if (!vmas_tmp)
return -ENOMEM;
}
flags = memalloc_nocma_save(); flags = memalloc_nocma_save();
}
rc = __get_user_pages_locked(mm, start, nr_pages, pages, rc = __get_user_pages_locked(mm, start, nr_pages, pages, vmas, NULL,
vmas_tmp, NULL, gup_flags); gup_flags);
if (gup_flags & FOLL_LONGTERM) { if (gup_flags & FOLL_LONGTERM) {
if (rc < 0) if (rc > 0)
goto out;
if (check_dax_vmas(vmas_tmp, rc)) {
if (gup_flags & FOLL_PIN)
unpin_user_pages(pages, rc);
else
for (i = 0; i < rc; i++)
put_page(pages[i]);
rc = -EOPNOTSUPP;
goto out;
}
rc = check_and_migrate_cma_pages(mm, start, rc, pages, rc = check_and_migrate_cma_pages(mm, start, rc, pages,
vmas_tmp, gup_flags); vmas, gup_flags);
out:
memalloc_nocma_restore(flags); memalloc_nocma_restore(flags);
} }
if (vmas_tmp != vmas)
kfree(vmas_tmp);
return rc; return rc;
} }
#else /* !CONFIG_FS_DAX && !CONFIG_CMA */
/*
 * __gup_longterm_locked() - stub used when neither CONFIG_FS_DAX nor
 * CONFIG_CMA is enabled.
 *
 * With no DAX VMAs to reject and no CMA pages to migrate, FOLL_LONGTERM
 * needs no post-processing, so this reduces to a direct call to
 * __get_user_pages_locked() with the same arguments (NULL "locked").
 */
static __always_inline long __gup_longterm_locked(struct mm_struct *mm,
unsigned long start,
unsigned long nr_pages,
struct page **pages,
struct vm_area_struct **vmas,
unsigned int flags)
{
return __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
NULL, flags);
}
#endif /* CONFIG_FS_DAX || CONFIG_CMA */
static bool is_valid_gup_flags(unsigned int gup_flags) static bool is_valid_gup_flags(unsigned int gup_flags)
{ {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment