Commit 3a2a1e90 authored by Davidlohr Bueso, committed by Jason Gunthorpe

drivers/IB,qib: optimize mmap_sem usage

The driver uses mmap_sem for both pinned_vm accounting and
get_user_pages(). Because rdma drivers might want to use gup_longterm() in
the future, some form of mmap_sem serialization is still needed (as opposed
to removing it entirely by using gup_fast()). Now that pinned_vm is an
atomic counter, the writer lock can be converted to a reader lock.

This also fixes a bug where __qib_get_user_pages() was not taking the
current value of pinned_vm into account.
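For reference, the locking and accounting pattern described above boils down to the condensed sketch below. This is illustrative only and not part of the patch: pin_pages_sketch() is a hypothetical helper, and the single get_user_pages_longterm() call plus its error handling are simplified compared to the driver code further down, which loops until every page is pinned.

#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/capability.h>

/* Illustrative sketch, not the driver's code: atomic pinned_vm accounting
 * plus a read-side mmap_sem around the gup call, assuming a kernel of this
 * era where mm->pinned_vm is an atomic64_t and get_user_pages_longterm()
 * is available. */
static int pin_pages_sketch(unsigned long start, unsigned long npages,
			    struct page **pages)
{
	unsigned long lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	unsigned long locked;
	long pinned;

	/* Atomic accounting: the limit check includes pages already pinned. */
	locked = atomic64_add_return(npages, &current->mm->pinned_vm);
	if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
		atomic64_sub(npages, &current->mm->pinned_vm);
		return -ENOMEM;
	}

	/* The reader side of mmap_sem is enough around the gup call itself. */
	down_read(&current->mm->mmap_sem);
	pinned = get_user_pages_longterm(start, npages, FOLL_WRITE,
					 pages, NULL);
	up_read(&current->mm->mmap_sem);

	if (pinned < 0) {
		atomic64_sub(npages, &current->mm->pinned_vm);
		return pinned;
	}
	if (pinned != npages) {
		/* Partial pin (simplified): drop what we got, undo accounting. */
		while (pinned > 0)
			put_page(pages[--pinned]);
		atomic64_sub(npages, &current->mm->pinned_vm);
		return -EFAULT;
	}
	return 0;
}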
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 4f564ff3
@@ -49,43 +49,6 @@ static void __qib_release_user_pages(struct page **p, size_t num_pages,
 	}
 }
 
-/*
- * Call with current->mm->mmap_sem held.
- */
-static int __qib_get_user_pages(unsigned long start_page, size_t num_pages,
-				struct page **p)
-{
-	unsigned long lock_limit;
-	size_t got;
-	int ret;
-
-	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
-
-	if (num_pages > lock_limit && !capable(CAP_IPC_LOCK)) {
-		ret = -ENOMEM;
-		goto bail;
-	}
-
-	for (got = 0; got < num_pages; got += ret) {
-		ret = get_user_pages_longterm(start_page + got * PAGE_SIZE,
-					      num_pages - got,
-					      FOLL_WRITE | FOLL_FORCE,
-					      p + got, NULL);
-		if (ret < 0)
-			goto bail_release;
-	}
-
-	atomic64_add(num_pages, &current->mm->pinned_vm);
-
-	ret = 0;
-	goto bail;
-
-bail_release:
-	__qib_release_user_pages(p, got, 0);
-bail:
-	return ret;
-}
-
 /**
  * qib_map_page - a safety wrapper around pci_map_page()
  *
@@ -137,26 +100,44 @@ int qib_map_page(struct pci_dev *hwdev, struct page *page, dma_addr_t *daddr)
 int qib_get_user_pages(unsigned long start_page, size_t num_pages,
 		       struct page **p)
 {
+	unsigned long locked, lock_limit;
+	size_t got;
 	int ret;
 
-	down_write(&current->mm->mmap_sem);
+	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+	locked = atomic64_add_return(num_pages, &current->mm->pinned_vm);
 
-	ret = __qib_get_user_pages(start_page, num_pages, p);
+	if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
+		ret = -ENOMEM;
+		goto bail;
+	}
 
-	up_write(&current->mm->mmap_sem);
+	down_read(&current->mm->mmap_sem);
+	for (got = 0; got < num_pages; got += ret) {
+		ret = get_user_pages_longterm(start_page + got * PAGE_SIZE,
+					      num_pages - got,
+					      FOLL_WRITE | FOLL_FORCE,
+					      p + got, NULL);
+		if (ret < 0) {
+			up_read(&current->mm->mmap_sem);
+			goto bail_release;
+		}
+	}
+	up_read(&current->mm->mmap_sem);
 
+	return 0;
+bail_release:
+	__qib_release_user_pages(p, got, 0);
+bail:
+	atomic64_sub(num_pages, &current->mm->pinned_vm);
 	return ret;
 }
 
 void qib_release_user_pages(struct page **p, size_t num_pages)
 {
-	if (current->mm) /* during close after signal, mm can be NULL */
-		down_write(&current->mm->mmap_sem);
-
 	__qib_release_user_pages(p, num_pages, 1);
 
-	if (current->mm) {
-		atomic64_sub(num_pages, &current->mm->pinned_vm);
-		up_write(&current->mm->mmap_sem);
-	}
+	/* during close after signal, mm can be NULL */
+	if (current->mm)
+		atomic64_sub(num_pages, &current->mm->pinned_vm);
 }