Commit 4eb6ab13 authored by Jason Gunthorpe

RDMA: Remove rdma_user_mmap_page

Upon further research, drivers that want this should simply call the core
function vm_insert_page(). The VMA holds a reference on the page and it
will be automatically freed when the last reference drops. There is no
need for disassociate to sequence the cleanup.
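
For illustration only (this sketch is not part of the patch): a driver that
wants to hand a single kernel page to userspace can call vm_insert_page()
directly from its mmap handler, roughly as below. struct my_dev, shared_page
and my_mmap_shared_page are made-up names for the example.

  /*
   * Hypothetical driver state for the sketch: shared_page points at a page
   * allocated with get_zeroed_page() (vm_insert_page() expects an individual
   * page, not slab memory).
   */
  struct my_dev {
  	void *shared_page;
  };

  static int my_mmap_shared_page(struct my_dev *dev, struct vm_area_struct *vma)
  {
  	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
  		return -EINVAL;

  	/*
  	 * vm_insert_page() takes its own reference on the page for the
  	 * mapping, so the page stays alive until the last user mapping is
  	 * torn down; nothing has to zap it when the ucontext is destroyed.
  	 */
  	return vm_insert_page(vma, vma->vm_start,
  			      virt_to_page(dev->shared_page));
  }
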
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent ddcdc368
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -926,43 +926,32 @@ static const struct vm_operations_struct rdma_umap_ops = {
 	.fault = rdma_umap_fault,
 };
 
-static struct rdma_umap_priv *rdma_user_mmap_pre(struct ib_ucontext *ucontext,
-						 struct vm_area_struct *vma,
-						 unsigned long size)
+/*
+ * Map IO memory into a process. This is to be called by drivers as part of
+ * their mmap() functions if they wish to send something like PCI-E BAR memory
+ * to userspace.
+ */
+int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
+		      unsigned long pfn, unsigned long size, pgprot_t prot)
 {
 	struct ib_uverbs_file *ufile = ucontext->ufile;
 	struct rdma_umap_priv *priv;
 
 	if (!(vma->vm_flags & VM_SHARED))
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
 	if (vma->vm_end - vma->vm_start != size)
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
 	/* Driver is using this wrong, must be called by ib_uverbs_mmap */
 	if (WARN_ON(!vma->vm_file ||
 		    vma->vm_file->private_data != ufile))
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 	lockdep_assert_held(&ufile->device->disassociate_srcu);
 
 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 	if (!priv)
-		return ERR_PTR(-ENOMEM);
-	return priv;
-}
-
-/*
- * Map IO memory into a process. This is to be called by drivers as part of
- * their mmap() functions if they wish to send something like PCI-E BAR memory
- * to userspace.
- */
-int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
-		      unsigned long pfn, unsigned long size, pgprot_t prot)
-{
-	struct rdma_umap_priv *priv = rdma_user_mmap_pre(ucontext, vma, size);
-
-	if (IS_ERR(priv))
-		return PTR_ERR(priv);
+		return -ENOMEM;
 
 	vma->vm_page_prot = prot;
 	if (io_remap_pfn_range(vma, vma->vm_start, pfn, size, prot)) {
@@ -975,35 +964,6 @@ int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
 }
 EXPORT_SYMBOL(rdma_user_mmap_io);
 
-/*
- * The page case is here for a slightly different reason, the driver expects
- * to be able to free the page it is sharing to user space when it destroys
- * its ucontext, which means we need to zap the user space references.
- *
- * We could handle this differently by providing an API to allocate a shared
- * page and then only freeing the shared page when the last ufile is
- * destroyed.
- */
-int rdma_user_mmap_page(struct ib_ucontext *ucontext,
-			struct vm_area_struct *vma, struct page *page,
-			unsigned long size)
-{
-	struct rdma_umap_priv *priv = rdma_user_mmap_pre(ucontext, vma, size);
-
-	if (IS_ERR(priv))
-		return PTR_ERR(priv);
-
-	if (remap_pfn_range(vma, vma->vm_start, page_to_pfn(page), size,
-			    vma->vm_page_prot)) {
-		kfree(priv);
-		return -EAGAIN;
-	}
-
-	rdma_umap_priv_init(priv, vma);
-	return 0;
-}
-EXPORT_SYMBOL(rdma_user_mmap_page);
-
 void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
 {
 	struct rdma_umap_priv *priv, *next_priv;
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -2060,22 +2060,22 @@ static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
 					struct vm_area_struct *vma,
 					struct mlx5_ib_ucontext *context)
 {
-	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
+	if ((vma->vm_end - vma->vm_start != PAGE_SIZE) ||
+	    !(vma->vm_flags & VM_SHARED))
 		return -EINVAL;
 
 	if (get_index(vma->vm_pgoff) != MLX5_IB_CLOCK_INFO_V1)
 		return -EOPNOTSUPP;
 
-	if (vma->vm_flags & VM_WRITE)
+	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
 		return -EPERM;
-	vma->vm_flags &= ~VM_MAYWRITE;
+	vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
 
 	if (!dev->mdev->clock_info)
 		return -EOPNOTSUPP;
 
-	return rdma_user_mmap_page(&context->ibucontext, vma,
-				   virt_to_page(dev->mdev->clock_info),
-				   PAGE_SIZE);
+	return vm_insert_page(vma, vma->vm_start,
+			      virt_to_page(dev->mdev->clock_info));
 }
 
 static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -2705,9 +2705,6 @@ void ib_set_device_ops(struct ib_device *device,
 #if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
 int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
 		      unsigned long pfn, unsigned long size, pgprot_t prot);
-int rdma_user_mmap_page(struct ib_ucontext *ucontext,
-			struct vm_area_struct *vma, struct page *page,
-			unsigned long size);
 #else
 static inline int rdma_user_mmap_io(struct ib_ucontext *ucontext,
 				    struct vm_area_struct *vma,
@@ -2716,12 +2713,6 @@ static inline int rdma_user_mmap_io(struct ib_ucontext *ucontext,
 {
 	return -EINVAL;
 }
-static inline int rdma_user_mmap_page(struct ib_ucontext *ucontext,
-				      struct vm_area_struct *vma, struct page *page,
-				      unsigned long size)
-{
-	return -EINVAL;
-}
 #endif
 
 static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)