Commit cb9fbc5c authored by Arthur Kepner, committed by Linus Torvalds

IB: expand ib_umem_get() prototype

Add a new parameter, dmasync, to the ib_umem_get() prototype.  Use dmasync = 1
when mapping user-allocated CQs with ib_umem_get().
Signed-off-by: Arthur Kepner <akepner@sgi.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Jesse Barnes <jbarnes@virtuousgeek.org>
Cc: Jes Sorensen <jes@sgi.com>
Cc: Randy Dunlap <randy.dunlap@oracle.com>
Cc: Roland Dreier <rdreier@cisco.com>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Cc: David Miller <davem@davemloft.net>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Grant Grundler <grundler@parisc-linux.org>
Cc: Michael Ellerman <michael@ellerman.id.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 309df0c5
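
For context, the new last argument selects whether the pinned region is mapped with a DMA write barrier. Below is a minimal sketch of how a caller would use the expanded prototype; the function names and the split between a CQ path and an MR path are illustrative, not taken verbatim from any driver in this diff.

#include <linux/err.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>

/*
 * Illustrative only: pin a user-allocated CQ buffer with dmasync = 1 so
 * that in-flight DMA is flushed when the region is written, and pin an
 * ordinary MR with dmasync = 0, as before this change.
 */
static struct ib_umem *example_pin_cq_buf(struct ib_ucontext *context,
                                           unsigned long buf_addr,
                                           size_t buf_size)
{
        /* CQ buffer written by the HCA: request the DMA write barrier */
        return ib_umem_get(context, buf_addr, buf_size,
                           IB_ACCESS_LOCAL_WRITE, 1);
}

static struct ib_umem *example_pin_mr(struct ib_ucontext *context,
                                      unsigned long start, size_t length,
                                      int access)
{
        /* Ordinary memory registration: no DMA sync semantics needed */
        return ib_umem_get(context, start, length, access, 0);
}
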
@@ -38,6 +38,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/sched.h>
 #include <linux/hugetlb.h>
+#include <linux/dma-attrs.h>
 
 #include "uverbs.h"
@@ -72,9 +73,10 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
  * @addr: userspace virtual address to start at
  * @size: length of region to pin
  * @access: IB_ACCESS_xxx flags for memory being pinned
+ * @dmasync: flush in-flight DMA when the memory region is written
  */
 struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
-                            size_t size, int access)
+                            size_t size, int access, int dmasync)
 {
         struct ib_umem *umem;
         struct page **page_list;
@@ -87,6 +89,10 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
         int ret;
         int off;
         int i;
+        DEFINE_DMA_ATTRS(attrs);
+
+        if (dmasync)
+                dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
 
         if (!can_do_mlock())
                 return ERR_PTR(-EPERM);
@@ -174,10 +180,11 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
                         sg_set_page(&chunk->page_list[i], page_list[i + off], PAGE_SIZE, 0);
                 }
 
-                chunk->nmap = ib_dma_map_sg(context->device,
-                                            &chunk->page_list[0],
-                                            chunk->nents,
-                                            DMA_BIDIRECTIONAL);
+                chunk->nmap = ib_dma_map_sg_attrs(context->device,
+                                                  &chunk->page_list[0],
+                                                  chunk->nents,
+                                                  DMA_BIDIRECTIONAL,
+                                                  &attrs);
                 if (chunk->nmap <= 0) {
                         for (i = 0; i < chunk->nents; ++i)
                                 put_page(sg_page(&chunk->page_list[i]));
...
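
The hunk above is the core of the change: when a caller passes dmasync = 1, the scatterlist is mapped with DMA_ATTR_WRITE_BARRIER set in a struct dma_attrs, so a DMA write into the region forces any still-in-flight DMA writes to complete first. Outside of the umem code the same dma_attrs pattern looks like the self-contained sketch below; the device pointer and scatterlist are placeholders, and the function name is made up for illustration.

#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Map a scatterlist with the write-barrier DMA attribute set. */
static int example_map_with_write_barrier(struct device *dev,
                                           struct scatterlist *sg, int nents)
{
        DEFINE_DMA_ATTRS(attrs);                /* starts with all attributes clear */

        /* Ask for strict ordering: a DMA write to this mapping flushes
         * DMA writes that are still in flight. */
        dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);

        return dma_map_sg_attrs(dev, sg, nents, DMA_BIDIRECTIONAL, &attrs);
}
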
@@ -452,7 +452,7 @@ static struct ib_mr *c2_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                 return ERR_PTR(-ENOMEM);
         c2mr->pd = c2pd;
 
-        c2mr->umem = ib_umem_get(pd->uobject->context, start, length, acc);
+        c2mr->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
         if (IS_ERR(c2mr->umem)) {
                 err = PTR_ERR(c2mr->umem);
                 kfree(c2mr);
...
@@ -602,7 +602,7 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
         if (!mhp)
                 return ERR_PTR(-ENOMEM);
 
-        mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc);
+        mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
         if (IS_ERR(mhp->umem)) {
                 err = PTR_ERR(mhp->umem);
                 kfree(mhp);
...
@@ -323,7 +323,7 @@ struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
         }
 
         e_mr->umem = ib_umem_get(pd->uobject->context, start, length,
-                                 mr_access_flags);
+                                 mr_access_flags, 0);
         if (IS_ERR(e_mr->umem)) {
                 ib_mr = (void *)e_mr->umem;
                 goto reg_user_mr_exit1;
...
@@ -195,7 +195,8 @@ struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                 goto bail;
         }
 
-        umem = ib_umem_get(pd->uobject->context, start, length, mr_access_flags);
+        umem = ib_umem_get(pd->uobject->context, start, length,
+                           mr_access_flags, 0);
         if (IS_ERR(umem))
                 return (void *) umem;
...
@@ -137,7 +137,7 @@ static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *cont
         int err;
 
         *umem = ib_umem_get(context, buf_addr, cqe * sizeof (struct mlx4_cqe),
-                            IB_ACCESS_LOCAL_WRITE);
+                            IB_ACCESS_LOCAL_WRITE, 1);
         if (IS_ERR(*umem))
                 return PTR_ERR(*umem);
...
@@ -63,7 +63,7 @@ int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt,
         page->user_virt = (virt & PAGE_MASK);
         page->refcnt    = 0;
         page->umem      = ib_umem_get(&context->ibucontext, virt & PAGE_MASK,
-                                      PAGE_SIZE, 0);
+                                      PAGE_SIZE, 0, 0);
         if (IS_ERR(page->umem)) {
                 err = PTR_ERR(page->umem);
                 kfree(page);
...
@@ -132,7 +132,8 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
         if (!mr)
                 return ERR_PTR(-ENOMEM);
 
-        mr->umem = ib_umem_get(pd->uobject->context, start, length, access_flags);
+        mr->umem = ib_umem_get(pd->uobject->context, start, length,
+                               access_flags, 0);
         if (IS_ERR(mr->umem)) {
                 err = PTR_ERR(mr->umem);
                 goto err_free;
...
@@ -482,7 +482,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
                         goto err;
 
                 qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
-                                       qp->buf_size, 0);
+                                       qp->buf_size, 0, 0);
                 if (IS_ERR(qp->umem)) {
                         err = PTR_ERR(qp->umem);
                         goto err;
...
@@ -109,7 +109,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
                 }
 
                 srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
-                                        buf_size, 0);
+                                        buf_size, 0, 0);
                 if (IS_ERR(srq->umem)) {
                         err = PTR_ERR(srq->umem);
                         goto err_srq;
...
@@ -1006,17 +1006,23 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
         struct mthca_dev *dev = to_mdev(pd->device);
         struct ib_umem_chunk *chunk;
         struct mthca_mr *mr;
+        struct mthca_reg_mr ucmd;
         u64 *pages;
         int shift, n, len;
         int i, j, k;
         int err = 0;
         int write_mtt_size;
 
+        if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
+                return ERR_PTR(-EFAULT);
+
         mr = kmalloc(sizeof *mr, GFP_KERNEL);
         if (!mr)
                 return ERR_PTR(-ENOMEM);
 
-        mr->umem = ib_umem_get(pd->uobject->context, start, length, acc);
+        mr->umem = ib_umem_get(pd->uobject->context, start, length, acc,
+                               ucmd.mr_attrs & MTHCA_MR_DMASYNC);
+
         if (IS_ERR(mr->umem)) {
                 err = PTR_ERR(mr->umem);
                 goto err;
...
@@ -41,7 +41,7 @@
  * Increment this value if any changes that break userspace ABI
  * compatibility are made.
  */
-#define MTHCA_UVERBS_ABI_VERSION        1
+#define MTHCA_UVERBS_ABI_VERSION        2
 
 /*
  * Make sure that all structs defined in this file remain laid out so
@@ -61,6 +61,14 @@ struct mthca_alloc_pd_resp {
         __u32 reserved;
 };
 
+struct mthca_reg_mr {
+        __u32 mr_attrs;
+#define MTHCA_MR_DMASYNC 0x1
+/* mark the memory region with a DMA attribute that causes
+ * in-flight DMA to be flushed when the region is written to */
+        __u32 reserved;
+};
+
 struct mthca_create_cq {
         __u32 lkey;
         __u32 pdn;
...
@@ -2377,7 +2377,7 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
         u8 single_page = 1;
         u8 stag_key;
 
-        region = ib_umem_get(pd->uobject->context, start, length, acc);
+        region = ib_umem_get(pd->uobject->context, start, length, acc, 0);
         if (IS_ERR(region)) {
                 return (struct ib_mr *)region;
         }
...
@@ -62,7 +62,7 @@ struct ib_umem_chunk {
 #ifdef CONFIG_INFINIBAND_USER_MEM
 
 struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
-                            size_t size, int access);
+                            size_t size, int access, int dmasync);
 void ib_umem_release(struct ib_umem *umem);
 int ib_umem_page_count(struct ib_umem *umem);
 
@@ -72,7 +72,7 @@ int ib_umem_page_count(struct ib_umem *umem);
 
 static inline struct ib_umem *ib_umem_get(struct ib_ucontext *context,
                                           unsigned long addr, size_t size,
-                                          int access) {
+                                          int access, int dmasync) {
         return ERR_PTR(-EINVAL);
 }
 static inline void ib_umem_release(struct ib_umem *umem) { }
...
@@ -1542,6 +1542,24 @@ static inline void ib_dma_unmap_single(struct ib_device *dev,
         dma_unmap_single(dev->dma_device, addr, size, direction);
 }
 
+static inline u64 ib_dma_map_single_attrs(struct ib_device *dev,
+                                          void *cpu_addr, size_t size,
+                                          enum dma_data_direction direction,
+                                          struct dma_attrs *attrs)
+{
+        return dma_map_single_attrs(dev->dma_device, cpu_addr, size,
+                                    direction, attrs);
+}
+
+static inline void ib_dma_unmap_single_attrs(struct ib_device *dev,
+                                             u64 addr, size_t size,
+                                             enum dma_data_direction direction,
+                                             struct dma_attrs *attrs)
+{
+        return dma_unmap_single_attrs(dev->dma_device, addr, size,
+                                      direction, attrs);
+}
+
 /**
  * ib_dma_map_page - Map a physical page to DMA address
  * @dev: The device for which the dma_addr is to be created
@@ -1611,6 +1629,21 @@ static inline void ib_dma_unmap_sg(struct ib_device *dev,
         dma_unmap_sg(dev->dma_device, sg, nents, direction);
 }
 
+static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
+                                      struct scatterlist *sg, int nents,
+                                      enum dma_data_direction direction,
+                                      struct dma_attrs *attrs)
+{
+        return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
+}
+
+static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
+                                         struct scatterlist *sg, int nents,
+                                         enum dma_data_direction direction,
+                                         struct dma_attrs *attrs)
+{
+        dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
+}
+
 /**
  * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
  * @dev: The device for which the DMA addresses were created
...
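
The ib_dma_map_single_attrs()/ib_dma_unmap_single_attrs() wrappers added above are not exercised by ib_umem_get() itself; they simply let drivers pass DMA attributes for single-buffer mappings as well. A hedged sketch of what such a call could look like in a driver follows; the buffer, device, and function names are illustrative and not part of this patch.

#include <linux/dma-attrs.h>
#include <rdma/ib_verbs.h>

/* Illustrative: map one kernel buffer for the HCA with the write-barrier
 * attribute, then unmap it with the same attributes. */
static int example_map_single_with_attrs(struct ib_device *ibdev,
                                          void *buf, size_t len)
{
        u64 dma_addr;
        DEFINE_DMA_ATTRS(attrs);

        dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);

        dma_addr = ib_dma_map_single_attrs(ibdev, buf, len,
                                           DMA_BIDIRECTIONAL, &attrs);
        if (ib_dma_mapping_error(ibdev, dma_addr))
                return -ENOMEM;

        /* ... post work requests that reference dma_addr ... */

        ib_dma_unmap_single_attrs(ibdev, dma_addr, len,
                                  DMA_BIDIRECTIONAL, &attrs);
        return 0;
}
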