Commit 762f899a authored by Majd Dibbiny, committed by Doug Ledford

IB/mlx5: Limit mkey page size to 2GB

The maximum page size supported by the mkey context is 2GB.

Until now, we did not enforce this limit in the code; when we got a
page size larger than 2GB, we passed zero in log_page_shift instead
of the actual value and the registration failed.

This patch limits the driver to compound pages of at most 2GB for mkeys.

Fixes: e126ba97 ('mlx5: Add driver for Mellanox Connect-IB adapters')
Signed-off-by: Maor Gottlieb <maorg@mellanox.com>
Signed-off-by: Majd Dibbiny <majd@mellanox.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 288c01b7
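For illustration, the failure mode described in the message above is plain
integer truncation. Here is a minimal user-space sketch, not driver code:
the macro names are illustrative, and the 5-bit width of the mkey context's
log_page_size field is an assumption derived from the 2GB limit stated above.

#include <stdio.h>

/* The mkey context stores the page size as a log_page_size field assumed
 * to be 5 bits wide, so the largest representable page is 2^31 = 2GB.
 * A larger shift is truncated to the field width. */
#define LOG_PAGE_SIZE_BITS 5
#define PAGE_SHIFT_MASK    ((1u << LOG_PAGE_SIZE_BITS) - 1)	/* 0x1f == 31 */

int main(void)
{
	unsigned int page_shift = 32;	/* a 4GB compound page */

	/* 32 & 0x1f == 0: the device sees log_page_size == 0 and rejects
	 * the registration, which is the failure this patch fixes. */
	printf("stored log_page_size = %u\n", page_shift & PAGE_SHIFT_MASK);
	return 0;
}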
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
@@ -770,7 +770,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
 	if (err)
 		goto err_umem;
 
-	mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, &npages, &page_shift,
+	mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, 0, &npages, &page_shift,
 			   &ncont, NULL);
 	mlx5_ib_dbg(dev, "addr 0x%llx, size %u, npages %d, page_shift %d, ncont %d\n",
 		    ucmd.buf_addr, entries * ucmd.cqe_size, npages, page_shift, ncont);
@@ -1125,7 +1125,7 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
 		return err;
 	}
 
-	mlx5_ib_cont_pages(umem, ucmd.buf_addr, &npages, page_shift,
+	mlx5_ib_cont_pages(umem, ucmd.buf_addr, 0, &npages, page_shift,
 			   npas, NULL);
 
 	cq->resize_umem = umem;
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
@@ -37,12 +37,15 @@
 /* @umem: umem object to scan
  * @addr: ib virtual address requested by the user
+ * @max_page_shift: high limit for page_shift - 0 means no limit
  * @count: number of PAGE_SIZE pages covered by umem
  * @shift: page shift for the compound pages found in the region
  * @ncont: number of compund pages
  * @order: log2 of the number of compound pages
  */
-void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
+void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
+			unsigned long max_page_shift,
+			int *count, int *shift,
 			int *ncont, int *order)
 {
 	unsigned long tmp;
@@ -72,6 +75,8 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
 		addr = addr >> page_shift;
 		tmp = (unsigned long)addr;
 		m = find_first_bit(&tmp, BITS_PER_LONG);
+		if (max_page_shift)
+			m = min_t(unsigned long, max_page_shift - page_shift, m);
 		skip = 1 << m;
 		mask = skip - 1;
 		i = 0;
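The core of the fix is the clamp on m above: find_first_bit() on the
page-shifted start address counts its trailing zero bits, i.e. the largest
power-of-two alignment of the buffer, and the new max_page_shift bound keeps
the resulting compound page at or below 2^max_page_shift bytes. A rough
user-space sketch of that logic follows; the helper name is hypothetical
and __builtin_ctzll() (GCC/Clang count-trailing-zeros) stands in for the
kernel's find_first_bit() on a single word.

#include <stdio.h>

static unsigned long cont_page_shift(unsigned long long addr,
				     unsigned long page_shift,
				     unsigned long max_page_shift)
{
	unsigned long long tmp = addr >> page_shift;	/* start, in PAGE_SIZE units */
	/* Trailing zero bits == largest power-of-two alignment of the start. */
	unsigned long m = tmp ? __builtin_ctzll(tmp) : 63;

	if (max_page_shift)	/* 0 means "no limit", as in the new parameter */
		m = m < max_page_shift - page_shift ?
			m : max_page_shift - page_shift;

	return page_shift + m;	/* log2 of the biggest usable compound page */
}

int main(void)
{
	/* A buffer starting on a 4GB boundary with 4KB base pages: uncapped
	 * this yields shift 32 (4GB); capped at 31 it stays within 2GB. */
	printf("uncapped: %lu\n", cont_page_shift(0x100000000ULL, 12, 0));
	printf("capped:   %lu\n", cont_page_shift(0x100000000ULL, 12, 31));
	return 0;
}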
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -63,6 +63,8 @@ pr_warn("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \
 #define MLX5_IB_DEFAULT_UIDX 0xffffff
 #define MLX5_USER_ASSIGNED_UIDX_MASK __mlx5_mask(qpc, user_index)
 
+#define MLX5_MKEY_PAGE_SHIFT_MASK __mlx5_mask(mkc, log_page_size)
+
 enum {
 	MLX5_IB_MMAP_CMD_SHIFT	= 8,
 	MLX5_IB_MMAP_CMD_MASK	= 0xff,
@@ -823,7 +825,9 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
 			struct ib_port_attr *props);
 int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev);
 void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev);
-void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
+void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
+			unsigned long max_page_shift,
+			int *count, int *shift,
 			int *ncont, int *order);
 void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
 			    int page_shift, size_t offset, size_t num_pages,
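A note on the new constant: __mlx5_mask() expands to the all-ones mask of
the named device-interface field, so with the mkey context's log_page_size
field (assumed 5 bits wide, consistent with the 2GB ceiling in the commit
message) MLX5_MKEY_PAGE_SHIFT_MASK evaluates to (1 << 5) - 1 = 31, and
2^31 bytes is exactly 2GB. The mkey-registration path in mr.c below passes
it as max_page_shift, while the CQ, QP, and SRQ callers pass 0 to keep
their previous, unlimited behavior.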
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
@@ -855,7 +855,8 @@ static struct ib_umem *mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
 		return (void *)umem;
 	}
 
-	mlx5_ib_cont_pages(umem, start, npages, page_shift, ncont, order);
+	mlx5_ib_cont_pages(umem, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages,
+			   page_shift, ncont, order);
 	if (!*npages) {
 		mlx5_ib_warn(dev, "avoid zero region\n");
 		ib_umem_release(umem);
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
@@ -675,7 +675,7 @@ static int mlx5_ib_umem_get(struct mlx5_ib_dev *dev,
 		return PTR_ERR(*umem);
 	}
 
-	mlx5_ib_cont_pages(*umem, addr, npages, page_shift, ncont, NULL);
+	mlx5_ib_cont_pages(*umem, addr, 0, npages, page_shift, ncont, NULL);
 
 	err = mlx5_ib_get_buf_offset(addr, *page_shift, offset);
 	if (err) {
@@ -728,7 +728,7 @@ static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 		return err;
 	}
 
-	mlx5_ib_cont_pages(rwq->umem, ucmd->buf_addr, &npages, &page_shift,
+	mlx5_ib_cont_pages(rwq->umem, ucmd->buf_addr, 0, &npages, &page_shift,
 			   &ncont, NULL);
 	err = mlx5_ib_get_buf_offset(ucmd->buf_addr, page_shift,
 				     &rwq->rq_page_offset);
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
@@ -118,7 +118,7 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
 		return err;
 	}
 
-	mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, &npages,
+	mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, 0, &npages,
 			   &page_shift, &ncont, NULL);
 	err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift,
 				    &offset);