Commit 14ab8896 authored by Arnd Bergmann, committed by Doug Ledford

IB/mlx5: avoid bogus -Wmaybe-uninitialized warning

We get a false-positive warning in linux-next for the mlx5 driver:

infiniband/hw/mlx5/mr.c: In function ‘mlx5_ib_reg_user_mr’:
infiniband/hw/mlx5/mr.c:1172:5: error: ‘order’ may be used uninitialized in this function [-Werror=maybe-uninitialized]
infiniband/hw/mlx5/mr.c:1161:6: note: ‘order’ was declared here
infiniband/hw/mlx5/mr.c:1173:6: error: ‘ncont’ may be used uninitialized in this function [-Werror=maybe-uninitialized]
infiniband/hw/mlx5/mr.c:1160:6: note: ‘ncont’ was declared here
infiniband/hw/mlx5/mr.c:1173:6: error: ‘page_shift’ may be used uninitialized in this function [-Werror=maybe-uninitialized]
infiniband/hw/mlx5/mr.c:1158:6: note: ‘page_shift’ was declared here
infiniband/hw/mlx5/mr.c:1143:13: error: ‘npages’ may be used uninitialized in this function [-Werror=maybe-uninitialized]
infiniband/hw/mlx5/mr.c:1159:6: note: ‘npages’ was declared here

I had a trivial workaround for gcc-5 or higher, but unfortunately that
didn't work on gcc-4.9.

The only way I found to avoid the warnings for gcc-4.9, short of
initializing each of the arguments first, was to change the calling
convention to separate the error code from the umem pointer. This
avoids casting the error code from one pointer to another incompatible
pointer, and lets gcc figure out that the data is actually valid
whenever we return successfully.
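
As a rough illustration (this is not the driver code; struct thing, get_old(),
get_new() and the callers below are made-up names, and the ERR_PTR()/PTR_ERR()/
IS_ERR() definitions are simplified stand-ins for the real kernel helpers), the
two calling conventions look like this. Whether the old-style caller actually
triggers -Wmaybe-uninitialized depends on the compiler version and its inlining
decisions, which is exactly why only gcc-4.9 tripped over the original code:

    #include <errno.h>
    #include <stdint.h>

    struct thing { int dummy; };

    /* Simplified stand-ins for the kernel's ERR_PTR()/PTR_ERR()/IS_ERR(). */
    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
    	return (uintptr_t)ptr >= (uintptr_t)-4095;
    }

    static struct thing the_thing;

    /* Old convention: return a pointer or an encoded error, and write the
     * out-parameter only on the success path. */
    static struct thing *get_old(int want, int *npages)
    {
    	if (want < 0)
    		return ERR_PTR(-EINVAL);
    	*npages = want;
    	return &the_thing;
    }

    /* New convention: return a plain error code and hand the object back
     * through a second out-parameter, so a zero return implies everything
     * was written. */
    static int get_new(int want, struct thing **out, int *npages)
    {
    	if (want < 0)
    		return -EINVAL;
    	*npages = want;
    	*out = &the_thing;
    	return 0;
    }

    int caller_old(int want)
    {
    	int npages;
    	struct thing *t = get_old(want, &npages);

    	if (IS_ERR(t))
    		return PTR_ERR(t);
    	return npages;	/* gcc-4.9 may warn: npages "may be used uninitialized" */
    }

    int caller_new(int want)
    {
    	int npages;
    	struct thing *t;
    	int err = get_new(want, &t, &npages);

    	if (err < 0)
    		return err;
    	return npages;	/* err == 0 implies npages was written, no warning */
    }

Returning a plain int and passing the object back through an out-pointer also
avoids the (void *) casts of error values between incompatible pointer types.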
Acked-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 7d29f349
@@ -844,30 +844,34 @@ static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev,
 	umrwr->mkey = key;
 }
 
-static struct ib_umem *mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
-				   int access_flags, int *npages,
-				   int *page_shift, int *ncont, int *order)
+static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
+		       int access_flags, struct ib_umem **umem,
+		       int *npages, int *page_shift, int *ncont,
+		       int *order)
 {
 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
-	struct ib_umem *umem = ib_umem_get(pd->uobject->context, start, length,
-					   access_flags, 0);
-	if (IS_ERR(umem)) {
+	int err;
+
+	*umem = ib_umem_get(pd->uobject->context, start, length,
+			    access_flags, 0);
+	err = PTR_ERR_OR_ZERO(*umem);
+	if (err < 0) {
 		mlx5_ib_err(dev, "umem get failed (%ld)\n", PTR_ERR(umem));
-		return (void *)umem;
+		return err;
 	}
 
-	mlx5_ib_cont_pages(umem, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages,
+	mlx5_ib_cont_pages(*umem, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages,
 			   page_shift, ncont, order);
 	if (!*npages) {
 		mlx5_ib_warn(dev, "avoid zero region\n");
-		ib_umem_release(umem);
-		return ERR_PTR(-EINVAL);
+		ib_umem_release(*umem);
+		return -EINVAL;
 	}
 
 	mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
 		    *npages, *ncont, *order, *page_shift);
 
-	return umem;
+	return 0;
 }
 
 static void mlx5_ib_umr_done(struct ib_cq *cq, struct ib_wc *wc)
@@ -1193,11 +1197,11 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
 		    start, virt_addr, length, access_flags);
 
-	umem = mr_umem_get(pd, start, length, access_flags, &npages,
-			   &page_shift, &ncont, &order);
-	if (IS_ERR(umem))
-		return (void *)umem;
+	err = mr_umem_get(pd, start, length, access_flags, &umem, &npages,
+			  &page_shift, &ncont, &order);
+	if (err < 0)
+		return ERR_PTR(err);
 
 	if (use_umr(order)) {
 		mr = reg_umr(pd, umem, virt_addr, length, ncont, page_shift,
@@ -1371,10 +1375,9 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
 		 */
 		flags |= IB_MR_REREG_TRANS;
 		ib_umem_release(mr->umem);
-		mr->umem = mr_umem_get(pd, addr, len, access_flags, &npages,
-				       &page_shift, &ncont, &order);
-		if (IS_ERR(mr->umem)) {
-			err = PTR_ERR(mr->umem);
+		err = mr_umem_get(pd, addr, len, access_flags, &mr->umem,
+				  &npages, &page_shift, &ncont, &order);
+		if (err < 0) {
 			mr->umem = NULL;
 			return err;
 		}