Commit 61083720 authored by Shani Michaeli, committed by Roland Dreier

mlx4_core: Propagate MR deregistration failures to caller

MR deregistration fails when memory windows are bound to the MR.
Handle such failures by propagating them to the caller ULP.
Signed-off-by: Haggai Eran <haggaie@mellanox.com>
Signed-off-by: Shani Michaeli <shanim@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
parent b20e519a
...@@ -68,7 +68,7 @@ struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc) ...@@ -68,7 +68,7 @@ struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc)
return &mr->ibmr; return &mr->ibmr;
err_mr: err_mr:
mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr); (void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);
err_free: err_free:
kfree(mr); kfree(mr);
...@@ -163,7 +163,7 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, ...@@ -163,7 +163,7 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
return &mr->ibmr; return &mr->ibmr;
err_mr: err_mr:
mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr); (void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);
err_umem: err_umem:
ib_umem_release(mr->umem); ib_umem_release(mr->umem);
...@@ -177,8 +177,11 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, ...@@ -177,8 +177,11 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
int mlx4_ib_dereg_mr(struct ib_mr *ibmr) int mlx4_ib_dereg_mr(struct ib_mr *ibmr)
{ {
struct mlx4_ib_mr *mr = to_mmr(ibmr); struct mlx4_ib_mr *mr = to_mmr(ibmr);
int ret;
mlx4_mr_free(to_mdev(ibmr->device)->dev, &mr->mmr); ret = mlx4_mr_free(to_mdev(ibmr->device)->dev, &mr->mmr);
if (ret)
return ret;
if (mr->umem) if (mr->umem)
ib_umem_release(mr->umem); ib_umem_release(mr->umem);
kfree(mr); kfree(mr);
...@@ -212,7 +215,7 @@ struct ib_mr *mlx4_ib_alloc_fast_reg_mr(struct ib_pd *pd, ...@@ -212,7 +215,7 @@ struct ib_mr *mlx4_ib_alloc_fast_reg_mr(struct ib_pd *pd,
return &mr->ibmr; return &mr->ibmr;
err_mr: err_mr:
mlx4_mr_free(dev->dev, &mr->mmr); (void) mlx4_mr_free(dev->dev, &mr->mmr);
err_free: err_free:
kfree(mr); kfree(mr);
...@@ -291,7 +294,7 @@ struct ib_fmr *mlx4_ib_fmr_alloc(struct ib_pd *pd, int acc, ...@@ -291,7 +294,7 @@ struct ib_fmr *mlx4_ib_fmr_alloc(struct ib_pd *pd, int acc,
return &fmr->ibfmr; return &fmr->ibfmr;
err_mr: err_mr:
mlx4_mr_free(to_mdev(pd->device)->dev, &fmr->mfmr.mr); (void) mlx4_mr_free(to_mdev(pd->device)->dev, &fmr->mfmr.mr);
err_free: err_free:
kfree(fmr); kfree(fmr);
......
...@@ -176,7 +176,7 @@ static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr) ...@@ -176,7 +176,7 @@ static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr)
flush_workqueue(mdev->workqueue); flush_workqueue(mdev->workqueue);
destroy_workqueue(mdev->workqueue); destroy_workqueue(mdev->workqueue);
mlx4_mr_free(dev, &mdev->mr); (void) mlx4_mr_free(dev, &mdev->mr);
iounmap(mdev->uar_map); iounmap(mdev->uar_map);
mlx4_uar_free(dev, &mdev->priv_uar); mlx4_uar_free(dev, &mdev->priv_uar);
mlx4_pd_free(dev, mdev->priv_pdn); mlx4_pd_free(dev, mdev->priv_pdn);
...@@ -283,7 +283,7 @@ static void *mlx4_en_add(struct mlx4_dev *dev) ...@@ -283,7 +283,7 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
return mdev; return mdev;
err_mr: err_mr:
mlx4_mr_free(dev, &mdev->mr); (void) mlx4_mr_free(dev, &mdev->mr);
err_map: err_map:
if (!mdev->uar_map) if (!mdev->uar_map)
iounmap(mdev->uar_map); iounmap(mdev->uar_map);
......
...@@ -442,7 +442,7 @@ int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access, ...@@ -442,7 +442,7 @@ int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
} }
EXPORT_SYMBOL_GPL(mlx4_mr_alloc); EXPORT_SYMBOL_GPL(mlx4_mr_alloc);
static void mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr) static int mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
{ {
int err; int err;
...@@ -450,20 +450,31 @@ static void mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr) ...@@ -450,20 +450,31 @@ static void mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
err = mlx4_HW2SW_MPT(dev, NULL, err = mlx4_HW2SW_MPT(dev, NULL,
key_to_hw_index(mr->key) & key_to_hw_index(mr->key) &
(dev->caps.num_mpts - 1)); (dev->caps.num_mpts - 1));
if (err) if (err) {
mlx4_warn(dev, "xxx HW2SW_MPT failed (%d)\n", err); mlx4_warn(dev, "HW2SW_MPT failed (%d),", err);
mlx4_warn(dev, "MR has MWs bound to it.\n");
return err;
}
mr->enabled = MLX4_MPT_EN_SW; mr->enabled = MLX4_MPT_EN_SW;
} }
mlx4_mtt_cleanup(dev, &mr->mtt); mlx4_mtt_cleanup(dev, &mr->mtt);
return 0;
} }
void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr) int mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
{ {
mlx4_mr_free_reserved(dev, mr); int ret;
ret = mlx4_mr_free_reserved(dev, mr);
if (ret)
return ret;
if (mr->enabled) if (mr->enabled)
mlx4_mpt_free_icm(dev, key_to_hw_index(mr->key)); mlx4_mpt_free_icm(dev, key_to_hw_index(mr->key));
mlx4_mpt_release(dev, key_to_hw_index(mr->key)); mlx4_mpt_release(dev, key_to_hw_index(mr->key));
return 0;
} }
EXPORT_SYMBOL_GPL(mlx4_mr_free); EXPORT_SYMBOL_GPL(mlx4_mr_free);
...@@ -831,7 +842,7 @@ int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages, ...@@ -831,7 +842,7 @@ int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
return 0; return 0;
err_free: err_free:
mlx4_mr_free(dev, &fmr->mr); (void) mlx4_mr_free(dev, &fmr->mr);
return err; return err;
} }
EXPORT_SYMBOL_GPL(mlx4_fmr_alloc); EXPORT_SYMBOL_GPL(mlx4_fmr_alloc);
...@@ -888,10 +899,14 @@ EXPORT_SYMBOL_GPL(mlx4_fmr_unmap); ...@@ -888,10 +899,14 @@ EXPORT_SYMBOL_GPL(mlx4_fmr_unmap);
int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr) int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
{ {
int ret;
if (fmr->maps) if (fmr->maps)
return -EBUSY; return -EBUSY;
mlx4_mr_free(dev, &fmr->mr); ret = mlx4_mr_free(dev, &fmr->mr);
if (ret)
return ret;
fmr->mr.enabled = MLX4_MPT_DISABLED; fmr->mr.enabled = MLX4_MPT_DISABLED;
return 0; return 0;
......
...@@ -801,7 +801,7 @@ u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt); ...@@ -801,7 +801,7 @@ u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt);
int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access, int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
int npages, int page_shift, struct mlx4_mr *mr); int npages, int page_shift, struct mlx4_mr *mr);
void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr); int mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr);
int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr); int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr);
int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
int start_index, int npages, u64 *page_list); int start_index, int npages, u64 *page_list);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment