Commit 0f51427b authored by Leon Romanovsky, committed by Jason Gunthorpe

RDMA/mlx5: Cleanup WQE page fault handler

Refactor the page fault handler to be more readable and extensible. This
cleanup was triggered by the error reported below: the code structure made
it hard for static analysis tools to see that such a flow is not possible
in real life, because "requestor != NULL" implies "qp != NULL" too.

    drivers/infiniband/hw/mlx5/odp.c:1254 mlx5_ib_mr_wqe_pfault_handler()
    error: we previously assumed 'qp' could be null (see line 1230)
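
To make the analyzer's confusion concrete, here is a minimal sketch of the
two shapes; all names (handle_fault_*, res_is_qp, read_sq, read_rq,
read_srq, struct res, struct qp) are hypothetical stand-ins, not the
driver's API:

	/* Before: "requestor" is only ever set for QP faults, so it implies
	 * "qp != NULL" at runtime, but nothing in the code records that.
	 */
	static void handle_fault_before(struct res *res, bool requestor)
	{
		struct qp *qp = NULL;

		if (res_is_qp(res))	/* every requestor fault lands here */
			qp = res_to_qp(res);

		if (qp)			/* analyzer: qp may be NULL */
			read_rq(qp);

		if (requestor)
			read_sq(qp);	/* flagged: qp unchecked on this path */
	}

	/* After: the branch condition itself tests the pointer it uses, so
	 * the invariant is local and trivially provable.
	 */
	static void handle_fault_after(struct res *res, bool sq)
	{
		struct qp *qp = res_is_qp(res) ? res_to_qp(res) : NULL;

		if (qp && sq)
			read_sq(qp);
		else if (qp && !sq)
			read_rq(qp);
		else
			read_srq(res_to_srq(res));
	}

The patch below applies the same shape: qp is derived once from res, and
every branch that dereferences it tests it explicitly.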

Fixes: 08100fad ("IB/mlx5: Add ODP SRQ support")
Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
Reviewed-by: Moni Shoua <monis@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 1c726c44
@@ -929,7 +929,7 @@ static int pagefault_data_segments(struct mlx5_ib_dev *dev,
 				   struct mlx5_pagefault *pfault,
 				   void *wqe,
 				   void *wqe_end, u32 *bytes_mapped,
-				   u32 *total_wqe_bytes, int receive_queue)
+				   u32 *total_wqe_bytes, bool receive_queue)
 {
 	int ret = 0, npages = 0;
 	u64 io_virt;
@@ -1209,17 +1209,15 @@ static inline struct mlx5_ib_srq *res_to_srq(struct mlx5_core_rsc_common *res)
 static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev,
 					  struct mlx5_pagefault *pfault)
 {
-	int ret;
-	void *wqe, *wqe_end;
+	bool sq = pfault->type & MLX5_PFAULT_REQUESTOR;
+	u16 wqe_index = pfault->wqe.wqe_index;
+	void *wqe = NULL, *wqe_end = NULL;
 	u32 bytes_mapped, total_wqe_bytes;
-	char *buffer = NULL;
+	struct mlx5_core_rsc_common *res;
 	int resume_with_error = 1;
-	u16 wqe_index = pfault->wqe.wqe_index;
-	int requestor = pfault->type & MLX5_PFAULT_REQUESTOR;
-	struct mlx5_core_rsc_common *res = NULL;
-	struct mlx5_ib_qp *qp = NULL;
-	struct mlx5_ib_srq *srq = NULL;
+	struct mlx5_ib_qp *qp;
 	size_t bytes_copied;
+	int ret = 0;
 
 	res = odp_get_rsc(dev, pfault->wqe.wq_num, pfault->type);
 	if (!res) {
@@ -1227,87 +1225,74 @@ static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev,
 		return;
 	}
 
-	switch (res->res) {
-	case MLX5_RES_QP:
-		qp = res_to_qp(res);
-		break;
-	case MLX5_RES_SRQ:
-	case MLX5_RES_XSRQ:
-		srq = res_to_srq(res);
-		break;
-	default:
-		mlx5_ib_err(dev, "wqe page fault for unsupported type %d\n", pfault->type);
+	if (res->res != MLX5_RES_QP && res->res != MLX5_RES_SRQ &&
+	    res->res != MLX5_RES_XSRQ) {
+		mlx5_ib_err(dev, "wqe page fault for unsupported type %d\n",
+			    pfault->type);
 		goto resolve_page_fault;
 	}
 
-	buffer = (char *)__get_free_page(GFP_KERNEL);
-	if (!buffer) {
+	wqe = (void *)__get_free_page(GFP_KERNEL);
+	if (!wqe) {
 		mlx5_ib_err(dev, "Error allocating memory for IO page fault handling.\n");
 		goto resolve_page_fault;
 	}
 
-	if (qp) {
-		if (requestor) {
-			ret = mlx5_ib_read_user_wqe_sq(qp, wqe_index,
-						       buffer, PAGE_SIZE,
-						       &bytes_copied);
-		} else {
-			ret = mlx5_ib_read_user_wqe_rq(qp, wqe_index,
-						       buffer, PAGE_SIZE,
-						       &bytes_copied);
-		}
-	} else {
-		ret = mlx5_ib_read_user_wqe_srq(srq, wqe_index,
-						buffer, PAGE_SIZE,
-						&bytes_copied);
-	}
-
-	if (ret) {
-		mlx5_ib_err(dev, "Failed reading a WQE following page fault, error=%d, wqe_index=%x, qpn=%x\n",
-			    ret, wqe_index, pfault->token);
-		goto resolve_page_fault;
-	}
-
-	wqe = buffer;
-	if (requestor)
-		ret = mlx5_ib_mr_initiator_pfault_handler(dev, pfault, qp,
-							  &wqe, &wqe_end,
-							  bytes_copied);
-	else if (qp)
-		ret = mlx5_ib_mr_responder_pfault_handler_rq(dev, qp,
-							     wqe, &wqe_end,
-							     bytes_copied);
-	else
-		ret = mlx5_ib_mr_responder_pfault_handler_srq(dev, srq,
-							      &wqe, &wqe_end,
-							      bytes_copied);
-
-	if (ret < 0)
-		goto resolve_page_fault;
-
-	if (wqe >= wqe_end) {
-		mlx5_ib_err(dev, "ODP fault on invalid WQE.\n");
-		goto resolve_page_fault;
-	}
-
-	ret = pagefault_data_segments(dev, pfault, wqe, wqe_end,
-				      &bytes_mapped, &total_wqe_bytes,
-				      !requestor);
-	if (ret == -EAGAIN) {
-		resume_with_error = 0;
-		goto resolve_page_fault;
-	} else if (ret < 0 || total_wqe_bytes > bytes_mapped) {
-		goto resolve_page_fault;
-	}
-
-	resume_with_error = 0;
+	qp = (res->res == MLX5_RES_QP) ? res_to_qp(res) : NULL;
+	if (qp && sq) {
+		ret = mlx5_ib_read_user_wqe_sq(qp, wqe_index, wqe, PAGE_SIZE,
+					       &bytes_copied);
+		if (ret)
+			goto read_user;
+		ret = mlx5_ib_mr_initiator_pfault_handler(
+			dev, pfault, qp, &wqe, &wqe_end, bytes_copied);
+	} else if (qp && !sq) {
+		ret = mlx5_ib_read_user_wqe_rq(qp, wqe_index, wqe, PAGE_SIZE,
+					       &bytes_copied);
+		if (ret)
+			goto read_user;
+		ret = mlx5_ib_mr_responder_pfault_handler_rq(
+			dev, qp, wqe, &wqe_end, bytes_copied);
+	} else if (!qp) {
+		struct mlx5_ib_srq *srq = res_to_srq(res);
+
+		ret = mlx5_ib_read_user_wqe_srq(srq, wqe_index, wqe, PAGE_SIZE,
+						&bytes_copied);
+		if (ret)
+			goto read_user;
+		ret = mlx5_ib_mr_responder_pfault_handler_srq(
+			dev, srq, &wqe, &wqe_end, bytes_copied);
+	}
+
+	if (ret < 0 || wqe >= wqe_end)
+		goto resolve_page_fault;
+
+	ret = pagefault_data_segments(dev, pfault, wqe, wqe_end, &bytes_mapped,
+				      &total_wqe_bytes, !sq);
+	if (ret == -EAGAIN)
+		goto out;
+
+	if (ret < 0 || total_wqe_bytes > bytes_mapped)
+		goto resolve_page_fault;
+
+out:
+	ret = 0;
+	resume_with_error = 0;
+
+read_user:
+	if (ret)
+		mlx5_ib_err(
+			dev,
+			"Failed reading a WQE following page fault, error %d, wqe_index %x, qpn %x\n",
+			ret, wqe_index, pfault->token);
+
 resolve_page_fault:
 	mlx5_ib_page_fault_resume(dev, pfault, resume_with_error);
 	mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x resume_with_error=%d, type: 0x%x\n",
 		    pfault->wqe.wq_num, resume_with_error,
 		    pfault->type);
 	mlx5_core_res_put(res);
-	free_page((unsigned long)buffer);
+	free_page((unsigned long)wqe);
 }
 
 static int pages_in_range(u64 address, u32 length)