Commit 7f5eb9bb authored by Roland Dreier

IB/mlx4: Return receive queue sizes for userspace QPs from query QP

Return the receive queue sizes for both userspace QPs and kernel QPs
(not just kernel QPs) from mlx4_ib_query_qp().  Also zero the send
queue sizes for userspace QPs to avoid a possible information leak,
and report max_inline_data as 0 for all QPs, since inline sends are
not supported for kernel QPs and the correct userspace value is not
known.
Signed-off-by: Roland Dreier <rolandd@cisco.com>
parent c9f2ba5e
@@ -1581,17 +1581,25 @@ int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr
 
 done:
 	qp_attr->cur_qp_state	     = qp_attr->qp_state;
+	qp_attr->cap.max_recv_wr     = qp->rq.wqe_cnt;
+	qp_attr->cap.max_recv_sge    = qp->rq.max_gs;
+
 	if (!ibqp->uobject) {
-		qp_attr->cap.max_send_wr     = qp->sq.wqe_cnt;
-		qp_attr->cap.max_recv_wr     = qp->rq.wqe_cnt;
-		qp_attr->cap.max_send_sge    = qp->sq.max_gs;
-		qp_attr->cap.max_recv_sge    = qp->rq.max_gs;
-		qp_attr->cap.max_inline_data = (1 << qp->sq.wqe_shift) -
-			send_wqe_overhead(qp->ibqp.qp_type) -
-			sizeof (struct mlx4_wqe_inline_seg);
-		qp_init_attr->cap	     = qp_attr->cap;
+		qp_attr->cap.max_send_wr  = qp->sq.wqe_cnt;
+		qp_attr->cap.max_send_sge = qp->sq.max_gs;
+	} else {
+		qp_attr->cap.max_send_wr  = 0;
+		qp_attr->cap.max_send_sge = 0;
 	}
 
+	/*
+	 * We don't support inline sends for kernel QPs (yet), and we
+	 * don't know what userspace's value should be.
+	 */
+	qp_attr->cap.max_inline_data = 0;
+
+	qp_init_attr->cap	     = qp_attr->cap;
+
 	return 0;
 }
 
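Below is a minimal caller sketch, not part of the commit, showing how the new behaviour is visible through the generic ib_query_qp() verb, which dispatches to mlx4_ib_query_qp() for mlx4 devices.  The helper name dump_queried_qp_caps() and the logging are hypothetical; after this change the receive queue sizes are filled in for both kernel and userspace QPs, the send queue sizes read back as 0 for userspace QPs, and max_inline_data is always reported as 0.

#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

/*
 * Hypothetical helper: query a QP through the standard ib_query_qp()
 * verb and print the capabilities that mlx4_ib_query_qp() reports.
 */
static void dump_queried_qp_caps(struct ib_qp *qp)
{
	struct ib_qp_attr attr;
	struct ib_qp_init_attr init_attr;
	int ret;

	ret = ib_query_qp(qp, &attr, IB_QP_CAP, &init_attr);
	if (ret) {
		pr_err("ib_query_qp() failed: %d\n", ret);
		return;
	}

	/* For a userspace QP, the send sizes and max_inline_data are 0. */
	pr_info("QP 0x%x: recv_wr=%u recv_sge=%u send_wr=%u send_sge=%u inline=%u\n",
		qp->qp_num,
		attr.cap.max_recv_wr, attr.cap.max_recv_sge,
		attr.cap.max_send_wr, attr.cap.max_send_sge,
		attr.cap.max_inline_data);
}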