Commit 5bf944f2 authored by Bob Pearson, committed by Jason Gunthorpe

RDMA/rxe: Add error messages

This patch adds error and debug messages so that every interaction
with rdma-core through a verbs API call or a completion error return
will generate at least one error message backed up by debug messages
with more detail.
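
The pattern applied to the verbs entry points is sketched below. This is an
illustrative example only (rxe_example_verb() is a made-up name, not part of
the driver); it shows the convention the patch applies to the real handlers
in the diff: a rxe_dbg_*() call where the failure is detected, and a
rxe_err_*() call on the common error return path, so every error returned to
rdma-core is reported at least once.

    /* Illustrative sketch only: rxe_example_verb() does not exist in the
     * driver; it demonstrates the reporting convention used by the real
     * verbs handlers changed below.
     */
    static int rxe_example_verb(struct rxe_dev *rxe, int arg)
    {
            int err;

            if (arg < 0) {
                    err = -EINVAL;
                    /* debug message at the point where the failure is seen */
                    rxe_dbg_dev(rxe, "bad arg = %d", arg);
                    goto err_out;
            }

            return 0;

    err_out:
            /* at least one error message on every error return to rdma-core */
            rxe_err_dev(rxe, "returned err = %d", err);
            return err;
    }

The single err_out exit keeps one error-reporting point per function, which is
the shape most of the rewritten handlers below take.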

With dynamic debugging one can follow up after seeing an error message
by turning on the appropriate debug messages.
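
For example, assuming a kernel built with CONFIG_DYNAMIC_DEBUG and debugfs
mounted in the usual place, the rxe debug messages can typically be enabled
with

    echo 'module rdma_rxe +p' > /sys/kernel/debug/dynamic_debug/control

(the module name rdma_rxe and the control-file path are the usual defaults;
adjust them for your configuration).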

Link: https://lore.kernel.org/r/20230303221623.8053-5-rpearsonhpe@gmail.com
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 9ac01f43
@@ -428,6 +428,10 @@ static void make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
 			uwc->wc_flags = IB_WC_WITH_IMM;
 			uwc->byte_len = wqe->dma.length;
 		}
+	} else {
+		if (wqe->status != IB_WC_WR_FLUSH_ERR)
+			rxe_err_qp(qp, "non-flush error status = %d",
+				wqe->status);
 	}
 }
 
...
@@ -80,7 +80,6 @@ int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length);
 int advance_dma_data(struct rxe_dma_info *dma, unsigned int length);
 int rxe_invalidate_mr(struct rxe_qp *qp, u32 key);
 int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe);
-int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
 void rxe_mr_cleanup(struct rxe_pool_elem *elem);
 
 /* rxe_mw.c */
...
@@ -722,19 +722,6 @@ int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
 	return 0;
 }
 
-int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
-{
-	struct rxe_mr *mr = to_rmr(ibmr);
-
-	/* See IBA 10.6.7.2.6 */
-	if (atomic_read(&mr->num_mw) > 0)
-		return -EINVAL;
-
-	rxe_cleanup(mr);
-	kfree_rcu(mr);
-	return 0;
-}
-
 void rxe_mr_cleanup(struct rxe_pool_elem *elem)
 {
 	struct rxe_mr *mr = container_of(elem, typeof(*mr), elem);
...
@@ -1151,6 +1151,10 @@ static enum resp_states do_complete(struct rxe_qp *qp,
 
 			wc->port_num = qp->attr.port_num;
 		}
+	} else {
+		if (wc->status != IB_WC_WR_FLUSH_ERR)
+			rxe_err_qp(qp, "non-flush error status = %d",
+				wc->status);
 	}
 
 	/* have copy for srq and reference for !srq */
...
@@ -12,30 +12,47 @@
#include "rxe_queue.h" #include "rxe_queue.h"
#include "rxe_hw_counters.h" #include "rxe_hw_counters.h"
static int rxe_query_device(struct ib_device *dev, static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr);
/* dev */
static int rxe_query_device(struct ib_device *ibdev,
struct ib_device_attr *attr, struct ib_device_attr *attr,
struct ib_udata *uhw) struct ib_udata *udata)
{ {
struct rxe_dev *rxe = to_rdev(dev); struct rxe_dev *rxe = to_rdev(ibdev);
int err;
if (uhw->inlen || uhw->outlen) if (udata->inlen || udata->outlen) {
return -EINVAL; rxe_dbg_dev(rxe, "malformed udata");
err = -EINVAL;
goto err_out;
}
memcpy(attr, &rxe->attr, sizeof(*attr));
*attr = rxe->attr;
return 0; return 0;
err_out:
rxe_err_dev(rxe, "returned err = %d", err);
return err;
} }
static int rxe_query_port(struct ib_device *dev, static int rxe_query_port(struct ib_device *ibdev,
u32 port_num, struct ib_port_attr *attr) u32 port_num, struct ib_port_attr *attr)
{ {
struct rxe_dev *rxe = to_rdev(dev); struct rxe_dev *rxe = to_rdev(ibdev);
int rc; int err, ret;
if (port_num != 1) {
err = -EINVAL;
rxe_dbg_dev(rxe, "bad port_num = %d", port_num);
goto err_out;
}
/* *attr being zeroed by the caller, avoid zeroing it here */ memcpy(attr, &rxe->port.attr, sizeof(*attr));
*attr = rxe->port.attr;
mutex_lock(&rxe->usdev_lock); mutex_lock(&rxe->usdev_lock);
rc = ib_get_eth_speed(dev, port_num, &attr->active_speed, ret = ib_get_eth_speed(ibdev, port_num, &attr->active_speed,
&attr->active_width); &attr->active_width);
if (attr->state == IB_PORT_ACTIVE) if (attr->state == IB_PORT_ACTIVE)
@@ -47,27 +64,45 @@ static int rxe_query_port(struct ib_device *dev,
mutex_unlock(&rxe->usdev_lock); mutex_unlock(&rxe->usdev_lock);
return rc; return ret;
err_out:
rxe_err_dev(rxe, "returned err = %d", err);
return err;
} }
static int rxe_query_pkey(struct ib_device *device, static int rxe_query_pkey(struct ib_device *ibdev,
u32 port_num, u16 index, u16 *pkey) u32 port_num, u16 index, u16 *pkey)
{ {
if (index > 0) struct rxe_dev *rxe = to_rdev(ibdev);
return -EINVAL; int err;
if (index != 0) {
err = -EINVAL;
rxe_dbg_dev(rxe, "bad pkey index = %d", index);
goto err_out;
}
*pkey = IB_DEFAULT_PKEY_FULL; *pkey = IB_DEFAULT_PKEY_FULL;
return 0; return 0;
err_out:
rxe_err_dev(rxe, "returned err = %d", err);
return err;
} }
static int rxe_modify_device(struct ib_device *dev, static int rxe_modify_device(struct ib_device *ibdev,
int mask, struct ib_device_modify *attr) int mask, struct ib_device_modify *attr)
{ {
struct rxe_dev *rxe = to_rdev(dev); struct rxe_dev *rxe = to_rdev(ibdev);
int err;
if (mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID | if (mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
IB_DEVICE_MODIFY_NODE_DESC)) IB_DEVICE_MODIFY_NODE_DESC)) {
return -EOPNOTSUPP; err = -EOPNOTSUPP;
rxe_dbg_dev(rxe, "unsupported mask = 0x%x", mask);
goto err_out;
}
if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
rxe->attr.sys_image_guid = cpu_to_be64(attr->sys_image_guid); rxe->attr.sys_image_guid = cpu_to_be64(attr->sys_image_guid);
@@ -78,16 +113,33 @@ static int rxe_modify_device(struct ib_device *dev,
} }
return 0; return 0;
err_out:
rxe_err_dev(rxe, "returned err = %d", err);
return err;
} }
static int rxe_modify_port(struct ib_device *dev, static int rxe_modify_port(struct ib_device *ibdev, u32 port_num,
u32 port_num, int mask, struct ib_port_modify *attr) int mask, struct ib_port_modify *attr)
{ {
struct rxe_dev *rxe = to_rdev(dev); struct rxe_dev *rxe = to_rdev(ibdev);
struct rxe_port *port; struct rxe_port *port;
int err;
port = &rxe->port; if (port_num != 1) {
err = -EINVAL;
rxe_dbg_dev(rxe, "bad port_num = %d", port_num);
goto err_out;
}
//TODO is shutdown useful
if (mask & ~(IB_PORT_RESET_QKEY_CNTR)) {
err = -EOPNOTSUPP;
rxe_dbg_dev(rxe, "unsupported mask = 0x%x", mask);
goto err_out;
}
port = &rxe->port;
port->attr.port_cap_flags |= attr->set_port_cap_mask; port->attr.port_cap_flags |= attr->set_port_cap_mask;
port->attr.port_cap_flags &= ~attr->clr_port_cap_mask; port->attr.port_cap_flags &= ~attr->clr_port_cap_mask;
@@ -95,73 +147,125 @@ static int rxe_modify_port(struct ib_device *dev,
port->attr.qkey_viol_cntr = 0; port->attr.qkey_viol_cntr = 0;
return 0; return 0;
}
static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev, err_out:
u32 port_num) rxe_err_dev(rxe, "returned err = %d", err);
{ return err;
return IB_LINK_LAYER_ETHERNET;
} }
static int rxe_alloc_ucontext(struct ib_ucontext *ibuc, struct ib_udata *udata) static enum rdma_link_layer rxe_get_link_layer(struct ib_device *ibdev,
u32 port_num)
{ {
struct rxe_dev *rxe = to_rdev(ibuc->device); struct rxe_dev *rxe = to_rdev(ibdev);
struct rxe_ucontext *uc = to_ruc(ibuc); int err;
return rxe_add_to_pool(&rxe->uc_pool, uc); if (port_num != 1) {
} err = -EINVAL;
rxe_dbg_dev(rxe, "bad port_num = %d", port_num);
goto err_out;
}
static void rxe_dealloc_ucontext(struct ib_ucontext *ibuc) return IB_LINK_LAYER_ETHERNET;
{
struct rxe_ucontext *uc = to_ruc(ibuc);
rxe_cleanup(uc); err_out:
rxe_err_dev(rxe, "returned err = %d", err);
return err;
} }
static int rxe_port_immutable(struct ib_device *dev, u32 port_num, static int rxe_port_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable) struct ib_port_immutable *immutable)
{ {
struct rxe_dev *rxe = to_rdev(ibdev);
struct ib_port_attr attr = {};
int err; int err;
struct ib_port_attr attr;
immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP; if (port_num != 1) {
err = -EINVAL;
rxe_dbg_dev(rxe, "bad port_num = %d", port_num);
goto err_out;
}
err = ib_query_port(dev, port_num, &attr); err = ib_query_port(ibdev, port_num, &attr);
if (err) if (err)
return err; goto err_out;
immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
immutable->pkey_tbl_len = attr.pkey_tbl_len; immutable->pkey_tbl_len = attr.pkey_tbl_len;
immutable->gid_tbl_len = attr.gid_tbl_len; immutable->gid_tbl_len = attr.gid_tbl_len;
immutable->max_mad_size = IB_MGMT_MAD_SIZE; immutable->max_mad_size = IB_MGMT_MAD_SIZE;
return 0; return 0;
err_out:
rxe_err_dev(rxe, "returned err = %d", err);
return err;
}
/* uc */
static int rxe_alloc_ucontext(struct ib_ucontext *ibuc, struct ib_udata *udata)
{
struct rxe_dev *rxe = to_rdev(ibuc->device);
struct rxe_ucontext *uc = to_ruc(ibuc);
int err;
err = rxe_add_to_pool(&rxe->uc_pool, uc);
if (err)
rxe_err_dev(rxe, "unable to create uc");
return err;
}
static void rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
{
struct rxe_ucontext *uc = to_ruc(ibuc);
int err;
err = rxe_cleanup(uc);
if (err)
rxe_err_uc(uc, "cleanup failed, err = %d", err);
} }
/* pd */
static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{ {
struct rxe_dev *rxe = to_rdev(ibpd->device); struct rxe_dev *rxe = to_rdev(ibpd->device);
struct rxe_pd *pd = to_rpd(ibpd); struct rxe_pd *pd = to_rpd(ibpd);
int err;
err = rxe_add_to_pool(&rxe->pd_pool, pd);
if (err) {
rxe_dbg_dev(rxe, "unable to alloc pd");
goto err_out;
}
return 0;
return rxe_add_to_pool(&rxe->pd_pool, pd); err_out:
rxe_err_dev(rxe, "returned err = %d", err);
return err;
} }
static int rxe_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) static int rxe_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{ {
struct rxe_pd *pd = to_rpd(ibpd); struct rxe_pd *pd = to_rpd(ibpd);
int err;
err = rxe_cleanup(pd);
if (err)
rxe_err_pd(pd, "cleanup failed, err = %d", err);
rxe_cleanup(pd);
return 0; return 0;
} }
/* ah */
static int rxe_create_ah(struct ib_ah *ibah, static int rxe_create_ah(struct ib_ah *ibah,
struct rdma_ah_init_attr *init_attr, struct rdma_ah_init_attr *init_attr,
struct ib_udata *udata) struct ib_udata *udata)
{ {
struct rxe_dev *rxe = to_rdev(ibah->device); struct rxe_dev *rxe = to_rdev(ibah->device);
struct rxe_ah *ah = to_rah(ibah); struct rxe_ah *ah = to_rah(ibah);
struct rxe_create_ah_resp __user *uresp = NULL; struct rxe_create_ah_resp __user *uresp = NULL;
int err; int err, cleanup_err;
if (udata) { if (udata) {
/* test if new user provider */ /* test if new user provider */
@@ -174,16 +278,18 @@ static int rxe_create_ah(struct ib_ah *ibah,
err = rxe_add_to_pool_ah(&rxe->ah_pool, ah, err = rxe_add_to_pool_ah(&rxe->ah_pool, ah,
init_attr->flags & RDMA_CREATE_AH_SLEEPABLE); init_attr->flags & RDMA_CREATE_AH_SLEEPABLE);
if (err) if (err) {
return err; rxe_dbg_dev(rxe, "unable to create ah");
goto err_out;
}
/* create index > 0 */ /* create index > 0 */
ah->ah_num = ah->elem.index; ah->ah_num = ah->elem.index;
err = rxe_ah_chk_attr(ah, init_attr->ah_attr); err = rxe_ah_chk_attr(ah, init_attr->ah_attr);
if (err) { if (err) {
rxe_cleanup(ah); rxe_dbg_ah(ah, "bad attr");
return err; goto err_cleanup;
} }
if (uresp) { if (uresp) {
@@ -191,8 +297,9 @@ static int rxe_create_ah(struct ib_ah *ibah,
err = copy_to_user(&uresp->ah_num, &ah->ah_num, err = copy_to_user(&uresp->ah_num, &ah->ah_num,
sizeof(uresp->ah_num)); sizeof(uresp->ah_num));
if (err) { if (err) {
rxe_cleanup(ah); err = -EFAULT;
return -EFAULT; rxe_dbg_ah(ah, "unable to copy to user");
goto err_cleanup;
} }
} else if (ah->is_user) { } else if (ah->is_user) {
/* only if old user provider */ /* only if old user provider */
@@ -203,19 +310,34 @@ static int rxe_create_ah(struct ib_ah *ibah,
rxe_finalize(ah); rxe_finalize(ah);
return 0; return 0;
err_cleanup:
cleanup_err = rxe_cleanup(ah);
if (cleanup_err)
rxe_err_ah(ah, "cleanup failed, err = %d", cleanup_err);
err_out:
rxe_err_ah(ah, "returned err = %d", err);
return err;
} }
static int rxe_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr) static int rxe_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{ {
int err;
struct rxe_ah *ah = to_rah(ibah); struct rxe_ah *ah = to_rah(ibah);
int err;
err = rxe_ah_chk_attr(ah, attr); err = rxe_ah_chk_attr(ah, attr);
if (err) if (err) {
return err; rxe_dbg_ah(ah, "bad attr");
goto err_out;
}
rxe_init_av(attr, &ah->av); rxe_init_av(attr, &ah->av);
return 0; return 0;
err_out:
rxe_err_ah(ah, "returned err = %d", err);
return err;
} }
static int rxe_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr) static int rxe_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
@@ -225,92 +347,77 @@ static int rxe_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
memset(attr, 0, sizeof(*attr)); memset(attr, 0, sizeof(*attr));
attr->type = ibah->type; attr->type = ibah->type;
rxe_av_to_attr(&ah->av, attr); rxe_av_to_attr(&ah->av, attr);
return 0; return 0;
} }
static int rxe_destroy_ah(struct ib_ah *ibah, u32 flags) static int rxe_destroy_ah(struct ib_ah *ibah, u32 flags)
{ {
struct rxe_ah *ah = to_rah(ibah); struct rxe_ah *ah = to_rah(ibah);
int err;
rxe_cleanup_ah(ah, flags & RDMA_DESTROY_AH_SLEEPABLE); err = rxe_cleanup_ah(ah, flags & RDMA_DESTROY_AH_SLEEPABLE);
if (err)
return 0; rxe_err_ah(ah, "cleanup failed, err = %d", err);
}
static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
{
int i;
u32 length;
struct rxe_recv_wqe *recv_wqe;
int num_sge = ibwr->num_sge;
int full;
full = queue_full(rq->queue, QUEUE_TYPE_FROM_ULP);
if (unlikely(full))
return -ENOMEM;
if (unlikely(num_sge > rq->max_sge))
return -EINVAL;
length = 0;
for (i = 0; i < num_sge; i++)
length += ibwr->sg_list[i].length;
recv_wqe = queue_producer_addr(rq->queue, QUEUE_TYPE_FROM_ULP);
recv_wqe->wr_id = ibwr->wr_id;
memcpy(recv_wqe->dma.sge, ibwr->sg_list,
num_sge * sizeof(struct ib_sge));
recv_wqe->dma.length = length;
recv_wqe->dma.resid = length;
recv_wqe->dma.num_sge = num_sge;
recv_wqe->dma.cur_sge = 0;
recv_wqe->dma.sge_offset = 0;
queue_advance_producer(rq->queue, QUEUE_TYPE_FROM_ULP);
return 0; return 0;
} }
/* srq */
static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init, static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
struct ib_udata *udata) struct ib_udata *udata)
{ {
int err;
struct rxe_dev *rxe = to_rdev(ibsrq->device); struct rxe_dev *rxe = to_rdev(ibsrq->device);
struct rxe_pd *pd = to_rpd(ibsrq->pd); struct rxe_pd *pd = to_rpd(ibsrq->pd);
struct rxe_srq *srq = to_rsrq(ibsrq); struct rxe_srq *srq = to_rsrq(ibsrq);
struct rxe_create_srq_resp __user *uresp = NULL; struct rxe_create_srq_resp __user *uresp = NULL;
int err, cleanup_err;
if (udata) { if (udata) {
if (udata->outlen < sizeof(*uresp)) if (udata->outlen < sizeof(*uresp)) {
return -EINVAL; err = -EINVAL;
rxe_err_dev(rxe, "malformed udata");
goto err_out;
}
uresp = udata->outbuf; uresp = udata->outbuf;
} }
if (init->srq_type != IB_SRQT_BASIC) if (init->srq_type != IB_SRQT_BASIC) {
return -EOPNOTSUPP; err = -EOPNOTSUPP;
rxe_dbg_dev(rxe, "srq type = %d, not supported",
init->srq_type);
goto err_out;
}
err = rxe_srq_chk_init(rxe, init); err = rxe_srq_chk_init(rxe, init);
if (err) if (err) {
return err; rxe_dbg_dev(rxe, "invalid init attributes");
goto err_out;
}
err = rxe_add_to_pool(&rxe->srq_pool, srq); err = rxe_add_to_pool(&rxe->srq_pool, srq);
if (err) if (err) {
return err; rxe_dbg_dev(rxe, "unable to create srq, err = %d", err);
goto err_out;
}
rxe_get(pd); rxe_get(pd);
srq->pd = pd; srq->pd = pd;
err = rxe_srq_from_init(rxe, srq, init, udata, uresp); err = rxe_srq_from_init(rxe, srq, init, udata, uresp);
if (err) if (err) {
rxe_dbg_srq(srq, "create srq failed, err = %d", err);
goto err_cleanup; goto err_cleanup;
}
return 0; return 0;
err_cleanup: err_cleanup:
rxe_cleanup(srq); cleanup_err = rxe_cleanup(srq);
if (cleanup_err)
rxe_err_srq(srq, "cleanup failed, err = %d", cleanup_err);
err_out:
rxe_err_dev(rxe, "returned err = %d", err);
return err; return err;
} }
@@ -318,46 +425,64 @@ static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
enum ib_srq_attr_mask mask, enum ib_srq_attr_mask mask,
struct ib_udata *udata) struct ib_udata *udata)
{ {
int err;
struct rxe_srq *srq = to_rsrq(ibsrq); struct rxe_srq *srq = to_rsrq(ibsrq);
struct rxe_dev *rxe = to_rdev(ibsrq->device); struct rxe_dev *rxe = to_rdev(ibsrq->device);
struct rxe_modify_srq_cmd ucmd = {}; struct rxe_modify_srq_cmd cmd = {};
int err;
if (udata) { if (udata) {
if (udata->inlen < sizeof(ucmd)) if (udata->inlen < sizeof(cmd)) {
return -EINVAL; err = -EINVAL;
rxe_dbg_srq(srq, "malformed udata");
goto err_out;
}
err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)); err = ib_copy_from_udata(&cmd, udata, sizeof(cmd));
if (err) if (err) {
return err; err = -EFAULT;
rxe_dbg_srq(srq, "unable to read udata");
goto err_out;
}
} }
err = rxe_srq_chk_attr(rxe, srq, attr, mask); err = rxe_srq_chk_attr(rxe, srq, attr, mask);
if (err) if (err) {
return err; rxe_dbg_srq(srq, "bad init attributes");
goto err_out;
}
return rxe_srq_from_attr(rxe, srq, attr, mask, &ucmd, udata); err = rxe_srq_from_attr(rxe, srq, attr, mask, &cmd, udata);
if (err) {
rxe_dbg_srq(srq, "bad attr");
goto err_out;
}
return 0;
err_out:
rxe_err_srq(srq, "returned err = %d", err);
return err;
} }
static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr) static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{ {
struct rxe_srq *srq = to_rsrq(ibsrq); struct rxe_srq *srq = to_rsrq(ibsrq);
int err;
if (srq->error) if (srq->error) {
return -EINVAL; err = -EINVAL;
rxe_dbg_srq(srq, "srq in error state");
goto err_out;
}
attr->max_wr = srq->rq.queue->buf->index_mask; attr->max_wr = srq->rq.queue->buf->index_mask;
attr->max_sge = srq->rq.max_sge; attr->max_sge = srq->rq.max_sge;
attr->srq_limit = srq->limit; attr->srq_limit = srq->limit;
return 0; return 0;
}
static int rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata) err_out:
{ rxe_err_srq(srq, "returned err = %d", err);
struct rxe_srq *srq = to_rsrq(ibsrq); return err;
rxe_cleanup(srq);
return 0;
} }
static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
@@ -378,76 +503,116 @@ static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
spin_unlock_irqrestore(&srq->rq.producer_lock, flags); spin_unlock_irqrestore(&srq->rq.producer_lock, flags);
if (err) if (err) {
*bad_wr = wr; *bad_wr = wr;
rxe_err_srq(srq, "returned err = %d", err);
}
return err; return err;
} }
static int rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
struct rxe_srq *srq = to_rsrq(ibsrq);
int err;
err = rxe_cleanup(srq);
if (err)
rxe_err_srq(srq, "cleanup failed, err = %d", err);
return 0;
}
/* qp */
static int rxe_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init, static int rxe_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init,
struct ib_udata *udata) struct ib_udata *udata)
{ {
int err;
struct rxe_dev *rxe = to_rdev(ibqp->device); struct rxe_dev *rxe = to_rdev(ibqp->device);
struct rxe_pd *pd = to_rpd(ibqp->pd); struct rxe_pd *pd = to_rpd(ibqp->pd);
struct rxe_qp *qp = to_rqp(ibqp); struct rxe_qp *qp = to_rqp(ibqp);
struct rxe_create_qp_resp __user *uresp = NULL; struct rxe_create_qp_resp __user *uresp = NULL;
int err, cleanup_err;
if (udata) { if (udata) {
if (udata->outlen < sizeof(*uresp)) if (udata->inlen) {
return -EINVAL; err = -EINVAL;
uresp = udata->outbuf; rxe_dbg_dev(rxe, "malformed udata, err = %d", err);
goto err_out;
} }
if (init->create_flags) if (udata->outlen < sizeof(*uresp)) {
return -EOPNOTSUPP; err = -EINVAL;
rxe_dbg_dev(rxe, "malformed udata, err = %d", err);
err = rxe_qp_chk_init(rxe, init); goto err_out;
if (err) }
return err;
if (udata) {
if (udata->inlen)
return -EINVAL;
qp->is_user = true; qp->is_user = true;
uresp = udata->outbuf;
} else { } else {
qp->is_user = false; qp->is_user = false;
} }
if (init->create_flags) {
err = -EOPNOTSUPP;
rxe_dbg_dev(rxe, "unsupported create_flags, err = %d", err);
goto err_out;
}
err = rxe_qp_chk_init(rxe, init);
if (err) {
rxe_dbg_dev(rxe, "bad init attr, err = %d", err);
goto err_out;
}
err = rxe_add_to_pool(&rxe->qp_pool, qp); err = rxe_add_to_pool(&rxe->qp_pool, qp);
if (err) if (err) {
return err; rxe_dbg_dev(rxe, "unable to create qp, err = %d", err);
goto err_out;
}
err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibqp->pd, udata); err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibqp->pd, udata);
if (err) if (err) {
goto qp_init; rxe_dbg_qp(qp, "create qp failed, err = %d", err);
goto err_cleanup;
}
rxe_finalize(qp); rxe_finalize(qp);
return 0; return 0;
qp_init: err_cleanup:
rxe_cleanup(qp); cleanup_err = rxe_cleanup(qp);
if (cleanup_err)
rxe_err_qp(qp, "cleanup failed, err = %d", cleanup_err);
err_out:
rxe_err_dev(rxe, "returned err = %d", err);
return err; return err;
} }
static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int mask, struct ib_udata *udata) int mask, struct ib_udata *udata)
{ {
int err;
struct rxe_dev *rxe = to_rdev(ibqp->device); struct rxe_dev *rxe = to_rdev(ibqp->device);
struct rxe_qp *qp = to_rqp(ibqp); struct rxe_qp *qp = to_rqp(ibqp);
int err;
if (mask & ~IB_QP_ATTR_STANDARD_BITS) if (mask & ~IB_QP_ATTR_STANDARD_BITS) {
return -EOPNOTSUPP; err = -EOPNOTSUPP;
rxe_dbg_qp(qp, "unsupported mask = 0x%x, err = %d",
mask, err);
goto err_out;
}
err = rxe_qp_chk_attr(rxe, qp, attr, mask); err = rxe_qp_chk_attr(rxe, qp, attr, mask);
if (err) if (err) {
return err; rxe_dbg_qp(qp, "bad mask/attr, err = %d", err);
goto err_out;
}
err = rxe_qp_from_attr(qp, attr, mask, udata); err = rxe_qp_from_attr(qp, attr, mask, udata);
if (err) if (err) {
return err; rxe_dbg_qp(qp, "modify qp failed, err = %d", err);
goto err_out;
}
if ((mask & IB_QP_AV) && (attr->ah_attr.ah_flags & IB_AH_GRH)) if ((mask & IB_QP_AV) && (attr->ah_attr.ah_flags & IB_AH_GRH))
qp->src_port = rdma_get_udp_sport(attr->ah_attr.grh.flow_label, qp->src_port = rdma_get_udp_sport(attr->ah_attr.grh.flow_label,
@@ -455,6 +620,10 @@ static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
qp->attr.dest_qp_num); qp->attr.dest_qp_num);
return 0; return 0;
err_out:
rxe_err_qp(qp, "returned err = %d", err);
return err;
} }
static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
@@ -471,38 +640,59 @@ static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
static int rxe_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) static int rxe_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{ {
struct rxe_qp *qp = to_rqp(ibqp); struct rxe_qp *qp = to_rqp(ibqp);
int ret; int err;
ret = rxe_qp_chk_destroy(qp); err = rxe_qp_chk_destroy(qp);
if (ret) if (err) {
return ret; rxe_dbg_qp(qp, "unable to destroy qp, err = %d", err);
goto err_out;
}
err = rxe_cleanup(qp);
if (err)
rxe_err_qp(qp, "cleanup failed, err = %d", err);
rxe_cleanup(qp);
return 0; return 0;
err_out:
rxe_err_qp(qp, "returned err = %d", err);
return err;
} }
/* send wr */
static int validate_send_wr(struct rxe_qp *qp, const struct ib_send_wr *ibwr, static int validate_send_wr(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
unsigned int mask, unsigned int length) unsigned int mask, unsigned int length)
{ {
int num_sge = ibwr->num_sge; int num_sge = ibwr->num_sge;
struct rxe_sq *sq = &qp->sq; struct rxe_sq *sq = &qp->sq;
if (unlikely(num_sge > sq->max_sge)) if (unlikely(num_sge > sq->max_sge)) {
return -EINVAL; rxe_dbg_qp(qp, "num_sge > max_sge");
goto err_out;
}
if (unlikely(mask & WR_ATOMIC_MASK)) { if (unlikely(mask & WR_ATOMIC_MASK)) {
if (length < 8) if (length != 8) {
return -EINVAL; rxe_dbg_qp(qp, "atomic length != 8");
goto err_out;
}
if (atomic_wr(ibwr)->remote_addr & 0x7) if (atomic_wr(ibwr)->remote_addr & 0x7) {
return -EINVAL; rxe_dbg_qp(qp, "misaligned atomic address");
goto err_out;
}
} }
if (unlikely((ibwr->send_flags & IB_SEND_INLINE) && if (unlikely((ibwr->send_flags & IB_SEND_INLINE) &&
(length > sq->max_inline))) (length > sq->max_inline))) {
return -EINVAL; rxe_dbg_qp(qp, "inline length too big");
goto err_out;
}
return 0; return 0;
err_out:
return -EINVAL;
} }
static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr, static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
@@ -624,9 +814,9 @@ static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
spin_lock_irqsave(&qp->sq.sq_lock, flags); spin_lock_irqsave(&qp->sq.sq_lock, flags);
full = queue_full(sq->queue, QUEUE_TYPE_FROM_ULP); full = queue_full(sq->queue, QUEUE_TYPE_FROM_ULP);
if (unlikely(full)) { if (unlikely(full)) {
spin_unlock_irqrestore(&qp->sq.sq_lock, flags); spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
rxe_dbg_qp(qp, "queue full");
return -ENOMEM; return -ENOMEM;
} }
@@ -652,6 +842,7 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr,
while (wr) { while (wr) {
mask = wr_opcode_mask(wr->opcode, qp); mask = wr_opcode_mask(wr->opcode, qp);
if (unlikely(!mask)) { if (unlikely(!mask)) {
rxe_dbg_qp(qp, "bad wr opcode for qp");
err = -EINVAL; err = -EINVAL;
*bad_wr = wr; *bad_wr = wr;
break; break;
@@ -659,6 +850,7 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr,
if (unlikely((wr->send_flags & IB_SEND_INLINE) && if (unlikely((wr->send_flags & IB_SEND_INLINE) &&
!(mask & WR_INLINE_MASK))) { !(mask & WR_INLINE_MASK))) {
rxe_dbg_qp(qp, "opcode doesn't support inline data");
err = -EINVAL; err = -EINVAL;
*bad_wr = wr; *bad_wr = wr;
break; break;
@@ -669,17 +861,26 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr,
length = 0; length = 0;
for (i = 0; i < wr->num_sge; i++) for (i = 0; i < wr->num_sge; i++)
length += wr->sg_list[i].length; length += wr->sg_list[i].length;
if (length > 1<<31) {
err = -EINVAL;
rxe_dbg_qp(qp, "message length too long");
*bad_wr = wr;
break;
}
err = post_one_send(qp, wr, mask, length); err = post_one_send(qp, wr, mask, length);
if (err) { if (err) {
*bad_wr = wr; *bad_wr = wr;
break; break;
} }
wr = next; wr = next;
} }
/* if we didn't post anything there's nothing to do */
if (!err)
rxe_sched_task(&qp->req.task); rxe_sched_task(&qp->req.task);
if (unlikely(qp->req.state == QP_STATE_ERROR)) if (unlikely(qp->req.state == QP_STATE_ERROR))
rxe_sched_task(&qp->comp.task); rxe_sched_task(&qp->comp.task);
@@ -690,23 +891,90 @@ static int rxe_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
const struct ib_send_wr **bad_wr) const struct ib_send_wr **bad_wr)
{ {
struct rxe_qp *qp = to_rqp(ibqp); struct rxe_qp *qp = to_rqp(ibqp);
int err;
if (unlikely(!qp->valid)) { if (unlikely(!qp->valid)) {
*bad_wr = wr; *bad_wr = wr;
return -EINVAL; err = -EINVAL;
rxe_dbg_qp(qp, "qp destroyed");
goto err_out;
} }
if (unlikely(qp->req.state < QP_STATE_READY)) { if (unlikely(qp->req.state < QP_STATE_READY)) {
*bad_wr = wr; *bad_wr = wr;
return -EINVAL; err = -EINVAL;
rxe_dbg_qp(qp, "qp not ready to send");
goto err_out;
} }
if (qp->is_user) { if (qp->is_user) {
/* Utilize process context to do protocol processing */ /* Utilize process context to do protocol processing */
rxe_run_task(&qp->req.task); rxe_run_task(&qp->req.task);
} else {
err = rxe_post_send_kernel(qp, wr, bad_wr);
if (err)
goto err_out;
}
return 0; return 0;
} else
return rxe_post_send_kernel(qp, wr, bad_wr); err_out:
rxe_err_qp(qp, "returned err = %d", err);
return err;
}
/* recv wr */
static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
{
int i;
unsigned long length;
struct rxe_recv_wqe *recv_wqe;
int num_sge = ibwr->num_sge;
int full;
int err;
full = queue_full(rq->queue, QUEUE_TYPE_FROM_ULP);
if (unlikely(full)) {
err = -ENOMEM;
rxe_dbg("queue full");
goto err_out;
}
if (unlikely(num_sge > rq->max_sge)) {
err = -EINVAL;
rxe_dbg("bad num_sge > max_sge");
goto err_out;
}
length = 0;
for (i = 0; i < num_sge; i++)
length += ibwr->sg_list[i].length;
/* IBA max message size is 2^31 */
if (length >= (1UL<<31)) {
err = -EINVAL;
rxe_dbg("message length too long");
goto err_out;
}
recv_wqe = queue_producer_addr(rq->queue, QUEUE_TYPE_FROM_ULP);
recv_wqe->wr_id = ibwr->wr_id;
recv_wqe->dma.length = length;
recv_wqe->dma.resid = length;
recv_wqe->dma.num_sge = num_sge;
recv_wqe->dma.cur_sge = 0;
recv_wqe->dma.sge_offset = 0;
memcpy(recv_wqe->dma.sge, ibwr->sg_list,
num_sge * sizeof(struct ib_sge));
queue_advance_producer(rq->queue, QUEUE_TYPE_FROM_ULP);
return 0;
err_out:
rxe_dbg("returned err = %d", err);
return err;
} }
static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
@@ -719,12 +987,16 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) { if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) {
*bad_wr = wr; *bad_wr = wr;
return -EINVAL; err = -EINVAL;
rxe_dbg_qp(qp, "qp destroyed or not ready to post recv");
goto err_out;
} }
if (unlikely(qp->srq)) { if (unlikely(qp->srq)) {
*bad_wr = wr; *bad_wr = wr;
return -EINVAL; err = -EINVAL;
rxe_dbg_qp(qp, "use post_srq_recv instead");
goto err_out;
} }
spin_lock_irqsave(&rq->producer_lock, flags); spin_lock_irqsave(&rq->producer_lock, flags);
@@ -743,73 +1015,101 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
if (qp->resp.state == QP_STATE_ERROR) if (qp->resp.state == QP_STATE_ERROR)
rxe_sched_task(&qp->resp.task); rxe_sched_task(&qp->resp.task);
err_out:
if (err)
rxe_err_qp(qp, "returned err = %d", err);
return err; return err;
} }
/* cq */
static int rxe_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, static int rxe_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct ib_udata *udata) struct ib_udata *udata)
{ {
int err;
struct ib_device *dev = ibcq->device; struct ib_device *dev = ibcq->device;
struct rxe_dev *rxe = to_rdev(dev); struct rxe_dev *rxe = to_rdev(dev);
struct rxe_cq *cq = to_rcq(ibcq); struct rxe_cq *cq = to_rcq(ibcq);
struct rxe_create_cq_resp __user *uresp = NULL; struct rxe_create_cq_resp __user *uresp = NULL;
int err, cleanup_err;
if (udata) { if (udata) {
if (udata->outlen < sizeof(*uresp)) if (udata->outlen < sizeof(*uresp)) {
return -EINVAL; err = -EINVAL;
rxe_dbg_dev(rxe, "malformed udata, err = %d", err);
goto err_out;
}
uresp = udata->outbuf; uresp = udata->outbuf;
} }
if (attr->flags) if (attr->flags) {
return -EOPNOTSUPP; err = -EOPNOTSUPP;
rxe_dbg_dev(rxe, "bad attr->flags, err = %d", err);
goto err_out;
}
err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector); err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector);
if (err) if (err) {
return err; rxe_dbg_dev(rxe, "bad init attributes, err = %d", err);
goto err_out;
}
err = rxe_add_to_pool(&rxe->cq_pool, cq);
if (err) {
rxe_dbg_dev(rxe, "unable to create cq, err = %d", err);
goto err_out;
}
err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector, udata, err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector, udata,
uresp); uresp);
if (err) if (err) {
return err; rxe_dbg_cq(cq, "create cq failed, err = %d", err);
goto err_cleanup;
return rxe_add_to_pool(&rxe->cq_pool, cq); }
}
static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
struct rxe_cq *cq = to_rcq(ibcq);
/* See IBA C11-17: The CI shall return an error if this Verb is
* invoked while a Work Queue is still associated with the CQ.
*/
if (atomic_read(&cq->num_wq))
return -EINVAL;
rxe_cq_disable(cq);
rxe_cleanup(cq);
return 0; return 0;
err_cleanup:
cleanup_err = rxe_cleanup(cq);
if (cleanup_err)
rxe_err_cq(cq, "cleanup failed, err = %d", cleanup_err);
err_out:
rxe_err_dev(rxe, "returned err = %d", err);
return err;
} }
static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata) static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{ {
int err;
struct rxe_cq *cq = to_rcq(ibcq); struct rxe_cq *cq = to_rcq(ibcq);
struct rxe_dev *rxe = to_rdev(ibcq->device); struct rxe_dev *rxe = to_rdev(ibcq->device);
struct rxe_resize_cq_resp __user *uresp = NULL; struct rxe_resize_cq_resp __user *uresp = NULL;
int err;
if (udata) { if (udata) {
if (udata->outlen < sizeof(*uresp)) if (udata->outlen < sizeof(*uresp)) {
return -EINVAL; err = -EINVAL;
rxe_dbg_cq(cq, "malformed udata");
goto err_out;
}
uresp = udata->outbuf; uresp = udata->outbuf;
} }
err = rxe_cq_chk_attr(rxe, cq, cqe, 0); err = rxe_cq_chk_attr(rxe, cq, cqe, 0);
if (err) if (err) {
return err; rxe_dbg_cq(cq, "bad attr, err = %d", err);
goto err_out;
}
return rxe_cq_resize_queue(cq, cqe, uresp, udata); err = rxe_cq_resize_queue(cq, cqe, uresp, udata);
if (err) {
rxe_dbg_cq(cq, "resize cq failed, err = %d", err);
goto err_out;
}
return 0;
err_out:
rxe_err_cq(cq, "returned err = %d", err);
return err;
} }
static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
...@@ -823,7 +1123,7 @@ static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) ...@@ -823,7 +1123,7 @@ static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
for (i = 0; i < num_entries; i++) { for (i = 0; i < num_entries; i++) {
cqe = queue_head(cq->queue, QUEUE_TYPE_TO_ULP); cqe = queue_head(cq->queue, QUEUE_TYPE_TO_ULP);
if (!cqe) if (!cqe)
break; break; /* queue empty */
memcpy(wc++, &cqe->ibwc, sizeof(*wc)); memcpy(wc++, &cqe->ibwc, sizeof(*wc));
queue_advance_consumer(cq->queue, QUEUE_TYPE_TO_ULP); queue_advance_consumer(cq->queue, QUEUE_TYPE_TO_ULP);
@@ -864,6 +1164,34 @@ static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
return ret; return ret;
} }
static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
struct rxe_cq *cq = to_rcq(ibcq);
int err;
/* See IBA C11-17: The CI shall return an error if this Verb is
* invoked while a Work Queue is still associated with the CQ.
*/
if (atomic_read(&cq->num_wq)) {
err = -EINVAL;
rxe_dbg_cq(cq, "still in use");
goto err_out;
}
rxe_cq_disable(cq);
err = rxe_cleanup(cq);
if (err)
rxe_err_cq(cq, "cleanup failed, err = %d", err);
return 0;
err_out:
rxe_err_cq(cq, "returned err = %d", err);
return err;
}
/* mr */
static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access) static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
{ {
struct rxe_dev *rxe = to_rdev(ibpd->device); struct rxe_dev *rxe = to_rdev(ibpd->device);
@@ -874,12 +1202,15 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
mr = kzalloc(sizeof(*mr), GFP_KERNEL); mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr) { if (!mr) {
err = -ENOMEM; err = -ENOMEM;
rxe_dbg_dev(rxe, "no memory for mr");
goto err_out; goto err_out;
} }
err = rxe_add_to_pool(&rxe->mr_pool, mr); err = rxe_add_to_pool(&rxe->mr_pool, mr);
if (err) if (err) {
rxe_dbg_dev(rxe, "unable to create mr");
goto err_free; goto err_free;
}
rxe_get(pd); rxe_get(pd);
mr->ibmr.pd = ibpd; mr->ibmr.pd = ibpd;
@@ -892,46 +1223,53 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
err_free: err_free:
kfree(mr); kfree(mr);
err_out: err_out:
rxe_err_pd(pd, "returned err = %d", err);
return ERR_PTR(err); return ERR_PTR(err);
} }
static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd, static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd, u64 start,
u64 start, u64 length, u64 iova, int access,
u64 length, struct ib_udata *udata)
u64 iova,
int access, struct ib_udata *udata)
{ {
int err;
struct rxe_dev *rxe = to_rdev(ibpd->device); struct rxe_dev *rxe = to_rdev(ibpd->device);
struct rxe_pd *pd = to_rpd(ibpd); struct rxe_pd *pd = to_rpd(ibpd);
struct rxe_mr *mr; struct rxe_mr *mr;
int err, cleanup_err;
mr = kzalloc(sizeof(*mr), GFP_KERNEL); mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr) { if (!mr) {
err = -ENOMEM; err = -ENOMEM;
rxe_dbg_pd(pd, "no memory for mr");
goto err_out; goto err_out;
} }
err = rxe_add_to_pool(&rxe->mr_pool, mr); err = rxe_add_to_pool(&rxe->mr_pool, mr);
if (err) if (err) {
rxe_dbg_pd(pd, "unable to create mr");
goto err_free; goto err_free;
}
rxe_get(pd); rxe_get(pd);
mr->ibmr.pd = ibpd; mr->ibmr.pd = ibpd;
mr->ibmr.device = ibpd->device; mr->ibmr.device = ibpd->device;
err = rxe_mr_init_user(rxe, start, length, iova, access, mr); err = rxe_mr_init_user(rxe, start, length, iova, access, mr);
if (err) if (err) {
rxe_dbg_mr(mr, "reg_user_mr failed, err = %d", err);
goto err_cleanup; goto err_cleanup;
}
rxe_finalize(mr); rxe_finalize(mr);
return &mr->ibmr; return &mr->ibmr;
err_cleanup: err_cleanup:
rxe_cleanup(mr); cleanup_err = rxe_cleanup(mr);
if (cleanup_err)
rxe_err_mr(mr, "cleanup failed, err = %d", cleanup_err);
err_free: err_free:
kfree(mr); kfree(mr);
err_out: err_out:
rxe_err_pd(pd, "returned err = %d", err);
return ERR_PTR(err); return ERR_PTR(err);
} }
@@ -941,40 +1279,76 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
struct rxe_dev *rxe = to_rdev(ibpd->device); struct rxe_dev *rxe = to_rdev(ibpd->device);
struct rxe_pd *pd = to_rpd(ibpd); struct rxe_pd *pd = to_rpd(ibpd);
struct rxe_mr *mr; struct rxe_mr *mr;
int err; int err, cleanup_err;
if (mr_type != IB_MR_TYPE_MEM_REG) if (mr_type != IB_MR_TYPE_MEM_REG) {
return ERR_PTR(-EINVAL); err = -EINVAL;
rxe_dbg_pd(pd, "mr type %d not supported, err = %d",
mr_type, err);
goto err_out;
}
mr = kzalloc(sizeof(*mr), GFP_KERNEL); mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr) { if (!mr) {
err = -ENOMEM; err = -ENOMEM;
rxe_dbg_mr(mr, "no memory for mr");
goto err_out; goto err_out;
} }
err = rxe_add_to_pool(&rxe->mr_pool, mr); err = rxe_add_to_pool(&rxe->mr_pool, mr);
if (err) if (err) {
rxe_dbg_mr(mr, "unable to create mr, err = %d", err);
goto err_free; goto err_free;
}
rxe_get(pd); rxe_get(pd);
mr->ibmr.pd = ibpd; mr->ibmr.pd = ibpd;
mr->ibmr.device = ibpd->device; mr->ibmr.device = ibpd->device;
err = rxe_mr_init_fast(max_num_sg, mr); err = rxe_mr_init_fast(max_num_sg, mr);
if (err) if (err) {
rxe_dbg_mr(mr, "alloc_mr failed, err = %d", err);
goto err_cleanup; goto err_cleanup;
}
rxe_finalize(mr); rxe_finalize(mr);
return &mr->ibmr; return &mr->ibmr;
err_cleanup: err_cleanup:
rxe_cleanup(mr); cleanup_err = rxe_cleanup(mr);
if (cleanup_err)
rxe_err_mr(mr, "cleanup failed, err = %d", err);
err_free: err_free:
kfree(mr); kfree(mr);
err_out: err_out:
rxe_err_pd(pd, "returned err = %d", err);
return ERR_PTR(err); return ERR_PTR(err);
} }
static int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
struct rxe_mr *mr = to_rmr(ibmr);
int err, cleanup_err;
/* See IBA 10.6.7.2.6 */
if (atomic_read(&mr->num_mw) > 0) {
err = -EINVAL;
rxe_dbg_mr(mr, "mr has mw's bound");
goto err_out;
}
cleanup_err = rxe_cleanup(mr);
if (cleanup_err)
rxe_err_mr(mr, "cleanup failed, err = %d", cleanup_err);
kfree_rcu(mr);
return 0;
err_out:
rxe_err_mr(mr, "returned err = %d", err);
return err;
}
static ssize_t parent_show(struct device *device, static ssize_t parent_show(struct device *device,
struct device_attribute *attr, char *buf) struct device_attribute *attr, char *buf)
{ {
...