Commit 7c049d08 authored by Linus Torvalds

Merge tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

Pull main batch of InfiniBand/RDMA changes from Roland Dreier:
 - Large ocrdma HW driver update: add "fast register" work requests,
   fixes, cleanups
 - Add receive flow steering support for raw QPs
 - Fix IPoIB neighbour race that leads to crash
 - iSER updates including support for using "fast register" memory
   registration
 - IPv6 support for iWARP
 - XRC transport fixes

* tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (54 commits)
  RDMA/ocrdma: Fix compiler warning about int/pointer size mismatch
  IB/iser: Fix redundant pointer check in dealloc flow
  IB/iser: Fix possible memory leak in iser_create_frwr_pool()
  IB/qib: Move COUNTER_MASK definition within qib_mad.h header guards
  RDMA/ocrdma: Fix passing wrong opcode to modify_srq
  RDMA/ocrdma: Fill PVID in UMC case
  RDMA/ocrdma: Add ABI versioning support
  RDMA/ocrdma: Consider multiple SGES in case of DPP
  RDMA/ocrdma: Fix for displaying proper link speed
  RDMA/ocrdma: Increase STAG array size
  RDMA/ocrdma: Dont use PD 0 for userpace CQ DB
  RDMA/ocrdma: FRMA code cleanup
  RDMA/ocrdma: For ERX2 irrespective of Qid, num_posted offset is 24
  RDMA/ocrdma: Fix to work with even a single MSI-X vector
  RDMA/ocrdma: Remove the MTU check based on Ethernet MTU
  RDMA/ocrdma: Add support for fast register work requests (FRWR)
  RDMA/ocrdma: Create IRD queue fix
  IB/core: Better checking of userspace values for receive flow steering
  IB/mlx4: Add receive flow steering support
  IB/core: Export ib_create/destroy_flow through uverbs
  ...
parents 00341b53 82af24ac
@@ -1385,8 +1385,9 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
 {
 	struct rdma_id_private *id_priv = iw_id->context;
 	struct rdma_cm_event event;
-	struct sockaddr_in *sin;
 	int ret = 0;
+	struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
+	struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
 
 	if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
 		return 0;
@@ -1397,10 +1398,10 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
 		event.event = RDMA_CM_EVENT_DISCONNECTED;
 		break;
 	case IW_CM_EVENT_CONNECT_REPLY:
-		sin = (struct sockaddr_in *) cma_src_addr(id_priv);
-		*sin = iw_event->local_addr;
-		sin = (struct sockaddr_in *) cma_dst_addr(id_priv);
-		*sin = iw_event->remote_addr;
+		memcpy(cma_src_addr(id_priv), laddr,
+		       rdma_addr_size(laddr));
+		memcpy(cma_dst_addr(id_priv), raddr,
+		       rdma_addr_size(raddr));
 		switch (iw_event->status) {
 		case 0:
 			event.event = RDMA_CM_EVENT_ESTABLISHED;
@@ -1450,11 +1451,12 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 {
 	struct rdma_cm_id *new_cm_id;
 	struct rdma_id_private *listen_id, *conn_id;
-	struct sockaddr_in *sin;
 	struct net_device *dev = NULL;
 	struct rdma_cm_event event;
 	int ret;
 	struct ib_device_attr attr;
+	struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
+	struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
 
 	listen_id = cm_id->context;
 	if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
@@ -1472,14 +1474,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
 	conn_id->state = RDMA_CM_CONNECT;
 
-	dev = ip_dev_find(&init_net, iw_event->local_addr.sin_addr.s_addr);
-	if (!dev) {
-		ret = -EADDRNOTAVAIL;
-		mutex_unlock(&conn_id->handler_mutex);
-		rdma_destroy_id(new_cm_id);
-		goto out;
-	}
-	ret = rdma_copy_addr(&conn_id->id.route.addr.dev_addr, dev, NULL);
+	ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr);
 	if (ret) {
 		mutex_unlock(&conn_id->handler_mutex);
 		rdma_destroy_id(new_cm_id);
@@ -1497,10 +1492,8 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 	cm_id->context = conn_id;
 	cm_id->cm_handler = cma_iw_handler;
 
-	sin = (struct sockaddr_in *) cma_src_addr(conn_id);
-	*sin = iw_event->local_addr;
-	sin = (struct sockaddr_in *) cma_dst_addr(conn_id);
-	*sin = iw_event->remote_addr;
+	memcpy(cma_src_addr(conn_id), laddr, rdma_addr_size(laddr));
+	memcpy(cma_dst_addr(conn_id), raddr, rdma_addr_size(raddr));
 
 	ret = ib_query_device(conn_id->id.device, &attr);
 	if (ret) {
@@ -1576,7 +1569,6 @@ static int cma_ib_listen(struct rdma_id_private *id_priv)
 static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
 {
 	int ret;
-	struct sockaddr_in *sin;
 	struct iw_cm_id *id;
 
 	id = iw_create_cm_id(id_priv->id.device,
@@ -1587,8 +1579,8 @@ static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
 	id_priv->cm_id.iw = id;
 
-	sin = (struct sockaddr_in *) cma_src_addr(id_priv);
-	id_priv->cm_id.iw->local_addr = *sin;
+	memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv),
+	       rdma_addr_size(cma_src_addr(id_priv)));
 
 	ret = iw_cm_listen(id_priv->cm_id.iw, backlog);
@@ -2803,7 +2795,6 @@ static int cma_connect_iw(struct rdma_id_private *id_priv,
 			  struct rdma_conn_param *conn_param)
 {
 	struct iw_cm_id *cm_id;
-	struct sockaddr_in* sin;
 	int ret;
 	struct iw_cm_conn_param iw_param;
 
@@ -2813,11 +2804,10 @@ static int cma_connect_iw(struct rdma_id_private *id_priv,
 	id_priv->cm_id.iw = cm_id;
 
-	sin = (struct sockaddr_in *) cma_src_addr(id_priv);
-	cm_id->local_addr = *sin;
-
-	sin = (struct sockaddr_in *) cma_dst_addr(id_priv);
-	cm_id->remote_addr = *sin;
+	memcpy(&cm_id->local_addr, cma_src_addr(id_priv),
+	       rdma_addr_size(cma_src_addr(id_priv)));
+	memcpy(&cm_id->remote_addr, cma_dst_addr(id_priv),
+	       rdma_addr_size(cma_dst_addr(id_priv)));
 
 	ret = cma_modify_qp_rtr(id_priv, conn_param);
 	if (ret)
......
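The change above is the template for the whole IPv6-for-iWARP part of this pull: `struct sockaddr_in` locals give way to generic `struct sockaddr` pointers, and fixed-size struct assignment gives way to `memcpy()` sized by `rdma_addr_size()`, so one code path serves both AF_INET and AF_INET6. A minimal sketch of the size rule, using a hypothetical helper rather than quoting include/rdma/ib_addr.h:

#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>

/* Hypothetical re-statement of the rdma_addr_size() idea: the copy
 * length follows the address family instead of being hard-coded to
 * sizeof(struct sockaddr_in), which is what lets the memcpy() calls
 * above carry IPv6 as well as IPv4 iWARP endpoints. */
static int addr_copy_len(const struct sockaddr *addr)
{
        switch (addr->sa_family) {
        case AF_INET:
                return sizeof(struct sockaddr_in);      /* IPv4: 16 bytes */
        case AF_INET6:
                return sizeof(struct sockaddr_in6);     /* IPv6: 28 bytes */
        default:
                return 0;                               /* unknown family */
        }
}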
@@ -135,6 +135,7 @@ struct ib_usrq_object {
 struct ib_uqp_object {
 	struct ib_uevent_object uevent;
 	struct list_head mcast_list;
+	struct ib_uxrcd_object *uxrcd;
 };
 
 struct ib_ucq_object {
@@ -155,6 +156,7 @@ extern struct idr ib_uverbs_cq_idr;
 extern struct idr ib_uverbs_qp_idr;
 extern struct idr ib_uverbs_srq_idr;
 extern struct idr ib_uverbs_xrcd_idr;
+extern struct idr ib_uverbs_rule_idr;
 
 void idr_remove_uobj(struct idr *idp, struct ib_uobject *uobj);
@@ -215,5 +217,7 @@ IB_UVERBS_DECLARE_CMD(destroy_srq);
 IB_UVERBS_DECLARE_CMD(create_xsrq);
 IB_UVERBS_DECLARE_CMD(open_xrcd);
 IB_UVERBS_DECLARE_CMD(close_xrcd);
+IB_UVERBS_DECLARE_CMD(create_flow);
+IB_UVERBS_DECLARE_CMD(destroy_flow);
 
 #endif /* UVERBS_H */
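For reference, each IB_UVERBS_DECLARE_CMD() line expands to the prototype of one write()-dispatched handler. From memory, the macro earlier in this header is roughly the following (the exact spelling is an assumption, but it matches the handler signatures seen in uverbs_cmd.c below):

#define IB_UVERBS_DECLARE_CMD(name)					\
	ssize_t ib_uverbs_##name(struct ib_uverbs_file *file,		\
				 const char __user *buf, int in_len,	\
				 int out_len)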
@@ -54,6 +54,7 @@ static struct uverbs_lock_class qp_lock_class = { .name = "QP-uobj" };
 static struct uverbs_lock_class ah_lock_class = { .name = "AH-uobj" };
 static struct uverbs_lock_class srq_lock_class = { .name = "SRQ-uobj" };
 static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" };
+static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };
 
 #define INIT_UDATA(udata, ibuf, obuf, ilen, olen)		\
 	do {							\
@@ -330,6 +331,7 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
 	INIT_LIST_HEAD(&ucontext->srq_list);
 	INIT_LIST_HEAD(&ucontext->ah_list);
 	INIT_LIST_HEAD(&ucontext->xrcd_list);
+	INIT_LIST_HEAD(&ucontext->rule_list);
 	ucontext->closing = 0;
 
 	resp.num_comp_vectors = file->device->num_comp_vectors;
@@ -1526,7 +1528,7 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
 		   (unsigned long) cmd.response + sizeof resp,
 		   in_len - sizeof cmd, out_len - sizeof resp);
 
-	obj = kmalloc(sizeof *obj, GFP_KERNEL);
+	obj = kzalloc(sizeof *obj, GFP_KERNEL);
 	if (!obj)
 		return -ENOMEM;
@@ -1642,8 +1644,13 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
 			goto err_copy;
 	}
 
-	if (xrcd)
+	if (xrcd) {
+		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
+					  uobject);
+		atomic_inc(&obj->uxrcd->refcnt);
 		put_xrcd_read(xrcd_uobj);
+	}
+
 	if (pd)
 		put_pd_read(pd);
 	if (scq)
@@ -1753,6 +1760,8 @@ ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
 		goto err_remove;
 	}
 
+	obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
+	atomic_inc(&obj->uxrcd->refcnt);
 	put_xrcd_read(xrcd_uobj);
 
 	mutex_lock(&file->mutex);
@@ -2019,6 +2028,9 @@ ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
 	if (ret)
 		return ret;
 
+	if (obj->uxrcd)
+		atomic_dec(&obj->uxrcd->refcnt);
+
 	idr_remove_uobj(&ib_uverbs_qp_idr, uobj);
 
 	mutex_lock(&file->mutex);
@@ -2587,6 +2599,232 @@ ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
 	return ret ? ret : in_len;
 }
 
+static int kern_spec_to_ib_spec(struct ib_kern_spec *kern_spec,
+				union ib_flow_spec *ib_spec)
+{
+	ib_spec->type = kern_spec->type;
+
+	switch (ib_spec->type) {
+	case IB_FLOW_SPEC_ETH:
+		ib_spec->eth.size = sizeof(struct ib_flow_spec_eth);
+		if (ib_spec->eth.size != kern_spec->eth.size)
+			return -EINVAL;
+		memcpy(&ib_spec->eth.val, &kern_spec->eth.val,
+		       sizeof(struct ib_flow_eth_filter));
+		memcpy(&ib_spec->eth.mask, &kern_spec->eth.mask,
+		       sizeof(struct ib_flow_eth_filter));
+		break;
+	case IB_FLOW_SPEC_IPV4:
+		ib_spec->ipv4.size = sizeof(struct ib_flow_spec_ipv4);
+		if (ib_spec->ipv4.size != kern_spec->ipv4.size)
+			return -EINVAL;
+		memcpy(&ib_spec->ipv4.val, &kern_spec->ipv4.val,
+		       sizeof(struct ib_flow_ipv4_filter));
+		memcpy(&ib_spec->ipv4.mask, &kern_spec->ipv4.mask,
+		       sizeof(struct ib_flow_ipv4_filter));
+		break;
+	case IB_FLOW_SPEC_TCP:
+	case IB_FLOW_SPEC_UDP:
+		ib_spec->tcp_udp.size = sizeof(struct ib_flow_spec_tcp_udp);
+		if (ib_spec->tcp_udp.size != kern_spec->tcp_udp.size)
+			return -EINVAL;
+		memcpy(&ib_spec->tcp_udp.val, &kern_spec->tcp_udp.val,
+		       sizeof(struct ib_flow_tcp_udp_filter));
+		memcpy(&ib_spec->tcp_udp.mask, &kern_spec->tcp_udp.mask,
+		       sizeof(struct ib_flow_tcp_udp_filter));
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+ssize_t ib_uverbs_create_flow(struct ib_uverbs_file *file,
+			      const char __user *buf, int in_len,
+			      int out_len)
+{
+	struct ib_uverbs_create_flow	  cmd;
+	struct ib_uverbs_create_flow_resp resp;
+	struct ib_uobject		  *uobj;
+	struct ib_flow			  *flow_id;
+	struct ib_kern_flow_attr	  *kern_flow_attr;
+	struct ib_flow_attr		  *flow_attr;
+	struct ib_qp			  *qp;
+	int err = 0;
+	void *kern_spec;
+	void *ib_spec;
+	int i;
+	int kern_attr_size;
+
+	if (out_len < sizeof(resp))
+		return -ENOSPC;
+
+	if (copy_from_user(&cmd, buf, sizeof(cmd)))
+		return -EFAULT;
+
+	if (cmd.comp_mask)
+		return -EINVAL;
+
+	if ((cmd.flow_attr.type == IB_FLOW_ATTR_SNIFFER &&
+	     !capable(CAP_NET_ADMIN)) || !capable(CAP_NET_RAW))
+		return -EPERM;
+
+	if (cmd.flow_attr.num_of_specs < 0 ||
+	    cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
+		return -EINVAL;
+
+	kern_attr_size = cmd.flow_attr.size - sizeof(cmd) -
+			 sizeof(struct ib_uverbs_cmd_hdr_ex);
+
+	if (cmd.flow_attr.size < 0 || cmd.flow_attr.size > in_len ||
+	    kern_attr_size < 0 || kern_attr_size >
+	    (cmd.flow_attr.num_of_specs * sizeof(struct ib_kern_spec)))
+		return -EINVAL;
+
+	if (cmd.flow_attr.num_of_specs) {
+		kern_flow_attr = kmalloc(cmd.flow_attr.size, GFP_KERNEL);
+		if (!kern_flow_attr)
+			return -ENOMEM;
+
+		memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
+		if (copy_from_user(kern_flow_attr + 1, buf + sizeof(cmd),
+				   kern_attr_size)) {
+			err = -EFAULT;
+			goto err_free_attr;
+		}
+	} else {
+		kern_flow_attr = &cmd.flow_attr;
+		kern_attr_size = sizeof(cmd.flow_attr);
+	}
+
+	uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
+	if (!uobj) {
+		err = -ENOMEM;
+		goto err_free_attr;
+	}
+	init_uobj(uobj, 0, file->ucontext, &rule_lock_class);
+	down_write(&uobj->mutex);
+
+	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
+	if (!qp) {
+		err = -EINVAL;
+		goto err_uobj;
+	}
+
+	flow_attr = kmalloc(cmd.flow_attr.size, GFP_KERNEL);
+	if (!flow_attr) {
+		err = -ENOMEM;
+		goto err_put;
+	}
+
+	flow_attr->type = kern_flow_attr->type;
+	flow_attr->priority = kern_flow_attr->priority;
+	flow_attr->num_of_specs = kern_flow_attr->num_of_specs;
+	flow_attr->port = kern_flow_attr->port;
+	flow_attr->flags = kern_flow_attr->flags;
+	flow_attr->size = sizeof(*flow_attr);
+
+	kern_spec = kern_flow_attr + 1;
+	ib_spec = flow_attr + 1;
+	for (i = 0; i < flow_attr->num_of_specs && kern_attr_size > 0; i++) {
+		err = kern_spec_to_ib_spec(kern_spec, ib_spec);
+		if (err)
+			goto err_free;
+		flow_attr->size +=
+			((union ib_flow_spec *) ib_spec)->size;
+		kern_attr_size -= ((struct ib_kern_spec *) kern_spec)->size;
+		kern_spec += ((struct ib_kern_spec *) kern_spec)->size;
+		ib_spec += ((union ib_flow_spec *) ib_spec)->size;
+	}
+	if (kern_attr_size) {
+		pr_warn("create flow failed, %d bytes left from uverb cmd\n",
+			kern_attr_size);
+		goto err_free;
+	}
+	flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
+	if (IS_ERR(flow_id)) {
+		err = PTR_ERR(flow_id);
+		goto err_free;
+	}
+	flow_id->qp = qp;
+	flow_id->uobject = uobj;
+	uobj->object = flow_id;
+
+	err = idr_add_uobj(&ib_uverbs_rule_idr, uobj);
+	if (err)
+		goto destroy_flow;
+
+	memset(&resp, 0, sizeof(resp));
+	resp.flow_handle = uobj->id;
+
+	if (copy_to_user((void __user *)(unsigned long) cmd.response,
+			 &resp, sizeof(resp))) {
+		err = -EFAULT;
+		goto err_copy;
+	}
+
+	put_qp_read(qp);
+	mutex_lock(&file->mutex);
+	list_add_tail(&uobj->list, &file->ucontext->rule_list);
+	mutex_unlock(&file->mutex);
+
+	uobj->live = 1;
+
+	up_write(&uobj->mutex);
+	kfree(flow_attr);
+	if (cmd.flow_attr.num_of_specs)
+		kfree(kern_flow_attr);
+	return in_len;
+err_copy:
+	idr_remove_uobj(&ib_uverbs_rule_idr, uobj);
+destroy_flow:
+	ib_destroy_flow(flow_id);
+err_free:
+	kfree(flow_attr);
+err_put:
+	put_qp_read(qp);
+err_uobj:
+	put_uobj_write(uobj);
+err_free_attr:
+	if (cmd.flow_attr.num_of_specs)
+		kfree(kern_flow_attr);
+	return err;
+}
+
+ssize_t ib_uverbs_destroy_flow(struct ib_uverbs_file *file,
+			       const char __user *buf, int in_len,
+			       int out_len) {
+	struct ib_uverbs_destroy_flow	cmd;
+	struct ib_flow			*flow_id;
+	struct ib_uobject		*uobj;
+	int				ret;
+
+	if (copy_from_user(&cmd, buf, sizeof(cmd)))
+		return -EFAULT;
+
+	uobj = idr_write_uobj(&ib_uverbs_rule_idr, cmd.flow_handle,
+			      file->ucontext);
+	if (!uobj)
+		return -EINVAL;
+	flow_id = uobj->object;
+
+	ret = ib_destroy_flow(flow_id);
+	if (!ret)
+		uobj->live = 0;
+
+	put_uobj_write(uobj);
+
+	idr_remove_uobj(&ib_uverbs_rule_idr, uobj);
+
+	mutex_lock(&file->mutex);
+	list_del(&uobj->list);
+	mutex_unlock(&file->mutex);
+
+	put_uobj(uobj);
+
+	return ret ? ret : in_len;
+}
+
 static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
 				struct ib_uverbs_create_xsrq *cmd,
 				struct ib_udata *udata)
@@ -2860,6 +3098,8 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
 	struct ib_srq			*srq;
 	struct ib_uevent_object		*obj;
 	int				ret = -EINVAL;
+	struct ib_usrq_object		*us;
+	enum ib_srq_type		srq_type;
 
 	if (copy_from_user(&cmd, buf, sizeof cmd))
 		return -EFAULT;
@@ -2869,6 +3109,7 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
 		return -EINVAL;
 	srq = uobj->object;
 	obj = container_of(uobj, struct ib_uevent_object, uobject);
+	srq_type = srq->srq_type;
 
 	ret = ib_destroy_srq(srq);
 	if (!ret)
@@ -2879,6 +3120,11 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
 	if (ret)
 		return ret;
 
+	if (srq_type == IB_SRQT_XRC) {
+		us = container_of(obj, struct ib_usrq_object, uevent);
+		atomic_dec(&us->uxrcd->refcnt);
+	}
+
 	idr_remove_uobj(&ib_uverbs_srq_idr, uobj);
 
 	mutex_lock(&file->mutex);
......
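ib_uverbs_create_flow() above receives a variable-length command: an ib_kern_flow_attr header followed by num_of_specs packed, self-sized specs, and the copy loop advances both cursors by each spec's own size field, finally rejecting the command if the declared sizes do not cover the buffer exactly. A standalone sketch of that walk, with simplified stand-in types rather than the kernel structs:

#include <stddef.h>
#include <stdint.h>

/* Simplified stand-in for the kernel's self-sized spec header:
 * every spec starts with a type and its own total size in bytes. */
struct spec_hdr {
        uint32_t type;
        uint16_t size;
};

/* Walk a packed buffer of up to `n` specs the way the uverbs copy
 * loop does: advance by each spec's declared size and require the
 * sizes to cover the buffer exactly (the kern_attr_size leftover
 * check in ib_uverbs_create_flow()). */
static int walk_specs(const void *buf, size_t len, int n)
{
        const unsigned char *p = buf;
        int i;

        for (i = 0; i < n && len > 0; i++) {
                const struct spec_hdr *h = (const struct spec_hdr *)p;

                if (h->size < sizeof(*h) || h->size > len)
                        return -1;              /* malformed spec */
                p += h->size;
                len -= h->size;
        }
        return len ? -1 : 0;                    /* leftover bytes = bad command */
}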
@@ -73,6 +73,7 @@ DEFINE_IDR(ib_uverbs_cq_idr);
 DEFINE_IDR(ib_uverbs_qp_idr);
 DEFINE_IDR(ib_uverbs_srq_idr);
 DEFINE_IDR(ib_uverbs_xrcd_idr);
+DEFINE_IDR(ib_uverbs_rule_idr);
 
 static DEFINE_SPINLOCK(map_lock);
 static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES);
@@ -113,7 +114,9 @@ static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file,
 	[IB_USER_VERBS_CMD_OPEN_XRCD]		= ib_uverbs_open_xrcd,
 	[IB_USER_VERBS_CMD_CLOSE_XRCD]		= ib_uverbs_close_xrcd,
 	[IB_USER_VERBS_CMD_CREATE_XSRQ]		= ib_uverbs_create_xsrq,
-	[IB_USER_VERBS_CMD_OPEN_QP]		= ib_uverbs_open_qp
+	[IB_USER_VERBS_CMD_OPEN_QP]		= ib_uverbs_open_qp,
+	[IB_USER_VERBS_CMD_CREATE_FLOW]		= ib_uverbs_create_flow,
+	[IB_USER_VERBS_CMD_DESTROY_FLOW]	= ib_uverbs_destroy_flow
 };
 
 static void ib_uverbs_add_one(struct ib_device *device);
@@ -212,6 +215,14 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
 		kfree(uobj);
 	}
 
+	list_for_each_entry_safe(uobj, tmp, &context->rule_list, list) {
+		struct ib_flow *flow_id = uobj->object;
+
+		idr_remove_uobj(&ib_uverbs_rule_idr, uobj);
+		ib_destroy_flow(flow_id);
+		kfree(uobj);
+	}
+
 	list_for_each_entry_safe(uobj, tmp, &context->qp_list, list) {
 		struct ib_qp *qp = uobj->object;
 		struct ib_uqp_object *uqp =
@@ -583,9 +594,6 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
 	if (copy_from_user(&hdr, buf, sizeof hdr))
 		return -EFAULT;
 
-	if (hdr.in_words * 4 != count)
-		return -EINVAL;
-
 	if (hdr.command >= ARRAY_SIZE(uverbs_cmd_table) ||
 	    !uverbs_cmd_table[hdr.command])
 		return -EINVAL;
@@ -597,8 +605,30 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
 	if (!(file->device->ib_dev->uverbs_cmd_mask & (1ull << hdr.command)))
 		return -ENOSYS;
 
-	return uverbs_cmd_table[hdr.command](file, buf + sizeof hdr,
-					     hdr.in_words * 4, hdr.out_words * 4);
+	if (hdr.command >= IB_USER_VERBS_CMD_THRESHOLD) {
+		struct ib_uverbs_cmd_hdr_ex hdr_ex;
+
+		if (copy_from_user(&hdr_ex, buf, sizeof(hdr_ex)))
+			return -EFAULT;
+
+		if (((hdr_ex.in_words + hdr_ex.provider_in_words) * 4) != count)
+			return -EINVAL;
+
+		return uverbs_cmd_table[hdr.command](file,
+						     buf + sizeof(hdr_ex),
+						     (hdr_ex.in_words +
+						      hdr_ex.provider_in_words) * 4,
+						     (hdr_ex.out_words +
+						      hdr_ex.provider_out_words) * 4);
+	} else {
+		if (hdr.in_words * 4 != count)
+			return -EINVAL;
+
+		return uverbs_cmd_table[hdr.command](file,
+						     buf + sizeof(hdr),
+						     hdr.in_words * 4,
+						     hdr.out_words * 4);
+	}
 }
 
 static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma)
......
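Commands numbered at or above IB_USER_VERBS_CMD_THRESHOLD now carry an extended header with separate provider word counts, and ib_uverbs_write() validates count against the sum, which is why the old unconditional in_words check had to move. A hedged sketch of the two layouts as I recall them from include/uapi/rdma/ib_user_verbs.h (the trailing reserved field in particular is an assumption):

#include <linux/types.h>

/* Legacy header: carried by every command since the original uverbs ABI. */
struct ib_uverbs_cmd_hdr {
        __u32 command;
        __u16 in_words;         /* total request length / 4 */
        __u16 out_words;        /* total response length / 4 */
};

/* Extended header for commands >= IB_USER_VERBS_CMD_THRESHOLD; the
 * provider words describe the vendor-specific tail of the message.
 * (Sketch from memory; field names are assumptions, not quoted.) */
struct ib_uverbs_cmd_hdr_ex {
        __u32 command;
        __u16 in_words;
        __u16 out_words;
        __u16 provider_in_words;
        __u16 provider_out_words;
        __u32 cmd_hdr_reserved;
};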
@@ -346,10 +346,13 @@ EXPORT_SYMBOL(ib_destroy_srq);
 static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
 {
 	struct ib_qp *qp = context;
+	unsigned long flags;
 
+	spin_lock_irqsave(&qp->device->event_handler_lock, flags);
 	list_for_each_entry(event->element.qp, &qp->open_list, open_list)
 		if (event->element.qp->event_handler)
 			event->element.qp->event_handler(event, event->element.qp->qp_context);
+	spin_unlock_irqrestore(&qp->device->event_handler_lock, flags);
 }
 
 static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
@@ -1254,3 +1257,30 @@ int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
 	return xrcd->device->dealloc_xrcd(xrcd);
 }
 EXPORT_SYMBOL(ib_dealloc_xrcd);
+
+struct ib_flow *ib_create_flow(struct ib_qp *qp,
+			       struct ib_flow_attr *flow_attr,
+			       int domain)
+{
+	struct ib_flow *flow_id;
+	if (!qp->device->create_flow)
+		return ERR_PTR(-ENOSYS);
+
+	flow_id = qp->device->create_flow(qp, flow_attr, domain);
+	if (!IS_ERR(flow_id))
+		atomic_inc(&qp->usecnt);
+	return flow_id;
+}
+EXPORT_SYMBOL(ib_create_flow);
+
+int ib_destroy_flow(struct ib_flow *flow_id)
+{
+	int err;
+	struct ib_qp *qp = flow_id->qp;
+
+	err = qp->device->destroy_flow(flow_id);
+	if (!err)
+		atomic_dec(&qp->usecnt);
+	return err;
+}
+EXPORT_SYMBOL(ib_destroy_flow);
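For a caller, attaching a rule means building an ib_flow_attr with its specs packed contiguously behind it, exactly as the uverbs handler does above. A hedged sketch of such a consumer (it assumes the ib_flow_spec_eth layout implied by kern_spec_to_ib_spec(); error handling trimmed, and the helper name is invented):

#include <linux/slab.h>
#include <linux/err.h>
#include <linux/if_ether.h>
#include <rdma/ib_verbs.h>

/* Sketch: steer all traffic for one destination MAC to a raw QP.
 * Layout assumption: specs sit immediately after ib_flow_attr, and
 * ib_flow_spec_eth carries type/size plus val/mask filters. */
static struct ib_flow *steer_mac_to_qp(struct ib_qp *qp, u8 port,
                                       const u8 *mac)
{
        struct ib_flow_attr *attr;
        struct ib_flow_spec_eth *eth;
        struct ib_flow *flow;

        attr = kzalloc(sizeof(*attr) + sizeof(*eth), GFP_KERNEL);
        if (!attr)
                return ERR_PTR(-ENOMEM);

        attr->type = IB_FLOW_ATTR_NORMAL;
        attr->num_of_specs = 1;
        attr->port = port;
        attr->size = sizeof(*attr) + sizeof(*eth);

        eth = (struct ib_flow_spec_eth *)(attr + 1);
        eth->type = IB_FLOW_SPEC_ETH;
        eth->size = sizeof(*eth);
        memcpy(eth->val.dst_mac, mac, ETH_ALEN);
        memset(eth->mask.dst_mac, 0xff, ETH_ALEN);      /* exact match */

        flow = ib_create_flow(qp, attr, IB_FLOW_DOMAIN_USER);
        kfree(attr);    /* the attribute is consumed at create time */
        return flow;
}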
@@ -155,6 +155,8 @@ void c2_ae_event(struct c2_dev *c2dev, u32 mq_index)
 	enum c2_event_id event_id;
 	unsigned long flags;
 	int status;
+	struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_event.local_addr;
+	struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_event.remote_addr;
 
 	/*
 	 * retrieve the message
@@ -206,10 +208,10 @@ void c2_ae_event(struct c2_dev *c2dev, u32 mq_index)
 	case CCAE_ACTIVE_CONNECT_RESULTS:
 		res = &wr->ae.ae_active_connect_results;
 		cm_event.event = IW_CM_EVENT_CONNECT_REPLY;
-		cm_event.local_addr.sin_addr.s_addr = res->laddr;
-		cm_event.remote_addr.sin_addr.s_addr = res->raddr;
-		cm_event.local_addr.sin_port = res->lport;
-		cm_event.remote_addr.sin_port = res->rport;
+		laddr->sin_addr.s_addr = res->laddr;
+		raddr->sin_addr.s_addr = res->raddr;
+		laddr->sin_port = res->lport;
+		raddr->sin_port = res->rport;
 		if (status == 0) {
 			cm_event.private_data_len =
 				be32_to_cpu(res->private_data_length);
@@ -281,10 +283,10 @@ void c2_ae_event(struct c2_dev *c2dev, u32 mq_index)
 		}
 		cm_event.event = IW_CM_EVENT_CONNECT_REQUEST;
 		cm_event.provider_data = (void*)(unsigned long)req->cr_handle;
-		cm_event.local_addr.sin_addr.s_addr = req->laddr;
-		cm_event.remote_addr.sin_addr.s_addr = req->raddr;
-		cm_event.local_addr.sin_port = req->lport;
-		cm_event.remote_addr.sin_port = req->rport;
+		laddr->sin_addr.s_addr = req->laddr;
+		raddr->sin_addr.s_addr = req->raddr;
+		laddr->sin_port = req->lport;
+		raddr->sin_port = req->rport;
 		cm_event.private_data_len =
 			be32_to_cpu(req->private_data_length);
 		cm_event.private_data = req->private_data;
......
@@ -46,6 +46,10 @@ int c2_llp_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
 	struct c2wr_qp_connect_req *wr;	/* variable size needs a malloc. */
 	struct c2_vq_req *vq_req;
 	int err;
+	struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;
+
+	if (cm_id->remote_addr.ss_family != AF_INET)
+		return -ENOSYS;
 
 	ibqp = c2_get_qp(cm_id->device, iw_param->qpn);
 	if (!ibqp)
@@ -91,8 +95,8 @@ int c2_llp_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
 	wr->rnic_handle = c2dev->adapter_handle;
 	wr->qp_handle = qp->adapter_handle;
 
-	wr->remote_addr = cm_id->remote_addr.sin_addr.s_addr;
-	wr->remote_port = cm_id->remote_addr.sin_port;
+	wr->remote_addr = raddr->sin_addr.s_addr;
+	wr->remote_port = raddr->sin_port;
 
 	/*
 	 * Move any private data from the callers's buf into
@@ -135,6 +139,10 @@ int c2_llp_service_create(struct iw_cm_id *cm_id, int backlog)
 	struct c2wr_ep_listen_create_rep *reply;
 	struct c2_vq_req *vq_req;
 	int err;
+	struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;
+
+	if (cm_id->local_addr.ss_family != AF_INET)
+		return -ENOSYS;
 
 	c2dev = to_c2dev(cm_id->device);
 	if (c2dev == NULL)
@@ -153,8 +161,8 @@ int c2_llp_service_create(struct iw_cm_id *cm_id, int backlog)
 	c2_wr_set_id(&wr, CCWR_EP_LISTEN_CREATE);
 	wr.hdr.context = (u64) (unsigned long) vq_req;
 	wr.rnic_handle = c2dev->adapter_handle;
-	wr.local_addr = cm_id->local_addr.sin_addr.s_addr;
-	wr.local_port = cm_id->local_addr.sin_port;
+	wr.local_addr = laddr->sin_addr.s_addr;
+	wr.local_port = laddr->sin_port;
 	wr.backlog = cpu_to_be32(backlog);
 	wr.user_context = (u64) (unsigned long) cm_id;
......
@@ -721,8 +721,10 @@ static void connect_reply_upcall(struct iwch_ep *ep, int status)
 	memset(&event, 0, sizeof(event));
 	event.event = IW_CM_EVENT_CONNECT_REPLY;
 	event.status = status;
-	event.local_addr = ep->com.local_addr;
-	event.remote_addr = ep->com.remote_addr;
+	memcpy(&event.local_addr, &ep->com.local_addr,
+	       sizeof(ep->com.local_addr));
+	memcpy(&event.remote_addr, &ep->com.remote_addr,
+	       sizeof(ep->com.remote_addr));
 
 	if ((status == 0) || (status == -ECONNREFUSED)) {
 		event.private_data_len = ep->plen;
@@ -747,8 +749,10 @@ static void connect_request_upcall(struct iwch_ep *ep)
 	PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
 	memset(&event, 0, sizeof(event));
 	event.event = IW_CM_EVENT_CONNECT_REQUEST;
-	event.local_addr = ep->com.local_addr;
-	event.remote_addr = ep->com.remote_addr;
+	memcpy(&event.local_addr, &ep->com.local_addr,
+	       sizeof(ep->com.local_addr));
+	memcpy(&event.remote_addr, &ep->com.remote_addr,
+	       sizeof(ep->com.local_addr));
 	event.private_data_len = ep->plen;
 	event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
 	event.provider_data = ep;
@@ -1872,8 +1876,9 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 static int is_loopback_dst(struct iw_cm_id *cm_id)
 {
 	struct net_device *dev;
+	struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;
 
-	dev = ip_dev_find(&init_net, cm_id->remote_addr.sin_addr.s_addr);
+	dev = ip_dev_find(&init_net, raddr->sin_addr.s_addr);
 	if (!dev)
 		return 0;
 	dev_put(dev);
@@ -1886,6 +1891,13 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	struct iwch_ep *ep;
 	struct rtable *rt;
 	int err = 0;
+	struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;
+	struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;
+
+	if (cm_id->remote_addr.ss_family != PF_INET) {
+		err = -ENOSYS;
+		goto out;
+	}
 
 	if (is_loopback_dst(cm_id)) {
 		err = -ENOSYS;
@@ -1929,11 +1941,9 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	}
 
 	/* find a route */
-	rt = find_route(h->rdev.t3cdev_p,
-			cm_id->local_addr.sin_addr.s_addr,
-			cm_id->remote_addr.sin_addr.s_addr,
-			cm_id->local_addr.sin_port,
-			cm_id->remote_addr.sin_port, IPTOS_LOWDELAY);
+	rt = find_route(h->rdev.t3cdev_p, laddr->sin_addr.s_addr,
+			raddr->sin_addr.s_addr, laddr->sin_port,
+			raddr->sin_port, IPTOS_LOWDELAY);
 	if (!rt) {
 		printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
 		err = -EHOSTUNREACH;
@@ -1941,7 +1951,7 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	}
 	ep->dst = &rt->dst;
 	ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst, NULL,
-			     &cm_id->remote_addr.sin_addr.s_addr);
+			     &raddr->sin_addr.s_addr);
 	if (!ep->l2t) {
 		printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
 		err = -ENOMEM;
@@ -1950,8 +1960,10 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	state_set(&ep->com, CONNECTING);
 	ep->tos = IPTOS_LOWDELAY;
-	ep->com.local_addr = cm_id->local_addr;
-	ep->com.remote_addr = cm_id->remote_addr;
+	memcpy(&ep->com.local_addr, &cm_id->local_addr,
+	       sizeof(ep->com.local_addr));
+	memcpy(&ep->com.remote_addr, &cm_id->remote_addr,
+	       sizeof(ep->com.remote_addr));
 
 	/* send connect request to rnic */
 	err = send_connect(ep);
@@ -1979,6 +1991,11 @@ int iwch_create_listen(struct iw_cm_id *cm_id, int backlog)
 
 	might_sleep();
 
+	if (cm_id->local_addr.ss_family != PF_INET) {
+		err = -ENOSYS;
+		goto fail1;
+	}
+
 	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
 	if (!ep) {
 		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
@@ -1990,7 +2007,8 @@ int iwch_create_listen(struct iw_cm_id *cm_id, int backlog)
 	cm_id->add_ref(cm_id);
 	ep->com.cm_id = cm_id;
 	ep->backlog = backlog;
-	ep->com.local_addr = cm_id->local_addr;
+	memcpy(&ep->com.local_addr, &cm_id->local_addr,
+	       sizeof(ep->com.local_addr));
 
 	/*
 	 * Allocate a server TID.
......
 config INFINIBAND_CXGB4
 	tristate "Chelsio T4 RDMA Driver"
-	depends on CHELSIO_T4 && INET
+	depends on CHELSIO_T4 && INET && (IPV6 || IPV6=n)
 	select GENERIC_ALLOCATOR
 	---help---
 	  This is an iWARP/RDMA driver for the Chelsio T4 1GbE and
......
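The new (IPV6 || IPV6=n) term is the standard Kconfig idiom for an optional dependency: it rules out only the combination INFINIBAND_CXGB4=y with IPV6=m, where a built-in driver would need symbols from a modular IPv6 stack, while still allowing the driver to build with IPv6 disabled entirely.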
This diff is collapsed.
This diff is collapsed.
@@ -103,18 +103,43 @@ static int dump_qp(int id, void *p, void *data)
 	if (space == 0)
 		return 1;
 
-	if (qp->ep)
-		cc = snprintf(qpd->buf + qpd->pos, space,
-			      "qp sq id %u rq id %u state %u onchip %u "
-			      "ep tid %u state %u %pI4:%u->%pI4:%u\n",
-			      qp->wq.sq.qid, qp->wq.rq.qid, (int)qp->attr.state,
-			      qp->wq.sq.flags & T4_SQ_ONCHIP,
-			      qp->ep->hwtid, (int)qp->ep->com.state,
-			      &qp->ep->com.local_addr.sin_addr.s_addr,
-			      ntohs(qp->ep->com.local_addr.sin_port),
-			      &qp->ep->com.remote_addr.sin_addr.s_addr,
-			      ntohs(qp->ep->com.remote_addr.sin_port));
-	else
+	if (qp->ep) {
+		if (qp->ep->com.local_addr.ss_family == AF_INET) {
+			struct sockaddr_in *lsin = (struct sockaddr_in *)
+				&qp->ep->com.local_addr;
+			struct sockaddr_in *rsin = (struct sockaddr_in *)
+				&qp->ep->com.remote_addr;
+
+			cc = snprintf(qpd->buf + qpd->pos, space,
+				      "rc qp sq id %u rq id %u state %u "
+				      "onchip %u ep tid %u state %u "
+				      "%pI4:%u->%pI4:%u\n",
+				      qp->wq.sq.qid, qp->wq.rq.qid,
+				      (int)qp->attr.state,
+				      qp->wq.sq.flags & T4_SQ_ONCHIP,
+				      qp->ep->hwtid, (int)qp->ep->com.state,
+				      &lsin->sin_addr, ntohs(lsin->sin_port),
+				      &rsin->sin_addr, ntohs(rsin->sin_port));
+		} else {
+			struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
+				&qp->ep->com.local_addr;
+			struct sockaddr_in6 *rsin6 = (struct sockaddr_in6 *)
+				&qp->ep->com.remote_addr;
+
+			cc = snprintf(qpd->buf + qpd->pos, space,
+				      "rc qp sq id %u rq id %u state %u "
+				      "onchip %u ep tid %u state %u "
+				      "%pI6:%u->%pI6:%u\n",
+				      qp->wq.sq.qid, qp->wq.rq.qid,
+				      (int)qp->attr.state,
+				      qp->wq.sq.flags & T4_SQ_ONCHIP,
+				      qp->ep->hwtid, (int)qp->ep->com.state,
+				      &lsin6->sin6_addr,
+				      ntohs(lsin6->sin6_port),
+				      &rsin6->sin6_addr,
+				      ntohs(rsin6->sin6_port));
+		}
+	} else
 		cc = snprintf(qpd->buf + qpd->pos, space,
 			      "qp sq id %u rq id %u state %u onchip %u\n",
 			      qp->wq.sq.qid, qp->wq.rq.qid,
@@ -351,15 +376,37 @@ static int dump_ep(int id, void *p, void *data)
 	if (space == 0)
 		return 1;
 
-	cc = snprintf(epd->buf + epd->pos, space,
-		      "ep %p cm_id %p qp %p state %d flags 0x%lx history 0x%lx "
-		      "hwtid %d atid %d %pI4:%d <-> %pI4:%d\n",
-		      ep, ep->com.cm_id, ep->com.qp, (int)ep->com.state,
-		      ep->com.flags, ep->com.history, ep->hwtid, ep->atid,
-		      &ep->com.local_addr.sin_addr.s_addr,
-		      ntohs(ep->com.local_addr.sin_port),
-		      &ep->com.remote_addr.sin_addr.s_addr,
-		      ntohs(ep->com.remote_addr.sin_port));
+	if (ep->com.local_addr.ss_family == AF_INET) {
+		struct sockaddr_in *lsin = (struct sockaddr_in *)
+			&ep->com.local_addr;
+		struct sockaddr_in *rsin = (struct sockaddr_in *)
+			&ep->com.remote_addr;
+
+		cc = snprintf(epd->buf + epd->pos, space,
+			      "ep %p cm_id %p qp %p state %d flags 0x%lx "
+			      "history 0x%lx hwtid %d atid %d "
+			      "%pI4:%d <-> %pI4:%d\n",
+			      ep, ep->com.cm_id, ep->com.qp,
+			      (int)ep->com.state, ep->com.flags,
+			      ep->com.history, ep->hwtid, ep->atid,
+			      &lsin->sin_addr, ntohs(lsin->sin_port),
+			      &rsin->sin_addr, ntohs(rsin->sin_port));
+	} else {
+		struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
+			&ep->com.local_addr;
+		struct sockaddr_in6 *rsin6 = (struct sockaddr_in6 *)
+			&ep->com.remote_addr;
+
+		cc = snprintf(epd->buf + epd->pos, space,
+			      "ep %p cm_id %p qp %p state %d flags 0x%lx "
+			      "history 0x%lx hwtid %d atid %d "
+			      "%pI6:%d <-> %pI6:%d\n",
+			      ep, ep->com.cm_id, ep->com.qp,
+			      (int)ep->com.state, ep->com.flags,
+			      ep->com.history, ep->hwtid, ep->atid,
+			      &lsin6->sin6_addr, ntohs(lsin6->sin6_port),
+			      &rsin6->sin6_addr, ntohs(rsin6->sin6_port));
+	}
 	if (cc < space)
 		epd->pos += cc;
 	return 0;
@@ -376,12 +423,27 @@ static int dump_listen_ep(int id, void *p, void *data)
 	if (space == 0)
 		return 1;
 
-	cc = snprintf(epd->buf + epd->pos, space,
-		      "ep %p cm_id %p state %d flags 0x%lx stid %d backlog %d "
-		      "%pI4:%d\n", ep, ep->com.cm_id, (int)ep->com.state,
-		      ep->com.flags, ep->stid, ep->backlog,
-		      &ep->com.local_addr.sin_addr.s_addr,
-		      ntohs(ep->com.local_addr.sin_port));
+	if (ep->com.local_addr.ss_family == AF_INET) {
+		struct sockaddr_in *lsin = (struct sockaddr_in *)
+			&ep->com.local_addr;
+
+		cc = snprintf(epd->buf + epd->pos, space,
+			      "ep %p cm_id %p state %d flags 0x%lx stid %d "
+			      "backlog %d %pI4:%d\n",
+			      ep, ep->com.cm_id, (int)ep->com.state,
+			      ep->com.flags, ep->stid, ep->backlog,
+			      &lsin->sin_addr, ntohs(lsin->sin_port));
+	} else {
+		struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
+			&ep->com.local_addr;
+
+		cc = snprintf(epd->buf + epd->pos, space,
+			      "ep %p cm_id %p state %d flags 0x%lx stid %d "
+			      "backlog %d %pI6:%d\n",
+			      ep, ep->com.cm_id, (int)ep->com.state,
+			      ep->com.flags, ep->stid, ep->backlog,
+			      &lsin6->sin6_addr, ntohs(lsin6->sin6_port));
+	}
 	if (cc < space)
 		epd->pos += cc;
 	return 0;
......
@@ -44,16 +44,6 @@ static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
 	struct c4iw_qp_attributes attrs;
 	unsigned long flag;
 
-	if ((qhp->attr.state == C4IW_QP_STATE_ERROR) ||
-	    (qhp->attr.state == C4IW_QP_STATE_TERMINATE)) {
-		pr_err("%s AE after RTS - qpid 0x%x opcode %d status 0x%x "\
-		       "type %d wrid.hi 0x%x wrid.lo 0x%x\n",
-		       __func__, CQE_QPID(err_cqe), CQE_OPCODE(err_cqe),
-		       CQE_STATUS(err_cqe), CQE_TYPE(err_cqe),
-		       CQE_WRID_HI(err_cqe), CQE_WRID_LOW(err_cqe));
-		return;
-	}
-
 	printk(KERN_ERR MOD "AE qpid 0x%x opcode %d status 0x%x "
 	       "type %d wrid.hi 0x%x wrid.lo 0x%x\n",
 	       CQE_QPID(err_cqe), CQE_OPCODE(err_cqe),
......
@@ -752,8 +752,8 @@ struct c4iw_ep_common {
 	enum c4iw_ep_state state;
 	struct kref kref;
 	struct mutex mutex;
-	struct sockaddr_in local_addr;
-	struct sockaddr_in remote_addr;
+	struct sockaddr_storage local_addr;
+	struct sockaddr_storage remote_addr;
 	struct c4iw_wr_wait wr_wait;
 	unsigned long flags;
 	unsigned long history;
@@ -917,12 +917,11 @@ void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
 u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size);
 void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size);
 int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb);
-void c4iw_flush_hw_cq(struct t4_cq *cq);
+void c4iw_flush_hw_cq(struct c4iw_cq *chp);
 void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
-void c4iw_count_scqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
 int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp);
 int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count);
-int c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count);
+int c4iw_flush_sq(struct c4iw_qp *qhp);
 int c4iw_ev_handler(struct c4iw_dev *rnicp, u32 qid);
 u16 c4iw_rqes_posted(struct c4iw_qp *qhp);
 int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe);
......
@@ -737,6 +737,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		swsqe->idx = qhp->wq.sq.pidx;
 		swsqe->complete = 0;
 		swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED);
+		swsqe->flushed = 0;
 		swsqe->wr_id = wr->wr_id;
 
 		init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);
@@ -1006,7 +1007,15 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
 	/* locking hierarchy: cq lock first, then qp lock. */
 	spin_lock_irqsave(&rchp->lock, flag);
 	spin_lock(&qhp->lock);
-	c4iw_flush_hw_cq(&rchp->cq);
+
+	if (qhp->wq.flushed) {
+		spin_unlock(&qhp->lock);
+		spin_unlock_irqrestore(&rchp->lock, flag);
+		return;
+	}
+	qhp->wq.flushed = 1;
+
+	c4iw_flush_hw_cq(rchp);
 	c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
 	flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
 	spin_unlock(&qhp->lock);
@@ -1020,9 +1029,9 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
 	/* locking hierarchy: cq lock first, then qp lock. */
 	spin_lock_irqsave(&schp->lock, flag);
 	spin_lock(&qhp->lock);
-	c4iw_flush_hw_cq(&schp->cq);
-	c4iw_count_scqes(&schp->cq, &qhp->wq, &count);
-	flushed = c4iw_flush_sq(&qhp->wq, &schp->cq, count);
+	if (schp != rchp)
+		c4iw_flush_hw_cq(schp);
+	flushed = c4iw_flush_sq(qhp);
 	spin_unlock(&qhp->lock);
 	spin_unlock_irqrestore(&schp->lock, flag);
 	if (flushed) {
@@ -1037,11 +1046,11 @@ static void flush_qp(struct c4iw_qp *qhp)
 	struct c4iw_cq *rchp, *schp;
 	unsigned long flag;
 
-	rchp = get_chp(qhp->rhp, qhp->attr.rcq);
-	schp = get_chp(qhp->rhp, qhp->attr.scq);
+	rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
+	schp = to_c4iw_cq(qhp->ibqp.send_cq);
 
+	t4_set_wq_in_error(&qhp->wq);
 	if (qhp->ibqp.uobject) {
-		t4_set_wq_in_error(&qhp->wq);
 		t4_set_cq_in_error(&rchp->cq);
 		spin_lock_irqsave(&rchp->comp_handler_lock, flag);
 		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
@@ -1330,8 +1339,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 				disconnect = 1;
 				c4iw_get_ep(&qhp->ep->com);
 			}
-			if (qhp->ibqp.uobject)
-				t4_set_wq_in_error(&qhp->wq);
+			t4_set_wq_in_error(&qhp->wq);
 			ret = rdma_fini(rhp, qhp, ep);
 			if (ret)
 				goto err;
@@ -1340,18 +1348,21 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 			set_state(qhp, C4IW_QP_STATE_TERMINATE);
 			qhp->attr.layer_etype = attrs->layer_etype;
 			qhp->attr.ecode = attrs->ecode;
-			if (qhp->ibqp.uobject)
-				t4_set_wq_in_error(&qhp->wq);
+			t4_set_wq_in_error(&qhp->wq);
 			ep = qhp->ep;
+			disconnect = 1;
 			if (!internal)
 				terminate = 1;
-			disconnect = 1;
+			else {
+				ret = rdma_fini(rhp, qhp, ep);
+				if (ret)
+					goto err;
+			}
 			c4iw_get_ep(&qhp->ep->com);
 			break;
 		case C4IW_QP_STATE_ERROR:
 			set_state(qhp, C4IW_QP_STATE_ERROR);
-			if (qhp->ibqp.uobject)
-				t4_set_wq_in_error(&qhp->wq);
+			t4_set_wq_in_error(&qhp->wq);
 			if (!internal) {
 				abort = 1;
 				disconnect = 1;
@@ -1552,12 +1563,12 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
 	ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;
 
 	qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
 	if (!qhp)
 		return ERR_PTR(-ENOMEM);
 	qhp->wq.sq.size = sqsize;
 	qhp->wq.sq.memsize = (sqsize + 1) * sizeof *qhp->wq.sq.queue;
+	qhp->wq.sq.flush_cidx = -1;
 	qhp->wq.rq.size = rqsize;
 	qhp->wq.rq.memsize = (rqsize + 1) * sizeof *qhp->wq.rq.queue;
......
@@ -36,9 +36,9 @@
 #include "t4_msg.h"
 #include "t4fw_ri_api.h"
 
-#define T4_MAX_NUM_QP (1<<16)
-#define T4_MAX_NUM_CQ (1<<15)
-#define T4_MAX_NUM_PD (1<<15)
+#define T4_MAX_NUM_QP 65536
+#define T4_MAX_NUM_CQ 65536
+#define T4_MAX_NUM_PD 65536
 #define T4_EQ_STATUS_ENTRIES (L1_CACHE_BYTES > 64 ? 2 : 1)
 #define T4_MAX_EQ_SIZE (65520 - T4_EQ_STATUS_ENTRIES)
 #define T4_MAX_IQ_SIZE (65520 - 1)
@@ -47,7 +47,7 @@
 #define T4_MAX_QP_DEPTH (T4_MAX_RQ_SIZE - 1)
 #define T4_MAX_CQ_DEPTH (T4_MAX_IQ_SIZE - 1)
 #define T4_MAX_NUM_STAG (1<<15)
-#define T4_MAX_MR_SIZE (~0ULL - 1)
+#define T4_MAX_MR_SIZE (~0ULL)
 #define T4_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */
 #define T4_STAG_UNSET 0xffffffff
 #define T4_FW_MAJ 0
@@ -269,6 +269,7 @@ struct t4_swsqe {
 	int		complete;
 	int		signaled;
 	u16		idx;
+	int		flushed;
 };
 
 static inline pgprot_t t4_pgprot_wc(pgprot_t prot)
@@ -300,6 +301,7 @@ struct t4_sq {
 	u16 pidx;
 	u16 wq_pidx;
 	u16 flags;
+	short flush_cidx;
 };
 
 struct t4_swrqe {
@@ -330,6 +332,7 @@ struct t4_wq {
 	void __iomem *db;
 	void __iomem *gts;
 	struct c4iw_rdev *rdev;
+	int flushed;
 };
 
 static inline int t4_rqes_posted(struct t4_wq *wq)
@@ -412,6 +415,9 @@ static inline void t4_sq_produce(struct t4_wq *wq, u8 len16)
 
 static inline void t4_sq_consume(struct t4_wq *wq)
 {
+	BUG_ON(wq->sq.in_use < 1);
+	if (wq->sq.cidx == wq->sq.flush_cidx)
+		wq->sq.flush_cidx = -1;
 	wq->sq.in_use--;
 	if (++wq->sq.cidx == wq->sq.size)
 		wq->sq.cidx = 0;
@@ -505,12 +511,18 @@ static inline int t4_arm_cq(struct t4_cq *cq, int se)
 static inline void t4_swcq_produce(struct t4_cq *cq)
 {
 	cq->sw_in_use++;
+	if (cq->sw_in_use == cq->size) {
+		PDBG("%s cxgb4 sw cq overflow cqid %u\n", __func__, cq->cqid);
+		cq->error = 1;
+		BUG_ON(1);
+	}
 	if (++cq->sw_pidx == cq->size)
 		cq->sw_pidx = 0;
 }
 
 static inline void t4_swcq_consume(struct t4_cq *cq)
 {
+	BUG_ON(cq->sw_in_use < 1);
 	cq->sw_in_use--;
 	if (++cq->sw_cidx == cq->size)
 		cq->sw_cidx = 0;
@@ -519,7 +531,7 @@ static inline void t4_swcq_consume(struct t4_cq *cq)
 static inline void t4_hwcq_consume(struct t4_cq *cq)
 {
 	cq->bits_type_ts = cq->queue[cq->cidx].bits_type_ts;
-	if (++cq->cidx_inc == (cq->size >> 4)) {
+	if (++cq->cidx_inc == (cq->size >> 4) || cq->cidx_inc == CIDXINC_MASK) {
 		u32 val;
 
 		val = SEINTARM(0) | CIDXINC(cq->cidx_inc) | TIMERREG(7) |
@@ -552,6 +564,7 @@ static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
 		ret = -EOVERFLOW;
 		cq->error = 1;
 		printk(KERN_ERR MOD "cq overflow cqid %u\n", cq->cqid);
+		BUG_ON(1);
 	} else if (t4_valid_cqe(cq, &cq->queue[cq->cidx])) {
 		*cqe = &cq->queue[cq->cidx];
 		ret = 0;
@@ -562,6 +575,12 @@ static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
 
 static inline struct t4_cqe *t4_next_sw_cqe(struct t4_cq *cq)
 {
+	if (cq->sw_in_use == cq->size) {
+		PDBG("%s cxgb4 sw cq overflow cqid %u\n", __func__, cq->cqid);
+		cq->error = 1;
+		BUG_ON(1);
+		return NULL;
+	}
 	if (cq->sw_in_use)
 		return &cq->sw_queue[cq->sw_cidx];
 	return NULL;
......
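The new flush_cidx field above follows a sentinel pattern: -1 means "no flush position recorded", and when the consumer index catches up to the recorded position the sentinel is restored. A small illustrative re-statement of the bookkeeping in t4_sq_consume() (names invented; the real fields live in struct t4_sq):

/* Illustrative ring bookkeeping, mirroring t4_sq_consume(). */
struct ring {
        int size;
        int cidx;               /* consumer index */
        int in_use;
        int flush_cidx;         /* -1 = no flush point recorded */
};

static void ring_consume(struct ring *r)
{
        if (r->cidx == r->flush_cidx)
                r->flush_cidx = -1;     /* passed the recorded flush point */
        r->in_use--;
        if (++r->cidx == r->size)
                r->cidx = 0;            /* wrap around */
}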
...@@ -54,6 +54,8 @@ ...@@ -54,6 +54,8 @@
#define DRV_VERSION "1.0" #define DRV_VERSION "1.0"
#define DRV_RELDATE "April 4, 2008" #define DRV_RELDATE "April 4, 2008"
#define MLX4_IB_FLOW_MAX_PRIO 0xFFF
MODULE_AUTHOR("Roland Dreier"); MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver"); MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
MODULE_LICENSE("Dual BSD/GPL"); MODULE_LICENSE("Dual BSD/GPL");
...@@ -88,6 +90,25 @@ static void init_query_mad(struct ib_smp *mad) ...@@ -88,6 +90,25 @@ static void init_query_mad(struct ib_smp *mad)
static union ib_gid zgid; static union ib_gid zgid;
static int check_flow_steering_support(struct mlx4_dev *dev)
{
int ib_num_ports = 0;
int i;
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
ib_num_ports++;
if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
if (ib_num_ports || mlx4_is_mfunc(dev)) {
pr_warn("Device managed flow steering is unavailable "
"for IB ports or in multifunction env.\n");
return 0;
}
return 1;
}
return 0;
}
static int mlx4_ib_query_device(struct ib_device *ibdev, static int mlx4_ib_query_device(struct ib_device *ibdev,
struct ib_device_attr *props) struct ib_device_attr *props)
{ {
...@@ -144,6 +165,8 @@ static int mlx4_ib_query_device(struct ib_device *ibdev, ...@@ -144,6 +165,8 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B; props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
else else
props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A; props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A;
if (check_flow_steering_support(dev->dev))
props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
} }
props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) & props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
...@@ -798,6 +821,209 @@ struct mlx4_ib_steering { ...@@ -798,6 +821,209 @@ struct mlx4_ib_steering {
union ib_gid gid; union ib_gid gid;
}; };
static int parse_flow_attr(struct mlx4_dev *dev,
union ib_flow_spec *ib_spec,
struct _rule_hw *mlx4_spec)
{
enum mlx4_net_trans_rule_id type;
switch (ib_spec->type) {
case IB_FLOW_SPEC_ETH:
type = MLX4_NET_TRANS_RULE_ID_ETH;
memcpy(mlx4_spec->eth.dst_mac, ib_spec->eth.val.dst_mac,
ETH_ALEN);
memcpy(mlx4_spec->eth.dst_mac_msk, ib_spec->eth.mask.dst_mac,
ETH_ALEN);
mlx4_spec->eth.vlan_tag = ib_spec->eth.val.vlan_tag;
mlx4_spec->eth.vlan_tag_msk = ib_spec->eth.mask.vlan_tag;
break;
case IB_FLOW_SPEC_IPV4:
type = MLX4_NET_TRANS_RULE_ID_IPV4;
mlx4_spec->ipv4.src_ip = ib_spec->ipv4.val.src_ip;
mlx4_spec->ipv4.src_ip_msk = ib_spec->ipv4.mask.src_ip;
mlx4_spec->ipv4.dst_ip = ib_spec->ipv4.val.dst_ip;
mlx4_spec->ipv4.dst_ip_msk = ib_spec->ipv4.mask.dst_ip;
break;
case IB_FLOW_SPEC_TCP:
case IB_FLOW_SPEC_UDP:
type = ib_spec->type == IB_FLOW_SPEC_TCP ?
MLX4_NET_TRANS_RULE_ID_TCP :
MLX4_NET_TRANS_RULE_ID_UDP;
mlx4_spec->tcp_udp.dst_port = ib_spec->tcp_udp.val.dst_port;
mlx4_spec->tcp_udp.dst_port_msk = ib_spec->tcp_udp.mask.dst_port;
mlx4_spec->tcp_udp.src_port = ib_spec->tcp_udp.val.src_port;
mlx4_spec->tcp_udp.src_port_msk = ib_spec->tcp_udp.mask.src_port;
break;
default:
return -EINVAL;
}
if (mlx4_map_sw_to_hw_steering_id(dev, type) < 0 ||
mlx4_hw_rule_sz(dev, type) < 0)
return -EINVAL;
mlx4_spec->id = cpu_to_be16(mlx4_map_sw_to_hw_steering_id(dev, type));
mlx4_spec->size = mlx4_hw_rule_sz(dev, type) >> 2;
return mlx4_hw_rule_sz(dev, type);
}
static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
int domain,
enum mlx4_net_trans_promisc_mode flow_type,
u64 *reg_id)
{
int ret, i;
int size = 0;
void *ib_flow;
struct mlx4_ib_dev *mdev = to_mdev(qp->device);
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_net_trans_rule_hw_ctrl *ctrl;
size_t rule_size = sizeof(struct mlx4_net_trans_rule_hw_ctrl) +
(sizeof(struct _rule_hw) * flow_attr->num_of_specs);
static const u16 __mlx4_domain[] = {
[IB_FLOW_DOMAIN_USER] = MLX4_DOMAIN_UVERBS,
[IB_FLOW_DOMAIN_ETHTOOL] = MLX4_DOMAIN_ETHTOOL,
[IB_FLOW_DOMAIN_RFS] = MLX4_DOMAIN_RFS,
[IB_FLOW_DOMAIN_NIC] = MLX4_DOMAIN_NIC,
};
if (flow_attr->priority > MLX4_IB_FLOW_MAX_PRIO) {
pr_err("Invalid priority value %d\n", flow_attr->priority);
return -EINVAL;
}
if (domain >= IB_FLOW_DOMAIN_NUM) {
pr_err("Invalid domain value %d\n", domain);
return -EINVAL;
}
if (mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type) < 0)
return -EINVAL;
mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
memset(mailbox->buf, 0, rule_size);
ctrl = mailbox->buf;
ctrl->prio = cpu_to_be16(__mlx4_domain[domain] |
flow_attr->priority);
ctrl->type = mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type);
ctrl->port = flow_attr->port;
ctrl->qpn = cpu_to_be32(qp->qp_num);
ib_flow = flow_attr + 1;
size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
for (i = 0; i < flow_attr->num_of_specs; i++) {
ret = parse_flow_attr(mdev->dev, ib_flow, mailbox->buf + size);
if (ret < 0) {
mlx4_free_cmd_mailbox(mdev->dev, mailbox);
return -EINVAL;
}
ib_flow += ((union ib_flow_spec *) ib_flow)->size;
size += ret;
}
ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0,
MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (ret == -ENOMEM)
pr_err("mcg table is full. Failed to register network rule.\n");
else if (ret == -ENXIO)
pr_err("Device managed flow steering is disabled. Failed to register network rule.\n");
else if (ret)
pr_err("Invalid argument. Failed to register network rule.\n");
mlx4_free_cmd_mailbox(mdev->dev, mailbox);
return ret;
}
static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
{
int err;
err = mlx4_cmd(dev, reg_id, 0, 0,
MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (err)
pr_err("Fail to detach network rule. registration id = 0x%llx\n",
reg_id);
return err;
}
static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
struct ib_flow_attr *flow_attr,
int domain)
{
int err = 0, i = 0;
struct mlx4_ib_flow *mflow;
enum mlx4_net_trans_promisc_mode type[2];
memset(type, 0, sizeof(type));
mflow = kzalloc(sizeof(*mflow), GFP_KERNEL);
if (!mflow) {
err = -ENOMEM;
goto err_free;
}
switch (flow_attr->type) {
case IB_FLOW_ATTR_NORMAL:
type[0] = MLX4_FS_REGULAR;
break;
case IB_FLOW_ATTR_ALL_DEFAULT:
type[0] = MLX4_FS_ALL_DEFAULT;
break;
case IB_FLOW_ATTR_MC_DEFAULT:
type[0] = MLX4_FS_MC_DEFAULT;
break;
case IB_FLOW_ATTR_SNIFFER:
type[0] = MLX4_FS_UC_SNIFFER;
type[1] = MLX4_FS_MC_SNIFFER;
break;
default:
err = -EINVAL;
goto err_free;
}
while (i < ARRAY_SIZE(type) && type[i]) {
err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i],
&mflow->reg_id[i]);
if (err)
goto err_free;
i++;
}
return &mflow->ibflow;
err_free:
kfree(mflow);
return ERR_PTR(err);
}
static int mlx4_ib_destroy_flow(struct ib_flow *flow_id)
{
int err, ret = 0;
int i = 0;
struct mlx4_ib_dev *mdev = to_mdev(flow_id->qp->device);
struct mlx4_ib_flow *mflow = to_mflow(flow_id);
while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i]) {
err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i]);
if (err)
ret = err;
i++;
}
kfree(mflow);
return ret;
}
static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{ {
int err; int err;
...@@ -1461,6 +1687,15 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) ...@@ -1461,6 +1687,15 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
(1ull << IB_USER_VERBS_CMD_CLOSE_XRCD); (1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
} }
if (check_flow_steering_support(dev)) {
ibdev->ib_dev.create_flow = mlx4_ib_create_flow;
ibdev->ib_dev.destroy_flow = mlx4_ib_destroy_flow;
ibdev->ib_dev.uverbs_cmd_mask |=
(1ull << IB_USER_VERBS_CMD_CREATE_FLOW) |
(1ull << IB_USER_VERBS_CMD_DESTROY_FLOW);
}
mlx4_ib_alloc_eqs(dev, ibdev); mlx4_ib_alloc_eqs(dev, ibdev);
spin_lock_init(&iboe->lock); spin_lock_init(&iboe->lock);
......
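Consumers reach these methods through the ib_create_flow() verb this series exports. A minimal sketch of attaching an exact-MAC rule to a raw packet QP, assuming the attr-followed-by-specs buffer layout that __mlx4_ib_create_flow() walks above (attach_mac_rule() itself is a hypothetical helper, not part of this series):

static struct ib_flow *attach_mac_rule(struct ib_qp *qp, u8 port, const u8 *mac)
{
	struct ib_flow_attr *attr;
	struct ib_flow_spec_eth *eth;
	struct ib_flow *flow;

	attr = kzalloc(sizeof(*attr) + sizeof(*eth), GFP_KERNEL);
	if (!attr)
		return ERR_PTR(-ENOMEM);

	attr->type = IB_FLOW_ATTR_NORMAL;	/* maps to MLX4_FS_REGULAR above */
	attr->size = sizeof(*attr) + sizeof(*eth);
	attr->num_of_specs = 1;
	attr->port = port;

	eth = (struct ib_flow_spec_eth *)(attr + 1);
	eth->type = IB_FLOW_SPEC_ETH;
	eth->size = sizeof(*eth);
	memcpy(eth->val.dst_mac, mac, ETH_ALEN);
	memset(eth->mask.dst_mac, 0xff, ETH_ALEN);	/* match all 48 bits */

	flow = ib_create_flow(qp, attr, IB_FLOW_DOMAIN_USER);
	kfree(attr);
	return flow;
}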
...@@ -132,6 +132,12 @@ struct mlx4_ib_fmr { ...@@ -132,6 +132,12 @@ struct mlx4_ib_fmr {
struct mlx4_fmr mfmr; struct mlx4_fmr mfmr;
}; };
struct mlx4_ib_flow {
struct ib_flow ibflow;
/* translating DMFS verbs sniffer rule to FW API requires two reg IDs */
u64 reg_id[2];
};
struct mlx4_ib_wq { struct mlx4_ib_wq {
u64 *wrid; u64 *wrid;
spinlock_t lock; spinlock_t lock;
...@@ -552,6 +558,12 @@ static inline struct mlx4_ib_fmr *to_mfmr(struct ib_fmr *ibfmr) ...@@ -552,6 +558,12 @@ static inline struct mlx4_ib_fmr *to_mfmr(struct ib_fmr *ibfmr)
{ {
return container_of(ibfmr, struct mlx4_ib_fmr, ibfmr); return container_of(ibfmr, struct mlx4_ib_fmr, ibfmr);
} }
static inline struct mlx4_ib_flow *to_mflow(struct ib_flow *ibflow)
{
return container_of(ibflow, struct mlx4_ib_flow, ibflow);
}
static inline struct mlx4_ib_qp *to_mqp(struct ib_qp *ibqp) static inline struct mlx4_ib_qp *to_mqp(struct ib_qp *ibqp)
{ {
return container_of(ibqp, struct mlx4_ib_qp, ibqp); return container_of(ibqp, struct mlx4_ib_qp, ibqp);
......
...@@ -56,10 +56,12 @@ struct ocrdma_dev_attr { ...@@ -56,10 +56,12 @@ struct ocrdma_dev_attr {
u16 max_qp; u16 max_qp;
u16 max_wqe; u16 max_wqe;
u16 max_rqe; u16 max_rqe;
u16 max_srq;
u32 max_inline_data; u32 max_inline_data;
int max_send_sge; int max_send_sge;
int max_recv_sge; int max_recv_sge;
int max_srq_sge; int max_srq_sge;
int max_rdma_sge;
int max_mr; int max_mr;
u64 max_mr_size; u64 max_mr_size;
u32 max_num_mr_pbl; u32 max_num_mr_pbl;
...@@ -130,8 +132,7 @@ struct ocrdma_dev { ...@@ -130,8 +132,7 @@ struct ocrdma_dev {
struct ocrdma_cq **cq_tbl; struct ocrdma_cq **cq_tbl;
struct ocrdma_qp **qp_tbl; struct ocrdma_qp **qp_tbl;
struct ocrdma_eq meq; struct ocrdma_eq *eq_tbl;
struct ocrdma_eq *qp_eq_tbl;
int eq_cnt; int eq_cnt;
u16 base_eqid; u16 base_eqid;
u16 max_eq; u16 max_eq;
...@@ -168,11 +169,12 @@ struct ocrdma_dev { ...@@ -168,11 +169,12 @@ struct ocrdma_dev {
struct list_head entry; struct list_head entry;
struct rcu_head rcu; struct rcu_head rcu;
int id; int id;
u64 stag_arr[OCRDMA_MAX_STAG];
u16 pvid;
}; };
struct ocrdma_cq { struct ocrdma_cq {
struct ib_cq ibcq; struct ib_cq ibcq;
struct ocrdma_dev *dev;
struct ocrdma_cqe *va; struct ocrdma_cqe *va;
u32 phase; u32 phase;
u32 getp; /* pointer to pending wrs to u32 getp; /* pointer to pending wrs to
...@@ -214,7 +216,6 @@ struct ocrdma_pd { ...@@ -214,7 +216,6 @@ struct ocrdma_pd {
struct ocrdma_ah { struct ocrdma_ah {
struct ib_ah ibah; struct ib_ah ibah;
struct ocrdma_dev *dev;
struct ocrdma_av *av; struct ocrdma_av *av;
u16 sgid_index; u16 sgid_index;
u32 id; u32 id;
...@@ -234,7 +235,6 @@ struct ocrdma_qp_hwq_info { ...@@ -234,7 +235,6 @@ struct ocrdma_qp_hwq_info {
struct ocrdma_srq { struct ocrdma_srq {
struct ib_srq ibsrq; struct ib_srq ibsrq;
struct ocrdma_dev *dev;
u8 __iomem *db; u8 __iomem *db;
struct ocrdma_qp_hwq_info rq; struct ocrdma_qp_hwq_info rq;
u64 *rqe_wr_id_tbl; u64 *rqe_wr_id_tbl;
...@@ -290,10 +290,11 @@ struct ocrdma_qp { ...@@ -290,10 +290,11 @@ struct ocrdma_qp {
u32 qkey; u32 qkey;
bool dpp_enabled; bool dpp_enabled;
u8 *ird_q_va; u8 *ird_q_va;
bool signaled;
u16 db_cache;
}; };
struct ocrdma_hw_mr { struct ocrdma_hw_mr {
struct ocrdma_dev *dev;
u32 lkey; u32 lkey;
u8 fr_mr; u8 fr_mr;
u8 remote_atomic; u8 remote_atomic;
...@@ -317,15 +318,16 @@ struct ocrdma_mr { ...@@ -317,15 +318,16 @@ struct ocrdma_mr {
struct ib_mr ibmr; struct ib_mr ibmr;
struct ib_umem *umem; struct ib_umem *umem;
struct ocrdma_hw_mr hwmr; struct ocrdma_hw_mr hwmr;
struct ocrdma_pd *pd;
}; };
struct ocrdma_ucontext { struct ocrdma_ucontext {
struct ib_ucontext ibucontext; struct ib_ucontext ibucontext;
struct ocrdma_dev *dev;
struct list_head mm_head; struct list_head mm_head;
struct mutex mm_list_lock; /* protects list entries of mm type */ struct mutex mm_list_lock; /* protects list entries of mm type */
struct ocrdma_pd *cntxt_pd;
int pd_in_use;
struct { struct {
u32 *va; u32 *va;
dma_addr_t pa; dma_addr_t pa;
...@@ -386,14 +388,14 @@ static inline struct ocrdma_srq *get_ocrdma_srq(struct ib_srq *ibsrq) ...@@ -386,14 +388,14 @@ static inline struct ocrdma_srq *get_ocrdma_srq(struct ib_srq *ibsrq)
static inline int ocrdma_get_num_posted_shift(struct ocrdma_qp *qp) static inline int ocrdma_get_num_posted_shift(struct ocrdma_qp *qp)
{ {
return ((qp->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY && return ((qp->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY &&
qp->id < 64) ? 24 : 16); qp->id < 128) ? 24 : 16);
} }
static inline int is_cqe_valid(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe) static inline int is_cqe_valid(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe)
{ {
int cqe_valid; int cqe_valid;
cqe_valid = le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_VALID; cqe_valid = le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_VALID;
return ((cqe_valid == cq->phase) ? 1 : 0); return (cqe_valid == cq->phase);
} }
static inline int is_cqe_for_sq(struct ocrdma_cqe *cqe) static inline int is_cqe_for_sq(struct ocrdma_cqe *cqe)
......
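is_cqe_valid() above encodes the usual phase-bit convention: hardware writes CQEs with a valid bit equal to the current lap's phase, and the consumer flips its expected phase on every wrap, so stale entries from the previous lap compare unequal. A hedged sketch of the peek/advance step (max_hw_cqe is an assumed ring-size field, not shown in this hunk):

static struct ocrdma_cqe *ocrdma_peek_cqe(struct ocrdma_cq *cq)
{
	struct ocrdma_cqe *cqe = &cq->va[cq->getp];

	if (!is_cqe_valid(cq, cqe))
		return NULL;			/* still owned by hardware */
	if (++cq->getp == cq->max_hw_cqe) {
		cq->getp = 0;
		cq->phase ^= OCRDMA_CQE_VALID;	/* wrapped: flip expected phase */
	}
	return cqe;
}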
...@@ -28,6 +28,9 @@ ...@@ -28,6 +28,9 @@
#ifndef __OCRDMA_ABI_H__ #ifndef __OCRDMA_ABI_H__
#define __OCRDMA_ABI_H__ #define __OCRDMA_ABI_H__
#define OCRDMA_ABI_VERSION 1
/* user kernel communication data structures. */
struct ocrdma_alloc_ucontext_resp { struct ocrdma_alloc_ucontext_resp {
u32 dev_id; u32 dev_id;
u32 wqe_size; u32 wqe_size;
...@@ -35,16 +38,16 @@ struct ocrdma_alloc_ucontext_resp { ...@@ -35,16 +38,16 @@ struct ocrdma_alloc_ucontext_resp {
u32 dpp_wqe_size; u32 dpp_wqe_size;
u64 ah_tbl_page; u64 ah_tbl_page;
u32 ah_tbl_len; u32 ah_tbl_len;
u32 rsvd;
u8 fw_ver[32];
u32 rqe_size; u32 rqe_size;
u8 fw_ver[32];
/* for future use/new features in progress */
u64 rsvd1; u64 rsvd1;
} __packed; u64 rsvd2;
};
/* user kernel communication data structures. */
struct ocrdma_alloc_pd_ureq { struct ocrdma_alloc_pd_ureq {
u64 rsvd1; u64 rsvd1;
} __packed; };
struct ocrdma_alloc_pd_uresp { struct ocrdma_alloc_pd_uresp {
u32 id; u32 id;
...@@ -52,12 +55,12 @@ struct ocrdma_alloc_pd_uresp { ...@@ -52,12 +55,12 @@ struct ocrdma_alloc_pd_uresp {
u32 dpp_page_addr_hi; u32 dpp_page_addr_hi;
u32 dpp_page_addr_lo; u32 dpp_page_addr_lo;
u64 rsvd1; u64 rsvd1;
} __packed; };
struct ocrdma_create_cq_ureq { struct ocrdma_create_cq_ureq {
u32 dpp_cq; u32 dpp_cq;
u32 rsvd; u32 rsvd; /* pad */
} __packed; };
#define MAX_CQ_PAGES 8 #define MAX_CQ_PAGES 8
struct ocrdma_create_cq_uresp { struct ocrdma_create_cq_uresp {
...@@ -69,9 +72,10 @@ struct ocrdma_create_cq_uresp { ...@@ -69,9 +72,10 @@ struct ocrdma_create_cq_uresp {
u64 db_page_addr; u64 db_page_addr;
u32 db_page_size; u32 db_page_size;
u32 phase_change; u32 phase_change;
/* for future use/new features in progress */
u64 rsvd1; u64 rsvd1;
u64 rsvd2; u64 rsvd2;
} __packed; };
#define MAX_QP_PAGES 8 #define MAX_QP_PAGES 8
#define MAX_UD_AV_PAGES 8 #define MAX_UD_AV_PAGES 8
...@@ -80,14 +84,14 @@ struct ocrdma_create_qp_ureq { ...@@ -80,14 +84,14 @@ struct ocrdma_create_qp_ureq {
u8 enable_dpp_cq; u8 enable_dpp_cq;
u8 rsvd; u8 rsvd;
u16 dpp_cq_id; u16 dpp_cq_id;
u32 rsvd1; u32 rsvd1; /* pad */
}; };
struct ocrdma_create_qp_uresp { struct ocrdma_create_qp_uresp {
u16 qp_id; u16 qp_id;
u16 sq_dbid; u16 sq_dbid;
u16 rq_dbid; u16 rq_dbid;
u16 resv0; u16 resv0; /* pad */
u32 sq_page_size; u32 sq_page_size;
u32 rq_page_size; u32 rq_page_size;
u32 num_sq_pages; u32 num_sq_pages;
...@@ -98,19 +102,19 @@ struct ocrdma_create_qp_uresp { ...@@ -98,19 +102,19 @@ struct ocrdma_create_qp_uresp {
u32 db_page_size; u32 db_page_size;
u32 dpp_credit; u32 dpp_credit;
u32 dpp_offset; u32 dpp_offset;
u32 rsvd1;
u32 num_wqe_allocated; u32 num_wqe_allocated;
u32 num_rqe_allocated; u32 num_rqe_allocated;
u32 db_sq_offset; u32 db_sq_offset;
u32 db_rq_offset; u32 db_rq_offset;
u32 db_shift; u32 db_shift;
u64 rsvd1;
u64 rsvd2; u64 rsvd2;
u64 rsvd3; u64 rsvd3;
} __packed; } __packed;
struct ocrdma_create_srq_uresp { struct ocrdma_create_srq_uresp {
u16 rq_dbid; u16 rq_dbid;
u16 resv0; u16 resv0; /* pad */
u32 resv1; u32 resv1;
u32 rq_page_size; u32 rq_page_size;
...@@ -126,6 +130,6 @@ struct ocrdma_create_srq_uresp { ...@@ -126,6 +130,6 @@ struct ocrdma_create_srq_uresp {
u64 rsvd2; u64 rsvd2;
u64 rsvd3; u64 rsvd3;
} __packed; };
#endif /* __OCRDMA_ABI_H__ */ #endif /* __OCRDMA_ABI_H__ */
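Dropping __packed is safe here because the converted structs carry explicit pad fields (rsvd, resv0) that keep every member naturally aligned, so the layout matches across 32- and 64-bit user/kernel pairs without forcing byte access. A compile-time check can pin such an ABI; the two sizes below follow directly from the fields shown in this hunk:

static inline void ocrdma_abi_build_checks(void)
{
	BUILD_BUG_ON(sizeof(struct ocrdma_alloc_pd_ureq) != 8);
	BUILD_BUG_ON(sizeof(struct ocrdma_create_cq_ureq) != 8);
}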
...@@ -35,12 +35,11 @@ ...@@ -35,12 +35,11 @@
#include "ocrdma_ah.h" #include "ocrdma_ah.h"
#include "ocrdma_hw.h" #include "ocrdma_hw.h"
static inline int set_av_attr(struct ocrdma_ah *ah, static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
struct ib_ah_attr *attr, int pdid) struct ib_ah_attr *attr, int pdid)
{ {
int status = 0; int status = 0;
u16 vlan_tag; bool vlan_enabled = false; u16 vlan_tag; bool vlan_enabled = false;
struct ocrdma_dev *dev = ah->dev;
struct ocrdma_eth_vlan eth; struct ocrdma_eth_vlan eth;
struct ocrdma_grh grh; struct ocrdma_grh grh;
int eth_sz; int eth_sz;
...@@ -51,6 +50,8 @@ static inline int set_av_attr(struct ocrdma_ah *ah, ...@@ -51,6 +50,8 @@ static inline int set_av_attr(struct ocrdma_ah *ah,
ah->sgid_index = attr->grh.sgid_index; ah->sgid_index = attr->grh.sgid_index;
vlan_tag = rdma_get_vlan_id(&attr->grh.dgid); vlan_tag = rdma_get_vlan_id(&attr->grh.dgid);
if (!vlan_tag || (vlan_tag > 0xFFF))
vlan_tag = dev->pvid;
if (vlan_tag && (vlan_tag < 0x1000)) { if (vlan_tag && (vlan_tag < 0x1000)) {
eth.eth_type = cpu_to_be16(0x8100); eth.eth_type = cpu_to_be16(0x8100);
eth.roce_eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE); eth.roce_eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE);
...@@ -92,7 +93,7 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr) ...@@ -92,7 +93,7 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
int status; int status;
struct ocrdma_ah *ah; struct ocrdma_ah *ah;
struct ocrdma_pd *pd = get_ocrdma_pd(ibpd); struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
struct ocrdma_dev *dev = pd->dev; struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
if (!(attr->ah_flags & IB_AH_GRH)) if (!(attr->ah_flags & IB_AH_GRH))
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
...@@ -100,12 +101,11 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr) ...@@ -100,12 +101,11 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
ah = kzalloc(sizeof *ah, GFP_ATOMIC); ah = kzalloc(sizeof *ah, GFP_ATOMIC);
if (!ah) if (!ah)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
ah->dev = pd->dev;
status = ocrdma_alloc_av(dev, ah); status = ocrdma_alloc_av(dev, ah);
if (status) if (status)
goto av_err; goto av_err;
status = set_av_attr(ah, attr, pd->id); status = set_av_attr(dev, ah, attr, pd->id);
if (status) if (status)
goto av_conf_err; goto av_conf_err;
...@@ -126,7 +126,9 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr) ...@@ -126,7 +126,9 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
int ocrdma_destroy_ah(struct ib_ah *ibah) int ocrdma_destroy_ah(struct ib_ah *ibah)
{ {
struct ocrdma_ah *ah = get_ocrdma_ah(ibah); struct ocrdma_ah *ah = get_ocrdma_ah(ibah);
ocrdma_free_av(ah->dev, ah); struct ocrdma_dev *dev = get_ocrdma_dev(ibah->device);
ocrdma_free_av(dev, ah);
kfree(ah); kfree(ah);
return 0; return 0;
} }
......
...@@ -78,6 +78,11 @@ static inline void ocrdma_copy_le32_to_cpu(void *dst, void *src, u32 len) ...@@ -78,6 +78,11 @@ static inline void ocrdma_copy_le32_to_cpu(void *dst, void *src, u32 len)
#endif #endif
} }
static inline u64 ocrdma_get_db_addr(struct ocrdma_dev *dev, u32 pdid)
{
return dev->nic_info.unmapped_db + (pdid * dev->nic_info.db_page_size);
}
int ocrdma_init_hw(struct ocrdma_dev *); int ocrdma_init_hw(struct ocrdma_dev *);
void ocrdma_cleanup_hw(struct ocrdma_dev *); void ocrdma_cleanup_hw(struct ocrdma_dev *);
...@@ -86,6 +91,7 @@ void ocrdma_ring_cq_db(struct ocrdma_dev *, u16 cq_id, bool armed, ...@@ -86,6 +91,7 @@ void ocrdma_ring_cq_db(struct ocrdma_dev *, u16 cq_id, bool armed,
bool solicited, u16 cqe_popped); bool solicited, u16 cqe_popped);
/* verbs specific mailbox commands */ /* verbs specific mailbox commands */
int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed);
int ocrdma_query_config(struct ocrdma_dev *, int ocrdma_query_config(struct ocrdma_dev *,
struct ocrdma_mbx_query_config *config); struct ocrdma_mbx_query_config *config);
int ocrdma_resolve_dgid(struct ocrdma_dev *, union ib_gid *dgid, u8 *mac_addr); int ocrdma_resolve_dgid(struct ocrdma_dev *, union ib_gid *dgid, u8 *mac_addr);
...@@ -100,7 +106,7 @@ int ocrdma_mbx_dealloc_lkey(struct ocrdma_dev *, int fmr, u32 lkey); ...@@ -100,7 +106,7 @@ int ocrdma_mbx_dealloc_lkey(struct ocrdma_dev *, int fmr, u32 lkey);
int ocrdma_reg_mr(struct ocrdma_dev *, struct ocrdma_hw_mr *hwmr, int ocrdma_reg_mr(struct ocrdma_dev *, struct ocrdma_hw_mr *hwmr,
u32 pd_id, int acc); u32 pd_id, int acc);
int ocrdma_mbx_create_cq(struct ocrdma_dev *, struct ocrdma_cq *, int ocrdma_mbx_create_cq(struct ocrdma_dev *, struct ocrdma_cq *,
int entries, int dpp_cq); int entries, int dpp_cq, u16 pd_id);
int ocrdma_mbx_destroy_cq(struct ocrdma_dev *, struct ocrdma_cq *); int ocrdma_mbx_destroy_cq(struct ocrdma_dev *, struct ocrdma_cq *);
int ocrdma_mbx_create_qp(struct ocrdma_qp *, struct ib_qp_init_attr *attrs, int ocrdma_mbx_create_qp(struct ocrdma_qp *, struct ib_qp_init_attr *attrs,
...@@ -112,8 +118,7 @@ int ocrdma_mbx_modify_qp(struct ocrdma_dev *, struct ocrdma_qp *, ...@@ -112,8 +118,7 @@ int ocrdma_mbx_modify_qp(struct ocrdma_dev *, struct ocrdma_qp *,
int ocrdma_mbx_query_qp(struct ocrdma_dev *, struct ocrdma_qp *, int ocrdma_mbx_query_qp(struct ocrdma_dev *, struct ocrdma_qp *,
struct ocrdma_qp_params *param); struct ocrdma_qp_params *param);
int ocrdma_mbx_destroy_qp(struct ocrdma_dev *, struct ocrdma_qp *); int ocrdma_mbx_destroy_qp(struct ocrdma_dev *, struct ocrdma_qp *);
int ocrdma_mbx_create_srq(struct ocrdma_dev *, struct ocrdma_srq *,
int ocrdma_mbx_create_srq(struct ocrdma_srq *,
struct ib_srq_init_attr *, struct ib_srq_init_attr *,
struct ocrdma_pd *); struct ocrdma_pd *);
int ocrdma_mbx_modify_srq(struct ocrdma_srq *, struct ib_srq_attr *); int ocrdma_mbx_modify_srq(struct ocrdma_srq *, struct ib_srq_attr *);
...@@ -123,7 +128,7 @@ int ocrdma_mbx_destroy_srq(struct ocrdma_dev *, struct ocrdma_srq *); ...@@ -123,7 +128,7 @@ int ocrdma_mbx_destroy_srq(struct ocrdma_dev *, struct ocrdma_srq *);
int ocrdma_alloc_av(struct ocrdma_dev *, struct ocrdma_ah *); int ocrdma_alloc_av(struct ocrdma_dev *, struct ocrdma_ah *);
int ocrdma_free_av(struct ocrdma_dev *, struct ocrdma_ah *); int ocrdma_free_av(struct ocrdma_dev *, struct ocrdma_ah *);
int ocrdma_qp_state_machine(struct ocrdma_qp *, enum ib_qp_state new_state, int ocrdma_qp_state_change(struct ocrdma_qp *, enum ib_qp_state new_state,
enum ib_qp_state *old_ib_state); enum ib_qp_state *old_ib_state);
bool ocrdma_is_qp_in_sq_flushlist(struct ocrdma_cq *, struct ocrdma_qp *); bool ocrdma_is_qp_in_sq_flushlist(struct ocrdma_cq *, struct ocrdma_qp *);
bool ocrdma_is_qp_in_rq_flushlist(struct ocrdma_cq *, struct ocrdma_qp *); bool ocrdma_is_qp_in_rq_flushlist(struct ocrdma_cq *, struct ocrdma_qp *);
......
...@@ -39,6 +39,7 @@ ...@@ -39,6 +39,7 @@
#include "ocrdma_ah.h" #include "ocrdma_ah.h"
#include "be_roce.h" #include "be_roce.h"
#include "ocrdma_hw.h" #include "ocrdma_hw.h"
#include "ocrdma_abi.h"
MODULE_VERSION(OCRDMA_ROCE_DEV_VERSION); MODULE_VERSION(OCRDMA_ROCE_DEV_VERSION);
MODULE_DESCRIPTION("Emulex RoCE HCA Driver"); MODULE_DESCRIPTION("Emulex RoCE HCA Driver");
...@@ -265,6 +266,7 @@ static int ocrdma_register_device(struct ocrdma_dev *dev) ...@@ -265,6 +266,7 @@ static int ocrdma_register_device(struct ocrdma_dev *dev)
memcpy(dev->ibdev.node_desc, OCRDMA_NODE_DESC, memcpy(dev->ibdev.node_desc, OCRDMA_NODE_DESC,
sizeof(OCRDMA_NODE_DESC)); sizeof(OCRDMA_NODE_DESC));
dev->ibdev.owner = THIS_MODULE; dev->ibdev.owner = THIS_MODULE;
dev->ibdev.uverbs_abi_ver = OCRDMA_ABI_VERSION;
dev->ibdev.uverbs_cmd_mask = dev->ibdev.uverbs_cmd_mask =
OCRDMA_UVERBS(GET_CONTEXT) | OCRDMA_UVERBS(GET_CONTEXT) |
OCRDMA_UVERBS(QUERY_DEVICE) | OCRDMA_UVERBS(QUERY_DEVICE) |
...@@ -326,9 +328,14 @@ static int ocrdma_register_device(struct ocrdma_dev *dev) ...@@ -326,9 +328,14 @@ static int ocrdma_register_device(struct ocrdma_dev *dev)
dev->ibdev.req_notify_cq = ocrdma_arm_cq; dev->ibdev.req_notify_cq = ocrdma_arm_cq;
dev->ibdev.get_dma_mr = ocrdma_get_dma_mr; dev->ibdev.get_dma_mr = ocrdma_get_dma_mr;
dev->ibdev.reg_phys_mr = ocrdma_reg_kernel_mr;
dev->ibdev.dereg_mr = ocrdma_dereg_mr; dev->ibdev.dereg_mr = ocrdma_dereg_mr;
dev->ibdev.reg_user_mr = ocrdma_reg_user_mr; dev->ibdev.reg_user_mr = ocrdma_reg_user_mr;
dev->ibdev.alloc_fast_reg_mr = ocrdma_alloc_frmr;
dev->ibdev.alloc_fast_reg_page_list = ocrdma_alloc_frmr_page_list;
dev->ibdev.free_fast_reg_page_list = ocrdma_free_frmr_page_list;
/* mandatory to support user space verbs consumer. */ /* mandatory to support user space verbs consumer. */
dev->ibdev.alloc_ucontext = ocrdma_alloc_ucontext; dev->ibdev.alloc_ucontext = ocrdma_alloc_ucontext;
dev->ibdev.dealloc_ucontext = ocrdma_dealloc_ucontext; dev->ibdev.dealloc_ucontext = ocrdma_dealloc_ucontext;
......
...@@ -72,6 +72,7 @@ int ocrdma_query_qp(struct ib_qp *, ...@@ -72,6 +72,7 @@ int ocrdma_query_qp(struct ib_qp *,
struct ib_qp_attr *qp_attr, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_qp_init_attr *); int qp_attr_mask, struct ib_qp_init_attr *);
int ocrdma_destroy_qp(struct ib_qp *); int ocrdma_destroy_qp(struct ib_qp *);
void ocrdma_del_flush_qp(struct ocrdma_qp *qp);
struct ib_srq *ocrdma_create_srq(struct ib_pd *, struct ib_srq_init_attr *, struct ib_srq *ocrdma_create_srq(struct ib_pd *, struct ib_srq_init_attr *,
struct ib_udata *); struct ib_udata *);
...@@ -89,5 +90,10 @@ struct ib_mr *ocrdma_reg_kernel_mr(struct ib_pd *, ...@@ -89,5 +90,10 @@ struct ib_mr *ocrdma_reg_kernel_mr(struct ib_pd *,
int num_phys_buf, int acc, u64 *iova_start); int num_phys_buf, int acc, u64 *iova_start);
struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *, u64 start, u64 length, struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *, u64 start, u64 length,
u64 virt, int acc, struct ib_udata *); u64 virt, int acc, struct ib_udata *);
struct ib_mr *ocrdma_alloc_frmr(struct ib_pd *pd, int max_page_list_len);
struct ib_fast_reg_page_list *ocrdma_alloc_frmr_page_list(struct ib_device
*ibdev,
int page_list_len);
void ocrdma_free_frmr_page_list(struct ib_fast_reg_page_list *page_list);
#endif /* __OCRDMA_VERBS_H__ */ #endif /* __OCRDMA_VERBS_H__ */
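A ULP drives these through the generic fast-register flow: allocate the MR and page list once, then post an IB_WR_FAST_REG_MR work request per mapping. A hedged sketch using the core verbs of this kernel generation (error handling and key rotation omitted; npages and qp come from the caller):

struct ib_mr *mr = ib_alloc_fast_reg_mr(pd, max_pages);
struct ib_fast_reg_page_list *frpl =
	ib_alloc_fast_reg_page_list(device, max_pages);
struct ib_send_wr fr_wr, *bad_wr;
int ret;

/* frpl->page_list[0..npages-1] holds 4K-aligned DMA addresses */
memset(&fr_wr, 0, sizeof(fr_wr));
fr_wr.opcode = IB_WR_FAST_REG_MR;
fr_wr.wr.fast_reg.iova_start = frpl->page_list[0];
fr_wr.wr.fast_reg.page_list = frpl;
fr_wr.wr.fast_reg.page_list_len = npages;
fr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
fr_wr.wr.fast_reg.length = npages * PAGE_SIZE;
fr_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE |
				 IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE;
fr_wr.wr.fast_reg.rkey = mr->rkey;
ret = ib_post_send(qp, &fr_wr, &bad_wr);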
...@@ -89,7 +89,6 @@ struct qlogic_ib_stats { ...@@ -89,7 +89,6 @@ struct qlogic_ib_stats {
extern struct qlogic_ib_stats qib_stats; extern struct qlogic_ib_stats qib_stats;
extern const struct pci_error_handlers qib_pci_err_handler; extern const struct pci_error_handlers qib_pci_err_handler;
extern struct pci_driver qib_driver;
#define QIB_CHIP_SWVERSION QIB_CHIP_VERS_MAJ #define QIB_CHIP_SWVERSION QIB_CHIP_VERS_MAJ
/* /*
...@@ -576,11 +575,13 @@ struct qib_pportdata { ...@@ -576,11 +575,13 @@ struct qib_pportdata {
/* read/write using lock */ /* read/write using lock */
spinlock_t sdma_lock ____cacheline_aligned_in_smp; spinlock_t sdma_lock ____cacheline_aligned_in_smp;
struct list_head sdma_activelist; struct list_head sdma_activelist;
struct list_head sdma_userpending;
u64 sdma_descq_added; u64 sdma_descq_added;
u64 sdma_descq_removed; u64 sdma_descq_removed;
u16 sdma_descq_tail; u16 sdma_descq_tail;
u16 sdma_descq_head; u16 sdma_descq_head;
u8 sdma_generation; u8 sdma_generation;
u8 sdma_intrequest;
struct tasklet_struct sdma_sw_clean_up_task struct tasklet_struct sdma_sw_clean_up_task
____cacheline_aligned_in_smp; ____cacheline_aligned_in_smp;
...@@ -1326,6 +1327,8 @@ int qib_setup_sdma(struct qib_pportdata *); ...@@ -1326,6 +1327,8 @@ int qib_setup_sdma(struct qib_pportdata *);
void qib_teardown_sdma(struct qib_pportdata *); void qib_teardown_sdma(struct qib_pportdata *);
void __qib_sdma_intr(struct qib_pportdata *); void __qib_sdma_intr(struct qib_pportdata *);
void qib_sdma_intr(struct qib_pportdata *); void qib_sdma_intr(struct qib_pportdata *);
void qib_user_sdma_send_desc(struct qib_pportdata *dd,
struct list_head *pktlist);
int qib_sdma_verbs_send(struct qib_pportdata *, struct qib_sge_state *, int qib_sdma_verbs_send(struct qib_pportdata *, struct qib_sge_state *,
u32, struct qib_verbs_txreq *); u32, struct qib_verbs_txreq *);
/* ppd->sdma_lock should be locked before calling this. */ /* ppd->sdma_lock should be locked before calling this. */
......
...@@ -279,7 +279,7 @@ struct qib_base_info { ...@@ -279,7 +279,7 @@ struct qib_base_info {
* may not be implemented; the user code must deal with this if it * may not be implemented; the user code must deal with this if it
* cares, or it must abort after initialization reports the difference. * cares, or it must abort after initialization reports the difference.
*/ */
#define QIB_USER_SWMINOR 12 #define QIB_USER_SWMINOR 13
#define QIB_USER_SWVERSION ((QIB_USER_SWMAJOR << 16) | QIB_USER_SWMINOR) #define QIB_USER_SWVERSION ((QIB_USER_SWMAJOR << 16) | QIB_USER_SWMINOR)
...@@ -701,7 +701,37 @@ struct qib_message_header { ...@@ -701,7 +701,37 @@ struct qib_message_header {
__be32 bth[3]; __be32 bth[3];
/* fields below this point are in host byte order */ /* fields below this point are in host byte order */
struct qib_header iph; struct qib_header iph;
/* fields below are simplified, but should match PSM */
/* some are accessed by the driver when packet splitting is needed */
__u8 sub_opcode; __u8 sub_opcode;
__u8 flags;
__u16 commidx;
__u32 ack_seq_num;
__u8 flowid;
__u8 hdr_dlen;
__u16 mqhdr;
__u32 uwords[4];
};
/* sequence number bits for message */
union qib_seqnum {
struct {
__u32 seq:11;
__u32 gen:8;
__u32 flow:5;
};
struct {
__u32 pkt:16;
__u32 msg:8;
};
__u32 val;
};
/* qib receiving-dma tid-session-member */
struct qib_tid_session_member {
__u16 tid;
__u16 offset;
__u16 length;
}; };
/* IB - LRH header consts */ /* IB - LRH header consts */
......
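The anonymous-struct union gives PSM-style code three views of one 32-bit sequence number without manual shifting. A hedged round-trip sketch (assumes the little-endian GCC bitfield layout the driver targets: seq in the low 11 bits, gen above it, flow on top):

static u32 qib_pack_seqnum(u32 seq, u32 gen, u32 flow)
{
	union qib_seqnum s;

	s.val = 0;
	s.seq = seq;	/* bits  0..10 */
	s.gen = gen;	/* bits 11..18 */
	s.flow = flow;	/* bits 19..23 */
	return s.val;	/* seq=7, gen=2, flow=1 yields 0x81007 */
}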
...@@ -1220,7 +1220,7 @@ static int qib_compatible_subctxts(int user_swmajor, int user_swminor) ...@@ -1220,7 +1220,7 @@ static int qib_compatible_subctxts(int user_swmajor, int user_swminor)
return user_swminor == 3; return user_swminor == 3;
default: default:
/* >= 4 are compatible (or are expected to be) */ /* >= 4 are compatible (or are expected to be) */
return user_swminor >= 4; return user_swminor <= QIB_USER_SWMINOR;
} }
} }
/* make no promises yet for future major versions */ /* make no promises yet for future major versions */
......
...@@ -1193,7 +1193,7 @@ static DEFINE_PCI_DEVICE_TABLE(qib_pci_tbl) = { ...@@ -1193,7 +1193,7 @@ static DEFINE_PCI_DEVICE_TABLE(qib_pci_tbl) = {
MODULE_DEVICE_TABLE(pci, qib_pci_tbl); MODULE_DEVICE_TABLE(pci, qib_pci_tbl);
struct pci_driver qib_driver = { static struct pci_driver qib_driver = {
.name = QIB_DRV_NAME, .name = QIB_DRV_NAME,
.probe = qib_init_one, .probe = qib_init_one,
.remove = qib_remove_one, .remove = qib_remove_one,
......
...@@ -415,7 +415,6 @@ struct cc_table_shadow { ...@@ -415,7 +415,6 @@ struct cc_table_shadow {
struct ib_cc_table_entry_shadow entries[CC_TABLE_SHADOW_MAX]; struct ib_cc_table_entry_shadow entries[CC_TABLE_SHADOW_MAX];
} __packed; } __packed;
#endif /* _QIB_MAD_H */
/* /*
* The PortSamplesControl.CounterMasks field is an array of 3 bit fields * The PortSamplesControl.CounterMasks field is an array of 3 bit fields
* which specify the N'th counter's capabilities. See ch. 16.1.3.2. * which specify the N'th counter's capabilities. See ch. 16.1.3.2.
...@@ -428,3 +427,5 @@ struct cc_table_shadow { ...@@ -428,3 +427,5 @@ struct cc_table_shadow {
COUNTER_MASK(1, 2) | \ COUNTER_MASK(1, 2) | \
COUNTER_MASK(1, 3) | \ COUNTER_MASK(1, 3) | \
COUNTER_MASK(1, 4)) COUNTER_MASK(1, 4))
#endif /* _QIB_MAD_H */
...@@ -283,12 +283,12 @@ int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent, ...@@ -283,12 +283,12 @@ int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent,
goto bail; goto bail;
} }
pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSIX); pos = dd->pcidev->msix_cap;
if (nent && *nent && pos) { if (nent && *nent && pos) {
qib_msix_setup(dd, pos, nent, entry); qib_msix_setup(dd, pos, nent, entry);
ret = 0; /* did it, either MSIx or INTx */ ret = 0; /* did it, either MSIx or INTx */
} else { } else {
pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI); pos = dd->pcidev->msi_cap;
if (pos) if (pos)
ret = qib_msi_setup(dd, pos); ret = qib_msi_setup(dd, pos);
else else
...@@ -357,7 +357,7 @@ int qib_reinit_intr(struct qib_devdata *dd) ...@@ -357,7 +357,7 @@ int qib_reinit_intr(struct qib_devdata *dd)
if (!dd->msi_lo) if (!dd->msi_lo)
goto bail; goto bail;
pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI); pos = dd->pcidev->msi_cap;
if (!pos) { if (!pos) {
qib_dev_err(dd, qib_dev_err(dd,
"Can't find MSI capability, can't restore MSI settings\n"); "Can't find MSI capability, can't restore MSI settings\n");
...@@ -426,7 +426,7 @@ void qib_enable_intx(struct pci_dev *pdev) ...@@ -426,7 +426,7 @@ void qib_enable_intx(struct pci_dev *pdev)
if (new != cw) if (new != cw)
pci_write_config_word(pdev, PCI_COMMAND, new); pci_write_config_word(pdev, PCI_COMMAND, new);
pos = pci_find_capability(pdev, PCI_CAP_ID_MSI); pos = pdev->msi_cap;
if (pos) { if (pos) {
/* then turn off MSI */ /* then turn off MSI */
pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &cw); pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &cw);
...@@ -434,7 +434,7 @@ void qib_enable_intx(struct pci_dev *pdev) ...@@ -434,7 +434,7 @@ void qib_enable_intx(struct pci_dev *pdev)
if (new != cw) if (new != cw)
pci_write_config_word(pdev, pos + PCI_MSI_FLAGS, new); pci_write_config_word(pdev, pos + PCI_MSI_FLAGS, new);
} }
pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); pos = pdev->msix_cap;
if (pos) { if (pos) {
/* then turn off MSIx */ /* then turn off MSIx */
pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS, &cw); pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS, &cw);
......
...@@ -423,8 +423,11 @@ void qib_sdma_intr(struct qib_pportdata *ppd) ...@@ -423,8 +423,11 @@ void qib_sdma_intr(struct qib_pportdata *ppd)
void __qib_sdma_intr(struct qib_pportdata *ppd) void __qib_sdma_intr(struct qib_pportdata *ppd)
{ {
if (__qib_sdma_running(ppd)) if (__qib_sdma_running(ppd)) {
qib_sdma_make_progress(ppd); qib_sdma_make_progress(ppd);
if (!list_empty(&ppd->sdma_userpending))
qib_user_sdma_send_desc(ppd, &ppd->sdma_userpending);
}
} }
int qib_setup_sdma(struct qib_pportdata *ppd) int qib_setup_sdma(struct qib_pportdata *ppd)
...@@ -452,6 +455,9 @@ int qib_setup_sdma(struct qib_pportdata *ppd) ...@@ -452,6 +455,9 @@ int qib_setup_sdma(struct qib_pportdata *ppd)
ppd->sdma_descq_removed = 0; ppd->sdma_descq_removed = 0;
ppd->sdma_descq_added = 0; ppd->sdma_descq_added = 0;
ppd->sdma_intrequest = 0;
INIT_LIST_HEAD(&ppd->sdma_userpending);
INIT_LIST_HEAD(&ppd->sdma_activelist); INIT_LIST_HEAD(&ppd->sdma_activelist);
tasklet_init(&ppd->sdma_sw_clean_up_task, sdma_sw_clean_up_task, tasklet_init(&ppd->sdma_sw_clean_up_task, sdma_sw_clean_up_task,
......
...@@ -817,7 +817,6 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc) ...@@ -817,7 +817,6 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
if (neigh) { if (neigh) {
neigh->cm = NULL; neigh->cm = NULL;
list_del(&neigh->list);
ipoib_neigh_free(neigh); ipoib_neigh_free(neigh);
tx->neigh = NULL; tx->neigh = NULL;
...@@ -1234,7 +1233,6 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id, ...@@ -1234,7 +1233,6 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
if (neigh) { if (neigh) {
neigh->cm = NULL; neigh->cm = NULL;
list_del(&neigh->list);
ipoib_neigh_free(neigh); ipoib_neigh_free(neigh);
tx->neigh = NULL; tx->neigh = NULL;
...@@ -1325,7 +1323,6 @@ static void ipoib_cm_tx_start(struct work_struct *work) ...@@ -1325,7 +1323,6 @@ static void ipoib_cm_tx_start(struct work_struct *work)
neigh = p->neigh; neigh = p->neigh;
if (neigh) { if (neigh) {
neigh->cm = NULL; neigh->cm = NULL;
list_del(&neigh->list);
ipoib_neigh_free(neigh); ipoib_neigh_free(neigh);
} }
list_del(&p->list); list_del(&p->list);
......
...@@ -493,7 +493,6 @@ static void path_rec_completion(int status, ...@@ -493,7 +493,6 @@ static void path_rec_completion(int status,
path, path,
neigh)); neigh));
if (!ipoib_cm_get(neigh)) { if (!ipoib_cm_get(neigh)) {
list_del(&neigh->list);
ipoib_neigh_free(neigh); ipoib_neigh_free(neigh);
continue; continue;
} }
...@@ -618,7 +617,6 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr, ...@@ -618,7 +617,6 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
if (!ipoib_cm_get(neigh)) if (!ipoib_cm_get(neigh))
ipoib_cm_set(neigh, ipoib_cm_create_tx(dev, path, neigh)); ipoib_cm_set(neigh, ipoib_cm_create_tx(dev, path, neigh));
if (!ipoib_cm_get(neigh)) { if (!ipoib_cm_get(neigh)) {
list_del(&neigh->list);
ipoib_neigh_free(neigh); ipoib_neigh_free(neigh);
goto err_drop; goto err_drop;
} }
...@@ -639,7 +637,7 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr, ...@@ -639,7 +637,7 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
neigh->ah = NULL; neigh->ah = NULL;
if (!path->query && path_rec_start(dev, path)) if (!path->query && path_rec_start(dev, path))
goto err_list; goto err_path;
__skb_queue_tail(&neigh->queue, skb); __skb_queue_tail(&neigh->queue, skb);
} }
...@@ -648,9 +646,6 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr, ...@@ -648,9 +646,6 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
ipoib_neigh_put(neigh); ipoib_neigh_put(neigh);
return; return;
err_list:
list_del(&neigh->list);
err_path: err_path:
ipoib_neigh_free(neigh); ipoib_neigh_free(neigh);
err_drop: err_drop:
...@@ -1098,6 +1093,8 @@ void ipoib_neigh_free(struct ipoib_neigh *neigh) ...@@ -1098,6 +1093,8 @@ void ipoib_neigh_free(struct ipoib_neigh *neigh)
rcu_assign_pointer(*np, rcu_assign_pointer(*np,
rcu_dereference_protected(neigh->hnext, rcu_dereference_protected(neigh->hnext,
lockdep_is_held(&priv->lock))); lockdep_is_held(&priv->lock)));
/* remove from parent list */
list_del(&neigh->list);
call_rcu(&neigh->rcu, ipoib_neigh_reclaim); call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
return; return;
} else { } else {
......
...@@ -347,6 +347,7 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session, ...@@ -347,6 +347,7 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
{ {
struct iscsi_conn *conn = cls_conn->dd_data; struct iscsi_conn *conn = cls_conn->dd_data;
struct iscsi_iser_conn *iser_conn; struct iscsi_iser_conn *iser_conn;
struct iscsi_session *session;
struct iser_conn *ib_conn; struct iser_conn *ib_conn;
struct iscsi_endpoint *ep; struct iscsi_endpoint *ep;
int error; int error;
...@@ -365,7 +366,8 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session, ...@@ -365,7 +366,8 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
} }
ib_conn = ep->dd_data; ib_conn = ep->dd_data;
if (iser_alloc_rx_descriptors(ib_conn)) session = conn->session;
if (iser_alloc_rx_descriptors(ib_conn, session))
return -ENOMEM; return -ENOMEM;
/* binds the iSER connection retrieved from the previously /* binds the iSER connection retrieved from the previously
...@@ -419,12 +421,13 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep, ...@@ -419,12 +421,13 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
struct iscsi_cls_session *cls_session; struct iscsi_cls_session *cls_session;
struct iscsi_session *session; struct iscsi_session *session;
struct Scsi_Host *shost; struct Scsi_Host *shost;
struct iser_conn *ib_conn; struct iser_conn *ib_conn = NULL;
shost = iscsi_host_alloc(&iscsi_iser_sht, 0, 0); shost = iscsi_host_alloc(&iscsi_iser_sht, 0, 0);
if (!shost) if (!shost)
return NULL; return NULL;
shost->transportt = iscsi_iser_scsi_transport; shost->transportt = iscsi_iser_scsi_transport;
shost->cmd_per_lun = qdepth;
shost->max_lun = iscsi_max_lun; shost->max_lun = iscsi_max_lun;
shost->max_id = 0; shost->max_id = 0;
shost->max_channel = 0; shost->max_channel = 0;
...@@ -441,12 +444,14 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep, ...@@ -441,12 +444,14 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
ep ? ib_conn->device->ib_device->dma_device : NULL)) ep ? ib_conn->device->ib_device->dma_device : NULL))
goto free_host; goto free_host;
/* if (cmds_max > ISER_DEF_XMIT_CMDS_MAX) {
* we do not support setting can_queue cmd_per_lun from userspace yet iser_info("cmds_max changed from %u to %u\n",
* because we preallocate so many resources cmds_max, ISER_DEF_XMIT_CMDS_MAX);
*/ cmds_max = ISER_DEF_XMIT_CMDS_MAX;
}
cls_session = iscsi_session_setup(&iscsi_iser_transport, shost, cls_session = iscsi_session_setup(&iscsi_iser_transport, shost,
ISCSI_DEF_XMIT_CMDS_MAX, 0, cmds_max, 0,
sizeof(struct iscsi_iser_task), sizeof(struct iscsi_iser_task),
initial_cmdsn, 0); initial_cmdsn, 0);
if (!cls_session) if (!cls_session)
......
...@@ -78,14 +78,14 @@ ...@@ -78,14 +78,14 @@
#define iser_warn(fmt, arg...) \ #define iser_warn(fmt, arg...) \
do { \ do { \
if (iser_debug_level > 1) \ if (iser_debug_level > 0) \
pr_warn(PFX "%s:" fmt, \ pr_warn(PFX "%s:" fmt, \
__func__ , ## arg); \ __func__ , ## arg); \
} while (0) } while (0)
#define iser_info(fmt, arg...) \ #define iser_info(fmt, arg...) \
do { \ do { \
if (iser_debug_level > 0) \ if (iser_debug_level > 1) \
pr_info(PFX "%s:" fmt, \ pr_info(PFX "%s:" fmt, \
__func__ , ## arg); \ __func__ , ## arg); \
} while (0) } while (0)
...@@ -102,7 +102,13 @@ ...@@ -102,7 +102,13 @@
/* support up to 512KB in one RDMA */ /* support up to 512KB in one RDMA */
#define ISCSI_ISER_SG_TABLESIZE (0x80000 >> SHIFT_4K) #define ISCSI_ISER_SG_TABLESIZE (0x80000 >> SHIFT_4K)
#define ISER_DEF_CMD_PER_LUN ISCSI_DEF_XMIT_CMDS_MAX #define ISER_DEF_XMIT_CMDS_DEFAULT 512
#if ISCSI_DEF_XMIT_CMDS_MAX > ISER_DEF_XMIT_CMDS_DEFAULT
#define ISER_DEF_XMIT_CMDS_MAX ISCSI_DEF_XMIT_CMDS_MAX
#else
#define ISER_DEF_XMIT_CMDS_MAX ISER_DEF_XMIT_CMDS_DEFAULT
#endif
#define ISER_DEF_CMD_PER_LUN ISER_DEF_XMIT_CMDS_MAX
/* QP settings */ /* QP settings */
/* Maximal bounds on received asynchronous PDUs */ /* Maximal bounds on received asynchronous PDUs */
...@@ -111,9 +117,9 @@ ...@@ -111,9 +117,9 @@
#define ISER_MAX_TX_MISC_PDUS 6 /* NOOP_OUT(2), TEXT(1), * #define ISER_MAX_TX_MISC_PDUS 6 /* NOOP_OUT(2), TEXT(1), *
* SCSI_TMFUNC(2), LOGOUT(1) */ * SCSI_TMFUNC(2), LOGOUT(1) */
#define ISER_QP_MAX_RECV_DTOS (ISCSI_DEF_XMIT_CMDS_MAX) #define ISER_QP_MAX_RECV_DTOS (ISER_DEF_XMIT_CMDS_MAX)
#define ISER_MIN_POSTED_RX (ISCSI_DEF_XMIT_CMDS_MAX >> 2) #define ISER_MIN_POSTED_RX (ISER_DEF_XMIT_CMDS_MAX >> 2)
/* the max TX (send) WR supported by the iSER QP is defined by * /* the max TX (send) WR supported by the iSER QP is defined by *
* max_send_wr = T * (1 + D) + C ; D is how many inflight dataouts we expect * * max_send_wr = T * (1 + D) + C ; D is how many inflight dataouts we expect *
...@@ -123,7 +129,7 @@ ...@@ -123,7 +129,7 @@
#define ISER_INFLIGHT_DATAOUTS 8 #define ISER_INFLIGHT_DATAOUTS 8
#define ISER_QP_MAX_REQ_DTOS (ISCSI_DEF_XMIT_CMDS_MAX * \ #define ISER_QP_MAX_REQ_DTOS (ISER_DEF_XMIT_CMDS_MAX * \
(1 + ISER_INFLIGHT_DATAOUTS) + \ (1 + ISER_INFLIGHT_DATAOUTS) + \
ISER_MAX_TX_MISC_PDUS + \ ISER_MAX_TX_MISC_PDUS + \
ISER_MAX_RX_MISC_PDUS) ISER_MAX_RX_MISC_PDUS)
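Worked through with the stock ISCSI_DEF_XMIT_CMDS_MAX of 128, the #if above picks the larger value, so ISER_DEF_XMIT_CMDS_MAX resolves to 512: ISER_QP_MAX_RECV_DTOS grows from 128 to 512 receive buffers, ISER_MIN_POSTED_RX from 32 to 128, and iscsi_iser_session_create() clamps any larger user-requested cmds_max back to this ceiling.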
...@@ -205,7 +211,7 @@ struct iser_mem_reg { ...@@ -205,7 +211,7 @@ struct iser_mem_reg {
u64 va; u64 va;
u64 len; u64 len;
void *mem_h; void *mem_h;
int is_fmr; int is_mr;
}; };
struct iser_regd_buf { struct iser_regd_buf {
...@@ -246,6 +252,9 @@ struct iser_rx_desc { ...@@ -246,6 +252,9 @@ struct iser_rx_desc {
#define ISER_MAX_CQ 4 #define ISER_MAX_CQ 4
struct iser_conn;
struct iscsi_iser_task;
struct iser_device { struct iser_device {
struct ib_device *ib_device; struct ib_device *ib_device;
struct ib_pd *pd; struct ib_pd *pd;
...@@ -259,6 +268,22 @@ struct iser_device { ...@@ -259,6 +268,22 @@ struct iser_device {
int cq_active_qps[ISER_MAX_CQ]; int cq_active_qps[ISER_MAX_CQ];
int cqs_used; int cqs_used;
struct iser_cq_desc *cq_desc; struct iser_cq_desc *cq_desc;
int (*iser_alloc_rdma_reg_res)(struct iser_conn *ib_conn,
unsigned cmds_max);
void (*iser_free_rdma_reg_res)(struct iser_conn *ib_conn);
int (*iser_reg_rdma_mem)(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir);
void (*iser_unreg_rdma_mem)(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir);
};
struct fast_reg_descriptor {
struct list_head list;
/* For fast registration - FRWR */
struct ib_mr *data_mr;
struct ib_fast_reg_page_list *data_frpl;
/* Valid for fast registration flag */
bool valid;
}; };
struct iser_conn { struct iser_conn {
...@@ -270,13 +295,13 @@ struct iser_conn { ...@@ -270,13 +295,13 @@ struct iser_conn {
struct iser_device *device; /* device context */ struct iser_device *device; /* device context */
struct rdma_cm_id *cma_id; /* CMA ID */ struct rdma_cm_id *cma_id; /* CMA ID */
struct ib_qp *qp; /* QP */ struct ib_qp *qp; /* QP */
struct ib_fmr_pool *fmr_pool; /* pool of IB FMRs */
wait_queue_head_t wait; /* waitq for conn/disconn */ wait_queue_head_t wait; /* waitq for conn/disconn */
unsigned qp_max_recv_dtos; /* num of rx buffers */
unsigned qp_max_recv_dtos_mask; /* above minus 1 */
unsigned min_posted_rx; /* qp_max_recv_dtos >> 2 */
int post_recv_buf_count; /* posted rx count */ int post_recv_buf_count; /* posted rx count */
atomic_t post_send_buf_count; /* posted tx count */ atomic_t post_send_buf_count; /* posted tx count */
char name[ISER_OBJECT_NAME_SIZE]; char name[ISER_OBJECT_NAME_SIZE];
struct iser_page_vec *page_vec; /* represents SG to fmr maps*
* maps serialized as tx is*/
struct list_head conn_list; /* entry in ig conn list */ struct list_head conn_list; /* entry in ig conn list */
char *login_buf; char *login_buf;
...@@ -285,6 +310,17 @@ struct iser_conn { ...@@ -285,6 +310,17 @@ struct iser_conn {
unsigned int rx_desc_head; unsigned int rx_desc_head;
struct iser_rx_desc *rx_descs; struct iser_rx_desc *rx_descs;
struct ib_recv_wr rx_wr[ISER_MIN_POSTED_RX]; struct ib_recv_wr rx_wr[ISER_MIN_POSTED_RX];
union {
struct {
struct ib_fmr_pool *pool; /* pool of IB FMRs */
struct iser_page_vec *page_vec; /* represents SG to fmr maps*
* maps serialized as tx is*/
} fmr;
struct {
struct list_head pool;
int pool_size;
} frwr;
} fastreg;
}; };
struct iscsi_iser_conn { struct iscsi_iser_conn {
...@@ -368,8 +404,10 @@ void iser_free_rx_descriptors(struct iser_conn *ib_conn); ...@@ -368,8 +404,10 @@ void iser_free_rx_descriptors(struct iser_conn *ib_conn);
void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *task, void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *task,
enum iser_data_dir cmd_dir); enum iser_data_dir cmd_dir);
int iser_reg_rdma_mem(struct iscsi_iser_task *task, int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *task,
enum iser_data_dir cmd_dir); enum iser_data_dir cmd_dir);
int iser_reg_rdma_mem_frwr(struct iscsi_iser_task *task,
enum iser_data_dir cmd_dir);
int iser_connect(struct iser_conn *ib_conn, int iser_connect(struct iser_conn *ib_conn,
struct sockaddr_in *src_addr, struct sockaddr_in *src_addr,
...@@ -380,7 +418,10 @@ int iser_reg_page_vec(struct iser_conn *ib_conn, ...@@ -380,7 +418,10 @@ int iser_reg_page_vec(struct iser_conn *ib_conn,
struct iser_page_vec *page_vec, struct iser_page_vec *page_vec,
struct iser_mem_reg *mem_reg); struct iser_mem_reg *mem_reg);
void iser_unreg_mem(struct iser_mem_reg *mem_reg); void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir);
void iser_unreg_mem_frwr(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir);
int iser_post_recvl(struct iser_conn *ib_conn); int iser_post_recvl(struct iser_conn *ib_conn);
int iser_post_recvm(struct iser_conn *ib_conn, int count); int iser_post_recvm(struct iser_conn *ib_conn, int count);
...@@ -394,5 +435,9 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task, ...@@ -394,5 +435,9 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task); void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task);
int iser_initialize_task_headers(struct iscsi_task *task, int iser_initialize_task_headers(struct iscsi_task *task,
struct iser_tx_desc *tx_desc); struct iser_tx_desc *tx_desc);
int iser_alloc_rx_descriptors(struct iser_conn *ib_conn); int iser_alloc_rx_descriptors(struct iser_conn *ib_conn, struct iscsi_session *session);
int iser_create_fmr_pool(struct iser_conn *ib_conn, unsigned cmds_max);
void iser_free_fmr_pool(struct iser_conn *ib_conn);
int iser_create_frwr_pool(struct iser_conn *ib_conn, unsigned cmds_max);
void iser_free_frwr_pool(struct iser_conn *ib_conn);
#endif #endif
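The four method pointers added to iser_device let the connection code stay agnostic of the registration scheme. Device setup can then pick FRWR when the HCA advertises memory-management extensions and fall back to FMR otherwise; a sketch of that selection, assumed to run where the device attributes are queried:

if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
	device->iser_alloc_rdma_reg_res = iser_create_frwr_pool;
	device->iser_free_rdma_reg_res  = iser_free_frwr_pool;
	device->iser_reg_rdma_mem       = iser_reg_rdma_mem_frwr;
	device->iser_unreg_rdma_mem     = iser_unreg_mem_frwr;
} else {
	device->iser_alloc_rdma_reg_res = iser_create_fmr_pool;
	device->iser_free_rdma_reg_res  = iser_free_fmr_pool;
	device->iser_reg_rdma_mem       = iser_reg_rdma_mem_fmr;
	device->iser_unreg_rdma_mem     = iser_unreg_mem_fmr;
}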
...@@ -576,6 +576,7 @@ struct adapter { ...@@ -576,6 +576,7 @@ struct adapter {
struct l2t_data *l2t; struct l2t_data *l2t;
void *uld_handle[CXGB4_ULD_MAX]; void *uld_handle[CXGB4_ULD_MAX];
struct list_head list_node; struct list_head list_node;
struct list_head rcu_node;
struct tid_info tids; struct tid_info tids;
void **tid_release_head; void **tid_release_head;
......
...@@ -154,6 +154,11 @@ struct in6_addr; ...@@ -154,6 +154,11 @@ struct in6_addr;
int cxgb4_create_server(const struct net_device *dev, unsigned int stid, int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
__be32 sip, __be16 sport, __be16 vlan, __be32 sip, __be16 sport, __be16 vlan,
unsigned int queue); unsigned int queue);
int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
const struct in6_addr *sip, __be16 sport,
unsigned int queue);
int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
unsigned int queue, bool ipv6);
int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid, int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
__be32 sip, __be16 sport, __be16 vlan, __be32 sip, __be16 sport, __be16 vlan,
unsigned int queue, unsigned int queue,
......
...@@ -320,6 +320,21 @@ struct cpl_act_open_req6 { ...@@ -320,6 +320,21 @@ struct cpl_act_open_req6 {
__be32 opt2; __be32 opt2;
}; };
struct cpl_t5_act_open_req6 {
WR_HDR;
union opcode_tid ot;
__be16 local_port;
__be16 peer_port;
__be64 local_ip_hi;
__be64 local_ip_lo;
__be64 peer_ip_hi;
__be64 peer_ip_lo;
__be64 opt0;
__be32 rsvd;
__be32 opt2;
__be64 params;
};
struct cpl_act_open_rpl { struct cpl_act_open_rpl {
union opcode_tid ot; union opcode_tid ot;
__be32 atid_status; __be32 atid_status;
...@@ -405,7 +420,7 @@ struct cpl_close_listsvr_req { ...@@ -405,7 +420,7 @@ struct cpl_close_listsvr_req {
WR_HDR; WR_HDR;
union opcode_tid ot; union opcode_tid ot;
__be16 reply_ctrl; __be16 reply_ctrl;
#define LISTSVR_IPV6 (1 << 14) #define LISTSVR_IPV6(x) ((x) << 14)
__be16 rsvd; __be16 rsvd;
}; };
......
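Making LISTSVR_IPV6 function-like lets the listener teardown path pass the address family straight into the reply control word. A hedged usage sketch (NO_REPLY and QUEUENO are the existing t4_msg.h helpers; this exact combination is an assumption):

req->reply_ctrl = htons(NO_REPLY(0) | LISTSVR_IPV6(ipv6 ? 1 : 0) |
			QUEUENO(queue));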
...@@ -616,6 +616,7 @@ enum fw_cmd_opcodes { ...@@ -616,6 +616,7 @@ enum fw_cmd_opcodes {
FW_RSS_IND_TBL_CMD = 0x20, FW_RSS_IND_TBL_CMD = 0x20,
FW_RSS_GLB_CONFIG_CMD = 0x22, FW_RSS_GLB_CONFIG_CMD = 0x22,
FW_RSS_VI_CONFIG_CMD = 0x23, FW_RSS_VI_CONFIG_CMD = 0x23,
FW_CLIP_CMD = 0x28,
FW_LASTC2E_CMD = 0x40, FW_LASTC2E_CMD = 0x40,
FW_ERROR_CMD = 0x80, FW_ERROR_CMD = 0x80,
FW_DEBUG_CMD = 0x81, FW_DEBUG_CMD = 0x81,
...@@ -2062,6 +2063,28 @@ struct fw_rss_vi_config_cmd { ...@@ -2062,6 +2063,28 @@ struct fw_rss_vi_config_cmd {
} u; } u;
}; };
struct fw_clip_cmd {
__be32 op_to_write;
__be32 alloc_to_len16;
__be64 ip_hi;
__be64 ip_lo;
__be32 r4[2];
};
#define S_FW_CLIP_CMD_ALLOC 31
#define M_FW_CLIP_CMD_ALLOC 0x1
#define V_FW_CLIP_CMD_ALLOC(x) ((x) << S_FW_CLIP_CMD_ALLOC)
#define G_FW_CLIP_CMD_ALLOC(x) \
(((x) >> S_FW_CLIP_CMD_ALLOC) & M_FW_CLIP_CMD_ALLOC)
#define F_FW_CLIP_CMD_ALLOC V_FW_CLIP_CMD_ALLOC(1U)
#define S_FW_CLIP_CMD_FREE 30
#define M_FW_CLIP_CMD_FREE 0x1
#define V_FW_CLIP_CMD_FREE(x) ((x) << S_FW_CLIP_CMD_FREE)
#define G_FW_CLIP_CMD_FREE(x) \
(((x) >> S_FW_CLIP_CMD_FREE) & M_FW_CLIP_CMD_FREE)
#define F_FW_CLIP_CMD_FREE V_FW_CLIP_CMD_FREE(1U)
enum fw_error_type { enum fw_error_type {
FW_ERROR_TYPE_EXCEPTION = 0x0, FW_ERROR_TYPE_EXCEPTION = 0x0,
FW_ERROR_TYPE_HWMODULE = 0x1, FW_ERROR_TYPE_HWMODULE = 0x1,
......
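FW_CLIP_CMD drives the compressed local IP (CLIP) table that cxgb4_create_server6() depends on: a local IPv6 address needs a CLIP entry before a listener on it can be offloaded. A hedged sketch of an allocation request, assuming the usual t4fw_api helpers (FW_CMD_OP, FW_CMD_REQUEST, FW_CMD_WRITE, FW_LEN16) and the standard mailbox path:

struct fw_clip_cmd c;
int ret;

memset(&c, 0, sizeof(c));
c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
		      FW_CMD_REQUEST | FW_CMD_WRITE);
c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_ALLOC | FW_LEN16(c));
memcpy(&c.ip_hi, lip, sizeof(c.ip_hi));		/* high 64 bits of address */
memcpy(&c.ip_lo, lip + 8, sizeof(c.ip_lo));	/* low 64 bits */
ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);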
...@@ -1909,7 +1909,8 @@ static int qp_get_mtt_size(struct mlx4_qp_context *qpc) ...@@ -1909,7 +1909,8 @@ static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
int log_rq_stride = qpc->rq_size_stride & 7; int log_rq_stride = qpc->rq_size_stride & 7;
int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1; int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
int rss = (be32_to_cpu(qpc->flags) >> 13) & 1; int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
int xrc = (be32_to_cpu(qpc->local_qpn) >> 23) & 1; u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
int sq_size; int sq_size;
int rq_size; int rq_size;
int total_pages; int total_pages;
......
...@@ -1052,11 +1052,6 @@ struct _rule_hw { ...@@ -1052,11 +1052,6 @@ struct _rule_hw {
}; };
}; };
/* translating DMFS verbs sniffer rule to the FW API would need two reg IDs */
struct mlx4_flow_handle {
u64 reg_id[2];
};
int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port, u32 qpn, int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port, u32 qpn,
enum mlx4_net_trans_promisc_mode mode); enum mlx4_net_trans_promisc_mode mode);
int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port, int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port,
......