Commit ea9627c8 authored by Roland Dreier

Merge branches 'cxgb4', 'ipoib', 'iser', 'misc', 'mlx4', 'qib' and 'srp' into for-next

@@ -878,6 +878,8 @@ static void cm_work_handler(struct work_struct *_work)
 			}
 			return;
 		}
+		if (empty)
+			return;
 		spin_lock_irqsave(&cm_id_priv->lock, flags);
 	}
 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
@@ -348,7 +348,8 @@ static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
 	struct ib_qp *qp = context;

 	list_for_each_entry(event->element.qp, &qp->open_list, open_list)
-		event->element.qp->event_handler(event, event->element.qp->qp_context);
+		if (event->element.qp->event_handler)
+			event->element.qp->event_handler(event, event->element.qp->qp_context);
 }

 static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
@@ -62,13 +62,13 @@ static int __cxio_init_resource_fifo(struct kfifo *fifo,
 		kfifo_in(fifo, (unsigned char *) &entry, sizeof(u32));
 	if (random) {
 		j = 0;
-		random_bytes = random32();
+		random_bytes = prandom_u32();
 		for (i = 0; i < RANDOM_SIZE; i++)
 			rarray[i] = i + skip_low;
 		for (i = skip_low + RANDOM_SIZE; i < nr - skip_high; i++) {
 			if (j >= RANDOM_SIZE) {
 				j = 0;
-				random_bytes = random32();
+				random_bytes = prandom_u32();
 			}
 			idx = (random_bytes >> (j * 2)) & 0xF;
 			kfifo_in(fifo,
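The shuffling loop above draws one 32-bit pseudorandom word and carves it into 4-bit indices (shifting by two bits per step) rather than calling the generator once per entry; only the generator changes here, from random32() to prandom_u32(). A minimal user-space sketch of that extraction, with rand() standing in for the kernel PRNG (all names below are illustrative, not driver API):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define RANDOM_SIZE 16

    int main(void)
    {
        uint32_t random_bytes = (uint32_t)rand();
        int j = 0;

        for (int i = 0; i < 32; i++) {
            if (j >= RANDOM_SIZE) {        /* word exhausted, draw a new one */
                j = 0;
                random_bytes = (uint32_t)rand();
            }
            /* take a 4-bit index from the current word */
            unsigned idx = (random_bytes >> (j * 2)) & 0xF;
            printf("%u ", idx);
            j++;
        }
        printf("\n");
        return 0;
    }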
@@ -559,7 +559,7 @@ static int iwch_reregister_phys_mem(struct ib_mr *mr,
 	__be64 *page_list = NULL;
 	int shift = 0;
 	u64 total_size;
-	int npages;
+	int npages = 0;
 	int ret;

 	PDBG("%s ib_mr %p ib_pd %p\n", __func__, mr, pd);
@@ -54,7 +54,7 @@ u32 c4iw_id_alloc(struct c4iw_id_table *alloc)
 	if (obj < alloc->max) {
 		if (alloc->flags & C4IW_ID_TABLE_F_RANDOM)
-			alloc->last += random32() % RANDOM_SKIP;
+			alloc->last += prandom_u32() % RANDOM_SKIP;
 		else
 			alloc->last = obj + 1;
 		if (alloc->last >= alloc->max)
@@ -88,7 +88,7 @@ int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num,
 	alloc->start = start;
 	alloc->flags = flags;
 	if (flags & C4IW_ID_TABLE_F_RANDOM)
-		alloc->last = random32() % RANDOM_SKIP;
+		alloc->last = prandom_u32() % RANDOM_SKIP;
 	else
 		alloc->last = 0;
 	alloc->max = num;
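With C4IW_ID_TABLE_F_RANDOM set, the table does not hand out IDs strictly sequentially: after each allocation the search cursor jumps ahead by a pseudorandom amount bounded by RANDOM_SKIP, which makes reuse of recently freed IDs less predictable. A rough user-space model of that cursor behaviour (hypothetical names, rand() in place of prandom_u32()):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define RANDOM_SKIP 16

    struct id_table {
        uint32_t last;  /* where the next search starts */
        uint32_t max;   /* number of IDs in the table   */
    };

    /* Pretend "obj" was just allocated; advance the cursor. */
    static void advance_cursor(struct id_table *t, uint32_t obj, int randomize)
    {
        if (randomize)
            t->last += (uint32_t)rand() % RANDOM_SKIP;
        else
            t->last = obj + 1;
        if (t->last >= t->max)
            t->last = 0;
    }

    int main(void)
    {
        struct id_table t = { .last = 0, .max = 64 };

        for (uint32_t obj = 0; obj < 8; obj++) {
            advance_cursor(&t, obj, 1);
            printf("next search starts at %u\n", t.last);
        }
        return 0;
    }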
@@ -2187,7 +2187,8 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
 	if (ret)
 		goto err_reg;

-	if (ipath_verbs_register_sysfs(dev))
+	ret = ipath_verbs_register_sysfs(dev);
+	if (ret)
 		goto err_class;

 	enable_timer(dd);
@@ -2327,15 +2328,15 @@ static int ipath_verbs_register_sysfs(struct ib_device *dev)
 	int i;
 	int ret;

-	for (i = 0; i < ARRAY_SIZE(ipath_class_attributes); ++i)
-		if (device_create_file(&dev->dev,
-				       ipath_class_attributes[i])) {
-			ret = 1;
+	for (i = 0; i < ARRAY_SIZE(ipath_class_attributes); ++i) {
+		ret = device_create_file(&dev->dev,
+					 ipath_class_attributes[i]);
+		if (ret)
 			goto bail;
 		}

-	ret = 0;
+	return 0;

 bail:
+	for (i = 0; i < ARRAY_SIZE(ipath_class_attributes); ++i)
+		device_remove_file(&dev->dev, ipath_class_attributes[i]);
 	return ret;
 }
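Both the ipath change above and the qib change later in this merge follow the same pattern: stop on the first attribute that fails to register, propagate the real error code instead of a made-up one, and remove whatever was already created rather than leaving partial sysfs state behind. A stripped-down user-space sketch of that create-or-roll-back loop (the attribute array and create/remove helpers are stand-ins, not the driver's API):

    #include <stdio.h>

    #define NUM_ATTRS 4

    /* Stand-ins for device_create_file()/device_remove_file(). */
    static int create_attr(int i)  { return (i == 2) ? -1 : 0; }  /* fail on #2 */
    static void remove_attr(int i) { printf("removed attr %d\n", i); }

    static int register_attrs(void)
    {
        int i, ret;

        for (i = 0; i < NUM_ATTRS; ++i) {
            ret = create_attr(i);
            if (ret)
                goto bail;
        }
        return 0;

    bail:
        /* Removing an attribute that was never created is harmless here,
         * mirroring how device_remove_file() tolerates a missing file. */
        for (i = 0; i < NUM_ATTRS; ++i)
            remove_attr(i);
        return ret;
    }

    int main(void)
    {
        printf("register_attrs() = %d\n", register_attrs());
        return 0;
    }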
@@ -33,6 +33,7 @@
 #include <linux/mlx4/cq.h>
 #include <linux/mlx4/qp.h>
+#include <linux/mlx4/srq.h>
 #include <linux/slab.h>

 #include "mlx4_ib.h"
@@ -585,6 +586,7 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
 	struct mlx4_qp *mqp;
 	struct mlx4_ib_wq *wq;
 	struct mlx4_ib_srq *srq;
+	struct mlx4_srq *msrq = NULL;
 	int is_send;
 	int is_error;
 	u32 g_mlpath_rqpn;
@@ -653,6 +655,20 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
 	wc->qp = &(*cur_qp)->ibqp;

+	if (wc->qp->qp_type == IB_QPT_XRC_TGT) {
+		u32 srq_num;
+		g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
+		srq_num       = g_mlpath_rqpn & 0xffffff;
+		/* SRQ is also in the radix tree */
+		msrq = mlx4_srq_lookup(to_mdev(cq->ibcq.device)->dev,
+				       srq_num);
+		if (unlikely(!msrq)) {
+			pr_warn("CQ %06x with entry for unknown SRQN %06x\n",
+				cq->mcq.cqn, srq_num);
+			return -EINVAL;
+		}
+	}
+
 	if (is_send) {
 		wq = &(*cur_qp)->sq;
 		if (!(*cur_qp)->sq_signal_bits) {
@@ -666,6 +682,11 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
 			wqe_ctr = be16_to_cpu(cqe->wqe_index);
 			wc->wr_id = srq->wrid[wqe_ctr];
 			mlx4_ib_free_srq_wqe(srq, wqe_ctr);
+		} else if (msrq) {
+			srq = to_mibsrq(msrq);
+			wqe_ctr = be16_to_cpu(cqe->wqe_index);
+			wc->wr_id = srq->wrid[wqe_ctr];
+			mlx4_ib_free_srq_wqe(srq, wqe_ctr);
 		} else {
 			wq = &(*cur_qp)->rq;
 			tail = wq->tail & (wq->wqe_cnt - 1);
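For XRC target QPs the completion entry does not point at the receive queue directly; the SRQ number rides in the low 24 bits of the CQE's g_mlpath_rqpn field and is then resolved through the driver's SRQ table. A tiny illustration of that bit extraction in plain C, with a made-up field value and no driver structures:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t g_mlpath_rqpn = 0x80abcdef;             /* example field, host order */
        unsigned int srq_num = g_mlpath_rqpn & 0xffffff; /* low 24 bits = SRQ number  */

        printf("SRQN %06x\n", srq_num);                  /* prints "SRQN abcdef" */
        return 0;
    }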
@@ -93,7 +93,7 @@ static void __propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
 __be64 mlx4_ib_gen_node_guid(void)
 {
 #define NODE_GUID_HI	((u64) (((u64)IB_OPENIB_OUI) << 40))
-	return cpu_to_be64(NODE_GUID_HI | random32());
+	return cpu_to_be64(NODE_GUID_HI | prandom_u32());
 }

 __be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
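The generated node GUID places the OpenIB OUI in the top 24 bits (shifted left by 40) and fills the low 32 bits with a pseudorandom value, leaving the byte in between zero. A small user-space illustration of that layout; the OUI constant is written out literally as an assumption here, and rand() again stands in for prandom_u32():

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        /* Assumed OUI value, used only for illustration. */
        uint64_t oui = 0x001405;
        uint64_t guid_hi = oui << 40;                   /* OUI in bits 63..40   */
        uint64_t guid = guid_hi | (uint32_t)rand();     /* random low 32 bits   */

        printf("node GUID: %016" PRIx64 "\n", guid);
        return 0;
    }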
@@ -1292,6 +1292,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
 		context->sq_size_stride |= !!qp->sq_no_prefetch << 7;
 		context->xrcd = cpu_to_be32((u32) qp->xrcdn);
+		if (ibqp->qp_type == IB_QPT_RAW_PACKET)
+			context->param3 |= cpu_to_be32(1 << 30);
 	}

 	if (qp->ibqp.uobject)
@@ -1458,6 +1460,10 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 		}
 	}

+	if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET)
+		context->pri_path.ackto = (context->pri_path.ackto & 0xf8) |
+					  MLX4_IB_LINK_TYPE_ETH;
+
 	if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
 	    attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
 		sqd_event = 1;
@@ -808,10 +808,14 @@ int qib_verbs_register_sysfs(struct qib_devdata *dd)
 	for (i = 0; i < ARRAY_SIZE(qib_attributes); ++i) {
 		ret = device_create_file(&dev->dev, qib_attributes[i]);
 		if (ret)
-			return ret;
+			goto bail;
 	}

 	return 0;
+bail:
+	for (i = 0; i < ARRAY_SIZE(qib_attributes); ++i)
+		device_remove_file(&dev->dev, qib_attributes[i]);
+	return ret;
 }

 /*
@@ -2234,7 +2234,8 @@ int qib_register_ib_device(struct qib_devdata *dd)
 	if (ret)
 		goto err_agents;

-	if (qib_verbs_register_sysfs(dd))
+	ret = qib_verbs_register_sysfs(dd);
+	if (ret)
 		goto err_class;

 	goto bail;
@@ -460,7 +460,7 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
 		goto err_qp;
 	}

-	psn = random32() & 0xffffff;
+	psn = prandom_u32() & 0xffffff;
 	ret = ipoib_cm_modify_rx_qp(dev, cm_id, p->qp, psn);
 	if (ret)
 		goto err_modify;
@@ -828,7 +828,7 @@ static int ipoib_hard_header(struct sk_buff *skb,
 	 */
 	memcpy(cb->hwaddr, daddr, INFINIBAND_ALEN);

-	return 0;
+	return sizeof *header;
 }

 static void ipoib_set_mcast_list(struct net_device *dev)
@@ -82,10 +82,10 @@ module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);

 int iser_debug_level = 0;

-MODULE_DESCRIPTION("iSER (iSCSI Extensions for RDMA) Datamover "
-		   "v" DRV_VER " (" DRV_DATE ")");
+MODULE_DESCRIPTION("iSER (iSCSI Extensions for RDMA) Datamover");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Alex Nezhinsky, Dan Bar Dov, Or Gerlitz");
+MODULE_VERSION(DRV_VER);

 module_param_named(debug_level, iser_debug_level, int, 0644);
 MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:disabled)");
@@ -370,8 +370,8 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
 	/* binds the iSER connection retrieved from the previously
 	 * connected ep_handle to the iSCSI layer connection. exchanges
 	 * connection pointers */
-	iser_err("binding iscsi/iser conn %p %p to ib_conn %p\n",
-		 conn, conn->dd_data, ib_conn);
+	iser_info("binding iscsi/iser conn %p %p to ib_conn %p\n",
+		  conn, conn->dd_data, ib_conn);
 	iser_conn = conn->dd_data;
 	ib_conn->iser_conn = iser_conn;
 	iser_conn->ib_conn  = ib_conn;
@@ -475,28 +475,28 @@ iscsi_iser_set_param(struct iscsi_cls_conn *cls_conn,
 	case ISCSI_PARAM_HDRDGST_EN:
 		sscanf(buf, "%d", &value);
 		if (value) {
-			printk(KERN_ERR "DataDigest wasn't negotiated to None");
+			iser_err("DataDigest wasn't negotiated to None");
 			return -EPROTO;
 		}
 		break;
 	case ISCSI_PARAM_DATADGST_EN:
 		sscanf(buf, "%d", &value);
 		if (value) {
-			printk(KERN_ERR "DataDigest wasn't negotiated to None");
+			iser_err("DataDigest wasn't negotiated to None");
 			return -EPROTO;
 		}
 		break;
 	case ISCSI_PARAM_IFMARKER_EN:
 		sscanf(buf, "%d", &value);
 		if (value) {
-			printk(KERN_ERR "IFMarker wasn't negotiated to No");
+			iser_err("IFMarker wasn't negotiated to No");
 			return -EPROTO;
 		}
 		break;
 	case ISCSI_PARAM_OFMARKER_EN:
 		sscanf(buf, "%d", &value);
 		if (value) {
-			printk(KERN_ERR "OFMarker wasn't negotiated to No");
+			iser_err("OFMarker wasn't negotiated to No");
 			return -EPROTO;
 		}
 		break;
@@ -596,7 +596,7 @@ iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
 		      ib_conn->state == ISER_CONN_DOWN))
 		rc = -1;

-	iser_err("ib conn %p rc = %d\n", ib_conn, rc);
+	iser_info("ib conn %p rc = %d\n", ib_conn, rc);

 	if (rc > 0)
 		return 1; /* success, this is the equivalent of POLLOUT */
@@ -623,7 +623,7 @@ iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep)
 	iscsi_suspend_tx(ib_conn->iser_conn->iscsi_conn);

-	iser_err("ib conn %p state %d\n",ib_conn, ib_conn->state);
+	iser_info("ib conn %p state %d\n", ib_conn, ib_conn->state);
 	iser_conn_terminate(ib_conn);
 }
@@ -682,7 +682,7 @@ static umode_t iser_attr_is_visible(int param_type, int param)
 static struct scsi_host_template iscsi_iser_sht = {
 	.module                 = THIS_MODULE,
-	.name                   = "iSCSI Initiator over iSER, v." DRV_VER,
+	.name                   = "iSCSI Initiator over iSER",
 	.queuecommand           = iscsi_queuecommand,
 	.change_queue_depth	= iscsi_change_queue_depth,
 	.sg_tablesize           = ISCSI_ISER_SG_TABLESIZE,
@@ -740,7 +740,7 @@ static int __init iser_init(void)
 	iser_dbg("Starting iSER datamover...\n");

 	if (iscsi_max_lun < 1) {
-		printk(KERN_ERR "Invalid max_lun value of %u\n", iscsi_max_lun);
+		iser_err("Invalid max_lun value of %u\n", iscsi_max_lun);
 		return -EINVAL;
 	}
@@ -42,6 +42,7 @@

 #include <linux/types.h>
 #include <linux/net.h>
+#include <linux/printk.h>
 #include <scsi/libiscsi.h>
 #include <scsi/scsi_transport_iscsi.h>
@@ -65,20 +66,26 @@

 #define DRV_NAME	"iser"
 #define PFX		DRV_NAME ": "
-#define DRV_VER		"0.1"
-#define DRV_DATE	"May 7th, 2006"
+#define DRV_VER		"1.1"

 #define iser_dbg(fmt, arg...)				\
 	do {						\
-		if (iser_debug_level > 1)		\
+		if (iser_debug_level > 2)		\
 			printk(KERN_DEBUG PFX "%s:" fmt,\
 				__func__ , ## arg);	\
 	} while (0)

 #define iser_warn(fmt, arg...)				\
+	do {						\
+		if (iser_debug_level > 1)		\
+			pr_warn(PFX "%s:" fmt,		\
+				__func__ , ## arg);	\
+	} while (0)
+
+#define iser_info(fmt, arg...)				\
 	do {						\
 		if (iser_debug_level > 0)		\
-			printk(KERN_DEBUG PFX "%s:" fmt,\
+			pr_info(PFX "%s:" fmt,		\
 				__func__ , ## arg);	\
 	} while (0)
@@ -133,6 +140,15 @@ struct iser_hdr {
 	__be64  read_va;
 } __attribute__((packed));

+#define ISER_ZBVA_NOT_SUPPORTED		0x80
+#define ISER_SEND_W_INV_NOT_SUPPORTED	0x40
+
+struct iser_cm_hdr {
+	u8      flags;
+	u8      rsvd[3];
+} __packed;
+
 /* Constant PDU lengths calculations */
 #define ISER_HEADERS_LEN  (sizeof(struct iser_hdr) + sizeof(struct iscsi_hdr))
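The reworked macros gate their output on a single debug_level module parameter, so raising the level progressively enables info, then warning, then full debug output, and the warn/info variants now go through pr_warn()/pr_info() instead of KERN_DEBUG printk(). A compact user-space sketch of the same level-gated macro pattern, with fprintf() standing in for the kernel print helpers and all names illustrative:

    #include <stdio.h>

    static int debug_level = 1;    /* a module parameter in the real driver */

    #define PFX "iser: "

    #define my_info(fmt, ...)                                          \
        do {                                                           \
            if (debug_level > 0)                                       \
                fprintf(stderr, PFX "%s: " fmt, __func__,              \
                        ##__VA_ARGS__);                                \
        } while (0)

    #define my_warn(fmt, ...)                                          \
        do {                                                           \
            if (debug_level > 1)                                       \
                fprintf(stderr, PFX "%s: " fmt, __func__,              \
                        ##__VA_ARGS__);                                \
        } while (0)

    int main(void)
    {
        my_info("connection established (%d)\n", 42);  /* printed at level 1   */
        my_warn("low-level detail\n");                 /* suppressed at level 1 */
        return 0;
    }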
@@ -416,8 +416,9 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
 			for (i=0 ; i<ib_conn->page_vec->length ; i++)
 				iser_err("page_vec[%d] = 0x%llx\n", i,
 					 (unsigned long long) ib_conn->page_vec->pages[i]);
-			return err;
 		}
+		if (err)
+			return err;
 	}
 	return 0;
 }
@@ -74,8 +74,9 @@ static int iser_create_device_ib_res(struct iser_device *device)
 	struct iser_cq_desc *cq_desc;

 	device->cqs_used = min(ISER_MAX_CQ, device->ib_device->num_comp_vectors);
-	iser_err("using %d CQs, device %s supports %d vectors\n", device->cqs_used,
-		 device->ib_device->name, device->ib_device->num_comp_vectors);
+	iser_info("using %d CQs, device %s supports %d vectors\n",
+		  device->cqs_used, device->ib_device->name,
+		  device->ib_device->num_comp_vectors);

 	device->cq_desc = kmalloc(sizeof(struct iser_cq_desc) * device->cqs_used,
 				  GFP_KERNEL);
@@ -262,7 +263,7 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
 			min_index = index;
 	device->cq_active_qps[min_index]++;
 	mutex_unlock(&ig.connlist_mutex);
-	iser_err("cq index %d used for ib_conn %p\n", min_index, ib_conn);
+	iser_info("cq index %d used for ib_conn %p\n", min_index, ib_conn);

 	init_attr.event_handler = iser_qp_event_callback;
 	init_attr.qp_context	= (void *)ib_conn;
@@ -280,9 +281,9 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
 		goto out_err;

 	ib_conn->qp = ib_conn->cma_id->qp;
-	iser_err("setting conn %p cma_id %p: fmr_pool %p qp %p\n",
-		 ib_conn, ib_conn->cma_id,
-		 ib_conn->fmr_pool, ib_conn->cma_id->qp);
+	iser_info("setting conn %p cma_id %p: fmr_pool %p qp %p\n",
+		  ib_conn, ib_conn->cma_id,
+		  ib_conn->fmr_pool, ib_conn->cma_id->qp);

 	return ret;

 out_err:
@@ -299,9 +300,9 @@ static int iser_free_ib_conn_res(struct iser_conn *ib_conn, int can_destroy_id)
 	int cq_index;
 	BUG_ON(ib_conn == NULL);

-	iser_err("freeing conn %p cma_id %p fmr pool %p qp %p\n",
-		 ib_conn, ib_conn->cma_id,
-		 ib_conn->fmr_pool, ib_conn->qp);
+	iser_info("freeing conn %p cma_id %p fmr pool %p qp %p\n",
+		  ib_conn, ib_conn->cma_id,
+		  ib_conn->fmr_pool, ib_conn->qp);

 	/* qp is created only once both addr & route are resolved */
 	if (ib_conn->fmr_pool != NULL)
@@ -379,7 +380,7 @@ static void iser_device_try_release(struct iser_device *device)
 {
 	mutex_lock(&ig.device_list_mutex);
 	device->refcount--;
-	iser_err("device %p refcount %d\n",device,device->refcount);
+	iser_info("device %p refcount %d\n", device, device->refcount);
 	if (!device->refcount) {
 		iser_free_device_ib_res(device);
 		list_del(&device->ig_list);
@@ -498,6 +499,7 @@ static int iser_route_handler(struct rdma_cm_id *cma_id)
 {
 	struct rdma_conn_param conn_param;
 	int    ret;
+	struct iser_cm_hdr req_hdr;

 	ret = iser_create_ib_conn_res((struct iser_conn *)cma_id->context);
 	if (ret)
@@ -509,6 +511,12 @@ static int iser_route_handler(struct rdma_cm_id *cma_id)
 	conn_param.retry_count	       = 7;
 	conn_param.rnr_retry_count     = 6;

+	memset(&req_hdr, 0, sizeof(req_hdr));
+	req_hdr.flags = (ISER_ZBVA_NOT_SUPPORTED |
+			ISER_SEND_W_INV_NOT_SUPPORTED);
+	conn_param.private_data		= (void *)&req_hdr;
+	conn_param.private_data_len	= sizeof(struct iser_cm_hdr);
+
 	ret = rdma_connect(cma_id, &conn_param);
 	if (ret) {
 		iser_err("failure connecting: %d\n", ret);
@@ -558,8 +566,8 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 {
 	int ret = 0;

-	iser_err("event %d status %d conn %p id %p\n",
-		event->event, event->status, cma_id->context, cma_id);
+	iser_info("event %d status %d conn %p id %p\n",
+		  event->event, event->status, cma_id->context, cma_id);

 	switch (event->event) {
 	case RDMA_CM_EVENT_ADDR_RESOLVED:
@@ -619,8 +627,8 @@ int iser_connect(struct iser_conn *ib_conn,
 	/* the device is known only --after-- address resolution */
 	ib_conn->device = NULL;

-	iser_err("connecting to: %pI4, port 0x%x\n",
-		 &dst_addr->sin_addr, dst_addr->sin_port);
+	iser_info("connecting to: %pI4, port 0x%x\n",
+		  &dst_addr->sin_addr, dst_addr->sin_port);

 	ib_conn->state = ISER_CONN_PENDING;
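The route handler now advertises the initiator's capabilities to the target by packing flag bits into a one-byte header carried in the CM connection request's private data. A hedged sketch of building that header outside the driver: the iser_cm_hdr layout and flag values are taken from the patch, while the surrounding connection-parameter struct is a simplified stand-in for rdma_conn_param, not the real rdma_cm API:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define ISER_ZBVA_NOT_SUPPORTED        0x80
    #define ISER_SEND_W_INV_NOT_SUPPORTED  0x40

    struct iser_cm_hdr {
        uint8_t flags;
        uint8_t rsvd[3];
    };

    /* Minimal stand-in for the rdma_conn_param fields used here. */
    struct conn_param {
        const void *private_data;
        uint8_t     private_data_len;
    };

    int main(void)
    {
        static struct iser_cm_hdr req_hdr;
        struct conn_param conn_param;

        memset(&req_hdr, 0, sizeof(req_hdr));
        req_hdr.flags = ISER_ZBVA_NOT_SUPPORTED | ISER_SEND_W_INV_NOT_SUPPORTED;

        conn_param.private_data     = &req_hdr;
        conn_param.private_data_len = sizeof(req_hdr);

        printf("private data: %u bytes, flags 0x%02x\n",
               (unsigned)conn_param.private_data_len, req_hdr.flags);
        return 0;
    }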
@@ -1374,7 +1374,7 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
 		target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
 		break;
 	default:
-		WARN_ON("ERROR: unexpected command state");
+		WARN(1, "Unexpected command state (%d)", state);
 		break;
 	}
@@ -889,7 +889,7 @@ static int mlx4_en_flow_replace(struct net_device *dev,
 		.queue_mode = MLX4_NET_TRANS_Q_FIFO,
 		.exclusive = 0,
 		.allow_loopback = 1,
-		.promisc_mode = MLX4_FS_PROMISC_NONE,
+		.promisc_mode = MLX4_FS_REGULAR,
 	};

 	rule.port = priv->port;
@@ -127,7 +127,7 @@ static void mlx4_en_filter_work(struct work_struct *work)
 		.queue_mode = MLX4_NET_TRANS_Q_LIFO,
 		.exclusive = 1,
 		.allow_loopback = 1,
-		.promisc_mode = MLX4_FS_PROMISC_NONE,
+		.promisc_mode = MLX4_FS_REGULAR,
 		.port = priv->port,
 		.priority = MLX4_DOMAIN_RFS,
 	};
@@ -446,7 +446,7 @@ static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
 		.queue_mode = MLX4_NET_TRANS_Q_FIFO,
 		.exclusive = 0,
 		.allow_loopback = 1,
-		.promisc_mode = MLX4_FS_PROMISC_NONE,
+		.promisc_mode = MLX4_FS_REGULAR,
 		.priority = MLX4_DOMAIN_NIC,
 	};
@@ -793,7 +793,7 @@ static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv,
 			err = mlx4_flow_steer_promisc_add(mdev->dev,
 							  priv->port,
 							  priv->base_qpn,
-							  MLX4_FS_PROMISC_UPLINK);
+							  MLX4_FS_ALL_DEFAULT);
 			if (err)
 				en_err(priv, "Failed enabling promiscuous mode\n");
 			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
@@ -856,7 +856,7 @@ static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv,
 	case MLX4_STEERING_MODE_DEVICE_MANAGED:
 		err = mlx4_flow_steer_promisc_remove(mdev->dev,
 						     priv->port,
-						     MLX4_FS_PROMISC_UPLINK);
+						     MLX4_FS_ALL_DEFAULT);
 		if (err)
 			en_err(priv, "Failed disabling promiscuous mode\n");
 		priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
@@ -917,7 +917,7 @@ static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
 			err = mlx4_flow_steer_promisc_add(mdev->dev,
 							  priv->port,
 							  priv->base_qpn,
-							  MLX4_FS_PROMISC_ALL_MULTI);
+							  MLX4_FS_MC_DEFAULT);
 			break;

 		case MLX4_STEERING_MODE_B0:
@@ -940,7 +940,7 @@ static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
 		case MLX4_STEERING_MODE_DEVICE_MANAGED:
 			err = mlx4_flow_steer_promisc_remove(mdev->dev,
 							     priv->port,
-							     MLX4_FS_PROMISC_ALL_MULTI);
+							     MLX4_FS_MC_DEFAULT);
 			break;

 		case MLX4_STEERING_MODE_B0:
@@ -1598,10 +1598,10 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
 				 MLX4_EN_FLAG_MC_PROMISC);
 		mlx4_flow_steer_promisc_remove(mdev->dev,
 					       priv->port,
-					       MLX4_FS_PROMISC_UPLINK);
+					       MLX4_FS_ALL_DEFAULT);
 		mlx4_flow_steer_promisc_remove(mdev->dev,
 					       priv->port,
-					       MLX4_FS_PROMISC_ALL_MULTI);
+					       MLX4_FS_MC_DEFAULT);
 	} else if (priv->flags & MLX4_EN_FLAG_PROMISC) {
 		priv->flags &= ~MLX4_EN_FLAG_PROMISC;
@@ -497,8 +497,8 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 			break;

 		case MLX4_EVENT_TYPE_SRQ_LIMIT:
-			mlx4_warn(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n",
-				  __func__);
+			mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n",
+				 __func__);
 		case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
 			if (mlx4_is_master(dev)) {
 				/* forward only to slave owning the SRQ */
@@ -645,25 +645,37 @@ static int find_entry(struct mlx4_dev *dev, u8 port,
 	return err;
 }

+static const u8 __promisc_mode[] = {
+	[MLX4_FS_REGULAR]     = 0x0,
+	[MLX4_FS_ALL_DEFAULT] = 0x1,
+	[MLX4_FS_MC_DEFAULT]  = 0x3,
+	[MLX4_FS_UC_SNIFFER]  = 0x4,
+	[MLX4_FS_MC_SNIFFER]  = 0x5,
+};
+
+int mlx4_map_sw_to_hw_steering_mode(struct mlx4_dev *dev,
+				    enum mlx4_net_trans_promisc_mode flow_type)
+{
+	if (flow_type >= MLX4_FS_MODE_NUM || flow_type < 0) {
+		mlx4_err(dev, "Invalid flow type. type = %d\n", flow_type);
+		return -EINVAL;
+	}
+	return __promisc_mode[flow_type];
+}
+EXPORT_SYMBOL_GPL(mlx4_map_sw_to_hw_steering_mode);
+
 static void trans_rule_ctrl_to_hw(struct mlx4_net_trans_rule *ctrl,
 				  struct mlx4_net_trans_rule_hw_ctrl *hw)
 {
-	static const u8 __promisc_mode[] = {
-		[MLX4_FS_PROMISC_NONE]          = 0x0,
-		[MLX4_FS_PROMISC_UPLINK]        = 0x1,
-		[MLX4_FS_PROMISC_FUNCTION_PORT] = 0x2,
-		[MLX4_FS_PROMISC_ALL_MULTI]     = 0x3,
-	};
-
-	u32 dw = 0;
-
-	dw = ctrl->queue_mode == MLX4_NET_TRANS_Q_LIFO ? 1 : 0;
-	dw |= ctrl->exclusive ? (1 << 2) : 0;
-	dw |= ctrl->allow_loopback ? (1 << 3) : 0;
-	dw |= __promisc_mode[ctrl->promisc_mode] << 8;
-	dw |= ctrl->priority << 16;
-
-	hw->ctrl = cpu_to_be32(dw);
+	u8 flags = 0;
+
+	flags = ctrl->queue_mode == MLX4_NET_TRANS_Q_LIFO ? 1 : 0;
+	flags |= ctrl->exclusive ? (1 << 2) : 0;
+	flags |= ctrl->allow_loopback ? (1 << 3) : 0;
+
+	hw->flags = flags;
+	hw->type = __promisc_mode[ctrl->promisc_mode];
+	hw->prio = cpu_to_be16(ctrl->priority);
 	hw->port = ctrl->port;
 	hw->qpn = cpu_to_be32(ctrl->qpn);
 }
@@ -677,29 +689,51 @@ const u16 __sw_id_hw[] = {
 	[MLX4_NET_TRANS_RULE_ID_UDP]     = 0xE006
 };

+int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev,
+				  enum mlx4_net_trans_rule_id id)
+{
+	if (id >= MLX4_NET_TRANS_RULE_NUM || id < 0) {
+		mlx4_err(dev, "Invalid network rule id. id = %d\n", id);
+		return -EINVAL;
+	}
+	return __sw_id_hw[id];
+}
+EXPORT_SYMBOL_GPL(mlx4_map_sw_to_hw_steering_id);
+
+static const int __rule_hw_sz[] = {
+	[MLX4_NET_TRANS_RULE_ID_ETH] =
+		sizeof(struct mlx4_net_trans_rule_hw_eth),
+	[MLX4_NET_TRANS_RULE_ID_IB] =
+		sizeof(struct mlx4_net_trans_rule_hw_ib),
+	[MLX4_NET_TRANS_RULE_ID_IPV6] = 0,
+	[MLX4_NET_TRANS_RULE_ID_IPV4] =
+		sizeof(struct mlx4_net_trans_rule_hw_ipv4),
+	[MLX4_NET_TRANS_RULE_ID_TCP] =
+		sizeof(struct mlx4_net_trans_rule_hw_tcp_udp),
+	[MLX4_NET_TRANS_RULE_ID_UDP] =
+		sizeof(struct mlx4_net_trans_rule_hw_tcp_udp)
+};
+
+int mlx4_hw_rule_sz(struct mlx4_dev *dev,
+		    enum mlx4_net_trans_rule_id id)
+{
+	if (id >= MLX4_NET_TRANS_RULE_NUM || id < 0) {
+		mlx4_err(dev, "Invalid network rule id. id = %d\n", id);
+		return -EINVAL;
+	}
+	return __rule_hw_sz[id];
+}
+EXPORT_SYMBOL_GPL(mlx4_hw_rule_sz);
+
 static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec,
 			    struct _rule_hw *rule_hw)
 {
-	static const size_t __rule_hw_sz[] = {
-		[MLX4_NET_TRANS_RULE_ID_ETH] =
-			sizeof(struct mlx4_net_trans_rule_hw_eth),
-		[MLX4_NET_TRANS_RULE_ID_IB] =
-			sizeof(struct mlx4_net_trans_rule_hw_ib),
-		[MLX4_NET_TRANS_RULE_ID_IPV6] = 0,
-		[MLX4_NET_TRANS_RULE_ID_IPV4] =
-			sizeof(struct mlx4_net_trans_rule_hw_ipv4),
-		[MLX4_NET_TRANS_RULE_ID_TCP] =
-			sizeof(struct mlx4_net_trans_rule_hw_tcp_udp),
-		[MLX4_NET_TRANS_RULE_ID_UDP] =
-			sizeof(struct mlx4_net_trans_rule_hw_tcp_udp)
-	};
-	if (spec->id >= MLX4_NET_TRANS_RULE_NUM) {
-		mlx4_err(dev, "Invalid network rule id. id = %d\n", spec->id);
+	if (mlx4_hw_rule_sz(dev, spec->id) < 0)
 		return -EINVAL;
-	}
-	memset(rule_hw, 0, __rule_hw_sz[spec->id]);
+	memset(rule_hw, 0, mlx4_hw_rule_sz(dev, spec->id));
 	rule_hw->id = cpu_to_be16(__sw_id_hw[spec->id]);
-	rule_hw->size = __rule_hw_sz[spec->id] >> 2;
+	rule_hw->size = mlx4_hw_rule_sz(dev, spec->id) >> 2;

 	switch (spec->id) {
 	case MLX4_NET_TRANS_RULE_ID_ETH:
@@ -713,12 +747,12 @@ static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec,
 			rule_hw->eth.ether_type_enable = 1;
 			rule_hw->eth.ether_type = spec->eth.ether_type;
 		}
-		rule_hw->eth.vlan_id = spec->eth.vlan_id;
-		rule_hw->eth.vlan_id_msk = spec->eth.vlan_id_msk;
+		rule_hw->eth.vlan_tag = spec->eth.vlan_id;
+		rule_hw->eth.vlan_tag_msk = spec->eth.vlan_id_msk;
 		break;

 	case MLX4_NET_TRANS_RULE_ID_IB:
-		rule_hw->ib.qpn = spec->ib.r_qpn;
+		rule_hw->ib.l3_qpn = spec->ib.l3_qpn;
 		rule_hw->ib.qpn_mask = spec->ib.qpn_msk;
 		memcpy(&rule_hw->ib.dst_gid, &spec->ib.dst_gid, 16);
 		memcpy(&rule_hw->ib.dst_gid_msk, &spec->ib.dst_gid_msk, 16);
@@ -1153,7 +1187,7 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
 		struct mlx4_net_trans_rule rule = {
 			.queue_mode = MLX4_NET_TRANS_Q_FIFO,
 			.exclusive = 0,
-			.promisc_mode = MLX4_FS_PROMISC_NONE,
+			.promisc_mode = MLX4_FS_REGULAR,
 			.priority = MLX4_DOMAIN_NIC,
 		};
@@ -1222,11 +1256,10 @@ int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port,
 	u64 *regid_p;

 	switch (mode) {
-	case MLX4_FS_PROMISC_UPLINK:
-	case MLX4_FS_PROMISC_FUNCTION_PORT:
+	case MLX4_FS_ALL_DEFAULT:
 		regid_p = &dev->regid_promisc_array[port];
 		break;
-	case MLX4_FS_PROMISC_ALL_MULTI:
+	case MLX4_FS_MC_DEFAULT:
 		regid_p = &dev->regid_allmulti_array[port];
 		break;
 	default:
@@ -1253,11 +1286,10 @@ int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port,
 	u64 *regid_p;

 	switch (mode) {
-	case MLX4_FS_PROMISC_UPLINK:
-	case MLX4_FS_PROMISC_FUNCTION_PORT:
+	case MLX4_FS_ALL_DEFAULT:
 		regid_p = &dev->regid_promisc_array[port];
 		break;
-	case MLX4_FS_PROMISC_ALL_MULTI:
+	case MLX4_FS_MC_DEFAULT:
 		regid_p = &dev->regid_allmulti_array[port];
 		break;
 	default:
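The rewritten trans_rule_ctrl_to_hw() stops packing everything into a single 32-bit control word and instead fills the control segment's dedicated flags, type and prio fields: bit 0 of the flags byte carries the queue mode, bit 2 the exclusive flag and bit 3 loopback permission, while the promiscuous mode is mapped to the hardware type code through the __promisc_mode table. A small stand-alone sketch of that packing, with a simplified struct and made-up values (not the driver's types):

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified model of the hardware control segment fields used here. */
    struct hw_rule_ctrl {
        uint16_t prio;
        uint8_t  type;
        uint8_t  flags;
    };

    enum { Q_FIFO, Q_LIFO };

    static void pack_ctrl(struct hw_rule_ctrl *hw, int queue_mode, int exclusive,
                          int allow_loopback, uint8_t hw_promisc_type, uint16_t prio)
    {
        uint8_t flags = 0;

        flags  = (queue_mode == Q_LIFO) ? 1 : 0;  /* bit 0: queue mode  */
        flags |= exclusive      ? (1 << 2) : 0;   /* bit 2: exclusive   */
        flags |= allow_loopback ? (1 << 3) : 0;   /* bit 3: loopback ok */

        hw->flags = flags;
        hw->type  = hw_promisc_type;
        hw->prio  = prio;  /* the driver byte-swaps this with cpu_to_be16() */
    }

    int main(void)
    {
        struct hw_rule_ctrl hw;

        pack_ctrl(&hw, Q_FIFO, 0, 1, 0x0 /* regular rule */, 7);
        printf("flags=0x%02x type=0x%02x prio=%u\n",
               hw.flags, hw.type, (unsigned)hw.prio);
        return 0;
    }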
@@ -701,85 +701,6 @@ struct mlx4_steer {
 	struct list_head steer_entries[MLX4_NUM_STEERS];
 };

-struct mlx4_net_trans_rule_hw_ctrl {
-	__be32 ctrl;
-	u8 rsvd1;
-	u8 funcid;
-	u8 vep;
-	u8 port;
-	__be32 qpn;
-	__be32 rsvd2;
-};
-
-struct mlx4_net_trans_rule_hw_ib {
-	u8 size;
-	u8 rsvd1;
-	__be16 id;
-	u32 rsvd2;
-	__be32 qpn;
-	__be32 qpn_mask;
-	u8 dst_gid[16];
-	u8 dst_gid_msk[16];
-} __packed;
-
-struct mlx4_net_trans_rule_hw_eth {
-	u8	size;
-	u8	rsvd;
-	__be16	id;
-	u8	rsvd1[6];
-	u8	dst_mac[6];
-	u16	rsvd2;
-	u8	dst_mac_msk[6];
-	u16	rsvd3;
-	u8	src_mac[6];
-	u16	rsvd4;
-	u8	src_mac_msk[6];
-	u8	rsvd5;
-	u8	ether_type_enable;
-	__be16	ether_type;
-	__be16	vlan_id_msk;
-	__be16	vlan_id;
-} __packed;
-
-struct mlx4_net_trans_rule_hw_tcp_udp {
-	u8	size;
-	u8	rsvd;
-	__be16	id;
-	__be16	rsvd1[3];
-	__be16	dst_port;
-	__be16	rsvd2;
-	__be16	dst_port_msk;
-	__be16	rsvd3;
-	__be16	src_port;
-	__be16	rsvd4;
-	__be16	src_port_msk;
-} __packed;
-
-struct mlx4_net_trans_rule_hw_ipv4 {
-	u8	size;
-	u8	rsvd;
-	__be16	id;
-	__be32	rsvd1;
-	__be32	dst_ip;
-	__be32	dst_ip_msk;
-	__be32	src_ip;
-	__be32	src_ip_msk;
-} __packed;
-
-struct _rule_hw {
-	union {
-		struct {
-			u8 size;
-			u8 rsvd;
-			__be16 id;
-		};
-		struct mlx4_net_trans_rule_hw_eth eth;
-		struct mlx4_net_trans_rule_hw_ib ib;
-		struct mlx4_net_trans_rule_hw_ipv4 ipv4;
-		struct mlx4_net_trans_rule_hw_tcp_udp tcp_udp;
-	};
-};
-
 enum {
 	MLX4_PCI_DEV_IS_VF		= 1 << 0,
 	MLX4_PCI_DEV_FORCE_SENSE_PORT	= 1 << 1,
@@ -298,3 +298,18 @@ void mlx4_cleanup_srq_table(struct mlx4_dev *dev)
 		return;
 	mlx4_bitmap_cleanup(&mlx4_priv(dev)->srq_table.bitmap);
 }
+
+struct mlx4_srq *mlx4_srq_lookup(struct mlx4_dev *dev, u32 srqn)
+{
+	struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
+	struct mlx4_srq *srq;
+	unsigned long flags;
+
+	spin_lock_irqsave(&srq_table->lock, flags);
+	srq = radix_tree_lookup(&srq_table->tree,
+				srqn & (dev->caps.num_srqs - 1));
+	spin_unlock_irqrestore(&srq_table->lock, flags);
+
+	return srq;
+}
+EXPORT_SYMBOL_GPL(mlx4_srq_lookup);
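mlx4_srq_lookup() indexes the SRQ radix tree with the SRQ number masked down to the table size, which relies on num_srqs being a power of two so that the mask simply strips the high bits. A one-line illustration of that masking with made-up values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t num_srqs = 1 << 16;                /* must be a power of two */
        uint32_t srqn     = 0x83001a;               /* example SRQ number     */
        uint32_t index    = srqn & (num_srqs - 1);  /* table index: 0x001a    */

        printf("srqn 0x%06x -> index 0x%04x\n", srqn, index);
        return 0;
    }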
@@ -896,11 +896,12 @@ static inline int map_hw_to_sw_id(u16 header_id)
 }

 enum mlx4_net_trans_promisc_mode {
-	MLX4_FS_PROMISC_NONE = 0,
-	MLX4_FS_PROMISC_UPLINK,
-	/* For future use. Not implemented yet */
-	MLX4_FS_PROMISC_FUNCTION_PORT,
-	MLX4_FS_PROMISC_ALL_MULTI,
+	MLX4_FS_REGULAR = 1,
+	MLX4_FS_ALL_DEFAULT,
+	MLX4_FS_MC_DEFAULT,
+	MLX4_FS_UC_SNIFFER,
+	MLX4_FS_MC_SNIFFER,
+	MLX4_FS_MODE_NUM, /* should be last */
 };

 struct mlx4_spec_eth {
@@ -929,7 +930,7 @@ struct mlx4_spec_ipv4 {
 };

 struct mlx4_spec_ib {
-	__be32	r_qpn;
+	__be32	l3_qpn;
 	__be32	qpn_msk;
 	u8	dst_gid[16];
 	u8	dst_gid_msk[16];
u32 qpn; u32 qpn;
}; };
struct mlx4_net_trans_rule_hw_ctrl {
__be16 prio;
u8 type;
u8 flags;
u8 rsvd1;
u8 funcid;
u8 vep;
u8 port;
__be32 qpn;
__be32 rsvd2;
};
struct mlx4_net_trans_rule_hw_ib {
u8 size;
u8 rsvd1;
__be16 id;
u32 rsvd2;
__be32 l3_qpn;
__be32 qpn_mask;
u8 dst_gid[16];
u8 dst_gid_msk[16];
} __packed;
struct mlx4_net_trans_rule_hw_eth {
u8 size;
u8 rsvd;
__be16 id;
u8 rsvd1[6];
u8 dst_mac[6];
u16 rsvd2;
u8 dst_mac_msk[6];
u16 rsvd3;
u8 src_mac[6];
u16 rsvd4;
u8 src_mac_msk[6];
u8 rsvd5;
u8 ether_type_enable;
__be16 ether_type;
__be16 vlan_tag_msk;
__be16 vlan_tag;
} __packed;
struct mlx4_net_trans_rule_hw_tcp_udp {
u8 size;
u8 rsvd;
__be16 id;
__be16 rsvd1[3];
__be16 dst_port;
__be16 rsvd2;
__be16 dst_port_msk;
__be16 rsvd3;
__be16 src_port;
__be16 rsvd4;
__be16 src_port_msk;
} __packed;
struct mlx4_net_trans_rule_hw_ipv4 {
u8 size;
u8 rsvd;
__be16 id;
__be32 rsvd1;
__be32 dst_ip;
__be32 dst_ip_msk;
__be32 src_ip;
__be32 src_ip_msk;
} __packed;
struct _rule_hw {
union {
struct {
u8 size;
u8 rsvd;
__be16 id;
};
struct mlx4_net_trans_rule_hw_eth eth;
struct mlx4_net_trans_rule_hw_ib ib;
struct mlx4_net_trans_rule_hw_ipv4 ipv4;
struct mlx4_net_trans_rule_hw_tcp_udp tcp_udp;
};
};
/* translating DMFS verbs sniffer rule to the FW API would need two reg IDs */
struct mlx4_flow_handle {
u64 reg_id[2];
};
int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port, u32 qpn, int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port, u32 qpn,
enum mlx4_net_trans_promisc_mode mode); enum mlx4_net_trans_promisc_mode mode);
int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port, int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port,
...@@ -1011,6 +1098,11 @@ void mlx4_counter_free(struct mlx4_dev *dev, u32 idx); ...@@ -1011,6 +1098,11 @@ void mlx4_counter_free(struct mlx4_dev *dev, u32 idx);
int mlx4_flow_attach(struct mlx4_dev *dev, int mlx4_flow_attach(struct mlx4_dev *dev,
struct mlx4_net_trans_rule *rule, u64 *reg_id); struct mlx4_net_trans_rule *rule, u64 *reg_id);
int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id); int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id);
int mlx4_map_sw_to_hw_steering_mode(struct mlx4_dev *dev,
enum mlx4_net_trans_promisc_mode flow_type);
int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev,
enum mlx4_net_trans_rule_id id);
int mlx4_hw_rule_sz(struct mlx4_dev *dev, enum mlx4_net_trans_rule_id id);
void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port, void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port,
int i, int val); int i, int val);
......
@@ -39,4 +39,6 @@ struct mlx4_wqe_srq_next_seg {
 	u32 reserved2[3];
 };

+struct mlx4_srq *mlx4_srq_lookup(struct mlx4_dev *dev, u32 srqn);
+
 #endif /* MLX4_SRQ_H */