Commit 488823f1 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  RDMA/nes: Add a driver for NetEffect RNICs
  IB/mthca: Return proper error codes from mthca_fmr_alloc()
  IB: Avoid marking __devinitdata as const
  IB/mlx4: Actually print out the driver version
  IB/mthca: Pre-link receive WQEs in Tavor mode
  IB/mthca: Remove checks for srq->first_free < 0
  IB/fmr_pool: Allocate page list for pool FMRs only when caching enabled
  IB/srp: Retry stale connections
  mlx4_core: Don't read reserved fields in mlx4_QUERY_ADAPTER()
  IB/mthca: Don't read reserved fields in mthca_QUERY_ADAPTER()
  IPoIB: Remove a misleading debug print
  IPoIB: Handle bonding failover race for connected neighbours too
  IB/mthca: Fix and simplify page size calculation in mthca_reg_phys_mr()
  IB/ehca: Add PMA support
  IB/ehca: Update sma_attr also in case of disruptive config change
  IB/ehca: Prevent sending UD packets to QP0
  IB/cm: Add interim support for routed paths
  mlx4_core: Fix more section mismatches
parents 827b3f6a 3c2d774c
@@ -2681,6 +2681,16 @@ M: James.Bottomley@HansenPartnership.com
L: linux-scsi@vger.kernel.org
S: Maintained
NETEFFECT IWARP RNIC DRIVER (IW_NES)
P: Faisal Latif
M: flatif@neteffect.com
P: Glenn Streiff
M: gstreiff@neteffect.com
L: general@lists.openfabrics.org
W: http://www.neteffect.com
S: Supported
F: drivers/infiniband/hw/nes/
NETEM NETWORK EMULATOR
P: Stephen Hemminger
M: shemminger@linux-foundation.org
@@ -44,8 +44,8 @@ source "drivers/infiniband/hw/ipath/Kconfig"
source "drivers/infiniband/hw/ehca/Kconfig"
source "drivers/infiniband/hw/amso1100/Kconfig"
source "drivers/infiniband/hw/cxgb3/Kconfig"
source "drivers/infiniband/hw/mlx4/Kconfig"
source "drivers/infiniband/hw/nes/Kconfig"
source "drivers/infiniband/ulp/ipoib/Kconfig"
@@ -5,6 +5,7 @@ obj-$(CONFIG_INFINIBAND_EHCA) += hw/ehca/
obj-$(CONFIG_INFINIBAND_AMSO1100) += hw/amso1100/
obj-$(CONFIG_INFINIBAND_CXGB3) += hw/cxgb3/
obj-$(CONFIG_MLX4_INFINIBAND) += hw/mlx4/
obj-$(CONFIG_INFINIBAND_NES) += hw/nes/
obj-$(CONFIG_INFINIBAND_IPOIB) += ulp/ipoib/
obj-$(CONFIG_INFINIBAND_SRP) += ulp/srp/
obj-$(CONFIG_INFINIBAND_ISER) += ulp/iser/
@@ -974,6 +974,9 @@ static void cm_format_req(struct cm_req_msg *req_msg,
struct cm_id_private *cm_id_priv,
struct ib_cm_req_param *param)
{
struct ib_sa_path_rec *pri_path = param->primary_path;
struct ib_sa_path_rec *alt_path = param->alternate_path;
cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));
@@ -997,35 +1000,46 @@ static void cm_format_req(struct cm_req_msg *req_msg,
cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);
cm_req_set_srq(req_msg, param->srq);
req_msg->primary_local_lid = param->primary_path->slid;
req_msg->primary_remote_lid = param->primary_path->dlid;
req_msg->primary_local_gid = param->primary_path->sgid;
req_msg->primary_remote_gid = param->primary_path->dgid;
cm_req_set_primary_flow_label(req_msg, param->primary_path->flow_label);
cm_req_set_primary_packet_rate(req_msg, param->primary_path->rate);
req_msg->primary_traffic_class = param->primary_path->traffic_class;
req_msg->primary_hop_limit = param->primary_path->hop_limit;
cm_req_set_primary_sl(req_msg, param->primary_path->sl);
cm_req_set_primary_subnet_local(req_msg, 1); /* local only... */
if (pri_path->hop_limit <= 1) {
req_msg->primary_local_lid = pri_path->slid;
req_msg->primary_remote_lid = pri_path->dlid;
} else {
/* Work-around until there's a way to obtain remote LID info */
req_msg->primary_local_lid = IB_LID_PERMISSIVE;
req_msg->primary_remote_lid = IB_LID_PERMISSIVE;
}
req_msg->primary_local_gid = pri_path->sgid;
req_msg->primary_remote_gid = pri_path->dgid;
cm_req_set_primary_flow_label(req_msg, pri_path->flow_label);
cm_req_set_primary_packet_rate(req_msg, pri_path->rate);
req_msg->primary_traffic_class = pri_path->traffic_class;
req_msg->primary_hop_limit = pri_path->hop_limit;
cm_req_set_primary_sl(req_msg, pri_path->sl);
cm_req_set_primary_subnet_local(req_msg, (pri_path->hop_limit <= 1));
cm_req_set_primary_local_ack_timeout(req_msg,
cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
param->primary_path->packet_life_time));
pri_path->packet_life_time));
if (param->alternate_path) {
req_msg->alt_local_lid = param->alternate_path->slid;
req_msg->alt_remote_lid = param->alternate_path->dlid;
req_msg->alt_local_gid = param->alternate_path->sgid;
req_msg->alt_remote_gid = param->alternate_path->dgid;
if (alt_path) {
if (alt_path->hop_limit <= 1) {
req_msg->alt_local_lid = alt_path->slid;
req_msg->alt_remote_lid = alt_path->dlid;
} else {
req_msg->alt_local_lid = IB_LID_PERMISSIVE;
req_msg->alt_remote_lid = IB_LID_PERMISSIVE;
}
req_msg->alt_local_gid = alt_path->sgid;
req_msg->alt_remote_gid = alt_path->dgid;
cm_req_set_alt_flow_label(req_msg,
param->alternate_path->flow_label);
cm_req_set_alt_packet_rate(req_msg, param->alternate_path->rate);
req_msg->alt_traffic_class = param->alternate_path->traffic_class;
req_msg->alt_hop_limit = param->alternate_path->hop_limit;
cm_req_set_alt_sl(req_msg, param->alternate_path->sl);
cm_req_set_alt_subnet_local(req_msg, 1); /* local only... */
alt_path->flow_label);
cm_req_set_alt_packet_rate(req_msg, alt_path->rate);
req_msg->alt_traffic_class = alt_path->traffic_class;
req_msg->alt_hop_limit = alt_path->hop_limit;
cm_req_set_alt_sl(req_msg, alt_path->sl);
cm_req_set_alt_subnet_local(req_msg, (alt_path->hop_limit <= 1));
cm_req_set_alt_local_ack_timeout(req_msg,
cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
param->alternate_path->packet_life_time));
alt_path->packet_life_time));
}
if (param->private_data && param->private_data_len)
@@ -1441,6 +1455,34 @@ static struct cm_id_private * cm_match_req(struct cm_work *work,
return listen_cm_id_priv;
}
/*
* Work-around for inter-subnet connections. If the LIDs are permissive,
* we need to override the LID/SL data in the REQ with the LID information
* in the work completion.
*/
static void cm_process_routed_req(struct cm_req_msg *req_msg, struct ib_wc *wc)
{
if (!cm_req_get_primary_subnet_local(req_msg)) {
if (req_msg->primary_local_lid == IB_LID_PERMISSIVE) {
req_msg->primary_local_lid = cpu_to_be16(wc->slid);
cm_req_set_primary_sl(req_msg, wc->sl);
}
if (req_msg->primary_remote_lid == IB_LID_PERMISSIVE)
req_msg->primary_remote_lid = cpu_to_be16(wc->dlid_path_bits);
}
if (!cm_req_get_alt_subnet_local(req_msg)) {
if (req_msg->alt_local_lid == IB_LID_PERMISSIVE) {
req_msg->alt_local_lid = cpu_to_be16(wc->slid);
cm_req_set_alt_sl(req_msg, wc->sl);
}
if (req_msg->alt_remote_lid == IB_LID_PERMISSIVE)
req_msg->alt_remote_lid = cpu_to_be16(wc->dlid_path_bits);
}
}
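Editor's note: the hunks above are the two halves of one rule, so here is a minimal standalone sketch of the active-side half (the helper name, the plain C types, and the sample LIDs are illustrative, not from the commit). A hop_limit of 0 or 1 means the path never leaves the local subnet, so real LIDs go into the REQ and subnet_local is set; a larger hop_limit means a routed path, so permissive LIDs are sent and cm_process_routed_req() above recovers the LIDs from the work completion on the passive side.
#include <stdio.h>
#include <stdint.h>

#define LID_PERMISSIVE 0xffff	/* stands in for IB_LID_PERMISSIVE */

static void format_path(uint16_t slid, uint16_t dlid, uint8_t hop_limit)
{
	int local = hop_limit <= 1;

	printf("hop_limit=%d -> slid=0x%04x dlid=0x%04x subnet_local=%d\n",
	       hop_limit,
	       local ? slid : LID_PERMISSIVE,
	       local ? dlid : LID_PERMISSIVE,
	       local);
}

int main(void)
{
	format_path(0x0012, 0x0034, 1);		/* same subnet: real LIDs on the wire       */
	format_path(0x0012, 0x0034, 64);	/* routed: permissive LIDs, fixed up later  */
	return 0;
}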
static int cm_req_handler(struct cm_work *work)
{
struct ib_cm_id *cm_id;
@@ -1481,6 +1523,7 @@ static int cm_req_handler(struct cm_work *work)
cm_id_priv->id.service_id = req_msg->service_id;
cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);
cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
if (ret) {
@@ -320,10 +320,13 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
.max_maps = pool->max_remaps,
.page_shift = params->page_shift
};
int bytes_per_fmr = sizeof *fmr;
if (pool->cache_bucket)
bytes_per_fmr += params->max_pages_per_fmr * sizeof (u64);
for (i = 0; i < params->pool_size; ++i) {
fmr = kmalloc(sizeof *fmr + params->max_pages_per_fmr * sizeof (u64),
GFP_KERNEL);
fmr = kmalloc(bytes_per_fmr, GFP_KERNEL);
if (!fmr) {
printk(KERN_WARNING PFX "failed to allocate fmr "
"struct for FMR %d\n", i);
@@ -101,6 +101,7 @@ struct ehca_sport {
spinlock_t mod_sqp_lock;
enum ib_port_state port_state;
struct ehca_sma_attr saved_attr;
u32 pma_qp_nr;
};
#define HCA_CAP_MR_PGSIZE_4K 0x80000000
@@ -403,6 +403,8 @@ static void parse_ec(struct ehca_shca *shca, u64 eqe)
sport->port_state = IB_PORT_ACTIVE;
dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE,
"is active");
ehca_query_sma_attr(shca, port,
&sport->saved_attr);
} else
notify_port_conf_change(shca, port);
break;
@@ -187,6 +187,11 @@ int ehca_dealloc_ucontext(struct ib_ucontext *context);
int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
struct ib_wc *in_wc, struct ib_grh *in_grh,
struct ib_mad *in_mad,
struct ib_mad *out_mad);
void ehca_poll_eqs(unsigned long data);
int ehca_calc_ipd(struct ehca_shca *shca, int port,
@@ -472,7 +472,7 @@ int ehca_init_device(struct ehca_shca *shca)
shca->ib_device.dealloc_fmr = ehca_dealloc_fmr;
shca->ib_device.attach_mcast = ehca_attach_mcast;
shca->ib_device.detach_mcast = ehca_detach_mcast;
/* shca->ib_device.process_mad = ehca_process_mad; */
shca->ib_device.process_mad = ehca_process_mad;
shca->ib_device.mmap = ehca_mmap;
if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) {
@@ -209,6 +209,10 @@ static inline int ehca_write_swqe(struct ehca_qp *qp,
ehca_gen_err("wr.ud.ah is NULL. qp=%p", qp);
return -EINVAL;
}
if (unlikely(send_wr->wr.ud.remote_qpn == 0)) {
ehca_gen_err("dest QP# is 0. qp=%x", qp->real_qp_num);
return -EINVAL;
}
my_av = container_of(send_wr->wr.ud.ah, struct ehca_av, ib_ah);
wqe_p->u.ud_av.ud_av = my_av->av;
@@ -39,12 +39,18 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <rdma/ib_mad.h>
#include "ehca_classes.h"
#include "ehca_tools.h"
#include "ehca_iverbs.h"
#include "hcp_if.h"
#define IB_MAD_STATUS_REDIRECT __constant_htons(0x0002)
#define IB_MAD_STATUS_UNSUP_VERSION __constant_htons(0x0004)
#define IB_MAD_STATUS_UNSUP_METHOD __constant_htons(0x0008)
#define IB_PMA_CLASS_PORT_INFO __constant_htons(0x0001)
/**
* ehca_define_sqp - Defines special queue pair 1 (GSI QP). When special queue
@@ -83,6 +89,9 @@ u64 ehca_define_sqp(struct ehca_shca *shca,
port, ret);
return ret;
}
shca->sport[port - 1].pma_qp_nr = pma_qp_nr;
ehca_dbg(&shca->ib_device, "port=%x pma_qp_nr=%x",
port, pma_qp_nr);
break;
default:
ehca_err(&shca->ib_device, "invalid qp_type=%x",
@@ -109,3 +118,85 @@ u64 ehca_define_sqp(struct ehca_shca *shca,
return H_SUCCESS;
}
struct ib_perf {
struct ib_mad_hdr mad_hdr;
u8 reserved[40];
u8 data[192];
} __attribute__ ((packed));
static int ehca_process_perf(struct ib_device *ibdev, u8 port_num,
struct ib_mad *in_mad, struct ib_mad *out_mad)
{
struct ib_perf *in_perf = (struct ib_perf *)in_mad;
struct ib_perf *out_perf = (struct ib_perf *)out_mad;
struct ib_class_port_info *poi =
(struct ib_class_port_info *)out_perf->data;
struct ehca_shca *shca =
container_of(ibdev, struct ehca_shca, ib_device);
struct ehca_sport *sport = &shca->sport[port_num - 1];
ehca_dbg(ibdev, "method=%x", in_perf->mad_hdr.method);
*out_mad = *in_mad;
if (in_perf->mad_hdr.class_version != 1) {
ehca_warn(ibdev, "Unsupported class_version=%x",
in_perf->mad_hdr.class_version);
out_perf->mad_hdr.status = IB_MAD_STATUS_UNSUP_VERSION;
goto perf_reply;
}
switch (in_perf->mad_hdr.method) {
case IB_MGMT_METHOD_GET:
case IB_MGMT_METHOD_SET:
/* set class port info for redirection */
out_perf->mad_hdr.attr_id = IB_PMA_CLASS_PORT_INFO;
out_perf->mad_hdr.status = IB_MAD_STATUS_REDIRECT;
memset(poi, 0, sizeof(*poi));
poi->base_version = 1;
poi->class_version = 1;
poi->resp_time_value = 18;
poi->redirect_lid = sport->saved_attr.lid;
poi->redirect_qp = sport->pma_qp_nr;
poi->redirect_qkey = IB_QP1_QKEY;
poi->redirect_pkey = IB_DEFAULT_PKEY_FULL;
ehca_dbg(ibdev, "ehca_pma_lid=%x ehca_pma_qp=%x",
sport->saved_attr.lid, sport->pma_qp_nr);
break;
case IB_MGMT_METHOD_GET_RESP:
return IB_MAD_RESULT_FAILURE;
default:
out_perf->mad_hdr.status = IB_MAD_STATUS_UNSUP_METHOD;
break;
}
perf_reply:
out_perf->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}
int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
struct ib_wc *in_wc, struct ib_grh *in_grh,
struct ib_mad *in_mad,
struct ib_mad *out_mad)
{
int ret;
if (!port_num || port_num > ibdev->phys_port_cnt)
return IB_MAD_RESULT_FAILURE;
/* accept only pma request */
if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
return IB_MAD_RESULT_SUCCESS;
ehca_dbg(ibdev, "port_num=%x src_qp=%x", port_num, in_wc->src_qp);
ret = ehca_process_perf(ibdev, port_num, in_mad, out_mad);
return ret;
}
@@ -52,7 +52,7 @@ MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
static const char mlx4_ib_version[] __devinitdata =
static const char mlx4_ib_version[] =
DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
DRV_VERSION " (" DRV_RELDATE ")\n";
@@ -468,6 +468,7 @@ static int init_node_data(struct mlx4_ib_dev *dev)
if (err)
goto out;
dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
out:
@@ -516,9 +517,16 @@ static struct class_device_attribute *mlx4_class_attributes[] = {
static void *mlx4_ib_add(struct mlx4_dev *dev)
{
static int mlx4_ib_version_printed;
struct mlx4_ib_dev *ibdev;
int i;
if (!mlx4_ib_version_printed) {
printk(KERN_INFO "%s", mlx4_ib_version);
++mlx4_ib_version_printed;
}
ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
if (!ibdev) {
dev_err(&dev->pdev->dev, "Device struct alloc failed\n");
@@ -1255,9 +1255,14 @@ int mthca_QUERY_ADAPTER(struct mthca_dev *dev,
if (err)
goto out;
MTHCA_GET(adapter->vendor_id, outbox, QUERY_ADAPTER_VENDOR_ID_OFFSET);
MTHCA_GET(adapter->device_id, outbox, QUERY_ADAPTER_DEVICE_ID_OFFSET);
MTHCA_GET(adapter->revision_id, outbox, QUERY_ADAPTER_REVISION_ID_OFFSET);
if (!mthca_is_memfree(dev)) {
MTHCA_GET(adapter->vendor_id, outbox,
QUERY_ADAPTER_VENDOR_ID_OFFSET);
MTHCA_GET(adapter->device_id, outbox,
QUERY_ADAPTER_DEVICE_ID_OFFSET);
MTHCA_GET(adapter->revision_id, outbox,
QUERY_ADAPTER_REVISION_ID_OFFSET);
}
MTHCA_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET);
get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4,
@@ -126,7 +126,7 @@ module_param_named(fmr_reserved_mtts, hca_profile.fmr_reserved_mtts, int, 0444);
MODULE_PARM_DESC(fmr_reserved_mtts,
"number of memory translation table segments reserved for FMR");
static const char mthca_version[] __devinitdata =
static char mthca_version[] __devinitdata =
DRV_NAME ": Mellanox InfiniBand HCA driver v"
DRV_VERSION " (" DRV_RELDATE ")\n";
@@ -735,7 +735,8 @@ static int mthca_init_hca(struct mthca_dev *mdev)
}
mdev->eq_table.inta_pin = adapter.inta_pin;
mdev->rev_id = adapter.revision_id;
if (!mthca_is_memfree(mdev))
mdev->rev_id = adapter.revision_id;
memcpy(mdev->board_id, adapter.board_id, sizeof mdev->board_id);
return 0;
@@ -613,8 +613,10 @@ int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
sizeof *(mr->mem.tavor.mpt) * idx;
mr->mtt = __mthca_alloc_mtt(dev, list_len, dev->mr_table.fmr_mtt_buddy);
if (IS_ERR(mr->mtt))
if (IS_ERR(mr->mtt)) {
err = PTR_ERR(mr->mtt);
goto err_out_table;
}
mtt_seg = mr->mtt->first_seg * MTHCA_MTT_SEG_SIZE;
@@ -627,8 +629,10 @@ int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
mr->mem.tavor.mtts = dev->mr_table.tavor_fmr.mtt_base + mtt_seg;
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
if (IS_ERR(mailbox))
if (IS_ERR(mailbox)) {
err = PTR_ERR(mailbox);
goto err_out_free_mtt;
}
mpt_entry = mailbox->buf;
@@ -923,17 +923,13 @@ static struct ib_mr *mthca_reg_phys_mr(struct ib_pd *pd,
struct mthca_mr *mr;
u64 *page_list;
u64 total_size;
u64 mask;
unsigned long mask;
int shift;
int npages;
int err;
int i, j, n;
/* First check that we have enough alignment */
if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK))
return ERR_PTR(-EINVAL);
mask = 0;
mask = buffer_list[0].addr ^ *iova_start;
total_size = 0;
for (i = 0; i < num_phys_buf; ++i) {
if (i != 0)
@@ -947,17 +943,7 @@ static struct ib_mr *mthca_reg_phys_mr(struct ib_pd *pd,
if (mask & ~PAGE_MASK)
return ERR_PTR(-EINVAL);
/* Find largest page shift we can use to cover buffers */
for (shift = PAGE_SHIFT; shift < 31; ++shift)
if (num_phys_buf > 1) {
if ((1ULL << shift) & mask)
break;
} else {
if (1ULL << shift >=
buffer_list[0].size +
(buffer_list[0].addr & ((1ULL << shift) - 1)))
break;
}
shift = __ffs(mask | 1 << 31);
buffer_list[0].size += buffer_list[0].addr & ((1ULL << shift) - 1);
buffer_list[0].addr &= ~0ull << shift;
@@ -1270,6 +1256,8 @@ static int mthca_init_node_data(struct mthca_dev *dev)
goto out;
}
if (mthca_is_memfree(dev))
dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
out:
@@ -1175,6 +1175,7 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
{
int ret;
int i;
struct mthca_next_seg *next;
qp->refcount = 1;
init_waitqueue_head(&qp->wait);
@@ -1217,7 +1218,6 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
}
if (mthca_is_memfree(dev)) {
struct mthca_next_seg *next;
struct mthca_data_seg *scatter;
int size = (sizeof (struct mthca_next_seg) +
qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16;
@@ -1240,6 +1240,13 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
qp->sq.wqe_shift) +
qp->send_wqe_offset);
}
} else {
for (i = 0; i < qp->rq.max; ++i) {
next = get_recv_wqe(qp, i);
next->nda_op = htonl((((i + 1) % qp->rq.max) <<
qp->rq.wqe_shift) | 1);
}
}
qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);
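To make the pre-linked nda_op values concrete (editor's sketch; the queue geometry is hypothetical): with rq.max = 4 receive WQEs and wqe_shift = 6 (a 64-byte WQE stride), the loop added above links WQE 0 -> 1 -> 2 -> 3 -> 0, storing each successor's byte offset within the receive queue; the low-order bit is set exactly as the driver code does, without interpreting it here.
#include <stdio.h>

int main(void)
{
	unsigned int max = 4, wqe_shift = 6, i;

	for (i = 0; i < max; ++i)
		printf("WQE %u -> nda_op 0x%08x (next WQE %u)\n",
		       i, (((i + 1) % max) << wqe_shift) | 1, (i + 1) % max);
	return 0;
}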
@@ -1863,7 +1870,6 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
prev_wqe = qp->rq.last;
qp->rq.last = wqe;
((struct mthca_next_seg *) wqe)->nda_op = 0;
((struct mthca_next_seg *) wqe)->ee_nds =
cpu_to_be32(MTHCA_NEXT_DBD);
((struct mthca_next_seg *) wqe)->flags = 0;
@@ -1885,9 +1891,6 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
qp->wrid[ind] = wr->wr_id;
((struct mthca_next_seg *) prev_wqe)->nda_op =
cpu_to_be32((ind << qp->rq.wqe_shift) | 1);
wmb();
((struct mthca_next_seg *) prev_wqe)->ee_nds =
cpu_to_be32(MTHCA_NEXT_DBD | size);
@@ -175,9 +175,17 @@ static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd,
* scatter list L_Keys to the sentry value of 0x100.
*/
for (i = 0; i < srq->max; ++i) {
wqe = get_wqe(srq, i);
struct mthca_next_seg *next;
*wqe_to_link(wqe) = i < srq->max - 1 ? i + 1 : -1;
next = wqe = get_wqe(srq, i);
if (i < srq->max - 1) {
*wqe_to_link(wqe) = i + 1;
next->nda_op = htonl(((i + 1) << srq->wqe_shift) | 1);
} else {
*wqe_to_link(wqe) = -1;
next->nda_op = 0;
}
for (scatter = wqe + sizeof (struct mthca_next_seg);
(void *) scatter < wqe + (1 << srq->wqe_shift);
@@ -470,16 +478,15 @@ void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr)
{
int ind;
struct mthca_next_seg *last_free;
ind = wqe_addr >> srq->wqe_shift;
spin_lock(&srq->lock);
if (likely(srq->first_free >= 0))
*wqe_to_link(get_wqe(srq, srq->last_free)) = ind;
else
srq->first_free = ind;
last_free = get_wqe(srq, srq->last_free);
*wqe_to_link(last_free) = ind;
last_free->nda_op = htonl((ind << srq->wqe_shift) | 1);
*wqe_to_link(get_wqe(srq, ind)) = -1;
srq->last_free = ind;
@@ -506,15 +513,7 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
first_ind = srq->first_free;
for (nreq = 0; wr; wr = wr->next) {
ind = srq->first_free;
if (unlikely(ind < 0)) {
mthca_err(dev, "SRQ %06x full\n", srq->srqn);
err = -ENOMEM;
*bad_wr = wr;
break;
}
ind = srq->first_free;
wqe = get_wqe(srq, ind);
next_ind = *wqe_to_link(wqe);
@@ -528,7 +527,6 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
prev_wqe = srq->last;
srq->last = wqe;
((struct mthca_next_seg *) wqe)->nda_op = 0;
((struct mthca_next_seg *) wqe)->ee_nds = 0;
/* flags field will always remain 0 */
@@ -549,9 +547,6 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
if (i < srq->max_gs)
mthca_set_data_seg_inval(wqe);
((struct mthca_next_seg *) prev_wqe)->nda_op =
cpu_to_be32((ind << srq->wqe_shift) | 1);
wmb();
((struct mthca_next_seg *) prev_wqe)->ee_nds =
cpu_to_be32(MTHCA_NEXT_DBD);
@@ -614,15 +609,7 @@ int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
spin_lock_irqsave(&srq->lock, flags);
for (nreq = 0; wr; ++nreq, wr = wr->next) {
ind = srq->first_free;
if (unlikely(ind < 0)) {
mthca_err(dev, "SRQ %06x full\n", srq->srqn);
err = -ENOMEM;
*bad_wr = wr;
break;
}
ind = srq->first_free;
wqe = get_wqe(srq, ind);
next_ind = *wqe_to_link(wqe);
@@ -633,8 +620,6 @@ int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
break;
}
((struct mthca_next_seg *) wqe)->nda_op =
cpu_to_be32((next_ind << srq->wqe_shift) | 1);
((struct mthca_next_seg *) wqe)->ee_nds = 0;
/* flags field will always remain 0 */
config INFINIBAND_NES
tristate "NetEffect RNIC Driver"
depends on PCI && INET && INFINIBAND
select LIBCRC32C
---help---
This is a low-level driver for NetEffect RDMA enabled
Network Interface Cards (RNIC).
config INFINIBAND_NES_DEBUG
bool "Verbose debugging output"
depends on INFINIBAND_NES
default n
---help---
This option causes the NetEffect RNIC driver to produce debug
messages. Select this if you are developing the driver
or trying to diagnose a problem.
obj-$(CONFIG_INFINIBAND_NES) += iw_nes.o
iw_nes-objs := nes.o nes_hw.o nes_nic.o nes_utils.o nes_verbs.o nes_cm.o
/*
* Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/if_arp.h>
#include <linux/highmem.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_pack.h>
#include <rdma/iw_cm.h>
#include "nes.h"
#include <net/netevent.h>
#include <net/neighbour.h>
#include <linux/route.h>
#include <net/ip_fib.h>
MODULE_AUTHOR("NetEffect");
MODULE_DESCRIPTION("NetEffect RNIC Low-level iWARP Driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
int max_mtu = 9000;
int nics_per_function = 1;
int interrupt_mod_interval = 0;
/* Interoperability */
int mpa_version = 1;
module_param(mpa_version, int, 0);
MODULE_PARM_DESC(mpa_version, "MPA version to be used in MPA Req/Resp (0 or 1)");
/* Interoperability */
int disable_mpa_crc = 0;
module_param(disable_mpa_crc, int, 0);
MODULE_PARM_DESC(disable_mpa_crc, "Disable checking of MPA CRC");
unsigned int send_first = 0;
module_param(send_first, int, 0);
MODULE_PARM_DESC(send_first, "Send RDMA Message First on Active Connection");
unsigned int nes_drv_opt = 0;
module_param(nes_drv_opt, int, 0);
MODULE_PARM_DESC(nes_drv_opt, "Driver option parameters");
unsigned int nes_debug_level = 0;
module_param_named(debug_level, nes_debug_level, uint, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug output level");
LIST_HEAD(nes_adapter_list);
LIST_HEAD(nes_dev_list);
atomic_t qps_destroyed;
atomic_t cqp_reqs_allocated;
atomic_t cqp_reqs_freed;
atomic_t cqp_reqs_dynallocated;
atomic_t cqp_reqs_dynfreed;
atomic_t cqp_reqs_queued;
atomic_t cqp_reqs_redriven;
static void nes_print_macaddr(struct net_device *netdev);
static irqreturn_t nes_interrupt(int, void *);
static int __devinit nes_probe(struct pci_dev *, const struct pci_device_id *);
static void __devexit nes_remove(struct pci_dev *);
static int __init nes_init_module(void);
static void __exit nes_exit_module(void);
static unsigned int ee_flsh_adapter;
static unsigned int sysfs_nonidx_addr;
static unsigned int sysfs_idx_addr;
static struct pci_device_id nes_pci_table[] = {
{PCI_VENDOR_ID_NETEFFECT, PCI_DEVICE_ID_NETEFFECT_NE020, PCI_ANY_ID, PCI_ANY_ID},
{0}
};
MODULE_DEVICE_TABLE(pci, nes_pci_table);
static int nes_inetaddr_event(struct notifier_block *, unsigned long, void *);
static int nes_net_event(struct notifier_block *, unsigned long, void *);
static int nes_notifiers_registered;
static struct notifier_block nes_inetaddr_notifier = {
.notifier_call = nes_inetaddr_event
};
static struct notifier_block nes_net_notifier = {
.notifier_call = nes_net_event
};
/**
* nes_inetaddr_event
*/
static int nes_inetaddr_event(struct notifier_block *notifier,
unsigned long event, void *ptr)
{
struct in_ifaddr *ifa = ptr;
struct net_device *event_netdev = ifa->ifa_dev->dev;
struct nes_device *nesdev;
struct net_device *netdev;
struct nes_vnic *nesvnic;
unsigned int addr;
unsigned int mask;
addr = ntohl(ifa->ifa_address);
mask = ntohl(ifa->ifa_mask);
nes_debug(NES_DBG_NETDEV, "nes_inetaddr_event: ip address %08X, netmask %08X.\n",
addr, mask);
list_for_each_entry(nesdev, &nes_dev_list, list) {
nes_debug(NES_DBG_NETDEV, "Nesdev list entry = 0x%p. (%s)\n",
nesdev, nesdev->netdev[0]->name);
netdev = nesdev->netdev[0];
nesvnic = netdev_priv(netdev);
if (netdev == event_netdev) {
if (nesvnic->rdma_enabled == 0) {
nes_debug(NES_DBG_NETDEV, "Returning without processing event for %s since"
" RDMA is not enabled.\n",
netdev->name);
return NOTIFY_OK;
}
/* we have ifa->ifa_address/mask here if we need it */
switch (event) {
case NETDEV_DOWN:
nes_debug(NES_DBG_NETDEV, "event:DOWN\n");
nes_write_indexed(nesdev,
NES_IDX_DST_IP_ADDR+(0x10*PCI_FUNC(nesdev->pcidev->devfn)), 0);
nes_manage_arp_cache(netdev, netdev->dev_addr,
ntohl(nesvnic->local_ipaddr), NES_ARP_DELETE);
nesvnic->local_ipaddr = 0;
return NOTIFY_OK;
break;
case NETDEV_UP:
nes_debug(NES_DBG_NETDEV, "event:UP\n");
if (nesvnic->local_ipaddr != 0) {
nes_debug(NES_DBG_NETDEV, "Interface already has local_ipaddr\n");
return NOTIFY_OK;
}
/* Add the address to the IP table */
nesvnic->local_ipaddr = ifa->ifa_address;
nes_write_indexed(nesdev,
NES_IDX_DST_IP_ADDR+(0x10*PCI_FUNC(nesdev->pcidev->devfn)),
ntohl(ifa->ifa_address));
nes_manage_arp_cache(netdev, netdev->dev_addr,
ntohl(nesvnic->local_ipaddr), NES_ARP_ADD);
return NOTIFY_OK;
break;
default:
break;
}
}
}
return NOTIFY_DONE;
}
/**
* nes_net_event
*/
static int nes_net_event(struct notifier_block *notifier,
unsigned long event, void *ptr)
{
struct neighbour *neigh = ptr;
struct nes_device *nesdev;
struct net_device *netdev;
struct nes_vnic *nesvnic;
switch (event) {
case NETEVENT_NEIGH_UPDATE:
list_for_each_entry(nesdev, &nes_dev_list, list) {
/* nes_debug(NES_DBG_NETDEV, "Nesdev list entry = 0x%p.\n", nesdev); */
netdev = nesdev->netdev[0];
nesvnic = netdev_priv(netdev);
if (netdev == neigh->dev) {
if (nesvnic->rdma_enabled == 0) {
nes_debug(NES_DBG_NETDEV, "Skipping device %s since no RDMA\n",
netdev->name);
} else {
if (neigh->nud_state & NUD_VALID) {
nes_manage_arp_cache(neigh->dev, neigh->ha,
ntohl(*(__be32 *)neigh->primary_key), NES_ARP_ADD);
} else {
nes_manage_arp_cache(neigh->dev, neigh->ha,
ntohl(*(__be32 *)neigh->primary_key), NES_ARP_DELETE);
}
}
return NOTIFY_OK;
}
}
break;
default:
nes_debug(NES_DBG_NETDEV, "NETEVENT_ %lu undefined\n", event);
break;
}
return NOTIFY_DONE;
}
/**
* nes_add_ref
*/
void nes_add_ref(struct ib_qp *ibqp)
{
struct nes_qp *nesqp;
nesqp = to_nesqp(ibqp);
nes_debug(NES_DBG_QP, "Bumping refcount for QP%u. Pre-inc value = %u\n",
ibqp->qp_num, atomic_read(&nesqp->refcount));
atomic_inc(&nesqp->refcount);
}
static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_request *cqp_request)
{
unsigned long flags;
struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
struct nes_adapter *nesadapter = nesdev->nesadapter;
u32 qp_id;
atomic_inc(&qps_destroyed);
/* Free the control structures */
qp_id = nesqp->hwqp.qp_id;
if (nesqp->pbl_vbase) {
pci_free_consistent(nesdev->pcidev, nesqp->qp_mem_size,
nesqp->hwqp.q2_vbase, nesqp->hwqp.q2_pbase);
spin_lock_irqsave(&nesadapter->pbl_lock, flags);
nesadapter->free_256pbl++;
spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
pci_free_consistent(nesdev->pcidev, 256, nesqp->pbl_vbase, nesqp->pbl_pbase);
nesqp->pbl_vbase = NULL;
} else {
pci_free_consistent(nesdev->pcidev, nesqp->qp_mem_size,
nesqp->hwqp.sq_vbase, nesqp->hwqp.sq_pbase);
}
nes_free_resource(nesadapter, nesadapter->allocated_qps, nesqp->hwqp.qp_id);
kfree(nesqp->allocated_buffer);
}
/**
* nes_rem_ref
*/
void nes_rem_ref(struct ib_qp *ibqp)
{
u64 u64temp;
struct nes_qp *nesqp;
struct nes_vnic *nesvnic = to_nesvnic(ibqp->device);
struct nes_device *nesdev = nesvnic->nesdev;
struct nes_adapter *nesadapter = nesdev->nesadapter;
struct nes_hw_cqp_wqe *cqp_wqe;
struct nes_cqp_request *cqp_request;
u32 opcode;
nesqp = to_nesqp(ibqp);
if (atomic_read(&nesqp->refcount) == 0) {
printk(KERN_INFO PFX "%s: Reference count already 0 for QP%d, last aeq = 0x%04X.\n",
__FUNCTION__, ibqp->qp_num, nesqp->last_aeq);
BUG();
}
if (atomic_dec_and_test(&nesqp->refcount)) {
nesadapter->qp_table[nesqp->hwqp.qp_id-NES_FIRST_QPN] = NULL;
/* Destroy the QP */
cqp_request = nes_get_cqp_request(nesdev);
if (cqp_request == NULL) {
nes_debug(NES_DBG_QP, "Failed to get a cqp_request.\n");
return;
}
cqp_request->waiting = 0;
cqp_request->callback = 1;
cqp_request->cqp_callback = nes_cqp_rem_ref_callback;
cqp_request->cqp_callback_pointer = nesqp;
cqp_wqe = &cqp_request->cqp_wqe;
nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
opcode = NES_CQP_DESTROY_QP | NES_CQP_QP_TYPE_IWARP;
if (nesqp->hte_added) {
opcode |= NES_CQP_QP_DEL_HTE;
nesqp->hte_added = 0;
}
set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, opcode);
set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, nesqp->hwqp.qp_id);
u64temp = (u64)nesqp->nesqp_context_pbase;
set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, u64temp);
nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
}
}
/**
* nes_get_qp
*/
struct ib_qp *nes_get_qp(struct ib_device *device, int qpn)
{
struct nes_vnic *nesvnic = to_nesvnic(device);
struct nes_device *nesdev = nesvnic->nesdev;
struct nes_adapter *nesadapter = nesdev->nesadapter;
if ((qpn < NES_FIRST_QPN) || (qpn >= (NES_FIRST_QPN + nesadapter->max_qp)))
return NULL;
return &nesadapter->qp_table[qpn - NES_FIRST_QPN]->ibqp;
}
/**
* nes_print_macaddr
*/
static void nes_print_macaddr(struct net_device *netdev)
{
nes_debug(NES_DBG_INIT, "%s: MAC %02X:%02X:%02X:%02X:%02X:%02X, IRQ %u\n",
netdev->name,
netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5],
netdev->irq);
}
/**
* nes_interrupt - handle interrupts
*/
static irqreturn_t nes_interrupt(int irq, void *dev_id)
{
struct nes_device *nesdev = (struct nes_device *)dev_id;
int handled = 0;
u32 int_mask;
u32 int_req;
u32 int_stat;
u32 intf_int_stat;
u32 timer_stat;
if (nesdev->msi_enabled) {
/* No need to read the interrupt pending register if msi is enabled */
handled = 1;
} else {
if (unlikely(nesdev->nesadapter->hw_rev == NE020_REV)) {
/* Master interrupt enable provides synchronization for kicking off bottom half
when interrupt sharing is going on */
int_mask = nes_read32(nesdev->regs + NES_INT_MASK);
if (int_mask & 0x80000000) {
/* Check interrupt status to see if this might be ours */
int_stat = nes_read32(nesdev->regs + NES_INT_STAT);
int_req = nesdev->int_req;
if (int_stat&int_req) {
/* if interesting CEQ or AEQ is pending, claim the interrupt */
if ((int_stat&int_req) & (~(NES_INT_TIMER|NES_INT_INTF))) {
handled = 1;
} else {
if (((int_stat & int_req) & NES_INT_TIMER) == NES_INT_TIMER) {
/* Timer might be running but might be for another function */
timer_stat = nes_read32(nesdev->regs + NES_TIMER_STAT);
if ((timer_stat & nesdev->timer_int_req) != 0) {
handled = 1;
}
}
if ((((int_stat & int_req) & NES_INT_INTF) == NES_INT_INTF) &&
(handled == 0)) {
intf_int_stat = nes_read32(nesdev->regs+NES_INTF_INT_STAT);
if ((intf_int_stat & nesdev->intf_int_req) != 0) {
handled = 1;
}
}
}
if (handled) {
nes_write32(nesdev->regs+NES_INT_MASK, int_mask & (~0x80000000));
int_mask = nes_read32(nesdev->regs+NES_INT_MASK);
/* Save off the status to save an additional read */
nesdev->int_stat = int_stat;
nesdev->napi_isr_ran = 1;
}
}
}
} else {
handled = nes_read32(nesdev->regs+NES_INT_PENDING);
}
}
if (handled) {
if (nes_napi_isr(nesdev) == 0) {
tasklet_schedule(&nesdev->dpc_tasklet);
}
return IRQ_HANDLED;
} else {
return IRQ_NONE;
}
}
/**
* nes_probe - Device initialization
*/
static int __devinit nes_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
{
struct net_device *netdev = NULL;
struct nes_device *nesdev = NULL;
int ret = 0;
struct nes_vnic *nesvnic = NULL;
void __iomem *mmio_regs = NULL;
u8 hw_rev;
assert(pcidev != NULL);
assert(ent != NULL);
printk(KERN_INFO PFX "NetEffect RNIC driver v%s loading. (%s)\n",
DRV_VERSION, pci_name(pcidev));
ret = pci_enable_device(pcidev);
if (ret) {
printk(KERN_ERR PFX "Unable to enable PCI device. (%s)\n", pci_name(pcidev));
goto bail0;
}
nes_debug(NES_DBG_INIT, "BAR0 (@0x%08lX) size = 0x%lX bytes\n",
(long unsigned int)pci_resource_start(pcidev, BAR_0),
(long unsigned int)pci_resource_len(pcidev, BAR_0));
nes_debug(NES_DBG_INIT, "BAR1 (@0x%08lX) size = 0x%lX bytes\n",
(long unsigned int)pci_resource_start(pcidev, BAR_1),
(long unsigned int)pci_resource_len(pcidev, BAR_1));
/* Make sure PCI base addr are MMIO */
if (!(pci_resource_flags(pcidev, BAR_0) & IORESOURCE_MEM) ||
!(pci_resource_flags(pcidev, BAR_1) & IORESOURCE_MEM)) {
printk(KERN_ERR PFX "PCI regions not an MMIO resource\n");
ret = -ENODEV;
goto bail1;
}
/* Reserve PCI I/O and memory resources */
ret = pci_request_regions(pcidev, DRV_NAME);
if (ret) {
printk(KERN_ERR PFX "Unable to request regions. (%s)\n", pci_name(pcidev));
goto bail1;
}
if ((sizeof(dma_addr_t) > 4)) {
ret = pci_set_dma_mask(pcidev, DMA_64BIT_MASK);
if (ret < 0) {
printk(KERN_ERR PFX "64b DMA mask configuration failed\n");
goto bail2;
}
ret = pci_set_consistent_dma_mask(pcidev, DMA_64BIT_MASK);
if (ret) {
printk(KERN_ERR PFX "64b DMA consistent mask configuration failed\n");
goto bail2;
}
} else {
ret = pci_set_dma_mask(pcidev, DMA_32BIT_MASK);
if (ret < 0) {
printk(KERN_ERR PFX "32b DMA mask configuration failed\n");
goto bail2;
}
ret = pci_set_consistent_dma_mask(pcidev, DMA_32BIT_MASK);
if (ret) {
printk(KERN_ERR PFX "32b DMA consistent mask configuration failed\n");
goto bail2;
}
}
pci_set_master(pcidev);
/* Allocate hardware structure */
nesdev = kzalloc(sizeof(struct nes_device), GFP_KERNEL);
if (!nesdev) {
printk(KERN_ERR PFX "%s: Unable to alloc hardware struct\n", pci_name(pcidev));
ret = -ENOMEM;
goto bail2;
}
nes_debug(NES_DBG_INIT, "Allocated nes device at %p\n", nesdev);
nesdev->pcidev = pcidev;
pci_set_drvdata(pcidev, nesdev);
pci_read_config_byte(pcidev, 0x0008, &hw_rev);
nes_debug(NES_DBG_INIT, "hw_rev=%u\n", hw_rev);
spin_lock_init(&nesdev->indexed_regs_lock);
/* Remap the PCI registers in adapter BAR0 to kernel VA space */
mmio_regs = ioremap_nocache(pci_resource_start(pcidev, BAR_0), sizeof(mmio_regs));
if (mmio_regs == NULL) {
printk(KERN_ERR PFX "Unable to remap BAR0\n");
ret = -EIO;
goto bail3;
}
nesdev->regs = mmio_regs;
nesdev->index_reg = 0x50 + (PCI_FUNC(pcidev->devfn)*8) + mmio_regs;
/* Ensure interrupts are disabled */
nes_write32(nesdev->regs+NES_INT_MASK, 0x7fffffff);
if (nes_drv_opt & NES_DRV_OPT_ENABLE_MSI) {
if (!pci_enable_msi(nesdev->pcidev)) {
nesdev->msi_enabled = 1;
nes_debug(NES_DBG_INIT, "MSI is enabled for device %s\n",
pci_name(pcidev));
} else {
nes_debug(NES_DBG_INIT, "MSI is disabled by linux for device %s\n",
pci_name(pcidev));
}
} else {
nes_debug(NES_DBG_INIT, "MSI not requested due to driver options for device %s\n",
pci_name(pcidev));
}
nesdev->csr_start = pci_resource_start(nesdev->pcidev, BAR_0);
nesdev->doorbell_region = pci_resource_start(nesdev->pcidev, BAR_1);
/* Init the adapter */
nesdev->nesadapter = nes_init_adapter(nesdev, hw_rev);
if (!nesdev->nesadapter) {
printk(KERN_ERR PFX "Unable to initialize adapter.\n");
ret = -ENOMEM;
goto bail5;
}
nesdev->nesadapter->et_rx_coalesce_usecs_irq = interrupt_mod_interval;
/* nesdev->base_doorbell_index =
nesdev->nesadapter->pd_config_base[PCI_FUNC(nesdev->pcidev->devfn)]; */
nesdev->base_doorbell_index = 1;
nesdev->doorbell_start = nesdev->nesadapter->doorbell_start;
nesdev->mac_index = PCI_FUNC(nesdev->pcidev->devfn) % nesdev->nesadapter->port_count;
tasklet_init(&nesdev->dpc_tasklet, nes_dpc, (unsigned long)nesdev);
/* bring up the Control QP */
if (nes_init_cqp(nesdev)) {
ret = -ENODEV;
goto bail6;
}
/* Arm the CCQ */
nes_write32(nesdev->regs+NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT |
PCI_FUNC(nesdev->pcidev->devfn));
nes_read32(nesdev->regs+NES_CQE_ALLOC);
/* Enable the interrupts */
nesdev->int_req = (0x101 << PCI_FUNC(nesdev->pcidev->devfn)) |
(1 << (PCI_FUNC(nesdev->pcidev->devfn)+16));
if (PCI_FUNC(nesdev->pcidev->devfn) < 4) {
nesdev->int_req |= (1 << (PCI_FUNC(nesdev->pcidev->devfn)+24));
}
/* TODO: This really should be the first driver to load, not function 0 */
if (PCI_FUNC(nesdev->pcidev->devfn) == 0) {
/* pick up PCI and critical errors if the first driver to load */
nesdev->intf_int_req = NES_INTF_INT_PCIERR | NES_INTF_INT_CRITERR;
nesdev->int_req |= NES_INT_INTF;
} else {
nesdev->intf_int_req = 0;
}
nesdev->intf_int_req |= (1 << (PCI_FUNC(nesdev->pcidev->devfn)+16));
nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_MASKS0, 0);
nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_MASKS1, 0);
nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_MASKS2, 0x00001265);
nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_MASKS4, 0x18021804);
nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_MASKS3, 0x17801790);
/* deal with both periodic and one_shot */
nesdev->timer_int_req = 0x101 << PCI_FUNC(nesdev->pcidev->devfn);
nesdev->nesadapter->timer_int_req |= nesdev->timer_int_req;
nes_debug(NES_DBG_INIT, "setting int_req for function %u, nesdev = 0x%04X, adapter = 0x%04X\n",
PCI_FUNC(nesdev->pcidev->devfn),
nesdev->timer_int_req, nesdev->nesadapter->timer_int_req);
nes_write32(nesdev->regs+NES_INTF_INT_MASK, ~(nesdev->intf_int_req));
list_add_tail(&nesdev->list, &nes_dev_list);
/* Request an interrupt line for the driver */
ret = request_irq(pcidev->irq, nes_interrupt, IRQF_SHARED, DRV_NAME, nesdev);
if (ret) {
printk(KERN_ERR PFX "%s: requested IRQ %u is busy\n",
pci_name(pcidev), pcidev->irq);
goto bail65;
}
nes_write32(nesdev->regs+NES_INT_MASK, ~nesdev->int_req);
if (nes_notifiers_registered == 0) {
register_inetaddr_notifier(&nes_inetaddr_notifier);
register_netevent_notifier(&nes_net_notifier);
}
nes_notifiers_registered++;
/* Initialize network devices */
if ((netdev = nes_netdev_init(nesdev, mmio_regs)) == NULL) {
goto bail7;
}
/* Register network device */
ret = register_netdev(netdev);
if (ret) {
printk(KERN_ERR PFX "Unable to register netdev, ret = %d\n", ret);
nes_netdev_destroy(netdev);
goto bail7;
}
nes_print_macaddr(netdev);
/* create a CM core for this netdev */
nesvnic = netdev_priv(netdev);
nesdev->netdev_count++;
nesdev->nesadapter->netdev_count++;
printk(KERN_ERR PFX "%s: NetEffect RNIC driver successfully loaded.\n",
pci_name(pcidev));
return 0;
bail7:
printk(KERN_ERR PFX "bail7\n");
while (nesdev->netdev_count > 0) {
nesdev->netdev_count--;
nesdev->nesadapter->netdev_count--;
unregister_netdev(nesdev->netdev[nesdev->netdev_count]);
nes_netdev_destroy(nesdev->netdev[nesdev->netdev_count]);
}
nes_debug(NES_DBG_INIT, "netdev_count=%d, nesadapter->netdev_count=%d\n",
nesdev->netdev_count, nesdev->nesadapter->netdev_count);
nes_notifiers_registered--;
if (nes_notifiers_registered == 0) {
unregister_netevent_notifier(&nes_net_notifier);
unregister_inetaddr_notifier(&nes_inetaddr_notifier);
}
list_del(&nesdev->list);
nes_destroy_cqp(nesdev);
bail65:
printk(KERN_ERR PFX "bail65\n");
free_irq(pcidev->irq, nesdev);
if (nesdev->msi_enabled) {
pci_disable_msi(pcidev);
}
bail6:
printk(KERN_ERR PFX "bail6\n");
tasklet_kill(&nesdev->dpc_tasklet);
/* Deallocate the Adapter Structure */
nes_destroy_adapter(nesdev->nesadapter);
bail5:
printk(KERN_ERR PFX "bail5\n");
iounmap(nesdev->regs);
bail3:
printk(KERN_ERR PFX "bail3\n");
kfree(nesdev);
bail2:
pci_release_regions(pcidev);
bail1:
pci_disable_device(pcidev);
bail0:
return ret;
}
/**
* nes_remove - unload from kernel
*/
static void __devexit nes_remove(struct pci_dev *pcidev)
{
struct nes_device *nesdev = pci_get_drvdata(pcidev);
struct net_device *netdev;
int netdev_index = 0;
if (nesdev->netdev_count) {
netdev = nesdev->netdev[netdev_index];
if (netdev) {
netif_stop_queue(netdev);
unregister_netdev(netdev);
nes_netdev_destroy(netdev);
nesdev->netdev[netdev_index] = NULL;
nesdev->netdev_count--;
nesdev->nesadapter->netdev_count--;
}
}
nes_notifiers_registered--;
if (nes_notifiers_registered == 0) {
unregister_netevent_notifier(&nes_net_notifier);
unregister_inetaddr_notifier(&nes_inetaddr_notifier);
}
list_del(&nesdev->list);
nes_destroy_cqp(nesdev);
tasklet_kill(&nesdev->dpc_tasklet);
/* Deallocate the Adapter Structure */
nes_destroy_adapter(nesdev->nesadapter);
free_irq(pcidev->irq, nesdev);
if (nesdev->msi_enabled) {
pci_disable_msi(pcidev);
}
iounmap(nesdev->regs);
kfree(nesdev);
/* nes_debug(NES_DBG_SHUTDOWN, "calling pci_release_regions.\n"); */
pci_release_regions(pcidev);
pci_disable_device(pcidev);
pci_set_drvdata(pcidev, NULL);
}
static struct pci_driver nes_pci_driver = {
.name = DRV_NAME,
.id_table = nes_pci_table,
.probe = nes_probe,
.remove = __devexit_p(nes_remove),
};
static ssize_t nes_show_adapter(struct device_driver *ddp, char *buf)
{
unsigned int devfn = 0xffffffff;
unsigned char bus_number = 0xff;
unsigned int i = 0;
struct nes_device *nesdev;
list_for_each_entry(nesdev, &nes_dev_list, list) {
if (i == ee_flsh_adapter) {
devfn = nesdev->nesadapter->devfn;
bus_number = nesdev->nesadapter->bus_number;
break;
}
i++;
}
return snprintf(buf, PAGE_SIZE, "%x:%x", bus_number, devfn);
}
static ssize_t nes_store_adapter(struct device_driver *ddp,
const char *buf, size_t count)
{
char *p = (char *)buf;
ee_flsh_adapter = simple_strtoul(p, &p, 10);
return strnlen(buf, count);
}
static ssize_t nes_show_ee_cmd(struct device_driver *ddp, char *buf)
{
u32 eeprom_cmd = 0xdead;
u32 i = 0;
struct nes_device *nesdev;
list_for_each_entry(nesdev, &nes_dev_list, list) {
if (i == ee_flsh_adapter) {
eeprom_cmd = nes_read32(nesdev->regs + NES_EEPROM_COMMAND);
break;
}
i++;
}
return snprintf(buf, PAGE_SIZE, "0x%x\n", eeprom_cmd);
}
static ssize_t nes_store_ee_cmd(struct device_driver *ddp,
const char *buf, size_t count)
{
char *p = (char *)buf;
u32 val;
u32 i = 0;
struct nes_device *nesdev;
if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
val = simple_strtoul(p, &p, 16);
list_for_each_entry(nesdev, &nes_dev_list, list) {
if (i == ee_flsh_adapter) {
nes_write32(nesdev->regs + NES_EEPROM_COMMAND, val);
break;
}
i++;
}
}
return strnlen(buf, count);
}
static ssize_t nes_show_ee_data(struct device_driver *ddp, char *buf)
{
u32 eeprom_data = 0xdead;
u32 i = 0;
struct nes_device *nesdev;
list_for_each_entry(nesdev, &nes_dev_list, list) {
if (i == ee_flsh_adapter) {
eeprom_data = nes_read32(nesdev->regs + NES_EEPROM_DATA);
break;
}
i++;
}
return snprintf(buf, PAGE_SIZE, "0x%x\n", eeprom_data);
}
static ssize_t nes_store_ee_data(struct device_driver *ddp,
const char *buf, size_t count)
{
char *p = (char *)buf;
u32 val;
u32 i = 0;
struct nes_device *nesdev;
if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
val = simple_strtoul(p, &p, 16);
list_for_each_entry(nesdev, &nes_dev_list, list) {
if (i == ee_flsh_adapter) {
nes_write32(nesdev->regs + NES_EEPROM_DATA, val);
break;
}
i++;
}
}
return strnlen(buf, count);
}
static ssize_t nes_show_flash_cmd(struct device_driver *ddp, char *buf)
{
u32 flash_cmd = 0xdead;
u32 i = 0;
struct nes_device *nesdev;
list_for_each_entry(nesdev, &nes_dev_list, list) {
if (i == ee_flsh_adapter) {
flash_cmd = nes_read32(nesdev->regs + NES_FLASH_COMMAND);
break;
}
i++;
}
return snprintf(buf, PAGE_SIZE, "0x%x\n", flash_cmd);
}
static ssize_t nes_store_flash_cmd(struct device_driver *ddp,
const char *buf, size_t count)
{
char *p = (char *)buf;
u32 val;
u32 i = 0;
struct nes_device *nesdev;
if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
val = simple_strtoul(p, &p, 16);
list_for_each_entry(nesdev, &nes_dev_list, list) {
if (i == ee_flsh_adapter) {
nes_write32(nesdev->regs + NES_FLASH_COMMAND, val);
break;
}
i++;
}
}
return strnlen(buf, count);
}
static ssize_t nes_show_flash_data(struct device_driver *ddp, char *buf)
{
u32 flash_data = 0xdead;
u32 i = 0;
struct nes_device *nesdev;
list_for_each_entry(nesdev, &nes_dev_list, list) {
if (i == ee_flsh_adapter) {
flash_data = nes_read32(nesdev->regs + NES_FLASH_DATA);
break;
}
i++;
}
return snprintf(buf, PAGE_SIZE, "0x%x\n", flash_data);
}
static ssize_t nes_store_flash_data(struct device_driver *ddp,
const char *buf, size_t count)
{
char *p = (char *)buf;
u32 val;
u32 i = 0;
struct nes_device *nesdev;
if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
val = simple_strtoul(p, &p, 16);
list_for_each_entry(nesdev, &nes_dev_list, list) {
if (i == ee_flsh_adapter) {
nes_write32(nesdev->regs + NES_FLASH_DATA, val);
break;
}
i++;
}
}
return strnlen(buf, count);
}
static ssize_t nes_show_nonidx_addr(struct device_driver *ddp, char *buf)
{
return snprintf(buf, PAGE_SIZE, "0x%x\n", sysfs_nonidx_addr);
}
static ssize_t nes_store_nonidx_addr(struct device_driver *ddp,
const char *buf, size_t count)
{
char *p = (char *)buf;
if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X')
sysfs_nonidx_addr = simple_strtoul(p, &p, 16);
return strnlen(buf, count);
}
static ssize_t nes_show_nonidx_data(struct device_driver *ddp, char *buf)
{
u32 nonidx_data = 0xdead;
u32 i = 0;
struct nes_device *nesdev;
list_for_each_entry(nesdev, &nes_dev_list, list) {
if (i == ee_flsh_adapter) {
nonidx_data = nes_read32(nesdev->regs + sysfs_nonidx_addr);
break;
}
i++;
}
return snprintf(buf, PAGE_SIZE, "0x%x\n", nonidx_data);
}
static ssize_t nes_store_nonidx_data(struct device_driver *ddp,
const char *buf, size_t count)
{
char *p = (char *)buf;
u32 val;
u32 i = 0;
struct nes_device *nesdev;
if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
val = simple_strtoul(p, &p, 16);
list_for_each_entry(nesdev, &nes_dev_list, list) {
if (i == ee_flsh_adapter) {
nes_write32(nesdev->regs + sysfs_nonidx_addr, val);
break;
}
i++;
}
}
return strnlen(buf, count);
}
static ssize_t nes_show_idx_addr(struct device_driver *ddp, char *buf)
{
return snprintf(buf, PAGE_SIZE, "0x%x\n", sysfs_idx_addr);
}
static ssize_t nes_store_idx_addr(struct device_driver *ddp,
const char *buf, size_t count)
{
char *p = (char *)buf;
if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X')
sysfs_idx_addr = simple_strtoul(p, &p, 16);
return strnlen(buf, count);
}
static ssize_t nes_show_idx_data(struct device_driver *ddp, char *buf)
{
u32 idx_data = 0xdead;
u32 i = 0;
struct nes_device *nesdev;
list_for_each_entry(nesdev, &nes_dev_list, list) {
if (i == ee_flsh_adapter) {
idx_data = nes_read_indexed(nesdev, sysfs_idx_addr);
break;
}
i++;
}
return snprintf(buf, PAGE_SIZE, "0x%x\n", idx_data);
}
static ssize_t nes_store_idx_data(struct device_driver *ddp,
const char *buf, size_t count)
{
char *p = (char *)buf;
u32 val;
u32 i = 0;
struct nes_device *nesdev;
if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
val = simple_strtoul(p, &p, 16);
list_for_each_entry(nesdev, &nes_dev_list, list) {
if (i == ee_flsh_adapter) {
nes_write_indexed(nesdev, sysfs_idx_addr, val);
break;
}
i++;
}
}
return strnlen(buf, count);
}
static DRIVER_ATTR(adapter, S_IRUSR | S_IWUSR,
nes_show_adapter, nes_store_adapter);
static DRIVER_ATTR(eeprom_cmd, S_IRUSR | S_IWUSR,
nes_show_ee_cmd, nes_store_ee_cmd);
static DRIVER_ATTR(eeprom_data, S_IRUSR | S_IWUSR,
nes_show_ee_data, nes_store_ee_data);
static DRIVER_ATTR(flash_cmd, S_IRUSR | S_IWUSR,
nes_show_flash_cmd, nes_store_flash_cmd);
static DRIVER_ATTR(flash_data, S_IRUSR | S_IWUSR,
nes_show_flash_data, nes_store_flash_data);
static DRIVER_ATTR(nonidx_addr, S_IRUSR | S_IWUSR,
nes_show_nonidx_addr, nes_store_nonidx_addr);
static DRIVER_ATTR(nonidx_data, S_IRUSR | S_IWUSR,
nes_show_nonidx_data, nes_store_nonidx_data);
static DRIVER_ATTR(idx_addr, S_IRUSR | S_IWUSR,
nes_show_idx_addr, nes_store_idx_addr);
static DRIVER_ATTR(idx_data, S_IRUSR | S_IWUSR,
nes_show_idx_data, nes_store_idx_data);
static int nes_create_driver_sysfs(struct pci_driver *drv)
{
int error;
error = driver_create_file(&drv->driver, &driver_attr_adapter);
error |= driver_create_file(&drv->driver, &driver_attr_eeprom_cmd);
error |= driver_create_file(&drv->driver, &driver_attr_eeprom_data);
error |= driver_create_file(&drv->driver, &driver_attr_flash_cmd);
error |= driver_create_file(&drv->driver, &driver_attr_flash_data);
error |= driver_create_file(&drv->driver, &driver_attr_nonidx_addr);
error |= driver_create_file(&drv->driver, &driver_attr_nonidx_data);
error |= driver_create_file(&drv->driver, &driver_attr_idx_addr);
error |= driver_create_file(&drv->driver, &driver_attr_idx_data);
return error;
}
static void nes_remove_driver_sysfs(struct pci_driver *drv)
{
driver_remove_file(&drv->driver, &driver_attr_adapter);
driver_remove_file(&drv->driver, &driver_attr_eeprom_cmd);
driver_remove_file(&drv->driver, &driver_attr_eeprom_data);
driver_remove_file(&drv->driver, &driver_attr_flash_cmd);
driver_remove_file(&drv->driver, &driver_attr_flash_data);
driver_remove_file(&drv->driver, &driver_attr_nonidx_addr);
driver_remove_file(&drv->driver, &driver_attr_nonidx_data);
driver_remove_file(&drv->driver, &driver_attr_idx_addr);
driver_remove_file(&drv->driver, &driver_attr_idx_data);
}
/**
* nes_init_module - module initialization entry point
*/
static int __init nes_init_module(void)
{
int retval;
int retval1;
retval = nes_cm_start();
if (retval) {
printk(KERN_ERR PFX "Unable to start NetEffect iWARP CM.\n");
return retval;
}
retval = pci_register_driver(&nes_pci_driver);
if (retval >= 0) {
retval1 = nes_create_driver_sysfs(&nes_pci_driver);
if (retval1 < 0)
printk(KERN_ERR PFX "Unable to create NetEffect sys files.\n");
}
return retval;
}
/**
* nes_exit_module - module unload entry point
*/
static void __exit nes_exit_module(void)
{
nes_cm_stop();
nes_remove_driver_sysfs(&nes_pci_driver);
pci_unregister_driver(&nes_pci_driver);
}
module_init(nes_init_module);
module_exit(nes_exit_module);
/*
* Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __NES_H
#define __NES_H
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <asm/semaphore.h>
#include <linux/version.h>
#include <asm/io.h>
#include <linux/crc32c.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_pack.h>
#include <rdma/rdma_cm.h>
#include <rdma/iw_cm.h>
#define NES_SEND_FIRST_WRITE
#define QUEUE_DISCONNECTS
#define DRV_BUILD "1"
#define DRV_NAME "iw_nes"
#define DRV_VERSION "1.0 KO Build " DRV_BUILD
#define PFX DRV_NAME ": "
/*
* NetEffect PCI vendor id and NE010 PCI device id.
*/
#ifndef PCI_VENDOR_ID_NETEFFECT /* not in pci.ids yet */
#define PCI_VENDOR_ID_NETEFFECT 0x1678
#define PCI_DEVICE_ID_NETEFFECT_NE020 0x0100
#endif
#define NE020_REV 4
#define NE020_REV1 5
#define BAR_0 0
#define BAR_1 2
#define RX_BUF_SIZE (1536 + 8)
#define NES_REG0_SIZE (4 * 1024)
#define NES_TX_TIMEOUT (6*HZ)
#define NES_FIRST_QPN 64
#define NES_SW_CONTEXT_ALIGN 1024
#define NES_NIC_MAX_NICS 16
#define NES_MAX_ARP_TABLE_SIZE 4096
#define NES_NIC_CEQ_SIZE 8
/* NICs will be on a separate CQ */
#define NES_CCEQ_SIZE ((nesadapter->max_cq / nesadapter->port_count) - 32)
#define NES_MAX_PORT_COUNT 4
#define MAX_DPC_ITERATIONS 128
#define NES_CQP_REQUEST_NO_DOORBELL_RING 0
#define NES_CQP_REQUEST_RING_DOORBELL 1
#define NES_DRV_OPT_ENABLE_MPA_VER_0 0x00000001
#define NES_DRV_OPT_DISABLE_MPA_CRC 0x00000002
#define NES_DRV_OPT_DISABLE_FIRST_WRITE 0x00000004
#define NES_DRV_OPT_DISABLE_INTF 0x00000008
#define NES_DRV_OPT_ENABLE_MSI 0x00000010
#define NES_DRV_OPT_DUAL_LOGICAL_PORT 0x00000020
#define NES_DRV_OPT_SUPRESS_OPTION_BC 0x00000040
#define NES_DRV_OPT_NO_INLINE_DATA 0x00000080
#define NES_DRV_OPT_DISABLE_INT_MOD 0x00000100
#define NES_DRV_OPT_DISABLE_VIRT_WQ 0x00000200
#define NES_AEQ_EVENT_TIMEOUT 2500
#define NES_DISCONNECT_EVENT_TIMEOUT 2000
/* debug levels */
/* must match userspace */
#define NES_DBG_HW 0x00000001
#define NES_DBG_INIT 0x00000002
#define NES_DBG_ISR 0x00000004
#define NES_DBG_PHY 0x00000008
#define NES_DBG_NETDEV 0x00000010
#define NES_DBG_CM 0x00000020
#define NES_DBG_CM1 0x00000040
#define NES_DBG_NIC_RX 0x00000080
#define NES_DBG_NIC_TX 0x00000100
#define NES_DBG_CQP 0x00000200
#define NES_DBG_MMAP 0x00000400
#define NES_DBG_MR 0x00000800
#define NES_DBG_PD 0x00001000
#define NES_DBG_CQ 0x00002000
#define NES_DBG_QP 0x00004000
#define NES_DBG_MOD_QP 0x00008000
#define NES_DBG_AEQ 0x00010000
#define NES_DBG_IW_RX 0x00020000
#define NES_DBG_IW_TX 0x00040000
#define NES_DBG_SHUTDOWN 0x00080000
#define NES_DBG_RSVD1 0x10000000
#define NES_DBG_RSVD2 0x20000000
#define NES_DBG_RSVD3 0x40000000
#define NES_DBG_RSVD4 0x80000000
#define NES_DBG_ALL 0xffffffff
#ifdef CONFIG_INFINIBAND_NES_DEBUG
#define nes_debug(level, fmt, args...) \
if (level & nes_debug_level) \
printk(KERN_ERR PFX "%s[%u]: " fmt, __FUNCTION__, __LINE__, ##args)
#define assert(expr) \
if (!(expr)) { \
printk(KERN_ERR PFX "Assertion failed! %s, %s, %s, line %d\n", \
#expr, __FILE__, __FUNCTION__, __LINE__); \
}
#define NES_EVENT_TIMEOUT 1200000
#else
#define nes_debug(level, fmt, args...)
#define assert(expr) do {} while (0)
#define NES_EVENT_TIMEOUT 100000
#endif
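Editor's illustration of how the debug_level module parameter declared earlier gates these macros (the sample value is hypothetical): each bit of debug_level selects one NES_DBG_* category from the list above, and nes_debug() prints only when its level bit is set.
#include <stdio.h>

#define DBG_INIT   0x00000002	/* NES_DBG_INIT   */
#define DBG_ISR    0x00000004	/* NES_DBG_ISR    */
#define DBG_NETDEV 0x00000010	/* NES_DBG_NETDEV */

int main(void)
{
	/* e.g. a load with "modprobe iw_nes debug_level=0x12" */
	unsigned int debug_level = DBG_INIT | DBG_NETDEV;

	/* same (level & nes_debug_level) test as the nes_debug() macro */
	printf("INIT   messages: %s\n", (DBG_INIT & debug_level) ? "on" : "off");
	printf("ISR    messages: %s\n", (DBG_ISR & debug_level) ? "on" : "off");
	printf("NETDEV messages: %s\n", (DBG_NETDEV & debug_level) ? "on" : "off");
	return 0;
}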
#include "nes_hw.h"
#include "nes_verbs.h"
#include "nes_context.h"
#include "nes_user.h"
#include "nes_cm.h"
extern int max_mtu;
extern int nics_per_function;
#define max_frame_len (max_mtu+ETH_HLEN)
extern int interrupt_mod_interval;
extern int nes_if_count;
extern int mpa_version;
extern int disable_mpa_crc;
extern unsigned int send_first;
extern unsigned int nes_drv_opt;
extern unsigned int nes_debug_level;
extern struct list_head nes_adapter_list;
extern struct list_head nes_dev_list;
extern struct nes_cm_core *g_cm_core;
extern atomic_t cm_connects;
extern atomic_t cm_accepts;
extern atomic_t cm_disconnects;
extern atomic_t cm_closes;
extern atomic_t cm_connecteds;
extern atomic_t cm_connect_reqs;
extern atomic_t cm_rejects;
extern atomic_t mod_qp_timouts;
extern atomic_t qps_created;
extern atomic_t qps_destroyed;
extern atomic_t sw_qps_destroyed;
extern u32 mh_detected;
extern u32 mh_pauses_sent;
extern u32 cm_packets_sent;
extern u32 cm_packets_bounced;
extern u32 cm_packets_created;
extern u32 cm_packets_received;
extern u32 cm_packets_dropped;
extern u32 cm_packets_retrans;
extern u32 cm_listens_created;
extern u32 cm_listens_destroyed;
extern u32 cm_backlog_drops;
extern atomic_t cm_loopbacks;
extern atomic_t cm_nodes_created;
extern atomic_t cm_nodes_destroyed;
extern atomic_t cm_accel_dropped_pkts;
extern atomic_t cm_resets_recvd;
extern u32 crit_err_count;
extern u32 int_mod_timer_init;
extern u32 int_mod_cq_depth_256;
extern u32 int_mod_cq_depth_128;
extern u32 int_mod_cq_depth_32;
extern u32 int_mod_cq_depth_24;
extern u32 int_mod_cq_depth_16;
extern u32 int_mod_cq_depth_4;
extern u32 int_mod_cq_depth_1;
extern atomic_t cqp_reqs_allocated;
extern atomic_t cqp_reqs_freed;
extern atomic_t cqp_reqs_dynallocated;
extern atomic_t cqp_reqs_dynfreed;
extern atomic_t cqp_reqs_queued;
extern atomic_t cqp_reqs_redriven;
struct nes_device {
struct nes_adapter *nesadapter;
void __iomem *regs;
void __iomem *index_reg;
struct pci_dev *pcidev;
struct net_device *netdev[NES_NIC_MAX_NICS];
u64 link_status_interrupts;
struct tasklet_struct dpc_tasklet;
spinlock_t indexed_regs_lock;
unsigned long csr_start;
unsigned long doorbell_region;
unsigned long doorbell_start;
unsigned long mac_tx_errors;
unsigned long mac_pause_frames_sent;
unsigned long mac_pause_frames_received;
unsigned long mac_rx_errors;
unsigned long mac_rx_crc_errors;
unsigned long mac_rx_symbol_err_frames;
unsigned long mac_rx_jabber_frames;
unsigned long mac_rx_oversized_frames;
unsigned long mac_rx_short_frames;
unsigned long port_rx_discards;
unsigned long port_tx_discards;
unsigned int mac_index;
unsigned int nes_stack_start;
/* Control Structures */
void *cqp_vbase;
dma_addr_t cqp_pbase;
u32 cqp_mem_size;
u8 ceq_index;
u8 nic_ceq_index;
struct nes_hw_cqp cqp;
struct nes_hw_cq ccq;
struct list_head cqp_avail_reqs;
struct list_head cqp_pending_reqs;
struct nes_cqp_request *nes_cqp_requests;
u32 int_req;
u32 int_stat;
u32 timer_int_req;
u32 timer_only_int_count;
u32 intf_int_req;
u32 last_mac_tx_pauses;
u32 last_used_chunks_tx;
struct list_head list;
u16 base_doorbell_index;
u16 currcq_count;
u16 deepcq_count;
u8 msi_enabled;
u8 netdev_count;
u8 napi_isr_ran;
u8 disable_rx_flow_control;
u8 disable_tx_flow_control;
};
static inline void
set_wqe_64bit_value(__le32 *wqe_words, u32 index, u64 value)
{
wqe_words[index] = cpu_to_le32((u32) ((unsigned long)value));
wqe_words[index + 1] = cpu_to_le32((u32)(upper_32_bits((unsigned long)value)));
}
static inline void
set_wqe_32bit_value(__le32 *wqe_words, u32 index, u32 value)
{
wqe_words[index] = cpu_to_le32(value);
}
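/*
 * Note (illustrative, not new driver code): set_wqe_64bit_value() splits a
 * 64-bit value into two consecutive little-endian 32-bit WQE words, low
 * word first, i.e. roughly
 *
 *	wqe_words[index]     = cpu_to_le32(low 32 bits of value);
 *	wqe_words[index + 1] = cpu_to_le32(high 32 bits of value);
 *
 * as used for NES_CQP_WQE_COMP_CTX_LOW_IDX in nes_fill_init_cqp_wqe() below.
 */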
static inline void
nes_fill_init_cqp_wqe(struct nes_hw_cqp_wqe *cqp_wqe, struct nes_device *nesdev)
{
set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_COMP_CTX_LOW_IDX,
(u64)((unsigned long) &nesdev->cqp));
cqp_wqe->wqe_words[NES_CQP_WQE_COMP_SCRATCH_LOW_IDX] = 0;
cqp_wqe->wqe_words[NES_CQP_WQE_COMP_SCRATCH_HIGH_IDX] = 0;
cqp_wqe->wqe_words[NES_CQP_STAG_WQE_PBL_BLK_COUNT_IDX] = 0;
cqp_wqe->wqe_words[NES_CQP_STAG_WQE_PBL_LEN_IDX] = 0;
cqp_wqe->wqe_words[NES_CQP_STAG_WQE_LEN_LOW_IDX] = 0;
cqp_wqe->wqe_words[NES_CQP_STAG_WQE_PA_LOW_IDX] = 0;
cqp_wqe->wqe_words[NES_CQP_STAG_WQE_PA_HIGH_IDX] = 0;
}
static inline void
nes_fill_init_qp_wqe(struct nes_hw_qp_wqe *wqe, struct nes_qp *nesqp, u32 head)
{
u32 value;
value = ((u32)((unsigned long) nesqp)) | head;
set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_CTX_HIGH_IDX,
(u32)(upper_32_bits((unsigned long)(nesqp))));
set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX, value);
}
/* Read from memory-mapped device */
static inline u32 nes_read_indexed(struct nes_device *nesdev, u32 reg_index)
{
unsigned long flags;
void __iomem *addr = nesdev->index_reg;
u32 value;
spin_lock_irqsave(&nesdev->indexed_regs_lock, flags);
writel(reg_index, addr);
value = readl((void __iomem *)addr + 4);
spin_unlock_irqrestore(&nesdev->indexed_regs_lock, flags);
return value;
}
static inline u32 nes_read32(const void __iomem *addr)
{
return readl(addr);
}
static inline u16 nes_read16(const void __iomem *addr)
{
return readw(addr);
}
static inline u8 nes_read8(const void __iomem *addr)
{
return readb(addr);
}
/* Write to memory-mapped device */
static inline void nes_write_indexed(struct nes_device *nesdev, u32 reg_index, u32 val)
{
unsigned long flags;
void __iomem *addr = nesdev->index_reg;
spin_lock_irqsave(&nesdev->indexed_regs_lock, flags);
writel(reg_index, addr);
writel(val, (void __iomem *)addr + 4);
spin_unlock_irqrestore(&nesdev->indexed_regs_lock, flags);
}
static inline void nes_write32(void __iomem *addr, u32 val)
{
writel(val, addr);
}
static inline void nes_write16(void __iomem *addr, u16 val)
{
writew(val, addr);
}
static inline void nes_write8(void __iomem *addr, u8 val)
{
writeb(val, addr);
}
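/*
 * Illustrative note (not part of the driver): the *_indexed accessors above
 * implement a two-register window -- the register index is written to
 * nesdev->index_reg and the data word is then read or written at
 * index_reg + 4, all under indexed_regs_lock so that concurrent users
 * cannot interleave the index/data pair.  A hypothetical caller might do
 *
 *	u32 val = nes_read_indexed(nesdev, reg_index);
 *	nes_write_indexed(nesdev, reg_index, val | some_bit);
 *
 * where reg_index and some_bit are placeholders for a real register offset
 * and bit mask.
 */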
static inline int nes_alloc_resource(struct nes_adapter *nesadapter,
unsigned long *resource_array, u32 max_resources,
u32 *req_resource_num, u32 *next)
{
unsigned long flags;
u32 resource_num;
spin_lock_irqsave(&nesadapter->resource_lock, flags);
resource_num = find_next_zero_bit(resource_array, max_resources, *next);
if (resource_num >= max_resources) {
resource_num = find_first_zero_bit(resource_array, max_resources);
if (resource_num >= max_resources) {
printk(KERN_ERR PFX "%s: No available resources.\n", __FUNCTION__);
spin_unlock_irqrestore(&nesadapter->resource_lock, flags);
return -EMFILE;
}
}
set_bit(resource_num, resource_array);
*next = resource_num+1;
if (*next == max_resources) {
*next = 0;
}
spin_unlock_irqrestore(&nesadapter->resource_lock, flags);
*req_resource_num = resource_num;
return 0;
}
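/*
 * Illustrative usage only: nes_alloc_resource() hands out the next free bit
 * from a bitmap (wrapping around once) and returns -EMFILE when the map is
 * full.  A hypothetical caller allocating an id from an adapter-owned bitmap
 * (the names below are placeholders, not real nes_adapter fields) would do
 *
 *	u32 id;
 *	if (nes_alloc_resource(nesadapter, some_bitmap, some_max,
 *			&id, &some_next_hint))
 *		return -EMFILE;
 *	...
 *	nes_free_resource(nesadapter, some_bitmap, id);
 */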
static inline int nes_is_resource_allocated(struct nes_adapter *nesadapter,
unsigned long *resource_array, u32 resource_num)
{
unsigned long flags;
int bit_is_set;
spin_lock_irqsave(&nesadapter->resource_lock, flags);
bit_is_set = test_bit(resource_num, resource_array);
nes_debug(NES_DBG_HW, "resource_num %u is%s allocated.\n",
resource_num, (bit_is_set ? "": " not"));
spin_unlock_irqrestore(&nesadapter->resource_lock, flags);
return bit_is_set;
}
static inline void nes_free_resource(struct nes_adapter *nesadapter,
unsigned long *resource_array, u32 resource_num)
{
unsigned long flags;
spin_lock_irqsave(&nesadapter->resource_lock, flags);
clear_bit(resource_num, resource_array);
spin_unlock_irqrestore(&nesadapter->resource_lock, flags);
}
static inline struct nes_vnic *to_nesvnic(struct ib_device *ibdev)
{
return container_of(ibdev, struct nes_ib_device, ibdev)->nesvnic;
}
static inline struct nes_pd *to_nespd(struct ib_pd *ibpd)
{
return container_of(ibpd, struct nes_pd, ibpd);
}
static inline struct nes_ucontext *to_nesucontext(struct ib_ucontext *ibucontext)
{
return container_of(ibucontext, struct nes_ucontext, ibucontext);
}
static inline struct nes_mr *to_nesmr(struct ib_mr *ibmr)
{
return container_of(ibmr, struct nes_mr, ibmr);
}
static inline struct nes_mr *to_nesmr_from_ibfmr(struct ib_fmr *ibfmr)
{
return container_of(ibfmr, struct nes_mr, ibfmr);
}
static inline struct nes_mr *to_nesmw(struct ib_mw *ibmw)
{
return container_of(ibmw, struct nes_mr, ibmw);
}
static inline struct nes_fmr *to_nesfmr(struct nes_mr *nesmr)
{
return container_of(nesmr, struct nes_fmr, nesmr);
}
static inline struct nes_cq *to_nescq(struct ib_cq *ibcq)
{
return container_of(ibcq, struct nes_cq, ibcq);
}
static inline struct nes_qp *to_nesqp(struct ib_qp *ibqp)
{
return container_of(ibqp, struct nes_qp, ibqp);
}
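/*
 * Note (illustrative): the to_nes*() helpers above rely on the ib_* object
 * being embedded inside the corresponding nes_* structure, so container_of()
 * recovers the wrapper from the pointer handed out to the IB core, e.g.
 *
 *	struct nes_qp *nesqp = to_nesqp(ibqp);
 *
 * with no table lookup or reference counting involved.
 */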
/* nes.c */
void nes_add_ref(struct ib_qp *);
void nes_rem_ref(struct ib_qp *);
struct ib_qp *nes_get_qp(struct ib_device *, int);
/* nes_hw.c */
struct nes_adapter *nes_init_adapter(struct nes_device *, u8);
void nes_nic_init_timer_defaults(struct nes_device *, u8);
unsigned int nes_reset_adapter_ne020(struct nes_device *, u8 *);
int nes_init_serdes(struct nes_device *, u8, u8, u8);
void nes_init_csr_ne020(struct nes_device *, u8, u8);
void nes_destroy_adapter(struct nes_adapter *);
int nes_init_cqp(struct nes_device *);
int nes_init_phy(struct nes_device *);
int nes_init_nic_qp(struct nes_device *, struct net_device *);
void nes_destroy_nic_qp(struct nes_vnic *);
int nes_napi_isr(struct nes_device *);
void nes_dpc(unsigned long);
void nes_process_ceq(struct nes_device *, struct nes_hw_ceq *);
void nes_process_aeq(struct nes_device *, struct nes_hw_aeq *);
void nes_process_mac_intr(struct nes_device *, u32);
void nes_nic_napi_ce_handler(struct nes_device *, struct nes_hw_nic_cq *);
void nes_nic_ce_handler(struct nes_device *, struct nes_hw_nic_cq *);
void nes_cqp_ce_handler(struct nes_device *, struct nes_hw_cq *);
void nes_process_iwarp_aeqe(struct nes_device *, struct nes_hw_aeqe *);
void nes_iwarp_ce_handler(struct nes_device *, struct nes_hw_cq *);
int nes_destroy_cqp(struct nes_device *);
int nes_nic_cm_xmit(struct sk_buff *, struct net_device *);
/* nes_nic.c */
void nes_netdev_set_multicast_list(struct net_device *);
void nes_netdev_exit(struct nes_vnic *);
struct net_device *nes_netdev_init(struct nes_device *, void __iomem *);
void nes_netdev_destroy(struct net_device *);
int nes_nic_cm_xmit(struct sk_buff *, struct net_device *);
/* nes_cm.c */
void *nes_cm_create(struct net_device *);
int nes_cm_recv(struct sk_buff *, struct net_device *);
void nes_update_arp(unsigned char *, u32, u32, u16, u16);
void nes_manage_arp_cache(struct net_device *, unsigned char *, u32, u32);
void nes_sock_release(struct nes_qp *, unsigned long *);
struct nes_cm_core *nes_cm_alloc_core(void);
void flush_wqes(struct nes_device *nesdev, struct nes_qp *, u32, u32);
int nes_manage_apbvt(struct nes_vnic *, u32, u32, u32);
int nes_cm_disconn(struct nes_qp *);
void nes_cm_disconn_worker(void *);
/* nes_verbs.c */
int nes_hw_modify_qp(struct nes_device *, struct nes_qp *, u32, u32);
int nes_modify_qp(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *);
struct nes_ib_device *nes_init_ofa_device(struct net_device *);
void nes_destroy_ofa_device(struct nes_ib_device *);
int nes_register_ofa_device(struct nes_ib_device *);
void nes_unregister_ofa_device(struct nes_ib_device *);
/* nes_util.c */
int nes_read_eeprom_values(struct nes_device *, struct nes_adapter *);
void nes_write_1G_phy_reg(struct nes_device *, u8, u8, u16);
void nes_read_1G_phy_reg(struct nes_device *, u8, u8, u16 *);
void nes_write_10G_phy_reg(struct nes_device *, u16, u8, u16);
void nes_read_10G_phy_reg(struct nes_device *, u16, u8);
struct nes_cqp_request *nes_get_cqp_request(struct nes_device *);
void nes_post_cqp_request(struct nes_device *, struct nes_cqp_request *, int);
int nes_arp_table(struct nes_device *, u32, u8 *, u32);
void nes_mh_fix(unsigned long);
void nes_clc(unsigned long);
void nes_dump_mem(unsigned int, void *, int);
u32 nes_crc32(u32, u32, u32, u32, u8 *, u32, u32, u32);
#endif /* __NES_H */
/*
* Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#define TCPOPT_TIMESTAMP 8
#include <asm/atomic.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/if_arp.h>
#include <linux/notifier.h>
#include <linux/net.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/random.h>
#include <linux/list.h>
#include <linux/threads.h>
#include <net/neighbour.h>
#include <net/route.h>
#include <net/ip_fib.h>
#include "nes.h"
u32 cm_packets_sent;
u32 cm_packets_bounced;
u32 cm_packets_dropped;
u32 cm_packets_retrans;
u32 cm_packets_created;
u32 cm_packets_received;
u32 cm_listens_created;
u32 cm_listens_destroyed;
u32 cm_backlog_drops;
atomic_t cm_loopbacks;
atomic_t cm_nodes_created;
atomic_t cm_nodes_destroyed;
atomic_t cm_accel_dropped_pkts;
atomic_t cm_resets_recvd;
static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *,
struct nes_vnic *, struct nes_cm_info *);
static int add_ref_cm_node(struct nes_cm_node *);
static int rem_ref_cm_node(struct nes_cm_core *, struct nes_cm_node *);
static int mini_cm_del_listen(struct nes_cm_core *, struct nes_cm_listener *);
/* External CM API Interface */
/* instance of function pointers for client API */
/* set address of this instance to cm_core->cm_ops at cm_core alloc */
static struct nes_cm_ops nes_cm_api = {
mini_cm_accelerated,
mini_cm_listen,
mini_cm_del_listen,
mini_cm_connect,
mini_cm_close,
mini_cm_accept,
mini_cm_reject,
mini_cm_recv_pkt,
mini_cm_dealloc_core,
mini_cm_get,
mini_cm_set
};
struct nes_cm_core *g_cm_core;
atomic_t cm_connects;
atomic_t cm_accepts;
atomic_t cm_disconnects;
atomic_t cm_closes;
atomic_t cm_connecteds;
atomic_t cm_connect_reqs;
atomic_t cm_rejects;
/**
* create_event
*/
static struct nes_cm_event *create_event(struct nes_cm_node *cm_node,
enum nes_cm_event_type type)
{
struct nes_cm_event *event;
if (!cm_node->cm_id)
return NULL;
/* allocate an empty event */
event = kzalloc(sizeof(*event), GFP_ATOMIC);
if (!event)
return NULL;
event->type = type;
event->cm_node = cm_node;
event->cm_info.rem_addr = cm_node->rem_addr;
event->cm_info.loc_addr = cm_node->loc_addr;
event->cm_info.rem_port = cm_node->rem_port;
event->cm_info.loc_port = cm_node->loc_port;
event->cm_info.cm_id = cm_node->cm_id;
nes_debug(NES_DBG_CM, "Created event=%p, type=%u, dst_addr=%08x[%x],"
" src_addr=%08x[%x]\n",
event, type,
event->cm_info.loc_addr, event->cm_info.loc_port,
event->cm_info.rem_addr, event->cm_info.rem_port);
nes_cm_post_event(event);
return event;
}
/**
* send_mpa_request
*/
int send_mpa_request(struct nes_cm_node *cm_node)
{
struct sk_buff *skb;
int ret;
skb = get_free_pkt(cm_node);
if (!skb) {
nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n");
return -1;
}
/* send an MPA Request frame */
form_cm_frame(skb, cm_node, NULL, 0, &cm_node->mpa_frame,
cm_node->mpa_frame_size, SET_ACK);
ret = schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 1, 0);
if (ret < 0) {
return ret;
}
return 0;
}
/**
* parse_mpa - process a received TCP pkt; we are expecting an
* IETF MPA frame
*/
static int parse_mpa(struct nes_cm_node *cm_node, u8 *buffer, u32 len)
{
struct ietf_mpa_frame *mpa_frame;
/* assume req frame is in tcp data payload */
if (len < sizeof(struct ietf_mpa_frame)) {
nes_debug(NES_DBG_CM, "The received ietf buffer was too small (%x)\n", len);
return -1;
}
mpa_frame = (struct ietf_mpa_frame *)buffer;
cm_node->mpa_frame_size = ntohs(mpa_frame->priv_data_len);
if (cm_node->mpa_frame_size + sizeof(struct ietf_mpa_frame) != len) {
nes_debug(NES_DBG_CM, "The received ietf buffer was not right"
" complete (%x + %x != %x)\n",
cm_node->mpa_frame_size, (u32)sizeof(struct ietf_mpa_frame), len);
return -1;
}
/* copy entire MPA frame to our cm_node's frame */
memcpy(cm_node->mpa_frame_buf, buffer + sizeof(struct ietf_mpa_frame),
cm_node->mpa_frame_size);
return 0;
}
/**
* handle_exception_pkt - process an exception packet.
* We have been in a TSA state, and we have now received software
* TCP/IP traffic; it should be a FIN request or an IP pkt with options
*/
static int handle_exception_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb)
{
int ret = 0;
struct tcphdr *tcph = tcp_hdr(skb);
/* first check to see if this a FIN pkt */
if (tcph->fin) {
/* we need to ACK the FIN request */
send_ack(cm_node);
/* check which side we are (client/server) and set next state accordingly */
if (cm_node->tcp_cntxt.client)
cm_node->state = NES_CM_STATE_CLOSING;
else {
/* we are the server side */
cm_node->state = NES_CM_STATE_CLOSE_WAIT;
/* since this is a self contained CM we don't wait for */
/* an APP to close us, just send final FIN immediately */
ret = send_fin(cm_node, NULL);
cm_node->state = NES_CM_STATE_LAST_ACK;
}
} else {
ret = -EINVAL;
}
return ret;
}
/**
* form_cm_frame - get a free packet and build an empty frame;
* use node info to build it.
*/
struct sk_buff *form_cm_frame(struct sk_buff *skb, struct nes_cm_node *cm_node,
void *options, u32 optionsize, void *data, u32 datasize, u8 flags)
{
struct tcphdr *tcph;
struct iphdr *iph;
struct ethhdr *ethh;
u8 *buf;
u16 packetsize = sizeof(*iph);
packetsize += sizeof(*tcph);
packetsize += optionsize + datasize;
memset(skb->data, 0x00, ETH_HLEN + sizeof(*iph) + sizeof(*tcph));
skb->len = 0;
buf = skb_put(skb, packetsize + ETH_HLEN);
ethh = (struct ethhdr *) buf;
buf += ETH_HLEN;
iph = (struct iphdr *)buf;
buf += sizeof(*iph);
tcph = (struct tcphdr *)buf;
skb_reset_mac_header(skb);
skb_set_network_header(skb, ETH_HLEN);
skb_set_transport_header(skb, ETH_HLEN+sizeof(*iph));
buf += sizeof(*tcph);
skb->ip_summed = CHECKSUM_PARTIAL;
skb->protocol = htons(ETH_P_IP);
skb->data_len = 0;
skb->mac_len = ETH_HLEN;
memcpy(ethh->h_dest, cm_node->rem_mac, ETH_ALEN);
memcpy(ethh->h_source, cm_node->loc_mac, ETH_ALEN);
ethh->h_proto = htons(ETH_P_IP);
iph->version = IPVERSION;
iph->ihl = 5; /* 5 * 4-byte words, IP header len */
iph->tos = 0;
iph->tot_len = htons(packetsize);
iph->id = htons(++cm_node->tcp_cntxt.loc_id);
iph->frag_off = htons(0x4000);
iph->ttl = 0x40;
iph->protocol = 0x06; /* IPPROTO_TCP */
iph->saddr = htonl(cm_node->loc_addr);
iph->daddr = htonl(cm_node->rem_addr);
tcph->source = htons(cm_node->loc_port);
tcph->dest = htons(cm_node->rem_port);
tcph->seq = htonl(cm_node->tcp_cntxt.loc_seq_num);
if (flags & SET_ACK) {
cm_node->tcp_cntxt.loc_ack_num = cm_node->tcp_cntxt.rcv_nxt;
tcph->ack_seq = htonl(cm_node->tcp_cntxt.loc_ack_num);
tcph->ack = 1;
} else
tcph->ack_seq = 0;
if (flags & SET_SYN) {
cm_node->tcp_cntxt.loc_seq_num++;
tcph->syn = 1;
} else
cm_node->tcp_cntxt.loc_seq_num += datasize; /* data (no headers) */
if (flags & SET_FIN)
tcph->fin = 1;
if (flags & SET_RST)
tcph->rst = 1;
tcph->doff = (u16)((sizeof(*tcph) + optionsize + 3) >> 2);
tcph->window = htons(cm_node->tcp_cntxt.rcv_wnd);
tcph->urg_ptr = 0;
if (optionsize)
memcpy(buf, options, optionsize);
buf += optionsize;
if (datasize)
memcpy(buf, data, datasize);
skb_shinfo(skb)->nr_frags = 0;
cm_packets_created++;
return skb;
}
/**
* print_core - dump a cm core
*/
static void print_core(struct nes_cm_core *core)
{
nes_debug(NES_DBG_CM, "---------------------------------------------\n");
nes_debug(NES_DBG_CM, "CM Core -- (core = %p )\n", core);
if (!core)
return;
nes_debug(NES_DBG_CM, "---------------------------------------------\n");
nes_debug(NES_DBG_CM, "Session ID : %u \n", atomic_read(&core->session_id));
nes_debug(NES_DBG_CM, "State : %u \n", core->state);
nes_debug(NES_DBG_CM, "Tx Free cnt : %u \n", skb_queue_len(&core->tx_free_list));
nes_debug(NES_DBG_CM, "Listen Nodes : %u \n", atomic_read(&core->listen_node_cnt));
nes_debug(NES_DBG_CM, "Active Nodes : %u \n", atomic_read(&core->node_cnt));
nes_debug(NES_DBG_CM, "core : %p \n", core);
nes_debug(NES_DBG_CM, "-------------- end core ---------------\n");
}
/**
* schedule_nes_timer
* note - cm_node needs to be protected before calling this. Bracket the call with:
* add_ref_cm_node(cm_node); ... rem_ref_cm_node(cm_core, cm_node);
*/
int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
enum nes_timer_type type, int send_retrans,
int close_when_complete)
{
unsigned long flags;
struct nes_cm_core *cm_core;
struct nes_timer_entry *new_send;
int ret = 0;
u32 was_timer_set;
new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC);
if (!new_send)
return -1;
if (!cm_node)
return -EINVAL;
/* new_send->timetosend = currenttime */
new_send->retrycount = NES_DEFAULT_RETRYS;
new_send->retranscount = NES_DEFAULT_RETRANS;
new_send->skb = skb;
new_send->timetosend = jiffies;
new_send->type = type;
new_send->netdev = cm_node->netdev;
new_send->send_retrans = send_retrans;
new_send->close_when_complete = close_when_complete;
if (type == NES_TIMER_TYPE_CLOSE) {
new_send->timetosend += (HZ/2); /* TODO: decide on the correct value here */
spin_lock_irqsave(&cm_node->recv_list_lock, flags);
list_add_tail(&new_send->list, &cm_node->recv_list);
spin_unlock_irqrestore(&cm_node->recv_list_lock, flags);
}
if (type == NES_TIMER_TYPE_SEND) {
new_send->seq_num = htonl(tcp_hdr(skb)->seq);
atomic_inc(&new_send->skb->users);
ret = nes_nic_cm_xmit(new_send->skb, cm_node->netdev);
if (ret != NETDEV_TX_OK) {
nes_debug(NES_DBG_CM, "Error sending packet %p (jiffies = %lu)\n",
new_send, jiffies);
atomic_dec(&new_send->skb->users);
new_send->timetosend = jiffies;
} else {
cm_packets_sent++;
if (!send_retrans) {
if (close_when_complete)
rem_ref_cm_node(cm_node->cm_core, cm_node);
dev_kfree_skb_any(new_send->skb);
kfree(new_send);
return ret;
}
new_send->timetosend = jiffies + NES_RETRY_TIMEOUT;
}
spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
list_add_tail(&new_send->list, &cm_node->retrans_list);
spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
}
if (type == NES_TIMER_TYPE_RECV) {
new_send->seq_num = htonl(tcp_hdr(skb)->seq);
new_send->timetosend = jiffies;
spin_lock_irqsave(&cm_node->recv_list_lock, flags);
list_add_tail(&new_send->list, &cm_node->recv_list);
spin_unlock_irqrestore(&cm_node->recv_list_lock, flags);
}
cm_core = cm_node->cm_core;
was_timer_set = timer_pending(&cm_core->tcp_timer);
if (!was_timer_set) {
cm_core->tcp_timer.expires = new_send->timetosend;
add_timer(&cm_core->tcp_timer);
}
return ret;
}
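/*
 * Illustrative call pattern (not new driver code): senders that want a frame
 * transmitted and then cleaned up take a node reference first and let the
 * timer path drop it; send_reset() below, for example, does
 *
 *	add_ref_cm_node(cm_node);
 *	form_cm_frame(skb, cm_node, NULL, 0, NULL, 0, SET_RST | SET_ACK);
 *	schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 0, 1);
 *
 * where close_when_complete == 1 causes the reference to be dropped (by
 * schedule_nes_timer() itself or by nes_cm_timer_tick()) once the frame has
 * gone out.
 */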
/**
* nes_cm_timer_tick
*/
void nes_cm_timer_tick(unsigned long pass)
{
unsigned long flags, qplockflags;
unsigned long nexttimeout = jiffies + NES_LONG_TIME;
struct iw_cm_id *cm_id;
struct nes_cm_node *cm_node;
struct nes_timer_entry *send_entry, *recv_entry;
struct list_head *list_core, *list_core_temp;
struct list_head *list_node, *list_node_temp;
struct nes_cm_core *cm_core = g_cm_core;
struct nes_qp *nesqp;
struct sk_buff *skb;
u32 settimer = 0;
int ret = NETDEV_TX_OK;
int node_done;
spin_lock_irqsave(&cm_core->ht_lock, flags);
list_for_each_safe(list_node, list_core_temp, &cm_core->connected_nodes) {
cm_node = container_of(list_node, struct nes_cm_node, list);
add_ref_cm_node(cm_node);
spin_unlock_irqrestore(&cm_core->ht_lock, flags);
spin_lock_irqsave(&cm_node->recv_list_lock, flags);
list_for_each_safe(list_core, list_node_temp, &cm_node->recv_list) {
recv_entry = container_of(list_core, struct nes_timer_entry, list);
if ((time_after(recv_entry->timetosend, jiffies)) &&
(recv_entry->type == NES_TIMER_TYPE_CLOSE)) {
if (nexttimeout > recv_entry->timetosend || !settimer) {
nexttimeout = recv_entry->timetosend;
settimer = 1;
}
continue;
}
list_del(&recv_entry->list);
cm_id = cm_node->cm_id;
spin_unlock_irqrestore(&cm_node->recv_list_lock, flags);
if (recv_entry->type == NES_TIMER_TYPE_CLOSE) {
nesqp = (struct nes_qp *)recv_entry->skb;
spin_lock_irqsave(&nesqp->lock, qplockflags);
if (nesqp->cm_id) {
nes_debug(NES_DBG_CM, "QP%u: cm_id = %p, refcount = %d: "
"****** HIT A NES_TIMER_TYPE_CLOSE"
" with something to do!!! ******\n",
nesqp->hwqp.qp_id, cm_id,
atomic_read(&nesqp->refcount));
nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
nesqp->last_aeq = NES_AEQE_AEID_RESET_SENT;
nesqp->ibqp_state = IB_QPS_ERR;
spin_unlock_irqrestore(&nesqp->lock, qplockflags);
nes_cm_disconn(nesqp);
} else {
spin_unlock_irqrestore(&nesqp->lock, qplockflags);
nes_debug(NES_DBG_CM, "QP%u: cm_id = %p, refcount = %d:"
" ****** HIT A NES_TIMER_TYPE_CLOSE"
" with nothing to do!!! ******\n",
nesqp->hwqp.qp_id, cm_id,
atomic_read(&nesqp->refcount));
nes_rem_ref(&nesqp->ibqp);
}
if (cm_id)
cm_id->rem_ref(cm_id);
}
kfree(recv_entry);
spin_lock_irqsave(&cm_node->recv_list_lock, flags);
}
spin_unlock_irqrestore(&cm_node->recv_list_lock, flags);
spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
node_done = 0;
list_for_each_safe(list_core, list_node_temp, &cm_node->retrans_list) {
if (node_done) {
break;
}
send_entry = container_of(list_core, struct nes_timer_entry, list);
if (time_after(send_entry->timetosend, jiffies)) {
if (cm_node->state != NES_CM_STATE_TSA) {
if ((nexttimeout > send_entry->timetosend) || !settimer) {
nexttimeout = send_entry->timetosend;
settimer = 1;
}
node_done = 1;
continue;
} else {
list_del(&send_entry->list);
skb = send_entry->skb;
spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
dev_kfree_skb_any(skb);
kfree(send_entry);
spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
continue;
}
}
if (send_entry->type == NES_TIMER_NODE_CLEANUP) {
list_del(&send_entry->list);
spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
kfree(send_entry);
spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
continue;
}
if ((send_entry->seq_num < cm_node->tcp_cntxt.rem_ack_num) ||
(cm_node->state == NES_CM_STATE_TSA) ||
(cm_node->state == NES_CM_STATE_CLOSED)) {
skb = send_entry->skb;
list_del(&send_entry->list);
spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
kfree(send_entry);
dev_kfree_skb_any(skb);
spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
continue;
}
if (!send_entry->retranscount || !send_entry->retrycount) {
cm_packets_dropped++;
skb = send_entry->skb;
list_del(&send_entry->list);
spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
dev_kfree_skb_any(skb);
kfree(send_entry);
if (cm_node->state == NES_CM_STATE_SYN_RCVD) {
/* this node never even generated an indication up to the cm */
rem_ref_cm_node(cm_core, cm_node);
} else {
cm_node->state = NES_CM_STATE_CLOSED;
create_event(cm_node, NES_CM_EVENT_ABORTED);
}
spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
continue;
}
/* this seems like the correct place, but leave send entry unprotected */
/* spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags); */
atomic_inc(&send_entry->skb->users);
cm_packets_retrans++;
nes_debug(NES_DBG_CM, "Retransmitting send_entry %p for node %p,"
" jiffies = %lu, time to send = %lu, retranscount = %u, "
"send_entry->seq_num = 0x%08X, cm_node->tcp_cntxt.rem_ack_num = 0x%08X\n",
send_entry, cm_node, jiffies, send_entry->timetosend, send_entry->retranscount,
send_entry->seq_num, cm_node->tcp_cntxt.rem_ack_num);
spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
ret = nes_nic_cm_xmit(send_entry->skb, cm_node->netdev);
if (ret != NETDEV_TX_OK) {
cm_packets_bounced++;
atomic_dec(&send_entry->skb->users);
send_entry->retrycount--;
nexttimeout = jiffies + NES_SHORT_TIME;
settimer = 1;
node_done = 1;
spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
continue;
} else {
cm_packets_sent++;
}
spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
list_del(&send_entry->list);
nes_debug(NES_DBG_CM, "Packet Sent: retrans count = %u, retry count = %u.\n",
send_entry->retranscount, send_entry->retrycount);
if (send_entry->send_retrans) {
send_entry->retranscount--;
send_entry->timetosend = jiffies + NES_RETRY_TIMEOUT;
if (nexttimeout > send_entry->timetosend || !settimer) {
nexttimeout = send_entry->timetosend;
settimer = 1;
}
list_add(&send_entry->list, &cm_node->retrans_list);
continue;
} else {
int close_when_complete;
skb = send_entry->skb;
close_when_complete = send_entry->close_when_complete;
spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
if (close_when_complete) {
BUG_ON(atomic_read(&cm_node->ref_count) == 1);
rem_ref_cm_node(cm_core, cm_node);
}
dev_kfree_skb_any(skb);
kfree(send_entry);
spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
continue;
}
}
spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
rem_ref_cm_node(cm_core, cm_node);
spin_lock_irqsave(&cm_core->ht_lock, flags);
if (ret != NETDEV_TX_OK)
break;
}
spin_unlock_irqrestore(&cm_core->ht_lock, flags);
if (settimer) {
if (!timer_pending(&cm_core->tcp_timer)) {
cm_core->tcp_timer.expires = nexttimeout;
add_timer(&cm_core->tcp_timer);
}
}
}
/**
* send_syn
*/
int send_syn(struct nes_cm_node *cm_node, u32 sendack)
{
int ret;
int flags = SET_SYN;
struct sk_buff *skb;
char optionsbuffer[sizeof(struct option_mss) +
sizeof(struct option_windowscale) +
sizeof(struct option_base) + 1];
int optionssize = 0;
/* Sending MSS option */
union all_known_options *options;
if (!cm_node)
return -EINVAL;
options = (union all_known_options *)&optionsbuffer[optionssize];
options->as_mss.optionnum = OPTION_NUMBER_MSS;
options->as_mss.length = sizeof(struct option_mss);
options->as_mss.mss = htons(cm_node->tcp_cntxt.mss);
optionssize += sizeof(struct option_mss);
options = (union all_known_options *)&optionsbuffer[optionssize];
options->as_windowscale.optionnum = OPTION_NUMBER_WINDOW_SCALE;
options->as_windowscale.length = sizeof(struct option_windowscale);
options->as_windowscale.shiftcount = cm_node->tcp_cntxt.rcv_wscale;
optionssize += sizeof(struct option_windowscale);
if (sendack && !(NES_DRV_OPT_SUPRESS_OPTION_BC & nes_drv_opt)) {
options = (union all_known_options *)&optionsbuffer[optionssize];
options->as_base.optionnum = OPTION_NUMBER_WRITE0;
options->as_base.length = sizeof(struct option_base);
optionssize += sizeof(struct option_base);
/* we need the size to be a multiple of 4 */
options = (union all_known_options *)&optionsbuffer[optionssize];
options->as_end = 1;
optionssize += 1;
options = (union all_known_options *)&optionsbuffer[optionssize];
options->as_end = 1;
optionssize += 1;
}
options = (union all_known_options *)&optionsbuffer[optionssize];
options->as_end = OPTION_NUMBER_END;
optionssize += 1;
skb = get_free_pkt(cm_node);
if (!skb) {
nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n");
return -1;
}
if (sendack)
flags |= SET_ACK;
form_cm_frame(skb, cm_node, optionsbuffer, optionssize, NULL, 0, flags);
ret = schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 1, 0);
return ret;
}
/**
* send_reset
*/
int send_reset(struct nes_cm_node *cm_node)
{
int ret;
struct sk_buff *skb = get_free_pkt(cm_node);
int flags = SET_RST | SET_ACK;
if (!skb) {
nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n");
return -1;
}
add_ref_cm_node(cm_node);
form_cm_frame(skb, cm_node, NULL, 0, NULL, 0, flags);
ret = schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 0, 1);
return ret;
}
/**
* send_ack
*/
int send_ack(struct nes_cm_node *cm_node)
{
int ret;
struct sk_buff *skb = get_free_pkt(cm_node);
if (!skb) {
nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n");
return -1;
}
form_cm_frame(skb, cm_node, NULL, 0, NULL, 0, SET_ACK);
ret = schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 0, 0);
return ret;
}
/**
* send_fin
*/
int send_fin(struct nes_cm_node *cm_node, struct sk_buff *skb)
{
int ret;
/* if we didn't get a frame get one */
if (!skb)
skb = get_free_pkt(cm_node);
if (!skb) {
nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n");
return -1;
}
form_cm_frame(skb, cm_node, NULL, 0, NULL, 0, SET_ACK | SET_FIN);
ret = schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 1, 0);
return ret;
}
/**
* get_free_pkt
*/
struct sk_buff *get_free_pkt(struct nes_cm_node *cm_node)
{
struct sk_buff *skb, *new_skb;
/* check to see if we need to repopulate the free tx pkt queue */
if (skb_queue_len(&cm_node->cm_core->tx_free_list) < NES_CM_FREE_PKT_LO_WATERMARK) {
while (skb_queue_len(&cm_node->cm_core->tx_free_list) <
cm_node->cm_core->free_tx_pkt_max) {
/* replace the frame we took, we won't get it back */
new_skb = dev_alloc_skb(cm_node->cm_core->mtu);
BUG_ON(!new_skb);
/* add a replacement frame to the free tx list head */
skb_queue_head(&cm_node->cm_core->tx_free_list, new_skb);
}
}
skb = skb_dequeue(&cm_node->cm_core->tx_free_list);
return skb;
}
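/*
 * Note (illustrative): get_free_pkt() dequeues a preallocated skb from the
 * cm_core tx_free_list, topping the pool back up to free_tx_pkt_max whenever
 * it drops below NES_CM_FREE_PKT_LO_WATERMARK, so CM frames can be built in
 * atomic context without a fresh allocation on every send.
 */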
/**
* make_hashkey - generate hash key from node tuple
*/
static inline int make_hashkey(u16 loc_port, nes_addr_t loc_addr, u16 rem_port,
nes_addr_t rem_addr)
{
u32 hashkey = 0;
hashkey = loc_addr + rem_addr + loc_port + rem_port;
hashkey = (hashkey % NES_CM_HASHTABLE_SIZE);
return hashkey;
}
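/*
 * Worked example (illustrative): the hash key is simply the sum of the four
 * tuple members reduced modulo NES_CM_HASHTABLE_SIZE, e.g. for a connection
 * 192.168.0.1:1000 <-> 192.168.0.2:2000 the key is
 *
 *	(0xc0a80001 + 0xc0a80002 + 1000 + 2000) % NES_CM_HASHTABLE_SIZE
 *
 * Note that add_hte_node()/find_node() below currently keep every node on
 * the single connected_nodes list, so the key is stored in cm_node->hashkey
 * but the list is not actually bucketed by it.
 */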
/**
* find_node - find a cm node that matches the reference cm node
*/
static struct nes_cm_node *find_node(struct nes_cm_core *cm_core,
u16 rem_port, nes_addr_t rem_addr, u16 loc_port, nes_addr_t loc_addr)
{
unsigned long flags;
u32 hashkey;
struct list_head *list_pos;
struct list_head *hte;
struct nes_cm_node *cm_node;
/* make a hash index key for this packet */
hashkey = make_hashkey(loc_port, loc_addr, rem_port, rem_addr);
/* get a handle on the hte */
hte = &cm_core->connected_nodes;
nes_debug(NES_DBG_CM, "Searching for an owner node:%x:%x from core %p->%p\n",
loc_addr, loc_port, cm_core, hte);
/* walk list and find cm_node associated with this session ID */
spin_lock_irqsave(&cm_core->ht_lock, flags);
list_for_each(list_pos, hte) {
cm_node = container_of(list_pos, struct nes_cm_node, list);
/* compare quad, return node handle if a match */
nes_debug(NES_DBG_CM, "finding node %x:%x =? %x:%x ^ %x:%x =? %x:%x\n",
cm_node->loc_addr, cm_node->loc_port,
loc_addr, loc_port,
cm_node->rem_addr, cm_node->rem_port,
rem_addr, rem_port);
if ((cm_node->loc_addr == loc_addr) && (cm_node->loc_port == loc_port) &&
(cm_node->rem_addr == rem_addr) && (cm_node->rem_port == rem_port)) {
add_ref_cm_node(cm_node);
spin_unlock_irqrestore(&cm_core->ht_lock, flags);
return cm_node;
}
}
spin_unlock_irqrestore(&cm_core->ht_lock, flags);
/* no owner node */
return NULL;
}
/**
* find_listener - find a cm node listening on this addr-port pair
*/
static struct nes_cm_listener *find_listener(struct nes_cm_core *cm_core,
nes_addr_t dst_addr, u16 dst_port, enum nes_cm_listener_state listener_state)
{
unsigned long flags;
struct list_head *listen_list;
struct nes_cm_listener *listen_node;
/* walk list and find cm_node associated with this session ID */
spin_lock_irqsave(&cm_core->listen_list_lock, flags);
list_for_each(listen_list, &cm_core->listen_list.list) {
listen_node = container_of(listen_list, struct nes_cm_listener, list);
/* compare node pair, return node handle if a match */
if (((listen_node->loc_addr == dst_addr) ||
listen_node->loc_addr == 0x00000000) &&
(listen_node->loc_port == dst_port) &&
(listener_state & listen_node->listener_state)) {
atomic_inc(&listen_node->ref_count);
spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
return listen_node;
}
}
spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
nes_debug(NES_DBG_CM, "Unable to find listener- %x:%x\n",
dst_addr, dst_port);
/* no listener */
return NULL;
}
/**
* add_hte_node - add a cm node to the hash table
*/
static int add_hte_node(struct nes_cm_core *cm_core, struct nes_cm_node *cm_node)
{
unsigned long flags;
u32 hashkey;
struct list_head *hte;
if (!cm_node || !cm_core)
return -EINVAL;
nes_debug(NES_DBG_CM, "Adding Node to Active Connection HT\n");
/* first, make an index into our hash table */
hashkey = make_hashkey(cm_node->loc_port, cm_node->loc_addr,
cm_node->rem_port, cm_node->rem_addr);
cm_node->hashkey = hashkey;
spin_lock_irqsave(&cm_core->ht_lock, flags);
/* get a handle on the hash table element (list head for this slot) */
hte = &cm_core->connected_nodes;
list_add_tail(&cm_node->list, hte);
atomic_inc(&cm_core->ht_node_cnt);
spin_unlock_irqrestore(&cm_core->ht_lock, flags);
return 0;
}
/**
* mini_cm_dec_refcnt_listen
*/
static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
struct nes_cm_listener *listener, int free_hanging_nodes)
{
int ret = 1;
unsigned long flags;
spin_lock_irqsave(&cm_core->listen_list_lock, flags);
if (!atomic_dec_return(&listener->ref_count)) {
list_del(&listener->list);
/* decrement our listen node count */
atomic_dec(&cm_core->listen_node_cnt);
spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
if (listener->nesvnic) {
nes_manage_apbvt(listener->nesvnic, listener->loc_port,
PCI_FUNC(listener->nesvnic->nesdev->pcidev->devfn), NES_MANAGE_APBVT_DEL);
}
nes_debug(NES_DBG_CM, "destroying listener (%p)\n", listener);
kfree(listener);
ret = 0;
cm_listens_destroyed++;
} else {
spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
}
if (listener) {
if (atomic_read(&listener->pend_accepts_cnt) > 0)
nes_debug(NES_DBG_CM, "destroying listener (%p)"
" with non-zero pending accepts=%u\n",
listener, atomic_read(&listener->pend_accepts_cnt));
}
return ret;
}
/**
* mini_cm_del_listen
*/
static int mini_cm_del_listen(struct nes_cm_core *cm_core,
struct nes_cm_listener *listener)
{
listener->listener_state = NES_CM_LISTENER_PASSIVE_STATE;
listener->cm_id = NULL; /* going to be destroyed pretty soon */
return mini_cm_dec_refcnt_listen(cm_core, listener, 1);
}
/**
* mini_cm_accelerated
*/
static inline int mini_cm_accelerated(struct nes_cm_core *cm_core,
struct nes_cm_node *cm_node)
{
u32 was_timer_set;
cm_node->accelerated = 1;
if (cm_node->accept_pend) {
BUG_ON(!cm_node->listener);
atomic_dec(&cm_node->listener->pend_accepts_cnt);
BUG_ON(atomic_read(&cm_node->listener->pend_accepts_cnt) < 0);
}
was_timer_set = timer_pending(&cm_core->tcp_timer);
if (!was_timer_set) {
cm_core->tcp_timer.expires = jiffies + NES_SHORT_TIME;
add_timer(&cm_core->tcp_timer);
}
return 0;
}
/**
* nes_addr_send_arp
*/
static void nes_addr_send_arp(u32 dst_ip)
{
struct rtable *rt;
struct flowi fl;
memset(&fl, 0, sizeof fl);
fl.nl_u.ip4_u.daddr = htonl(dst_ip);
if (ip_route_output_key(&init_net, &rt, &fl)) {
printk("%s: ip_route_output_key failed for 0x%08X\n",
__FUNCTION__, dst_ip);
return;
}
neigh_event_send(rt->u.dst.neighbour, NULL);
ip_rt_put(rt);
}
/**
* make_cm_node - create a new instance of a cm node
*/
static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
struct nes_vnic *nesvnic, struct nes_cm_info *cm_info,
struct nes_cm_listener *listener)
{
struct nes_cm_node *cm_node;
struct timespec ts;
int arpindex = 0;
struct nes_device *nesdev;
struct nes_adapter *nesadapter;
/* create an hte and cm_node for this instance */
cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC);
if (!cm_node)
return NULL;
/* set our node specific transport info */
cm_node->loc_addr = cm_info->loc_addr;
cm_node->rem_addr = cm_info->rem_addr;
cm_node->loc_port = cm_info->loc_port;
cm_node->rem_port = cm_info->rem_port;
cm_node->send_write0 = send_first;
nes_debug(NES_DBG_CM, "Make node addresses : loc = %x:%x, rem = %x:%x\n",
cm_node->loc_addr, cm_node->loc_port, cm_node->rem_addr, cm_node->rem_port);
cm_node->listener = listener;
cm_node->netdev = nesvnic->netdev;
cm_node->cm_id = cm_info->cm_id;
memcpy(cm_node->loc_mac, nesvnic->netdev->dev_addr, ETH_ALEN);
nes_debug(NES_DBG_CM, "listener=%p, cm_id=%p\n",
cm_node->listener, cm_node->cm_id);
INIT_LIST_HEAD(&cm_node->retrans_list);
spin_lock_init(&cm_node->retrans_list_lock);
INIT_LIST_HEAD(&cm_node->recv_list);
spin_lock_init(&cm_node->recv_list_lock);
cm_node->loopbackpartner = NULL;
atomic_set(&cm_node->ref_count, 1);
/* associate our parent CM core */
cm_node->cm_core = cm_core;
cm_node->tcp_cntxt.loc_id = NES_CM_DEF_LOCAL_ID;
cm_node->tcp_cntxt.rcv_wscale = NES_CM_DEFAULT_RCV_WND_SCALE;
cm_node->tcp_cntxt.rcv_wnd = NES_CM_DEFAULT_RCV_WND_SCALED >>
NES_CM_DEFAULT_RCV_WND_SCALE;
ts = current_kernel_time();
cm_node->tcp_cntxt.loc_seq_num = htonl(ts.tv_nsec);
cm_node->tcp_cntxt.mss = nesvnic->max_frame_size - sizeof(struct iphdr) -
sizeof(struct tcphdr) - ETH_HLEN;
cm_node->tcp_cntxt.rcv_nxt = 0;
/* get a unique session ID, add thread_id to an upcounter to handle race */
atomic_inc(&cm_core->node_cnt);
atomic_inc(&cm_core->session_id);
cm_node->session_id = (u32)(atomic_read(&cm_core->session_id) + current->tgid);
cm_node->conn_type = cm_info->conn_type;
cm_node->apbvt_set = 0;
cm_node->accept_pend = 0;
cm_node->nesvnic = nesvnic;
/* get some device handles, for arp lookup */
nesdev = nesvnic->nesdev;
nesadapter = nesdev->nesadapter;
cm_node->loopbackpartner = NULL;
/* get the mac addr for the remote node */
arpindex = nes_arp_table(nesdev, cm_node->rem_addr, NULL, NES_ARP_RESOLVE);
if (arpindex < 0) {
kfree(cm_node);
nes_addr_send_arp(cm_info->rem_addr);
return NULL;
}
/* copy the mac addr to node context */
memcpy(cm_node->rem_mac, nesadapter->arp_table[arpindex].mac_addr, ETH_ALEN);
nes_debug(NES_DBG_CM, "Remote mac addr from arp table:%02x,"
" %02x, %02x, %02x, %02x, %02x\n",
cm_node->rem_mac[0], cm_node->rem_mac[1],
cm_node->rem_mac[2], cm_node->rem_mac[3],
cm_node->rem_mac[4], cm_node->rem_mac[5]);
add_hte_node(cm_core, cm_node);
atomic_inc(&cm_nodes_created);
return cm_node;
}
/**
* add_ref_cm_node - add a reference to a cm node
*/
static int add_ref_cm_node(struct nes_cm_node *cm_node)
{
atomic_inc(&cm_node->ref_count);
return 0;
}
/**
* rem_ref_cm_node - destroy an instance of a cm node
*/
static int rem_ref_cm_node(struct nes_cm_core *cm_core,
struct nes_cm_node *cm_node)
{
unsigned long flags, qplockflags;
struct nes_timer_entry *send_entry;
struct nes_timer_entry *recv_entry;
struct iw_cm_id *cm_id;
struct list_head *list_core, *list_node_temp;
struct nes_qp *nesqp;
if (!cm_node)
return -EINVAL;
spin_lock_irqsave(&cm_node->cm_core->ht_lock, flags);
if (atomic_dec_return(&cm_node->ref_count)) {
spin_unlock_irqrestore(&cm_node->cm_core->ht_lock, flags);
return 0;
}
list_del(&cm_node->list);
atomic_dec(&cm_core->ht_node_cnt);
spin_unlock_irqrestore(&cm_node->cm_core->ht_lock, flags);
/* if the node is destroyed before connection was accelerated */
if (!cm_node->accelerated && cm_node->accept_pend) {
BUG_ON(!cm_node->listener);
atomic_dec(&cm_node->listener->pend_accepts_cnt);
BUG_ON(atomic_read(&cm_node->listener->pend_accepts_cnt) < 0);
}
spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
list_for_each_safe(list_core, list_node_temp, &cm_node->retrans_list) {
send_entry = container_of(list_core, struct nes_timer_entry, list);
list_del(&send_entry->list);
spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
dev_kfree_skb_any(send_entry->skb);
kfree(send_entry);
spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
continue;
}
spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
spin_lock_irqsave(&cm_node->recv_list_lock, flags);
list_for_each_safe(list_core, list_node_temp, &cm_node->recv_list) {
recv_entry = container_of(list_core, struct nes_timer_entry, list);
list_del(&recv_entry->list);
cm_id = cm_node->cm_id;
spin_unlock_irqrestore(&cm_node->recv_list_lock, flags);
if (recv_entry->type == NES_TIMER_TYPE_CLOSE) {
nesqp = (struct nes_qp *)recv_entry->skb;
spin_lock_irqsave(&nesqp->lock, qplockflags);
if (nesqp->cm_id) {
nes_debug(NES_DBG_CM, "QP%u: cm_id = %p: ****** HIT A NES_TIMER_TYPE_CLOSE"
" with something to do!!! ******\n",
nesqp->hwqp.qp_id, cm_id);
nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
nesqp->last_aeq = NES_AEQE_AEID_RESET_SENT;
nesqp->ibqp_state = IB_QPS_ERR;
spin_unlock_irqrestore(&nesqp->lock, qplockflags);
nes_cm_disconn(nesqp);
} else {
spin_unlock_irqrestore(&nesqp->lock, qplockflags);
nes_debug(NES_DBG_CM, "QP%u: cm_id = %p: ****** HIT A NES_TIMER_TYPE_CLOSE"
" with nothing to do!!! ******\n",
nesqp->hwqp.qp_id, cm_id);
nes_rem_ref(&nesqp->ibqp);
}
cm_id->rem_ref(cm_id);
} else if (recv_entry->type == NES_TIMER_TYPE_RECV) {
dev_kfree_skb_any(recv_entry->skb);
}
kfree(recv_entry);
spin_lock_irqsave(&cm_node->recv_list_lock, flags);
}
spin_unlock_irqrestore(&cm_node->recv_list_lock, flags);
if (cm_node->listener) {
mini_cm_dec_refcnt_listen(cm_core, cm_node->listener, 0);
} else {
if (cm_node->apbvt_set && cm_node->nesvnic) {
nes_manage_apbvt(cm_node->nesvnic, cm_node->loc_port,
PCI_FUNC(cm_node->nesvnic->nesdev->pcidev->devfn),
NES_MANAGE_APBVT_DEL);
}
}
kfree(cm_node);
atomic_dec(&cm_core->node_cnt);
atomic_inc(&cm_nodes_destroyed);
return 0;
}
/**
* process_options
*/
static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc, u32 optionsize, u32 syn_packet)
{
u32 tmp;
u32 offset = 0;
union all_known_options *all_options;
char got_mss_option = 0;
while (offset < optionsize) {
all_options = (union all_known_options *)(optionsloc + offset);
switch (all_options->as_base.optionnum) {
case OPTION_NUMBER_END:
offset = optionsize;
break;
case OPTION_NUMBER_NONE:
offset += 1;
continue;
case OPTION_NUMBER_MSS:
nes_debug(NES_DBG_CM, "%s: MSS Length: %d Offset: %d Size: %d\n",
__FUNCTION__,
all_options->as_mss.length, offset, optionsize);
got_mss_option = 1;
if (all_options->as_mss.length != 4) {
return 1;
} else {
tmp = ntohs(all_options->as_mss.mss);
if (tmp > 0 && tmp < cm_node->tcp_cntxt.mss)
cm_node->tcp_cntxt.mss = tmp;
}
break;
case OPTION_NUMBER_WINDOW_SCALE:
cm_node->tcp_cntxt.snd_wscale = all_options->as_windowscale.shiftcount;
break;
case OPTION_NUMBER_WRITE0:
cm_node->send_write0 = 1;
break;
default:
nes_debug(NES_DBG_CM, "TCP Option not understood: %x\n",
all_options->as_base.optionnum);
break;
}
offset += all_options->as_base.length;
}
if ((!got_mss_option) && (syn_packet))
cm_node->tcp_cntxt.mss = NES_CM_DEFAULT_MSS;
return 0;
}
/**
* process_packet
*/
int process_packet(struct nes_cm_node *cm_node, struct sk_buff *skb,
struct nes_cm_core *cm_core)
{
int optionsize;
int datasize;
int ret = 0;
struct tcphdr *tcph = tcp_hdr(skb);
u32 inc_sequence;
if (cm_node->state == NES_CM_STATE_SYN_SENT && tcph->syn) {
inc_sequence = ntohl(tcph->seq);
cm_node->tcp_cntxt.rcv_nxt = inc_sequence;
}
if ((!tcph) || (cm_node->state == NES_CM_STATE_TSA)) {
BUG_ON(!tcph);
atomic_inc(&cm_accel_dropped_pkts);
return -1;
}
if (tcph->rst) {
atomic_inc(&cm_resets_recvd);
nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u. refcnt=%d\n",
cm_node, cm_node->state, atomic_read(&cm_node->ref_count));
switch (cm_node->state) {
case NES_CM_STATE_LISTENING:
rem_ref_cm_node(cm_core, cm_node);
break;
case NES_CM_STATE_TSA:
case NES_CM_STATE_CLOSED:
break;
case NES_CM_STATE_SYN_RCVD:
nes_debug(NES_DBG_CM, "Received a reset for local 0x%08X:%04X,"
" remote 0x%08X:%04X, node state = %u\n",
cm_node->loc_addr, cm_node->loc_port,
cm_node->rem_addr, cm_node->rem_port,
cm_node->state);
rem_ref_cm_node(cm_core, cm_node);
break;
case NES_CM_STATE_ONE_SIDE_ESTABLISHED:
case NES_CM_STATE_ESTABLISHED:
case NES_CM_STATE_MPAREQ_SENT:
default:
nes_debug(NES_DBG_CM, "Received a reset for local 0x%08X:%04X,"
" remote 0x%08X:%04X, node state = %u refcnt=%d\n",
cm_node->loc_addr, cm_node->loc_port,
cm_node->rem_addr, cm_node->rem_port,
cm_node->state, atomic_read(&cm_node->ref_count));
/* create event */
cm_node->state = NES_CM_STATE_CLOSED;
create_event(cm_node, NES_CM_EVENT_ABORTED);
break;
}
return -1;
}
optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
skb_pull(skb, ip_hdr(skb)->ihl << 2);
skb_pull(skb, tcph->doff << 2);
datasize = skb->len;
inc_sequence = ntohl(tcph->seq);
nes_debug(NES_DBG_CM, "datasize = %u, sequence = 0x%08X, ack_seq = 0x%08X,"
" rcv_nxt = 0x%08X Flags: %s %s.\n",
datasize, inc_sequence, ntohl(tcph->ack_seq),
cm_node->tcp_cntxt.rcv_nxt, (tcph->syn ? "SYN":""),
(tcph->ack ? "ACK":""));
if (!tcph->syn && (inc_sequence != cm_node->tcp_cntxt.rcv_nxt)) {
nes_debug(NES_DBG_CM, "dropping packet, datasize = %u, sequence = 0x%08X,"
" ack_seq = 0x%08X, rcv_nxt = 0x%08X Flags: %s.\n",
datasize, inc_sequence, ntohl(tcph->ack_seq),
cm_node->tcp_cntxt.rcv_nxt, (tcph->ack ? "ACK":""));
if (cm_node->state == NES_CM_STATE_LISTENING) {
rem_ref_cm_node(cm_core, cm_node);
}
return -1;
}
cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
if (optionsize) {
u8 *optionsloc = (u8 *)&tcph[1];
if (process_options(cm_node, optionsloc, optionsize, (u32)tcph->syn)) {
nes_debug(NES_DBG_CM, "%s: Node %p, Sending RESET\n", __FUNCTION__, cm_node);
send_reset(cm_node);
if (cm_node->state != NES_CM_STATE_SYN_SENT)
rem_ref_cm_node(cm_core, cm_node);
return 0;
}
} else if (tcph->syn)
cm_node->tcp_cntxt.mss = NES_CM_DEFAULT_MSS;
cm_node->tcp_cntxt.snd_wnd = ntohs(tcph->window) <<
cm_node->tcp_cntxt.snd_wscale;
if (cm_node->tcp_cntxt.snd_wnd > cm_node->tcp_cntxt.max_snd_wnd) {
cm_node->tcp_cntxt.max_snd_wnd = cm_node->tcp_cntxt.snd_wnd;
}
if (tcph->ack) {
cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
switch (cm_node->state) {
case NES_CM_STATE_SYN_RCVD:
case NES_CM_STATE_SYN_SENT:
/* read and stash current sequence number */
if (cm_node->tcp_cntxt.rem_ack_num != cm_node->tcp_cntxt.loc_seq_num) {
nes_debug(NES_DBG_CM, "ERROR - cm_node->tcp_cntxt.rem_ack_num !="
" cm_node->tcp_cntxt.loc_seq_num\n");
send_reset(cm_node);
return 0;
}
if (cm_node->state == NES_CM_STATE_SYN_SENT)
cm_node->state = NES_CM_STATE_ONE_SIDE_ESTABLISHED;
else {
cm_node->state = NES_CM_STATE_ESTABLISHED;
}
break;
case NES_CM_STATE_LAST_ACK:
cm_node->state = NES_CM_STATE_CLOSED;
break;
case NES_CM_STATE_FIN_WAIT1:
cm_node->state = NES_CM_STATE_FIN_WAIT2;
break;
case NES_CM_STATE_CLOSING:
cm_node->state = NES_CM_STATE_TIME_WAIT;
/* need to schedule this to happen in 2MSL timeouts */
cm_node->state = NES_CM_STATE_CLOSED;
break;
case NES_CM_STATE_ONE_SIDE_ESTABLISHED:
case NES_CM_STATE_ESTABLISHED:
case NES_CM_STATE_MPAREQ_SENT:
case NES_CM_STATE_CLOSE_WAIT:
case NES_CM_STATE_TIME_WAIT:
case NES_CM_STATE_CLOSED:
break;
case NES_CM_STATE_LISTENING:
nes_debug(NES_DBG_CM, "Received an ACK on a listening port (SYN %d)\n", tcph->syn);
cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
send_reset(cm_node);
/* send_reset bumps refcount, this should have been a new node */
rem_ref_cm_node(cm_core, cm_node);
return -1;
break;
case NES_CM_STATE_TSA:
nes_debug(NES_DBG_CM, "Received a packet with the ack bit set while in TSA state\n");
break;
case NES_CM_STATE_UNKNOWN:
case NES_CM_STATE_INITED:
case NES_CM_STATE_ACCEPTING:
case NES_CM_STATE_FIN_WAIT2:
default:
nes_debug(NES_DBG_CM, "Received ack from unknown state: %x\n",
cm_node->state);
send_reset(cm_node);
break;
}
}
if (tcph->syn) {
if (cm_node->state == NES_CM_STATE_LISTENING) {
/* do not exceed backlog */
atomic_inc(&cm_node->listener->pend_accepts_cnt);
if (atomic_read(&cm_node->listener->pend_accepts_cnt) >
cm_node->listener->backlog) {
nes_debug(NES_DBG_CM, "drop syn due to backlog pressure \n");
cm_backlog_drops++;
atomic_dec(&cm_node->listener->pend_accepts_cnt);
rem_ref_cm_node(cm_core, cm_node);
return 0;
}
cm_node->accept_pend = 1;
}
if (datasize == 0)
cm_node->tcp_cntxt.rcv_nxt ++;
if (cm_node->state == NES_CM_STATE_LISTENING) {
cm_node->state = NES_CM_STATE_SYN_RCVD;
send_syn(cm_node, 1);
}
if (cm_node->state == NES_CM_STATE_ONE_SIDE_ESTABLISHED) {
cm_node->state = NES_CM_STATE_ESTABLISHED;
/* send final handshake ACK */
ret = send_ack(cm_node);
if (ret < 0)
return ret;
cm_node->state = NES_CM_STATE_MPAREQ_SENT;
ret = send_mpa_request(cm_node);
if (ret < 0)
return ret;
}
}
if (tcph->fin) {
cm_node->tcp_cntxt.rcv_nxt++;
switch (cm_node->state) {
case NES_CM_STATE_SYN_RCVD:
case NES_CM_STATE_SYN_SENT:
case NES_CM_STATE_ONE_SIDE_ESTABLISHED:
case NES_CM_STATE_ESTABLISHED:
case NES_CM_STATE_ACCEPTING:
case NES_CM_STATE_MPAREQ_SENT:
cm_node->state = NES_CM_STATE_CLOSE_WAIT;
cm_node->state = NES_CM_STATE_LAST_ACK;
ret = send_fin(cm_node, NULL);
break;
case NES_CM_STATE_FIN_WAIT1:
cm_node->state = NES_CM_STATE_CLOSING;
ret = send_ack(cm_node);
break;
case NES_CM_STATE_FIN_WAIT2:
cm_node->state = NES_CM_STATE_TIME_WAIT;
cm_node->tcp_cntxt.loc_seq_num ++;
ret = send_ack(cm_node);
/* need to schedule this to happen in 2MSL timeouts */
cm_node->state = NES_CM_STATE_CLOSED;
break;
case NES_CM_STATE_CLOSE_WAIT:
case NES_CM_STATE_LAST_ACK:
case NES_CM_STATE_CLOSING:
case NES_CM_STATE_TSA:
default:
nes_debug(NES_DBG_CM, "Received a fin while in %x state\n",
cm_node->state);
ret = -EINVAL;
break;
}
}
if (datasize) {
u8 *dataloc = skb->data;
/* figure out what state we are in and handle transition to next state */
switch (cm_node->state) {
case NES_CM_STATE_LISTENING:
case NES_CM_STATE_SYN_RCVD:
case NES_CM_STATE_SYN_SENT:
case NES_CM_STATE_FIN_WAIT1:
case NES_CM_STATE_FIN_WAIT2:
case NES_CM_STATE_CLOSE_WAIT:
case NES_CM_STATE_LAST_ACK:
case NES_CM_STATE_CLOSING:
break;
case NES_CM_STATE_MPAREQ_SENT:
/* recv the mpa res frame, ret=frame len (incl priv data) */
ret = parse_mpa(cm_node, dataloc, datasize);
if (ret < 0)
break;
/* set the req frame payload len in skb */
/* we are done handling this state, set node to a TSA state */
cm_node->state = NES_CM_STATE_TSA;
send_ack(cm_node);
create_event(cm_node, NES_CM_EVENT_CONNECTED);
break;
case NES_CM_STATE_ESTABLISHED:
/* we are expecting an MPA req frame */
ret = parse_mpa(cm_node, dataloc, datasize);
if (ret < 0) {
break;
}
cm_node->state = NES_CM_STATE_TSA;
send_ack(cm_node);
/* we got a valid MPA request, create an event */
create_event(cm_node, NES_CM_EVENT_MPA_REQ);
break;
case NES_CM_STATE_TSA:
handle_exception_pkt(cm_node, skb);
break;
case NES_CM_STATE_UNKNOWN:
case NES_CM_STATE_INITED:
default:
ret = -1;
}
}
return ret;
}
/**
* mini_cm_listen - create a listen node with params
*/
static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *cm_core,
struct nes_vnic *nesvnic, struct nes_cm_info *cm_info)
{
struct nes_cm_listener *listener;
unsigned long flags;
nes_debug(NES_DBG_CM, "Search for 0x%08x : 0x%04x\n",
cm_info->loc_addr, cm_info->loc_port);
/* cannot have multiple matching listeners */
listener = find_listener(cm_core, htonl(cm_info->loc_addr),
htons(cm_info->loc_port), NES_CM_LISTENER_EITHER_STATE);
if (listener && listener->listener_state == NES_CM_LISTENER_ACTIVE_STATE) {
/* find_listener already incremented the ref count, drop it */
atomic_dec(&listener->ref_count);
nes_debug(NES_DBG_CM, "Not creating listener since it already exists\n");
return NULL;
}
if (!listener) {
/* create a CM listen node (1/2 node to compare incoming traffic to) */
listener = kzalloc(sizeof(*listener), GFP_ATOMIC);
if (!listener) {
nes_debug(NES_DBG_CM, "Not creating listener memory allocation failed\n");
return NULL;
}
listener->loc_addr = htonl(cm_info->loc_addr);
listener->loc_port = htons(cm_info->loc_port);
listener->reused_node = 0;
atomic_set(&listener->ref_count, 1);
	} else {
		/* passive case: find_listener already inc'ed the ref count */
		listener->reused_node = 1;
	}
listener->cm_id = cm_info->cm_id;
atomic_set(&listener->pend_accepts_cnt, 0);
listener->cm_core = cm_core;
listener->nesvnic = nesvnic;
atomic_inc(&cm_core->node_cnt);
atomic_inc(&cm_core->session_id);
listener->session_id = (u32)(atomic_read(&cm_core->session_id) + current->tgid);
listener->conn_type = cm_info->conn_type;
listener->backlog = cm_info->backlog;
listener->listener_state = NES_CM_LISTENER_ACTIVE_STATE;
if (!listener->reused_node) {
spin_lock_irqsave(&cm_core->listen_list_lock, flags);
list_add(&listener->list, &cm_core->listen_list.list);
spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
atomic_inc(&cm_core->listen_node_cnt);
}
nes_debug(NES_DBG_CM, "Api - listen(): addr=0x%08X, port=0x%04x,"
" listener = %p, backlog = %d, cm_id = %p.\n",
cm_info->loc_addr, cm_info->loc_port,
listener, listener->backlog, listener->cm_id);
return listener;
}
/**
* mini_cm_connect - make a connection node with params
*/
struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
struct nes_vnic *nesvnic, struct ietf_mpa_frame *mpa_frame,
struct nes_cm_info *cm_info)
{
int ret = 0;
struct nes_cm_node *cm_node;
struct nes_cm_listener *loopbackremotelistener;
struct nes_cm_node *loopbackremotenode;
struct nes_cm_info loopback_cm_info;
u16 mpa_frame_size = sizeof(struct ietf_mpa_frame) +
ntohs(mpa_frame->priv_data_len);
cm_info->loc_addr = htonl(cm_info->loc_addr);
cm_info->rem_addr = htonl(cm_info->rem_addr);
cm_info->loc_port = htons(cm_info->loc_port);
cm_info->rem_port = htons(cm_info->rem_port);
/* create a CM connection node */
cm_node = make_cm_node(cm_core, nesvnic, cm_info, NULL);
if (!cm_node)
return NULL;
/* set our node side to client (active) side */
cm_node->tcp_cntxt.client = 1;
cm_node->tcp_cntxt.rcv_wscale = NES_CM_DEFAULT_RCV_WND_SCALE;
if (cm_info->loc_addr == cm_info->rem_addr) {
loopbackremotelistener = find_listener(cm_core, cm_node->rem_addr,
cm_node->rem_port, NES_CM_LISTENER_ACTIVE_STATE);
if (loopbackremotelistener == NULL) {
create_event(cm_node, NES_CM_EVENT_ABORTED);
} else {
atomic_inc(&cm_loopbacks);
loopback_cm_info = *cm_info;
loopback_cm_info.loc_port = cm_info->rem_port;
loopback_cm_info.rem_port = cm_info->loc_port;
loopback_cm_info.cm_id = loopbackremotelistener->cm_id;
loopbackremotenode = make_cm_node(cm_core, nesvnic, &loopback_cm_info,
loopbackremotelistener);
loopbackremotenode->loopbackpartner = cm_node;
loopbackremotenode->tcp_cntxt.rcv_wscale = NES_CM_DEFAULT_RCV_WND_SCALE;
cm_node->loopbackpartner = loopbackremotenode;
memcpy(loopbackremotenode->mpa_frame_buf, &mpa_frame->priv_data,
mpa_frame_size);
loopbackremotenode->mpa_frame_size = mpa_frame_size -
sizeof(struct ietf_mpa_frame);
/* we are done handling this state, set node to a TSA state */
cm_node->state = NES_CM_STATE_TSA;
cm_node->tcp_cntxt.rcv_nxt = loopbackremotenode->tcp_cntxt.loc_seq_num;
loopbackremotenode->tcp_cntxt.rcv_nxt = cm_node->tcp_cntxt.loc_seq_num;
cm_node->tcp_cntxt.max_snd_wnd = loopbackremotenode->tcp_cntxt.rcv_wnd;
loopbackremotenode->tcp_cntxt.max_snd_wnd = cm_node->tcp_cntxt.rcv_wnd;
cm_node->tcp_cntxt.snd_wnd = loopbackremotenode->tcp_cntxt.rcv_wnd;
loopbackremotenode->tcp_cntxt.snd_wnd = cm_node->tcp_cntxt.rcv_wnd;
cm_node->tcp_cntxt.snd_wscale = loopbackremotenode->tcp_cntxt.rcv_wscale;
loopbackremotenode->tcp_cntxt.snd_wscale = cm_node->tcp_cntxt.rcv_wscale;
create_event(loopbackremotenode, NES_CM_EVENT_MPA_REQ);
}
return cm_node;
}
/* set our node side to client (active) side */
cm_node->tcp_cntxt.client = 1;
/* copy the caller's MPA frame into our node */
memcpy(&cm_node->mpa_frame, mpa_frame, mpa_frame_size);
cm_node->mpa_frame_size = mpa_frame_size;
/* send a syn and goto syn sent state */
cm_node->state = NES_CM_STATE_SYN_SENT;
ret = send_syn(cm_node, 0);
nes_debug(NES_DBG_CM, "Api - connect(): dest addr=0x%08X, port=0x%04x,"
" cm_node=%p, cm_id = %p.\n",
cm_node->rem_addr, cm_node->rem_port, cm_node, cm_node->cm_id);
return cm_node;
}
/**
* mini_cm_accept - accept a connection
* This function is never called
*/
int mini_cm_accept(struct nes_cm_core *cm_core, struct ietf_mpa_frame *mpa_frame,
struct nes_cm_node *cm_node)
{
return 0;
}
/**
* mini_cm_reject - reject and teardown a connection
*/
int mini_cm_reject(struct nes_cm_core *cm_core,
struct ietf_mpa_frame *mpa_frame,
struct nes_cm_node *cm_node)
{
int ret = 0;
struct sk_buff *skb;
u16 mpa_frame_size = sizeof(struct ietf_mpa_frame) +
ntohs(mpa_frame->priv_data_len);
skb = get_free_pkt(cm_node);
if (!skb) {
nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n");
return -1;
}
/* send the MPA reply (reject) frame */
form_cm_frame(skb, cm_node, NULL, 0, mpa_frame, mpa_frame_size, SET_ACK | SET_FIN);
ret = schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 1, 0);
cm_node->state = NES_CM_STATE_CLOSED;
ret = send_fin(cm_node, NULL);
if (ret < 0) {
printk(KERN_INFO PFX "failed to send MPA Reply (reject)\n");
return ret;
}
return ret;
}
/**
* mini_cm_close
*/
int mini_cm_close(struct nes_cm_core *cm_core, struct nes_cm_node *cm_node)
{
int ret = 0;
if (!cm_core || !cm_node)
return -EINVAL;
switch (cm_node->state) {
/* tear the connection down according to its current state */
case NES_CM_STATE_SYN_RCVD:
case NES_CM_STATE_SYN_SENT:
case NES_CM_STATE_ONE_SIDE_ESTABLISHED:
case NES_CM_STATE_ESTABLISHED:
case NES_CM_STATE_ACCEPTING:
case NES_CM_STATE_MPAREQ_SENT:
cm_node->state = NES_CM_STATE_FIN_WAIT1;
send_fin(cm_node, NULL);
break;
case NES_CM_STATE_CLOSE_WAIT:
cm_node->state = NES_CM_STATE_LAST_ACK;
send_fin(cm_node, NULL);
break;
case NES_CM_STATE_FIN_WAIT1:
case NES_CM_STATE_FIN_WAIT2:
case NES_CM_STATE_LAST_ACK:
case NES_CM_STATE_TIME_WAIT:
case NES_CM_STATE_CLOSING:
ret = -1;
break;
case NES_CM_STATE_LISTENING:
case NES_CM_STATE_UNKNOWN:
case NES_CM_STATE_INITED:
case NES_CM_STATE_CLOSED:
case NES_CM_STATE_TSA:
ret = rem_ref_cm_node(cm_core, cm_node);
break;
}
cm_node->cm_id = NULL;
return ret;
}
/**
* mini_cm_recv_pkt - receive an Ethernet packet and run it through the
* CM node state machine
*/
int mini_cm_recv_pkt(struct nes_cm_core *cm_core, struct nes_vnic *nesvnic,
struct sk_buff *skb)
{
struct nes_cm_node *cm_node = NULL;
struct nes_cm_listener *listener = NULL;
struct iphdr *iph;
struct tcphdr *tcph;
struct nes_cm_info nfo;
int ret = 0;
if (!skb || skb->len < sizeof(struct iphdr) + sizeof(struct tcphdr)) {
ret = -EINVAL;
goto out;
}
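/* locate the IP and TCP headers and record the connection 4-tuple in host byte order */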
iph = (struct iphdr *)skb->data;
tcph = (struct tcphdr *)(skb->data + sizeof(struct iphdr));
skb_reset_network_header(skb);
skb_set_transport_header(skb, sizeof(*tcph));
skb->len = ntohs(iph->tot_len);
nfo.loc_addr = ntohl(iph->daddr);
nfo.loc_port = ntohs(tcph->dest);
nfo.rem_addr = ntohl(iph->saddr);
nfo.rem_port = ntohs(tcph->source);
nes_debug(NES_DBG_CM, "Received packet: dest=0x%08X:0x%04X src=0x%08X:0x%04X\n",
iph->daddr, tcph->dest, iph->saddr, tcph->source);
/* note: this call is going to increment cm_node ref count */
cm_node = find_node(cm_core,
nfo.rem_port, nfo.rem_addr,
nfo.loc_port, nfo.loc_addr);
if (!cm_node) {
listener = find_listener(cm_core, nfo.loc_addr, nfo.loc_port,
NES_CM_LISTENER_ACTIVE_STATE);
if (listener) {
nfo.cm_id = listener->cm_id;
nfo.conn_type = listener->conn_type;
} else {
nfo.cm_id = NULL;
nfo.conn_type = 0;
}
cm_node = make_cm_node(cm_core, nesvnic, &nfo, listener);
if (!cm_node) {
nes_debug(NES_DBG_CM, "Unable to allocate node\n");
if (listener) {
nes_debug(NES_DBG_CM, "unable to allocate node and decrementing listener refcount\n");
atomic_dec(&listener->ref_count);
}
ret = -1;
goto out;
}
if (!listener) {
nes_debug(NES_DBG_CM, "Packet found for unknown port %x refcnt=%d\n",
nfo.loc_port, atomic_read(&cm_node->ref_count));
if (!tcph->rst) {
nes_debug(NES_DBG_CM, "Packet found for unknown port=%d"
" rem_port=%d refcnt=%d\n",
nfo.loc_port, nfo.rem_port, atomic_read(&cm_node->ref_count));
cm_node->tcp_cntxt.rcv_nxt = ntohl(tcph->seq);
cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
send_reset(cm_node);
}
rem_ref_cm_node(cm_core, cm_node);
ret = -1;
goto out;
}
add_ref_cm_node(cm_node);
cm_node->state = NES_CM_STATE_LISTENING;
}
nes_debug(NES_DBG_CM, "Processing Packet for node %p, data = (%p):\n",
cm_node, skb->data);
process_packet(cm_node, skb, cm_core);
rem_ref_cm_node(cm_core, cm_node);
out:
if (skb)
dev_kfree_skb_any(skb);
return ret;
}
/**
* nes_cm_alloc_core - allocate a top level instance of a cm core
*/
struct nes_cm_core *nes_cm_alloc_core(void)
{
int i;
struct nes_cm_core *cm_core;
struct sk_buff *skb = NULL;
/* setup the CM core */
/* alloc top level core control structure */
cm_core = kzalloc(sizeof(*cm_core), GFP_KERNEL);
if (!cm_core)
return NULL;
INIT_LIST_HEAD(&cm_core->connected_nodes);
init_timer(&cm_core->tcp_timer);
cm_core->tcp_timer.function = nes_cm_timer_tick;
cm_core->mtu = NES_CM_DEFAULT_MTU;
cm_core->state = NES_CM_STATE_INITED;
cm_core->free_tx_pkt_max = NES_CM_DEFAULT_FREE_PKTS;
atomic_set(&cm_core->session_id, 0);
atomic_set(&cm_core->events_posted, 0);
/* init the packet lists */
skb_queue_head_init(&cm_core->tx_free_list);
for (i = 0; i < NES_CM_DEFAULT_FRAME_CNT; i++) {
skb = dev_alloc_skb(cm_core->mtu);
if (!skb) {
kfree(cm_core);
return NULL;
}
/* add 'raw' skb to free frame list */
skb_queue_head(&cm_core->tx_free_list, skb);
}
cm_core->api = &nes_cm_api;
spin_lock_init(&cm_core->ht_lock);
spin_lock_init(&cm_core->listen_list_lock);
INIT_LIST_HEAD(&cm_core->listen_list.list);
nes_debug(NES_DBG_CM, "Init CM Core completed -- cm_core=%p\n", cm_core);
nes_debug(NES_DBG_CM, "Enable QUEUE EVENTS\n");
cm_core->event_wq = create_singlethread_workqueue("nesewq");
cm_core->post_event = nes_cm_post_event;
nes_debug(NES_DBG_CM, "Enable QUEUE DISCONNECTS\n");
cm_core->disconn_wq = create_singlethread_workqueue("nesdwq");
print_core(cm_core);
return cm_core;
}
/**
* mini_cm_dealloc_core - deallocate a top level instance of a cm core
*/
int mini_cm_dealloc_core(struct nes_cm_core *cm_core)
{
nes_debug(NES_DBG_CM, "De-Alloc CM Core (%p)\n", cm_core);
if (!cm_core)
return -EINVAL;
barrier();
if (timer_pending(&cm_core->tcp_timer)) {
del_timer(&cm_core->tcp_timer);
}
destroy_workqueue(cm_core->event_wq);
destroy_workqueue(cm_core->disconn_wq);
nes_debug(NES_DBG_CM, "\n");
kfree(cm_core);
return 0;
}
/**
* mini_cm_get
*/
int mini_cm_get(struct nes_cm_core *cm_core)
{
return cm_core->state;
}
/**
* mini_cm_set
*/
int mini_cm_set(struct nes_cm_core *cm_core, u32 type, u32 value)
{
int ret = 0;
switch (type) {
case NES_CM_SET_PKT_SIZE:
cm_core->mtu = value;
break;
case NES_CM_SET_FREE_PKT_Q_SIZE:
cm_core->free_tx_pkt_max = value;
break;
default:
/* unknown set option */
ret = -EINVAL;
}
return ret;
}
/**
* nes_cm_init_tsa_conn - set up the hardware connection context; the MPA
* exchange must have completed successfully before this is called
*/
static int nes_cm_init_tsa_conn(struct nes_qp *nesqp, struct nes_cm_node *cm_node)
{
int ret = 0;
if (!nesqp)
return -EINVAL;
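/* populate the hardware QP context from the TCP state tracked in the cm_node */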
nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_IPV4 |
NES_QPCONTEXT_MISC_NO_NAGLE | NES_QPCONTEXT_MISC_DO_NOT_FRAG |
NES_QPCONTEXT_MISC_DROS);
if (cm_node->tcp_cntxt.snd_wscale || cm_node->tcp_cntxt.rcv_wscale)
nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_WSCALE);
nesqp->nesqp_context->misc2 |= cpu_to_le32(64 << NES_QPCONTEXT_MISC2_TTL_SHIFT);
nesqp->nesqp_context->mss |= cpu_to_le32(((u32)cm_node->tcp_cntxt.mss) << 16);
nesqp->nesqp_context->tcp_state_flow_label |= cpu_to_le32(
(u32)NES_QPCONTEXT_TCPSTATE_EST << NES_QPCONTEXT_TCPFLOW_TCP_STATE_SHIFT);
nesqp->nesqp_context->pd_index_wscale |= cpu_to_le32(
(cm_node->tcp_cntxt.snd_wscale << NES_QPCONTEXT_PDWSCALE_SND_WSCALE_SHIFT) &
NES_QPCONTEXT_PDWSCALE_SND_WSCALE_MASK);
nesqp->nesqp_context->pd_index_wscale |= cpu_to_le32(
(cm_node->tcp_cntxt.rcv_wscale << NES_QPCONTEXT_PDWSCALE_RCV_WSCALE_SHIFT) &
NES_QPCONTEXT_PDWSCALE_RCV_WSCALE_MASK);
nesqp->nesqp_context->keepalive = cpu_to_le32(0x80);
nesqp->nesqp_context->ts_recent = 0;
nesqp->nesqp_context->ts_age = 0;
nesqp->nesqp_context->snd_nxt = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
nesqp->nesqp_context->snd_wnd = cpu_to_le32(cm_node->tcp_cntxt.snd_wnd);
nesqp->nesqp_context->rcv_nxt = cpu_to_le32(cm_node->tcp_cntxt.rcv_nxt);
nesqp->nesqp_context->rcv_wnd = cpu_to_le32(cm_node->tcp_cntxt.rcv_wnd <<
cm_node->tcp_cntxt.rcv_wscale);
nesqp->nesqp_context->snd_max = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
nesqp->nesqp_context->snd_una = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
nesqp->nesqp_context->srtt = 0;
nesqp->nesqp_context->rttvar = cpu_to_le32(0x6);
nesqp->nesqp_context->ssthresh = cpu_to_le32(0x3FFFC000);
nesqp->nesqp_context->cwnd = cpu_to_le32(2*cm_node->tcp_cntxt.mss);
nesqp->nesqp_context->snd_wl1 = cpu_to_le32(cm_node->tcp_cntxt.rcv_nxt);
nesqp->nesqp_context->snd_wl2 = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
nesqp->nesqp_context->max_snd_wnd = cpu_to_le32(cm_node->tcp_cntxt.max_snd_wnd);
nes_debug(NES_DBG_CM, "QP%u: rcv_nxt = 0x%08X, snd_nxt = 0x%08X,"
" Setting MSS to %u, PDWscale = 0x%08X, rcv_wnd = %u, context misc = 0x%08X.\n",
nesqp->hwqp.qp_id, le32_to_cpu(nesqp->nesqp_context->rcv_nxt),
le32_to_cpu(nesqp->nesqp_context->snd_nxt),
cm_node->tcp_cntxt.mss, le32_to_cpu(nesqp->nesqp_context->pd_index_wscale),
le32_to_cpu(nesqp->nesqp_context->rcv_wnd),
le32_to_cpu(nesqp->nesqp_context->misc));
nes_debug(NES_DBG_CM, " snd_wnd = 0x%08X.\n", le32_to_cpu(nesqp->nesqp_context->snd_wnd));
nes_debug(NES_DBG_CM, " snd_cwnd = 0x%08X.\n", le32_to_cpu(nesqp->nesqp_context->cwnd));
nes_debug(NES_DBG_CM, " max_swnd = 0x%08X.\n", le32_to_cpu(nesqp->nesqp_context->max_snd_wnd));
nes_debug(NES_DBG_CM, "Change cm_node state to TSA\n");
cm_node->state = NES_CM_STATE_TSA;
return ret;
}
/**
* nes_cm_disconn
*/
int nes_cm_disconn(struct nes_qp *nesqp)
{
unsigned long flags;
spin_lock_irqsave(&nesqp->lock, flags);
if (nesqp->disconn_pending == 0) {
nesqp->disconn_pending++;
spin_unlock_irqrestore(&nesqp->lock, flags);
/* nes_add_ref(&nesqp->ibqp); */
/* init our disconnect work element and queue it */
INIT_WORK(&nesqp->disconn_work, nes_disconnect_worker);
queue_work(g_cm_core->disconn_wq, &nesqp->disconn_work);
} else {
spin_unlock_irqrestore(&nesqp->lock, flags);
nes_rem_ref(&nesqp->ibqp);
}
return 0;
}
/**
* nes_disconnect_worker
*/
void nes_disconnect_worker(struct work_struct *work)
{
struct nes_qp *nesqp = container_of(work, struct nes_qp, disconn_work);
nes_debug(NES_DBG_CM, "processing AEQE id 0x%04X for QP%u.\n",
nesqp->last_aeq, nesqp->hwqp.qp_id);
nes_cm_disconn_true(nesqp);
}
/**
* nes_cm_disconn_true
*/
int nes_cm_disconn_true(struct nes_qp *nesqp)
{
unsigned long flags;
int ret = 0;
struct iw_cm_id *cm_id;
struct iw_cm_event cm_event;
struct nes_vnic *nesvnic;
u16 last_ae;
u8 original_hw_tcp_state;
u8 original_ibqp_state;
u8 issued_disconnect_reset = 0;
if (!nesqp) {
nes_debug(NES_DBG_CM, "disconnect_worker nesqp is NULL\n");
return -1;
}
spin_lock_irqsave(&nesqp->lock, flags);
cm_id = nesqp->cm_id;
/* make sure we haven't already closed this connection */
if (!cm_id) {
nes_debug(NES_DBG_CM, "QP%u disconnect_worker cmid is NULL\n",
nesqp->hwqp.qp_id);
spin_unlock_irqrestore(&nesqp->lock, flags);
nes_rem_ref(&nesqp->ibqp);
return -1;
}
nesvnic = to_nesvnic(nesqp->ibqp.device);
nes_debug(NES_DBG_CM, "Disconnecting QP%u\n", nesqp->hwqp.qp_id);
original_hw_tcp_state = nesqp->hw_tcp_state;
original_ibqp_state = nesqp->ibqp_state;
last_ae = nesqp->last_aeq;
nes_debug(NES_DBG_CM, "set ibqp_state=%u\n", nesqp->ibqp_state);
if ((nesqp->cm_id) && (cm_id->event_handler)) {
if ((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) ||
((original_ibqp_state == IB_QPS_RTS) &&
(last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
atomic_inc(&cm_disconnects);
cm_event.event = IW_CM_EVENT_DISCONNECT;
if (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET) {
issued_disconnect_reset = 1;
cm_event.status = IW_CM_EVENT_STATUS_RESET;
nes_debug(NES_DBG_CM, "Generating a CM Disconnect Event (status reset) for "
" QP%u, cm_id = %p. \n",
nesqp->hwqp.qp_id, cm_id);
} else {
cm_event.status = IW_CM_EVENT_STATUS_OK;
}
cm_event.local_addr = cm_id->local_addr;
cm_event.remote_addr = cm_id->remote_addr;
cm_event.private_data = NULL;
cm_event.private_data_len = 0;
nes_debug(NES_DBG_CM, "Generating a CM Disconnect Event for "
" QP%u, SQ Head = %u, SQ Tail = %u. cm_id = %p, refcount = %u.\n",
nesqp->hwqp.qp_id,
nesqp->hwqp.sq_head, nesqp->hwqp.sq_tail, cm_id,
atomic_read(&nesqp->refcount));
spin_unlock_irqrestore(&nesqp->lock, flags);
ret = cm_id->event_handler(cm_id, &cm_event);
if (ret)
nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret);
spin_lock_irqsave(&nesqp->lock, flags);
}
nesqp->disconn_pending = 0;
/* There might have been another AE while the lock was released */
original_hw_tcp_state = nesqp->hw_tcp_state;
original_ibqp_state = nesqp->ibqp_state;
last_ae = nesqp->last_aeq;
if ((issued_disconnect_reset == 0) && (nesqp->cm_id) &&
((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSED) ||
(original_hw_tcp_state == NES_AEQE_TCP_STATE_TIME_WAIT) ||
(last_ae == NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) ||
(last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
atomic_inc(&cm_closes);
nesqp->cm_id = NULL;
nesqp->in_disconnect = 0;
spin_unlock_irqrestore(&nesqp->lock, flags);
nes_disconnect(nesqp, 1);
cm_id->provider_data = nesqp;
/* Send up the close complete event */
cm_event.event = IW_CM_EVENT_CLOSE;
cm_event.status = IW_CM_EVENT_STATUS_OK;
cm_event.provider_data = cm_id->provider_data;
cm_event.local_addr = cm_id->local_addr;
cm_event.remote_addr = cm_id->remote_addr;
cm_event.private_data = NULL;
cm_event.private_data_len = 0;
ret = cm_id->event_handler(cm_id, &cm_event);
if (ret) {
nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret);
}
cm_id->rem_ref(cm_id);
spin_lock_irqsave(&nesqp->lock, flags);
if (nesqp->flush_issued == 0) {
nesqp->flush_issued = 1;
spin_unlock_irqrestore(&nesqp->lock, flags);
flush_wqes(nesvnic->nesdev, nesqp, NES_CQP_FLUSH_RQ, 1);
} else {
spin_unlock_irqrestore(&nesqp->lock, flags);
}
/* This reference is from either ModifyQP or the AE processing,
there is still a race here with modifyqp */
nes_rem_ref(&nesqp->ibqp);
} else {
cm_id = nesqp->cm_id;
spin_unlock_irqrestore(&nesqp->lock, flags);
/* check to see if the inbound reset beat the outbound reset */
if ((!cm_id) && (last_ae==NES_AEQE_AEID_RESET_SENT)) {
nes_debug(NES_DBG_CM, "QP%u: Decing refcount due to inbound reset"
" beating the outbound reset.\n",
nesqp->hwqp.qp_id);
nes_rem_ref(&nesqp->ibqp);
}
}
} else {
nesqp->disconn_pending = 0;
spin_unlock_irqrestore(&nesqp->lock, flags);
}
nes_rem_ref(&nesqp->ibqp);
return 0;
}
/**
* nes_disconnect
*/
int nes_disconnect(struct nes_qp *nesqp, int abrupt)
{
int ret = 0;
struct nes_vnic *nesvnic;
struct nes_device *nesdev;
nesvnic = to_nesvnic(nesqp->ibqp.device);
if (!nesvnic)
return -EINVAL;
nesdev = nesvnic->nesdev;
nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
atomic_read(&nesvnic->netdev->refcnt));
if (nesqp->active_conn) {
/* indicate this connection is NOT active */
nesqp->active_conn = 0;
} else {
/* Need to free the Last Streaming Mode Message */
if (nesqp->ietf_frame) {
pci_free_consistent(nesdev->pcidev,
nesqp->private_data_len+sizeof(struct ietf_mpa_frame),
nesqp->ietf_frame, nesqp->ietf_frame_pbase);
}
}
/* close the CM node down if it is still active */
if (nesqp->cm_node) {
nes_debug(NES_DBG_CM, "Call close API\n");
g_cm_core->api->close(g_cm_core, nesqp->cm_node);
nesqp->cm_node = NULL;
}
return ret;
}
/**
* nes_accept
*/
int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
u64 u64temp;
struct ib_qp *ibqp;
struct nes_qp *nesqp;
struct nes_vnic *nesvnic;
struct nes_device *nesdev;
struct nes_cm_node *cm_node;
struct nes_adapter *adapter;
struct ib_qp_attr attr;
struct iw_cm_event cm_event;
struct nes_hw_qp_wqe *wqe;
struct nes_v4_quad nes_quad;
int ret;
ibqp = nes_get_qp(cm_id->device, conn_param->qpn);
if (!ibqp)
return -EINVAL;
/* get all our handles */
nesqp = to_nesqp(ibqp);
nesvnic = to_nesvnic(nesqp->ibqp.device);
nesdev = nesvnic->nesdev;
adapter = nesdev->nesadapter;
nes_debug(NES_DBG_CM, "nesvnic=%p, netdev=%p, %s\n",
nesvnic, nesvnic->netdev, nesvnic->netdev->name);
/* since this is from a listen, we were able to put node handle into cm_id */
cm_node = (struct nes_cm_node *)cm_id->provider_data;
/* associate the node with the QP */
nesqp->cm_node = (void *)cm_node;
nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu\n",
nesqp->hwqp.qp_id, cm_node, jiffies);
atomic_inc(&cm_accepts);
nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
atomic_read(&nesvnic->netdev->refcnt));
/* allocate the ietf frame and space for private data */
nesqp->ietf_frame = pci_alloc_consistent(nesdev->pcidev,
sizeof(struct ietf_mpa_frame) + conn_param->private_data_len,
&nesqp->ietf_frame_pbase);
if (!nesqp->ietf_frame) {
nes_debug(NES_DBG_CM, "Unable to allocate memory for private data\n");
return -ENOMEM;
}
/* setup the MPA frame */
nesqp->private_data_len = conn_param->private_data_len;
memcpy(nesqp->ietf_frame->key, IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE);
memcpy(nesqp->ietf_frame->priv_data, conn_param->private_data,
conn_param->private_data_len);
nesqp->ietf_frame->priv_data_len = cpu_to_be16(conn_param->private_data_len);
nesqp->ietf_frame->rev = mpa_version;
nesqp->ietf_frame->flags = IETF_MPA_FLAGS_CRC;
/* setup our first outgoing iWarp send WQE (the IETF frame response) */
wqe = &nesqp->hwqp.sq_vbase[0];
if (cm_id->remote_addr.sin_addr.s_addr != cm_id->local_addr.sin_addr.s_addr) {
u64temp = (unsigned long)nesqp;
u64temp |= NES_SW_CONTEXT_ALIGN>>1;
set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX,
u64temp);
wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] =
cpu_to_le32(NES_IWARP_SQ_WQE_STREAMING | NES_IWARP_SQ_WQE_WRPDU);
wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX] =
cpu_to_le32(conn_param->private_data_len + sizeof(struct ietf_mpa_frame));
wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_LOW_IDX] =
cpu_to_le32((u32)nesqp->ietf_frame_pbase);
wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_HIGH_IDX] =
cpu_to_le32((u32)((u64)nesqp->ietf_frame_pbase >> 32));
wqe->wqe_words[NES_IWARP_SQ_WQE_LENGTH0_IDX] =
cpu_to_le32(conn_param->private_data_len + sizeof(struct ietf_mpa_frame));
wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = 0;
nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32(
NES_QPCONTEXT_ORDIRD_LSMM_PRESENT | NES_QPCONTEXT_ORDIRD_WRPDU);
} else {
nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32((NES_QPCONTEXT_ORDIRD_LSMM_PRESENT |
NES_QPCONTEXT_ORDIRD_WRPDU | NES_QPCONTEXT_ORDIRD_ALSMM));
}
nesqp->skip_lsmm = 1;
/* Cache the cm_id in the qp */
nesqp->cm_id = cm_id;
cm_node->cm_id = cm_id;
/* nesqp->cm_node = (void *)cm_id->provider_data; */
cm_id->provider_data = nesqp;
nesqp->active_conn = 0;
nes_cm_init_tsa_conn(nesqp, cm_node);
nesqp->nesqp_context->tcpPorts[0] = cpu_to_le16(ntohs(cm_id->local_addr.sin_port));
nesqp->nesqp_context->tcpPorts[1] = cpu_to_le16(ntohs(cm_id->remote_addr.sin_port));
nesqp->nesqp_context->ip0 = cpu_to_le32(ntohl(cm_id->remote_addr.sin_addr.s_addr));
nesqp->nesqp_context->misc2 |= cpu_to_le32(
(u32)PCI_FUNC(nesdev->pcidev->devfn) << NES_QPCONTEXT_MISC2_SRC_IP_SHIFT);
nesqp->nesqp_context->arp_index_vlan |= cpu_to_le32(
nes_arp_table(nesdev, le32_to_cpu(nesqp->nesqp_context->ip0), NULL,
NES_ARP_RESOLVE) << 16);
nesqp->nesqp_context->ts_val_delta = cpu_to_le32(
jiffies - nes_read_indexed(nesdev, NES_IDX_TCP_NOW));
nesqp->nesqp_context->ird_index = cpu_to_le32(nesqp->hwqp.qp_id);
nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32(
((u32)1 << NES_QPCONTEXT_ORDIRD_IWARP_MODE_SHIFT));
nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32((u32)conn_param->ord);
memset(&nes_quad, 0, sizeof(nes_quad));
nes_quad.DstIpAdrIndex = cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) << 24);
nes_quad.SrcIpadr = cm_id->remote_addr.sin_addr.s_addr;
nes_quad.TcpPorts[0] = cm_id->remote_addr.sin_port;
nes_quad.TcpPorts[1] = cm_id->local_addr.sin_port;
/* Produce hash key */
nesqp->hte_index = cpu_to_be32(
crc32c(~0, (void *)&nes_quad, sizeof(nes_quad)) ^ 0xffffffff);
nes_debug(NES_DBG_CM, "HTE Index = 0x%08X, CRC = 0x%08X\n",
nesqp->hte_index, nesqp->hte_index & adapter->hte_index_mask);
nesqp->hte_index &= adapter->hte_index_mask;
nesqp->nesqp_context->hte_index = cpu_to_le32(nesqp->hte_index);
cm_node->cm_core->api->accelerated(cm_node->cm_core, cm_node);
nes_debug(NES_DBG_CM, "QP%u, Destination IP = 0x%08X:0x%04X, local = 0x%08X:0x%04X,"
" rcv_nxt=0x%08X, snd_nxt=0x%08X, mpa + private data length=%zu.\n",
nesqp->hwqp.qp_id,
ntohl(cm_id->remote_addr.sin_addr.s_addr),
ntohs(cm_id->remote_addr.sin_port),
ntohl(cm_id->local_addr.sin_addr.s_addr),
ntohs(cm_id->local_addr.sin_port),
le32_to_cpu(nesqp->nesqp_context->rcv_nxt),
le32_to_cpu(nesqp->nesqp_context->snd_nxt),
conn_param->private_data_len+sizeof(struct ietf_mpa_frame));
attr.qp_state = IB_QPS_RTS;
nes_modify_qp(&nesqp->ibqp, &attr, IB_QP_STATE, NULL);
/* notify OF layer that accept event was successful */
cm_id->add_ref(cm_id);
cm_event.event = IW_CM_EVENT_ESTABLISHED;
cm_event.status = IW_CM_EVENT_STATUS_ACCEPTED;
cm_event.provider_data = (void *)nesqp;
cm_event.local_addr = cm_id->local_addr;
cm_event.remote_addr = cm_id->remote_addr;
cm_event.private_data = NULL;
cm_event.private_data_len = 0;
ret = cm_id->event_handler(cm_id, &cm_event);
if (cm_node->loopbackpartner) {
cm_node->loopbackpartner->mpa_frame_size = nesqp->private_data_len;
/* copy entire MPA frame to our cm_node's frame */
memcpy(cm_node->loopbackpartner->mpa_frame_buf, nesqp->ietf_frame->priv_data,
nesqp->private_data_len);
create_event(cm_node->loopbackpartner, NES_CM_EVENT_CONNECTED);
}
if (ret)
printk("%s[%u] OFA CM event_handler returned, ret=%d\n",
__FUNCTION__, __LINE__, ret);
return 0;
}
/**
* nes_reject
*/
int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
struct nes_cm_node *cm_node;
struct nes_cm_core *cm_core;
atomic_inc(&cm_rejects);
cm_node = (struct nes_cm_node *) cm_id->provider_data;
cm_core = cm_node->cm_core;
cm_node->mpa_frame_size = sizeof(struct ietf_mpa_frame) + pdata_len;
strcpy(&cm_node->mpa_frame.key[0], IEFT_MPA_KEY_REP);
memcpy(&cm_node->mpa_frame.priv_data, pdata, pdata_len);
cm_node->mpa_frame.priv_data_len = cpu_to_be16(pdata_len);
cm_node->mpa_frame.rev = mpa_version;
cm_node->mpa_frame.flags = IETF_MPA_FLAGS_CRC | IETF_MPA_FLAGS_REJECT;
cm_core->api->reject(cm_core, &cm_node->mpa_frame, cm_node);
return 0;
}
/**
* nes_connect
* setup and launch cm connect node
*/
int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
struct ib_qp *ibqp;
struct nes_qp *nesqp;
struct nes_vnic *nesvnic;
struct nes_device *nesdev;
struct nes_cm_node *cm_node;
struct nes_cm_info cm_info;
ibqp = nes_get_qp(cm_id->device, conn_param->qpn);
if (!ibqp)
return -EINVAL;
nesqp = to_nesqp(ibqp);
if (!nesqp)
return -EINVAL;
nesvnic = to_nesvnic(nesqp->ibqp.device);
if (!nesvnic)
return -EINVAL;
nesdev = nesvnic->nesdev;
if (!nesdev)
return -EINVAL;
atomic_inc(&cm_connects);
nesqp->ietf_frame = kzalloc(sizeof(struct ietf_mpa_frame) +
conn_param->private_data_len, GFP_KERNEL);
if (!nesqp->ietf_frame)
return -ENOMEM;
/* set qp as having an active connection */
nesqp->active_conn = 1;
nes_debug(NES_DBG_CM, "QP%u, Destination IP = 0x%08X:0x%04X, local = 0x%08X:0x%04X.\n",
nesqp->hwqp.qp_id,
ntohl(cm_id->remote_addr.sin_addr.s_addr),
ntohs(cm_id->remote_addr.sin_port),
ntohl(cm_id->local_addr.sin_addr.s_addr),
ntohs(cm_id->local_addr.sin_port));
/* cache the cm_id in the qp */
nesqp->cm_id = cm_id;
cm_id->provider_data = nesqp;
/* copy the private data */
if (conn_param->private_data_len) {
memcpy(nesqp->ietf_frame->priv_data, conn_param->private_data,
conn_param->private_data_len);
}
nesqp->private_data_len = conn_param->private_data_len;
nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32((u32)conn_param->ord);
nes_debug(NES_DBG_CM, "requested ord = 0x%08X.\n", (u32)conn_param->ord);
nes_debug(NES_DBG_CM, "mpa private data len =%u\n", conn_param->private_data_len);
strcpy(&nesqp->ietf_frame->key[0], IEFT_MPA_KEY_REQ);
nesqp->ietf_frame->flags = IETF_MPA_FLAGS_CRC;
nesqp->ietf_frame->rev = IETF_MPA_VERSION;
nesqp->ietf_frame->priv_data_len = htons(conn_param->private_data_len);
if (cm_id->local_addr.sin_addr.s_addr != cm_id->remote_addr.sin_addr.s_addr)
nes_manage_apbvt(nesvnic, ntohs(cm_id->local_addr.sin_port),
PCI_FUNC(nesdev->pcidev->devfn), NES_MANAGE_APBVT_ADD);
/* set up the connection params for the node */
cm_info.loc_addr = (cm_id->local_addr.sin_addr.s_addr);
cm_info.loc_port = (cm_id->local_addr.sin_port);
cm_info.rem_addr = (cm_id->remote_addr.sin_addr.s_addr);
cm_info.rem_port = (cm_id->remote_addr.sin_port);
cm_info.cm_id = cm_id;
cm_info.conn_type = NES_CM_IWARP_CONN_TYPE;
cm_id->add_ref(cm_id);
nes_add_ref(&nesqp->ibqp);
/* create a connect CM node connection */
cm_node = g_cm_core->api->connect(g_cm_core, nesvnic, nesqp->ietf_frame, &cm_info);
if (!cm_node) {
if (cm_id->local_addr.sin_addr.s_addr != cm_id->remote_addr.sin_addr.s_addr)
nes_manage_apbvt(nesvnic, ntohs(cm_id->local_addr.sin_port),
PCI_FUNC(nesdev->pcidev->devfn), NES_MANAGE_APBVT_DEL);
nes_rem_ref(&nesqp->ibqp);
kfree(nesqp->ietf_frame);
nesqp->ietf_frame = NULL;
cm_id->rem_ref(cm_id);
return -ENOMEM;
}
cm_node->apbvt_set = 1;
nesqp->cm_node = cm_node;
return 0;
}
/**
* nes_create_listen
*/
int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
{
struct nes_vnic *nesvnic;
struct nes_cm_listener *cm_node;
struct nes_cm_info cm_info;
struct nes_adapter *adapter;
int err;
nes_debug(NES_DBG_CM, "cm_id = %p, local port = 0x%04X.\n",
cm_id, ntohs(cm_id->local_addr.sin_port));
nesvnic = to_nesvnic(cm_id->device);
if (!nesvnic)
return -EINVAL;
adapter = nesvnic->nesdev->nesadapter;
nes_debug(NES_DBG_CM, "nesvnic=%p, netdev=%p, %s\n",
nesvnic, nesvnic->netdev, nesvnic->netdev->name);
nes_debug(NES_DBG_CM, "nesvnic->local_ipaddr=0x%08x, sin_addr.s_addr=0x%08x\n",
nesvnic->local_ipaddr, cm_id->local_addr.sin_addr.s_addr);
/* setup listen params in our api call struct */
cm_info.loc_addr = nesvnic->local_ipaddr;
cm_info.loc_port = cm_id->local_addr.sin_port;
cm_info.backlog = backlog;
cm_info.cm_id = cm_id;
cm_info.conn_type = NES_CM_IWARP_CONN_TYPE;
cm_node = g_cm_core->api->listen(g_cm_core, nesvnic, &cm_info);
if (!cm_node) {
printk("%s[%u] Error returned from listen API call\n",
__FUNCTION__, __LINE__);
return -ENOMEM;
}
cm_id->provider_data = cm_node;
if (!cm_node->reused_node) {
err = nes_manage_apbvt(nesvnic, ntohs(cm_id->local_addr.sin_port),
PCI_FUNC(nesvnic->nesdev->pcidev->devfn), NES_MANAGE_APBVT_ADD);
if (err) {
printk("nes_manage_apbvt call returned %d.\n", err);
g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
return err;
}
cm_listens_created++;
}
cm_id->add_ref(cm_id);
cm_id->provider_data = (void *)cm_node;
return 0;
}
/**
* nes_destroy_listen
*/
int nes_destroy_listen(struct iw_cm_id *cm_id)
{
if (cm_id->provider_data)
g_cm_core->api->stop_listener(g_cm_core, cm_id->provider_data);
else
nes_debug(NES_DBG_CM, "cm_id->provider_data was NULL\n");
cm_id->rem_ref(cm_id);
return 0;
}
/**
* nes_cm_recv
*/
int nes_cm_recv(struct sk_buff *skb, struct net_device *netdevice)
{
cm_packets_received++;
if ((g_cm_core) && (g_cm_core->api)) {
g_cm_core->api->recv_pkt(g_cm_core, netdev_priv(netdevice), skb);
} else {
nes_debug(NES_DBG_CM, "Unable to process packet for CM,"
" cm is not setup properly.\n");
}
return 0;
}
/**
* nes_cm_start
* Start and init a cm core module
*/
int nes_cm_start(void)
{
nes_debug(NES_DBG_CM, "\n");
/* create the primary CM core, pass this handle to subsequent core inits */
g_cm_core = nes_cm_alloc_core();
if (g_cm_core) {
return 0;
} else {
return -ENOMEM;
}
}
/**
* nes_cm_stop
* stop and dealloc all cm core instances
*/
int nes_cm_stop(void)
{
g_cm_core->api->destroy_cm_core(g_cm_core);
return 0;
}
/**
* cm_event_connected
* handle a connected event, setup QPs and HW
*/
void cm_event_connected(struct nes_cm_event *event)
{
u64 u64temp;
struct nes_qp *nesqp;
struct nes_vnic *nesvnic;
struct nes_device *nesdev;
struct nes_cm_node *cm_node;
struct nes_adapter *nesadapter;
struct ib_qp_attr attr;
struct iw_cm_id *cm_id;
struct iw_cm_event cm_event;
struct nes_hw_qp_wqe *wqe;
struct nes_v4_quad nes_quad;
int ret;
/* get all our handles */
cm_node = event->cm_node;
cm_id = cm_node->cm_id;
nes_debug(NES_DBG_CM, "cm_event_connected - %p - cm_id = %p\n", cm_node, cm_id);
nesqp = (struct nes_qp *)cm_id->provider_data;
nesvnic = to_nesvnic(nesqp->ibqp.device);
nesdev = nesvnic->nesdev;
nesadapter = nesdev->nesadapter;
if (nesqp->destroyed) {
return;
}
atomic_inc(&cm_connecteds);
nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
" local port 0x%04X. jiffies = %lu.\n",
nesqp->hwqp.qp_id,
ntohl(cm_id->remote_addr.sin_addr.s_addr),
ntohs(cm_id->remote_addr.sin_port),
ntohs(cm_id->local_addr.sin_port),
jiffies);
nes_cm_init_tsa_conn(nesqp, cm_node);
/* set the QP tsa context */
nesqp->nesqp_context->tcpPorts[0] = cpu_to_le16(ntohs(cm_id->local_addr.sin_port));
nesqp->nesqp_context->tcpPorts[1] = cpu_to_le16(ntohs(cm_id->remote_addr.sin_port));
nesqp->nesqp_context->ip0 = cpu_to_le32(ntohl(cm_id->remote_addr.sin_addr.s_addr));
nesqp->nesqp_context->misc2 |= cpu_to_le32(
(u32)PCI_FUNC(nesdev->pcidev->devfn) << NES_QPCONTEXT_MISC2_SRC_IP_SHIFT);
nesqp->nesqp_context->arp_index_vlan |= cpu_to_le32(
nes_arp_table(nesdev, le32_to_cpu(nesqp->nesqp_context->ip0),
NULL, NES_ARP_RESOLVE) << 16);
nesqp->nesqp_context->ts_val_delta = cpu_to_le32(
jiffies - nes_read_indexed(nesdev, NES_IDX_TCP_NOW));
nesqp->nesqp_context->ird_index = cpu_to_le32(nesqp->hwqp.qp_id);
nesqp->nesqp_context->ird_ord_sizes |=
cpu_to_le32((u32)1 << NES_QPCONTEXT_ORDIRD_IWARP_MODE_SHIFT);
/* Adjust tail for not having a LSMM */
nesqp->hwqp.sq_tail = 1;
#if defined(NES_SEND_FIRST_WRITE)
if (cm_node->send_write0) {
nes_debug(NES_DBG_CM, "Sending first write.\n");
wqe = &nesqp->hwqp.sq_vbase[0];
u64temp = (unsigned long)nesqp;
u64temp |= NES_SW_CONTEXT_ALIGN>>1;
set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX,
u64temp);
wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] = cpu_to_le32(NES_IWARP_SQ_OP_RDMAW);
wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX] = 0;
wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_LOW_IDX] = 0;
wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_HIGH_IDX] = 0;
wqe->wqe_words[NES_IWARP_SQ_WQE_LENGTH0_IDX] = 0;
wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = 0;
/* use the reserved spot on the WQ for the extra first WQE */
nesqp->nesqp_context->ird_ord_sizes &= cpu_to_le32(~(NES_QPCONTEXT_ORDIRD_LSMM_PRESENT |
NES_QPCONTEXT_ORDIRD_WRPDU | NES_QPCONTEXT_ORDIRD_ALSMM));
nesqp->skip_lsmm = 1;
nesqp->hwqp.sq_tail = 0;
nes_write32(nesdev->regs + NES_WQE_ALLOC,
(1 << 24) | 0x00800000 | nesqp->hwqp.qp_id);
}
#endif
memset(&nes_quad, 0, sizeof(nes_quad));
nes_quad.DstIpAdrIndex = cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) << 24);
nes_quad.SrcIpadr = cm_id->remote_addr.sin_addr.s_addr;
nes_quad.TcpPorts[0] = cm_id->remote_addr.sin_port;
nes_quad.TcpPorts[1] = cm_id->local_addr.sin_port;
/* Produce hash key */
nesqp->hte_index = cpu_to_be32(
crc32c(~0, (void *)&nes_quad, sizeof(nes_quad)) ^ 0xffffffff);
nes_debug(NES_DBG_CM, "HTE Index = 0x%08X, After CRC = 0x%08X\n",
nesqp->hte_index, nesqp->hte_index & nesadapter->hte_index_mask);
nesqp->hte_index &= nesadapter->hte_index_mask;
nesqp->nesqp_context->hte_index = cpu_to_le32(nesqp->hte_index);
nesqp->ietf_frame = &cm_node->mpa_frame;
nesqp->private_data_len = (u8) cm_node->mpa_frame_size;
cm_node->cm_core->api->accelerated(cm_node->cm_core, cm_node);
/* modify QP state to rts */
attr.qp_state = IB_QPS_RTS;
nes_modify_qp(&nesqp->ibqp, &attr, IB_QP_STATE, NULL);
/* notify OF layer we successfully created the requested connection */
cm_event.event = IW_CM_EVENT_CONNECT_REPLY;
cm_event.status = IW_CM_EVENT_STATUS_ACCEPTED;
cm_event.provider_data = cm_id->provider_data;
cm_event.local_addr.sin_family = AF_INET;
cm_event.local_addr.sin_port = cm_id->local_addr.sin_port;
cm_event.remote_addr = cm_id->remote_addr;
cm_event.private_data = (void *)event->cm_node->mpa_frame_buf;
cm_event.private_data_len = (u8) event->cm_node->mpa_frame_size;
cm_event.local_addr.sin_addr.s_addr = event->cm_info.rem_addr;
ret = cm_id->event_handler(cm_id, &cm_event);
nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret);
if (ret)
printk("%s[%u] OFA CM event_handler returned, ret=%d\n",
__FUNCTION__, __LINE__, ret);
nes_debug(NES_DBG_CM, "Exiting connect thread for QP%u. jiffies = %lu\n",
nesqp->hwqp.qp_id, jiffies );
nes_rem_ref(&nesqp->ibqp);
return;
}
/**
* cm_event_connect_error
*/
void cm_event_connect_error(struct nes_cm_event *event)
{
struct nes_qp *nesqp;
struct iw_cm_id *cm_id;
struct iw_cm_event cm_event;
/* struct nes_cm_info cm_info; */
int ret;
if (!event->cm_node)
return;
cm_id = event->cm_node->cm_id;
if (!cm_id) {
return;
}
nes_debug(NES_DBG_CM, "cm_node=%p, cm_id=%p\n", event->cm_node, cm_id);
nesqp = cm_id->provider_data;
if (!nesqp) {
return;
}
/* notify OF layer about this connection error event */
/* cm_id->rem_ref(cm_id); */
nesqp->cm_id = NULL;
cm_id->provider_data = NULL;
cm_event.event = IW_CM_EVENT_CONNECT_REPLY;
cm_event.status = IW_CM_EVENT_STATUS_REJECTED;
cm_event.provider_data = cm_id->provider_data;
cm_event.local_addr = cm_id->local_addr;
cm_event.remote_addr = cm_id->remote_addr;
cm_event.private_data = NULL;
cm_event.private_data_len = 0;
nes_debug(NES_DBG_CM, "call CM_EVENT REJECTED, local_addr=%08x, remove_addr=%08x\n",
cm_event.local_addr.sin_addr.s_addr, cm_event.remote_addr.sin_addr.s_addr);
ret = cm_id->event_handler(cm_id, &cm_event);
nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret);
if (ret)
printk("%s[%u] OFA CM event_handler returned, ret=%d\n",
__FUNCTION__, __LINE__, ret);
nes_rem_ref(&nesqp->ibqp);
cm_id->rem_ref(cm_id);
return;
}
/**
* cm_event_reset
*/
void cm_event_reset(struct nes_cm_event *event)
{
struct nes_qp *nesqp;
struct iw_cm_id *cm_id;
struct iw_cm_event cm_event;
/* struct nes_cm_info cm_info; */
int ret;
if (!event->cm_node)
return;
if (!event->cm_node->cm_id)
return;
cm_id = event->cm_node->cm_id;
nes_debug(NES_DBG_CM, "%p - cm_id = %p\n", event->cm_node, cm_id);
nesqp = cm_id->provider_data;
nesqp->cm_id = NULL;
/* cm_id->provider_data = NULL; */
cm_event.event = IW_CM_EVENT_DISCONNECT;
cm_event.status = IW_CM_EVENT_STATUS_RESET;
cm_event.provider_data = cm_id->provider_data;
cm_event.local_addr = cm_id->local_addr;
cm_event.remote_addr = cm_id->remote_addr;
cm_event.private_data = NULL;
cm_event.private_data_len = 0;
ret = cm_id->event_handler(cm_id, &cm_event);
nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret);
/* notify OF layer about this connection error event */
cm_id->rem_ref(cm_id);
return;
}
/**
* cm_event_mpa_req
*/
void cm_event_mpa_req(struct nes_cm_event *event)
{
struct iw_cm_id *cm_id;
struct iw_cm_event cm_event;
int ret;
struct nes_cm_node *cm_node;
cm_node = event->cm_node;
if (!cm_node)
return;
cm_id = cm_node->cm_id;
atomic_inc(&cm_connect_reqs);
nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
cm_node, cm_id, jiffies);
cm_event.event = IW_CM_EVENT_CONNECT_REQUEST;
cm_event.status = IW_CM_EVENT_STATUS_OK;
cm_event.provider_data = (void *)cm_node;
cm_event.local_addr.sin_family = AF_INET;
cm_event.local_addr.sin_port = htons(event->cm_info.loc_port);
cm_event.local_addr.sin_addr.s_addr = htonl(event->cm_info.loc_addr);
cm_event.remote_addr.sin_family = AF_INET;
cm_event.remote_addr.sin_port = htons(event->cm_info.rem_port);
cm_event.remote_addr.sin_addr.s_addr = htonl(event->cm_info.rem_addr);
cm_event.private_data = cm_node->mpa_frame_buf;
cm_event.private_data_len = (u8) cm_node->mpa_frame_size;
ret = cm_id->event_handler(cm_id, &cm_event);
if (ret)
printk("%s[%u] OFA CM event_handler returned, ret=%d\n",
__FUNCTION__, __LINE__, ret);
return;
}
static void nes_cm_event_handler(struct work_struct *);
/**
* nes_cm_post_event
* post an event to the cm event handler
*/
int nes_cm_post_event(struct nes_cm_event *event)
{
atomic_inc(&event->cm_node->cm_core->events_posted);
add_ref_cm_node(event->cm_node);
event->cm_info.cm_id->add_ref(event->cm_info.cm_id);
INIT_WORK(&event->event_work, nes_cm_event_handler);
nes_debug(NES_DBG_CM, "queue_work, event=%p\n", event);
queue_work(event->cm_node->cm_core->event_wq, &event->event_work);
nes_debug(NES_DBG_CM, "Exit\n");
return 0;
}
/**
* nes_cm_event_handler
* worker function to handle cm events
* will free instance of nes_cm_event
*/
static void nes_cm_event_handler(struct work_struct *work)
{
struct nes_cm_event *event = container_of(work, struct nes_cm_event, event_work);
struct nes_cm_core *cm_core;
if ((!event) || (!event->cm_node) || (!event->cm_node->cm_core)) {
return;
}
cm_core = event->cm_node->cm_core;
nes_debug(NES_DBG_CM, "event=%p, event->type=%u, events posted=%u\n",
event, event->type, atomic_read(&cm_core->events_posted));
switch (event->type) {
case NES_CM_EVENT_MPA_REQ:
cm_event_mpa_req(event);
nes_debug(NES_DBG_CM, "CM Event: MPA REQUEST\n");
break;
case NES_CM_EVENT_RESET:
nes_debug(NES_DBG_CM, "CM Event: RESET\n");
cm_event_reset(event);
break;
case NES_CM_EVENT_CONNECTED:
if ((!event->cm_node->cm_id) ||
(event->cm_node->state != NES_CM_STATE_TSA)) {
break;
}
cm_event_connected(event);
nes_debug(NES_DBG_CM, "CM Event: CONNECTED\n");
break;
case NES_CM_EVENT_ABORTED:
if ((!event->cm_node->cm_id) || (event->cm_node->state == NES_CM_STATE_TSA)) {
break;
}
cm_event_connect_error(event);
nes_debug(NES_DBG_CM, "CM Event: ABORTED\n");
break;
case NES_CM_EVENT_DROPPED_PKT:
nes_debug(NES_DBG_CM, "CM Event: DROPPED PKT\n");
break;
default:
nes_debug(NES_DBG_CM, "CM Event: UNKNOWN EVENT TYPE\n");
break;
}
atomic_dec(&cm_core->events_posted);
event->cm_info.cm_id->rem_ref(event->cm_info.cm_id);
rem_ref_cm_node(cm_core, event->cm_node);
kfree(event);
return;
}
/*
* Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef NES_CM_H
#define NES_CM_H
#define QUEUE_EVENTS
#define NES_MANAGE_APBVT_DEL 0
#define NES_MANAGE_APBVT_ADD 1
/* IETF MPA -- defines, enums, structs */
#define IEFT_MPA_KEY_REQ "MPA ID Req Frame"
#define IEFT_MPA_KEY_REP "MPA ID Rep Frame"
#define IETF_MPA_KEY_SIZE 16
#define IETF_MPA_VERSION 1
enum ietf_mpa_flags {
IETF_MPA_FLAGS_MARKERS = 0x80, /* receive Markers */
IETF_MPA_FLAGS_CRC = 0x40, /* CRC in use */
IETF_MPA_FLAGS_REJECT = 0x20, /* Reject */
};
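/* MPA request/reply frame: fixed header followed by variable-length private data */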
struct ietf_mpa_frame {
u8 key[IETF_MPA_KEY_SIZE];
u8 flags;
u8 rev;
__be16 priv_data_len;
u8 priv_data[0];
};
#define ietf_mpa_req_resp_frame ietf_mpa_frame
struct nes_v4_quad {
u32 rsvd0;
__le32 DstIpAdrIndex; /* Only most significant 5 bits are valid */
__be32 SrcIpadr;
__be16 TcpPorts[2]; /* src is low, dest is high */
};
struct nes_cm_node;
enum nes_timer_type {
NES_TIMER_TYPE_SEND,
NES_TIMER_TYPE_RECV,
NES_TIMER_NODE_CLEANUP,
NES_TIMER_TYPE_CLOSE,
};
#define MAX_NES_IFS 4
#define SET_ACK 1
#define SET_SYN 2
#define SET_FIN 4
#define SET_RST 8
struct option_base {
u8 optionnum;
u8 length;
};
enum option_numbers {
OPTION_NUMBER_END,
OPTION_NUMBER_NONE,
OPTION_NUMBER_MSS,
OPTION_NUMBER_WINDOW_SCALE,
OPTION_NUMBER_SACK_PERM,
OPTION_NUMBER_SACK,
OPTION_NUMBER_WRITE0 = 0xbc
};
struct option_mss {
u8 optionnum;
u8 length;
__be16 mss;
};
struct option_windowscale {
u8 optionnum;
u8 length;
u8 shiftcount;
};
union all_known_options {
char as_end;
struct option_base as_base;
struct option_mss as_mss;
struct option_windowscale as_windowscale;
};
struct nes_timer_entry {
struct list_head list;
unsigned long timetosend; /* jiffies */
struct sk_buff *skb;
u32 type;
u32 retrycount;
u32 retranscount;
u32 context;
u32 seq_num;
u32 send_retrans;
int close_when_complete;
struct net_device *netdev;
};
#define NES_DEFAULT_RETRYS 64
#define NES_DEFAULT_RETRANS 8
#ifdef CONFIG_INFINIBAND_NES_DEBUG
#define NES_RETRY_TIMEOUT (1000*HZ/1000)
#else
#define NES_RETRY_TIMEOUT (3000*HZ/1000)
#endif
#define NES_SHORT_TIME (10)
#define NES_LONG_TIME (2000*HZ/1000)
#define NES_CM_HASHTABLE_SIZE 1024
#define NES_CM_TCP_TIMER_INTERVAL 3000
#define NES_CM_DEFAULT_MTU 1540
#define NES_CM_DEFAULT_FRAME_CNT 10
#define NES_CM_THREAD_STACK_SIZE 256
#define NES_CM_DEFAULT_RCV_WND 64240 /* before we know that window scaling is allowed */
#define NES_CM_DEFAULT_RCV_WND_SCALED 256960 /* after we know that window scaling is allowed */
#define NES_CM_DEFAULT_RCV_WND_SCALE 2
#define NES_CM_DEFAULT_FREE_PKTS 0x000A
#define NES_CM_FREE_PKT_LO_WATERMARK 2
#define NES_CM_DEFAULT_MSS 536
#define NES_CM_DEF_SEQ 0x159bf75f
#define NES_CM_DEF_LOCAL_ID 0x3b47
#define NES_CM_DEF_SEQ2 0x18ed5740
#define NES_CM_DEF_LOCAL_ID2 0xb807
typedef u32 nes_addr_t;
#define nes_cm_tsa_context nes_qp_context
struct nes_qp;
/* cm node transition states */
enum nes_cm_node_state {
NES_CM_STATE_UNKNOWN,
NES_CM_STATE_INITED,
NES_CM_STATE_LISTENING,
NES_CM_STATE_SYN_RCVD,
NES_CM_STATE_SYN_SENT,
NES_CM_STATE_ONE_SIDE_ESTABLISHED,
NES_CM_STATE_ESTABLISHED,
NES_CM_STATE_ACCEPTING,
NES_CM_STATE_MPAREQ_SENT,
NES_CM_STATE_TSA,
NES_CM_STATE_FIN_WAIT1,
NES_CM_STATE_FIN_WAIT2,
NES_CM_STATE_CLOSE_WAIT,
NES_CM_STATE_TIME_WAIT,
NES_CM_STATE_LAST_ACK,
NES_CM_STATE_CLOSING,
NES_CM_STATE_CLOSED
};
/* type of nes connection */
enum nes_cm_conn_type {
NES_CM_IWARP_CONN_TYPE,
};
/* CM context params */
struct nes_cm_tcp_context {
u8 client;
u32 loc_seq_num;
u32 loc_ack_num;
u32 rem_ack_num;
u32 rcv_nxt;
u32 loc_id;
u32 rem_id;
u32 snd_wnd;
u32 max_snd_wnd;
u32 rcv_wnd;
u32 mss;
u8 snd_wscale;
u8 rcv_wscale;
struct nes_cm_tsa_context tsa_cntxt;
struct timeval sent_ts;
};
enum nes_cm_listener_state {
NES_CM_LISTENER_PASSIVE_STATE=1,
NES_CM_LISTENER_ACTIVE_STATE=2,
NES_CM_LISTENER_EITHER_STATE=3
};
struct nes_cm_listener {
struct list_head list;
u64 session_id;
struct nes_cm_core *cm_core;
u8 loc_mac[ETH_ALEN];
nes_addr_t loc_addr;
u16 loc_port;
struct iw_cm_id *cm_id;
enum nes_cm_conn_type conn_type;
atomic_t ref_count;
struct nes_vnic *nesvnic;
atomic_t pend_accepts_cnt;
int backlog;
enum nes_cm_listener_state listener_state;
u32 reused_node;
};
/* per connection node and node state information */
struct nes_cm_node {
u64 session_id;
u32 hashkey;
nes_addr_t loc_addr, rem_addr;
u16 loc_port, rem_port;
u8 loc_mac[ETH_ALEN];
u8 rem_mac[ETH_ALEN];
enum nes_cm_node_state state;
struct nes_cm_tcp_context tcp_cntxt;
struct nes_cm_core *cm_core;
struct sk_buff_head resend_list;
atomic_t ref_count;
struct net_device *netdev;
struct nes_cm_node *loopbackpartner;
struct list_head retrans_list;
spinlock_t retrans_list_lock;
struct list_head recv_list;
spinlock_t recv_list_lock;
int send_write0;
union {
struct ietf_mpa_frame mpa_frame;
u8 mpa_frame_buf[NES_CM_DEFAULT_MTU];
};
u16 mpa_frame_size;
struct iw_cm_id *cm_id;
struct list_head list;
int accelerated;
struct nes_cm_listener *listener;
enum nes_cm_conn_type conn_type;
struct nes_vnic *nesvnic;
int apbvt_set;
int accept_pend;
};
/* structure for client or CM to fill when making CM api calls. */
/* - only need to set relevant data, based on op. */
struct nes_cm_info {
union {
struct iw_cm_id *cm_id;
struct net_device *netdev;
};
u16 loc_port;
u16 rem_port;
nes_addr_t loc_addr;
nes_addr_t rem_addr;
enum nes_cm_conn_type conn_type;
int backlog;
};
/* CM event codes */
enum nes_cm_event_type {
NES_CM_EVENT_UNKNOWN,
NES_CM_EVENT_ESTABLISHED,
NES_CM_EVENT_MPA_REQ,
NES_CM_EVENT_MPA_CONNECT,
NES_CM_EVENT_MPA_ACCEPT,
NES_CM_EVENT_MPA_ESTABLISHED,
NES_CM_EVENT_CONNECTED,
NES_CM_EVENT_CLOSED,
NES_CM_EVENT_RESET,
NES_CM_EVENT_DROPPED_PKT,
NES_CM_EVENT_CLOSE_IMMED,
NES_CM_EVENT_CLOSE_HARD,
NES_CM_EVENT_CLOSE_CLEAN,
NES_CM_EVENT_ABORTED,
NES_CM_EVENT_SEND_FIRST
};
/* event to post to CM event handler */
struct nes_cm_event {
enum nes_cm_event_type type;
struct nes_cm_info cm_info;
struct work_struct event_work;
struct nes_cm_node *cm_node;
};
struct nes_cm_core {
enum nes_cm_node_state state;
atomic_t session_id;
atomic_t listen_node_cnt;
struct nes_cm_node listen_list;
spinlock_t listen_list_lock;
u32 mtu;
u32 free_tx_pkt_max;
u32 rx_pkt_posted;
struct sk_buff_head tx_free_list;
atomic_t ht_node_cnt;
struct list_head connected_nodes;
/* struct list_head hashtable[NES_CM_HASHTABLE_SIZE]; */
spinlock_t ht_lock;
struct timer_list tcp_timer;
struct nes_cm_ops *api;
int (*post_event)(struct nes_cm_event *event);
atomic_t events_posted;
struct workqueue_struct *event_wq;
struct workqueue_struct *disconn_wq;
atomic_t node_cnt;
u64 aborted_connects;
u32 options;
struct nes_cm_node *current_listen_node;
};
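/* option codes accepted by the cm_ops->set() entry point */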
#define NES_CM_SET_PKT_SIZE (1 << 1)
#define NES_CM_SET_FREE_PKT_Q_SIZE (1 << 2)
/* CM ops/API for client interface */
struct nes_cm_ops {
int (*accelerated)(struct nes_cm_core *, struct nes_cm_node *);
struct nes_cm_listener * (*listen)(struct nes_cm_core *, struct nes_vnic *,
struct nes_cm_info *);
int (*stop_listener)(struct nes_cm_core *, struct nes_cm_listener *);
struct nes_cm_node * (*connect)(struct nes_cm_core *,
struct nes_vnic *, struct ietf_mpa_frame *,
struct nes_cm_info *);
int (*close)(struct nes_cm_core *, struct nes_cm_node *);
int (*accept)(struct nes_cm_core *, struct ietf_mpa_frame *,
struct nes_cm_node *);
int (*reject)(struct nes_cm_core *, struct ietf_mpa_frame *,
struct nes_cm_node *);
int (*recv_pkt)(struct nes_cm_core *, struct nes_vnic *,
struct sk_buff *);
int (*destroy_cm_core)(struct nes_cm_core *);
int (*get)(struct nes_cm_core *);
int (*set)(struct nes_cm_core *, u32, u32);
};
int send_mpa_request(struct nes_cm_node *);
struct sk_buff *form_cm_frame(struct sk_buff *, struct nes_cm_node *,
void *, u32, void *, u32, u8);
int schedule_nes_timer(struct nes_cm_node *, struct sk_buff *,
enum nes_timer_type, int, int);
void nes_cm_timer_tick(unsigned long);
int send_syn(struct nes_cm_node *, u32);
int send_reset(struct nes_cm_node *);
int send_ack(struct nes_cm_node *);
int send_fin(struct nes_cm_node *, struct sk_buff *);
struct sk_buff *get_free_pkt(struct nes_cm_node *);
int process_packet(struct nes_cm_node *, struct sk_buff *, struct nes_cm_core *);
struct nes_cm_node * mini_cm_connect(struct nes_cm_core *,
struct nes_vnic *, struct ietf_mpa_frame *, struct nes_cm_info *);
int mini_cm_accept(struct nes_cm_core *, struct ietf_mpa_frame *, struct nes_cm_node *);
int mini_cm_reject(struct nes_cm_core *, struct ietf_mpa_frame *, struct nes_cm_node *);
int mini_cm_close(struct nes_cm_core *, struct nes_cm_node *);
int mini_cm_recv_pkt(struct nes_cm_core *, struct nes_vnic *, struct sk_buff *);
struct nes_cm_core *mini_cm_alloc_core(struct nes_cm_info *);
int mini_cm_dealloc_core(struct nes_cm_core *);
int mini_cm_get(struct nes_cm_core *);
int mini_cm_set(struct nes_cm_core *, u32, u32);
int nes_cm_disconn(struct nes_qp *);
void nes_disconnect_worker(struct work_struct *);
int nes_cm_disconn_true(struct nes_qp *);
int nes_disconnect(struct nes_qp *, int);
int nes_accept(struct iw_cm_id *, struct iw_cm_conn_param *);
int nes_reject(struct iw_cm_id *, const void *, u8);
int nes_connect(struct iw_cm_id *, struct iw_cm_conn_param *);
int nes_create_listen(struct iw_cm_id *, int);
int nes_destroy_listen(struct iw_cm_id *);
int nes_cm_recv(struct sk_buff *, struct net_device *);
int nes_cm_start(void);
int nes_cm_stop(void);
/* CM event handler functions */
void cm_event_connected(struct nes_cm_event *);
void cm_event_connect_error(struct nes_cm_event *);
void cm_event_reset(struct nes_cm_event *);
void cm_event_mpa_req(struct nes_cm_event *);
int nes_cm_post_event(struct nes_cm_event *);
#endif /* NES_CM_H */
/*
* Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef NES_CONTEXT_H
#define NES_CONTEXT_H
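/* QP context shared with the adapter; __le32/__le16 fields are little-endian as seen by the hardware */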
struct nes_qp_context {
__le32 misc;
__le32 cqs;
__le32 sq_addr_low;
__le32 sq_addr_high;
__le32 rq_addr_low;
__le32 rq_addr_high;
__le32 misc2;
__le16 tcpPorts[2];
__le32 ip0;
__le32 ip1;
__le32 ip2;
__le32 ip3;
__le32 mss;
__le32 arp_index_vlan;
__le32 tcp_state_flow_label;
__le32 pd_index_wscale;
__le32 keepalive;
u32 ts_recent;
u32 ts_age;
__le32 snd_nxt;
__le32 snd_wnd;
__le32 rcv_nxt;
__le32 rcv_wnd;
__le32 snd_max;
__le32 snd_una;
u32 srtt;
__le32 rttvar;
__le32 ssthresh;
__le32 cwnd;
__le32 snd_wl1;
__le32 snd_wl2;
__le32 max_snd_wnd;
__le32 ts_val_delta;
u32 retransmit;
u32 probe_cnt;
u32 hte_index;
__le32 q2_addr_low;
__le32 q2_addr_high;
__le32 ird_index;
u32 Rsvd3;
__le32 ird_ord_sizes;
u32 mrkr_offset;
__le32 aeq_token_low;
__le32 aeq_token_high;
};
/* QP Context Misc Field */
#define NES_QPCONTEXT_MISC_IWARP_VER_MASK 0x00000003
#define NES_QPCONTEXT_MISC_IWARP_VER_SHIFT 0
#define NES_QPCONTEXT_MISC_EFB_SIZE_MASK 0x000000C0
#define NES_QPCONTEXT_MISC_EFB_SIZE_SHIFT 6
#define NES_QPCONTEXT_MISC_RQ_SIZE_MASK 0x00000300
#define NES_QPCONTEXT_MISC_RQ_SIZE_SHIFT 8
#define NES_QPCONTEXT_MISC_SQ_SIZE_MASK 0x00000c00
#define NES_QPCONTEXT_MISC_SQ_SIZE_SHIFT 10
#define NES_QPCONTEXT_MISC_PCI_FCN_MASK 0x00007000
#define NES_QPCONTEXT_MISC_PCI_FCN_SHIFT 12
#define NES_QPCONTEXT_MISC_DUP_ACKS_MASK 0x00070000
#define NES_QPCONTEXT_MISC_DUP_ACKS_SHIFT 16
enum nes_qp_context_misc_bits {
NES_QPCONTEXT_MISC_RX_WQE_SIZE = 0x00000004,
NES_QPCONTEXT_MISC_IPV4 = 0x00000008,
NES_QPCONTEXT_MISC_DO_NOT_FRAG = 0x00000010,
NES_QPCONTEXT_MISC_INSERT_VLAN = 0x00000020,
NES_QPCONTEXT_MISC_DROS = 0x00008000,
NES_QPCONTEXT_MISC_WSCALE = 0x00080000,
NES_QPCONTEXT_MISC_KEEPALIVE = 0x00100000,
NES_QPCONTEXT_MISC_TIMESTAMP = 0x00200000,
NES_QPCONTEXT_MISC_SACK = 0x00400000,
NES_QPCONTEXT_MISC_RDMA_WRITE_EN = 0x00800000,
NES_QPCONTEXT_MISC_RDMA_READ_EN = 0x01000000,
NES_QPCONTEXT_MISC_WBIND_EN = 0x10000000,
NES_QPCONTEXT_MISC_FAST_REGISTER_EN = 0x20000000,
NES_QPCONTEXT_MISC_PRIV_EN = 0x40000000,
NES_QPCONTEXT_MISC_NO_NAGLE = 0x80000000
};
enum nes_qp_acc_wq_sizes {
HCONTEXT_TSA_WQ_SIZE_4 = 0,
HCONTEXT_TSA_WQ_SIZE_32 = 1,
HCONTEXT_TSA_WQ_SIZE_128 = 2,
HCONTEXT_TSA_WQ_SIZE_512 = 3
};
/* QP Context Misc2 Fields */
#define NES_QPCONTEXT_MISC2_TTL_MASK 0x000000ff
#define NES_QPCONTEXT_MISC2_TTL_SHIFT 0
#define NES_QPCONTEXT_MISC2_HOP_LIMIT_MASK 0x000000ff
#define NES_QPCONTEXT_MISC2_HOP_LIMIT_SHIFT 0
#define NES_QPCONTEXT_MISC2_LIMIT_MASK 0x00000300
#define NES_QPCONTEXT_MISC2_LIMIT_SHIFT 8
#define NES_QPCONTEXT_MISC2_NIC_INDEX_MASK 0x0000fc00
#define NES_QPCONTEXT_MISC2_NIC_INDEX_SHIFT 10
#define NES_QPCONTEXT_MISC2_SRC_IP_MASK 0x001f0000
#define NES_QPCONTEXT_MISC2_SRC_IP_SHIFT 16
#define NES_QPCONTEXT_MISC2_TOS_MASK 0xff000000
#define NES_QPCONTEXT_MISC2_TOS_SHIFT 24
#define NES_QPCONTEXT_MISC2_TRAFFIC_CLASS_MASK 0xff000000
#define NES_QPCONTEXT_MISC2_TRAFFIC_CLASS_SHIFT 24
/* QP Context Tcp State/Flow Label Fields */
#define NES_QPCONTEXT_TCPFLOW_FLOW_LABEL_MASK 0x000fffff
#define NES_QPCONTEXT_TCPFLOW_FLOW_LABEL_SHIFT 0
#define NES_QPCONTEXT_TCPFLOW_TCP_STATE_MASK 0xf0000000
#define NES_QPCONTEXT_TCPFLOW_TCP_STATE_SHIFT 28
enum nes_qp_tcp_state {
NES_QPCONTEXT_TCPSTATE_CLOSED = 1,
NES_QPCONTEXT_TCPSTATE_EST = 5,
NES_QPCONTEXT_TCPSTATE_TIME_WAIT = 11,
};
/* QP Context PD Index/wscale Fields */
#define NES_QPCONTEXT_PDWSCALE_RCV_WSCALE_MASK 0x0000000f
#define NES_QPCONTEXT_PDWSCALE_RCV_WSCALE_SHIFT 0
#define NES_QPCONTEXT_PDWSCALE_SND_WSCALE_MASK 0x00000f00
#define NES_QPCONTEXT_PDWSCALE_SND_WSCALE_SHIFT 8
#define NES_QPCONTEXT_PDWSCALE_PDINDEX_MASK 0xffff0000
#define NES_QPCONTEXT_PDWSCALE_PDINDEX_SHIFT 16
/* QP Context Keepalive Fields */
#define NES_QPCONTEXT_KEEPALIVE_DELTA_MASK 0x0000ffff
#define NES_QPCONTEXT_KEEPALIVE_DELTA_SHIFT 0
#define NES_QPCONTEXT_KEEPALIVE_PROBE_CNT_MASK 0x00ff0000
#define NES_QPCONTEXT_KEEPALIVE_PROBE_CNT_SHIFT 16
#define NES_QPCONTEXT_KEEPALIVE_INTV_MASK 0xff000000
#define NES_QPCONTEXT_KEEPALIVE_INTV_SHIFT 24
/* QP Context ORD/IRD Fields */
#define NES_QPCONTEXT_ORDIRD_ORDSIZE_MASK 0x0000007f
#define NES_QPCONTEXT_ORDIRD_ORDSIZE_SHIFT 0
#define NES_QPCONTEXT_ORDIRD_IRDSIZE_MASK 0x00030000
#define NES_QPCONTEXT_ORDIRD_IRDSIZE_SHIFT 16
#define NES_QPCONTEXT_ORDIRD_IWARP_MODE_MASK 0x30000000
#define NES_QPCONTEXT_ORDIRD_IWARP_MODE_SHIFT 28
enum nes_ord_ird_bits {
NES_QPCONTEXT_ORDIRD_WRPDU = 0x02000000,
NES_QPCONTEXT_ORDIRD_LSMM_PRESENT = 0x04000000,
NES_QPCONTEXT_ORDIRD_ALSMM = 0x08000000,
NES_QPCONTEXT_ORDIRD_AAH = 0x40000000,
NES_QPCONTEXT_ORDIRD_RNMC = 0x80000000
};
enum nes_iwarp_qp_state {
NES_QPCONTEXT_IWARP_STATE_NONEXIST = 0,
NES_QPCONTEXT_IWARP_STATE_IDLE = 1,
NES_QPCONTEXT_IWARP_STATE_RTS = 2,
NES_QPCONTEXT_IWARP_STATE_CLOSING = 3,
NES_QPCONTEXT_IWARP_STATE_TERMINATE = 5,
NES_QPCONTEXT_IWARP_STATE_ERROR = 6
};
#endif /* NES_CONTEXT_H */
/*
* Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __NES_HW_H
#define __NES_HW_H
#define NES_PHY_TYPE_1G 2
#define NES_PHY_TYPE_IRIS 3
#define NES_PHY_TYPE_PUMA_10G 6
#define NES_MULTICAST_PF_MAX 8
enum pci_regs {
NES_INT_STAT = 0x0000,
NES_INT_MASK = 0x0004,
NES_INT_PENDING = 0x0008,
NES_INTF_INT_STAT = 0x000C,
NES_INTF_INT_MASK = 0x0010,
NES_TIMER_STAT = 0x0014,
NES_PERIODIC_CONTROL = 0x0018,
NES_ONE_SHOT_CONTROL = 0x001C,
NES_EEPROM_COMMAND = 0x0020,
NES_EEPROM_DATA = 0x0024,
NES_FLASH_COMMAND = 0x0028,
NES_FLASH_DATA = 0x002C,
NES_SOFTWARE_RESET = 0x0030,
NES_CQ_ACK = 0x0034,
NES_WQE_ALLOC = 0x0040,
NES_CQE_ALLOC = 0x0044,
};
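/*
* Note: these appear to be offsets into the directly mapped BAR0 register
* block; the first five entries line up with the int_status, int_mask,
* int_pending, intf_int_status and intf_int_mask fields of struct
* nes_pci_regs declared later in this header.
*/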
enum indexed_regs {
NES_IDX_CREATE_CQP_LOW = 0x0000,
NES_IDX_CREATE_CQP_HIGH = 0x0004,
NES_IDX_QP_CONTROL = 0x0040,
NES_IDX_FLM_CONTROL = 0x0080,
NES_IDX_INT_CPU_STATUS = 0x00a0,
NES_IDX_GPIO_CONTROL = 0x00f0,
NES_IDX_GPIO_DATA = 0x00f4,
NES_IDX_TCP_CONFIG0 = 0x01e4,
NES_IDX_TCP_TIMER_CONFIG = 0x01ec,
NES_IDX_TCP_NOW = 0x01f0,
NES_IDX_QP_MAX_CFG_SIZES = 0x0200,
NES_IDX_QP_CTX_SIZE = 0x0218,
NES_IDX_TCP_TIMER_SIZE0 = 0x0238,
NES_IDX_TCP_TIMER_SIZE1 = 0x0240,
NES_IDX_ARP_CACHE_SIZE = 0x0258,
NES_IDX_CQ_CTX_SIZE = 0x0260,
NES_IDX_MRT_SIZE = 0x0278,
NES_IDX_PBL_REGION_SIZE = 0x0280,
NES_IDX_IRRQ_COUNT = 0x02b0,
NES_IDX_RX_WINDOW_BUFFER_PAGE_TABLE_SIZE = 0x02f0,
NES_IDX_RX_WINDOW_BUFFER_SIZE = 0x0300,
NES_IDX_DST_IP_ADDR = 0x0400,
NES_IDX_PCIX_DIAG = 0x08e8,
NES_IDX_MPP_DEBUG = 0x0a00,
NES_IDX_PORT_RX_DISCARDS = 0x0a30,
NES_IDX_PORT_TX_DISCARDS = 0x0a34,
NES_IDX_MPP_LB_DEBUG = 0x0b00,
NES_IDX_DENALI_CTL_22 = 0x1058,
NES_IDX_MAC_TX_CONTROL = 0x2000,
NES_IDX_MAC_TX_CONFIG = 0x2004,
NES_IDX_MAC_TX_PAUSE_QUANTA = 0x2008,
NES_IDX_MAC_RX_CONTROL = 0x200c,
NES_IDX_MAC_RX_CONFIG = 0x2010,
NES_IDX_MAC_EXACT_MATCH_BOTTOM = 0x201c,
NES_IDX_MAC_MDIO_CONTROL = 0x2084,
NES_IDX_MAC_TX_OCTETS_LOW = 0x2100,
NES_IDX_MAC_TX_OCTETS_HIGH = 0x2104,
NES_IDX_MAC_TX_FRAMES_LOW = 0x2108,
NES_IDX_MAC_TX_FRAMES_HIGH = 0x210c,
NES_IDX_MAC_TX_PAUSE_FRAMES = 0x2118,
NES_IDX_MAC_TX_ERRORS = 0x2138,
NES_IDX_MAC_RX_OCTETS_LOW = 0x213c,
NES_IDX_MAC_RX_OCTETS_HIGH = 0x2140,
NES_IDX_MAC_RX_FRAMES_LOW = 0x2144,
NES_IDX_MAC_RX_FRAMES_HIGH = 0x2148,
NES_IDX_MAC_RX_BC_FRAMES_LOW = 0x214c,
NES_IDX_MAC_RX_MC_FRAMES_HIGH = 0x2150,
NES_IDX_MAC_RX_PAUSE_FRAMES = 0x2154,
NES_IDX_MAC_RX_SHORT_FRAMES = 0x2174,
NES_IDX_MAC_RX_OVERSIZED_FRAMES = 0x2178,
NES_IDX_MAC_RX_JABBER_FRAMES = 0x217c,
NES_IDX_MAC_RX_CRC_ERR_FRAMES = 0x2180,
NES_IDX_MAC_RX_LENGTH_ERR_FRAMES = 0x2184,
NES_IDX_MAC_RX_SYMBOL_ERR_FRAMES = 0x2188,
NES_IDX_MAC_INT_STATUS = 0x21f0,
NES_IDX_MAC_INT_MASK = 0x21f4,
NES_IDX_PHY_PCS_CONTROL_STATUS0 = 0x2800,
NES_IDX_PHY_PCS_CONTROL_STATUS1 = 0x2a00,
NES_IDX_ETH_SERDES_COMMON_CONTROL0 = 0x2808,
NES_IDX_ETH_SERDES_COMMON_CONTROL1 = 0x2a08,
NES_IDX_ETH_SERDES_COMMON_STATUS0 = 0x280c,
NES_IDX_ETH_SERDES_COMMON_STATUS1 = 0x2a0c,
NES_IDX_ETH_SERDES_TX_EMP0 = 0x2810,
NES_IDX_ETH_SERDES_TX_EMP1 = 0x2a10,
NES_IDX_ETH_SERDES_TX_DRIVE0 = 0x2814,
NES_IDX_ETH_SERDES_TX_DRIVE1 = 0x2a14,
NES_IDX_ETH_SERDES_RX_MODE0 = 0x2818,
NES_IDX_ETH_SERDES_RX_MODE1 = 0x2a18,
NES_IDX_ETH_SERDES_RX_SIGDET0 = 0x281c,
NES_IDX_ETH_SERDES_RX_SIGDET1 = 0x2a1c,
NES_IDX_ETH_SERDES_BYPASS0 = 0x2820,
NES_IDX_ETH_SERDES_BYPASS1 = 0x2a20,
NES_IDX_ETH_SERDES_LOOPBACK_CONTROL0 = 0x2824,
NES_IDX_ETH_SERDES_LOOPBACK_CONTROL1 = 0x2a24,
NES_IDX_ETH_SERDES_RX_EQ_CONTROL0 = 0x2828,
NES_IDX_ETH_SERDES_RX_EQ_CONTROL1 = 0x2a28,
NES_IDX_ETH_SERDES_RX_EQ_STATUS0 = 0x282c,
NES_IDX_ETH_SERDES_RX_EQ_STATUS1 = 0x2a2c,
NES_IDX_ETH_SERDES_CDR_RESET0 = 0x2830,
NES_IDX_ETH_SERDES_CDR_RESET1 = 0x2a30,
NES_IDX_ETH_SERDES_CDR_CONTROL0 = 0x2834,
NES_IDX_ETH_SERDES_CDR_CONTROL1 = 0x2a34,
NES_IDX_ETH_SERDES_TX_HIGHZ_LANE_MODE0 = 0x2838,
NES_IDX_ETH_SERDES_TX_HIGHZ_LANE_MODE1 = 0x2a38,
NES_IDX_ENDNODE0_NSTAT_RX_DISCARD = 0x3080,
NES_IDX_ENDNODE0_NSTAT_RX_OCTETS_LO = 0x3000,
NES_IDX_ENDNODE0_NSTAT_RX_OCTETS_HI = 0x3004,
NES_IDX_ENDNODE0_NSTAT_RX_FRAMES_LO = 0x3008,
NES_IDX_ENDNODE0_NSTAT_RX_FRAMES_HI = 0x300c,
NES_IDX_ENDNODE0_NSTAT_TX_OCTETS_LO = 0x7000,
NES_IDX_ENDNODE0_NSTAT_TX_OCTETS_HI = 0x7004,
NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_LO = 0x7008,
NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_HI = 0x700c,
NES_IDX_CM_CONFIG = 0x5100,
NES_IDX_NIC_LOGPORT_TO_PHYPORT = 0x6000,
NES_IDX_NIC_PHYPORT_TO_USW = 0x6008,
NES_IDX_NIC_ACTIVE = 0x6010,
NES_IDX_NIC_UNICAST_ALL = 0x6018,
NES_IDX_NIC_MULTICAST_ALL = 0x6020,
NES_IDX_NIC_MULTICAST_ENABLE = 0x6028,
NES_IDX_NIC_BROADCAST_ON = 0x6030,
NES_IDX_USED_CHUNKS_TX = 0x60b0,
NES_IDX_TX_POOL_SIZE = 0x60b8,
NES_IDX_QUAD_HASH_TABLE_SIZE = 0x6148,
NES_IDX_PERFECT_FILTER_LOW = 0x6200,
NES_IDX_PERFECT_FILTER_HIGH = 0x6204,
NES_IDX_IPV4_TCP_REXMITS = 0x7080,
NES_IDX_DEBUG_ERROR_CONTROL_STATUS = 0x913c,
NES_IDX_DEBUG_ERROR_MASKS0 = 0x9140,
NES_IDX_DEBUG_ERROR_MASKS1 = 0x9144,
NES_IDX_DEBUG_ERROR_MASKS2 = 0x9148,
NES_IDX_DEBUG_ERROR_MASKS3 = 0x914c,
NES_IDX_DEBUG_ERROR_MASKS4 = 0x9150,
NES_IDX_DEBUG_ERROR_MASKS5 = 0x9154,
};
#define NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE 1
#define NES_IDX_MPP_DEBUG_PORT_DISABLE_PAUSE (1 << 17)
enum nes_cqp_opcodes {
NES_CQP_CREATE_QP = 0x00,
NES_CQP_MODIFY_QP = 0x01,
NES_CQP_DESTROY_QP = 0x02,
NES_CQP_CREATE_CQ = 0x03,
NES_CQP_MODIFY_CQ = 0x04,
NES_CQP_DESTROY_CQ = 0x05,
NES_CQP_ALLOCATE_STAG = 0x09,
NES_CQP_REGISTER_STAG = 0x0a,
NES_CQP_QUERY_STAG = 0x0b,
NES_CQP_REGISTER_SHARED_STAG = 0x0c,
NES_CQP_DEALLOCATE_STAG = 0x0d,
NES_CQP_MANAGE_ARP_CACHE = 0x0f,
NES_CQP_SUSPEND_QPS = 0x11,
NES_CQP_UPLOAD_CONTEXT = 0x13,
NES_CQP_CREATE_CEQ = 0x16,
NES_CQP_DESTROY_CEQ = 0x18,
NES_CQP_CREATE_AEQ = 0x19,
NES_CQP_DESTROY_AEQ = 0x1b,
NES_CQP_LMI_ACCESS = 0x20,
NES_CQP_FLUSH_WQES = 0x22,
NES_CQP_MANAGE_APBVT = 0x23
};
enum nes_cqp_wqe_word_idx {
NES_CQP_WQE_OPCODE_IDX = 0,
NES_CQP_WQE_ID_IDX = 1,
NES_CQP_WQE_COMP_CTX_LOW_IDX = 2,
NES_CQP_WQE_COMP_CTX_HIGH_IDX = 3,
NES_CQP_WQE_COMP_SCRATCH_LOW_IDX = 4,
NES_CQP_WQE_COMP_SCRATCH_HIGH_IDX = 5,
};
enum nes_cqp_cq_wqeword_idx {
NES_CQP_CQ_WQE_PBL_LOW_IDX = 6,
NES_CQP_CQ_WQE_PBL_HIGH_IDX = 7,
NES_CQP_CQ_WQE_CQ_CONTEXT_LOW_IDX = 8,
NES_CQP_CQ_WQE_CQ_CONTEXT_HIGH_IDX = 9,
NES_CQP_CQ_WQE_DOORBELL_INDEX_HIGH_IDX = 10,
};
enum nes_cqp_stag_wqeword_idx {
NES_CQP_STAG_WQE_PBL_BLK_COUNT_IDX = 1,
NES_CQP_STAG_WQE_LEN_HIGH_PD_IDX = 6,
NES_CQP_STAG_WQE_LEN_LOW_IDX = 7,
NES_CQP_STAG_WQE_STAG_IDX = 8,
NES_CQP_STAG_WQE_VA_LOW_IDX = 10,
NES_CQP_STAG_WQE_VA_HIGH_IDX = 11,
NES_CQP_STAG_WQE_PA_LOW_IDX = 12,
NES_CQP_STAG_WQE_PA_HIGH_IDX = 13,
NES_CQP_STAG_WQE_PBL_LEN_IDX = 14
};
#define NES_CQP_OP_IWARP_STATE_SHIFT 28
enum nes_cqp_qp_bits {
NES_CQP_QP_ARP_VALID = (1<<8),
NES_CQP_QP_WINBUF_VALID = (1<<9),
NES_CQP_QP_CONTEXT_VALID = (1<<10),
NES_CQP_QP_ORD_VALID = (1<<11),
NES_CQP_QP_WINBUF_DATAIND_EN = (1<<12),
NES_CQP_QP_VIRT_WQS = (1<<13),
NES_CQP_QP_DEL_HTE = (1<<14),
NES_CQP_QP_CQS_VALID = (1<<15),
NES_CQP_QP_TYPE_TSA = 0,
NES_CQP_QP_TYPE_IWARP = (1<<16),
NES_CQP_QP_TYPE_CQP = (4<<16),
NES_CQP_QP_TYPE_NIC = (5<<16),
NES_CQP_QP_MSS_CHG = (1<<20),
NES_CQP_QP_STATIC_RESOURCES = (1<<21),
NES_CQP_QP_IGNORE_MW_BOUND = (1<<22),
NES_CQP_QP_VWQ_USE_LMI = (1<<23),
NES_CQP_QP_IWARP_STATE_IDLE = (1<<NES_CQP_OP_IWARP_STATE_SHIFT),
NES_CQP_QP_IWARP_STATE_RTS = (2<<NES_CQP_OP_IWARP_STATE_SHIFT),
NES_CQP_QP_IWARP_STATE_CLOSING = (3<<NES_CQP_OP_IWARP_STATE_SHIFT),
NES_CQP_QP_IWARP_STATE_TERMINATE = (5<<NES_CQP_OP_IWARP_STATE_SHIFT),
NES_CQP_QP_IWARP_STATE_ERROR = (6<<NES_CQP_OP_IWARP_STATE_SHIFT),
NES_CQP_QP_IWARP_STATE_MASK = (7<<NES_CQP_OP_IWARP_STATE_SHIFT),
NES_CQP_QP_RESET = (1<<31),
};
enum nes_cqp_qp_wqe_word_idx {
NES_CQP_QP_WQE_CONTEXT_LOW_IDX = 6,
NES_CQP_QP_WQE_CONTEXT_HIGH_IDX = 7,
NES_CQP_QP_WQE_NEW_MSS_IDX = 15,
};
enum nes_nic_ctx_bits {
NES_NIC_CTX_RQ_SIZE_32 = (3<<8),
NES_NIC_CTX_RQ_SIZE_512 = (3<<8),
NES_NIC_CTX_SQ_SIZE_32 = (1<<10),
NES_NIC_CTX_SQ_SIZE_512 = (3<<10),
};
enum nes_nic_qp_ctx_word_idx {
NES_NIC_CTX_MISC_IDX = 0,
NES_NIC_CTX_SQ_LOW_IDX = 2,
NES_NIC_CTX_SQ_HIGH_IDX = 3,
NES_NIC_CTX_RQ_LOW_IDX = 4,
NES_NIC_CTX_RQ_HIGH_IDX = 5,
};
enum nes_cqp_cq_bits {
NES_CQP_CQ_CEQE_MASK = (1<<9),
NES_CQP_CQ_CEQ_VALID = (1<<10),
NES_CQP_CQ_RESIZE = (1<<11),
NES_CQP_CQ_CHK_OVERFLOW = (1<<12),
NES_CQP_CQ_4KB_CHUNK = (1<<14),
NES_CQP_CQ_VIRT = (1<<15),
};
enum nes_cqp_stag_bits {
NES_CQP_STAG_VA_TO = (1<<9),
NES_CQP_STAG_DEALLOC_PBLS = (1<<10),
NES_CQP_STAG_PBL_BLK_SIZE = (1<<11),
NES_CQP_STAG_MR = (1<<13),
NES_CQP_STAG_RIGHTS_LOCAL_READ = (1<<16),
NES_CQP_STAG_RIGHTS_LOCAL_WRITE = (1<<17),
NES_CQP_STAG_RIGHTS_REMOTE_READ = (1<<18),
NES_CQP_STAG_RIGHTS_REMOTE_WRITE = (1<<19),
NES_CQP_STAG_RIGHTS_WINDOW_BIND = (1<<20),
NES_CQP_STAG_REM_ACC_EN = (1<<21),
NES_CQP_STAG_LEAVE_PENDING = (1<<31),
};
enum nes_cqp_ceq_wqeword_idx {
NES_CQP_CEQ_WQE_ELEMENT_COUNT_IDX = 1,
NES_CQP_CEQ_WQE_PBL_LOW_IDX = 6,
NES_CQP_CEQ_WQE_PBL_HIGH_IDX = 7,
};
enum nes_cqp_ceq_bits {
NES_CQP_CEQ_4KB_CHUNK = (1<<14),
NES_CQP_CEQ_VIRT = (1<<15),
};
enum nes_cqp_aeq_wqeword_idx {
NES_CQP_AEQ_WQE_ELEMENT_COUNT_IDX = 1,
NES_CQP_AEQ_WQE_PBL_LOW_IDX = 6,
NES_CQP_AEQ_WQE_PBL_HIGH_IDX = 7,
};
enum nes_cqp_aeq_bits {
NES_CQP_AEQ_4KB_CHUNK = (1<<14),
NES_CQP_AEQ_VIRT = (1<<15),
};
enum nes_cqp_lmi_wqeword_idx {
NES_CQP_LMI_WQE_LMI_OFFSET_IDX = 1,
NES_CQP_LMI_WQE_FRAG_LOW_IDX = 8,
NES_CQP_LMI_WQE_FRAG_HIGH_IDX = 9,
NES_CQP_LMI_WQE_FRAG_LEN_IDX = 10,
};
enum nes_cqp_arp_wqeword_idx {
NES_CQP_ARP_WQE_MAC_ADDR_LOW_IDX = 6,
NES_CQP_ARP_WQE_MAC_HIGH_IDX = 7,
NES_CQP_ARP_WQE_REACHABILITY_MAX_IDX = 1,
};
enum nes_cqp_upload_wqeword_idx {
NES_CQP_UPLOAD_WQE_CTXT_LOW_IDX = 6,
NES_CQP_UPLOAD_WQE_CTXT_HIGH_IDX = 7,
NES_CQP_UPLOAD_WQE_HTE_IDX = 8,
};
enum nes_cqp_arp_bits {
NES_CQP_ARP_VALID = (1<<8),
NES_CQP_ARP_PERM = (1<<9),
};
enum nes_cqp_flush_bits {
NES_CQP_FLUSH_SQ = (1<<30),
NES_CQP_FLUSH_RQ = (1<<31),
};
enum nes_cqe_opcode_bits {
NES_CQE_STAG_VALID = (1<<6),
NES_CQE_ERROR = (1<<7),
NES_CQE_SQ = (1<<8),
NES_CQE_SE = (1<<9),
NES_CQE_PSH = (1<<29),
NES_CQE_FIN = (1<<30),
NES_CQE_VALID = (1<<31),
};
enum nes_cqe_word_idx {
NES_CQE_PAYLOAD_LENGTH_IDX = 0,
NES_CQE_COMP_COMP_CTX_LOW_IDX = 2,
NES_CQE_COMP_COMP_CTX_HIGH_IDX = 3,
NES_CQE_INV_STAG_IDX = 4,
NES_CQE_QP_ID_IDX = 5,
NES_CQE_ERROR_CODE_IDX = 6,
NES_CQE_OPCODE_IDX = 7,
};
enum nes_ceqe_word_idx {
NES_CEQE_CQ_CTX_LOW_IDX = 0,
NES_CEQE_CQ_CTX_HIGH_IDX = 1,
};
enum nes_ceqe_status_bit {
NES_CEQE_VALID = (1<<31),
};
enum nes_int_bits {
NES_INT_CEQ0 = (1<<0),
NES_INT_CEQ1 = (1<<1),
NES_INT_CEQ2 = (1<<2),
NES_INT_CEQ3 = (1<<3),
NES_INT_CEQ4 = (1<<4),
NES_INT_CEQ5 = (1<<5),
NES_INT_CEQ6 = (1<<6),
NES_INT_CEQ7 = (1<<7),
NES_INT_CEQ8 = (1<<8),
NES_INT_CEQ9 = (1<<9),
NES_INT_CEQ10 = (1<<10),
NES_INT_CEQ11 = (1<<11),
NES_INT_CEQ12 = (1<<12),
NES_INT_CEQ13 = (1<<13),
NES_INT_CEQ14 = (1<<14),
NES_INT_CEQ15 = (1<<15),
NES_INT_AEQ0 = (1<<16),
NES_INT_AEQ1 = (1<<17),
NES_INT_AEQ2 = (1<<18),
NES_INT_AEQ3 = (1<<19),
NES_INT_AEQ4 = (1<<20),
NES_INT_AEQ5 = (1<<21),
NES_INT_AEQ6 = (1<<22),
NES_INT_AEQ7 = (1<<23),
NES_INT_MAC0 = (1<<24),
NES_INT_MAC1 = (1<<25),
NES_INT_MAC2 = (1<<26),
NES_INT_MAC3 = (1<<27),
NES_INT_TSW = (1<<28),
NES_INT_TIMER = (1<<29),
NES_INT_INTF = (1<<30),
};
enum nes_intf_int_bits {
NES_INTF_INT_PCIERR = (1<<0),
NES_INTF_PERIODIC_TIMER = (1<<2),
NES_INTF_ONE_SHOT_TIMER = (1<<3),
NES_INTF_INT_CRITERR = (1<<14),
NES_INTF_INT_AEQ0_OFLOW = (1<<16),
NES_INTF_INT_AEQ1_OFLOW = (1<<17),
NES_INTF_INT_AEQ2_OFLOW = (1<<18),
NES_INTF_INT_AEQ3_OFLOW = (1<<19),
NES_INTF_INT_AEQ4_OFLOW = (1<<20),
NES_INTF_INT_AEQ5_OFLOW = (1<<21),
NES_INTF_INT_AEQ6_OFLOW = (1<<22),
NES_INTF_INT_AEQ7_OFLOW = (1<<23),
NES_INTF_INT_AEQ_OFLOW = (0xff<<16),
};
enum nes_mac_int_bits {
NES_MAC_INT_LINK_STAT_CHG = (1<<1),
NES_MAC_INT_XGMII_EXT = (1<<2),
NES_MAC_INT_TX_UNDERFLOW = (1<<6),
NES_MAC_INT_TX_ERROR = (1<<7),
};
enum nes_cqe_allocate_bits {
NES_CQE_ALLOC_INC_SELECT = (1<<28),
NES_CQE_ALLOC_NOTIFY_NEXT = (1<<29),
NES_CQE_ALLOC_NOTIFY_SE = (1<<30),
NES_CQE_ALLOC_RESET = (1<<31),
};
enum nes_nic_rq_wqe_word_idx {
NES_NIC_RQ_WQE_LENGTH_1_0_IDX = 0,
NES_NIC_RQ_WQE_LENGTH_3_2_IDX = 1,
NES_NIC_RQ_WQE_FRAG0_LOW_IDX = 2,
NES_NIC_RQ_WQE_FRAG0_HIGH_IDX = 3,
NES_NIC_RQ_WQE_FRAG1_LOW_IDX = 4,
NES_NIC_RQ_WQE_FRAG1_HIGH_IDX = 5,
NES_NIC_RQ_WQE_FRAG2_LOW_IDX = 6,
NES_NIC_RQ_WQE_FRAG2_HIGH_IDX = 7,
NES_NIC_RQ_WQE_FRAG3_LOW_IDX = 8,
NES_NIC_RQ_WQE_FRAG3_HIGH_IDX = 9,
};
enum nes_nic_sq_wqe_word_idx {
NES_NIC_SQ_WQE_MISC_IDX = 0,
NES_NIC_SQ_WQE_TOTAL_LENGTH_IDX = 1,
NES_NIC_SQ_WQE_LSO_INFO_IDX = 2,
NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX = 3,
NES_NIC_SQ_WQE_LENGTH_2_1_IDX = 4,
NES_NIC_SQ_WQE_LENGTH_4_3_IDX = 5,
NES_NIC_SQ_WQE_FRAG0_LOW_IDX = 6,
NES_NIC_SQ_WQE_FRAG0_HIGH_IDX = 7,
NES_NIC_SQ_WQE_FRAG1_LOW_IDX = 8,
NES_NIC_SQ_WQE_FRAG1_HIGH_IDX = 9,
NES_NIC_SQ_WQE_FRAG2_LOW_IDX = 10,
NES_NIC_SQ_WQE_FRAG2_HIGH_IDX = 11,
NES_NIC_SQ_WQE_FRAG3_LOW_IDX = 12,
NES_NIC_SQ_WQE_FRAG3_HIGH_IDX = 13,
NES_NIC_SQ_WQE_FRAG4_LOW_IDX = 14,
NES_NIC_SQ_WQE_FRAG4_HIGH_IDX = 15,
};
enum nes_iwarp_sq_wqe_word_idx {
NES_IWARP_SQ_WQE_MISC_IDX = 0,
NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX = 1,
NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX = 2,
NES_IWARP_SQ_WQE_COMP_CTX_HIGH_IDX = 3,
NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX = 4,
NES_IWARP_SQ_WQE_COMP_SCRATCH_HIGH_IDX = 5,
NES_IWARP_SQ_WQE_INV_STAG_LOW_IDX = 7,
NES_IWARP_SQ_WQE_RDMA_TO_LOW_IDX = 8,
NES_IWARP_SQ_WQE_RDMA_TO_HIGH_IDX = 9,
NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX = 10,
NES_IWARP_SQ_WQE_RDMA_STAG_IDX = 11,
NES_IWARP_SQ_WQE_IMM_DATA_START_IDX = 12,
NES_IWARP_SQ_WQE_FRAG0_LOW_IDX = 16,
NES_IWARP_SQ_WQE_FRAG0_HIGH_IDX = 17,
NES_IWARP_SQ_WQE_LENGTH0_IDX = 18,
NES_IWARP_SQ_WQE_STAG0_IDX = 19,
NES_IWARP_SQ_WQE_FRAG1_LOW_IDX = 20,
NES_IWARP_SQ_WQE_FRAG1_HIGH_IDX = 21,
NES_IWARP_SQ_WQE_LENGTH1_IDX = 22,
NES_IWARP_SQ_WQE_STAG1_IDX = 23,
NES_IWARP_SQ_WQE_FRAG2_LOW_IDX = 24,
NES_IWARP_SQ_WQE_FRAG2_HIGH_IDX = 25,
NES_IWARP_SQ_WQE_LENGTH2_IDX = 26,
NES_IWARP_SQ_WQE_STAG2_IDX = 27,
NES_IWARP_SQ_WQE_FRAG3_LOW_IDX = 28,
NES_IWARP_SQ_WQE_FRAG3_HIGH_IDX = 29,
NES_IWARP_SQ_WQE_LENGTH3_IDX = 30,
NES_IWARP_SQ_WQE_STAG3_IDX = 31,
};
enum nes_iwarp_sq_bind_wqe_word_idx {
NES_IWARP_SQ_BIND_WQE_MR_IDX = 6,
NES_IWARP_SQ_BIND_WQE_MW_IDX = 7,
NES_IWARP_SQ_BIND_WQE_LENGTH_LOW_IDX = 8,
NES_IWARP_SQ_BIND_WQE_LENGTH_HIGH_IDX = 9,
NES_IWARP_SQ_BIND_WQE_VA_FBO_LOW_IDX = 10,
NES_IWARP_SQ_BIND_WQE_VA_FBO_HIGH_IDX = 11,
};
enum nes_iwarp_sq_fmr_wqe_word_idx {
NES_IWARP_SQ_FMR_WQE_MR_STAG_IDX = 7,
NES_IWARP_SQ_FMR_WQE_LENGTH_LOW_IDX = 8,
NES_IWARP_SQ_FMR_WQE_LENGTH_HIGH_IDX = 9,
NES_IWARP_SQ_FMR_WQE_VA_FBO_LOW_IDX = 10,
NES_IWARP_SQ_FMR_WQE_VA_FBO_HIGH_IDX = 11,
NES_IWARP_SQ_FMR_WQE_PBL_ADDR_LOW_IDX = 12,
NES_IWARP_SQ_FMR_WQE_PBL_ADDR_HIGH_IDX = 13,
NES_IWARP_SQ_FMR_WQE_PBL_LENGTH_IDX = 14,
};
enum nes_iwarp_sq_locinv_wqe_word_idx {
NES_IWARP_SQ_LOCINV_WQE_INV_STAG_IDX = 6,
};
enum nes_iwarp_rq_wqe_word_idx {
NES_IWARP_RQ_WQE_TOTAL_PAYLOAD_IDX = 1,
NES_IWARP_RQ_WQE_COMP_CTX_LOW_IDX = 2,
NES_IWARP_RQ_WQE_COMP_CTX_HIGH_IDX = 3,
NES_IWARP_RQ_WQE_COMP_SCRATCH_LOW_IDX = 4,
NES_IWARP_RQ_WQE_COMP_SCRATCH_HIGH_IDX = 5,
NES_IWARP_RQ_WQE_FRAG0_LOW_IDX = 8,
NES_IWARP_RQ_WQE_FRAG0_HIGH_IDX = 9,
NES_IWARP_RQ_WQE_LENGTH0_IDX = 10,
NES_IWARP_RQ_WQE_STAG0_IDX = 11,
NES_IWARP_RQ_WQE_FRAG1_LOW_IDX = 12,
NES_IWARP_RQ_WQE_FRAG1_HIGH_IDX = 13,
NES_IWARP_RQ_WQE_LENGTH1_IDX = 14,
NES_IWARP_RQ_WQE_STAG1_IDX = 15,
NES_IWARP_RQ_WQE_FRAG2_LOW_IDX = 16,
NES_IWARP_RQ_WQE_FRAG2_HIGH_IDX = 17,
NES_IWARP_RQ_WQE_LENGTH2_IDX = 18,
NES_IWARP_RQ_WQE_STAG2_IDX = 19,
NES_IWARP_RQ_WQE_FRAG3_LOW_IDX = 20,
NES_IWARP_RQ_WQE_FRAG3_HIGH_IDX = 21,
NES_IWARP_RQ_WQE_LENGTH3_IDX = 22,
NES_IWARP_RQ_WQE_STAG3_IDX = 23,
};
enum nes_nic_sq_wqe_bits {
NES_NIC_SQ_WQE_PHDR_CS_READY = (1<<21),
NES_NIC_SQ_WQE_LSO_ENABLE = (1<<22),
NES_NIC_SQ_WQE_TAGVALUE_ENABLE = (1<<23),
NES_NIC_SQ_WQE_DISABLE_CHKSUM = (1<<30),
NES_NIC_SQ_WQE_COMPLETION = (1<<31),
};
enum nes_nic_cqe_word_idx {
NES_NIC_CQE_ACCQP_ID_IDX = 0,
NES_NIC_CQE_TAG_PKT_TYPE_IDX = 2,
NES_NIC_CQE_MISC_IDX = 3,
};
#define NES_PKT_TYPE_APBVT_BITS 0xC112
#define NES_PKT_TYPE_APBVT_MASK 0xff3e
#define NES_PKT_TYPE_PVALID_BITS 0x10000000
#define NES_PKT_TYPE_PVALID_MASK 0x30000000
#define NES_PKT_TYPE_TCPV4_BITS 0x0110
#define NES_PKT_TYPE_TCPV4_MASK 0x3f30
#define NES_PKT_TYPE_UDPV4_BITS 0x0210
#define NES_PKT_TYPE_UDPV4_MASK 0x3f30
#define NES_PKT_TYPE_IPV4_BITS 0x0010
#define NES_PKT_TYPE_IPV4_MASK 0x3f30
#define NES_PKT_TYPE_OTHER_BITS 0x0000
#define NES_PKT_TYPE_OTHER_MASK 0x0030
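/*
* Usage sketch (illustrative only, not part of this driver): each *_BITS /
* *_MASK pair above classifies the packet-type word reported with a NIC CQE
* (cf. NES_NIC_CQE_TAG_PKT_TYPE_IDX), e.g. a hypothetical TCP/IPv4 check:
*
*	static inline int nes_pkt_type_is_tcpv4(u16 pkt_type)
*	{
*		return (pkt_type & NES_PKT_TYPE_TCPV4_MASK) ==
*			NES_PKT_TYPE_TCPV4_BITS;
*	}
*/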
#define NES_NIC_CQE_ERRV_SHIFT 16
enum nes_nic_ev_bits {
NES_NIC_ERRV_BITS_MODE = (1<<0),
NES_NIC_ERRV_BITS_IPV4_CSUM_ERR = (1<<1),
NES_NIC_ERRV_BITS_TCPUDP_CSUM_ERR = (1<<2),
NES_NIC_ERRV_BITS_WQE_OVERRUN = (1<<3),
NES_NIC_ERRV_BITS_IPH_ERR = (1<<4),
};
enum nes_nic_cqe_bits {
NES_NIC_CQE_ERRV_MASK = (0xff<<NES_NIC_CQE_ERRV_SHIFT),
NES_NIC_CQE_SQ = (1<<24),
NES_NIC_CQE_ACCQP_PORT = (1<<28),
NES_NIC_CQE_ACCQP_VALID = (1<<29),
NES_NIC_CQE_TAG_VALID = (1<<30),
NES_NIC_CQE_VALID = (1<<31),
};
enum nes_aeqe_word_idx {
NES_AEQE_COMP_CTXT_LOW_IDX = 0,
NES_AEQE_COMP_CTXT_HIGH_IDX = 1,
NES_AEQE_COMP_QP_CQ_ID_IDX = 2,
NES_AEQE_MISC_IDX = 3,
};
enum nes_aeqe_bits {
NES_AEQE_QP = (1<<16),
NES_AEQE_CQ = (1<<17),
NES_AEQE_SQ = (1<<18),
NES_AEQE_INBOUND_RDMA = (1<<19),
NES_AEQE_IWARP_STATE_MASK = (7<<20),
NES_AEQE_TCP_STATE_MASK = (0xf<<24),
NES_AEQE_VALID = (1<<31),
};
#define NES_AEQE_IWARP_STATE_SHIFT 20
#define NES_AEQE_TCP_STATE_SHIFT 24
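/*
* Usage sketch (illustrative only, not part of this driver): the iWARP and
* TCP states are recovered from the AEQE misc word (NES_AEQE_MISC_IDX) with
* the mask/shift pairs above:
*
*	iwarp_state = (aeqe_misc & NES_AEQE_IWARP_STATE_MASK) >>
*			NES_AEQE_IWARP_STATE_SHIFT;
*	tcp_state = (aeqe_misc & NES_AEQE_TCP_STATE_MASK) >>
*			NES_AEQE_TCP_STATE_SHIFT;
*/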
enum nes_aeqe_iwarp_state {
NES_AEQE_IWARP_STATE_NON_EXISTANT = 0,
NES_AEQE_IWARP_STATE_IDLE = 1,
NES_AEQE_IWARP_STATE_RTS = 2,
NES_AEQE_IWARP_STATE_CLOSING = 3,
NES_AEQE_IWARP_STATE_TERMINATE = 5,
NES_AEQE_IWARP_STATE_ERROR = 6
};
enum nes_aeqe_tcp_state {
NES_AEQE_TCP_STATE_NON_EXISTANT = 0,
NES_AEQE_TCP_STATE_CLOSED = 1,
NES_AEQE_TCP_STATE_LISTEN = 2,
NES_AEQE_TCP_STATE_SYN_SENT = 3,
NES_AEQE_TCP_STATE_SYN_RCVD = 4,
NES_AEQE_TCP_STATE_ESTABLISHED = 5,
NES_AEQE_TCP_STATE_CLOSE_WAIT = 6,
NES_AEQE_TCP_STATE_FIN_WAIT_1 = 7,
NES_AEQE_TCP_STATE_CLOSING = 8,
NES_AEQE_TCP_STATE_LAST_ACK = 9,
NES_AEQE_TCP_STATE_FIN_WAIT_2 = 10,
NES_AEQE_TCP_STATE_TIME_WAIT = 11
};
enum nes_aeqe_aeid {
NES_AEQE_AEID_AMP_UNALLOCATED_STAG = 0x0102,
NES_AEQE_AEID_AMP_INVALID_STAG = 0x0103,
NES_AEQE_AEID_AMP_BAD_QP = 0x0104,
NES_AEQE_AEID_AMP_BAD_PD = 0x0105,
NES_AEQE_AEID_AMP_BAD_STAG_KEY = 0x0106,
NES_AEQE_AEID_AMP_BAD_STAG_INDEX = 0x0107,
NES_AEQE_AEID_AMP_BOUNDS_VIOLATION = 0x0108,
NES_AEQE_AEID_AMP_RIGHTS_VIOLATION = 0x0109,
NES_AEQE_AEID_AMP_TO_WRAP = 0x010a,
NES_AEQE_AEID_AMP_FASTREG_SHARED = 0x010b,
NES_AEQE_AEID_AMP_FASTREG_VALID_STAG = 0x010c,
NES_AEQE_AEID_AMP_FASTREG_MW_STAG = 0x010d,
NES_AEQE_AEID_AMP_FASTREG_INVALID_RIGHTS = 0x010e,
NES_AEQE_AEID_AMP_FASTREG_PBL_TABLE_OVERFLOW = 0x010f,
NES_AEQE_AEID_AMP_FASTREG_INVALID_LENGTH = 0x0110,
NES_AEQE_AEID_AMP_INVALIDATE_SHARED = 0x0111,
NES_AEQE_AEID_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS = 0x0112,
NES_AEQE_AEID_AMP_INVALIDATE_MR_WITH_BOUND_WINDOWS = 0x0113,
NES_AEQE_AEID_AMP_MWBIND_VALID_STAG = 0x0114,
NES_AEQE_AEID_AMP_MWBIND_OF_MR_STAG = 0x0115,
NES_AEQE_AEID_AMP_MWBIND_TO_ZERO_BASED_STAG = 0x0116,
NES_AEQE_AEID_AMP_MWBIND_TO_MW_STAG = 0x0117,
NES_AEQE_AEID_AMP_MWBIND_INVALID_RIGHTS = 0x0118,
NES_AEQE_AEID_AMP_MWBIND_INVALID_BOUNDS = 0x0119,
NES_AEQE_AEID_AMP_MWBIND_TO_INVALID_PARENT = 0x011a,
NES_AEQE_AEID_AMP_MWBIND_BIND_DISABLED = 0x011b,
NES_AEQE_AEID_BAD_CLOSE = 0x0201,
NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE = 0x0202,
NES_AEQE_AEID_CQ_OPERATION_ERROR = 0x0203,
NES_AEQE_AEID_PRIV_OPERATION_DENIED = 0x0204,
NES_AEQE_AEID_RDMA_READ_WHILE_ORD_ZERO = 0x0205,
NES_AEQE_AEID_STAG_ZERO_INVALID = 0x0206,
NES_AEQE_AEID_DDP_INVALID_MSN_GAP_IN_MSN = 0x0301,
NES_AEQE_AEID_DDP_INVALID_MSN_RANGE_IS_NOT_VALID = 0x0302,
NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER = 0x0303,
NES_AEQE_AEID_DDP_UBE_INVALID_DDP_VERSION = 0x0304,
NES_AEQE_AEID_DDP_UBE_INVALID_MO = 0x0305,
NES_AEQE_AEID_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE = 0x0306,
NES_AEQE_AEID_DDP_UBE_INVALID_QN = 0x0307,
NES_AEQE_AEID_DDP_NO_L_BIT = 0x0308,
NES_AEQE_AEID_RDMAP_ROE_INVALID_RDMAP_VERSION = 0x0311,
NES_AEQE_AEID_RDMAP_ROE_UNEXPECTED_OPCODE = 0x0312,
NES_AEQE_AEID_ROE_INVALID_RDMA_READ_REQUEST = 0x0313,
NES_AEQE_AEID_ROE_INVALID_RDMA_WRITE_OR_READ_RESP = 0x0314,
NES_AEQE_AEID_INVALID_ARP_ENTRY = 0x0401,
NES_AEQE_AEID_INVALID_TCP_OPTION_RCVD = 0x0402,
NES_AEQE_AEID_STALE_ARP_ENTRY = 0x0403,
NES_AEQE_AEID_LLP_CLOSE_COMPLETE = 0x0501,
NES_AEQE_AEID_LLP_CONNECTION_RESET = 0x0502,
NES_AEQE_AEID_LLP_FIN_RECEIVED = 0x0503,
NES_AEQE_AEID_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH = 0x0504,
NES_AEQE_AEID_LLP_RECEIVED_MPA_CRC_ERROR = 0x0505,
NES_AEQE_AEID_LLP_SEGMENT_TOO_LARGE = 0x0506,
NES_AEQE_AEID_LLP_SEGMENT_TOO_SMALL = 0x0507,
NES_AEQE_AEID_LLP_SYN_RECEIVED = 0x0508,
NES_AEQE_AEID_LLP_TERMINATE_RECEIVED = 0x0509,
NES_AEQE_AEID_LLP_TOO_MANY_RETRIES = 0x050a,
NES_AEQE_AEID_LLP_TOO_MANY_KEEPALIVE_RETRIES = 0x050b,
NES_AEQE_AEID_RESET_SENT = 0x0601,
NES_AEQE_AEID_TERMINATE_SENT = 0x0602,
NES_AEQE_AEID_DDP_LCE_LOCAL_CATASTROPHIC = 0x0700
};
enum nes_iwarp_sq_opcodes {
NES_IWARP_SQ_WQE_WRPDU = (1<<15),
NES_IWARP_SQ_WQE_PSH = (1<<21),
NES_IWARP_SQ_WQE_STREAMING = (1<<23),
NES_IWARP_SQ_WQE_IMM_DATA = (1<<28),
NES_IWARP_SQ_WQE_READ_FENCE = (1<<29),
NES_IWARP_SQ_WQE_LOCAL_FENCE = (1<<30),
NES_IWARP_SQ_WQE_SIGNALED_COMPL = (1<<31),
};
enum nes_iwarp_sq_wqe_bits {
NES_IWARP_SQ_OP_RDMAW = 0,
NES_IWARP_SQ_OP_RDMAR = 1,
NES_IWARP_SQ_OP_SEND = 3,
NES_IWARP_SQ_OP_SENDINV = 4,
NES_IWARP_SQ_OP_SENDSE = 5,
NES_IWARP_SQ_OP_SENDSEINV = 6,
NES_IWARP_SQ_OP_BIND = 8,
NES_IWARP_SQ_OP_FAST_REG = 9,
NES_IWARP_SQ_OP_LOCINV = 10,
NES_IWARP_SQ_OP_RDMAR_LOCINV = 11,
NES_IWARP_SQ_OP_NOP = 12,
};
#define NES_EEPROM_READ_REQUEST (1<<16)
#define NES_MAC_ADDR_VALID (1<<20)
/*
* NES index registers init values.
*/
struct nes_init_values {
u32 index;
u32 data;
u8 wrt;
};
/*
* NES registers in BAR0.
*/
struct nes_pci_regs {
u32 int_status;
u32 int_mask;
u32 int_pending;
u32 intf_int_status;
u32 intf_int_mask;
u32 other_regs[59]; /* pad out to 256 bytes for now */
};
#define NES_CQP_SQ_SIZE 128
#define NES_CCQ_SIZE 128
#define NES_NIC_WQ_SIZE 512
#define NES_NIC_CTX_SIZE ((NES_NIC_CTX_RQ_SIZE_512) | (NES_NIC_CTX_SQ_SIZE_512))
#define NES_NIC_BACK_STORE 0x00038000
struct nes_device;
struct nes_hw_nic_qp_context {
__le32 context_words[6];
};
struct nes_hw_nic_sq_wqe {
__le32 wqe_words[16];
};
struct nes_hw_nic_rq_wqe {
__le32 wqe_words[16];
};
struct nes_hw_nic_cqe {
__le32 cqe_words[4];
};
struct nes_hw_cqp_qp_context {
__le32 context_words[4];
};
struct nes_hw_cqp_wqe {
__le32 wqe_words[16];
};
struct nes_hw_qp_wqe {
__le32 wqe_words[32];
};
struct nes_hw_cqe {
__le32 cqe_words[8];
};
struct nes_hw_ceqe {
__le32 ceqe_words[2];
};
struct nes_hw_aeqe {
__le32 aeqe_words[4];
};
struct nes_cqp_request {
union {
u64 cqp_callback_context;
void *cqp_callback_pointer;
};
wait_queue_head_t waitq;
struct nes_hw_cqp_wqe cqp_wqe;
struct list_head list;
atomic_t refcount;
void (*cqp_callback)(struct nes_device *nesdev, struct nes_cqp_request *cqp_request);
u16 major_code;
u16 minor_code;
u8 waiting;
u8 request_done;
u8 dynamic;
u8 callback;
};
struct nes_hw_cqp {
struct nes_hw_cqp_wqe *sq_vbase;
dma_addr_t sq_pbase;
spinlock_t lock;
wait_queue_head_t waitq;
u16 qp_id;
u16 sq_head;
u16 sq_tail;
u16 sq_size;
};
#define NES_FIRST_FRAG_SIZE 128
struct nes_first_frag {
u8 buffer[NES_FIRST_FRAG_SIZE];
};
struct nes_hw_nic {
struct nes_first_frag *first_frag_vbase; /* virtual address of first frags */
struct nes_hw_nic_sq_wqe *sq_vbase; /* virtual address of sq */
struct nes_hw_nic_rq_wqe *rq_vbase; /* virtual address of rq */
struct sk_buff *tx_skb[NES_NIC_WQ_SIZE];
struct sk_buff *rx_skb[NES_NIC_WQ_SIZE];
dma_addr_t frag_paddr[NES_NIC_WQ_SIZE];
unsigned long first_frag_overflow[BITS_TO_LONGS(NES_NIC_WQ_SIZE)];
dma_addr_t sq_pbase; /* PCI memory for host rings */
dma_addr_t rq_pbase; /* PCI memory for host rings */
u16 qp_id;
u16 sq_head;
u16 sq_tail;
u16 sq_size;
u16 rq_head;
u16 rq_tail;
u16 rq_size;
u8 replenishing_rq;
u8 reserved;
spinlock_t sq_lock;
spinlock_t rq_lock;
};
struct nes_hw_nic_cq {
struct nes_hw_nic_cqe volatile *cq_vbase; /* PCI memory for host rings */
void (*ce_handler)(struct nes_device *nesdev, struct nes_hw_nic_cq *cq);
dma_addr_t cq_pbase; /* PCI memory for host rings */
int rx_cqes_completed;
int cqe_allocs_pending;
int rx_pkts_indicated;
u16 cq_head;
u16 cq_size;
u16 cq_number;
u8 cqes_pending;
};
struct nes_hw_qp {
struct nes_hw_qp_wqe *sq_vbase; /* PCI memory for host rings */
struct nes_hw_qp_wqe *rq_vbase; /* PCI memory for host rings */
void *q2_vbase; /* PCI memory for host rings */
dma_addr_t sq_pbase; /* PCI memory for host rings */
dma_addr_t rq_pbase; /* PCI memory for host rings */
dma_addr_t q2_pbase; /* PCI memory for host rings */
u32 qp_id;
u16 sq_head;
u16 sq_tail;
u16 sq_size;
u16 rq_head;
u16 rq_tail;
u16 rq_size;
u8 rq_encoded_size;
u8 sq_encoded_size;
};
struct nes_hw_cq {
struct nes_hw_cqe volatile *cq_vbase; /* PCI memory for host rings */
void (*ce_handler)(struct nes_device *nesdev, struct nes_hw_cq *cq);
dma_addr_t cq_pbase; /* PCI memory for host rings */
u16 cq_head;
u16 cq_size;
u16 cq_number;
};
struct nes_hw_ceq {
struct nes_hw_ceqe volatile *ceq_vbase; /* PCI memory for host rings */
dma_addr_t ceq_pbase; /* PCI memory for host rings */
u16 ceq_head;
u16 ceq_size;
};
struct nes_hw_aeq {
struct nes_hw_aeqe volatile *aeq_vbase; /* PCI memory for host rings */
dma_addr_t aeq_pbase; /* PCI memory for host rings */
u16 aeq_head;
u16 aeq_size;
};
struct nic_qp_map {
u8 qpid;
u8 nic_index;
u8 logical_port;
u8 is_hnic;
};
#define NES_CQP_ARP_AEQ_INDEX_MASK 0x000f0000
#define NES_CQP_ARP_AEQ_INDEX_SHIFT 16
#define NES_CQP_APBVT_ADD 0x00008000
#define NES_CQP_APBVT_NIC_SHIFT 16
#define NES_ARP_ADD 1
#define NES_ARP_DELETE 2
#define NES_ARP_RESOLVE 3
#define NES_MAC_SW_IDLE 0
#define NES_MAC_SW_INTERRUPT 1
#define NES_MAC_SW_MH 2
struct nes_arp_entry {
u32 ip_addr;
u8 mac_addr[ETH_ALEN];
};
#define NES_NIC_FAST_TIMER 96
#define NES_NIC_FAST_TIMER_LOW 40
#define NES_NIC_FAST_TIMER_HIGH 1000
#define DEFAULT_NES_QL_HIGH 256
#define DEFAULT_NES_QL_LOW 16
#define DEFAULT_NES_QL_TARGET 64
#define DEFAULT_JUMBO_NES_QL_LOW 12
#define DEFAULT_JUMBO_NES_QL_TARGET 40
#define DEFAULT_JUMBO_NES_QL_HIGH 128
#define NES_NIC_CQ_DOWNWARD_TREND 8
struct nes_hw_tune_timer {
/* u16 cq_count; */
u16 threshold_low;
u16 threshold_target;
u16 threshold_high;
u16 timer_in_use;
u16 timer_in_use_old;
u16 timer_in_use_min;
u16 timer_in_use_max;
u8 timer_direction_upward;
u8 timer_direction_downward;
u16 cq_count_old;
u8 cq_direction_downward;
};
#define NES_TIMER_INT_LIMIT 2
#define NES_TIMER_INT_LIMIT_DYNAMIC 10
#define NES_TIMER_ENABLE_LIMIT 4
#define NES_MAX_LINK_INTERRUPTS 128
#define NES_MAX_LINK_CHECK 200
struct nes_adapter {
u64 fw_ver;
unsigned long *allocated_qps;
unsigned long *allocated_cqs;
unsigned long *allocated_mrs;
unsigned long *allocated_pds;
unsigned long *allocated_arps;
struct nes_qp **qp_table;
struct workqueue_struct *work_q;
struct list_head list;
struct list_head active_listeners;
/* list of the netdevs associated with each logical port */
struct list_head nesvnic_list[4];
struct timer_list mh_timer;
struct timer_list lc_timer;
struct work_struct work;
spinlock_t resource_lock;
spinlock_t phy_lock;
spinlock_t pbl_lock;
spinlock_t periodic_timer_lock;
struct nes_arp_entry arp_table[NES_MAX_ARP_TABLE_SIZE];
/* Adapter CEQ and AEQs */
struct nes_hw_ceq ceq[16];
struct nes_hw_aeq aeq[8];
struct nes_hw_tune_timer tune_timer;
unsigned long doorbell_start;
u32 hw_rev;
u32 vendor_id;
u32 vendor_part_id;
u32 device_cap_flags;
u32 tick_delta;
u32 timer_int_req;
u32 arp_table_size;
u32 next_arp_index;
u32 max_mr;
u32 max_256pbl;
u32 max_4kpbl;
u32 free_256pbl;
u32 free_4kpbl;
u32 max_mr_size;
u32 max_qp;
u32 next_qp;
u32 max_irrq;
u32 max_qp_wr;
u32 max_sge;
u32 max_cq;
u32 next_cq;
u32 max_cqe;
u32 max_pd;
u32 base_pd;
u32 next_pd;
u32 hte_index_mask;
/* EEPROM information */
u32 rx_pool_size;
u32 tx_pool_size;
u32 rx_threshold;
u32 tcp_timer_core_clk_divisor;
u32 iwarp_config;
u32 cm_config;
u32 sws_timer_config;
u32 tcp_config1;
u32 wqm_wat;
u32 core_clock;
u32 firmware_version;
u32 nic_rx_eth_route_err;
u32 et_rx_coalesce_usecs;
u32 et_rx_max_coalesced_frames;
u32 et_rx_coalesce_usecs_irq;
u32 et_rx_max_coalesced_frames_irq;
u32 et_pkt_rate_low;
u32 et_rx_coalesce_usecs_low;
u32 et_rx_max_coalesced_frames_low;
u32 et_pkt_rate_high;
u32 et_rx_coalesce_usecs_high;
u32 et_rx_max_coalesced_frames_high;
u32 et_rate_sample_interval;
u32 timer_int_limit;
/* Adapter base MAC address */
u32 mac_addr_low;
u16 mac_addr_high;
u16 firmware_eeprom_offset;
u16 software_eeprom_offset;
u16 max_irrq_wr;
/* pd config for each port */
u16 pd_config_size[4];
u16 pd_config_base[4];
u16 link_interrupt_count[4];
/* the phy index for each port */
u8 phy_index[4];
u8 mac_sw_state[4];
u8 mac_link_down[4];
u8 phy_type[4];
/* PCI information */
unsigned int devfn;
unsigned char bus_number;
unsigned char OneG_Mode;
unsigned char ref_count;
u8 netdev_count;
u8 netdev_max; /* from host nic address count in EEPROM */
u8 port_count;
u8 virtwq;
u8 et_use_adaptive_rx_coalesce;
u8 adapter_fcn_count;
};
struct nes_pbl {
u64 *pbl_vbase;
dma_addr_t pbl_pbase;
struct page *page;
unsigned long user_base;
u32 pbl_size;
struct list_head list;
/* TODO: need to add list for two level tables */
};
struct nes_listener {
struct work_struct work;
struct workqueue_struct *wq;
struct nes_vnic *nesvnic;
struct iw_cm_id *cm_id;
struct list_head list;
unsigned long socket;
u8 accept_failed;
};
struct nes_ib_device;
struct nes_vnic {
struct nes_ib_device *nesibdev;
u64 sq_full;
u64 sq_locked;
u64 tso_requests;
u64 segmented_tso_requests;
u64 linearized_skbs;
u64 tx_sw_dropped;
u64 endnode_nstat_rx_discard;
u64 endnode_nstat_rx_octets;
u64 endnode_nstat_rx_frames;
u64 endnode_nstat_tx_octets;
u64 endnode_nstat_tx_frames;
u64 endnode_ipv4_tcp_retransmits;
/* void *mem; */
struct nes_device *nesdev;
struct net_device *netdev;
struct vlan_group *vlan_grp;
atomic_t rx_skbs_needed;
atomic_t rx_skb_timer_running;
int budget;
u32 msg_enable;
/* u32 tx_avail; */
__be32 local_ipaddr;
struct napi_struct napi;
spinlock_t tx_lock; /* could use netdev tx lock? */
struct timer_list rq_wqes_timer;
u32 nic_mem_size;
void *nic_vbase;
dma_addr_t nic_pbase;
struct nes_hw_nic nic;
struct nes_hw_nic_cq nic_cq;
u32 mcrq_qp_id;
struct nes_ucontext *mcrq_ucontext;
struct nes_cqp_request* (*get_cqp_request)(struct nes_device *nesdev);
void (*post_cqp_request)(struct nes_device*, struct nes_cqp_request *, int);
int (*mcrq_mcast_filter)( struct nes_vnic* nesvnic, __u8* dmi_addr );
struct net_device_stats netstats;
/* used to put the netdev on the adapters logical port list */
struct list_head list;
u16 max_frame_size;
u8 netdev_open;
u8 linkup;
u8 logical_port;
u8 netdev_index; /* might not be needed, indexes nesdev->netdev */
u8 perfect_filter_index;
u8 nic_index;
u8 qp_nic_index[4];
u8 next_qp_nic_index;
u8 of_device_registered;
u8 rdma_enabled;
u8 rx_checksum_disabled;
};
struct nes_ib_device {
struct ib_device ibdev;
struct nes_vnic *nesvnic;
/* Virtual RNIC Limits */
u32 max_mr;
u32 max_qp;
u32 max_cq;
u32 max_pd;
u32 num_mr;
u32 num_qp;
u32 num_cq;
u32 num_pd;
};
#define nes_vlan_rx vlan_hwaccel_receive_skb
#define nes_netif_rx netif_receive_skb
#endif /* __NES_HW_H */
/*
* Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ethtool.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <linux/inet.h>
#include "nes.h"
static struct nic_qp_map nic_qp_mapping_0[] = {
{16,0,0,1},{24,4,0,0},{28,8,0,0},{32,12,0,0},
{20,2,2,1},{26,6,2,0},{30,10,2,0},{34,14,2,0},
{18,1,1,1},{25,5,1,0},{29,9,1,0},{33,13,1,0},
{22,3,3,1},{27,7,3,0},{31,11,3,0},{35,15,3,0}
};
static struct nic_qp_map nic_qp_mapping_1[] = {
{18,1,1,1},{25,5,1,0},{29,9,1,0},{33,13,1,0},
{22,3,3,1},{27,7,3,0},{31,11,3,0},{35,15,3,0}
};
static struct nic_qp_map nic_qp_mapping_2[] = {
{20,2,2,1},{26,6,2,0},{30,10,2,0},{34,14,2,0}
};
static struct nic_qp_map nic_qp_mapping_3[] = {
{22,3,3,1},{27,7,3,0},{31,11,3,0},{35,15,3,0}
};
static struct nic_qp_map nic_qp_mapping_4[] = {
{28,8,0,0},{32,12,0,0}
};
static struct nic_qp_map nic_qp_mapping_5[] = {
{29,9,1,0},{33,13,1,0}
};
static struct nic_qp_map nic_qp_mapping_6[] = {
{30,10,2,0},{34,14,2,0}
};
static struct nic_qp_map nic_qp_mapping_7[] = {
{31,11,3,0},{35,15,3,0}
};
static struct nic_qp_map *nic_qp_mapping_per_function[] = {
nic_qp_mapping_0, nic_qp_mapping_1, nic_qp_mapping_2, nic_qp_mapping_3,
nic_qp_mapping_4, nic_qp_mapping_5, nic_qp_mapping_6, nic_qp_mapping_7
};
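/*
* Note: each entry above follows the struct nic_qp_map layout from nes_hw.h,
* i.e. {qpid, nic_index, logical_port, is_hnic}; the outer array selects the
* mapping table for the adapter's PCI function, as its name suggests.
*/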
static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
| NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
static int debug = -1;
static int nes_netdev_open(struct net_device *);
static int nes_netdev_stop(struct net_device *);
static int nes_netdev_start_xmit(struct sk_buff *, struct net_device *);
static struct net_device_stats *nes_netdev_get_stats(struct net_device *);
static void nes_netdev_tx_timeout(struct net_device *);
static int nes_netdev_set_mac_address(struct net_device *, void *);
static int nes_netdev_change_mtu(struct net_device *, int);
/**
* nes_netdev_poll - NAPI poll handler: drain NIC CQEs and re-arm the CQ once no work is pending
*/
static int nes_netdev_poll(struct napi_struct *napi, int budget)
{
struct nes_vnic *nesvnic = container_of(napi, struct nes_vnic, napi);
struct net_device *netdev = nesvnic->netdev;
struct nes_device *nesdev = nesvnic->nesdev;
struct nes_hw_nic_cq *nescq = &nesvnic->nic_cq;
nesvnic->budget = budget;
nescq->cqes_pending = 0;
nescq->rx_cqes_completed = 0;
nescq->cqe_allocs_pending = 0;
nescq->rx_pkts_indicated = 0;
nes_nic_ce_handler(nesdev, nescq);
if (nescq->cqes_pending == 0) {
netif_rx_complete(netdev, napi);
/* clear out completed cqes and arm */
nes_write32(nesdev->regs+NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT |
nescq->cq_number | (nescq->cqe_allocs_pending << 16));
nes_read32(nesdev->regs+NES_CQE_ALLOC);
} else {
/* clear out completed cqes but don't arm */
nes_write32(nesdev->regs+NES_CQE_ALLOC,
nescq->cq_number | (nescq->cqe_allocs_pending << 16));
nes_debug(NES_DBG_NETDEV, "%s: exiting with work pending\n",
nesvnic->netdev->name);
}
return nescq->rx_pkts_indicated;
}
/**
* nes_netdev_open - Activate the network interface; ifconfig ethx up.
*/
static int nes_netdev_open(struct net_device *netdev)
{
u32 macaddr_low;
u16 macaddr_high;
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
int ret;
int i;
struct nes_vnic *first_nesvnic;
u32 nic_active_bit;
u32 nic_active;
assert(nesdev != NULL);
first_nesvnic = list_entry(nesdev->nesadapter->nesvnic_list[nesdev->mac_index].next,
struct nes_vnic, list);
if (netif_msg_ifup(nesvnic))
printk(KERN_INFO PFX "%s: enabling interface\n", netdev->name);
ret = nes_init_nic_qp(nesdev, netdev);
if (ret) {
return ret;
}
netif_carrier_off(netdev);
netif_stop_queue(netdev);
if ((!nesvnic->of_device_registered) && (nesvnic->rdma_enabled)) {
nesvnic->nesibdev = nes_init_ofa_device(netdev);
if (nesvnic->nesibdev == NULL) {
printk(KERN_ERR PFX "%s: nesvnic->nesibdev alloc failed\n", netdev->name);
} else {
nesvnic->nesibdev->nesvnic = nesvnic;
ret = nes_register_ofa_device(nesvnic->nesibdev);
if (ret) {
printk(KERN_ERR PFX "%s: Unable to register RDMA device, ret = %d\n",
netdev->name, ret);
}
}
}
/* Set packet filters */
nic_active_bit = 1 << nesvnic->nic_index;
nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_ACTIVE);
nic_active |= nic_active_bit;
nes_write_indexed(nesdev, NES_IDX_NIC_ACTIVE, nic_active);
nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ENABLE);
nic_active |= nic_active_bit;
nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ENABLE, nic_active);
nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_BROADCAST_ON);
nic_active |= nic_active_bit;
nes_write_indexed(nesdev, NES_IDX_NIC_BROADCAST_ON, nic_active);
macaddr_high = ((u16)netdev->dev_addr[0]) << 8;
macaddr_high += (u16)netdev->dev_addr[1];
macaddr_low = ((u32)netdev->dev_addr[2]) << 24;
macaddr_low += ((u32)netdev->dev_addr[3]) << 16;
macaddr_low += ((u32)netdev->dev_addr[4]) << 8;
macaddr_low += (u32)netdev->dev_addr[5];
/* Program the various MAC regs */
for (i = 0; i < NES_MAX_PORT_COUNT; i++) {
if (nesvnic->qp_nic_index[i] == 0xf) {
break;
}
nes_debug(NES_DBG_NETDEV, "i=%d, perfect filter table index= %d, PERF FILTER LOW"
" (Addr:%08X) = %08X, HIGH = %08X.\n",
i, nesvnic->qp_nic_index[i],
NES_IDX_PERFECT_FILTER_LOW+((nesvnic->perfect_filter_index + i) * 8),
macaddr_low,
(u32)macaddr_high | NES_MAC_ADDR_VALID |
((((u32)nesvnic->nic_index) << 16)));
nes_write_indexed(nesdev,
NES_IDX_PERFECT_FILTER_LOW + (nesvnic->qp_nic_index[i] * 8),
macaddr_low);
nes_write_indexed(nesdev,
NES_IDX_PERFECT_FILTER_HIGH + (nesvnic->qp_nic_index[i] * 8),
(u32)macaddr_high | NES_MAC_ADDR_VALID |
((((u32)nesvnic->nic_index) << 16)));
}
nes_write32(nesdev->regs+NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT |
nesvnic->nic_cq.cq_number);
nes_read32(nesdev->regs+NES_CQE_ALLOC);
if (first_nesvnic->linkup) {
/* Enable network packets */
nesvnic->linkup = 1;
netif_start_queue(netdev);
netif_carrier_on(netdev);
}
napi_enable(&nesvnic->napi);
nesvnic->netdev_open = 1;
return 0;
}
/**
* nes_netdev_stop - Deactivate the network interface; ifconfig ethx down.
*/
static int nes_netdev_stop(struct net_device *netdev)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
u32 nic_active_mask;
u32 nic_active;
nes_debug(NES_DBG_SHUTDOWN, "nesvnic=%p, nesdev=%p, netdev=%p %s\n",
nesvnic, nesdev, netdev, netdev->name);
if (nesvnic->netdev_open == 0)
return 0;
if (netif_msg_ifdown(nesvnic))
printk(KERN_INFO PFX "%s: disabling interface\n", netdev->name);
/* Disable network packets */
napi_disable(&nesvnic->napi);
netif_stop_queue(netdev);
if ((nesdev->netdev[0] == netdev) && (nesvnic->logical_port == nesdev->mac_index)) {
nes_write_indexed(nesdev,
NES_IDX_MAC_INT_MASK+(0x200*nesdev->mac_index), 0xffffffff);
}
nic_active_mask = ~((u32)(1 << nesvnic->nic_index));
nes_write_indexed(nesdev, NES_IDX_PERFECT_FILTER_HIGH+
(nesvnic->perfect_filter_index*8), 0);
nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_ACTIVE);
nic_active &= nic_active_mask;
nes_write_indexed(nesdev, NES_IDX_NIC_ACTIVE, nic_active);
nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL);
nic_active &= nic_active_mask;
nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL, nic_active);
nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ENABLE);
nic_active &= nic_active_mask;
nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ENABLE, nic_active);
nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL);
nic_active &= nic_active_mask;
nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL, nic_active);
nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_BROADCAST_ON);
nic_active &= nic_active_mask;
nes_write_indexed(nesdev, NES_IDX_NIC_BROADCAST_ON, nic_active);
if (nesvnic->of_device_registered) {
nes_destroy_ofa_device(nesvnic->nesibdev);
nesvnic->nesibdev = NULL;
nesvnic->of_device_registered = 0;
}
nes_destroy_nic_qp(nesvnic);
nesvnic->netdev_open = 0;
return 0;
}
/**
* nes_nic_send - Build one NIC SQ WQE for an skb (copy buffer plus up to four mapped fragments)
*/
static int nes_nic_send(struct sk_buff *skb, struct net_device *netdev)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
struct nes_hw_nic *nesnic = &nesvnic->nic;
struct nes_hw_nic_sq_wqe *nic_sqe;
struct tcphdr *tcph;
__le16 *wqe_fragment_length;
u32 wqe_misc;
u16 wqe_fragment_index = 1; /* first fragment (0) is used by copy buffer */
u16 skb_fragment_index;
dma_addr_t bus_address;
nic_sqe = &nesnic->sq_vbase[nesnic->sq_head];
wqe_fragment_length = (__le16 *)&nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX];
/* setup the VLAN tag if present */
if (vlan_tx_tag_present(skb)) {
nes_debug(NES_DBG_NIC_TX, "%s: VLAN packet to send... VLAN = %08X\n",
netdev->name, vlan_tx_tag_get(skb));
wqe_misc = NES_NIC_SQ_WQE_TAGVALUE_ENABLE;
wqe_fragment_length[0] = (__force __le16) vlan_tx_tag_get(skb);
} else
wqe_misc = 0;
/* bump past the vlan tag */
wqe_fragment_length++;
/* wqe_fragment_address = (u64 *)&nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_LOW_IDX]; */
if (skb->ip_summed == CHECKSUM_PARTIAL) {
tcph = tcp_hdr(skb);
if (1) {
if (skb_is_gso(skb)) {
/* nes_debug(NES_DBG_NIC_TX, "%s: TSO request... seg size = %u\n",
netdev->name, skb_is_gso(skb)); */
wqe_misc |= NES_NIC_SQ_WQE_LSO_ENABLE |
NES_NIC_SQ_WQE_COMPLETION | (u16)skb_is_gso(skb);
set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_LSO_INFO_IDX,
((u32)tcph->doff) |
(((u32)(((unsigned char *)tcph) - skb->data)) << 4));
} else {
wqe_misc |= NES_NIC_SQ_WQE_COMPLETION;
}
}
} else { /* CHECKSUM_HW */
wqe_misc |= NES_NIC_SQ_WQE_DISABLE_CHKSUM | NES_NIC_SQ_WQE_COMPLETION;
}
set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_TOTAL_LENGTH_IDX,
skb->len);
memcpy(&nesnic->first_frag_vbase[nesnic->sq_head].buffer,
skb->data, min(((unsigned int)NES_FIRST_FRAG_SIZE), skb_headlen(skb)));
wqe_fragment_length[0] = cpu_to_le16(min(((unsigned int)NES_FIRST_FRAG_SIZE),
skb_headlen(skb)));
wqe_fragment_length[1] = 0;
if (skb_headlen(skb) > NES_FIRST_FRAG_SIZE) {
if ((skb_shinfo(skb)->nr_frags + 1) > 4) {
nes_debug(NES_DBG_NIC_TX, "%s: Packet with %u fragments not sent, skb_headlen=%u\n",
netdev->name, skb_shinfo(skb)->nr_frags + 2, skb_headlen(skb));
kfree_skb(skb);
nesvnic->tx_sw_dropped++;
return NETDEV_TX_LOCKED;
}
set_bit(nesnic->sq_head, nesnic->first_frag_overflow);
bus_address = pci_map_single(nesdev->pcidev, skb->data + NES_FIRST_FRAG_SIZE,
skb_headlen(skb) - NES_FIRST_FRAG_SIZE, PCI_DMA_TODEVICE);
wqe_fragment_length[wqe_fragment_index++] =
cpu_to_le16(skb_headlen(skb) - NES_FIRST_FRAG_SIZE);
wqe_fragment_length[wqe_fragment_index] = 0;
set_wqe_64bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_FRAG1_LOW_IDX,
((u64)(bus_address)));
nesnic->tx_skb[nesnic->sq_head] = skb;
}
if (skb_headlen(skb) == skb->len) {
if (skb_headlen(skb) <= NES_FIRST_FRAG_SIZE) {
nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_2_1_IDX] = 0;
nesnic->tx_skb[nesnic->sq_head] = NULL;
dev_kfree_skb(skb);
}
} else {
/* Deal with Fragments */
nesnic->tx_skb[nesnic->sq_head] = skb;
for (skb_fragment_index = 0; skb_fragment_index < skb_shinfo(skb)->nr_frags;
skb_fragment_index++) {
bus_address = pci_map_page( nesdev->pcidev,
skb_shinfo(skb)->frags[skb_fragment_index].page,
skb_shinfo(skb)->frags[skb_fragment_index].page_offset,
skb_shinfo(skb)->frags[skb_fragment_index].size,
PCI_DMA_TODEVICE);
wqe_fragment_length[wqe_fragment_index] =
cpu_to_le16(skb_shinfo(skb)->frags[skb_fragment_index].size);
set_wqe_64bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_LOW_IDX+(2*wqe_fragment_index),
bus_address);
wqe_fragment_index++;
if (wqe_fragment_index < 5)
wqe_fragment_length[wqe_fragment_index] = 0;
}
}
set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_MISC_IDX, wqe_misc);
nesnic->sq_head++;
nesnic->sq_head &= nesnic->sq_size - 1;
return NETDEV_TX_OK;
}
/**
* nes_netdev_start_xmit - Transmit entry point: oversized TSO requests are split across several WQEs, everything else is handed to nes_nic_send()
*/
static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
struct nes_hw_nic *nesnic = &nesvnic->nic;
struct nes_hw_nic_sq_wqe *nic_sqe;
struct tcphdr *tcph;
/* struct udphdr *udph; */
#define NES_MAX_TSO_FRAGS 18
/* 64K segment plus overflow on each side */
dma_addr_t tso_bus_address[NES_MAX_TSO_FRAGS];
dma_addr_t bus_address;
u32 tso_frag_index;
u32 tso_frag_count;
u32 tso_wqe_length;
u32 curr_tcp_seq;
u32 wqe_count=1;
u32 send_rc;
struct iphdr *iph;
unsigned long flags;
__le16 *wqe_fragment_length;
u32 nr_frags;
u32 original_first_length;
/* u64 *wqe_fragment_address; */
/* first fragment (0) is used by copy buffer */
u16 wqe_fragment_index=1;
u16 hoffset;
u16 nhoffset;
u16 wqes_needed;
u16 wqes_available;
u32 old_head;
u32 wqe_misc;
/* nes_debug(NES_DBG_NIC_TX, "%s Request to tx NIC packet length %u, headlen %u,"
" (%u frags), tso_size=%u\n",
netdev->name, skb->len, skb_headlen(skb),
skb_shinfo(skb)->nr_frags, skb_is_gso(skb));
*/
if (!netif_carrier_ok(netdev))
return NETDEV_TX_OK;
if (netif_queue_stopped(netdev))
return NETDEV_TX_BUSY;
local_irq_save(flags);
if (!spin_trylock(&nesnic->sq_lock)) {
local_irq_restore(flags);
nesvnic->sq_locked++;
return NETDEV_TX_LOCKED;
}
/* Check if SQ is full (only one free slot left; indices wrap with the power-of-two size mask) */
if ((((nesnic->sq_tail+(nesnic->sq_size*2))-nesnic->sq_head) & (nesnic->sq_size - 1)) == 1) {
if (!netif_queue_stopped(netdev)) {
netif_stop_queue(netdev);
barrier();
if ((((((volatile u16)nesnic->sq_tail)+(nesnic->sq_size*2))-nesnic->sq_head) & (nesnic->sq_size - 1)) != 1) {
netif_start_queue(netdev);
goto sq_no_longer_full;
}
}
nesvnic->sq_full++;
spin_unlock_irqrestore(&nesnic->sq_lock, flags);
return NETDEV_TX_BUSY;
}
sq_no_longer_full:
nr_frags = skb_shinfo(skb)->nr_frags;
if (skb_headlen(skb) > NES_FIRST_FRAG_SIZE) {
nr_frags++;
}
/* Check if too many fragments */
if (unlikely((nr_frags > 4))) {
if (skb_is_gso(skb)) {
nesvnic->segmented_tso_requests++;
nesvnic->tso_requests++;
old_head = nesnic->sq_head;
/* Basically 4 fragments available per WQE with extended fragments */
wqes_needed = nr_frags >> 2;
wqes_needed += (nr_frags&3)?1:0;
wqes_available = (((nesnic->sq_tail+nesnic->sq_size)-nesnic->sq_head) - 1) &
(nesnic->sq_size - 1);
if (unlikely(wqes_needed > wqes_available)) {
if (!netif_queue_stopped(netdev)) {
netif_stop_queue(netdev);
barrier();
wqes_available = (((((volatile u16)nesnic->sq_tail)+nesnic->sq_size)-nesnic->sq_head) - 1) &
(nesnic->sq_size - 1);
if (wqes_needed <= wqes_available) {
netif_start_queue(netdev);
goto tso_sq_no_longer_full;
}
}
nesvnic->sq_full++;
spin_unlock_irqrestore(&nesnic->sq_lock, flags);
nes_debug(NES_DBG_NIC_TX, "%s: HNIC SQ full- TSO request has too many frags!\n",
netdev->name);
return NETDEV_TX_BUSY;
}
tso_sq_no_longer_full:
/* Map all the buffers */
for (tso_frag_count=0; tso_frag_count < skb_shinfo(skb)->nr_frags;
tso_frag_count++) {
tso_bus_address[tso_frag_count] = pci_map_page( nesdev->pcidev,
skb_shinfo(skb)->frags[tso_frag_count].page,
skb_shinfo(skb)->frags[tso_frag_count].page_offset,
skb_shinfo(skb)->frags[tso_frag_count].size,
PCI_DMA_TODEVICE);
}
tso_frag_index = 0;
curr_tcp_seq = ntohl(tcp_hdr(skb)->seq);
hoffset = skb_transport_header(skb) - skb->data;
nhoffset = skb_network_header(skb) - skb->data;
original_first_length = hoffset + ((((struct tcphdr *)skb_transport_header(skb))->doff)<<2);
for (wqe_count=0; wqe_count<((u32)wqes_needed); wqe_count++) {
tso_wqe_length = 0;
nic_sqe = &nesnic->sq_vbase[nesnic->sq_head];
wqe_fragment_length =
(__le16 *)&nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX];
/* setup the VLAN tag if present */
if (vlan_tx_tag_present(skb)) {
nes_debug(NES_DBG_NIC_TX, "%s: VLAN packet to send... VLAN = %08X\n",
netdev->name, vlan_tx_tag_get(skb) );
wqe_misc = NES_NIC_SQ_WQE_TAGVALUE_ENABLE;
wqe_fragment_length[0] = (__force __le16) vlan_tx_tag_get(skb);
} else
wqe_misc = 0;
/* bump past the vlan tag */
wqe_fragment_length++;
/* Assumes header totally fits in allocated buffer and is in first fragment */
if (original_first_length > NES_FIRST_FRAG_SIZE) {
nes_debug(NES_DBG_NIC_TX, "ERROR: SKB header too big, headlen=%u, FIRST_FRAG_SIZE=%u\n",
original_first_length, NES_FIRST_FRAG_SIZE);
nes_debug(NES_DBG_NIC_TX, "%s Request to tx NIC packet length %u, headlen %u,"
" (%u frags), tso_size=%u\n",
netdev->name,
skb->len, skb_headlen(skb),
skb_shinfo(skb)->nr_frags, skb_is_gso(skb));
}
memcpy(&nesnic->first_frag_vbase[nesnic->sq_head].buffer,
skb->data, min(((unsigned int)NES_FIRST_FRAG_SIZE),
original_first_length));
iph = (struct iphdr *)
(&nesnic->first_frag_vbase[nesnic->sq_head].buffer[nhoffset]);
tcph = (struct tcphdr *)
(&nesnic->first_frag_vbase[nesnic->sq_head].buffer[hoffset]);
if ((wqe_count+1)!=(u32)wqes_needed) {
tcph->fin = 0;
tcph->psh = 0;
tcph->rst = 0;
tcph->urg = 0;
}
if (wqe_count) {
tcph->syn = 0;
}
tcph->seq = htonl(curr_tcp_seq);
wqe_fragment_length[0] = cpu_to_le16(min(((unsigned int)NES_FIRST_FRAG_SIZE),
original_first_length));
wqe_fragment_index = 1;
if ((wqe_count==0) && (skb_headlen(skb) > original_first_length)) {
set_bit(nesnic->sq_head, nesnic->first_frag_overflow);
bus_address = pci_map_single(nesdev->pcidev, skb->data + original_first_length,
skb_headlen(skb) - original_first_length, PCI_DMA_TODEVICE);
wqe_fragment_length[wqe_fragment_index++] =
cpu_to_le16(skb_headlen(skb) - original_first_length);
wqe_fragment_length[wqe_fragment_index] = 0;
set_wqe_64bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_FRAG1_LOW_IDX,
bus_address);
}
while (wqe_fragment_index < 5) {
wqe_fragment_length[wqe_fragment_index] =
cpu_to_le16(skb_shinfo(skb)->frags[tso_frag_index].size);
set_wqe_64bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_LOW_IDX+(2*wqe_fragment_index),
(u64)tso_bus_address[tso_frag_index]);
wqe_fragment_index++;
tso_wqe_length += skb_shinfo(skb)->frags[tso_frag_index++].size;
if (wqe_fragment_index < 5)
wqe_fragment_length[wqe_fragment_index] = 0;
if (tso_frag_index == tso_frag_count)
break;
}
if ((wqe_count+1) == (u32)wqes_needed) {
nesnic->tx_skb[nesnic->sq_head] = skb;
} else {
nesnic->tx_skb[nesnic->sq_head] = NULL;
}
wqe_misc |= NES_NIC_SQ_WQE_COMPLETION | (u16)skb_is_gso(skb);
if ((tso_wqe_length + original_first_length) > skb_is_gso(skb)) {
wqe_misc |= NES_NIC_SQ_WQE_LSO_ENABLE;
} else {
iph->tot_len = htons(tso_wqe_length + original_first_length - nhoffset);
}
set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_MISC_IDX,
wqe_misc);
set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_LSO_INFO_IDX,
((u32)tcph->doff) | (((u32)hoffset) << 4));
set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_TOTAL_LENGTH_IDX,
tso_wqe_length + original_first_length);
curr_tcp_seq += tso_wqe_length;
nesnic->sq_head++;
nesnic->sq_head &= nesnic->sq_size-1;
}
} else {
nesvnic->linearized_skbs++;
hoffset = skb_transport_header(skb) - skb->data;
nhoffset = skb_network_header(skb) - skb->data;
skb_linearize(skb);
skb_set_transport_header(skb, hoffset);
skb_set_network_header(skb, nhoffset);
send_rc = nes_nic_send(skb, netdev);
if (send_rc != NETDEV_TX_OK) {
spin_unlock_irqrestore(&nesnic->sq_lock, flags);
return NETDEV_TX_OK;
}
}
} else {
send_rc = nes_nic_send(skb, netdev);
if (send_rc != NETDEV_TX_OK) {
spin_unlock_irqrestore(&nesnic->sq_lock, flags);
return NETDEV_TX_OK;
}
}
barrier();
if (wqe_count)
nes_write32(nesdev->regs+NES_WQE_ALLOC,
(wqe_count << 24) | (1 << 23) | nesvnic->nic.qp_id);
netdev->trans_start = jiffies;
spin_unlock_irqrestore(&nesnic->sq_lock, flags);
return NETDEV_TX_OK;
}
/**
* nes_netdev_get_stats - Gather endnode and MAC hardware counters into netstats
*/
static struct net_device_stats *nes_netdev_get_stats(struct net_device *netdev)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
u64 u64temp;
u32 u32temp;
u32temp = nes_read_indexed(nesdev,
NES_IDX_ENDNODE0_NSTAT_RX_DISCARD + (nesvnic->nic_index*0x200));
nesvnic->netstats.rx_dropped += u32temp;
nesvnic->endnode_nstat_rx_discard += u32temp;
u64temp = (u64)nes_read_indexed(nesdev,
NES_IDX_ENDNODE0_NSTAT_RX_OCTETS_LO + (nesvnic->nic_index*0x200));
u64temp += ((u64)nes_read_indexed(nesdev,
NES_IDX_ENDNODE0_NSTAT_RX_OCTETS_HI + (nesvnic->nic_index*0x200))) << 32;
nesvnic->endnode_nstat_rx_octets += u64temp;
nesvnic->netstats.rx_bytes += u64temp;
u64temp = (u64)nes_read_indexed(nesdev,
NES_IDX_ENDNODE0_NSTAT_RX_FRAMES_LO + (nesvnic->nic_index*0x200));
u64temp += ((u64)nes_read_indexed(nesdev,
NES_IDX_ENDNODE0_NSTAT_RX_FRAMES_HI + (nesvnic->nic_index*0x200))) << 32;
nesvnic->endnode_nstat_rx_frames += u64temp;
nesvnic->netstats.rx_packets += u64temp;
u64temp = (u64)nes_read_indexed(nesdev,
NES_IDX_ENDNODE0_NSTAT_TX_OCTETS_LO + (nesvnic->nic_index*0x200));
u64temp += ((u64)nes_read_indexed(nesdev,
NES_IDX_ENDNODE0_NSTAT_TX_OCTETS_HI + (nesvnic->nic_index*0x200))) << 32;
nesvnic->endnode_nstat_tx_octets += u64temp;
nesvnic->netstats.tx_bytes += u64temp;
u64temp = (u64)nes_read_indexed(nesdev,
NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_LO + (nesvnic->nic_index*0x200));
u64temp += ((u64)nes_read_indexed(nesdev,
NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_HI + (nesvnic->nic_index*0x200))) << 32;
nesvnic->endnode_nstat_tx_frames += u64temp;
nesvnic->netstats.tx_packets += u64temp;
u32temp = nes_read_indexed(nesdev,
NES_IDX_MAC_RX_SHORT_FRAMES + (nesvnic->nesdev->mac_index*0x200));
nesvnic->netstats.rx_dropped += u32temp;
nesvnic->nesdev->mac_rx_errors += u32temp;
nesvnic->nesdev->mac_rx_short_frames += u32temp;
u32temp = nes_read_indexed(nesdev,
NES_IDX_MAC_RX_OVERSIZED_FRAMES + (nesvnic->nesdev->mac_index*0x200));
nesvnic->netstats.rx_dropped += u32temp;
nesvnic->nesdev->mac_rx_errors += u32temp;
nesvnic->nesdev->mac_rx_oversized_frames += u32temp;
u32temp = nes_read_indexed(nesdev,
NES_IDX_MAC_RX_JABBER_FRAMES + (nesvnic->nesdev->mac_index*0x200));
nesvnic->netstats.rx_dropped += u32temp;
nesvnic->nesdev->mac_rx_errors += u32temp;
nesvnic->nesdev->mac_rx_jabber_frames += u32temp;
u32temp = nes_read_indexed(nesdev,
NES_IDX_MAC_RX_SYMBOL_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
nesvnic->netstats.rx_dropped += u32temp;
nesvnic->nesdev->mac_rx_errors += u32temp;
nesvnic->nesdev->mac_rx_symbol_err_frames += u32temp;
u32temp = nes_read_indexed(nesdev,
NES_IDX_MAC_RX_LENGTH_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
nesvnic->netstats.rx_length_errors += u32temp;
nesvnic->nesdev->mac_rx_errors += u32temp;
u32temp = nes_read_indexed(nesdev,
NES_IDX_MAC_RX_CRC_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
nesvnic->nesdev->mac_rx_errors += u32temp;
nesvnic->nesdev->mac_rx_crc_errors += u32temp;
nesvnic->netstats.rx_crc_errors += u32temp;
u32temp = nes_read_indexed(nesdev,
NES_IDX_MAC_TX_ERRORS + (nesvnic->nesdev->mac_index*0x200));
nesvnic->nesdev->mac_tx_errors += u32temp;
nesvnic->netstats.tx_errors += u32temp;
return &nesvnic->netstats;
}
/**
* nes_netdev_tx_timeout
*/
static void nes_netdev_tx_timeout(struct net_device *netdev)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
if (netif_msg_timer(nesvnic))
nes_debug(NES_DBG_NIC_TX, "%s: tx timeout\n", netdev->name);
}
/**
* nes_netdev_set_mac_address
*/
static int nes_netdev_set_mac_address(struct net_device *netdev, void *p)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
struct sockaddr *mac_addr = p;
int i;
u32 macaddr_low;
u16 macaddr_high;
if (!is_valid_ether_addr(mac_addr->sa_data))
return -EADDRNOTAVAIL;
memcpy(netdev->dev_addr, mac_addr->sa_data, netdev->addr_len);
printk(PFX "%s: Address length = %d, Address = %02X%02X%02X%02X%02X%02X..\n",
__FUNCTION__, netdev->addr_len,
mac_addr->sa_data[0], mac_addr->sa_data[1],
mac_addr->sa_data[2], mac_addr->sa_data[3],
mac_addr->sa_data[4], mac_addr->sa_data[5]);
macaddr_high = ((u16)netdev->dev_addr[0]) << 8;
macaddr_high += (u16)netdev->dev_addr[1];
macaddr_low = ((u32)netdev->dev_addr[2]) << 24;
macaddr_low += ((u32)netdev->dev_addr[3]) << 16;
macaddr_low += ((u32)netdev->dev_addr[4]) << 8;
macaddr_low += (u32)netdev->dev_addr[5];
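/* Program the new MAC into the perfect filter entry of every NIC QP
 * owned by this vnic; an index of 0xf marks the end of the list. */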
for (i = 0; i < NES_MAX_PORT_COUNT; i++) {
if (nesvnic->qp_nic_index[i] == 0xf) {
break;
}
nes_write_indexed(nesdev,
NES_IDX_PERFECT_FILTER_LOW + (nesvnic->qp_nic_index[i] * 8),
macaddr_low);
nes_write_indexed(nesdev,
NES_IDX_PERFECT_FILTER_HIGH + (nesvnic->qp_nic_index[i] * 8),
(u32)macaddr_high | NES_MAC_ADDR_VALID |
((((u32)nesvnic->nic_index) << 16)));
}
return 0;
}
/**
* nes_netdev_set_multicast_list
*/
void nes_netdev_set_multicast_list(struct net_device *netdev)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
struct dev_mc_list *multicast_addr;
u32 nic_active_bit;
u32 nic_active;
u32 perfect_filter_register_address;
u32 macaddr_low;
u16 macaddr_high;
u8 mc_all_on = 0;
u8 mc_index;
int mc_nic_index = -1;
nic_active_bit = 1 << nesvnic->nic_index;
if (netdev->flags & IFF_PROMISC) {
nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL);
nic_active |= nic_active_bit;
nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL, nic_active);
nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL);
nic_active |= nic_active_bit;
nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL, nic_active);
mc_all_on = 1;
} else if ((netdev->flags & IFF_ALLMULTI) || (netdev->mc_count > NES_MULTICAST_PF_MAX) ||
(nesvnic->nic_index > 3)) {
nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL);
nic_active |= nic_active_bit;
nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL, nic_active);
nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL);
nic_active &= ~nic_active_bit;
nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL, nic_active);
mc_all_on = 1;
} else {
nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL);
nic_active &= ~nic_active_bit;
nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL, nic_active);
nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL);
nic_active &= ~nic_active_bit;
nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL, nic_active);
}
nes_debug(NES_DBG_NIC_RX, "Number of MC entries = %d, Promiscous = %d, All Multicast = %d.\n",
netdev->mc_count, (netdev->flags & IFF_PROMISC)?1:0,
(netdev->flags & IFF_ALLMULTI)?1:0);
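/* With promiscuous/all-multicast modes ruled out, walk the kernel's
 * multicast list and load each address into a perfect filter slot;
 * unused slots are invalidated by clearing their high word. */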
if (!mc_all_on) {
multicast_addr = netdev->mc_list;
perfect_filter_register_address = NES_IDX_PERFECT_FILTER_LOW + 0x80;
perfect_filter_register_address += nesvnic->nic_index*0x40;
for (mc_index=0; mc_index < NES_MULTICAST_PF_MAX; mc_index++) {
while (multicast_addr && nesvnic->mcrq_mcast_filter && ((mc_nic_index = nesvnic->mcrq_mcast_filter(nesvnic, multicast_addr->dmi_addr)) == 0))
multicast_addr = multicast_addr->next;
if (mc_nic_index < 0)
mc_nic_index = nesvnic->nic_index;
if (multicast_addr) {
nes_debug(NES_DBG_NIC_RX, "Assigning MC Address = %02X%02X%02X%02X%02X%02X to register 0x%04X nic_idx=%d\n",
multicast_addr->dmi_addr[0], multicast_addr->dmi_addr[1],
multicast_addr->dmi_addr[2], multicast_addr->dmi_addr[3],
multicast_addr->dmi_addr[4], multicast_addr->dmi_addr[5],
perfect_filter_register_address+(mc_index * 8), mc_nic_index);
macaddr_high = ((u16)multicast_addr->dmi_addr[0]) << 8;
macaddr_high += (u16)multicast_addr->dmi_addr[1];
macaddr_low = ((u32)multicast_addr->dmi_addr[2]) << 24;
macaddr_low += ((u32)multicast_addr->dmi_addr[3]) << 16;
macaddr_low += ((u32)multicast_addr->dmi_addr[4]) << 8;
macaddr_low += (u32)multicast_addr->dmi_addr[5];
nes_write_indexed(nesdev,
perfect_filter_register_address+(mc_index * 8),
macaddr_low);
nes_write_indexed(nesdev,
perfect_filter_register_address+4+(mc_index * 8),
(u32)macaddr_high | NES_MAC_ADDR_VALID |
((((u32)(1<<mc_nic_index)) << 16)));
multicast_addr = multicast_addr->next;
} else {
nes_debug(NES_DBG_NIC_RX, "Clearing MC Address at register 0x%04X\n",
perfect_filter_register_address+(mc_index * 8));
nes_write_indexed(nesdev,
perfect_filter_register_address+4+(mc_index * 8),
0);
}
}
}
}
/**
* nes_netdev_change_mtu
*/
static int nes_netdev_change_mtu(struct net_device *netdev, int new_mtu)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
int ret = 0;
u8 jumbomode=0;
if ((new_mtu < ETH_ZLEN) || (new_mtu > max_mtu))
return -EINVAL;
netdev->mtu = new_mtu;
nesvnic->max_frame_size = new_mtu+ETH_HLEN;
if (netdev->mtu > 1500) {
jumbomode=1;
}
nes_nic_init_timer_defaults(nesdev, jumbomode);
if (netif_running(netdev)) {
nes_netdev_stop(netdev);
nes_netdev_open(netdev);
}
return ret;
}
/**
* nes_netdev_exit - destroy network device
*/
void nes_netdev_exit(struct nes_vnic *nesvnic)
{
struct net_device *netdev = nesvnic->netdev;
struct nes_ib_device *nesibdev = nesvnic->nesibdev;
nes_debug(NES_DBG_SHUTDOWN, "\n");
/* destroy the ibdevice if RDMA enabled */
if ((nesvnic->rdma_enabled) && (nesvnic->of_device_registered)) {
nes_destroy_ofa_device(nesibdev);
nesvnic->of_device_registered = 0;
nesvnic->nesibdev = NULL;
}
unregister_netdev(netdev);
nes_debug(NES_DBG_SHUTDOWN, "\n");
}
#define NES_ETHTOOL_STAT_COUNT 55
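/* Names for the ethtool statistics; the order must match the values
 * filled in by nes_netdev_get_ethtool_stats() below. */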
static const char nes_ethtool_stringset[NES_ETHTOOL_STAT_COUNT][ETH_GSTRING_LEN] = {
"Link Change Interrupts",
"Linearized SKBs",
"T/GSO Requests",
"Pause Frames Sent",
"Pause Frames Received",
"Internal Routing Errors",
"SQ SW Dropped SKBs",
"SQ Locked",
"SQ Full",
"Segmented TSO Requests",
"Rx Symbol Errors",
"Rx Jabber Errors",
"Rx Oversized Frames",
"Rx Short Frames",
"Endnode Rx Discards",
"Endnode Rx Octets",
"Endnode Rx Frames",
"Endnode Tx Octets",
"Endnode Tx Frames",
"mh detected",
"mh pauses",
"Retransmission Count",
"CM Connects",
"CM Accepts",
"Disconnects",
"Connected Events",
"Connect Requests",
"CM Rejects",
"ModifyQP Timeouts",
"CreateQPs",
"SW DestroyQPs",
"DestroyQPs",
"CM Closes",
"CM Packets Sent",
"CM Packets Bounced",
"CM Packets Created",
"CM Packets Rcvd",
"CM Packets Dropped",
"CM Packets Retrans",
"CM Listens Created",
"CM Listens Destroyed",
"CM Backlog Drops",
"CM Loopbacks",
"CM Nodes Created",
"CM Nodes Destroyed",
"CM Accel Drops",
"CM Resets Received",
"Timer Inits",
"CQ Depth 1",
"CQ Depth 4",
"CQ Depth 16",
"CQ Depth 24",
"CQ Depth 32",
"CQ Depth 128",
"CQ Depth 256",
};
/**
* nes_netdev_get_rx_csum
*/
static u32 nes_netdev_get_rx_csum(struct net_device *netdev)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
if (nesvnic->rx_checksum_disabled)
return 0;
else
return 1;
}
/**
 * nes_netdev_set_rx_csum
*/
static int nes_netdev_set_rx_csum(struct net_device *netdev, u32 enable)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
if (enable)
nesvnic->rx_checksum_disabled = 0;
else
nesvnic->rx_checksum_disabled = 1;
return 0;
}
/**
* nes_netdev_get_stats_count
*/
static int nes_netdev_get_stats_count(struct net_device *netdev)
{
return NES_ETHTOOL_STAT_COUNT;
}
/**
* nes_netdev_get_strings
*/
static void nes_netdev_get_strings(struct net_device *netdev, u32 stringset,
u8 *ethtool_strings)
{
if (stringset == ETH_SS_STATS)
memcpy(ethtool_strings,
&nes_ethtool_stringset,
sizeof(nes_ethtool_stringset));
}
/**
* nes_netdev_get_ethtool_stats
*/
static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *target_ethtool_stats, u64 *target_stat_values)
{
u64 u64temp;
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
u32 nic_count;
u32 u32temp;
target_ethtool_stats->n_stats = NES_ETHTOOL_STAT_COUNT;
target_stat_values[0] = nesvnic->nesdev->link_status_interrupts;
target_stat_values[1] = nesvnic->linearized_skbs;
target_stat_values[2] = nesvnic->tso_requests;
u32temp = nes_read_indexed(nesdev,
NES_IDX_MAC_TX_PAUSE_FRAMES + (nesvnic->nesdev->mac_index*0x200));
nesvnic->nesdev->mac_pause_frames_sent += u32temp;
target_stat_values[3] = nesvnic->nesdev->mac_pause_frames_sent;
u32temp = nes_read_indexed(nesdev,
NES_IDX_MAC_RX_PAUSE_FRAMES + (nesvnic->nesdev->mac_index*0x200));
nesvnic->nesdev->mac_pause_frames_received += u32temp;
u32temp = nes_read_indexed(nesdev,
NES_IDX_PORT_RX_DISCARDS + (nesvnic->nesdev->mac_index*0x40));
nesvnic->nesdev->port_rx_discards += u32temp;
nesvnic->netstats.rx_dropped += u32temp;
u32temp = nes_read_indexed(nesdev,
NES_IDX_PORT_TX_DISCARDS + (nesvnic->nesdev->mac_index*0x40));
nesvnic->nesdev->port_tx_discards += u32temp;
nesvnic->netstats.tx_dropped += u32temp;
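/* Fold in the per-QP endnode counters for every NIC QP owned by this
 * vnic; the list is terminated by an index of 0xf. */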
for (nic_count = 0; nic_count < NES_MAX_PORT_COUNT; nic_count++) {
if (nesvnic->qp_nic_index[nic_count] == 0xf)
break;
u32temp = nes_read_indexed(nesdev,
NES_IDX_ENDNODE0_NSTAT_RX_DISCARD +
(nesvnic->qp_nic_index[nic_count]*0x200));
nesvnic->netstats.rx_dropped += u32temp;
nesvnic->endnode_nstat_rx_discard += u32temp;
u64temp = (u64)nes_read_indexed(nesdev,
NES_IDX_ENDNODE0_NSTAT_RX_OCTETS_LO +
(nesvnic->qp_nic_index[nic_count]*0x200));
u64temp += ((u64)nes_read_indexed(nesdev,
NES_IDX_ENDNODE0_NSTAT_RX_OCTETS_HI +
(nesvnic->qp_nic_index[nic_count]*0x200))) << 32;
nesvnic->endnode_nstat_rx_octets += u64temp;
nesvnic->netstats.rx_bytes += u64temp;
u64temp = (u64)nes_read_indexed(nesdev,
NES_IDX_ENDNODE0_NSTAT_RX_FRAMES_LO +
(nesvnic->qp_nic_index[nic_count]*0x200));
u64temp += ((u64)nes_read_indexed(nesdev,
NES_IDX_ENDNODE0_NSTAT_RX_FRAMES_HI +
(nesvnic->qp_nic_index[nic_count]*0x200))) << 32;
nesvnic->endnode_nstat_rx_frames += u64temp;
nesvnic->netstats.rx_packets += u64temp;
u64temp = (u64)nes_read_indexed(nesdev,
NES_IDX_ENDNODE0_NSTAT_TX_OCTETS_LO +
(nesvnic->qp_nic_index[nic_count]*0x200));
u64temp += ((u64)nes_read_indexed(nesdev,
NES_IDX_ENDNODE0_NSTAT_TX_OCTETS_HI +
(nesvnic->qp_nic_index[nic_count]*0x200))) << 32;
nesvnic->endnode_nstat_tx_octets += u64temp;
nesvnic->netstats.tx_bytes += u64temp;
u64temp = (u64)nes_read_indexed(nesdev,
NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_LO +
(nesvnic->qp_nic_index[nic_count]*0x200));
u64temp += ((u64)nes_read_indexed(nesdev,
NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_HI +
(nesvnic->qp_nic_index[nic_count]*0x200))) << 32;
nesvnic->endnode_nstat_tx_frames += u64temp;
nesvnic->netstats.tx_packets += u64temp;
u32temp = nes_read_indexed(nesdev,
NES_IDX_IPV4_TCP_REXMITS + (nesvnic->qp_nic_index[nic_count]*0x200));
nesvnic->endnode_ipv4_tcp_retransmits += u32temp;
}
target_stat_values[4] = nesvnic->nesdev->mac_pause_frames_received;
target_stat_values[5] = nesdev->nesadapter->nic_rx_eth_route_err;
target_stat_values[6] = nesvnic->tx_sw_dropped;
target_stat_values[7] = nesvnic->sq_locked;
target_stat_values[8] = nesvnic->sq_full;
target_stat_values[9] = nesvnic->segmented_tso_requests;
target_stat_values[10] = nesvnic->nesdev->mac_rx_symbol_err_frames;
target_stat_values[11] = nesvnic->nesdev->mac_rx_jabber_frames;
target_stat_values[12] = nesvnic->nesdev->mac_rx_oversized_frames;
target_stat_values[13] = nesvnic->nesdev->mac_rx_short_frames;
target_stat_values[14] = nesvnic->endnode_nstat_rx_discard;
target_stat_values[15] = nesvnic->endnode_nstat_rx_octets;
target_stat_values[16] = nesvnic->endnode_nstat_rx_frames;
target_stat_values[17] = nesvnic->endnode_nstat_tx_octets;
target_stat_values[18] = nesvnic->endnode_nstat_tx_frames;
target_stat_values[19] = mh_detected;
target_stat_values[20] = mh_pauses_sent;
target_stat_values[21] = nesvnic->endnode_ipv4_tcp_retransmits;
target_stat_values[22] = atomic_read(&cm_connects);
target_stat_values[23] = atomic_read(&cm_accepts);
target_stat_values[24] = atomic_read(&cm_disconnects);
target_stat_values[25] = atomic_read(&cm_connecteds);
target_stat_values[26] = atomic_read(&cm_connect_reqs);
target_stat_values[27] = atomic_read(&cm_rejects);
target_stat_values[28] = atomic_read(&mod_qp_timouts);
target_stat_values[29] = atomic_read(&qps_created);
target_stat_values[30] = atomic_read(&sw_qps_destroyed);
target_stat_values[31] = atomic_read(&qps_destroyed);
target_stat_values[32] = atomic_read(&cm_closes);
target_stat_values[33] = cm_packets_sent;
target_stat_values[34] = cm_packets_bounced;
target_stat_values[35] = cm_packets_created;
target_stat_values[36] = cm_packets_received;
target_stat_values[37] = cm_packets_dropped;
target_stat_values[38] = cm_packets_retrans;
target_stat_values[39] = cm_listens_created;
target_stat_values[40] = cm_listens_destroyed;
target_stat_values[41] = cm_backlog_drops;
target_stat_values[42] = atomic_read(&cm_loopbacks);
target_stat_values[43] = atomic_read(&cm_nodes_created);
target_stat_values[44] = atomic_read(&cm_nodes_destroyed);
target_stat_values[45] = atomic_read(&cm_accel_dropped_pkts);
target_stat_values[46] = atomic_read(&cm_resets_recvd);
target_stat_values[47] = int_mod_timer_init;
target_stat_values[48] = int_mod_cq_depth_1;
target_stat_values[49] = int_mod_cq_depth_4;
target_stat_values[50] = int_mod_cq_depth_16;
target_stat_values[51] = int_mod_cq_depth_24;
target_stat_values[52] = int_mod_cq_depth_32;
target_stat_values[53] = int_mod_cq_depth_128;
target_stat_values[54] = int_mod_cq_depth_256;
}
/**
* nes_netdev_get_drvinfo
*/
static void nes_netdev_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
strcpy(drvinfo->driver, DRV_NAME);
strcpy(drvinfo->bus_info, pci_name(nesvnic->nesdev->pcidev));
strcpy(drvinfo->fw_version, "TBD");
strcpy(drvinfo->version, DRV_VERSION);
drvinfo->n_stats = nes_netdev_get_stats_count(netdev);
drvinfo->testinfo_len = 0;
drvinfo->eedump_len = 0;
drvinfo->regdump_len = 0;
}
/**
* nes_netdev_set_coalesce
*/
static int nes_netdev_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *et_coalesce)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
struct nes_adapter *nesadapter = nesdev->nesadapter;
struct nes_hw_tune_timer *shared_timer = &nesadapter->tune_timer;
unsigned long flags;
spin_lock_irqsave(&nesadapter->periodic_timer_lock, flags);
if (et_coalesce->rx_max_coalesced_frames_low) {
shared_timer->threshold_low = et_coalesce->rx_max_coalesced_frames_low;
}
if (et_coalesce->rx_max_coalesced_frames_irq) {
shared_timer->threshold_target = et_coalesce->rx_max_coalesced_frames_irq;
}
if (et_coalesce->rx_max_coalesced_frames_high) {
shared_timer->threshold_high = et_coalesce->rx_max_coalesced_frames_high;
}
if (et_coalesce->rx_coalesce_usecs_low) {
shared_timer->timer_in_use_min = et_coalesce->rx_coalesce_usecs_low;
}
if (et_coalesce->rx_coalesce_usecs_high) {
shared_timer->timer_in_use_max = et_coalesce->rx_coalesce_usecs_high;
}
spin_unlock_irqrestore(&nesadapter->periodic_timer_lock, flags);
/* using this to drive total interrupt moderation */
nesadapter->et_rx_coalesce_usecs_irq = et_coalesce->rx_coalesce_usecs_irq;
if (et_coalesce->use_adaptive_rx_coalesce) {
nesadapter->et_use_adaptive_rx_coalesce = 1;
nesadapter->timer_int_limit = NES_TIMER_INT_LIMIT_DYNAMIC;
nesadapter->et_rx_coalesce_usecs_irq = 0;
if (et_coalesce->pkt_rate_low) {
nesadapter->et_pkt_rate_low = et_coalesce->pkt_rate_low;
}
} else {
nesadapter->et_use_adaptive_rx_coalesce = 0;
nesadapter->timer_int_limit = NES_TIMER_INT_LIMIT;
if (nesadapter->et_rx_coalesce_usecs_irq) {
nes_write32(nesdev->regs+NES_PERIODIC_CONTROL,
0x80000000 | ((u32)(nesadapter->et_rx_coalesce_usecs_irq*8)));
}
}
return 0;
}
/**
* nes_netdev_get_coalesce
*/
static int nes_netdev_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *et_coalesce)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
struct nes_adapter *nesadapter = nesdev->nesadapter;
struct ethtool_coalesce temp_et_coalesce;
struct nes_hw_tune_timer *shared_timer = &nesadapter->tune_timer;
unsigned long flags;
memset(&temp_et_coalesce, 0, sizeof(temp_et_coalesce));
temp_et_coalesce.rx_coalesce_usecs_irq = nesadapter->et_rx_coalesce_usecs_irq;
temp_et_coalesce.use_adaptive_rx_coalesce = nesadapter->et_use_adaptive_rx_coalesce;
temp_et_coalesce.rate_sample_interval = nesadapter->et_rate_sample_interval;
temp_et_coalesce.pkt_rate_low = nesadapter->et_pkt_rate_low;
spin_lock_irqsave(&nesadapter->periodic_timer_lock, flags);
temp_et_coalesce.rx_max_coalesced_frames_low = shared_timer->threshold_low;
temp_et_coalesce.rx_max_coalesced_frames_irq = shared_timer->threshold_target;
temp_et_coalesce.rx_max_coalesced_frames_high = shared_timer->threshold_high;
temp_et_coalesce.rx_coalesce_usecs_low = shared_timer->timer_in_use_min;
temp_et_coalesce.rx_coalesce_usecs_high = shared_timer->timer_in_use_max;
if (nesadapter->et_use_adaptive_rx_coalesce) {
temp_et_coalesce.rx_coalesce_usecs_irq = shared_timer->timer_in_use;
}
spin_unlock_irqrestore(&nesadapter->periodic_timer_lock, flags);
memcpy(et_coalesce, &temp_et_coalesce, sizeof(*et_coalesce));
return 0;
}
/**
* nes_netdev_get_pauseparam
*/
static void nes_netdev_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *et_pauseparam)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
et_pauseparam->autoneg = 0;
et_pauseparam->rx_pause = (nesvnic->nesdev->disable_rx_flow_control == 0) ? 1:0;
et_pauseparam->tx_pause = (nesvnic->nesdev->disable_tx_flow_control == 0) ? 1:0;
}
/**
* nes_netdev_set_pauseparam
*/
static int nes_netdev_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *et_pauseparam)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
u32 u32temp;
if (et_pauseparam->autoneg) {
/* TODO: should return unsupported */
return 0;
}
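/* Toggle TX pause generation via the MAC TX config register and RX
 * pause honoring via the MPP debug register, but only when the
 * requested state differs from the cached one. */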
if ((et_pauseparam->tx_pause == 1) && (nesdev->disable_tx_flow_control == 1)) {
u32temp = nes_read_indexed(nesdev,
NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200));
u32temp |= NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE;
nes_write_indexed(nesdev,
NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200), u32temp);
nesdev->disable_tx_flow_control = 0;
} else if ((et_pauseparam->tx_pause == 0) && (nesdev->disable_tx_flow_control == 0)) {
u32temp = nes_read_indexed(nesdev,
NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200));
u32temp &= ~NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE;
nes_write_indexed(nesdev,
NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200), u32temp);
nesdev->disable_tx_flow_control = 1;
}
if ((et_pauseparam->rx_pause == 1) && (nesdev->disable_rx_flow_control == 1)) {
u32temp = nes_read_indexed(nesdev,
NES_IDX_MPP_DEBUG + (nesdev->mac_index*0x40));
u32temp &= ~NES_IDX_MPP_DEBUG_PORT_DISABLE_PAUSE;
nes_write_indexed(nesdev,
NES_IDX_MPP_DEBUG + (nesdev->mac_index*0x40), u32temp);
nesdev->disable_rx_flow_control = 0;
} else if ((et_pauseparam->rx_pause == 0) && (nesdev->disable_rx_flow_control == 0)) {
u32temp = nes_read_indexed(nesdev,
NES_IDX_MPP_DEBUG + (nesdev->mac_index*0x40));
u32temp |= NES_IDX_MPP_DEBUG_PORT_DISABLE_PAUSE;
nes_write_indexed(nesdev,
NES_IDX_MPP_DEBUG + (nesdev->mac_index*0x40), u32temp);
nesdev->disable_rx_flow_control = 1;
}
return 0;
}
/**
* nes_netdev_get_settings
*/
static int nes_netdev_get_settings(struct net_device *netdev, struct ethtool_cmd *et_cmd)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
struct nes_adapter *nesadapter = nesdev->nesadapter;
u16 phy_data;
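/* Always report full duplex; speed, transceiver and autoneg settings
 * depend on whether the adapter runs in 1G mode and on the PHY type. */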
et_cmd->duplex = DUPLEX_FULL;
et_cmd->port = PORT_MII;
if (nesadapter->OneG_Mode) {
et_cmd->supported = SUPPORTED_1000baseT_Full|SUPPORTED_Autoneg;
et_cmd->advertising = ADVERTISED_1000baseT_Full|ADVERTISED_Autoneg;
et_cmd->speed = SPEED_1000;
nes_read_1G_phy_reg(nesdev, 0, nesadapter->phy_index[nesdev->mac_index],
&phy_data);
if (phy_data&0x1000) {
et_cmd->autoneg = AUTONEG_ENABLE;
} else {
et_cmd->autoneg = AUTONEG_DISABLE;
}
et_cmd->transceiver = XCVR_EXTERNAL;
et_cmd->phy_address = nesadapter->phy_index[nesdev->mac_index];
} else {
if (nesadapter->phy_type[nesvnic->logical_port] == NES_PHY_TYPE_IRIS) {
et_cmd->transceiver = XCVR_EXTERNAL;
et_cmd->port = PORT_FIBRE;
et_cmd->supported = SUPPORTED_FIBRE;
et_cmd->advertising = ADVERTISED_FIBRE;
et_cmd->phy_address = nesadapter->phy_index[nesdev->mac_index];
} else {
et_cmd->transceiver = XCVR_INTERNAL;
et_cmd->supported = SUPPORTED_10000baseT_Full;
et_cmd->advertising = ADVERTISED_10000baseT_Full;
et_cmd->phy_address = nesdev->mac_index;
}
et_cmd->speed = SPEED_10000;
et_cmd->autoneg = AUTONEG_DISABLE;
}
et_cmd->maxtxpkt = 511;
et_cmd->maxrxpkt = 511;
return 0;
}
/**
* nes_netdev_set_settings
*/
static int nes_netdev_set_settings(struct net_device *netdev, struct ethtool_cmd *et_cmd)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
struct nes_adapter *nesadapter = nesdev->nesadapter;
u16 phy_data;
if (nesadapter->OneG_Mode) {
nes_read_1G_phy_reg(nesdev, 0, nesadapter->phy_index[nesdev->mac_index],
&phy_data);
if (et_cmd->autoneg) {
/* Turn on Full duplex, Autoneg, and restart autonegotiation */
phy_data |= 0x1300;
} else {
/* Turn off autoneg */
phy_data &= ~0x1000;
}
nes_write_1G_phy_reg(nesdev, 0, nesadapter->phy_index[nesdev->mac_index],
phy_data);
}
return 0;
}
static struct ethtool_ops nes_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_settings = nes_netdev_get_settings,
.set_settings = nes_netdev_set_settings,
.get_tx_csum = ethtool_op_get_tx_csum,
.get_rx_csum = nes_netdev_get_rx_csum,
.get_sg = ethtool_op_get_sg,
.get_strings = nes_netdev_get_strings,
.get_stats_count = nes_netdev_get_stats_count,
.get_ethtool_stats = nes_netdev_get_ethtool_stats,
.get_drvinfo = nes_netdev_get_drvinfo,
.get_coalesce = nes_netdev_get_coalesce,
.set_coalesce = nes_netdev_set_coalesce,
.get_pauseparam = nes_netdev_get_pauseparam,
.set_pauseparam = nes_netdev_set_pauseparam,
.set_tx_csum = ethtool_op_set_tx_csum,
.set_rx_csum = nes_netdev_set_rx_csum,
.set_sg = ethtool_op_set_sg,
.get_tso = ethtool_op_get_tso,
.set_tso = ethtool_op_set_tso,
};
static void nes_netdev_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
u32 u32temp;
nesvnic->vlan_grp = grp;
/* Enable/Disable VLAN Stripping */
u32temp = nes_read_indexed(nesdev, NES_IDX_PCIX_DIAG);
if (grp)
u32temp &= 0xfdffffff;
else
u32temp |= 0x02000000;
nes_write_indexed(nesdev, NES_IDX_PCIX_DIAG, u32temp);
}
/**
* nes_netdev_init - initialize network device
*/
struct net_device *nes_netdev_init(struct nes_device *nesdev,
void __iomem *mmio_addr)
{
u64 u64temp;
struct nes_vnic *nesvnic = NULL;
struct net_device *netdev;
struct nic_qp_map *curr_qp_map;
u32 u32temp;
u16 phy_data;
u16 temp_phy_data;
netdev = alloc_etherdev(sizeof(struct nes_vnic));
if (!netdev) {
printk(KERN_ERR PFX "nesvnic etherdev alloc failed");
return NULL;
}
nes_debug(NES_DBG_INIT, "netdev = %p, %s\n", netdev, netdev->name);
SET_NETDEV_DEV(netdev, &nesdev->pcidev->dev);
nesvnic = netdev_priv(netdev);
memset(nesvnic, 0, sizeof(*nesvnic));
netdev->open = nes_netdev_open;
netdev->stop = nes_netdev_stop;
netdev->hard_start_xmit = nes_netdev_start_xmit;
netdev->get_stats = nes_netdev_get_stats;
netdev->tx_timeout = nes_netdev_tx_timeout;
netdev->set_mac_address = nes_netdev_set_mac_address;
netdev->set_multicast_list = nes_netdev_set_multicast_list;
netdev->change_mtu = nes_netdev_change_mtu;
netdev->watchdog_timeo = NES_TX_TIMEOUT;
netdev->irq = nesdev->pcidev->irq;
netdev->mtu = ETH_DATA_LEN;
netdev->hard_header_len = ETH_HLEN;
netdev->addr_len = ETH_ALEN;
netdev->type = ARPHRD_ETHER;
netdev->features = NETIF_F_HIGHDMA;
netdev->ethtool_ops = &nes_ethtool_ops;
netif_napi_add(netdev, &nesvnic->napi, nes_netdev_poll, 128);
nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n");
netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
netdev->vlan_rx_register = nes_netdev_vlan_rx_register;
netdev->features |= NETIF_F_LLTX;
/* Fill in the port structure */
nesvnic->netdev = netdev;
nesvnic->nesdev = nesdev;
nesvnic->msg_enable = netif_msg_init(debug, default_msg);
nesvnic->netdev_index = nesdev->netdev_count;
nesvnic->perfect_filter_index = nesdev->nesadapter->netdev_count;
nesvnic->max_frame_size = netdev->mtu+netdev->hard_header_len;
curr_qp_map = nic_qp_mapping_per_function[PCI_FUNC(nesdev->pcidev->devfn)];
nesvnic->nic.qp_id = curr_qp_map[nesdev->netdev_count].qpid;
nesvnic->nic_index = curr_qp_map[nesdev->netdev_count].nic_index;
nesvnic->logical_port = curr_qp_map[nesdev->netdev_count].logical_port;
/* Setup the burned in MAC address */
u64temp = (u64)nesdev->nesadapter->mac_addr_low;
u64temp += ((u64)nesdev->nesadapter->mac_addr_high) << 32;
u64temp += nesvnic->nic_index;
netdev->dev_addr[0] = (u8)(u64temp>>40);
netdev->dev_addr[1] = (u8)(u64temp>>32);
netdev->dev_addr[2] = (u8)(u64temp>>24);
netdev->dev_addr[3] = (u8)(u64temp>>16);
netdev->dev_addr[4] = (u8)(u64temp>>8);
netdev->dev_addr[5] = (u8)u64temp;
memcpy(netdev->perm_addr, netdev->dev_addr, 6);
if ((nesvnic->logical_port < 2) || (nesdev->nesadapter->hw_rev != NE020_REV)) {
netdev->features |= NETIF_F_GSO | NETIF_F_TSO | NETIF_F_SG | NETIF_F_IP_CSUM;
} else {
netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
}
nes_debug(NES_DBG_INIT, "nesvnic = %p, reported features = 0x%lX, QPid = %d,"
" nic_index = %d, logical_port = %d, mac_index = %d.\n",
nesvnic, (unsigned long)netdev->features, nesvnic->nic.qp_id,
nesvnic->nic_index, nesvnic->logical_port, nesdev->mac_index);
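/* Assign the NIC QP indices owned by this vnic; the mapping depends on
 * how many physical ports the adapter exposes, and unused entries are
 * marked with 0xf. */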
if (nesvnic->nesdev->nesadapter->port_count == 1) {
nesvnic->qp_nic_index[0] = nesvnic->nic_index;
nesvnic->qp_nic_index[1] = nesvnic->nic_index + 1;
if (nes_drv_opt & NES_DRV_OPT_DUAL_LOGICAL_PORT) {
nesvnic->qp_nic_index[2] = 0xf;
nesvnic->qp_nic_index[3] = 0xf;
} else {
nesvnic->qp_nic_index[2] = nesvnic->nic_index + 2;
nesvnic->qp_nic_index[3] = nesvnic->nic_index + 3;
}
} else {
if (nesvnic->nesdev->nesadapter->port_count == 2) {
nesvnic->qp_nic_index[0] = nesvnic->nic_index;
nesvnic->qp_nic_index[1] = nesvnic->nic_index + 2;
nesvnic->qp_nic_index[2] = 0xf;
nesvnic->qp_nic_index[3] = 0xf;
} else {
nesvnic->qp_nic_index[0] = nesvnic->nic_index;
nesvnic->qp_nic_index[1] = 0xf;
nesvnic->qp_nic_index[2] = 0xf;
nesvnic->qp_nic_index[3] = 0xf;
}
}
nesvnic->next_qp_nic_index = 0;
if (nesdev->netdev_count == 0) {
nesvnic->rdma_enabled = 1;
} else {
nesvnic->rdma_enabled = 0;
}
nesvnic->nic_cq.cq_number = nesvnic->nic.qp_id;
spin_lock_init(&nesvnic->tx_lock);
nesdev->netdev[nesdev->netdev_count] = netdev;
nes_debug(NES_DBG_INIT, "Adding nesvnic (%p) to the adapters nesvnic_list for MAC%d.\n",
nesvnic, nesdev->mac_index);
list_add_tail(&nesvnic->list, &nesdev->nesadapter->nesvnic_list[nesdev->mac_index]);
if ((nesdev->netdev_count == 0) &&
(PCI_FUNC(nesdev->pcidev->devfn) == nesdev->mac_index)) {
nes_debug(NES_DBG_INIT, "Setting up PHY interrupt mask. Using register index 0x%04X\n",
NES_IDX_PHY_PCS_CONTROL_STATUS0+(0x200*(nesvnic->logical_port&1)));
u32temp = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 +
(0x200*(nesvnic->logical_port&1)));
u32temp |= 0x00200000;
nes_write_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 +
(0x200*(nesvnic->logical_port&1)), u32temp);
u32temp = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 +
(0x200*(nesvnic->logical_port&1)) );
if ((u32temp&0x0f1f0000) == 0x0f0f0000) {
if (nesdev->nesadapter->phy_type[nesvnic->logical_port] == NES_PHY_TYPE_IRIS) {
nes_init_phy(nesdev);
nes_read_10G_phy_reg(nesdev, 1,
nesdev->nesadapter->phy_index[nesvnic->logical_port]);
temp_phy_data = (u16)nes_read_indexed(nesdev,
NES_IDX_MAC_MDIO_CONTROL);
u32temp = 20;
do {
nes_read_10G_phy_reg(nesdev, 1,
nesdev->nesadapter->phy_index[nesvnic->logical_port]);
phy_data = (u16)nes_read_indexed(nesdev,
NES_IDX_MAC_MDIO_CONTROL);
if ((phy_data == temp_phy_data) || (!(--u32temp)))
break;
temp_phy_data = phy_data;
} while (1);
if (phy_data & 4) {
nes_debug(NES_DBG_INIT, "The Link is UP!!.\n");
nesvnic->linkup = 1;
} else {
nes_debug(NES_DBG_INIT, "The Link is DOWN!!.\n");
}
} else {
nes_debug(NES_DBG_INIT, "The Link is UP!!.\n");
nesvnic->linkup = 1;
}
}
nes_debug(NES_DBG_INIT, "Setting up MAC interrupt mask.\n");
/* clear the MAC interrupt status, assumes direct logical to physical mapping */
u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS+(0x200*nesvnic->logical_port));
nes_debug(NES_DBG_INIT, "Phy interrupt status = 0x%X.\n", u32temp);
nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS+(0x200*nesvnic->logical_port), u32temp);
if (nesdev->nesadapter->phy_type[nesvnic->logical_port] != NES_PHY_TYPE_IRIS)
nes_init_phy(nesdev);
nes_write_indexed(nesdev, NES_IDX_MAC_INT_MASK+(0x200*nesvnic->logical_port),
~(NES_MAC_INT_LINK_STAT_CHG | NES_MAC_INT_XGMII_EXT |
NES_MAC_INT_TX_UNDERFLOW | NES_MAC_INT_TX_ERROR));
}
return netdev;
}
/**
* nes_netdev_destroy - destroy network device structure
*/
void nes_netdev_destroy(struct net_device *netdev)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
/* make sure 'stop' method is called by Linux stack */
/* nes_netdev_stop(netdev); */
list_del(&nesvnic->list);
if (nesvnic->of_device_registered) {
nes_destroy_ofa_device(nesvnic->nesibdev);
}
free_netdev(netdev);
}
/**
* nes_nic_cm_xmit -- CM calls this to send out pkts
*/
int nes_nic_cm_xmit(struct sk_buff *skb, struct net_device *netdev)
{
int ret;
skb->dev = netdev;
ret = dev_queue_xmit(skb);
if (ret) {
nes_debug(NES_DBG_CM, "Bad return code from dev_queue_xmit %d\n", ret);
}
return ret;
}
/*
* Copyright (c) 2006 - 2008 NetEffect. All rights reserved.
* Copyright (c) 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Cisco Systems. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef NES_USER_H
#define NES_USER_H
#include <linux/types.h>
#define NES_ABI_USERSPACE_VER 1
#define NES_ABI_KERNEL_VER 1
/*
* Make sure that all structs defined in this file remain laid out so
* that they pack the same way on 32-bit and 64-bit architectures (to
* avoid incompatibility between 32-bit userspace and 64-bit kernels).
* In particular do not use pointer types -- pass pointers in __u64
* instead.
*/
struct nes_alloc_ucontext_req {
__u32 reserved32;
__u8 userspace_ver;
__u8 reserved8[3];
};
struct nes_alloc_ucontext_resp {
__u32 max_pds; /* maximum pds allowed for this user process */
__u32 max_qps; /* maximum qps allowed for this user process */
__u32 wq_size; /* size of the WQs (sq+rq) allocated to the mmaped area */
__u8 virtwq; /* flag to indicate if virtual WQ are to be used or not */
__u8 kernel_ver;
__u8 reserved[2];
};
struct nes_alloc_pd_resp {
__u32 pd_id;
__u32 mmap_db_index;
};
struct nes_create_cq_req {
__u64 user_cq_buffer;
__u32 mcrqf;
__u8 reserved[4];
};
struct nes_create_qp_req {
__u64 user_wqe_buffers;
};
enum iwnes_memreg_type {
IWNES_MEMREG_TYPE_MEM = 0x0000,
IWNES_MEMREG_TYPE_QP = 0x0001,
IWNES_MEMREG_TYPE_CQ = 0x0002,
IWNES_MEMREG_TYPE_MW = 0x0003,
IWNES_MEMREG_TYPE_FMR = 0x0004,
};
struct nes_mem_reg_req {
__u32 reg_type; /* indicates if id is memory, QP or CQ */
__u32 reserved;
};
struct nes_create_cq_resp {
__u32 cq_id;
__u32 cq_size;
__u32 mmap_db_index;
__u32 reserved;
};
struct nes_create_qp_resp {
__u32 qp_id;
__u32 actual_sq_size;
__u32 actual_rq_size;
__u32 mmap_sq_db_index;
__u32 mmap_rq_db_index;
__u32 nes_drv_opt;
};
#endif /* NES_USER_H */
/*
* Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>
#include "nes.h"
static u16 nes_read16_eeprom(void __iomem *addr, u16 offset);
u32 mh_detected;
u32 mh_pauses_sent;
/**
* nes_read_eeprom_values -
*/
int nes_read_eeprom_values(struct nes_device *nesdev, struct nes_adapter *nesadapter)
{
u32 mac_addr_low;
u16 mac_addr_high;
u16 eeprom_data;
u16 eeprom_offset;
u16 next_section_address;
u16 sw_section_ver;
u8 major_ver = 0;
u8 minor_ver = 0;
/* TODO: deal with EEPROM endian issues */
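/* The EEPROM is laid out as a chain of sections, each tagged with a
 * 16-bit signature (e.g. 0x5746 for a firmware section, 0x5753 for a
 * software section); walk the chain to locate the firmware revision
 * and the software parameter block. */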
if (nesadapter->firmware_eeprom_offset == 0) {
/* Read the EEPROM Parameters */
eeprom_data = nes_read16_eeprom(nesdev->regs, 0);
nes_debug(NES_DBG_HW, "EEPROM Offset 0 = 0x%04X\n", eeprom_data);
eeprom_offset = 2 + (((eeprom_data & 0x007f) << 3) <<
((eeprom_data & 0x0080) >> 7));
nes_debug(NES_DBG_HW, "Firmware Offset = 0x%04X\n", eeprom_offset);
nesadapter->firmware_eeprom_offset = eeprom_offset;
eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 4);
if (eeprom_data != 0x5746) {
nes_debug(NES_DBG_HW, "Not a valid Firmware Image = 0x%04X\n", eeprom_data);
return -1;
}
eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 2);
nes_debug(NES_DBG_HW, "EEPROM Offset %u = 0x%04X\n",
eeprom_offset + 2, eeprom_data);
eeprom_offset += ((eeprom_data & 0x00ff) << 3) << ((eeprom_data & 0x0100) >> 8);
nes_debug(NES_DBG_HW, "Software Offset = 0x%04X\n", eeprom_offset);
nesadapter->software_eeprom_offset = eeprom_offset;
eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 4);
if (eeprom_data != 0x5753) {
printk("Not a valid Software Image = 0x%04X\n", eeprom_data);
return -1;
}
sw_section_ver = nes_read16_eeprom(nesdev->regs, nesadapter->software_eeprom_offset + 6);
nes_debug(NES_DBG_HW, "Software section version number = 0x%04X\n",
sw_section_ver);
eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 2);
nes_debug(NES_DBG_HW, "EEPROM Offset %u (next section) = 0x%04X\n",
eeprom_offset + 2, eeprom_data);
next_section_address = eeprom_offset + (((eeprom_data & 0x00ff) << 3) <<
((eeprom_data & 0x0100) >> 8));
eeprom_data = nes_read16_eeprom(nesdev->regs, next_section_address + 4);
if (eeprom_data != 0x414d) {
nes_debug(NES_DBG_HW, "EEPROM Changed offset should be 0x414d but was 0x%04X\n",
eeprom_data);
goto no_fw_rev;
}
eeprom_offset = next_section_address;
eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 2);
nes_debug(NES_DBG_HW, "EEPROM Offset %u (next section) = 0x%04X\n",
eeprom_offset + 2, eeprom_data);
next_section_address = eeprom_offset + (((eeprom_data & 0x00ff) << 3) <<
((eeprom_data & 0x0100) >> 8));
eeprom_data = nes_read16_eeprom(nesdev->regs, next_section_address + 4);
if (eeprom_data != 0x4f52) {
nes_debug(NES_DBG_HW, "EEPROM Changed offset should be 0x4f52 but was 0x%04X\n",
eeprom_data);
goto no_fw_rev;
}
eeprom_offset = next_section_address;
eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 2);
nes_debug(NES_DBG_HW, "EEPROM Offset %u (next section) = 0x%04X\n",
eeprom_offset + 2, eeprom_data);
next_section_address = eeprom_offset + ((eeprom_data & 0x00ff) << 3);
eeprom_data = nes_read16_eeprom(nesdev->regs, next_section_address + 4);
if (eeprom_data != 0x5746) {
nes_debug(NES_DBG_HW, "EEPROM Changed offset should be 0x5746 but was 0x%04X\n",
eeprom_data);
goto no_fw_rev;
}
eeprom_offset = next_section_address;
eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 2);
nes_debug(NES_DBG_HW, "EEPROM Offset %u (next section) = 0x%04X\n",
eeprom_offset + 2, eeprom_data);
next_section_address = eeprom_offset + ((eeprom_data & 0x00ff) << 3);
eeprom_data = nes_read16_eeprom(nesdev->regs, next_section_address + 4);
if (eeprom_data != 0x5753) {
nes_debug(NES_DBG_HW, "EEPROM Changed offset should be 0x5753 but was 0x%04X\n",
eeprom_data);
goto no_fw_rev;
}
eeprom_offset = next_section_address;
eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 2);
nes_debug(NES_DBG_HW, "EEPROM Offset %u (next section) = 0x%04X\n",
eeprom_offset + 2, eeprom_data);
next_section_address = eeprom_offset + ((eeprom_data & 0x00ff) << 3);
eeprom_data = nes_read16_eeprom(nesdev->regs, next_section_address + 4);
if (eeprom_data != 0x414d) {
nes_debug(NES_DBG_HW, "EEPROM Changed offset should be 0x414d but was 0x%04X\n",
eeprom_data);
goto no_fw_rev;
}
eeprom_offset = next_section_address;
eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 2);
nes_debug(NES_DBG_HW, "EEPROM Offset %u (next section) = 0x%04X\n",
eeprom_offset + 2, eeprom_data);
next_section_address = eeprom_offset + ((eeprom_data & 0x00ff) << 3);
eeprom_data = nes_read16_eeprom(nesdev->regs, next_section_address + 4);
if (eeprom_data != 0x464e) {
nes_debug(NES_DBG_HW, "EEPROM Changed offset should be 0x464e but was 0x%04X\n",
eeprom_data);
goto no_fw_rev;
}
eeprom_data = nes_read16_eeprom(nesdev->regs, next_section_address + 8);
printk(PFX "Firmware version %u.%u\n", (u8)(eeprom_data>>8), (u8)eeprom_data);
major_ver = (u8)(eeprom_data >> 8);
minor_ver = (u8)(eeprom_data);
if (nes_drv_opt & NES_DRV_OPT_DISABLE_VIRT_WQ) {
nes_debug(NES_DBG_HW, "Virtual WQs have been disabled\n");
} else if (((major_ver == 2) && (minor_ver > 21)) || ((major_ver > 2) && (major_ver != 255))) {
nesadapter->virtwq = 1;
}
nesadapter->firmware_version = (((u32)(u8)(eeprom_data>>8)) << 16) +
(u32)((u8)eeprom_data);
no_fw_rev:
/* eeprom is valid */
eeprom_offset = nesadapter->software_eeprom_offset;
eeprom_offset += 8;
nesadapter->netdev_max = (u8)nes_read16_eeprom(nesdev->regs, eeprom_offset);
eeprom_offset += 2;
mac_addr_high = nes_read16_eeprom(nesdev->regs, eeprom_offset);
eeprom_offset += 2;
mac_addr_low = (u32)nes_read16_eeprom(nesdev->regs, eeprom_offset);
eeprom_offset += 2;
mac_addr_low <<= 16;
mac_addr_low += (u32)nes_read16_eeprom(nesdev->regs, eeprom_offset);
nes_debug(NES_DBG_HW, "Base MAC Address = 0x%04X%08X\n",
mac_addr_high, mac_addr_low);
nes_debug(NES_DBG_HW, "MAC Address count = %u\n", nesadapter->netdev_max);
nesadapter->mac_addr_low = mac_addr_low;
nesadapter->mac_addr_high = mac_addr_high;
/* Read the Phy Type array */
eeprom_offset += 10;
eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
nesadapter->phy_type[0] = (u8)(eeprom_data >> 8);
nesadapter->phy_type[1] = (u8)eeprom_data;
/* Read the port array */
eeprom_offset += 2;
eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
nesadapter->phy_type[2] = (u8)(eeprom_data >> 8);
nesadapter->phy_type[3] = (u8)eeprom_data;
/* port_count is set by soft reset reg */
nes_debug(NES_DBG_HW, "port_count = %u, port 0 -> %u, port 1 -> %u,"
" port 2 -> %u, port 3 -> %u\n",
nesadapter->port_count,
nesadapter->phy_type[0], nesadapter->phy_type[1],
nesadapter->phy_type[2], nesadapter->phy_type[3]);
/* Read PD config array */
eeprom_offset += 10;
eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
nesadapter->pd_config_size[0] = eeprom_data;
eeprom_offset += 2;
eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
nesadapter->pd_config_base[0] = eeprom_data;
nes_debug(NES_DBG_HW, "PD0 config, size=0x%04x, base=0x%04x\n",
nesadapter->pd_config_size[0], nesadapter->pd_config_base[0]);
eeprom_offset += 2;
eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
nesadapter->pd_config_size[1] = eeprom_data;
eeprom_offset += 2;
eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
nesadapter->pd_config_base[1] = eeprom_data;
nes_debug(NES_DBG_HW, "PD1 config, size=0x%04x, base=0x%04x\n",
nesadapter->pd_config_size[1], nesadapter->pd_config_base[1]);
eeprom_offset += 2;
eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
nesadapter->pd_config_size[2] = eeprom_data;
eeprom_offset += 2;
eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
nesadapter->pd_config_base[2] = eeprom_data;
nes_debug(NES_DBG_HW, "PD2 config, size=0x%04x, base=0x%04x\n",
nesadapter->pd_config_size[2], nesadapter->pd_config_base[2]);
eeprom_offset += 2;
eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
nesadapter->pd_config_size[3] = eeprom_data;
eeprom_offset += 2;
eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
nesadapter->pd_config_base[3] = eeprom_data;
nes_debug(NES_DBG_HW, "PD3 config, size=0x%04x, base=0x%04x\n",
nesadapter->pd_config_size[3], nesadapter->pd_config_base[3]);
/* Read Rx Pool Size */
eeprom_offset += 22; /* 46 */
eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
eeprom_offset += 2;
nesadapter->rx_pool_size = (((u32)eeprom_data) << 16) +
nes_read16_eeprom(nesdev->regs, eeprom_offset);
nes_debug(NES_DBG_HW, "rx_pool_size = 0x%08X\n", nesadapter->rx_pool_size);
eeprom_offset += 2;
eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
eeprom_offset += 2;
nesadapter->tx_pool_size = (((u32)eeprom_data) << 16) +
nes_read16_eeprom(nesdev->regs, eeprom_offset);
nes_debug(NES_DBG_HW, "tx_pool_size = 0x%08X\n", nesadapter->tx_pool_size);
eeprom_offset += 2;
eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
eeprom_offset += 2;
nesadapter->rx_threshold = (((u32)eeprom_data) << 16) +
nes_read16_eeprom(nesdev->regs, eeprom_offset);
nes_debug(NES_DBG_HW, "rx_threshold = 0x%08X\n", nesadapter->rx_threshold);
eeprom_offset += 2;
eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
eeprom_offset += 2;
nesadapter->tcp_timer_core_clk_divisor = (((u32)eeprom_data) << 16) +
nes_read16_eeprom(nesdev->regs, eeprom_offset);
nes_debug(NES_DBG_HW, "tcp_timer_core_clk_divisor = 0x%08X\n",
nesadapter->tcp_timer_core_clk_divisor);
eeprom_offset += 2;
eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
eeprom_offset += 2;
nesadapter->iwarp_config = (((u32)eeprom_data) << 16) +
nes_read16_eeprom(nesdev->regs, eeprom_offset);
nes_debug(NES_DBG_HW, "iwarp_config = 0x%08X\n", nesadapter->iwarp_config);
eeprom_offset += 2;
eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
eeprom_offset += 2;
nesadapter->cm_config = (((u32)eeprom_data) << 16) +
nes_read16_eeprom(nesdev->regs, eeprom_offset);
nes_debug(NES_DBG_HW, "cm_config = 0x%08X\n", nesadapter->cm_config);
eeprom_offset += 2;
eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
eeprom_offset += 2;
nesadapter->sws_timer_config = (((u32)eeprom_data) << 16) +
nes_read16_eeprom(nesdev->regs, eeprom_offset);
nes_debug(NES_DBG_HW, "sws_timer_config = 0x%08X\n", nesadapter->sws_timer_config);
eeprom_offset += 2;
eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
eeprom_offset += 2;
nesadapter->tcp_config1 = (((u32)eeprom_data) << 16) +
nes_read16_eeprom(nesdev->regs, eeprom_offset);
nes_debug(NES_DBG_HW, "tcp_config1 = 0x%08X\n", nesadapter->tcp_config1);
eeprom_offset += 2;
eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
eeprom_offset += 2;
nesadapter->wqm_wat = (((u32)eeprom_data) << 16) +
nes_read16_eeprom(nesdev->regs, eeprom_offset);
nes_debug(NES_DBG_HW, "wqm_wat = 0x%08X\n", nesadapter->wqm_wat);
eeprom_offset += 2;
eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
eeprom_offset += 2;
nesadapter->core_clock = (((u32)eeprom_data) << 16) +
nes_read16_eeprom(nesdev->regs, eeprom_offset);
nes_debug(NES_DBG_HW, "core_clock = 0x%08X\n", nesadapter->core_clock);
if ((sw_section_ver) && (nesadapter->hw_rev != NE020_REV)) {
eeprom_offset += 2;
eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
nesadapter->phy_index[0] = (eeprom_data & 0xff00)>>8;
nesadapter->phy_index[1] = eeprom_data & 0x00ff;
eeprom_offset += 2;
eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
nesadapter->phy_index[2] = (eeprom_data & 0xff00)>>8;
nesadapter->phy_index[3] = eeprom_data & 0x00ff;
} else {
nesadapter->phy_index[0] = 4;
nesadapter->phy_index[1] = 5;
nesadapter->phy_index[2] = 6;
nesadapter->phy_index[3] = 7;
}
nes_debug(NES_DBG_HW, "Phy address map = 0 > %u, 1 > %u, 2 > %u, 3 > %u\n",
nesadapter->phy_index[0],nesadapter->phy_index[1],
nesadapter->phy_index[2],nesadapter->phy_index[3]);
}
return 0;
}
/**
* nes_read16_eeprom
*/
static u16 nes_read16_eeprom(void __iomem *addr, u16 offset)
{
writel(NES_EEPROM_READ_REQUEST + (offset >> 1),
(void __iomem *)addr + NES_EEPROM_COMMAND);
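/* Spin until the controller clears the read-request bit, then the
 * 16-bit result can be read from the data register. */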
do {
} while (readl((void __iomem *)addr + NES_EEPROM_COMMAND) &
NES_EEPROM_READ_REQUEST);
return readw((void __iomem *)addr + NES_EEPROM_DATA);
}
/**
* nes_write_1G_phy_reg
*/
void nes_write_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u16 data)
{
struct nes_adapter *nesadapter = nesdev->nesadapter;
u32 u32temp;
u32 counter;
unsigned long flags;
spin_lock_irqsave(&nesadapter->phy_lock, flags);
nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL,
0x50020000 | data | ((u32)phy_reg << 18) | ((u32)phy_addr << 23));
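/* Poll the MAC interrupt status for completion of the MDIO write
 * (up to 100 iterations of 30 us each). */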
for (counter = 0; counter < 100 ; counter++) {
udelay(30);
u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS);
if (u32temp & 1) {
/* nes_debug(NES_DBG_PHY, "Phy interrupt status = 0x%X.\n", u32temp); */
nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS, 1);
break;
}
}
if (!(u32temp & 1))
nes_debug(NES_DBG_PHY, "Phy is not responding. interrupt status = 0x%X.\n",
u32temp);
spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
}
/**
* nes_read_1G_phy_reg
* This routine only issues the read, the data must be read
* separately.
*/
void nes_read_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u16 *data)
{
struct nes_adapter *nesadapter = nesdev->nesadapter;
u32 u32temp;
u32 counter;
unsigned long flags;
/* nes_debug(NES_DBG_PHY, "phy addr = %d, mac_index = %d\n",
phy_addr, nesdev->mac_index); */
spin_lock_irqsave(&nesadapter->phy_lock, flags);
nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL,
0x60020000 | ((u32)phy_reg << 18) | ((u32)phy_addr << 23));
for (counter = 0; counter < 100 ; counter++) {
udelay(30);
u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS);
if (u32temp & 1) {
/* nes_debug(NES_DBG_PHY, "Phy interrupt status = 0x%X.\n", u32temp); */
nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS, 1);
break;
}
}
if (!(u32temp & 1)) {
nes_debug(NES_DBG_PHY, "Phy is not responding. interrupt status = 0x%X.\n",
u32temp);
*data = 0xffff;
} else {
*data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
}
spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
}
/**
* nes_write_10G_phy_reg
*/
void nes_write_10G_phy_reg(struct nes_device *nesdev, u16 phy_reg,
u8 phy_addr, u16 data)
{
u32 dev_addr;
u32 port_addr;
u32 u32temp;
u32 counter;
dev_addr = 1;
port_addr = phy_addr;
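/* The 10G PHY uses a two-cycle MDIO sequence: latch the register
 * address first, then issue the data write, polling the MAC interrupt
 * status for completion after each cycle. */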
/* set address */
nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL,
0x00020000 | (u32)phy_reg | (((u32)dev_addr) << 18) | (((u32)port_addr) << 23));
for (counter = 0; counter < 100 ; counter++) {
udelay(30);
u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS);
if (u32temp & 1) {
nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS, 1);
break;
}
}
if (!(u32temp & 1))
nes_debug(NES_DBG_PHY, "Phy is not responding. interrupt status = 0x%X.\n",
u32temp);
/* set data */
nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL,
0x10020000 | (u32)data | (((u32)dev_addr) << 18) | (((u32)port_addr) << 23));
for (counter = 0; counter < 100 ; counter++) {
udelay(30);
u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS);
if (u32temp & 1) {
nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS, 1);
break;
}
}
if (!(u32temp & 1))
nes_debug(NES_DBG_PHY, "Phy is not responding. interrupt status = 0x%X.\n",
u32temp);
}
/**
* nes_read_10G_phy_reg
* This routine only issues the read, the data must be read
* separately.
*/
void nes_read_10G_phy_reg(struct nes_device *nesdev, u16 phy_reg, u8 phy_addr)
{
u32 dev_addr;
u32 port_addr;
u32 u32temp;
u32 counter;
dev_addr = 1;
port_addr = phy_addr;
/* set address */
nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL,
0x00020000 | (u32)phy_reg | (((u32)dev_addr) << 18) | (((u32)port_addr) << 23));
for (counter = 0; counter < 100 ; counter++) {
udelay(30);
u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS);
if (u32temp & 1) {
nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS, 1);
break;
}
}
if (!(u32temp & 1))
nes_debug(NES_DBG_PHY, "Phy is not responding. interrupt status = 0x%X.\n",
u32temp);
/* issue read */
nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL,
0x30020000 | (((u32)dev_addr) << 18) | (((u32)port_addr) << 23));
for (counter = 0; counter < 100 ; counter++) {
udelay(30);
u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS);
if (u32temp & 1) {
nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS, 1);
break;
}
}
if (!(u32temp & 1))
nes_debug(NES_DBG_PHY, "Phy is not responding. interrupt status = 0x%X.\n",
u32temp);
}
/**
* nes_get_cqp_request
*/
struct nes_cqp_request *nes_get_cqp_request(struct nes_device *nesdev)
{
unsigned long flags;
struct nes_cqp_request *cqp_request = NULL;
if (!list_empty(&nesdev->cqp_avail_reqs)) {
spin_lock_irqsave(&nesdev->cqp.lock, flags);
cqp_request = list_entry(nesdev->cqp_avail_reqs.next,
struct nes_cqp_request, list);
list_del_init(&cqp_request->list);
spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
} else {
cqp_request = kzalloc(sizeof(struct nes_cqp_request), GFP_KERNEL);
if (cqp_request) {
cqp_request->dynamic = 1;
INIT_LIST_HEAD(&cqp_request->list);
}
}
if (cqp_request) {
init_waitqueue_head(&cqp_request->waitq);
cqp_request->waiting = 0;
cqp_request->request_done = 0;
cqp_request->callback = 0;
nes_debug(NES_DBG_CQP, "Got cqp request %p from the available list \n",
cqp_request);
} else
printk(KERN_ERR PFX "%s: Could not allocated a CQP request.\n",
__FUNCTION__);
return cqp_request;
}
/**
* nes_post_cqp_request
*/
void nes_post_cqp_request(struct nes_device *nesdev,
struct nes_cqp_request *cqp_request, int ring_doorbell)
{
struct nes_hw_cqp_wqe *cqp_wqe;
unsigned long flags;
u32 cqp_head;
u64 u64temp;
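/* If there is room on the CQP SQ and nothing is already pending, copy
 * the WQE in and (optionally) ring the doorbell; otherwise park the
 * request on the pending list. */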
spin_lock_irqsave(&nesdev->cqp.lock, flags);
if (((((nesdev->cqp.sq_tail+(nesdev->cqp.sq_size*2))-nesdev->cqp.sq_head) &
(nesdev->cqp.sq_size - 1)) != 1)
&& (list_empty(&nesdev->cqp_pending_reqs))) {
cqp_head = nesdev->cqp.sq_head++;
nesdev->cqp.sq_head &= nesdev->cqp.sq_size-1;
cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
memcpy(cqp_wqe, &cqp_request->cqp_wqe, sizeof(*cqp_wqe));
barrier();
u64temp = (unsigned long)cqp_request;
set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_COMP_SCRATCH_LOW_IDX,
u64temp);
nes_debug(NES_DBG_CQP, "CQP request (opcode 0x%02X), line 1 = 0x%08X put on CQPs SQ,"
" request = %p, cqp_head = %u, cqp_tail = %u, cqp_size = %u,"
" waiting = %d, refcount = %d.\n",
le32_to_cpu(cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX])&0x3f,
le32_to_cpu(cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX]), cqp_request,
nesdev->cqp.sq_head, nesdev->cqp.sq_tail, nesdev->cqp.sq_size,
cqp_request->waiting, atomic_read(&cqp_request->refcount));
barrier();
if (ring_doorbell) {
/* Ring doorbell (1 WQEs) */
nes_write32(nesdev->regs+NES_WQE_ALLOC, 0x01800000 | nesdev->cqp.qp_id);
}
barrier();
} else {
nes_debug(NES_DBG_CQP, "CQP request %p (opcode 0x%02X), line 1 = 0x%08X"
" put on the pending queue.\n",
cqp_request,
le32_to_cpu(cqp_request->cqp_wqe.wqe_words[NES_CQP_WQE_OPCODE_IDX])&0x3f,
le32_to_cpu(cqp_request->cqp_wqe.wqe_words[NES_CQP_WQE_ID_IDX]));
list_add_tail(&cqp_request->list, &nesdev->cqp_pending_reqs);
}
spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
return;
}
/**
* nes_arp_table
*/
int nes_arp_table(struct nes_device *nesdev, u32 ip_addr, u8 *mac_addr, u32 action)
{
struct nes_adapter *nesadapter = nesdev->nesadapter;
int arp_index;
int err = 0;
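/* Look up the entry by IP address, then apply the requested action
 * (add, resolve or delete). */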
for (arp_index = 0; (u32) arp_index < nesadapter->arp_table_size; arp_index++) {
if (nesadapter->arp_table[arp_index].ip_addr == ip_addr)
break;
}
if (action == NES_ARP_ADD) {
if (arp_index != nesadapter->arp_table_size) {
return -1;
}
arp_index = 0;
err = nes_alloc_resource(nesadapter, nesadapter->allocated_arps,
nesadapter->arp_table_size, (u32 *)&arp_index, &nesadapter->next_arp_index);
if (err) {
nes_debug(NES_DBG_NETDEV, "nes_alloc_resource returned error = %u\n", err);
return err;
}
nes_debug(NES_DBG_NETDEV, "ADD, arp_index=%d\n", arp_index);
nesadapter->arp_table[arp_index].ip_addr = ip_addr;
memcpy(nesadapter->arp_table[arp_index].mac_addr, mac_addr, ETH_ALEN);
return arp_index;
}
/* DELETE or RESOLVE */
if (arp_index == nesadapter->arp_table_size) {
nes_debug(NES_DBG_NETDEV, "mac address not in ARP table - cannot delete or resolve\n");
return -1;
}
if (action == NES_ARP_RESOLVE) {
nes_debug(NES_DBG_NETDEV, "RESOLVE, arp_index=%d\n", arp_index);
return arp_index;
}
if (action == NES_ARP_DELETE) {
nes_debug(NES_DBG_NETDEV, "DELETE, arp_index=%d\n", arp_index);
nesadapter->arp_table[arp_index].ip_addr = 0;
memset(nesadapter->arp_table[arp_index].mac_addr, 0x00, ETH_ALEN);
nes_free_resource(nesadapter, nesadapter->allocated_arps, arp_index);
return arp_index;
}
return -1;
}
/**
* nes_mh_fix
*/
void nes_mh_fix(unsigned long parm)
{
unsigned long flags;
struct nes_device *nesdev = (struct nes_device *)parm;
struct nes_adapter *nesadapter = nesdev->nesadapter;
struct nes_vnic *nesvnic;
u32 used_chunks_tx;
u32 temp_used_chunks_tx;
u32 temp_last_used_chunks_tx;
u32 used_chunks_mask;
u32 mac_tx_frames_low;
u32 mac_tx_frames_high;
u32 mac_tx_pauses;
u32 serdes_status;
u32 reset_value;
u32 tx_control;
u32 tx_config;
u32 tx_pause_quanta;
u32 rx_control;
u32 rx_config;
u32 mac_exact_match;
u32 mpp_debug;
u32 i=0;
u32 chunks_tx_progress = 0;
spin_lock_irqsave(&nesadapter->phy_lock, flags);
if ((nesadapter->mac_sw_state[0] != NES_MAC_SW_IDLE) || (nesadapter->mac_link_down[0])) {
spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
goto no_mh_work;
}
nesadapter->mac_sw_state[0] = NES_MAC_SW_MH;
spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
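/* Presumably a workaround for a MAC transmit hang ("mh"): if no frames
 * or pause frames have gone out and the TX chunk counters show no
 * progress, force a pause frame; if that also fails, reset the MAC and
 * SerDes path and restore the saved register state. */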
do {
mac_tx_frames_low = nes_read_indexed(nesdev, NES_IDX_MAC_TX_FRAMES_LOW);
mac_tx_frames_high = nes_read_indexed(nesdev, NES_IDX_MAC_TX_FRAMES_HIGH);
mac_tx_pauses = nes_read_indexed(nesdev, NES_IDX_MAC_TX_PAUSE_FRAMES);
used_chunks_tx = nes_read_indexed(nesdev, NES_IDX_USED_CHUNKS_TX);
nesdev->mac_pause_frames_sent += mac_tx_pauses;
used_chunks_mask = 0;
temp_used_chunks_tx = used_chunks_tx;
temp_last_used_chunks_tx = nesdev->last_used_chunks_tx;
if (nesdev->netdev[0]) {
nesvnic = netdev_priv(nesdev->netdev[0]);
} else {
break;
}
for (i=0; i<4; i++) {
used_chunks_mask <<= 8;
if (nesvnic->qp_nic_index[i] != 0xff) {
used_chunks_mask |= 0xff;
if ((temp_used_chunks_tx&0xff)<(temp_last_used_chunks_tx&0xff)) {
chunks_tx_progress = 1;
}
}
temp_used_chunks_tx >>= 8;
temp_last_used_chunks_tx >>= 8;
}
if ((mac_tx_frames_low) || (mac_tx_frames_high) ||
(!(used_chunks_tx&used_chunks_mask)) ||
(!(nesdev->last_used_chunks_tx&used_chunks_mask)) ||
(chunks_tx_progress) ) {
nesdev->last_used_chunks_tx = used_chunks_tx;
break;
}
nesdev->last_used_chunks_tx = used_chunks_tx;
barrier();
nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONTROL, 0x00000005);
mh_pauses_sent++;
mac_tx_pauses = nes_read_indexed(nesdev, NES_IDX_MAC_TX_PAUSE_FRAMES);
if (mac_tx_pauses) {
nesdev->mac_pause_frames_sent += mac_tx_pauses;
break;
}
tx_control = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONTROL);
tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG);
tx_pause_quanta = nes_read_indexed(nesdev, NES_IDX_MAC_TX_PAUSE_QUANTA);
rx_control = nes_read_indexed(nesdev, NES_IDX_MAC_RX_CONTROL);
rx_config = nes_read_indexed(nesdev, NES_IDX_MAC_RX_CONFIG);
mac_exact_match = nes_read_indexed(nesdev, NES_IDX_MAC_EXACT_MATCH_BOTTOM);
mpp_debug = nes_read_indexed(nesdev, NES_IDX_MPP_DEBUG);
/* one last ditch effort to avoid a false positive */
mac_tx_pauses = nes_read_indexed(nesdev, NES_IDX_MAC_TX_PAUSE_FRAMES);
if (mac_tx_pauses) {
nesdev->last_mac_tx_pauses = nesdev->mac_pause_frames_sent;
nes_debug(NES_DBG_HW, "failsafe caught slow outbound pause\n");
break;
}
mh_detected++;
nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONTROL, 0x00000000);
nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, 0x00000000);
reset_value = nes_read32(nesdev->regs+NES_SOFTWARE_RESET);
nes_write32(nesdev->regs+NES_SOFTWARE_RESET, reset_value | 0x0000001d);
while (((nes_read32(nesdev->regs+NES_SOFTWARE_RESET)
& 0x00000040) != 0x00000040) && (i++ < 5000)) {
/* mdelay(1); */
}
nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0, 0x00000008);
serdes_status = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS0);
nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP0, 0x000bdef7);
nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_DRIVE0, 0x9ce73000);
nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_MODE0, 0x0ff00000);
nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_SIGDET0, 0x00000000);
nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_BYPASS0, 0x00000000);
nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_LOOPBACK_CONTROL0, 0x00000000);
if (nesadapter->OneG_Mode) {
nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_EQ_CONTROL0, 0xf0182222);
} else {
nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_EQ_CONTROL0, 0xf0042222);
}
serdes_status = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_RX_EQ_STATUS0);
nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000ff);
nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONTROL, tx_control);
nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config);
nes_write_indexed(nesdev, NES_IDX_MAC_TX_PAUSE_QUANTA, tx_pause_quanta);
nes_write_indexed(nesdev, NES_IDX_MAC_RX_CONTROL, rx_control);
nes_write_indexed(nesdev, NES_IDX_MAC_RX_CONFIG, rx_config);
nes_write_indexed(nesdev, NES_IDX_MAC_EXACT_MATCH_BOTTOM, mac_exact_match);
nes_write_indexed(nesdev, NES_IDX_MPP_DEBUG, mpp_debug);
} while (0);
nesadapter->mac_sw_state[0] = NES_MAC_SW_IDLE;
no_mh_work:
nesdev->nesadapter->mh_timer.expires = jiffies + (HZ/5);
add_timer(&nesdev->nesadapter->mh_timer);
}
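/*
 * Illustrative arming of the timer above (an assumption for clarity; the
 * driver's real initialization lives in another file): nes_mh_fix() follows
 * the classic unsigned-long timer callback convention, carrying the
 * nes_device pointer in the timer's data field, and then re-arms itself
 * every HZ/5 jiffies as seen at the end of the function.
 *
 *	init_timer(&nesadapter->mh_timer);
 *	nesadapter->mh_timer.function = nes_mh_fix;
 *	nesadapter->mh_timer.data = (unsigned long)nesdev;
 *	nesadapter->mh_timer.expires = jiffies + (HZ / 5);
 *	add_timer(&nesadapter->mh_timer);
 */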
/**
* nes_clc - hourly timer that clears the per-port link interrupt counters
*/
void nes_clc(unsigned long parm)
{
unsigned long flags;
struct nes_device *nesdev = (struct nes_device *)parm;
struct nes_adapter *nesadapter = nesdev->nesadapter;
spin_lock_irqsave(&nesadapter->phy_lock, flags);
nesadapter->link_interrupt_count[0] = 0;
nesadapter->link_interrupt_count[1] = 0;
nesadapter->link_interrupt_count[2] = 0;
nesadapter->link_interrupt_count[3] = 0;
spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
nesadapter->lc_timer.expires = jiffies + 3600 * HZ; /* 1 hour */
add_timer(&nesadapter->lc_timer);
}
/**
* nes_dump_mem - emit a hex/ASCII dump of a buffer at the given debug level
*/
void nes_dump_mem(unsigned int dump_debug_level, void *addr, int length)
{
char xlate[] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
'a', 'b', 'c', 'd', 'e', 'f'};
char *ptr;
char hex_buf[80];
char ascii_buf[20];
int num_char;
int num_ascii;
int num_hex;
if (!(nes_debug_level & dump_debug_level)) {
return;
}
ptr = addr;
if (length > 0x100) {
nes_debug(dump_debug_level, "Length truncated from %x to %x\n", length, 0x100);
length = 0x100;
}
nes_debug(dump_debug_level, "Address=0x%p, length=0x%x (%d)\n", ptr, length, length);
memset(ascii_buf, 0, 20);
memset(hex_buf, 0, 80);
num_ascii = 0;
num_hex = 0;
for (num_char = 0; num_char < length; num_char++) {
if (num_ascii == 8) {
ascii_buf[num_ascii++] = ' ';
hex_buf[num_hex++] = '-';
hex_buf[num_hex++] = ' ';
}
if (*ptr < 0x20 || *ptr > 0x7e)
ascii_buf[num_ascii++] = '.';
else
ascii_buf[num_ascii++] = *ptr;
hex_buf[num_hex++] = xlate[((*ptr & 0xf0) >> 4)];
hex_buf[num_hex++] = xlate[*ptr & 0x0f];
hex_buf[num_hex++] = ' ';
ptr++;
if (num_ascii >= 17) {
/* output line and reset */
nes_debug(dump_debug_level, " %s | %s\n", hex_buf, ascii_buf);
memset(ascii_buf, 0, 20);
memset(hex_buf, 0, 80);
num_ascii = 0;
num_hex = 0;
}
}
/* output the rest */
if (num_ascii) {
while (num_ascii < 17) {
if (num_ascii == 8) {
hex_buf[num_hex++] = ' ';
hex_buf[num_hex++] = ' ';
}
hex_buf[num_hex++] = ' ';
hex_buf[num_hex++] = ' ';
hex_buf[num_hex++] = ' ';
num_ascii++;
}
nes_debug(dump_debug_level, " %s | %s\n", hex_buf, ascii_buf);
}
}
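/*
 * Usage sketch (hypothetical call site; 'buffer' is any kernel-virtual
 * pointer): nothing is printed unless dump_debug_level is enabled in
 * nes_debug_level, and dumps longer than 0x100 bytes are truncated as
 * handled above.
 *
 *	nes_dump_mem(NES_DBG_HW, buffer, 64);
 */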
/*
* Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef NES_VERBS_H
#define NES_VERBS_H
struct nes_device;
#define NES_MAX_USER_DB_REGIONS 4096
#define NES_MAX_USER_WQ_REGIONS 4096
struct nes_ucontext {
struct ib_ucontext ibucontext;
struct nes_device *nesdev;
unsigned long mmap_wq_offset;
unsigned long mmap_cq_offset; /* to be removed */
int index; /* rnic index (minor) */
unsigned long allocated_doorbells[BITS_TO_LONGS(NES_MAX_USER_DB_REGIONS)];
u16 mmap_db_index[NES_MAX_USER_DB_REGIONS];
u16 first_free_db;
unsigned long allocated_wqs[BITS_TO_LONGS(NES_MAX_USER_WQ_REGIONS)];
struct nes_qp *mmap_nesqp[NES_MAX_USER_WQ_REGIONS];
u16 first_free_wq;
struct list_head cq_reg_mem_list;
struct list_head qp_reg_mem_list;
u32 mcrqf;
atomic_t usecnt;
};
struct nes_pd {
struct ib_pd ibpd;
u16 pd_id;
atomic_t sqp_count;
u16 mmap_db_index;
};
struct nes_mr {
union {
struct ib_mr ibmr;
struct ib_mw ibmw;
struct ib_fmr ibfmr;
};
struct ib_umem *region;
u16 pbls_used;
u8 mode;
u8 pbl_4k;
};
struct nes_hw_pb {
__le32 pa_low;
__le32 pa_high;
};
struct nes_vpbl {
dma_addr_t pbl_pbase;
struct nes_hw_pb *pbl_vbase;
};
struct nes_root_vpbl {
dma_addr_t pbl_pbase;
struct nes_hw_pb *pbl_vbase;
struct nes_vpbl *leaf_vpbl;
};
struct nes_fmr {
struct nes_mr nesmr;
u32 leaf_pbl_cnt;
struct nes_root_vpbl root_vpbl;
struct ib_qp *ib_qp;
int access_rights;
struct ib_fmr_attr attr;
};
struct nes_av;
struct nes_cq {
struct ib_cq ibcq;
struct nes_hw_cq hw_cq;
u32 polled_completions;
u32 cq_mem_size;
spinlock_t lock;
u8 virtual_cq;
u8 pad[3];
};
struct nes_wq {
spinlock_t lock;
};
struct iw_cm_id;
struct ietf_mpa_frame;
struct nes_qp {
struct ib_qp ibqp;
void *allocated_buffer;
struct iw_cm_id *cm_id;
struct workqueue_struct *wq;
struct work_struct disconn_work;
struct nes_cq *nesscq;
struct nes_cq *nesrcq;
struct nes_pd *nespd;
void *cm_node; /* handle of the node this QP is associated with */
struct ietf_mpa_frame *ietf_frame;
dma_addr_t ietf_frame_pbase;
wait_queue_head_t state_waitq;
unsigned long socket;
struct nes_hw_qp hwqp;
struct work_struct work;
struct work_struct ae_work;
enum ib_qp_state ibqp_state;
u32 iwarp_state;
u32 hte_index;
u32 last_aeq;
u32 qp_mem_size;
atomic_t refcount;
atomic_t close_timer_started;
u32 mmap_sq_db_index;
u32 mmap_rq_db_index;
spinlock_t lock;
struct nes_qp_context *nesqp_context;
dma_addr_t nesqp_context_pbase;
void *pbl_vbase;
dma_addr_t pbl_pbase;
struct page *page;
wait_queue_head_t kick_waitq;
u16 in_disconnect;
u16 private_data_len;
u8 active_conn;
u8 skip_lsmm;
u8 user_mode;
u8 hte_added;
u8 hw_iwarp_state;
u8 flush_issued;
u8 hw_tcp_state;
u8 disconn_pending;
u8 destroyed;
};
#endif /* NES_VERBS_H */
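/*
 * Note on the structures above (illustrative only, not part of the header):
 * each nes object embeds its ib_verbs counterpart (ib_pd, ib_cq, ib_qp, ...),
 * so the driver can recover its private state from the pointer the IB core
 * hands back by using container_of().  A minimal sketch with a hypothetical
 * helper name:
 *
 *	static inline struct nes_qp *to_nesqp(struct ib_qp *ibqp)
 *	{
 *		return container_of(ibqp, struct nes_qp, ibqp);
 *	}
 */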
......@@ -680,12 +680,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
neigh = *to_ipoib_neigh(skb->dst->neighbour);
if (ipoib_cm_get(neigh)) {
if (ipoib_cm_up(neigh)) {
ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
goto out;
}
} else if (neigh->ah) {
if (neigh->ah)
if (unlikely((memcmp(&neigh->dgid.raw,
skb->dst->neighbour->ha + 4,
sizeof(union ib_gid))) ||
......@@ -706,6 +701,12 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
goto out;
}
if (ipoib_cm_get(neigh)) {
if (ipoib_cm_up(neigh)) {
ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
goto out;
}
} else if (neigh->ah) {
ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(skb->dst->neighbour->ha));
goto out;
}
......@@ -813,11 +814,9 @@ static void ipoib_neigh_cleanup(struct neighbour *n)
struct ipoib_ah *ah = NULL;
neigh = *to_ipoib_neigh(n);
if (neigh) {
if (neigh)
priv = netdev_priv(neigh->dev);
ipoib_dbg(priv, "neigh_destructor for bonding device: %s\n",
n->dev->name);
} else
else
return;
ipoib_dbg(priv,
"neigh_cleanup for %06x " IPOIB_GID_FMT "\n",
......
......@@ -204,6 +204,22 @@ static int srp_init_qp(struct srp_target_port *target,
return ret;
}
static int srp_new_cm_id(struct srp_target_port *target)
{
struct ib_cm_id *new_cm_id;
new_cm_id = ib_create_cm_id(target->srp_host->dev->dev,
srp_cm_handler, target);
if (IS_ERR(new_cm_id))
return PTR_ERR(new_cm_id);
if (target->cm_id)
ib_destroy_cm_id(target->cm_id);
target->cm_id = new_cm_id;
return 0;
}
static int srp_create_target_ib(struct srp_target_port *target)
{
struct ib_qp_init_attr *init_attr;
......@@ -436,6 +452,7 @@ static void srp_remove_work(struct work_struct *work)
static int srp_connect_target(struct srp_target_port *target)
{
int retries = 3;
int ret;
ret = srp_lookup_path(target);
......@@ -468,6 +485,21 @@ static int srp_connect_target(struct srp_target_port *target)
case SRP_DLID_REDIRECT:
break;
case SRP_STALE_CONN:
/* Our current CM id was stale, and is now in timewait.
* Try to reconnect with a new one.
*/
if (!retries-- || srp_new_cm_id(target)) {
shost_printk(KERN_ERR, target->scsi_host, PFX
"giving up on stale connection\n");
target->status = -ECONNRESET;
return target->status;
}
shost_printk(KERN_ERR, target->scsi_host, PFX
"retrying stale connection\n");
break;
default:
return target->status;
}
......@@ -507,7 +539,6 @@ static void srp_reset_req(struct srp_target_port *target, struct srp_request *re
static int srp_reconnect_target(struct srp_target_port *target)
{
struct ib_cm_id *new_cm_id;
struct ib_qp_attr qp_attr;
struct srp_request *req, *tmp;
struct ib_wc wc;
......@@ -526,14 +557,9 @@ static int srp_reconnect_target(struct srp_target_port *target)
* Now get a new local CM ID so that we avoid confusing the
* target in case things are really fouled up.
*/
new_cm_id = ib_create_cm_id(target->srp_host->dev->dev,
srp_cm_handler, target);
if (IS_ERR(new_cm_id)) {
ret = PTR_ERR(new_cm_id);
ret = srp_new_cm_id(target);
if (ret)
goto err;
}
ib_destroy_cm_id(target->cm_id);
target->cm_id = new_cm_id;
qp_attr.qp_state = IB_QPS_RESET;
ret = ib_modify_qp(target->qp, &qp_attr, IB_QP_STATE);
......@@ -1171,6 +1197,11 @@ static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
target->status = -ECONNRESET;
break;
case IB_CM_REJ_STALE_CONN:
shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
target->status = SRP_STALE_CONN;
break;
default:
shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
event->param.rej_rcvd.reason);
......@@ -1862,11 +1893,9 @@ static ssize_t srp_create_target(struct class_device *class_dev,
if (ret)
goto err;
target->cm_id = ib_create_cm_id(host->dev->dev, srp_cm_handler, target);
if (IS_ERR(target->cm_id)) {
ret = PTR_ERR(target->cm_id);
ret = srp_new_cm_id(target);
if (ret)
goto err_free;
}
target->qp_in_error = 0;
ret = srp_connect_target(target);
......
......@@ -54,6 +54,7 @@ enum {
SRP_PORT_REDIRECT = 1,
SRP_DLID_REDIRECT = 2,
SRP_STALE_CONN = 3,
SRP_MAX_LUN = 512,
SRP_DEF_SG_TABLESIZE = 12,
......
......@@ -617,9 +617,6 @@ int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter)
int err;
#define QUERY_ADAPTER_OUT_SIZE 0x100
#define QUERY_ADAPTER_VENDOR_ID_OFFSET 0x00
#define QUERY_ADAPTER_DEVICE_ID_OFFSET 0x04
#define QUERY_ADAPTER_REVISION_ID_OFFSET 0x08
#define QUERY_ADAPTER_INTA_PIN_OFFSET 0x10
#define QUERY_ADAPTER_VSD_OFFSET 0x20
......@@ -633,9 +630,6 @@ int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter)
if (err)
goto out;
MLX4_GET(adapter->vendor_id, outbox, QUERY_ADAPTER_VENDOR_ID_OFFSET);
MLX4_GET(adapter->device_id, outbox, QUERY_ADAPTER_DEVICE_ID_OFFSET);
MLX4_GET(adapter->revision_id, outbox, QUERY_ADAPTER_REVISION_ID_OFFSET);
MLX4_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET);
get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4,
......
......@@ -99,9 +99,6 @@ struct mlx4_dev_cap {
};
struct mlx4_adapter {
u32 vendor_id;
u32 device_id;
u32 revision_id;
char board_id[MLX4_BOARD_ID_LEN];
u8 inta_pin;
};
......
......@@ -71,7 +71,7 @@ MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");
#endif /* CONFIG_PCI_MSI */
static const char mlx4_version[] __devinitdata =
static char mlx4_version[] __devinitdata =
DRV_NAME ": Mellanox ConnectX core driver v"
DRV_VERSION " (" DRV_RELDATE ")\n";
......@@ -163,7 +163,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
return 0;
}
static int __devinit mlx4_load_fw(struct mlx4_dev *dev)
static int mlx4_load_fw(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int err;
......@@ -197,8 +197,8 @@ static int __devinit mlx4_load_fw(struct mlx4_dev *dev)
return err;
}
static int __devinit mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
int cmpt_entry_sz)
static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
int cmpt_entry_sz)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int err;
......@@ -534,7 +534,6 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
}
priv->eq_table.inta_pin = adapter.inta_pin;
dev->rev_id = adapter.revision_id;
memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id);
return 0;
......@@ -688,7 +687,7 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
return err;
}
static void __devinit mlx4_enable_msi_x(struct mlx4_dev *dev)
static void mlx4_enable_msi_x(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct msix_entry entries[MLX4_NUM_EQ];
......
......@@ -122,7 +122,7 @@ static void mlx4_buddy_free(struct mlx4_buddy *buddy, u32 seg, int order)
spin_unlock(&buddy->lock);
}
static int __devinit mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
{
int i, s;
......