Commit 89fbb69c authored by Linus Torvalds

Merge branch 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband

parents 7efe5d7c 4cce3390
This diff is collapsed.
@@ -39,17 +39,14 @@
 #ifndef __AGENT_H_
 #define __AGENT_H_
 
-extern spinlock_t ib_agent_port_list_lock;
+#include <rdma/ib_mad.h>
 
-extern int ib_agent_port_open(struct ib_device *device,
-			      int port_num);
+extern int ib_agent_port_open(struct ib_device *device, int port_num);
 
 extern int ib_agent_port_close(struct ib_device *device, int port_num);
 
-extern int agent_send(struct ib_mad_private *mad,
-		      struct ib_grh *grh,
-		      struct ib_wc *wc,
-		      struct ib_device *device,
-		      int port_num);
+extern int agent_send_response(struct ib_mad *mad, struct ib_grh *grh,
+			       struct ib_wc *wc, struct ib_device *device,
+			       int port_num, int qpn);
 
 #endif	/* __AGENT_H_ */
/*
* Copyright (c) 2004, 2005 Mellanox Technologies Ltd. All rights reserved.
* Copyright (c) 2004, 2005 Infinicon Corporation. All rights reserved.
* Copyright (c) 2004, 2005 Intel Corporation. All rights reserved.
* Copyright (c) 2004, 2005 Topspin Corporation. All rights reserved.
* Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* $Id: agent_priv.h 1640 2005-01-24 22:39:02Z halr $
*/
#ifndef __IB_AGENT_PRIV_H__
#define __IB_AGENT_PRIV_H__

#include <linux/pci.h>

#define SPFX "ib_agent: "

struct ib_agent_send_wr {
	struct list_head send_list;
	struct ib_ah *ah;
	struct ib_mad_private *mad;
	DECLARE_PCI_UNMAP_ADDR(mapping)
};

struct ib_agent_port_private {
	struct list_head port_list;
	struct list_head send_posted_list;
	spinlock_t send_list_lock;
	int port_num;
	struct ib_mad_agent *smp_agent;	      /* SM class */
	struct ib_mad_agent *perf_mgmt_agent; /* PerfMgmt class */
};

#endif	/* __IB_AGENT_PRIV_H__ */
This diff is collapsed.
@@ -186,6 +186,7 @@ static inline void cm_req_set_qp_type(struct cm_req_msg *req_msg,
 		req_msg->offset40 = cpu_to_be32((be32_to_cpu(
 						 req_msg->offset40) &
 						 0xFFFFFFF9) | 0x2);
+		break;
 	default:
 		req_msg->offset40 = cpu_to_be32(be32_to_cpu(
 						 req_msg->offset40) &
......
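Note on the one-line fix above: without the added break, the preceding case arm of cm_req_set_qp_type() falls straight through into default, which immediately rewrites offset40 and wipes out the QP-type bits just set. A standalone illustration of the C fall-through hazard (toy code, not from this commit):

#include <stdio.h>

/* Without a break, case 1 runs default's body too and clobbers v. */
static int classify(int x)
{
	int v = 0;

	switch (x) {
	case 1:
		v = 0x2;
		break;		/* omit this and v ends up 0x4 below */
	default:
		v = 0x4;
	}
	return v;
}

int main(void)
{
	printf("%d\n", classify(1));	/* prints 2 only with the break */
	return 0;
}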
@@ -514,6 +514,12 @@ int ib_query_port(struct ib_device *device,
 		  u8 port_num,
 		  struct ib_port_attr *port_attr)
 {
+	if (device->node_type == IB_NODE_SWITCH) {
+		if (port_num)
+			return -EINVAL;
+	} else if (port_num < 1 || port_num > device->phys_port_cnt)
+		return -EINVAL;
+
 	return device->query_port(device, port_num, port_attr);
 }
 EXPORT_SYMBOL(ib_query_port);
@@ -583,6 +589,12 @@ int ib_modify_port(struct ib_device *device,
 		   u8 port_num, int port_modify_mask,
 		   struct ib_port_modify *port_modify)
 {
+	if (device->node_type == IB_NODE_SWITCH) {
+		if (port_num)
+			return -EINVAL;
+	} else if (port_num < 1 || port_num > device->phys_port_cnt)
+		return -EINVAL;
+
 	return device->modify_port(device, port_num, port_modify_mask,
 				   port_modify);
 }
......
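With the checks above, port numbering is validated centrally: a switch exposes only its management port 0, while CAs and routers number ports 1..phys_port_cnt. A minimal caller-side sketch under that assumption (the helper name is ours, not part of this commit):

/* Query the first valid port of a device: port 0 on a switch,
 * port 1 on a CA or router. */
static int query_first_port(struct ib_device *device,
			    struct ib_port_attr *attr)
{
	u8 port_num = (device->node_type == IB_NODE_SWITCH) ? 0 : 1;

	return ib_query_port(device, port_num, attr);
}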
This diff is collapsed.
@@ -118,9 +118,10 @@ struct ib_mad_send_wr_private {
 	struct ib_mad_list_head mad_list;
 	struct list_head agent_list;
 	struct ib_mad_agent_private *mad_agent_priv;
+	struct ib_mad_send_buf send_buf;
+	DECLARE_PCI_UNMAP_ADDR(mapping)
 	struct ib_send_wr send_wr;
 	struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG];
-	u64 wr_id;			/* client WR ID */
 	__be64 tid;
 	unsigned long timeout;
 	int retries;
@@ -141,10 +142,7 @@ struct ib_mad_local_private {
 	struct list_head completion_list;
 	struct ib_mad_private *mad_priv;
 	struct ib_mad_agent_private *recv_mad_agent;
-	struct ib_send_wr send_wr;
-	struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG];
-	u64 wr_id;			/* client WR ID */
-	__be64 tid;
+	struct ib_mad_send_wr_private *mad_send_wr;
 };
 
 struct ib_mad_mgmt_method_table {
......
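Embedding struct ib_mad_send_buf inside the private send WR (and dropping the raw wr_id) is what lets completion handlers report wc.send_buf to clients, as the mad_rmpp.c changes below do, while the core recovers its private state with container_of(). A sketch of that recovery step (implied by the new layout, not shown in this diff):

/* Map a client-visible send buffer back to the core's private
 * tracking structure; valid because send_buf is embedded. */
static struct ib_mad_send_wr_private *
to_mad_send_wr(struct ib_mad_send_buf *send_buf)
{
	return container_of(send_buf, struct ib_mad_send_wr_private,
			    send_buf);
}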
@@ -103,12 +103,12 @@ void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent)
 static int data_offset(u8 mgmt_class)
 {
 	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
-		return offsetof(struct ib_sa_mad, data);
+		return IB_MGMT_SA_HDR;
 	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
 		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
-		return offsetof(struct ib_vendor_mad, data);
+		return IB_MGMT_VENDOR_HDR;
 	else
-		return offsetof(struct ib_rmpp_mad, data);
+		return IB_MGMT_RMPP_HDR;
 }
 
 static void format_ack(struct ib_rmpp_mad *ack,
@@ -135,55 +135,52 @@ static void ack_recv(struct mad_rmpp_recv *rmpp_recv,
 		     struct ib_mad_recv_wc *recv_wc)
 {
 	struct ib_mad_send_buf *msg;
-	struct ib_send_wr *bad_send_wr;
-	int hdr_len, ret;
+	int ret;
 
-	hdr_len = sizeof(struct ib_mad_hdr) + sizeof(struct ib_rmpp_hdr);
 	msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp,
-				 recv_wc->wc->pkey_index, rmpp_recv->ah, 1,
-				 hdr_len, sizeof(struct ib_rmpp_mad) - hdr_len,
-				 GFP_KERNEL);
+				 recv_wc->wc->pkey_index, 1, IB_MGMT_RMPP_HDR,
+				 IB_MGMT_RMPP_DATA, GFP_KERNEL);
 	if (!msg)
 		return;
 
-	format_ack((struct ib_rmpp_mad *) msg->mad,
-		   (struct ib_rmpp_mad *) recv_wc->recv_buf.mad, rmpp_recv);
-	ret = ib_post_send_mad(&rmpp_recv->agent->agent, &msg->send_wr,
-			       &bad_send_wr);
+	format_ack(msg->mad, (struct ib_rmpp_mad *) recv_wc->recv_buf.mad,
+		   rmpp_recv);
+	msg->ah = rmpp_recv->ah;
+	ret = ib_post_send_mad(msg, NULL);
 	if (ret)
 		ib_free_send_mad(msg);
 }
 
-static int alloc_response_msg(struct ib_mad_agent *agent,
-			      struct ib_mad_recv_wc *recv_wc,
-			      struct ib_mad_send_buf **msg)
+static struct ib_mad_send_buf *alloc_response_msg(struct ib_mad_agent *agent,
+						  struct ib_mad_recv_wc *recv_wc)
 {
-	struct ib_mad_send_buf *m;
+	struct ib_mad_send_buf *msg;
 	struct ib_ah *ah;
-	int hdr_len;
 
 	ah = ib_create_ah_from_wc(agent->qp->pd, recv_wc->wc,
 				  recv_wc->recv_buf.grh, agent->port_num);
 	if (IS_ERR(ah))
-		return PTR_ERR(ah);
+		return (void *) ah;
 
-	hdr_len = sizeof(struct ib_mad_hdr) + sizeof(struct ib_rmpp_hdr);
-	m = ib_create_send_mad(agent, recv_wc->wc->src_qp,
-			       recv_wc->wc->pkey_index, ah, 1, hdr_len,
-			       sizeof(struct ib_rmpp_mad) - hdr_len,
-			       GFP_KERNEL);
-	if (IS_ERR(m)) {
-		ib_destroy_ah(ah);
-		return PTR_ERR(m);
-	}
-	*msg = m;
-	return 0;
+	msg = ib_create_send_mad(agent, recv_wc->wc->src_qp,
+				 recv_wc->wc->pkey_index, 1,
+				 IB_MGMT_RMPP_HDR, IB_MGMT_RMPP_DATA,
+				 GFP_KERNEL);
+	if (IS_ERR(msg))
+		ib_destroy_ah(ah);
+	else
+		msg->ah = ah;
+
+	return msg;
 }
 
-static void free_msg(struct ib_mad_send_buf *msg)
+void ib_rmpp_send_handler(struct ib_mad_send_wc *mad_send_wc)
 {
-	ib_destroy_ah(msg->send_wr.wr.ud.ah);
-	ib_free_send_mad(msg);
+	struct ib_rmpp_mad *rmpp_mad = mad_send_wc->send_buf->mad;
+
+	if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_ACK)
+		ib_destroy_ah(mad_send_wc->send_buf->ah);
+	ib_free_send_mad(mad_send_wc->send_buf);
 }
 
 static void nack_recv(struct ib_mad_agent_private *agent,
@@ -191,14 +188,13 @@ static void nack_recv(struct ib_mad_agent_private *agent,
 {
 	struct ib_mad_send_buf *msg;
 	struct ib_rmpp_mad *rmpp_mad;
-	struct ib_send_wr *bad_send_wr;
 	int ret;
 
-	ret = alloc_response_msg(&agent->agent, recv_wc, &msg);
-	if (ret)
+	msg = alloc_response_msg(&agent->agent, recv_wc);
+	if (IS_ERR(msg))
 		return;
 
-	rmpp_mad = (struct ib_rmpp_mad *) msg->mad;
+	rmpp_mad = msg->mad;
 	memcpy(rmpp_mad, recv_wc->recv_buf.mad,
 	       data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class));
@@ -210,9 +206,11 @@ static void nack_recv(struct ib_mad_agent_private *agent,
 	rmpp_mad->rmpp_hdr.seg_num = 0;
 	rmpp_mad->rmpp_hdr.paylen_newwin = 0;
 
-	ret = ib_post_send_mad(&agent->agent, &msg->send_wr, &bad_send_wr);
-	if (ret)
-		free_msg(msg);
+	ret = ib_post_send_mad(msg, NULL);
+	if (ret) {
+		ib_destroy_ah(msg->ah);
+		ib_free_send_mad(msg);
+	}
 }
 
 static void recv_timeout_handler(void *data)
@@ -585,7 +583,7 @@ static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
 	int timeout;
 	u32 paylen;
 
-	rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr;
+	rmpp_mad = mad_send_wr->send_buf.mad;
 	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
 	rmpp_mad->rmpp_hdr.seg_num = cpu_to_be32(mad_send_wr->seg_num);
@@ -612,7 +610,7 @@ static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
 	}
 
 	/* 2 seconds for an ACK until we can find the packet lifetime */
-	timeout = mad_send_wr->send_wr.wr.ud.timeout_ms;
+	timeout = mad_send_wr->send_buf.timeout_ms;
 	if (!timeout || timeout > 2000)
 		mad_send_wr->timeout = msecs_to_jiffies(2000);
 	mad_send_wr->seg_num++;
@@ -640,7 +638,7 @@ static void abort_send(struct ib_mad_agent_private *agent, __be64 tid,
 
 	wc.status = IB_WC_REM_ABORT_ERR;
 	wc.vendor_err = rmpp_status;
-	wc.wr_id = mad_send_wr->wr_id;
+	wc.send_buf = &mad_send_wr->send_buf;
 	ib_mad_complete_send_wr(mad_send_wr, &wc);
 	return;
 out:
@@ -694,12 +692,12 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent,
 
 	if (seg_num > mad_send_wr->last_ack) {
 		mad_send_wr->last_ack = seg_num;
-		mad_send_wr->retries = mad_send_wr->send_wr.wr.ud.retries;
+		mad_send_wr->retries = mad_send_wr->send_buf.retries;
 	}
 	mad_send_wr->newwin = newwin;
 	if (mad_send_wr->last_ack == mad_send_wr->total_seg) {
 		/* If no response is expected, the ACK completes the send */
-		if (!mad_send_wr->send_wr.wr.ud.timeout_ms) {
+		if (!mad_send_wr->send_buf.timeout_ms) {
 			struct ib_mad_send_wc wc;
 
 			ib_mark_mad_done(mad_send_wr);
@@ -707,13 +705,13 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent,
 
 			wc.status = IB_WC_SUCCESS;
 			wc.vendor_err = 0;
-			wc.wr_id = mad_send_wr->wr_id;
+			wc.send_buf = &mad_send_wr->send_buf;
 			ib_mad_complete_send_wr(mad_send_wr, &wc);
 			return;
 		}
 		if (mad_send_wr->refcount == 1)
-			ib_reset_mad_timeout(mad_send_wr, mad_send_wr->
-					     send_wr.wr.ud.timeout_ms);
+			ib_reset_mad_timeout(mad_send_wr,
+					     mad_send_wr->send_buf.timeout_ms);
 	} else if (mad_send_wr->refcount == 1 &&
 		   mad_send_wr->seg_num < mad_send_wr->newwin &&
 		   mad_send_wr->seg_num <= mad_send_wr->total_seg) {
@@ -842,7 +840,7 @@ int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr)
 	struct ib_rmpp_mad *rmpp_mad;
 	int i, total_len, ret;
 
-	rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr;
+	rmpp_mad = mad_send_wr->send_buf.mad;
 	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
 	      IB_MGMT_RMPP_FLAG_ACTIVE))
 		return IB_RMPP_RESULT_UNHANDLED;
@@ -863,7 +861,7 @@ int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr)
 
 	mad_send_wr->total_seg = (total_len - mad_send_wr->data_offset) /
 			(sizeof(struct ib_rmpp_mad) - mad_send_wr->data_offset);
-	mad_send_wr->pad = total_len - offsetof(struct ib_rmpp_mad, data) -
+	mad_send_wr->pad = total_len - IB_MGMT_RMPP_HDR -
 			   be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
 
 	/* We need to wait for the final ACK even if there isn't a response */
@@ -878,23 +876,15 @@ int ib_process_rmpp_send_wc(struct ib_mad_send_wr_private *mad_send_wr,
 			    struct ib_mad_send_wc *mad_send_wc)
 {
 	struct ib_rmpp_mad *rmpp_mad;
-	struct ib_mad_send_buf *msg;
 	int ret;
 
-	rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr;
+	rmpp_mad = mad_send_wr->send_buf.mad;
 	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
 	      IB_MGMT_RMPP_FLAG_ACTIVE))
 		return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */
 
-	if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA) {
-		msg = (struct ib_mad_send_buf *) (unsigned long)
-			mad_send_wc->wr_id;
-		if (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_ACK)
-			ib_free_send_mad(msg);
-		else
-			free_msg(msg);
+	if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA)
 		return IB_RMPP_RESULT_INTERNAL; /* ACK, STOP, or ABORT */
-	}
 
 	if (mad_send_wc->status != IB_WC_SUCCESS ||
 	    mad_send_wr->status != IB_WC_SUCCESS)
@@ -905,7 +895,7 @@ int ib_process_rmpp_send_wc(struct ib_mad_send_wr_private *mad_send_wr,
 
 	if (mad_send_wr->last_ack == mad_send_wr->total_seg) {
 		mad_send_wr->timeout =
-			msecs_to_jiffies(mad_send_wr->send_wr.wr.ud.timeout_ms);
+			msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
 		return IB_RMPP_RESULT_PROCESSED; /* Send done */
 	}
@@ -926,7 +916,7 @@ int ib_retry_rmpp(struct ib_mad_send_wr_private *mad_send_wr)
 	struct ib_rmpp_mad *rmpp_mad;
 	int ret;
 
-	rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr;
+	rmpp_mad = mad_send_wr->send_buf.mad;
 	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
 	      IB_MGMT_RMPP_FLAG_ACTIVE))
 		return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */
......
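Taken together, these hunks switch mad_rmpp.c from the raw ib_send_wr/wr_id plumbing to the ib_mad_send_buf API: allocate the buffer with header and payload sizes (IB_MGMT_RMPP_HDR, IB_MGMT_RMPP_DATA), attach the address handle to the buffer itself, and post the buffer rather than a work request. A condensed usage sketch in the new style (error handling trimmed, helper name ours):

/* Build and post an RMPP response MAD with the reworked send API. */
static int send_reply(struct ib_mad_agent *agent, struct ib_ah *ah,
		      u32 remote_qpn, u16 pkey_index)
{
	struct ib_mad_send_buf *msg;
	int ret;

	msg = ib_create_send_mad(agent, remote_qpn, pkey_index, 1,
				 IB_MGMT_RMPP_HDR, IB_MGMT_RMPP_DATA,
				 GFP_KERNEL);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->ah = ah;		/* the AH now travels with the buffer */
	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		ib_free_send_mad(msg);
	return ret;
}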
@@ -51,6 +51,8 @@ ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent,
 int ib_process_rmpp_send_wc(struct ib_mad_send_wr_private *mad_send_wr,
 			    struct ib_mad_send_wc *mad_send_wc);
 
+void ib_rmpp_send_handler(struct ib_mad_send_wc *mad_send_wc);
+
 void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent);
 
 int ib_retry_rmpp(struct ib_mad_send_wr_private *mad_send_wr);
......
This diff is collapsed.
@@ -39,6 +39,8 @@
 #ifndef __SMI_H_
 #define __SMI_H_
 
+#include <rdma/ib_smi.h>
+
 int smi_handle_dr_smp_recv(struct ib_smp *smp,
 			   u8 node_type,
 			   int port_num,
......
@@ -65,6 +65,11 @@ struct port_table_attribute {
 	int			index;
 };
 
+static inline int ibdev_is_alive(const struct ib_device *dev)
+{
+	return dev->reg_state == IB_DEV_REGISTERED;
+}
+
 static ssize_t port_attr_show(struct kobject *kobj,
 			      struct attribute *attr, char *buf)
 {
@@ -74,6 +79,8 @@ static ssize_t port_attr_show(struct kobject *kobj,
 
 	if (!port_attr->show)
 		return -EIO;
+	if (!ibdev_is_alive(p->ibdev))
+		return -ENODEV;
 
 	return port_attr->show(p, port_attr, buf);
 }
@@ -581,6 +588,9 @@ static ssize_t show_node_type(struct class_device *cdev, char *buf)
 {
 	struct ib_device *dev = container_of(cdev, struct ib_device, class_dev);
 
+	if (!ibdev_is_alive(dev))
+		return -ENODEV;
+
 	switch (dev->node_type) {
 	case IB_NODE_CA:     return sprintf(buf, "%d: CA\n", dev->node_type);
 	case IB_NODE_SWITCH: return sprintf(buf, "%d: switch\n", dev->node_type);
@@ -595,6 +605,9 @@ static ssize_t show_sys_image_guid(struct class_device *cdev, char *buf)
 	struct ib_device_attr attr;
 	ssize_t ret;
 
+	if (!ibdev_is_alive(dev))
+		return -ENODEV;
+
 	ret = ib_query_device(dev, &attr);
 	if (ret)
 		return ret;
@@ -612,6 +625,9 @@ static ssize_t show_node_guid(struct class_device *cdev, char *buf)
 	struct ib_device_attr attr;
 	ssize_t ret;
 
+	if (!ibdev_is_alive(dev))
+		return -ENODEV;
+
 	ret = ib_query_device(dev, &attr);
 	if (ret)
 		return ret;
......
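The same ibdev_is_alive() guard is applied to every sysfs handler that can touch the device, closing the window where an attribute file is read after the device has unregistered. The pattern, sketched for a hypothetical attribute (not part of this diff):

/* Refuse to touch hardware once the device is no longer registered. */
static ssize_t show_foo(struct class_device *cdev, char *buf)
{
	struct ib_device *dev = container_of(cdev, struct ib_device, class_dev);

	if (!ibdev_is_alive(dev))
		return -ENODEV;

	return sprintf(buf, "%d\n", 0);	/* placeholder value */
}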
This diff is collapsed.
This diff is collapsed.
@@ -3,6 +3,7 @@
  * Copyright (c) 2005 Cisco Systems.  All rights reserved.
  * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
  * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2005 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -38,29 +39,47 @@
 #ifndef UVERBS_H
 #define UVERBS_H
 
+/* Include device.h and fs.h until cdev.h is self-sufficient */
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/cdev.h>
 #include <linux/kref.h>
 #include <linux/idr.h>
 
 #include <rdma/ib_verbs.h>
 #include <rdma/ib_user_verbs.h>
 
+/*
+ * Our lifetime rules for these structs are the following:
+ *
+ * struct ib_uverbs_device: One reference is held by the module and
+ * released in ib_uverbs_remove_one().  Another reference is taken by
+ * ib_uverbs_open() each time the character special file is opened,
+ * and released in ib_uverbs_release_file() when the file is released.
+ *
+ * struct ib_uverbs_file: One reference is held by the VFS and
+ * released when the file is closed.  Another reference is taken when
+ * an asynchronous event queue file is created and released when the
+ * event file is closed.
+ *
+ * struct ib_uverbs_event_file: One reference is held by the VFS and
+ * released when the file is closed.  For asynchronous event files,
+ * another reference is held by the corresponding main context file
+ * and released when that file is closed.  For completion event files,
+ * a reference is taken when a CQ is created that uses the file, and
+ * released when the CQ is destroyed.
+ */
+
 struct ib_uverbs_device {
+	struct kref				ref;
 	int					devnum;
-	struct cdev				dev;
-	struct class_device			class_dev;
+	struct cdev			       *dev;
+	struct class_device		       *class_dev;
 	struct ib_device		       *ib_dev;
-	int					num_comp;
+	int					num_comp_vectors;
 };
 
 struct ib_uverbs_event_file {
 	struct kref				ref;
+	struct file			       *file;
 	struct ib_uverbs_file		       *uverbs_file;
 	spinlock_t				lock;
-	int					fd;
 	int					is_async;
 	wait_queue_head_t			poll_wait;
 	struct fasync_struct		       *async_queue;
@@ -73,8 +92,7 @@ struct ib_uverbs_file {
 	struct ib_uverbs_device		       *device;
 	struct ib_ucontext		       *ucontext;
 	struct ib_event_handler			event_handler;
-	struct ib_uverbs_event_file		async_file;
-	struct ib_uverbs_event_file		comp_file[1];
+	struct ib_uverbs_event_file	       *async_file;
 };
 
 struct ib_uverbs_event {
@@ -110,10 +128,23 @@ extern struct idr ib_uverbs_cq_idr;
 extern struct idr ib_uverbs_qp_idr;
 extern struct idr ib_uverbs_srq_idr;
 
+struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file,
+					int is_async, int *fd);
+void ib_uverbs_release_event_file(struct kref *ref);
+struct ib_uverbs_event_file *ib_uverbs_lookup_comp_file(int fd);
+
+void ib_uverbs_release_ucq(struct ib_uverbs_file *file,
+			   struct ib_uverbs_event_file *ev_file,
+			   struct ib_ucq_object *uobj);
+void ib_uverbs_release_uevent(struct ib_uverbs_file *file,
+			      struct ib_uevent_object *uobj);
+
 void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context);
 void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr);
 void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr);
 void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr);
+void ib_uverbs_event_handler(struct ib_event_handler *handler,
+			     struct ib_event *event);
 
 int ib_umem_get(struct ib_device *dev, struct ib_umem *mem,
 		void *addr, size_t size, int write);
@@ -125,21 +156,26 @@ void ib_umem_release_on_close(struct ib_device *dev, struct ib_umem *umem);
 		const char __user *buf, int in_len,	\
 		int out_len)
 
-IB_UVERBS_DECLARE_CMD(query_params);
 IB_UVERBS_DECLARE_CMD(get_context);
 IB_UVERBS_DECLARE_CMD(query_device);
 IB_UVERBS_DECLARE_CMD(query_port);
-IB_UVERBS_DECLARE_CMD(query_gid);
-IB_UVERBS_DECLARE_CMD(query_pkey);
 IB_UVERBS_DECLARE_CMD(alloc_pd);
 IB_UVERBS_DECLARE_CMD(dealloc_pd);
 IB_UVERBS_DECLARE_CMD(reg_mr);
 IB_UVERBS_DECLARE_CMD(dereg_mr);
+IB_UVERBS_DECLARE_CMD(create_comp_channel);
 IB_UVERBS_DECLARE_CMD(create_cq);
+IB_UVERBS_DECLARE_CMD(poll_cq);
+IB_UVERBS_DECLARE_CMD(req_notify_cq);
 IB_UVERBS_DECLARE_CMD(destroy_cq);
 IB_UVERBS_DECLARE_CMD(create_qp);
 IB_UVERBS_DECLARE_CMD(modify_qp);
 IB_UVERBS_DECLARE_CMD(destroy_qp);
+IB_UVERBS_DECLARE_CMD(post_send);
+IB_UVERBS_DECLARE_CMD(post_recv);
+IB_UVERBS_DECLARE_CMD(post_srq_recv);
+IB_UVERBS_DECLARE_CMD(create_ah);
+IB_UVERBS_DECLARE_CMD(destroy_ah);
 IB_UVERBS_DECLARE_CMD(attach_mcast);
 IB_UVERBS_DECLARE_CMD(detach_mcast);
 IB_UVERBS_DECLARE_CMD(create_srq);
......
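The lifetime comment added above is the standard kref pattern: every path that can outlive the creator takes a reference, and the last kref_put() frees the object. A minimal sketch of what open/close presumably do with the new ref field (illustrative bodies, not the real driver code):

static void ib_uverbs_release_dev(struct kref *ref)
{
	struct ib_uverbs_device *dev =
		container_of(ref, struct ib_uverbs_device, ref);

	kfree(dev);
}

/* Each open of the character special file pins the device... */
static void uverbs_dev_get(struct ib_uverbs_device *dev)
{
	kref_get(&dev->ref);
}

/* ...and each release drops it; the final put frees the struct. */
static void uverbs_dev_put(struct ib_uverbs_device *dev)
{
	kref_put(&dev->ref, ib_uverbs_release_dev);
}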
This diff is collapsed.
This diff is collapsed.
@@ -523,16 +523,22 @@ EXPORT_SYMBOL(ib_dealloc_fmr);
 
 int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
 {
-	return qp->device->attach_mcast ?
-		qp->device->attach_mcast(qp, gid, lid) :
-		-ENOSYS;
+	if (!qp->device->attach_mcast)
+		return -ENOSYS;
+	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
+		return -EINVAL;
+
+	return qp->device->attach_mcast(qp, gid, lid);
 }
 EXPORT_SYMBOL(ib_attach_mcast);
 
 int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
 {
-	return qp->device->detach_mcast ?
-		qp->device->detach_mcast(qp, gid, lid) :
-		-ENOSYS;
+	if (!qp->device->detach_mcast)
+		return -ENOSYS;
+	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
+		return -EINVAL;
+
+	return qp->device->detach_mcast(qp, gid, lid);
 }
 EXPORT_SYMBOL(ib_detach_mcast);
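The new sanity check encodes two InfiniBand rules: multicast GIDs always carry the 0xff prefix in their first byte, and only UD QPs may join multicast groups. A caller-side sketch (the group GID and LID would come from elsewhere, e.g. an SA join response):

/* Join a UD QP to a multicast group; the core now rejects
 * non-multicast GIDs and non-UD QPs up front. */
static int join_group(struct ib_qp *qp, union ib_gid *mgid, u16 mlid)
{
	if (qp->qp_type != IB_QPT_UD)
		return -EINVAL;	/* mirrors the check in ib_attach_mcast() */

	return ib_attach_mcast(qp, mgid, mlid);
}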
@@ -7,4 +7,5 @@ obj-$(CONFIG_INFINIBAND_MTHCA) += ib_mthca.o
 ib_mthca-y :=	mthca_main.o mthca_cmd.o mthca_profile.o mthca_reset.o \
 		mthca_allocator.o mthca_eq.o mthca_pd.o mthca_cq.o \
 		mthca_mr.o mthca_qp.o mthca_av.o mthca_mcg.o mthca_mad.o \
-		mthca_provider.o mthca_memfree.o mthca_uar.o mthca_srq.o
+		mthca_provider.o mthca_memfree.o mthca_uar.o mthca_srq.o \
+		mthca_catas.o
 /*
  * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
  * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2005 Cisco Systems. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -706,9 +707,13 @@ int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status)
 	MTHCA_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET);
 	dev->cmd.max_cmds = 1 << lg;
 
+	MTHCA_GET(dev->catas_err.addr, outbox, QUERY_FW_ERR_START_OFFSET);
+	MTHCA_GET(dev->catas_err.size, outbox, QUERY_FW_ERR_SIZE_OFFSET);
+
 	mthca_dbg(dev, "FW version %012llx, max commands %d\n",
 		  (unsigned long long) dev->fw_ver, dev->cmd.max_cmds);
+	mthca_dbg(dev, "Catastrophic error buffer at 0x%llx, size 0x%x\n",
+		  (unsigned long long) dev->catas_err.addr, dev->catas_err.size);
 
 	if (mthca_is_memfree(dev)) {
 		MTHCA_GET(dev->fw.arbel.fw_pages, outbox, QUERY_FW_SIZE_OFFSET);
@@ -933,9 +938,9 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
 		goto out;
 
 	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SRQ_SZ_OFFSET);
-	dev_lim->max_srq_sz = 1 << field;
+	dev_lim->max_srq_sz = (1 << field) - 1;
 	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_SZ_OFFSET);
-	dev_lim->max_qp_sz = 1 << field;
+	dev_lim->max_qp_sz = (1 << field) - 1;
 	MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_QP_OFFSET);
 	dev_lim->reserved_qps = 1 << (field & 0xf);
 	MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_OFFSET);
@@ -1045,6 +1050,8 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
 		 dev_lim->max_pds, dev_lim->reserved_pds, dev_lim->reserved_uars);
 	mthca_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n",
 		 dev_lim->max_pds, dev_lim->reserved_mgms);
+	mthca_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
+		 dev_lim->max_cq_sz, dev_lim->max_qp_sz, dev_lim->max_srq_sz);
 
 	mthca_dbg(dev, "Flags: %08x\n", dev_lim->flags);
......
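The "(1 << field) - 1" fix reflects the encoding of QUERY_DEV_LIM limits: the firmware field holds log2 of the queue size, and the usable maximum is one less than that power of two. In isolation (helper name ours):

/* Decode a log2-encoded queue limit from QUERY_DEV_LIM. */
static inline int decode_max_sz(u8 field)
{
	return (1 << field) - 1;	/* e.g. field == 16 -> 65535 */
}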
@@ -83,6 +83,8 @@ enum {
 	/* Arbel FW gives us these, but we need them for Tavor */
 	MTHCA_MPT_ENTRY_SIZE  =  0x40,
 	MTHCA_MTT_SEG_SIZE    =  0x40,
+
+	MTHCA_QP_PER_MGM      = 4 * (MTHCA_MGM_ENTRY_SIZE / 16 - 2)
 };
 
 enum {
@@ -128,12 +130,16 @@ struct mthca_limits {
 	int      num_uars;
 	int      max_sg;
 	int      num_qps;
+	int      max_wqes;
+	int      max_qp_init_rdma;
 	int      reserved_qps;
 	int      num_srqs;
+	int      max_srq_wqes;
 	int      reserved_srqs;
 	int      num_eecs;
 	int      reserved_eecs;
 	int      num_cqs;
+	int      max_cqes;
 	int      reserved_cqs;
 	int      num_eqs;
 	int      reserved_eqs;
@@ -148,6 +154,7 @@ struct mthca_limits {
 	int      reserved_mcgs;
 	int      num_pds;
 	int      reserved_pds;
+	u32      flags;
 	u8       port_width_cap;
 };
 
@@ -251,6 +258,14 @@ struct mthca_mcg_table {
 	struct mthca_icm_table *table;
 };
 
+struct mthca_catas_err {
+	u64			addr;
+	u32 __iomem	       *map;
+	unsigned long		stop;
+	u32			size;
+	struct timer_list	timer;
+};
+
 struct mthca_dev {
 	struct ib_device  ib_dev;
 	struct pci_dev   *pdev;
@@ -311,6 +326,8 @@ struct mthca_dev {
 	struct mthca_av_table  av_table;
 	struct mthca_mcg_table mcg_table;
 
+	struct mthca_catas_err catas_err;
+
 	struct mthca_uar       driver_uar;
 	struct mthca_db_table *db_tab;
 	struct mthca_pd        driver_pd;
@@ -398,6 +415,9 @@ void mthca_cleanup_mcg_table(struct mthca_dev *dev);
 int mthca_register_device(struct mthca_dev *dev);
 void mthca_unregister_device(struct mthca_dev *dev);
 
+void mthca_start_catas_poll(struct mthca_dev *dev);
+void mthca_stop_catas_poll(struct mthca_dev *dev);
+
 int mthca_uar_alloc(struct mthca_dev *dev, struct mthca_uar *uar);
 void mthca_uar_free(struct mthca_dev *dev, struct mthca_uar *uar);
@@ -447,6 +467,8 @@ void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
 int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
 		    struct ib_srq_attr *attr, struct mthca_srq *srq);
 void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq);
+int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
+		     enum ib_srq_attr_mask attr_mask);
 void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
 		     enum ib_event_type event_type);
 void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr);
......
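struct mthca_catas_err carries everything a periodic poller needs: the buffer address and size reported by QUERY_FW, an ioremapped window, and a timer. A plausible shape for the poll routine behind mthca_start_catas_poll(), using the 2.6-era timer API (interval and details are assumptions, not taken from this diff):

#define MTHCA_CATAS_POLL_INTERVAL	(5 * HZ)	/* assumed */

static void poll_catas(unsigned long dev_ptr)
{
	struct mthca_dev *dev = (struct mthca_dev *) dev_ptr;
	int i;

	/* A nonzero word in the catastrophic error buffer means the
	 * HCA has stopped; otherwise re-arm the timer. */
	for (i = 0; i < dev->catas_err.size; ++i)
		if (readl(dev->catas_err.map + i)) {
			mthca_warn(dev, "HCA catastrophic error detected\n");
			return;
		}

	mod_timer(&dev->catas_err.timer,
		  jiffies + MTHCA_CATAS_POLL_INTERVAL);
}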
@@ -83,7 +83,8 @@ enum {
 	MTHCA_EVENT_TYPE_PATH_MIG	    = 0x01,
 	MTHCA_EVENT_TYPE_COMM_EST	    = 0x02,
 	MTHCA_EVENT_TYPE_SQ_DRAINED	    = 0x03,
-	MTHCA_EVENT_TYPE_SRQ_LAST_WQE	    = 0x13,
+	MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE    = 0x13,
+	MTHCA_EVENT_TYPE_SRQ_LIMIT	    = 0x14,
 	MTHCA_EVENT_TYPE_CQ_ERROR	    = 0x04,
 	MTHCA_EVENT_TYPE_WQ_CATAS_ERROR     = 0x05,
 	MTHCA_EVENT_TYPE_EEC_CATAS_ERROR    = 0x06,
@@ -110,8 +111,9 @@ enum {
 				(1ULL << MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR)  | \
 				(1ULL << MTHCA_EVENT_TYPE_PORT_CHANGE)        | \
 				(1ULL << MTHCA_EVENT_TYPE_ECC_DETECT))
-#define MTHCA_SRQ_EVENT_MASK    (1ULL << MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
-				(1ULL << MTHCA_EVENT_TYPE_SRQ_LAST_WQE)
+#define MTHCA_SRQ_EVENT_MASK   ((1ULL << MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
+				(1ULL << MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE)    | \
+				(1ULL << MTHCA_EVENT_TYPE_SRQ_LIMIT))
 #define MTHCA_CMD_EVENT_MASK    (1ULL << MTHCA_EVENT_TYPE_CMD)
 
 #define MTHCA_EQ_DB_INC_CI     (1 << 24)
@@ -141,6 +143,9 @@ struct mthca_eqe {
 		struct {
 			__be32 qpn;
 		} __attribute__((packed)) qp;
+		struct {
+			__be32 srqn;
+		} __attribute__((packed)) srq;
 		struct {
 			__be32 cqn;
 			u32    reserved1;
@@ -305,6 +310,16 @@ static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
 					       IB_EVENT_SQ_DRAINED);
 			break;
 
+		case MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE:
+			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
+				       IB_EVENT_QP_LAST_WQE_REACHED);
+			break;
+
+		case MTHCA_EVENT_TYPE_SRQ_LIMIT:
+			mthca_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff,
+					IB_EVENT_SRQ_LIMIT_REACHED);
+			break;
+
 		case MTHCA_EVENT_TYPE_WQ_CATAS_ERROR:
 			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
 				       IB_EVENT_QP_FATAL);
......
The remaining file diffs in this commit are collapsed.