Commit da2dfaa3 authored by Ira Weiny, committed by Doug Ledford

IB/mad: Support alternate Base Versions when creating MADs

In preparation to support the new OPA MAD Base version, add a base version
parameter to ib_create_send_mad and set it to IB_MGMT_BASE_VERSION for current
users.

Definition of the new base version and its processing will occur in later
patches.
Signed-off-by: Ira Weiny <ira.weiny@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 29869eaf
...@@ -108,7 +108,8 @@ void agent_send_response(const struct ib_mad *mad, const struct ib_grh *grh, ...@@ -108,7 +108,8 @@ void agent_send_response(const struct ib_mad *mad, const struct ib_grh *grh,
send_buf = ib_create_send_mad(agent, wc->src_qp, wc->pkey_index, 0, send_buf = ib_create_send_mad(agent, wc->src_qp, wc->pkey_index, 0,
IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
GFP_KERNEL); GFP_KERNEL,
IB_MGMT_BASE_VERSION);
if (IS_ERR(send_buf)) { if (IS_ERR(send_buf)) {
dev_err(&device->dev, "ib_create_send_mad error\n"); dev_err(&device->dev, "ib_create_send_mad error\n");
goto err1; goto err1;
......
...@@ -267,7 +267,8 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv, ...@@ -267,7 +267,8 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn, m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
cm_id_priv->av.pkey_index, cm_id_priv->av.pkey_index,
0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA, 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
GFP_ATOMIC); GFP_ATOMIC,
IB_MGMT_BASE_VERSION);
if (IS_ERR(m)) { if (IS_ERR(m)) {
ib_destroy_ah(ah); ib_destroy_ah(ah);
return PTR_ERR(m); return PTR_ERR(m);
...@@ -297,7 +298,8 @@ static int cm_alloc_response_msg(struct cm_port *port, ...@@ -297,7 +298,8 @@ static int cm_alloc_response_msg(struct cm_port *port,
m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index, m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA, 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
GFP_ATOMIC); GFP_ATOMIC,
IB_MGMT_BASE_VERSION);
if (IS_ERR(m)) { if (IS_ERR(m)) {
ib_destroy_ah(ah); ib_destroy_ah(ah);
return PTR_ERR(m); return PTR_ERR(m);
......
...@@ -920,7 +920,8 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent, ...@@ -920,7 +920,8 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
u32 remote_qpn, u16 pkey_index, u32 remote_qpn, u16 pkey_index,
int rmpp_active, int rmpp_active,
int hdr_len, int data_len, int hdr_len, int data_len,
gfp_t gfp_mask) gfp_t gfp_mask,
u8 base_version)
{ {
struct ib_mad_agent_private *mad_agent_priv; struct ib_mad_agent_private *mad_agent_priv;
struct ib_mad_send_wr_private *mad_send_wr; struct ib_mad_send_wr_private *mad_send_wr;
......
...@@ -139,7 +139,8 @@ static void ack_recv(struct mad_rmpp_recv *rmpp_recv, ...@@ -139,7 +139,8 @@ static void ack_recv(struct mad_rmpp_recv *rmpp_recv,
hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class); hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp, msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp,
recv_wc->wc->pkey_index, 1, hdr_len, recv_wc->wc->pkey_index, 1, hdr_len,
0, GFP_KERNEL); 0, GFP_KERNEL,
IB_MGMT_BASE_VERSION);
if (IS_ERR(msg)) if (IS_ERR(msg))
return; return;
...@@ -165,7 +166,8 @@ static struct ib_mad_send_buf *alloc_response_msg(struct ib_mad_agent *agent, ...@@ -165,7 +166,8 @@ static struct ib_mad_send_buf *alloc_response_msg(struct ib_mad_agent *agent,
hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class); hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
msg = ib_create_send_mad(agent, recv_wc->wc->src_qp, msg = ib_create_send_mad(agent, recv_wc->wc->src_qp,
recv_wc->wc->pkey_index, 1, recv_wc->wc->pkey_index, 1,
hdr_len, 0, GFP_KERNEL); hdr_len, 0, GFP_KERNEL,
IB_MGMT_BASE_VERSION);
if (IS_ERR(msg)) if (IS_ERR(msg))
ib_destroy_ah(ah); ib_destroy_ah(ah);
else { else {
......
...@@ -583,7 +583,8 @@ static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask) ...@@ -583,7 +583,8 @@ static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask)
query->mad_buf = ib_create_send_mad(query->port->agent, 1, query->mad_buf = ib_create_send_mad(query->port->agent, 1,
query->sm_ah->pkey_index, query->sm_ah->pkey_index,
0, IB_MGMT_SA_HDR, IB_MGMT_SA_DATA, 0, IB_MGMT_SA_HDR, IB_MGMT_SA_DATA,
gfp_mask); gfp_mask,
IB_MGMT_BASE_VERSION);
if (IS_ERR(query->mad_buf)) { if (IS_ERR(query->mad_buf)) {
kref_put(&query->sm_ah->ref, free_sm_ah); kref_put(&query->sm_ah->ref, free_sm_ah);
return -ENOMEM; return -ENOMEM;
......
...@@ -520,7 +520,8 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf, ...@@ -520,7 +520,8 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
packet->msg = ib_create_send_mad(agent, packet->msg = ib_create_send_mad(agent,
be32_to_cpu(packet->mad.hdr.qpn), be32_to_cpu(packet->mad.hdr.qpn),
packet->mad.hdr.pkey_index, rmpp_active, packet->mad.hdr.pkey_index, rmpp_active,
hdr_len, data_len, GFP_KERNEL); hdr_len, data_len, GFP_KERNEL,
IB_MGMT_BASE_VERSION);
if (IS_ERR(packet->msg)) { if (IS_ERR(packet->msg)) {
ret = PTR_ERR(packet->msg); ret = PTR_ERR(packet->msg);
goto err_ah; goto err_ah;
......
...@@ -367,7 +367,8 @@ static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, const struct ib_m ...@@ -367,7 +367,8 @@ static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, const struct ib_m
if (agent) { if (agent) {
send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR, send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
IB_MGMT_MAD_DATA, GFP_ATOMIC); IB_MGMT_MAD_DATA, GFP_ATOMIC,
IB_MGMT_BASE_VERSION);
if (IS_ERR(send_buf)) if (IS_ERR(send_buf))
return; return;
/* /*
......
...@@ -170,7 +170,8 @@ static void forward_trap(struct mthca_dev *dev, ...@@ -170,7 +170,8 @@ static void forward_trap(struct mthca_dev *dev,
if (agent) { if (agent) {
send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR, send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
IB_MGMT_MAD_DATA, GFP_ATOMIC); IB_MGMT_MAD_DATA, GFP_ATOMIC,
IB_MGMT_BASE_VERSION);
if (IS_ERR(send_buf)) if (IS_ERR(send_buf))
return; return;
/* /*
......
...@@ -5502,7 +5502,8 @@ static void try_7322_ipg(struct qib_pportdata *ppd) ...@@ -5502,7 +5502,8 @@ static void try_7322_ipg(struct qib_pportdata *ppd)
goto retry; goto retry;
send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR, send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
IB_MGMT_MAD_DATA, GFP_ATOMIC); IB_MGMT_MAD_DATA, GFP_ATOMIC,
IB_MGMT_BASE_VERSION);
if (IS_ERR(send_buf)) if (IS_ERR(send_buf))
goto retry; goto retry;
......
...@@ -83,7 +83,8 @@ static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len) ...@@ -83,7 +83,8 @@ static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len)
return; return;
send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR, send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
IB_MGMT_MAD_DATA, GFP_ATOMIC); IB_MGMT_MAD_DATA, GFP_ATOMIC,
IB_MGMT_BASE_VERSION);
if (IS_ERR(send_buf)) if (IS_ERR(send_buf))
return; return;
......
...@@ -476,7 +476,8 @@ static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent, ...@@ -476,7 +476,8 @@ static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent,
rsp = ib_create_send_mad(mad_agent, mad_wc->wc->src_qp, rsp = ib_create_send_mad(mad_agent, mad_wc->wc->src_qp,
mad_wc->wc->pkey_index, 0, mad_wc->wc->pkey_index, 0,
IB_MGMT_DEVICE_HDR, IB_MGMT_DEVICE_DATA, IB_MGMT_DEVICE_HDR, IB_MGMT_DEVICE_DATA,
GFP_KERNEL); GFP_KERNEL,
IB_MGMT_BASE_VERSION);
if (IS_ERR(rsp)) if (IS_ERR(rsp))
goto err_rsp; goto err_rsp;
......
...@@ -618,6 +618,7 @@ int ib_process_mad_wc(struct ib_mad_agent *mad_agent, ...@@ -618,6 +618,7 @@ int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
* automatically adjust the allocated buffer size to account for any * automatically adjust the allocated buffer size to account for any
* additional padding that may be necessary. * additional padding that may be necessary.
* @gfp_mask: GFP mask used for the memory allocation. * @gfp_mask: GFP mask used for the memory allocation.
* @base_version: Base Version of this MAD
* *
* This routine allocates a MAD for sending. The returned MAD send buffer * This routine allocates a MAD for sending. The returned MAD send buffer
* will reference a data buffer usable for sending a MAD, along * will reference a data buffer usable for sending a MAD, along
...@@ -633,7 +634,8 @@ struct ib_mad_send_buf *ib_create_send_mad(struct ib_mad_agent *mad_agent, ...@@ -633,7 +634,8 @@ struct ib_mad_send_buf *ib_create_send_mad(struct ib_mad_agent *mad_agent,
u32 remote_qpn, u16 pkey_index, u32 remote_qpn, u16 pkey_index,
int rmpp_active, int rmpp_active,
int hdr_len, int data_len, int hdr_len, int data_len,
gfp_t gfp_mask); gfp_t gfp_mask,
u8 base_version);
/** /**
* ib_is_mad_class_rmpp - returns whether given management class * ib_is_mad_class_rmpp - returns whether given management class
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment