Commit 5d9fb044 authored by Ira Weiny, committed by Doug Ledford

IB/core: Change rdma_protocol_iboe to roce

After discussion upstream, it was agreed to transition the usage of iboe
in the kernel to roce.  This keeps our terminology consistent with what
was finalized in the IBTA Annex 16 and IBTA Annex 17 publications.
Signed-off-by: Ira Weiny <ira.weiny@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent f9b22e35
...@@ -391,7 +391,7 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv, ...@@ -391,7 +391,7 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv,
if (listen_id_priv) { if (listen_id_priv) {
cma_dev = listen_id_priv->cma_dev; cma_dev = listen_id_priv->cma_dev;
port = listen_id_priv->id.port_num; port = listen_id_priv->id.port_num;
gidp = rdma_protocol_iboe(cma_dev->device, port) ? gidp = rdma_protocol_roce(cma_dev->device, port) ?
&iboe_gid : &gid; &iboe_gid : &gid;
ret = cma_validate_port(cma_dev->device, port, gidp, ret = cma_validate_port(cma_dev->device, port, gidp,
...@@ -409,7 +409,7 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv, ...@@ -409,7 +409,7 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv,
listen_id_priv->id.port_num == port) listen_id_priv->id.port_num == port)
continue; continue;
gidp = rdma_protocol_iboe(cma_dev->device, port) ? gidp = rdma_protocol_roce(cma_dev->device, port) ?
&iboe_gid : &gid; &iboe_gid : &gid;
ret = cma_validate_port(cma_dev->device, port, gidp, ret = cma_validate_port(cma_dev->device, port, gidp,
...@@ -647,7 +647,7 @@ static int cma_modify_qp_rtr(struct rdma_id_private *id_priv, ...@@ -647,7 +647,7 @@ static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
BUG_ON(id_priv->cma_dev->device != id_priv->id.device); BUG_ON(id_priv->cma_dev->device != id_priv->id.device);
if (rdma_protocol_iboe(id_priv->id.device, id_priv->id.port_num)) { if (rdma_protocol_roce(id_priv->id.device, id_priv->id.port_num)) {
ret = rdma_addr_find_smac_by_sgid(&sgid, qp_attr.smac, NULL); ret = rdma_addr_find_smac_by_sgid(&sgid, qp_attr.smac, NULL);
if (ret) if (ret)
...@@ -1966,7 +1966,7 @@ int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms) ...@@ -1966,7 +1966,7 @@ int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
atomic_inc(&id_priv->refcount); atomic_inc(&id_priv->refcount);
if (rdma_cap_ib_sa(id->device, id->port_num)) if (rdma_cap_ib_sa(id->device, id->port_num))
ret = cma_resolve_ib_route(id_priv, timeout_ms); ret = cma_resolve_ib_route(id_priv, timeout_ms);
else if (rdma_protocol_iboe(id->device, id->port_num)) else if (rdma_protocol_roce(id->device, id->port_num))
ret = cma_resolve_iboe_route(id_priv); ret = cma_resolve_iboe_route(id_priv);
else if (rdma_protocol_iwarp(id->device, id->port_num)) else if (rdma_protocol_iwarp(id->device, id->port_num))
ret = cma_resolve_iw_route(id_priv, timeout_ms); ret = cma_resolve_iw_route(id_priv, timeout_ms);
...@@ -3325,7 +3325,7 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr, ...@@ -3325,7 +3325,7 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
list_add(&mc->list, &id_priv->mc_list); list_add(&mc->list, &id_priv->mc_list);
spin_unlock(&id_priv->lock); spin_unlock(&id_priv->lock);
if (rdma_protocol_iboe(id->device, id->port_num)) { if (rdma_protocol_roce(id->device, id->port_num)) {
kref_init(&mc->mcref); kref_init(&mc->mcref);
ret = cma_iboe_join_multicast(id_priv, mc); ret = cma_iboe_join_multicast(id_priv, mc);
} else if (rdma_cap_ib_mcast(id->device, id->port_num)) } else if (rdma_cap_ib_mcast(id->device, id->port_num))
...@@ -3365,7 +3365,7 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr) ...@@ -3365,7 +3365,7 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
if (rdma_cap_ib_mcast(id->device, id->port_num)) { if (rdma_cap_ib_mcast(id->device, id->port_num)) {
ib_sa_free_multicast(mc->multicast.ib); ib_sa_free_multicast(mc->multicast.ib);
kfree(mc); kfree(mc);
} else if (rdma_protocol_iboe(id->device, id->port_num)) } else if (rdma_protocol_roce(id->device, id->port_num))
kref_put(&mc->mcref, release_mc); kref_put(&mc->mcref, release_mc);
return; return;
......
...@@ -725,7 +725,7 @@ static ssize_t ucma_query_route(struct ucma_file *file, ...@@ -725,7 +725,7 @@ static ssize_t ucma_query_route(struct ucma_file *file,
if (rdma_cap_ib_sa(ctx->cm_id->device, ctx->cm_id->port_num)) if (rdma_cap_ib_sa(ctx->cm_id->device, ctx->cm_id->port_num))
ucma_copy_ib_route(&resp, &ctx->cm_id->route); ucma_copy_ib_route(&resp, &ctx->cm_id->route);
else if (rdma_protocol_iboe(ctx->cm_id->device, ctx->cm_id->port_num)) else if (rdma_protocol_roce(ctx->cm_id->device, ctx->cm_id->port_num))
ucma_copy_iboe_route(&resp, &ctx->cm_id->route); ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
else if (rdma_protocol_iwarp(ctx->cm_id->device, ctx->cm_id->port_num)) else if (rdma_protocol_iwarp(ctx->cm_id->device, ctx->cm_id->port_num))
ucma_copy_iw_route(&resp, &ctx->cm_id->route); ucma_copy_iw_route(&resp, &ctx->cm_id->route);
......
...@@ -1832,7 +1832,7 @@ static inline bool rdma_protocol_ib(struct ib_device *device, u8 port_num) ...@@ -1832,7 +1832,7 @@ static inline bool rdma_protocol_ib(struct ib_device *device, u8 port_num)
return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IB; return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IB;
} }
/**
 * rdma_protocol_roce - check whether a device port speaks RoCE
 * @device: RDMA device to query
 * @port_num: port number on @device (index into port_immutable[])
 *
 * Tests the port's immutable core capability flags for
 * RDMA_CORE_CAP_PROT_ROCE.  Renamed from rdma_protocol_iboe() to keep
 * the kernel terminology consistent with the IBTA Annex 16/17 "RoCE"
 * naming.
 *
 * Return: true if the port runs the RoCE protocol, false otherwise.
 */
static inline bool rdma_protocol_roce(struct ib_device *device, u8 port_num)
{
	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE;
}
...@@ -1842,7 +1842,7 @@ static inline bool rdma_protocol_iwarp(struct ib_device *device, u8 port_num) ...@@ -1842,7 +1842,7 @@ static inline bool rdma_protocol_iwarp(struct ib_device *device, u8 port_num)
return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP; return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP;
} }
static inline bool rdma_ib_or_iboe(struct ib_device *device, u8 port_num) static inline bool rdma_ib_or_roce(struct ib_device *device, u8 port_num)
{ {
return device->port_immutable[port_num].core_cap_flags & return device->port_immutable[port_num].core_cap_flags &
(RDMA_CORE_CAP_PROT_IB | RDMA_CORE_CAP_PROT_ROCE); (RDMA_CORE_CAP_PROT_IB | RDMA_CORE_CAP_PROT_ROCE);
......
...@@ -987,7 +987,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt) ...@@ -987,7 +987,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
*/ */
if (!rdma_protocol_iwarp(newxprt->sc_cm_id->device, if (!rdma_protocol_iwarp(newxprt->sc_cm_id->device,
newxprt->sc_cm_id->port_num) && newxprt->sc_cm_id->port_num) &&
!rdma_ib_or_iboe(newxprt->sc_cm_id->device, !rdma_ib_or_roce(newxprt->sc_cm_id->device,
newxprt->sc_cm_id->port_num)) newxprt->sc_cm_id->port_num))
goto errout; goto errout;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment