Commit 0a514159 authored by Parav Pandit, committed by Jason Gunthorpe

IB/core: Refactor ib_init_ah_attr_from_path() for RoCE

Resolving the route for a RoCE path record is needed only for
received CM requests. Therefore,
(a) ib_init_ah_attr_from_path() is refactored to isolate the
route-resolution code in a separate helper, and
(b) setting the dlid and path bits is skipped for RoCE, where they
do not apply.

Additionally, since the ah attribute is initialized from the path
record entry, refer to the path record type (rather than the ah
attribute type) to distinguish the link layer while initializing
the ah attribute.
Signed-off-by: Parav Pandit <parav@mellanox.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent a22af59e
@@ -1227,118 +1227,125 @@ static u8 get_src_path_mask(struct ib_device *device, u8 port_num)
 	return src_path_mask;
 }

-int ib_init_ah_attr_from_path(struct ib_device *device, u8 port_num,
-			      struct sa_path_rec *rec,
-			      struct rdma_ah_attr *ah_attr)
+static int
+roce_resolve_route_from_path(struct ib_device *device, u8 port_num,
+			     struct sa_path_rec *rec)
 {
+	struct net_device *resolved_dev;
+	struct net_device *ndev;
+	struct net_device *idev;
+	struct rdma_dev_addr dev_addr = {
+		.bound_dev_if = ((sa_path_get_ifindex(rec) >= 0) ?
+				 sa_path_get_ifindex(rec) : 0),
+		.net = sa_path_get_ndev(rec) ?
+			sa_path_get_ndev(rec) :
+			&init_net
+	};
+	union {
+		struct sockaddr _sockaddr;
+		struct sockaddr_in _sockaddr_in;
+		struct sockaddr_in6 _sockaddr_in6;
+	} sgid_addr, dgid_addr;
 	int ret;
-	u16 gid_index;
-	int use_roce;
-	struct net_device *ndev = NULL;
-
-	memset(ah_attr, 0, sizeof *ah_attr);
-	ah_attr->type = rdma_ah_find_type(device, port_num);
-
-	rdma_ah_set_dlid(ah_attr, be32_to_cpu(sa_path_get_dlid(rec)));
-
-	if ((ah_attr->type == RDMA_AH_ATTR_TYPE_OPA) &&
-	    (rdma_ah_get_dlid(ah_attr) == be16_to_cpu(IB_LID_PERMISSIVE)))
-		rdma_ah_set_make_grd(ah_attr, true);

-	rdma_ah_set_sl(ah_attr, rec->sl);
-	rdma_ah_set_path_bits(ah_attr, be32_to_cpu(sa_path_get_slid(rec)) &
-			      get_src_path_mask(device, port_num));
-	rdma_ah_set_port_num(ah_attr, port_num);
-	rdma_ah_set_static_rate(ah_attr, rec->rate);
-	use_roce = rdma_cap_eth_ah(device, port_num);
-
-	if (use_roce) {
-		struct net_device *idev;
-		struct net_device *resolved_dev;
-		struct rdma_dev_addr dev_addr = {
-			.bound_dev_if = ((sa_path_get_ifindex(rec) >= 0) ?
-					 sa_path_get_ifindex(rec) : 0),
-			.net = sa_path_get_ndev(rec) ?
-				sa_path_get_ndev(rec) :
-				&init_net
-		};
-		union {
-			struct sockaddr _sockaddr;
-			struct sockaddr_in _sockaddr_in;
-			struct sockaddr_in6 _sockaddr_in6;
-		} sgid_addr, dgid_addr;
-
-		if (!device->get_netdev)
-			return -EOPNOTSUPP;
+	if (!device->get_netdev)
+		return -EOPNOTSUPP;

-		rdma_gid2ip(&sgid_addr._sockaddr, &rec->sgid);
-		rdma_gid2ip(&dgid_addr._sockaddr, &rec->dgid);
+	rdma_gid2ip(&sgid_addr._sockaddr, &rec->sgid);
+	rdma_gid2ip(&dgid_addr._sockaddr, &rec->dgid);

-		/* validate the route */
-		ret = rdma_resolve_ip_route(&sgid_addr._sockaddr,
-					    &dgid_addr._sockaddr, &dev_addr);
-		if (ret)
-			return ret;
+	/* validate the route */
+	ret = rdma_resolve_ip_route(&sgid_addr._sockaddr,
+				    &dgid_addr._sockaddr, &dev_addr);
+	if (ret)
+		return ret;

-		if ((dev_addr.network == RDMA_NETWORK_IPV4 ||
-		     dev_addr.network == RDMA_NETWORK_IPV6) &&
-		    rec->rec_type != SA_PATH_REC_TYPE_ROCE_V2)
-			return -EINVAL;
+	if ((dev_addr.network == RDMA_NETWORK_IPV4 ||
+	     dev_addr.network == RDMA_NETWORK_IPV6) &&
+	    rec->rec_type != SA_PATH_REC_TYPE_ROCE_V2)
+		return -EINVAL;

-		idev = device->get_netdev(device, port_num);
-		if (!idev)
-			return -ENODEV;
+	idev = device->get_netdev(device, port_num);
+	if (!idev)
+		return -ENODEV;

-		resolved_dev = dev_get_by_index(dev_addr.net,
-						dev_addr.bound_dev_if);
-		if (!resolved_dev) {
-			dev_put(idev);
-			return -ENODEV;
-		}
-		ndev = ib_get_ndev_from_path(rec);
-		rcu_read_lock();
-		if ((ndev && ndev != resolved_dev) ||
-		    (resolved_dev != idev &&
-		     !rdma_is_upper_dev_rcu(idev, resolved_dev)))
-			ret = -EHOSTUNREACH;
-		rcu_read_unlock();
-		dev_put(idev);
-		dev_put(resolved_dev);
-		if (ret) {
-			if (ndev)
-				dev_put(ndev);
-			return ret;
-		}
+	resolved_dev = dev_get_by_index(dev_addr.net,
+					dev_addr.bound_dev_if);
+	if (!resolved_dev) {
+		ret = -ENODEV;
+		goto done;
 	}
+	ndev = ib_get_ndev_from_path(rec);
+	rcu_read_lock();
+	if ((ndev && ndev != resolved_dev) ||
+	    (resolved_dev != idev &&
+	     !rdma_is_upper_dev_rcu(idev, resolved_dev)))
+		ret = -EHOSTUNREACH;
+	rcu_read_unlock();
+	dev_put(resolved_dev);
+	if (ndev)
+		dev_put(ndev);
+done:
+	dev_put(idev);
+	return ret;
+}
+
+static int init_ah_attr_grh_fields(struct ib_device *device, u8 port_num,
+				   struct sa_path_rec *rec,
+				   struct rdma_ah_attr *ah_attr)
+{
+	enum ib_gid_type type = sa_conv_pathrec_to_gid_type(rec);
+	struct net_device *ndev;
+	u16 gid_index;
+	int ret;

-	if (rec->hop_limit > 0 || use_roce) {
-		enum ib_gid_type type = sa_conv_pathrec_to_gid_type(rec);
-
-		ret = ib_find_cached_gid_by_port(device, &rec->sgid, type,
-						 port_num, ndev, &gid_index);
-		if (ret) {
-			if (ndev)
-				dev_put(ndev);
-			return ret;
-		}
+	ndev = ib_get_ndev_from_path(rec);
+	ret = ib_find_cached_gid_by_port(device, &rec->sgid, type,
+					 port_num, ndev, &gid_index);
+	if (ndev)
+		dev_put(ndev);
+	if (ret)
+		return ret;

-		rdma_ah_set_grh(ah_attr, &rec->dgid,
-				be32_to_cpu(rec->flow_label),
-				gid_index, rec->hop_limit,
-				rec->traffic_class);
-		if (ndev)
-			dev_put(ndev);
-	}
+	rdma_ah_set_grh(ah_attr, &rec->dgid,
+			be32_to_cpu(rec->flow_label),
+			gid_index, rec->hop_limit,
+			rec->traffic_class);
+	return 0;
+}

-	if (use_roce) {
-		u8 *dmac = sa_path_get_dmac(rec);
-
-		if (!dmac)
-			return -EINVAL;
-		memcpy(ah_attr->roce.dmac, dmac, ETH_ALEN);
+int ib_init_ah_attr_from_path(struct ib_device *device, u8 port_num,
+			      struct sa_path_rec *rec,
+			      struct rdma_ah_attr *ah_attr)
+{
+	int ret = 0;
+
+	memset(ah_attr, 0, sizeof(*ah_attr));
+	ah_attr->type = rdma_ah_find_type(device, port_num);
+	rdma_ah_set_sl(ah_attr, rec->sl);
+	rdma_ah_set_port_num(ah_attr, port_num);
+	rdma_ah_set_static_rate(ah_attr, rec->rate);
+
+	if (sa_path_is_roce(rec)) {
+		ret = roce_resolve_route_from_path(device, port_num, rec);
+		if (ret)
+			return ret;
+
+		memcpy(ah_attr->roce.dmac, sa_path_get_dmac(rec), ETH_ALEN);
+	} else {
+		rdma_ah_set_dlid(ah_attr, be32_to_cpu(sa_path_get_dlid(rec)));
+		if (sa_path_is_opa(rec) &&
+		    rdma_ah_get_dlid(ah_attr) == be16_to_cpu(IB_LID_PERMISSIVE))
+			rdma_ah_set_make_grd(ah_attr, true);
+
+		rdma_ah_set_path_bits(ah_attr,
+				      be32_to_cpu(sa_path_get_slid(rec)) &
+				      get_src_path_mask(device, port_num));
 	}

-	return 0;
+	if (rec->hop_limit > 0 || sa_path_is_roce(rec))
+		ret = init_ah_attr_grh_fields(device, port_num, rec, ah_attr);
+	return ret;
 }
 EXPORT_SYMBOL(ib_init_ah_attr_from_path);
@@ -590,6 +590,11 @@ static inline bool sa_path_is_roce(struct sa_path_rec *rec)
 		(rec->rec_type == SA_PATH_REC_TYPE_ROCE_V2));
 }

+static inline bool sa_path_is_opa(struct sa_path_rec *rec)
+{
+	return (rec->rec_type == SA_PATH_REC_TYPE_OPA);
+}
+
 static inline void sa_path_set_slid(struct sa_path_rec *rec, u32 slid)
 {
 	if (rec->rec_type == SA_PATH_REC_TYPE_IB)
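For illustration only, a minimal caller-side sketch (not part of this commit) of how the refactored entry point is used; the example_init_ah() wrapper is hypothetical, while ib_init_ah_attr_from_path() and its arguments are the ones shown in the diff above:

/* Hypothetical usage sketch: build an AH attribute from an already
 * obtained path record, e.g. while processing a received CM request.
 * With this commit, RoCE route resolution happens inside
 * ib_init_ah_attr_from_path() via roce_resolve_route_from_path().
 */
static int example_init_ah(struct ib_device *device, u8 port_num,
			   struct sa_path_rec *rec,
			   struct rdma_ah_attr *ah_attr)
{
	int ret;

	ret = ib_init_ah_attr_from_path(device, port_num, rec, ah_attr);
	if (ret)
		return ret;

	/* ah_attr is now initialized for either link layer: for RoCE it
	 * carries the resolved dmac and GRH fields; for IB/OPA it carries
	 * the dlid, path bits and (when hop_limit > 0) GRH fields.
	 */
	return 0;
}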