Commit 90ca127c authored by Saeed Mahameed

net/mlx5: Devcom, introduce devcom_for_each_peer_entry

Introduce generic APIs which retrieve all peers.
These APIs replace mlx5_devcom_get/release_peer_data(), which
retrieve only a single peer.
Signed-off-by: Mark Bloch <mbloch@nvidia.com>
Signed-off-by: Shay Drory <shayd@nvidia.com>
Reviewed-by: Vlad Buslov <vladbu@nvidia.com>
Reviewed-by: Roi Dayan <roid@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent 8611df72
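
For context, the new devcom interface is a begin/iterate/end sequence rather than a
single get/release pair. Below is a minimal sketch of the calling pattern, assuming an
eswitch-offloads user; the wrapper function is hypothetical and only mirrors how the
en_rep.c/en_tc.c hunks below use the macros:

static int example_visit_peers(struct mlx5_devcom *devcom)
{
	struct mlx5_eswitch *peer_esw;
	int i;

	/* Takes the component's read lock and checks readiness; returns
	 * false if there is nothing to iterate.
	 */
	if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
		return -ENODEV;

	/* Visits every registered peer, skipping the local device. */
	mlx5_devcom_for_each_peer_entry(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
					peer_esw, i) {
		/* act on each peer eswitch here */
	}

	mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	return 0;
}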
@@ -398,25 +398,64 @@ static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
 	}
 }
 
+static int mlx5e_sqs2vport_add_peers_rules(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep,
+					   struct mlx5_devcom *devcom,
+					   struct mlx5e_rep_sq *rep_sq, int i)
+{
+	struct mlx5_eswitch *peer_esw = NULL;
+	struct mlx5_flow_handle *flow_rule;
+	int tmp;
+
+	mlx5_devcom_for_each_peer_entry(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
+					peer_esw, tmp) {
+		int peer_rule_idx = mlx5_get_dev_index(peer_esw->dev);
+		struct mlx5e_rep_sq_peer *sq_peer;
+		int err;
+
+		sq_peer = kzalloc(sizeof(*sq_peer), GFP_KERNEL);
+		if (!sq_peer)
+			return -ENOMEM;
+
+		flow_rule = mlx5_eswitch_add_send_to_vport_rule(peer_esw, esw,
+								rep, rep_sq->sqn);
+		if (IS_ERR(flow_rule)) {
+			kfree(sq_peer);
+			return PTR_ERR(flow_rule);
+		}
+
+		sq_peer->rule = flow_rule;
+		sq_peer->peer = peer_esw;
+		err = xa_insert(&rep_sq->sq_peer, peer_rule_idx, sq_peer, GFP_KERNEL);
+		if (err) {
+			kfree(sq_peer);
+			mlx5_eswitch_del_send_to_vport_rule(flow_rule);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
 static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
 				 struct mlx5_eswitch_rep *rep,
 				 u32 *sqns_array, int sqns_num)
 {
-	struct mlx5_eswitch *peer_esw = NULL;
 	struct mlx5_flow_handle *flow_rule;
-	struct mlx5e_rep_sq_peer *sq_peer;
 	struct mlx5e_rep_priv *rpriv;
 	struct mlx5e_rep_sq *rep_sq;
+	struct mlx5_devcom *devcom;
+	bool devcom_locked = false;
 	int err;
 	int i;
 
 	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
 		return 0;
 
+	devcom = esw->dev->priv.devcom;
 	rpriv = mlx5e_rep_to_rep_priv(rep);
-	if (mlx5_devcom_comp_is_ready(esw->dev->priv.devcom, MLX5_DEVCOM_ESW_OFFLOADS))
-		peer_esw = mlx5_devcom_get_peer_data(esw->dev->priv.devcom,
-						     MLX5_DEVCOM_ESW_OFFLOADS);
+	if (mlx5_devcom_comp_is_ready(devcom, MLX5_DEVCOM_ESW_OFFLOADS) &&
+	    mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
+		devcom_locked = true;
 
 	for (i = 0; i < sqns_num; i++) {
 		rep_sq = kzalloc(sizeof(*rep_sq), GFP_KERNEL);
@@ -424,7 +463,6 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
 			err = -ENOMEM;
 			goto out_err;
 		}
-		xa_init(&rep_sq->sq_peer);
 
 		/* Add re-inject rule to the PF/representor sqs */
 		flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw, esw, rep,
@@ -437,50 +475,30 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
 		rep_sq->send_to_vport_rule = flow_rule;
 		rep_sq->sqn = sqns_array[i];
 
-		if (peer_esw) {
-			int peer_rule_idx = mlx5_get_dev_index(peer_esw->dev);
-
-			sq_peer = kzalloc(sizeof(*sq_peer), GFP_KERNEL);
-			if (!sq_peer) {
-				err = -ENOMEM;
-				goto out_sq_peer_err;
-			}
-
-			flow_rule = mlx5_eswitch_add_send_to_vport_rule(peer_esw, esw,
-									rep, sqns_array[i]);
-			if (IS_ERR(flow_rule)) {
-				err = PTR_ERR(flow_rule);
-				goto out_flow_rule_err;
-			}
-
-			sq_peer->rule = flow_rule;
-			sq_peer->peer = peer_esw;
-			err = xa_insert(&rep_sq->sq_peer, peer_rule_idx, sq_peer, GFP_KERNEL);
-			if (err)
-				goto out_xa_err;
+		xa_init(&rep_sq->sq_peer);
+		if (devcom_locked) {
+			err = mlx5e_sqs2vport_add_peers_rules(esw, rep, devcom, rep_sq, i);
+			if (err) {
+				mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
+				xa_destroy(&rep_sq->sq_peer);
+				kfree(rep_sq);
+				goto out_err;
+			}
 		}
 
 		list_add(&rep_sq->list, &rpriv->vport_sqs_list);
 	}
 
-	if (peer_esw)
-		mlx5_devcom_release_peer_data(esw->dev->priv.devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+	if (devcom_locked)
+		mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
 
 	return 0;
 
-out_xa_err:
-	mlx5_eswitch_del_send_to_vport_rule(flow_rule);
-out_flow_rule_err:
-	kfree(sq_peer);
-out_sq_peer_err:
-	mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
-	xa_destroy(&rep_sq->sq_peer);
-	kfree(rep_sq);
 out_err:
 	mlx5e_sqs2vport_stop(esw, rep);
-	if (peer_esw)
-		mlx5_devcom_release_peer_data(esw->dev->priv.devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+	if (devcom_locked)
+		mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
 	return err;
 }
......
@@ -1670,6 +1670,7 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro
 	struct mlx5_eswitch *esw;
 	u16 vhca_id;
 	int err;
+	int i;
 
 	out_priv = netdev_priv(out_dev);
 	esw = out_priv->mdev->priv.eswitch;
@@ -1686,8 +1687,13 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro
 	rcu_read_lock();
 	devcom = out_priv->mdev->priv.devcom;
-	esw = mlx5_devcom_get_peer_data_rcu(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
-	err = esw ? mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport) : -ENODEV;
+	err = -ENODEV;
+	mlx5_devcom_for_each_peer_entry_rcu(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
+					    esw, i) {
+		err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
+		if (!err)
+			break;
+	}
 	rcu_read_unlock();
 
 	return err;
@@ -2025,15 +2031,14 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
 {
 	if (mlx5e_is_eswitch_flow(flow)) {
 		struct mlx5_devcom *devcom = flow->priv->mdev->priv.devcom;
-		struct mlx5_eswitch *peer_esw;
 
-		peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
-		if (!peer_esw) {
+		if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS)) {
 			mlx5e_tc_del_fdb_flow(priv, flow);
 			return;
 		}
 
 		mlx5e_tc_del_fdb_peers_flow(flow);
-		mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+		mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
 		mlx5e_tc_del_fdb_flow(priv, flow);
 	} else {
 		mlx5e_tc_del_nic_flow(priv, flow);
@@ -4472,6 +4477,7 @@ mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
 	struct mlx5_eswitch *peer_esw;
 	struct mlx5e_tc_flow *flow;
 	int err;
+	int i;
 
 	flow = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep,
 				    in_mdev);
@@ -4483,23 +4489,27 @@ mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
 		return 0;
 	}
 
-	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
-	if (!peer_esw) {
+	if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS)) {
 		err = -ENODEV;
 		goto clean_flow;
 	}
 
-	err = mlx5e_tc_add_fdb_peer_flow(f, flow, flow_flags, peer_esw);
-	if (err)
-		goto peer_clean;
-	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+	mlx5_devcom_for_each_peer_entry(devcom,
+					MLX5_DEVCOM_ESW_OFFLOADS,
+					peer_esw, i) {
+		err = mlx5e_tc_add_fdb_peer_flow(f, flow, flow_flags, peer_esw);
+		if (err)
+			goto peer_clean;
+	}
 
+	mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
 	*__flow = flow;
 
 	return 0;
 
 peer_clean:
-	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+	mlx5e_tc_del_fdb_peers_flow(flow);
+	mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
 clean_flow:
 	mlx5e_tc_del_fdb_flow(priv, flow);
 	return err;
@@ -4719,7 +4729,6 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
 {
 	struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
 	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
-	struct mlx5_eswitch *peer_esw;
 	struct mlx5e_tc_flow *flow;
 	struct mlx5_fc *counter;
 	u64 lastuse = 0;
@@ -4754,8 +4763,7 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
 	/* Under multipath it's possible for one rule to be currently
 	 * un-offloaded while the other rule is offloaded.
 	 */
-	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
-	if (!peer_esw)
+	if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
 		goto out;
 
 	if (flow_flag_test(flow, DUP)) {
@@ -4786,7 +4794,7 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
 	}
 
 no_peer_counter:
-	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+	mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
 out:
 	flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
 			  FLOW_ACTION_HW_STATS_DELAYED);
......
@@ -647,22 +647,35 @@ mlx5_esw_bridge_ingress_flow_create(u16 vport_num, const unsigned char *addr,
 }
 
 static struct mlx5_flow_handle *
-mlx5_esw_bridge_ingress_flow_peer_create(u16 vport_num, const unsigned char *addr,
+mlx5_esw_bridge_ingress_flow_peer_create(u16 vport_num, u16 esw_owner_vhca_id,
+					 const unsigned char *addr,
 					 struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
 					 struct mlx5_esw_bridge *bridge)
 {
 	struct mlx5_devcom *devcom = bridge->br_offloads->esw->dev->priv.devcom;
+	struct mlx5_eswitch *tmp, *peer_esw = NULL;
 	static struct mlx5_flow_handle *handle;
-	struct mlx5_eswitch *peer_esw;
+	int i;
 
-	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
-	if (!peer_esw)
+	if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
 		return ERR_PTR(-ENODEV);
 
+	mlx5_devcom_for_each_peer_entry(devcom,
+					MLX5_DEVCOM_ESW_OFFLOADS,
+					tmp, i) {
+		if (mlx5_esw_is_owner(tmp, vport_num, esw_owner_vhca_id)) {
+			peer_esw = tmp;
+			break;
+		}
+	}
+
+	if (!peer_esw) {
+		mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+		return ERR_PTR(-ENODEV);
+	}
+
 	handle = mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter_id,
 							      bridge, peer_esw);
+	mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
 
-	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
 	return handle;
 }
@@ -1369,8 +1382,9 @@ mlx5_esw_bridge_fdb_entry_init(struct net_device *dev, u16 vport_num, u16 esw_ow
 	entry->ingress_counter = counter;
 
 	handle = peer ?
-		mlx5_esw_bridge_ingress_flow_peer_create(vport_num, addr, vlan,
-							 mlx5_fc_id(counter), bridge) :
+		mlx5_esw_bridge_ingress_flow_peer_create(vport_num, esw_owner_vhca_id,
+							 addr, vlan, mlx5_fc_id(counter),
+							 bridge) :
 		mlx5_esw_bridge_ingress_flow_create(vport_num, addr, vlan,
 						    mlx5_fc_id(counter), bridge);
 	if (IS_ERR(handle)) {
......
@@ -540,16 +540,29 @@ static struct mlx5_flow_handle *
 mlx5_esw_bridge_mcast_filter_flow_peer_create(struct mlx5_esw_bridge_port *port)
 {
 	struct mlx5_devcom *devcom = port->bridge->br_offloads->esw->dev->priv.devcom;
+	struct mlx5_eswitch *tmp, *peer_esw = NULL;
 	static struct mlx5_flow_handle *handle;
-	struct mlx5_eswitch *peer_esw;
+	int i;
 
-	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
-	if (!peer_esw)
+	if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
 		return ERR_PTR(-ENODEV);
 
+	mlx5_devcom_for_each_peer_entry(devcom,
+					MLX5_DEVCOM_ESW_OFFLOADS,
+					tmp, i) {
+		if (mlx5_esw_is_owner(tmp, port->vport_num, port->esw_owner_vhca_id)) {
+			peer_esw = tmp;
+			break;
+		}
+	}
+
+	if (!peer_esw) {
+		mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+		return ERR_PTR(-ENODEV);
+	}
+
 	handle = mlx5_esw_bridge_mcast_flow_with_esw_create(port, peer_esw);
 
-	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+	mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
 	return handle;
 }
......
@@ -585,6 +585,13 @@ mlx5_esw_is_manager_vport(const struct mlx5_eswitch *esw, u16 vport_num)
 	return esw->manager_vport == vport_num;
 }
 
+static inline bool mlx5_esw_is_owner(struct mlx5_eswitch *esw, u16 vport_num,
+				     u16 esw_owner_vhca_id)
+{
+	return esw_owner_vhca_id == MLX5_CAP_GEN(esw->dev, vhca_id) ||
+		(vport_num == MLX5_VPORT_UPLINK && mlx5_lag_is_master(esw->dev));
+}
+
 static inline u16 mlx5_eswitch_first_host_vport_num(struct mlx5_core_dev *dev)
 {
 	return mlx5_core_is_ecpf_esw_manager(dev) ?
......
@@ -239,55 +239,92 @@ bool mlx5_devcom_comp_is_ready(struct mlx5_devcom *devcom,
 	return READ_ONCE(devcom->priv->components[id].ready);
 }
 
-void *mlx5_devcom_get_peer_data(struct mlx5_devcom *devcom,
-				enum mlx5_devcom_components id)
+bool mlx5_devcom_for_each_peer_begin(struct mlx5_devcom *devcom,
+				     enum mlx5_devcom_components id)
 {
 	struct mlx5_devcom_component *comp;
-	int i;
 
 	if (IS_ERR_OR_NULL(devcom))
-		return NULL;
+		return false;
 
 	comp = &devcom->priv->components[id];
 	down_read(&comp->sem);
 	if (!READ_ONCE(comp->ready)) {
 		up_read(&comp->sem);
-		return NULL;
+		return false;
 	}
 
-	for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++)
-		if (i != devcom->idx)
-			break;
+	return true;
+}
+
+void mlx5_devcom_for_each_peer_end(struct mlx5_devcom *devcom,
+				   enum mlx5_devcom_components id)
+{
+	struct mlx5_devcom_component *comp = &devcom->priv->components[id];
 
-	return rcu_dereference_protected(comp->device[i].data, lockdep_is_held(&comp->sem));
+	up_read(&comp->sem);
 }
 
-void *mlx5_devcom_get_peer_data_rcu(struct mlx5_devcom *devcom, enum mlx5_devcom_components id)
+void *mlx5_devcom_get_next_peer_data(struct mlx5_devcom *devcom,
+				     enum mlx5_devcom_components id,
+				     int *i)
 {
 	struct mlx5_devcom_component *comp;
-	int i;
+	void *ret;
+	int idx;
 
-	if (IS_ERR_OR_NULL(devcom))
-		return NULL;
+	comp = &devcom->priv->components[id];
 
-	for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++)
-		if (i != devcom->idx)
-			break;
+	if (*i == MLX5_DEVCOM_PORTS_SUPPORTED)
+		return NULL;
+	for (idx = *i; idx < MLX5_DEVCOM_PORTS_SUPPORTED; idx++) {
+		if (idx != devcom->idx) {
+			ret = rcu_dereference_protected(comp->device[idx].data,
+							lockdep_is_held(&comp->sem));
+			if (ret)
+				break;
+		}
+	}
 
-	comp = &devcom->priv->components[id];
-	/* This can change concurrently, however 'data' pointer will remain
-	 * valid for the duration of RCU read section.
-	 */
-	if (!READ_ONCE(comp->ready))
-		return NULL;
+	if (idx == MLX5_DEVCOM_PORTS_SUPPORTED) {
+		*i = idx;
+		return NULL;
+	}
+	*i = idx + 1;
 
-	return rcu_dereference(comp->device[i].data);
+	return ret;
 }
 
-void mlx5_devcom_release_peer_data(struct mlx5_devcom *devcom,
-				   enum mlx5_devcom_components id)
+void *mlx5_devcom_get_next_peer_data_rcu(struct mlx5_devcom *devcom,
+					 enum mlx5_devcom_components id,
+					 int *i)
 {
-	struct mlx5_devcom_component *comp = &devcom->priv->components[id];
+	struct mlx5_devcom_component *comp;
+	void *ret;
+	int idx;
 
-	up_read(&comp->sem);
+	comp = &devcom->priv->components[id];
+
+	if (*i == MLX5_DEVCOM_PORTS_SUPPORTED)
+		return NULL;
+	for (idx = *i; idx < MLX5_DEVCOM_PORTS_SUPPORTED; idx++) {
+		if (idx != devcom->idx) {
+			/* This can change concurrently, however 'data' pointer will remain
+			 * valid for the duration of RCU read section.
+			 */
+			if (!READ_ONCE(comp->ready))
+				return NULL;
+			ret = rcu_dereference(comp->device[idx].data);
+			if (ret)
+				break;
+		}
+	}
+
+	if (idx == MLX5_DEVCOM_PORTS_SUPPORTED) {
+		*i = idx;
+		return NULL;
+	}
+	*i = idx + 1;
+
+	return ret;
 }
@@ -39,11 +39,24 @@ void mlx5_devcom_comp_set_ready(struct mlx5_devcom *devcom,
 bool mlx5_devcom_comp_is_ready(struct mlx5_devcom *devcom,
 			       enum mlx5_devcom_components id);
 
-void *mlx5_devcom_get_peer_data(struct mlx5_devcom *devcom,
-				enum mlx5_devcom_components id);
-void *mlx5_devcom_get_peer_data_rcu(struct mlx5_devcom *devcom, enum mlx5_devcom_components id);
-void mlx5_devcom_release_peer_data(struct mlx5_devcom *devcom,
-				   enum mlx5_devcom_components id);
+bool mlx5_devcom_for_each_peer_begin(struct mlx5_devcom *devcom,
+				     enum mlx5_devcom_components id);
+void mlx5_devcom_for_each_peer_end(struct mlx5_devcom *devcom,
+				   enum mlx5_devcom_components id);
+void *mlx5_devcom_get_next_peer_data(struct mlx5_devcom *devcom,
+				     enum mlx5_devcom_components id, int *i);
+
+#define mlx5_devcom_for_each_peer_entry(devcom, id, data, i)			\
+	for (i = 0, data = mlx5_devcom_get_next_peer_data(devcom, id, &i);	\
+	     data;								\
+	     data = mlx5_devcom_get_next_peer_data(devcom, id, &i))
+
+void *mlx5_devcom_get_next_peer_data_rcu(struct mlx5_devcom *devcom,
+					 enum mlx5_devcom_components id, int *i);
+
+#define mlx5_devcom_for_each_peer_entry_rcu(devcom, id, data, i)		\
+	for (i = 0, data = mlx5_devcom_get_next_peer_data_rcu(devcom, id, &i);	\
+	     data;								\
+	     data = mlx5_devcom_get_next_peer_data_rcu(devcom, id, &i))
 
 #endif