Commit fab96ec8 authored by David S. Miller

Merge branch 'mlx5-next'

Saeed Mahameed says:

====================
Mellanox 100G mlx5 SRIOV switchdev update

This series from Roi and Or further enhances the new SRIOV switchdev mode.

Roi's patches allow users to configure, through devlink, the level of
inline headers that the VF must set for the eswitch HW to do proper
matching. We also enforce on the PF driver that the matching required
for offloaded TC rules is aligned with that level.

Or's patches allow the user to control the VF operational link state
through admin directives on the mlx5 VF rep link. Also in this series is
an implementation of HW and SW counters for the mlx5 VF rep, aligned
with the design set by commit a5ea31f5 'Merge branch net-offloaded-stats'.

v1 --> v2:
* constified the net-device param of the get-offload-stats ndo in mlxsw
  (pointed out by 0-day screaming at us...)
* added Or's Reviewed-by tags to Roi's patches

This series was generated against commit
e796f49d ("net: ieee802154: constify ieee802154_ops structures")
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents ac32378f de0af0bf
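
For context on the inline-mode enforcement described above, here is a small
stand-alone C sketch (illustration only, not kernel code: the enum mirrors the
MLX5_INLINE_MODE_* levels used in the patches, while struct match and its
flags are hypothetical stand-ins for the flower match keys) of the check the
reworked parse_cls_flower() applies against the devlink-configured mode:

#include <stdbool.h>
#include <stdio.h>

enum inline_mode {      /* ordered: a higher value inlines more headers */
        INLINE_MODE_NONE,
        INLINE_MODE_L2,
        INLINE_MODE_IP,
        INLINE_MODE_TCP_UDP,
};

struct match {          /* which headers a TC flower rule matches on */
        bool l2;        /* ethernet addresses */
        bool ip;        /* IP protocol or addresses */
        bool ports;     /* TCP/UDP source or destination port */
};

static enum inline_mode required_inline_mode(const struct match *m)
{
        if (m->ports)
                return INLINE_MODE_TCP_UDP;     /* must inline up to L4 */
        if (m->ip)
                return INLINE_MODE_IP;          /* must inline up to L3 */
        return INLINE_MODE_L2;                  /* L2 is always required */
}

int main(void)
{
        /* a rule matching on TCP ports, eswitch configured for L3 inlining */
        struct match m = { .l2 = true, .ip = true, .ports = true };
        enum inline_mode configured = INLINE_MODE_IP;
        enum inline_mode required = required_inline_mode(&m);

        if (required > configured)
                printf("not offloaded: required %d, configured %d\n",
                       required, configured);
        else
                printf("flow can be offloaded\n");
        return 0;
}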
@@ -150,12 +150,6 @@ static inline int mlx5_max_log_rq_size(int wq_type)
 	}
 }
 
-enum {
-	MLX5E_INLINE_MODE_L2,
-	MLX5E_INLINE_MODE_VPORT_CONTEXT,
-	MLX5_INLINE_MODE_NOT_REQUIRED,
-};
-
 struct mlx5e_tx_wqe {
 	struct mlx5_wqe_ctrl_seg ctrl;
 	struct mlx5_wqe_eth_seg  eth;

@@ -874,6 +868,7 @@ int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv);
 void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv);
 int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr);
 void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
+void mlx5e_update_hw_rep_counters(struct mlx5e_priv *priv);
 int mlx5e_create_direct_rqts(struct mlx5e_priv *priv);
 void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);

@@ -890,12 +885,16 @@ struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
 void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv);
 int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev);
 void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev);
-struct rtnl_link_stats64 *
-mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
 u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout);
 void mlx5e_add_vxlan_port(struct net_device *netdev,
 			  struct udp_tunnel_info *ti);
 void mlx5e_del_vxlan_port(struct net_device *netdev,
 			  struct udp_tunnel_info *ti);
+int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev,
+			    void *sp);
+bool mlx5e_has_offload_stats(const struct net_device *dev, int attr_id);
+bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv);
+bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv);
+
 #endif /* __MLX5_EN_H__ */
@@ -470,16 +470,6 @@ static void mlx5e_rq_free_mpwqe_info(struct mlx5e_rq *rq)
 	kfree(rq->mpwqe.info);
 }
 
-static bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv)
-{
-	struct mlx5_eswitch_rep *rep = (struct mlx5_eswitch_rep *)priv->ppriv;
-
-	if (rep && rep->vport != FDB_UPLINK_VPORT)
-		return true;
-
-	return false;
-}
-
 static int mlx5e_create_rq(struct mlx5e_channel *c,
 			   struct mlx5e_rq_param *param,
 			   struct mlx5e_rq *rq)

@@ -967,7 +957,7 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
 	sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
 	sq->max_inline  = param->max_inline;
 	sq->min_inline_mode =
-		MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5E_INLINE_MODE_VPORT_CONTEXT ?
+		MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT ?
 		param->min_inline_mode : 0;
 
 	err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu));

@@ -2664,7 +2654,7 @@ static int mlx5e_ndo_setup_tc(struct net_device *dev, u32 handle,
 	return mlx5e_setup_tc(dev, tc->tc);
 }
 
-struct rtnl_link_stats64 *
+static struct rtnl_link_stats64 *
 mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);

@@ -2672,13 +2662,20 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
 	struct mlx5e_vport_stats *vstats = &priv->stats.vport;
 	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
 
-	stats->rx_packets = sstats->rx_packets;
-	stats->rx_bytes   = sstats->rx_bytes;
-	stats->tx_packets = sstats->tx_packets;
-	stats->tx_bytes   = sstats->tx_bytes;
+	if (mlx5e_is_uplink_rep(priv)) {
+		stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
+		stats->rx_bytes   = PPORT_802_3_GET(pstats, a_octets_received_ok);
+		stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
+		stats->tx_bytes   = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
+	} else {
+		stats->rx_packets = sstats->rx_packets;
+		stats->rx_bytes   = sstats->rx_bytes;
+		stats->tx_packets = sstats->tx_packets;
+		stats->tx_bytes   = sstats->tx_bytes;
+		stats->tx_dropped = sstats->tx_queue_dropped;
+	}
 
 	stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer;
-	stats->tx_dropped = sstats->tx_queue_dropped;
 
 	stats->rx_length_errors =
 		PPORT_802_3_GET(pstats, a_in_range_length_errors) +

@@ -3290,6 +3287,8 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = {
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller     = mlx5e_netpoll,
 #endif
+	.ndo_has_offload_stats   = mlx5e_has_offload_stats,
+	.ndo_get_offload_stats   = mlx5e_get_offload_stats,
 };
 
 static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)

@@ -3418,14 +3417,13 @@ static void mlx5e_query_min_inline(struct mlx5_core_dev *mdev,
 				   u8 *min_inline_mode)
 {
 	switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) {
-	case MLX5E_INLINE_MODE_L2:
+	case MLX5_CAP_INLINE_MODE_L2:
 		*min_inline_mode = MLX5_INLINE_MODE_L2;
 		break;
-	case MLX5E_INLINE_MODE_VPORT_CONTEXT:
-		mlx5_query_nic_vport_min_inline(mdev,
-						min_inline_mode);
+	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
+		mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode);
 		break;
-	case MLX5_INLINE_MODE_NOT_REQUIRED:
+	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
 		*min_inline_mode = MLX5_INLINE_MODE_NONE;
 		break;
 	}
......
@@ -72,7 +72,29 @@ static void mlx5e_rep_get_strings(struct net_device *dev,
 	}
 }
 
-static void mlx5e_update_sw_rep_counters(struct mlx5e_priv *priv)
+static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv)
+{
+	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+	struct mlx5_eswitch_rep *rep = priv->ppriv;
+	struct rtnl_link_stats64 *vport_stats;
+	struct ifla_vf_stats vf_stats;
+	int err;
+
+	err = mlx5_eswitch_get_vport_stats(esw, rep->vport, &vf_stats);
+	if (err) {
+		pr_warn("vport %d error %d reading stats\n", rep->vport, err);
+		return;
+	}
+
+	vport_stats = &priv->stats.vf_vport;
+	/* flip tx/rx as we are reporting the counters for the switch vport */
+	vport_stats->rx_packets = vf_stats.tx_packets;
+	vport_stats->rx_bytes   = vf_stats.tx_bytes;
+	vport_stats->tx_packets = vf_stats.rx_packets;
+	vport_stats->tx_bytes   = vf_stats.rx_bytes;
+}
+
+static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv)
 {
 	struct mlx5e_sw_stats *s = &priv->stats.sw;
 	struct mlx5e_rq_stats *rq_stats;

@@ -95,6 +117,12 @@ static void mlx5e_update_sw_rep_counters(struct mlx5e_priv *priv)
 	}
 }
 
+static void mlx5e_rep_update_stats(struct mlx5e_priv *priv)
+{
+	mlx5e_rep_update_sw_counters(priv);
+	mlx5e_rep_update_hw_counters(priv);
+}
+
 static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
 					struct ethtool_stats *stats, u64 *data)
 {

@@ -106,7 +134,7 @@ static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
 	mutex_lock(&priv->state_lock);
 	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
-		mlx5e_update_sw_rep_counters(priv);
+		mlx5e_rep_update_sw_counters(priv);
 	mutex_unlock(&priv->state_lock);
 
 	for (i = 0; i < NUM_VPORT_REP_COUNTERS; i++)

@@ -208,6 +236,35 @@ void mlx5e_nic_rep_unload(struct mlx5_eswitch *esw,
 	mlx5e_tc_init(priv);
 }
 
+static int mlx5e_rep_open(struct net_device *dev)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	struct mlx5_eswitch_rep *rep = priv->ppriv;
+	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+	int err;
+
+	err = mlx5e_open(dev);
+	if (err)
+		return err;
+
+	err = mlx5_eswitch_set_vport_state(esw, rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_UP);
+	if (!err)
+		netif_carrier_on(dev);
+
+	return 0;
+}
+
+static int mlx5e_rep_close(struct net_device *dev)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	struct mlx5_eswitch_rep *rep = priv->ppriv;
+	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
+	(void)mlx5_eswitch_set_vport_state(esw, rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
+
+	return mlx5e_close(dev);
+}
+
 static int mlx5e_rep_get_phys_port_name(struct net_device *dev,
 					char *buf, size_t len)
 {

@@ -245,19 +302,90 @@ static int mlx5e_rep_ndo_setup_tc(struct net_device *dev, u32 handle,
 	}
 }
 
+bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
+{
+	struct mlx5_eswitch_rep *rep = (struct mlx5_eswitch_rep *)priv->ppriv;
+	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
+	if (rep && rep->vport == FDB_UPLINK_VPORT && esw->mode == SRIOV_OFFLOADS)
+		return true;
+
+	return false;
+}
+
+bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv)
+{
+	struct mlx5_eswitch_rep *rep = (struct mlx5_eswitch_rep *)priv->ppriv;
+
+	if (rep && rep->vport != FDB_UPLINK_VPORT)
+		return true;
+
+	return false;
+}
+
+bool mlx5e_has_offload_stats(const struct net_device *dev, int attr_id)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+
+	switch (attr_id) {
+	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
+		if (mlx5e_is_vf_vport_rep(priv) || mlx5e_is_uplink_rep(priv))
+			return true;
+	}
+
+	return false;
+}
+
+static int
+mlx5e_get_sw_stats64(const struct net_device *dev,
+		     struct rtnl_link_stats64 *stats)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	struct mlx5e_sw_stats *sstats = &priv->stats.sw;
+
+	stats->rx_packets = sstats->rx_packets;
+	stats->rx_bytes   = sstats->rx_bytes;
+	stats->tx_packets = sstats->tx_packets;
+	stats->tx_bytes   = sstats->tx_bytes;
+	stats->tx_dropped = sstats->tx_queue_dropped;
+
+	return 0;
+}
+
+int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev,
+			    void *sp)
+{
+	switch (attr_id) {
+	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
+		return mlx5e_get_sw_stats64(dev, sp);
+	}
+
+	return -EINVAL;
+}
+
+static struct rtnl_link_stats64 *
+mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+
+	memcpy(stats, &priv->stats.vf_vport, sizeof(*stats));
+	return stats;
+}
+
 static const struct switchdev_ops mlx5e_rep_switchdev_ops = {
 	.switchdev_port_attr_get	= mlx5e_attr_get,
 };
 
 static const struct net_device_ops mlx5e_netdev_ops_rep = {
-	.ndo_open                = mlx5e_open,
-	.ndo_stop                = mlx5e_close,
+	.ndo_open                = mlx5e_rep_open,
+	.ndo_stop                = mlx5e_rep_close,
 	.ndo_start_xmit          = mlx5e_xmit,
 	.ndo_get_phys_port_name  = mlx5e_rep_get_phys_port_name,
 	.ndo_setup_tc            = mlx5e_rep_ndo_setup_tc,
-	.ndo_get_stats64         = mlx5e_get_stats,
-	.ndo_udp_tunnel_add      = mlx5e_add_vxlan_port,
-	.ndo_udp_tunnel_del      = mlx5e_del_vxlan_port,
+	.ndo_get_stats64         = mlx5e_rep_get_stats,
+	.ndo_has_offload_stats   = mlx5e_has_offload_stats,
+	.ndo_get_offload_stats   = mlx5e_get_offload_stats,
 };
 
 static void mlx5e_build_rep_netdev_priv(struct mlx5_core_dev *mdev,

@@ -407,7 +535,7 @@ static struct mlx5e_profile mlx5e_rep_profile = {
 	.cleanup_rx		= mlx5e_cleanup_rep_rx,
 	.init_tx		= mlx5e_init_rep_tx,
 	.cleanup_tx		= mlx5e_cleanup_nic_tx,
-	.update_stats           = mlx5e_update_sw_rep_counters,
+	.update_stats           = mlx5e_rep_update_stats,
 	.max_nch		= mlx5e_get_rep_max_num_channels,
 	.max_tc			= 1,
 };
......
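
A side note on the tx/rx flip performed in mlx5e_rep_update_hw_counters()
above: the representor netdev reports the switch-side view of the VF's
traffic, so packets the VF transmits show up as packets the representor
receives, and vice versa. A minimal user-space sketch of that mapping
(simplified stand-in types, not the kernel structs):

#include <stdint.h>
#include <stdio.h>

struct counters {
        uint64_t rx_packets;
        uint64_t tx_packets;
};

/* the switch-side (representor) view of a VF's traffic */
static struct counters rep_view(struct counters vf)
{
        struct counters rep = {
                .rx_packets = vf.tx_packets,    /* VF transmits -> rep receives */
                .tx_packets = vf.rx_packets,    /* VF receives -> rep transmits */
        };
        return rep;
}

int main(void)
{
        struct counters vf = { .rx_packets = 100, .tx_packets = 42 };
        struct counters rep = rep_view(vf);

        printf("rep: rx_packets=%llu tx_packets=%llu\n",
               (unsigned long long)rep.rx_packets,
               (unsigned long long)rep.tx_packets);
        return 0;
}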
@@ -407,6 +407,7 @@ struct mlx5e_stats {
 	struct mlx5e_vport_stats vport;
 	struct mlx5e_pport_stats pport;
 	struct mlx5e_pcie_stats pcie;
+	struct rtnl_link_stats64 vf_vport;
 };
 
 static const struct counter_desc mlx5e_pme_status_desc[] = {
......
@@ -279,8 +279,10 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
 	return 0;
 }
 
-static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec,
-			    struct tc_cls_flower_offload *f)
+static int __parse_cls_flower(struct mlx5e_priv *priv,
+			      struct mlx5_flow_spec *spec,
+			      struct tc_cls_flower_offload *f,
+			      u8 *min_inline)
 {
 	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
 				       outer_headers);

@@ -289,6 +291,8 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec
 	u16 addr_type = 0;
 	u8 ip_proto = 0;
 
+	*min_inline = MLX5_INLINE_MODE_L2;
+
 	if (f->dissector->used_keys &
 	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
 	      BIT(FLOW_DISSECTOR_KEY_BASIC) |

@@ -362,6 +366,9 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec
 			 mask->ip_proto);
 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
 			 key->ip_proto);
+
+		if (mask->ip_proto)
+			*min_inline = MLX5_INLINE_MODE_IP;
 	}
 
 	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {

@@ -432,6 +439,9 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec
 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
 				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
 		       &key->dst, sizeof(key->dst));
+
+		if (mask->src || mask->dst)
+			*min_inline = MLX5_INLINE_MODE_IP;
 	}
 
 	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {

@@ -457,6 +467,10 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec
 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
 				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
 		       &key->dst, sizeof(key->dst));
+
+		if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY ||
+		    ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY)
+			*min_inline = MLX5_INLINE_MODE_IP;
 	}
 
 	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {

@@ -497,11 +511,39 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec
 				   "Only UDP and TCP transport are supported\n");
 			return -EINVAL;
 		}
+
+		if (mask->src || mask->dst)
+			*min_inline = MLX5_INLINE_MODE_TCP_UDP;
 	}
 
 	return 0;
 }
 
+static int parse_cls_flower(struct mlx5e_priv *priv,
+			    struct mlx5_flow_spec *spec,
+			    struct tc_cls_flower_offload *f)
+{
+	struct mlx5_core_dev *dev = priv->mdev;
+	struct mlx5_eswitch *esw = dev->priv.eswitch;
+	struct mlx5_eswitch_rep *rep = priv->ppriv;
+	u8 min_inline;
+	int err;
+
+	err = __parse_cls_flower(priv, spec, f, &min_inline);
+
+	if (!err && esw->mode == SRIOV_OFFLOADS &&
+	    rep->vport != FDB_UPLINK_VPORT) {
+		if (min_inline > esw->offloads.inline_mode) {
+			netdev_warn(priv->netdev,
+				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
+				    min_inline, esw->offloads.inline_mode);
+			return -EOPNOTSUPP;
+		}
+	}
+
+	return err;
+}
+
 static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 				u32 *action, u32 *flow_tag)
 {
......
@@ -1798,6 +1798,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
 	esw->total_vports = total_vports;
 	esw->enabled_vports = 0;
 	esw->mode = SRIOV_NONE;
+	esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;
 
 	dev->priv.eswitch = esw;
 	return 0;
......
@@ -200,6 +200,7 @@ struct mlx5_esw_offload {
 	struct mlx5_flow_group *vport_rx_group;
 	struct mlx5_eswitch_rep *vport_reps;
 	DECLARE_HASHTABLE(encap_tbl, 8);
+	u8 inline_mode;
 };
 
 struct mlx5_eswitch {

@@ -309,6 +310,9 @@ void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
 int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode);
 int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
+int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode);
+int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode);
+int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode);
 void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
 				     int vport_index,
 				     struct mlx5_eswitch_rep *rep);
......
@@ -657,6 +657,14 @@ static int esw_offloads_start(struct mlx5_eswitch *esw)
 		if (err1)
 			esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err);
 	}
+	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
+		if (mlx5_eswitch_inline_mode_get(esw,
+						 num_vfs,
+						 &esw->offloads.inline_mode)) {
+			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
+			esw_warn(esw->dev, "Inline mode is different between vports\n");
+		}
+	}
 	return err;
 }

@@ -771,6 +779,50 @@ static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
 	return 0;
 }
 
+static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
+{
+	switch (mode) {
+	case DEVLINK_ESWITCH_INLINE_MODE_NONE:
+		*mlx5_mode = MLX5_INLINE_MODE_NONE;
+		break;
+	case DEVLINK_ESWITCH_INLINE_MODE_LINK:
+		*mlx5_mode = MLX5_INLINE_MODE_L2;
+		break;
+	case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
+		*mlx5_mode = MLX5_INLINE_MODE_IP;
+		break;
+	case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
+		*mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
+{
+	switch (mlx5_mode) {
+	case MLX5_INLINE_MODE_NONE:
+		*mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
+		break;
+	case MLX5_INLINE_MODE_L2:
+		*mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
+		break;
+	case MLX5_INLINE_MODE_IP:
+		*mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
+		break;
+	case MLX5_INLINE_MODE_TCP_UDP:
+		*mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
 {
 	struct mlx5_core_dev *dev;

@@ -815,6 +867,95 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
 	return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
 }
 
+int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
+{
+	struct mlx5_core_dev *dev = devlink_priv(devlink);
+	struct mlx5_eswitch *esw = dev->priv.eswitch;
+	int num_vports = esw->enabled_vports;
+	int err;
+	int vport;
+	u8 mlx5_mode;
+
+	if (!MLX5_CAP_GEN(dev, vport_group_manager))
+		return -EOPNOTSUPP;
+
+	if (esw->mode == SRIOV_NONE)
+		return -EOPNOTSUPP;
+
+	if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
+	    MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
+		return -EOPNOTSUPP;
+
+	err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
+	if (err)
+		goto out;
+
+	for (vport = 1; vport < num_vports; vport++) {
+		err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
+		if (err) {
+			esw_warn(dev, "Failed to set min inline on vport %d\n",
+				 vport);
+			goto revert_inline_mode;
+		}
+	}
+
+	esw->offloads.inline_mode = mlx5_mode;
+	return 0;
+
+revert_inline_mode:
+	while (--vport > 0)
+		mlx5_modify_nic_vport_min_inline(dev,
+						 vport,
+						 esw->offloads.inline_mode);
+out:
+	return err;
+}
+
+int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
+{
+	struct mlx5_core_dev *dev = devlink_priv(devlink);
+	struct mlx5_eswitch *esw = dev->priv.eswitch;
+
+	if (!MLX5_CAP_GEN(dev, vport_group_manager))
+		return -EOPNOTSUPP;
+
+	if (esw->mode == SRIOV_NONE)
+		return -EOPNOTSUPP;
+
+	if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
+	    MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
+		return -EOPNOTSUPP;
+
+	return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
+}
+
+int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
+{
+	struct mlx5_core_dev *dev = esw->dev;
+	int vport;
+	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
+
+	if (!MLX5_CAP_GEN(dev, vport_group_manager))
+		return -EOPNOTSUPP;
+
+	if (esw->mode == SRIOV_NONE)
+		return -EOPNOTSUPP;
+
+	if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
+	    MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
+		return -EOPNOTSUPP;
+
+	for (vport = 1; vport <= nvfs; vport++) {
+		mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
+		if (vport > 1 && prev_mlx5_mode != mlx5_mode)
+			return -EINVAL;
+		prev_mlx5_mode = mlx5_mode;
+	}
+
+	*mode = mlx5_mode;
+	return 0;
+}
+
 void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
 				     int vport_index,
 				     struct mlx5_eswitch_rep *__rep)
......
@@ -1239,6 +1239,8 @@ static const struct devlink_ops mlx5_devlink_ops = {
 #ifdef CONFIG_MLX5_CORE_EN
 	.eswitch_mode_set = mlx5_devlink_eswitch_mode_set,
 	.eswitch_mode_get = mlx5_devlink_eswitch_mode_get,
+	.eswitch_inline_mode_set = mlx5_devlink_eswitch_inline_mode_set,
+	.eswitch_inline_mode_get = mlx5_devlink_eswitch_inline_mode_get,
 #endif
 };
......
@@ -113,15 +113,17 @@ static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
 	return mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
 }
 
-void mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
-				     u8 *min_inline_mode)
+int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
+				    u16 vport, u8 *min_inline)
 {
 	u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {0};
+	int err;
 
-	mlx5_query_nic_vport_context(mdev, 0, out, sizeof(out));
-
-	*min_inline_mode = MLX5_GET(query_nic_vport_context_out, out,
-				    nic_vport_context.min_wqe_inline_mode);
+	err = mlx5_query_nic_vport_context(mdev, vport, out, sizeof(out));
+	if (!err)
+		*min_inline = MLX5_GET(query_nic_vport_context_out, out,
+				       nic_vport_context.min_wqe_inline_mode);
+	return err;
 }
 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_min_inline);
......
@@ -857,7 +857,7 @@ mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
 	return 0;
 }
 
-static bool mlxsw_sp_port_has_offload_stats(int attr_id)
+static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
 {
 	switch (attr_id) {
 	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
......
@@ -36,6 +36,12 @@
 #include <linux/mlx5/driver.h>
 #include <linux/mlx5/device.h>
 
+enum {
+	MLX5_CAP_INLINE_MODE_L2,
+	MLX5_CAP_INLINE_MODE_VPORT_CONTEXT,
+	MLX5_CAP_INLINE_MODE_NOT_REQUIRED,
+};
+
 u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport);
 u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
 				u16 vport);

@@ -43,8 +49,8 @@ int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
 				  u16 vport, u8 state);
 int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
 				     u16 vport, u8 *addr);
-void mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
-				     u8 *min_inline);
+int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
+				    u16 vport, u8 *min_inline);
 int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
 				     u16 vport, u8 min_inline);
 int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev,
......
@@ -925,7 +925,7 @@ struct netdev_xdp {
  * 3. Update dev->stats asynchronously and atomically, and define
  *    neither operation.
  *
- * bool (*ndo_has_offload_stats)(int attr_id)
+ * bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id)
  *	Return true if this device supports offload stats of this attr_id.
  *
  * int (*ndo_get_offload_stats)(int attr_id, const struct net_device *dev,

@@ -1165,7 +1165,7 @@
 	struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
 						     struct rtnl_link_stats64 *storage);
-	bool			(*ndo_has_offload_stats)(int attr_id);
+	bool			(*ndo_has_offload_stats)(const struct net_device *dev, int attr_id);
 	int			(*ndo_get_offload_stats)(int attr_id,
 							 const struct net_device *dev,
 							 void *attr_data);
......
@@ -92,6 +92,8 @@ struct devlink_ops {
 	int (*eswitch_mode_get)(struct devlink *devlink, u16 *p_mode);
 	int (*eswitch_mode_set)(struct devlink *devlink, u16 mode);
+	int (*eswitch_inline_mode_get)(struct devlink *devlink, u8 *p_inline_mode);
+	int (*eswitch_inline_mode_set)(struct devlink *devlink, u8 inline_mode);
 };
 
 static inline void *devlink_priv(struct devlink *devlink)
......
@@ -102,6 +102,13 @@ enum devlink_eswitch_mode {
 	DEVLINK_ESWITCH_MODE_SWITCHDEV,
 };
 
+enum devlink_eswitch_inline_mode {
+	DEVLINK_ESWITCH_INLINE_MODE_NONE,
+	DEVLINK_ESWITCH_INLINE_MODE_LINK,
+	DEVLINK_ESWITCH_INLINE_MODE_NETWORK,
+	DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT,
+};
+
 enum devlink_attr {
 	/* don't change the order or add anything between, this is ABI! */
 	DEVLINK_ATTR_UNSPEC,

@@ -133,6 +140,7 @@ enum devlink_attr {
 	DEVLINK_ATTR_SB_OCC_CUR,		/* u32 */
 	DEVLINK_ATTR_SB_OCC_MAX,		/* u32 */
 	DEVLINK_ATTR_ESWITCH_MODE,		/* u16 */
+	DEVLINK_ATTR_ESWITCH_INLINE_MODE,	/* u8 */
 
 	/* add new attributes above here, update the policy in devlink.c */
......
@@ -1394,26 +1394,45 @@ static int devlink_nl_cmd_sb_occ_max_clear_doit(struct sk_buff *skb,
 static int devlink_eswitch_fill(struct sk_buff *msg, struct devlink *devlink,
 				enum devlink_command cmd, u32 portid,
-				u32 seq, int flags, u16 mode)
+				u32 seq, int flags)
 {
+	const struct devlink_ops *ops = devlink->ops;
 	void *hdr;
+	int err = 0;
+	u16 mode;
+	u8 inline_mode;
 
 	hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
 	if (!hdr)
 		return -EMSGSIZE;
 
-	if (devlink_nl_put_handle(msg, devlink))
-		goto nla_put_failure;
+	err = devlink_nl_put_handle(msg, devlink);
+	if (err)
+		goto out;
 
-	if (nla_put_u16(msg, DEVLINK_ATTR_ESWITCH_MODE, mode))
-		goto nla_put_failure;
+	err = ops->eswitch_mode_get(devlink, &mode);
+	if (err)
+		goto out;
+
+	err = nla_put_u16(msg, DEVLINK_ATTR_ESWITCH_MODE, mode);
+	if (err)
+		goto out;
+
+	if (ops->eswitch_inline_mode_get) {
+		err = ops->eswitch_inline_mode_get(devlink, &inline_mode);
+		if (err)
+			goto out;
+
+		err = nla_put_u8(msg, DEVLINK_ATTR_ESWITCH_INLINE_MODE,
+				 inline_mode);
+		if (err)
+			goto out;
+	}
 
 	genlmsg_end(msg, hdr);
 	return 0;
 
-nla_put_failure:
+out:
 	genlmsg_cancel(msg, hdr);
-	return -EMSGSIZE;
+	return err;
 }

@@ -1422,22 +1441,17 @@ static int devlink_nl_cmd_eswitch_mode_get_doit(struct sk_buff *skb,
 	struct devlink *devlink = info->user_ptr[0];
 	const struct devlink_ops *ops = devlink->ops;
 	struct sk_buff *msg;
-	u16 mode;
 	int err;
 
 	if (!ops || !ops->eswitch_mode_get)
 		return -EOPNOTSUPP;
 
-	err = ops->eswitch_mode_get(devlink, &mode);
-	if (err)
-		return err;
-
 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 	if (!msg)
 		return -ENOMEM;
 
 	err = devlink_eswitch_fill(msg, devlink, DEVLINK_CMD_ESWITCH_MODE_GET,
-				   info->snd_portid, info->snd_seq, 0, mode);
+				   info->snd_portid, info->snd_seq, 0);
 
 	if (err) {
 		nlmsg_free(msg);

@@ -1453,15 +1467,32 @@ static int devlink_nl_cmd_eswitch_mode_set_doit(struct sk_buff *skb,
 	struct devlink *devlink = info->user_ptr[0];
 	const struct devlink_ops *ops = devlink->ops;
 	u16 mode;
+	u8 inline_mode;
+	int err = 0;
 
-	if (!info->attrs[DEVLINK_ATTR_ESWITCH_MODE])
-		return -EINVAL;
+	if (!ops)
+		return -EOPNOTSUPP;
 
-	mode = nla_get_u16(info->attrs[DEVLINK_ATTR_ESWITCH_MODE]);
+	if (info->attrs[DEVLINK_ATTR_ESWITCH_MODE]) {
+		if (!ops->eswitch_mode_set)
+			return -EOPNOTSUPP;
+		mode = nla_get_u16(info->attrs[DEVLINK_ATTR_ESWITCH_MODE]);
+		err = ops->eswitch_mode_set(devlink, mode);
+		if (err)
+			return err;
+	}
 
-	if (ops && ops->eswitch_mode_set)
-		return ops->eswitch_mode_set(devlink, mode);
+	if (info->attrs[DEVLINK_ATTR_ESWITCH_INLINE_MODE]) {
+		if (!ops->eswitch_inline_mode_set)
+			return -EOPNOTSUPP;
+		inline_mode = nla_get_u8(
+				info->attrs[DEVLINK_ATTR_ESWITCH_INLINE_MODE]);
+		err = ops->eswitch_inline_mode_set(devlink, inline_mode);
+		if (err)
+			return err;
+	}
 
-	return -EOPNOTSUPP;
+	return 0;
 }

@@ -1478,6 +1509,7 @@ static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = {
 	[DEVLINK_ATTR_SB_THRESHOLD] = { .type = NLA_U32 },
 	[DEVLINK_ATTR_SB_TC_INDEX] = { .type = NLA_U16 },
 	[DEVLINK_ATTR_ESWITCH_MODE] = { .type = NLA_U16 },
+	[DEVLINK_ATTR_ESWITCH_INLINE_MODE] = { .type = NLA_U8 },
 };
 
 static const struct genl_ops devlink_nl_ops[] = {
......
@@ -3671,7 +3671,7 @@ static int rtnl_get_offload_stats(struct sk_buff *skb, struct net_device *dev,
 		if (!size)
 			continue;
 
-		if (!dev->netdev_ops->ndo_has_offload_stats(attr_id))
+		if (!dev->netdev_ops->ndo_has_offload_stats(dev, attr_id))
 			continue;
 
 		attr = nla_reserve_64bit(skb, attr_id, size,

@@ -3712,7 +3712,7 @@ static int rtnl_get_offload_stats_size(const struct net_device *dev)
 	for (attr_id = IFLA_OFFLOAD_XSTATS_FIRST;
 	     attr_id <= IFLA_OFFLOAD_XSTATS_MAX; attr_id++) {
-		if (!dev->netdev_ops->ndo_has_offload_stats(attr_id))
+		if (!dev->netdev_ops->ndo_has_offload_stats(dev, attr_id))
 			continue;
 
 		size = rtnl_get_offload_stats_attr_size(attr_id);
 		nla_size += nla_total_size_64bit(size);
......