Commit 9b818a34 authored by Jakub Kicinski

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue

Tony Nguyen says:

====================
Intel Wired LAN Driver Updates 2023-11-06 (ice)

This series contains updates to ice driver only.

Dave removes the SR-IOV LAG attribute only for the interface being disabled,
so that all interfaces in the aggregate can unwind properly.

Michal Schmidt changes some LAG allocations from GFP_KERNEL to GFP_ATOMIC
because they are made under rcu_read_lock(), where sleeping is not allowed
(a short sketch follows the commit trailer below).

Aniruddha and Marcin fix redirection and drop rules in switchdev mode by
properly setting and matching the egress/ingress direction (a summary
sketch follows the diff below).

* '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue:
  ice: Fix VF-VF direction matching in drop rule in switchdev
  ice: Fix VF-VF filter rules in switchdev mode
  ice: lag: in RCU, use atomic allocation
  ice: Fix SRIOV LAG disable on non-compliant aggregate
====================

Link: https://lore.kernel.org/r/20231107004844.655549-1-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents f1a3b283 68c51db3
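Before the diff, a minimal sketch of the allocation-context point from the cover letter: rcu_read_lock() opens a non-sleeping (atomic) section, and for_each_netdev_in_bond_rcu() must be called inside it, so any allocation made while walking the bond has to use GFP_ATOMIC; GFP_KERNEL may sleep and is not allowed there. The struct and function names below are illustrative stand-ins, not the driver's own symbols.

#include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/list.h>

/* Illustrative stand-in for the driver's per-netdev list entry. */
struct demo_nd_entry {
	struct list_head node;
	struct net_device *netdev;
};

/* Walk a bond's members under RCU and remember them on a list.
 * rcu_read_lock() starts an atomic (non-sleeping) section, so the
 * allocation inside the loop must use GFP_ATOMIC; GFP_KERNEL may sleep
 * and would trigger a "sleeping function called from invalid context"
 * warning here.
 */
static void demo_collect_bond_members(struct net_device *bond,
				      struct list_head *head)
{
	struct demo_nd_entry *entry;
	struct net_device *tmp_nd;

	rcu_read_lock();
	for_each_netdev_in_bond_rcu(bond, tmp_nd) {
		entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
		if (!entry)
			break;
		entry->netdev = tmp_nd;
		list_add(&entry->node, head);
	}
	rcu_read_unlock();
}

The three GFP_ATOMIC hunks below apply exactly this change to the driver's existing bond walkers.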
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -628,7 +628,7 @@ void ice_lag_move_new_vf_nodes(struct ice_vf *vf)
 	INIT_LIST_HEAD(&ndlist.node);
 	rcu_read_lock();
 	for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) {
-		nl = kzalloc(sizeof(*nl), GFP_KERNEL);
+		nl = kzalloc(sizeof(*nl), GFP_ATOMIC);
 		if (!nl)
 			break;
@@ -1555,18 +1555,12 @@ static void ice_lag_chk_disabled_bond(struct ice_lag *lag, void *ptr)
  */
 static void ice_lag_disable_sriov_bond(struct ice_lag *lag)
 {
-	struct ice_lag_netdev_list *entry;
 	struct ice_netdev_priv *np;
-	struct net_device *netdev;
 	struct ice_pf *pf;
 
-	list_for_each_entry(entry, lag->netdev_head, node) {
-		netdev = entry->netdev;
-		np = netdev_priv(netdev);
-		pf = np->vsi->back;
-
-		ice_clear_feature_support(pf, ICE_F_SRIOV_LAG);
-	}
+	np = netdev_priv(lag->netdev);
+	pf = np->vsi->back;
+	ice_clear_feature_support(pf, ICE_F_SRIOV_LAG);
 }
 
 /**
@@ -1698,7 +1692,7 @@ ice_lag_event_handler(struct notifier_block *notif_blk, unsigned long event,
 	rcu_read_lock();
 	for_each_netdev_in_bond_rcu(upper_netdev, tmp_nd) {
-		nd_list = kzalloc(sizeof(*nd_list), GFP_KERNEL);
+		nd_list = kzalloc(sizeof(*nd_list), GFP_ATOMIC);
 		if (!nd_list)
 			break;
@@ -2075,7 +2069,7 @@ void ice_lag_rebuild(struct ice_pf *pf)
 	INIT_LIST_HEAD(&ndlist.node);
 	rcu_read_lock();
 	for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) {
-		nl = kzalloc(sizeof(*nl), GFP_KERNEL);
+		nl = kzalloc(sizeof(*nl), GFP_ATOMIC);
 		if (!nl)
 			break;
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -630,32 +630,83 @@ bool ice_is_tunnel_supported(struct net_device *dev)
 	return ice_tc_tun_get_type(dev) != TNL_LAST;
 }
 
-static int
-ice_eswitch_tc_parse_action(struct ice_tc_flower_fltr *fltr,
-			    struct flow_action_entry *act)
+static bool ice_tc_is_dev_uplink(struct net_device *dev)
+{
+	return netif_is_ice(dev) || ice_is_tunnel_supported(dev);
+}
+
+static int ice_tc_setup_redirect_action(struct net_device *filter_dev,
+					struct ice_tc_flower_fltr *fltr,
+					struct net_device *target_dev)
 {
 	struct ice_repr *repr;
 
+	fltr->action.fltr_act = ICE_FWD_TO_VSI;
+
+	if (ice_is_port_repr_netdev(filter_dev) &&
+	    ice_is_port_repr_netdev(target_dev)) {
+		repr = ice_netdev_to_repr(target_dev);
+
+		fltr->dest_vsi = repr->src_vsi;
+		fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
+	} else if (ice_is_port_repr_netdev(filter_dev) &&
+		   ice_tc_is_dev_uplink(target_dev)) {
+		repr = ice_netdev_to_repr(filter_dev);
+
+		fltr->dest_vsi = repr->src_vsi->back->switchdev.uplink_vsi;
+		fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
+	} else if (ice_tc_is_dev_uplink(filter_dev) &&
+		   ice_is_port_repr_netdev(target_dev)) {
+		repr = ice_netdev_to_repr(target_dev);
+
+		fltr->dest_vsi = repr->src_vsi;
+		fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
+	} else {
+		NL_SET_ERR_MSG_MOD(fltr->extack,
+				   "Unsupported netdevice in switchdev mode");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+ice_tc_setup_drop_action(struct net_device *filter_dev,
+			 struct ice_tc_flower_fltr *fltr)
+{
+	fltr->action.fltr_act = ICE_DROP_PACKET;
+
+	if (ice_is_port_repr_netdev(filter_dev)) {
+		fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
+	} else if (ice_tc_is_dev_uplink(filter_dev)) {
+		fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
+	} else {
+		NL_SET_ERR_MSG_MOD(fltr->extack,
+				   "Unsupported netdevice in switchdev mode");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int ice_eswitch_tc_parse_action(struct net_device *filter_dev,
+				       struct ice_tc_flower_fltr *fltr,
+				       struct flow_action_entry *act)
+{
+	int err;
+
 	switch (act->id) {
 	case FLOW_ACTION_DROP:
-		fltr->action.fltr_act = ICE_DROP_PACKET;
+		err = ice_tc_setup_drop_action(filter_dev, fltr);
+		if (err)
+			return err;
+
 		break;
 
 	case FLOW_ACTION_REDIRECT:
-		fltr->action.fltr_act = ICE_FWD_TO_VSI;
-
-		if (ice_is_port_repr_netdev(act->dev)) {
-			repr = ice_netdev_to_repr(act->dev);
-
-			fltr->dest_vsi = repr->src_vsi;
-			fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
-		} else if (netif_is_ice(act->dev) ||
-			   ice_is_tunnel_supported(act->dev)) {
-			fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
-		} else {
-			NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported netdevice in switchdev mode");
-			return -EINVAL;
-		}
+		err = ice_tc_setup_redirect_action(filter_dev, fltr, act->dev);
+		if (err)
+			return err;
 
 		break;
@@ -696,10 +747,6 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
 		goto exit;
 	}
 
-	/* egress traffic is always redirect to uplink */
-	if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS)
-		fltr->dest_vsi = vsi->back->switchdev.uplink_vsi;
-
 	rule_info.sw_act.fltr_act = fltr->action.fltr_act;
 	if (fltr->action.fltr_act != ICE_DROP_PACKET)
 		rule_info.sw_act.vsi_handle = fltr->dest_vsi->idx;
@@ -713,13 +760,21 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
 	rule_info.flags_info.act_valid = true;
 
 	if (fltr->direction == ICE_ESWITCH_FLTR_INGRESS) {
+		/* Uplink to VF */
 		rule_info.sw_act.flag |= ICE_FLTR_RX;
 		rule_info.sw_act.src = hw->pf_id;
 		rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE;
-	} else {
+	} else if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS &&
+		   fltr->dest_vsi == vsi->back->switchdev.uplink_vsi) {
+		/* VF to Uplink */
 		rule_info.sw_act.flag |= ICE_FLTR_TX;
 		rule_info.sw_act.src = vsi->idx;
 		rule_info.flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE;
+	} else {
+		/* VF to VF */
+		rule_info.sw_act.flag |= ICE_FLTR_TX;
+		rule_info.sw_act.src = vsi->idx;
+		rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE;
 	}
 
 	/* specify the cookie as filter_rule_id */
@@ -1745,16 +1800,17 @@ ice_tc_parse_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr,
 
 /**
  * ice_parse_tc_flower_actions - Parse the actions for a TC filter
+ * @filter_dev: Pointer to device on which filter is being added
  * @vsi: Pointer to VSI
  * @cls_flower: Pointer to TC flower offload structure
  * @fltr: Pointer to TC flower filter structure
 *
 * Parse the actions for a TC filter
 */
-static int
-ice_parse_tc_flower_actions(struct ice_vsi *vsi,
-			    struct flow_cls_offload *cls_flower,
-			    struct ice_tc_flower_fltr *fltr)
+static int ice_parse_tc_flower_actions(struct net_device *filter_dev,
+				       struct ice_vsi *vsi,
+				       struct flow_cls_offload *cls_flower,
+				       struct ice_tc_flower_fltr *fltr)
 {
 	struct flow_rule *rule = flow_cls_offload_flow_rule(cls_flower);
 	struct flow_action *flow_action = &rule->action;
@@ -1769,7 +1825,7 @@ ice_parse_tc_flower_actions(struct ice_vsi *vsi,
 	flow_action_for_each(i, act, flow_action) {
 		if (ice_is_eswitch_mode_switchdev(vsi->back))
-			err = ice_eswitch_tc_parse_action(fltr, act);
+			err = ice_eswitch_tc_parse_action(filter_dev, fltr, act);
 		else
 			err = ice_tc_parse_action(vsi, fltr, act);
 		if (err)
@@ -1856,7 +1912,7 @@ ice_add_tc_fltr(struct net_device *netdev, struct ice_vsi *vsi,
 	if (err < 0)
 		goto err;
 
-	err = ice_parse_tc_flower_actions(vsi, f, fltr);
+	err = ice_parse_tc_flower_actions(netdev, vsi, f, fltr);
 	if (err < 0)
 		goto err;
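As referenced in the cover letter, here is a hedged summary of the direction handling introduced above, written as standalone C with stand-in names; only the mapping itself is taken from the diff. ice_tc_setup_redirect_action() derives the destination VSI and direction from the (filter netdev, redirect target) pair, and ice_eswitch_add_tc_fltr() then translates that direction into the Rx/Tx and loopback/LAN-enable flags.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative classification of the devices involved in the rule. */
enum demo_dev_kind { DEMO_DEV_REPR, DEMO_DEV_UPLINK, DEMO_DEV_OTHER };

struct demo_rule {
	const char *path;       /* which traffic path the rule covers */
	const char *direction;  /* ICE_ESWITCH_FLTR_EGRESS / _INGRESS */
	const char *flag;       /* ICE_FLTR_TX / ICE_FLTR_RX */
	const char *act;        /* ICE_SINGLE_ACT_LB_ENABLE / _LAN_ENABLE */
};

/* Mirrors the combinations supported by the patch: redirect between two
 * VF representors, VF representor to uplink, and uplink to VF
 * representor. Any other pairing is rejected, as in
 * ice_tc_setup_redirect_action().
 */
static bool demo_classify(enum demo_dev_kind filter_dev,
			  enum demo_dev_kind target_dev,
			  struct demo_rule *out)
{
	if (filter_dev == DEMO_DEV_REPR && target_dev == DEMO_DEV_REPR) {
		*out = (struct demo_rule){ "VF -> VF", "EGRESS",
					   "ICE_FLTR_TX",
					   "ICE_SINGLE_ACT_LB_ENABLE" };
	} else if (filter_dev == DEMO_DEV_REPR &&
		   target_dev == DEMO_DEV_UPLINK) {
		*out = (struct demo_rule){ "VF -> uplink", "EGRESS",
					   "ICE_FLTR_TX",
					   "ICE_SINGLE_ACT_LAN_ENABLE" };
	} else if (filter_dev == DEMO_DEV_UPLINK &&
		   target_dev == DEMO_DEV_REPR) {
		*out = (struct demo_rule){ "uplink -> VF", "INGRESS",
					   "ICE_FLTR_RX",
					   "ICE_SINGLE_ACT_LB_ENABLE" };
	} else {
		return false;  /* "Unsupported netdevice in switchdev mode" */
	}
	return true;
}

int main(void)
{
	struct demo_rule rule;

	/* e.g. the VF-VF redirect case fixed by this series */
	if (demo_classify(DEMO_DEV_REPR, DEMO_DEV_REPR, &rule))
		printf("%s: direction=%s flag=%s act=%s\n",
		       rule.path, rule.direction, rule.flag, rule.act);
	return 0;
}

Drop rules, per ice_tc_setup_drop_action(), look only at the filter device: egress when the filter is installed on a VF representor, ingress when it is installed on the uplink.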