Commit 88c940cc authored by Jakub Kicinski

Merge branch 'updates-to-enetc-txq-management'

Vladimir Oltean says:

====================
Updates to ENETC TXQ management

The set ensures that the number of TXQs given by enetc to the network
stack (mqprio or TX hashing) + the number of TXQs given to XDP never
exceeds the number of available TXQs.

These are the first 4 patches of series "[v5,net-next,00/17] ENETC
mqprio/taprio cleanup" from here:
https://patchwork.kernel.org/project/netdevbpf/cover/20230202003621.2679603-1-vladimir.oltean@nxp.com/

There is no change in this version compared to there. I split them off
because this contains a fix for net-next and it would be good if it
could go in quickly. I also did it to reduce the patch count of that
other series, if I need to respin it again.
====================

Link: https://lore.kernel.org/r/20230203001116.3814809-1-vladimir.oltean@nxp.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 8788260e 800db2d1
...@@ -28,11 +28,9 @@ EXPORT_SYMBOL_GPL(enetc_port_mac_wr); ...@@ -28,11 +28,9 @@ EXPORT_SYMBOL_GPL(enetc_port_mac_wr);
static int enetc_num_stack_tx_queues(struct enetc_ndev_priv *priv) static int enetc_num_stack_tx_queues(struct enetc_ndev_priv *priv)
{ {
int num_tx_rings = priv->num_tx_rings; int num_tx_rings = priv->num_tx_rings;
int i;
for (i = 0; i < priv->num_rx_rings; i++) if (priv->xdp_prog)
if (priv->rx_ring[i]->xdp.prog) return num_tx_rings - num_possible_cpus();
return num_tx_rings - num_possible_cpus();
return num_tx_rings; return num_tx_rings;
} }
...@@ -2456,7 +2454,6 @@ int enetc_open(struct net_device *ndev) ...@@ -2456,7 +2454,6 @@ int enetc_open(struct net_device *ndev)
{ {
struct enetc_ndev_priv *priv = netdev_priv(ndev); struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_bdr_resource *tx_res, *rx_res; struct enetc_bdr_resource *tx_res, *rx_res;
int num_stack_tx_queues;
bool extended; bool extended;
int err; int err;
...@@ -2482,16 +2479,6 @@ int enetc_open(struct net_device *ndev) ...@@ -2482,16 +2479,6 @@ int enetc_open(struct net_device *ndev)
goto err_alloc_rx; goto err_alloc_rx;
} }
num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
err = netif_set_real_num_tx_queues(ndev, num_stack_tx_queues);
if (err)
goto err_set_queues;
err = netif_set_real_num_rx_queues(ndev, priv->num_rx_rings);
if (err)
goto err_set_queues;
enetc_tx_onestep_tstamp_init(priv); enetc_tx_onestep_tstamp_init(priv);
enetc_assign_tx_resources(priv, tx_res); enetc_assign_tx_resources(priv, tx_res);
enetc_assign_rx_resources(priv, rx_res); enetc_assign_rx_resources(priv, rx_res);
...@@ -2500,8 +2487,6 @@ int enetc_open(struct net_device *ndev) ...@@ -2500,8 +2487,6 @@ int enetc_open(struct net_device *ndev)
return 0; return 0;
err_set_queues:
enetc_free_rx_resources(rx_res, priv->num_rx_rings);
err_alloc_rx: err_alloc_rx:
enetc_free_tx_resources(tx_res, priv->num_tx_rings); enetc_free_tx_resources(tx_res, priv->num_tx_rings);
err_alloc_tx: err_alloc_tx:
...@@ -2576,8 +2561,11 @@ static int enetc_reconfigure(struct enetc_ndev_priv *priv, bool extended, ...@@ -2576,8 +2561,11 @@ static int enetc_reconfigure(struct enetc_ndev_priv *priv, bool extended,
* without reconfiguration. * without reconfiguration.
*/ */
if (!netif_running(priv->ndev)) { if (!netif_running(priv->ndev)) {
if (cb) if (cb) {
cb(priv, ctx); err = cb(priv, ctx);
if (err)
return err;
}
return 0; return 0;
} }
...@@ -2598,8 +2586,11 @@ static int enetc_reconfigure(struct enetc_ndev_priv *priv, bool extended, ...@@ -2598,8 +2586,11 @@ static int enetc_reconfigure(struct enetc_ndev_priv *priv, bool extended,
enetc_free_rxtx_rings(priv); enetc_free_rxtx_rings(priv);
/* Interface is down, run optional callback now */ /* Interface is down, run optional callback now */
if (cb) if (cb) {
cb(priv, ctx); err = cb(priv, ctx);
if (err)
goto out_restart;
}
enetc_assign_tx_resources(priv, tx_res); enetc_assign_tx_resources(priv, tx_res);
enetc_assign_rx_resources(priv, rx_res); enetc_assign_rx_resources(priv, rx_res);
...@@ -2608,6 +2599,10 @@ static int enetc_reconfigure(struct enetc_ndev_priv *priv, bool extended, ...@@ -2608,6 +2599,10 @@ static int enetc_reconfigure(struct enetc_ndev_priv *priv, bool extended,
return 0; return 0;
out_restart:
enetc_setup_bdrs(priv, extended);
enetc_start(priv->ndev);
enetc_free_rx_resources(rx_res, priv->num_rx_rings);
out_free_tx_res: out_free_tx_res:
enetc_free_tx_resources(tx_res, priv->num_tx_rings); enetc_free_tx_resources(tx_res, priv->num_tx_rings);
out: out:
...@@ -2631,6 +2626,7 @@ int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data) ...@@ -2631,6 +2626,7 @@ int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
if (!num_tc) { if (!num_tc) {
netdev_reset_tc(ndev); netdev_reset_tc(ndev);
netif_set_real_num_tx_queues(ndev, num_stack_tx_queues); netif_set_real_num_tx_queues(ndev, num_stack_tx_queues);
priv->min_num_stack_tx_queues = num_possible_cpus();
/* Reset all ring priorities to 0 */ /* Reset all ring priorities to 0 */
for (i = 0; i < priv->num_tx_rings; i++) { for (i = 0; i < priv->num_tx_rings; i++) {
...@@ -2661,6 +2657,7 @@ int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data) ...@@ -2661,6 +2657,7 @@ int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
/* Reset the number of netdev queues based on the TC count */ /* Reset the number of netdev queues based on the TC count */
netif_set_real_num_tx_queues(ndev, num_tc); netif_set_real_num_tx_queues(ndev, num_tc);
priv->min_num_stack_tx_queues = num_tc;
netdev_set_num_tc(ndev, num_tc); netdev_set_num_tc(ndev, num_tc);
...@@ -2675,9 +2672,18 @@ EXPORT_SYMBOL_GPL(enetc_setup_tc_mqprio); ...@@ -2675,9 +2672,18 @@ EXPORT_SYMBOL_GPL(enetc_setup_tc_mqprio);
static int enetc_reconfigure_xdp_cb(struct enetc_ndev_priv *priv, void *ctx) static int enetc_reconfigure_xdp_cb(struct enetc_ndev_priv *priv, void *ctx)
{ {
struct bpf_prog *old_prog, *prog = ctx; struct bpf_prog *old_prog, *prog = ctx;
int i; int num_stack_tx_queues;
int err, i;
old_prog = xchg(&priv->xdp_prog, prog); old_prog = xchg(&priv->xdp_prog, prog);
num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
err = netif_set_real_num_tx_queues(priv->ndev, num_stack_tx_queues);
if (err) {
xchg(&priv->xdp_prog, old_prog);
return err;
}
if (old_prog) if (old_prog)
bpf_prog_put(old_prog); bpf_prog_put(old_prog);
...@@ -2698,9 +2704,20 @@ static int enetc_reconfigure_xdp_cb(struct enetc_ndev_priv *priv, void *ctx) ...@@ -2698,9 +2704,20 @@ static int enetc_reconfigure_xdp_cb(struct enetc_ndev_priv *priv, void *ctx)
static int enetc_setup_xdp_prog(struct net_device *ndev, struct bpf_prog *prog, static int enetc_setup_xdp_prog(struct net_device *ndev, struct bpf_prog *prog,
struct netlink_ext_ack *extack) struct netlink_ext_ack *extack)
{ {
int num_xdp_tx_queues = prog ? num_possible_cpus() : 0;
struct enetc_ndev_priv *priv = netdev_priv(ndev); struct enetc_ndev_priv *priv = netdev_priv(ndev);
bool extended; bool extended;
if (priv->min_num_stack_tx_queues + num_xdp_tx_queues >
priv->num_tx_rings) {
NL_SET_ERR_MSG_FMT_MOD(extack,
"Reserving %d XDP TXQs does not leave a minimum of %d TXQs for network stack (total %d available)",
num_xdp_tx_queues,
priv->min_num_stack_tx_queues,
priv->num_tx_rings);
return -EBUSY;
}
extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP); extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP);
/* The buffer layout is changing, so we need to drain the old /* The buffer layout is changing, so we need to drain the old
...@@ -2898,6 +2915,7 @@ EXPORT_SYMBOL_GPL(enetc_ioctl); ...@@ -2898,6 +2915,7 @@ EXPORT_SYMBOL_GPL(enetc_ioctl);
int enetc_alloc_msix(struct enetc_ndev_priv *priv) int enetc_alloc_msix(struct enetc_ndev_priv *priv)
{ {
struct pci_dev *pdev = priv->si->pdev; struct pci_dev *pdev = priv->si->pdev;
int num_stack_tx_queues;
int first_xdp_tx_ring; int first_xdp_tx_ring;
int i, n, err, nvec; int i, n, err, nvec;
int v_tx_rings; int v_tx_rings;
...@@ -2974,6 +2992,17 @@ int enetc_alloc_msix(struct enetc_ndev_priv *priv) ...@@ -2974,6 +2992,17 @@ int enetc_alloc_msix(struct enetc_ndev_priv *priv)
} }
} }
num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
err = netif_set_real_num_tx_queues(priv->ndev, num_stack_tx_queues);
if (err)
goto fail;
err = netif_set_real_num_rx_queues(priv->ndev, priv->num_rx_rings);
if (err)
goto fail;
priv->min_num_stack_tx_queues = num_possible_cpus();
first_xdp_tx_ring = priv->num_tx_rings - num_possible_cpus(); first_xdp_tx_ring = priv->num_tx_rings - num_possible_cpus();
priv->xdp_tx_ring = &priv->tx_ring[first_xdp_tx_ring]; priv->xdp_tx_ring = &priv->tx_ring[first_xdp_tx_ring];
......
...@@ -369,6 +369,9 @@ struct enetc_ndev_priv { ...@@ -369,6 +369,9 @@ struct enetc_ndev_priv {
struct psfp_cap psfp_cap; struct psfp_cap psfp_cap;
/* Minimum number of TX queues required by the network stack */
unsigned int min_num_stack_tx_queues;
struct phylink *phylink; struct phylink *phylink;
int ic_mode; int ic_mode;
u32 tx_ictt; u32 tx_ictt;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment