Commit ac746c85 authored by Ong Boon Leong's avatar Ong Boon Leong Committed by David S. Miller

net: stmmac: enhance XDP ZC driver level switching performance

The previous stmmac_xdp_set_prog() implementation uses stmmac_release()
and stmmac_open() which tear down the PHY device and causes undesirable
autonegotiation which causes a delay whenever AFXDP ZC is setup.

This patch introduces two new functions that just sufficiently tear
down DMA descriptors, buffer, NAPI process, and IRQs and reestablish
them accordingly in both stmmac_xdp_release() and stmmac_xdp_open().

As a result of this enhancement, we get rid of the transient state
introduced by link auto-negotiation:

$ ./xdpsock -i eth0 -t -z

 sock0@eth0:0 txonly xdp-drv
                   pps            pkts           1.00
rx                 0              0
tx                 634444         634560

 sock0@eth0:0 txonly xdp-drv
                   pps            pkts           1.00
rx                 0              0
tx                 632330         1267072

 sock0@eth0:0 txonly xdp-drv
                   pps            pkts           1.00
rx                 0              0
tx                 632438         1899584

 sock0@eth0:0 txonly xdp-drv
                   pps            pkts           1.00
rx                 0              0
tx                 632502         2532160
Reported-by: Kurt Kanzenbach's avatarKurt Kanzenbach <kurt@linutronix.de>
Signed-off-by: default avatarOng Boon Leong <boon.leong.ong@intel.com>
Tested-by: Kurt Kanzenbach's avatarKurt Kanzenbach <kurt@linutronix.de>
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parent 1274a4eb
...@@ -316,8 +316,8 @@ void stmmac_set_ethtool_ops(struct net_device *netdev); ...@@ -316,8 +316,8 @@ void stmmac_set_ethtool_ops(struct net_device *netdev);
void stmmac_ptp_register(struct stmmac_priv *priv); void stmmac_ptp_register(struct stmmac_priv *priv);
void stmmac_ptp_unregister(struct stmmac_priv *priv); void stmmac_ptp_unregister(struct stmmac_priv *priv);
int stmmac_open(struct net_device *dev); int stmmac_xdp_open(struct net_device *dev);
int stmmac_release(struct net_device *dev); void stmmac_xdp_release(struct net_device *dev);
int stmmac_resume(struct device *dev); int stmmac_resume(struct device *dev);
int stmmac_suspend(struct device *dev); int stmmac_suspend(struct device *dev);
int stmmac_dvr_remove(struct device *dev); int stmmac_dvr_remove(struct device *dev);
......
...@@ -3643,7 +3643,7 @@ static int stmmac_request_irq(struct net_device *dev) ...@@ -3643,7 +3643,7 @@ static int stmmac_request_irq(struct net_device *dev)
* 0 on success and an appropriate (-)ve integer as defined in errno.h * 0 on success and an appropriate (-)ve integer as defined in errno.h
* file on failure. * file on failure.
*/ */
int stmmac_open(struct net_device *dev) static int stmmac_open(struct net_device *dev)
{ {
struct stmmac_priv *priv = netdev_priv(dev); struct stmmac_priv *priv = netdev_priv(dev);
int mode = priv->plat->phy_interface; int mode = priv->plat->phy_interface;
...@@ -3767,7 +3767,7 @@ static void stmmac_fpe_stop_wq(struct stmmac_priv *priv) ...@@ -3767,7 +3767,7 @@ static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
* Description: * Description:
* This is the stop entry point of the driver. * This is the stop entry point of the driver.
*/ */
int stmmac_release(struct net_device *dev) static int stmmac_release(struct net_device *dev)
{ {
struct stmmac_priv *priv = netdev_priv(dev); struct stmmac_priv *priv = netdev_priv(dev);
u32 chan; u32 chan;
...@@ -6429,6 +6429,139 @@ void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue) ...@@ -6429,6 +6429,139 @@ void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
spin_unlock_irqrestore(&ch->lock, flags); spin_unlock_irqrestore(&ch->lock, flags);
} }
/**
 * stmmac_xdp_release - driver-level teardown for XDP (re)configuration
 * @dev: network device to quiesce
 *
 * Stops packet processing and releases DMA/IRQ resources without calling
 * stmmac_release(), so the PHY/link state is left untouched and no
 * autonegotiation is triggered.  Counterpart of stmmac_xdp_open().
 *
 * NOTE(review): ordering below is deliberate — NAPI must be disabled and
 * IRQs freed before the DMA channels are stopped and descriptors freed.
 */
void stmmac_xdp_release(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 chan;

	/* Disable NAPI process */
	stmmac_disable_all_queues(priv);

	/* Stop the per-TX-queue coalescing timers before tearing down DMA */
	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		hrtimer_cancel(&priv->tx_queue[chan].txtimer);

	/* Free the IRQ lines */
	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);

	/* Stop TX/RX DMA channels */
	stmmac_stop_all_dma(priv);

	/* Release and free the Rx/Tx resources */
	free_dma_desc_resources(priv);

	/* Disable the MAC Rx/Tx */
	stmmac_mac_set(priv, priv->ioaddr, false);

	/* set trans_start so we don't get spurious
	 * watchdogs during reset
	 */
	netif_trans_update(dev);
	netif_carrier_off(dev);
}
/**
 * stmmac_xdp_open - driver-level bring-up for XDP (re)configuration
 * @dev: network device to (re)start
 *
 * Re-allocates and re-initializes DMA descriptors, reprograms the RX/TX
 * DMA channels, re-enables the MAC and NAPI, and re-requests the IRQ
 * lines.  Unlike stmmac_open(), the PHY/phylink is not restarted, which
 * avoids the link autonegotiation delay.  Counterpart of
 * stmmac_xdp_release().
 *
 * Return: 0 on success, negative errno on failure.
 */
int stmmac_xdp_open(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
	struct stmmac_rx_queue *rx_q;
	struct stmmac_tx_queue *tx_q;
	u32 buf_size;
	bool sph_en;
	u32 chan;
	int ret;

	ret = alloc_dma_desc_resources(priv);
	if (ret < 0) {
		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
			   __func__);
		goto dma_desc_error;
	}

	ret = init_dma_desc_rings(dev, GFP_KERNEL);
	if (ret < 0) {
		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
			   __func__);
		goto init_error;
	}

	/* DMA CSR Channel configuration */
	for (chan = 0; chan < dma_csr_ch; chan++)
		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);

	/* Adjust Split header */
	sph_en = (priv->hw->rx_csum > 0) && priv->sph;

	/* DMA RX Channel Configuration */
	for (chan = 0; chan < rx_cnt; chan++) {
		rx_q = &priv->rx_queue[chan];

		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
				    rx_q->dma_rx_phy, chan);

		/* Tail pointer starts past the descriptors already filled
		 * by init_dma_desc_rings() (buf_alloc_num of them).
		 */
		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
				     (rx_q->buf_alloc_num *
				      sizeof(struct dma_desc));
		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
				       rx_q->rx_tail_addr, chan);

		/* AF_XDP zero-copy queues take the buffer size from the
		 * UMEM pool; normal queues use the driver default.
		 */
		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
			stmmac_set_dma_bfsize(priv, priv->ioaddr,
					      buf_size,
					      rx_q->queue_index);
		} else {
			stmmac_set_dma_bfsize(priv, priv->ioaddr,
					      priv->dma_buf_sz,
					      rx_q->queue_index);
		}

		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
	}

	/* DMA TX Channel Configuration */
	for (chan = 0; chan < tx_cnt; chan++) {
		tx_q = &priv->tx_queue[chan];

		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
				    tx_q->dma_tx_phy, chan);

		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
				       tx_q->tx_tail_addr, chan);
	}

	/* Enable the MAC Rx/Tx */
	stmmac_mac_set(priv, priv->ioaddr, true);

	/* Start Rx & Tx DMA Channels */
	stmmac_start_all_dma(priv);

	stmmac_init_coalesce(priv);

	ret = stmmac_request_irq(dev);
	if (ret)
		goto irq_error;

	/* Enable NAPI process*/
	stmmac_enable_all_queues(priv);

	netif_carrier_on(dev);
	netif_tx_start_all_queues(dev);

	return 0;

	/* Unwind in reverse order of the setup above */
irq_error:
	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		hrtimer_cancel(&priv->tx_queue[chan].txtimer);

	stmmac_hw_teardown(dev);
init_error:
	free_dma_desc_resources(priv);
dma_desc_error:
	return ret;
}
int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags) int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
{ {
struct stmmac_priv *priv = netdev_priv(dev); struct stmmac_priv *priv = netdev_priv(dev);
......
...@@ -119,7 +119,7 @@ int stmmac_xdp_set_prog(struct stmmac_priv *priv, struct bpf_prog *prog, ...@@ -119,7 +119,7 @@ int stmmac_xdp_set_prog(struct stmmac_priv *priv, struct bpf_prog *prog,
need_update = !!priv->xdp_prog != !!prog; need_update = !!priv->xdp_prog != !!prog;
if (if_running && need_update) if (if_running && need_update)
stmmac_release(dev); stmmac_xdp_release(dev);
old_prog = xchg(&priv->xdp_prog, prog); old_prog = xchg(&priv->xdp_prog, prog);
if (old_prog) if (old_prog)
...@@ -129,7 +129,7 @@ int stmmac_xdp_set_prog(struct stmmac_priv *priv, struct bpf_prog *prog, ...@@ -129,7 +129,7 @@ int stmmac_xdp_set_prog(struct stmmac_priv *priv, struct bpf_prog *prog,
priv->sph = priv->sph_cap && !stmmac_xdp_is_enabled(priv); priv->sph = priv->sph_cap && !stmmac_xdp_is_enabled(priv);
if (if_running && need_update) if (if_running && need_update)
stmmac_open(dev); stmmac_xdp_open(dev);
return 0; return 0;
} }
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment