Commit c73b0183 authored by Florian Fainelli's avatar Florian Fainelli Committed by David S. Miller

net: systemport: rewrite bcm_sysport_rx_refill

Currently, bcm_sysport_desc_rx() calls bcm_sysport_rx_refill() at the end of Rx
packet processing loop, after the current Rx packet has already been passed to
napi_gro_receive(). However, bcm_sysport_rx_refill() might fail to allocate a new
Rx skb, thus leaving a hole on the Rx queue where no valid Rx buffer exists.

To eliminate this situation:

1. Rewrite bcm_sysport_rx_refill() to retain the current Rx skb on the
Rx queue if a new replacement Rx skb can't be allocated and DMA-mapped.
In this case, the data on the current Rx skb is effectively dropped.

2. Modify bcm_sysport_desc_rx() to call bcm_sysport_rx_refill() at the
top of Rx packet processing loop, so that the new replacement Rx skb is
already in place before the current Rx skb is processed.

This is loosely inspired by d6707bec ("net: bcmgenet: rewrite
bcmgenet_rx_refill()")
Signed-off-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent baf387a8
...@@ -524,62 +524,70 @@ static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb) ...@@ -524,62 +524,70 @@ static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
dma_unmap_addr_set(cb, dma_addr, 0); dma_unmap_addr_set(cb, dma_addr, 0);
} }
static int bcm_sysport_rx_refill(struct bcm_sysport_priv *priv, static struct sk_buff *bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
struct bcm_sysport_cb *cb) struct bcm_sysport_cb *cb)
{ {
struct device *kdev = &priv->pdev->dev; struct device *kdev = &priv->pdev->dev;
struct net_device *ndev = priv->netdev; struct net_device *ndev = priv->netdev;
struct sk_buff *skb, *rx_skb;
dma_addr_t mapping; dma_addr_t mapping;
int ret;
cb->skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH); /* Allocate a new SKB for a new packet */
if (!cb->skb) { skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH);
if (!skb) {
priv->mib.alloc_rx_buff_failed++;
netif_err(priv, rx_err, ndev, "SKB alloc failed\n"); netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
return -ENOMEM; return NULL;
} }
mapping = dma_map_single(kdev, cb->skb->data, mapping = dma_map_single(kdev, skb->data,
RX_BUF_LENGTH, DMA_FROM_DEVICE); RX_BUF_LENGTH, DMA_FROM_DEVICE);
ret = dma_mapping_error(kdev, mapping); if (dma_mapping_error(kdev, mapping)) {
if (ret) {
priv->mib.rx_dma_failed++; priv->mib.rx_dma_failed++;
bcm_sysport_free_cb(cb); dev_kfree_skb_any(skb);
netif_err(priv, rx_err, ndev, "DMA mapping failure\n"); netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
return ret; return NULL;
} }
/* Grab the current SKB on the ring */
rx_skb = cb->skb;
if (likely(rx_skb))
dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
RX_BUF_LENGTH, DMA_FROM_DEVICE);
/* Put the new SKB on the ring */
cb->skb = skb;
dma_unmap_addr_set(cb, dma_addr, mapping); dma_unmap_addr_set(cb, dma_addr, mapping);
dma_desc_set_addr(priv, cb->bd_addr, mapping); dma_desc_set_addr(priv, cb->bd_addr, mapping);
netif_dbg(priv, rx_status, ndev, "RX refill\n"); netif_dbg(priv, rx_status, ndev, "RX refill\n");
return 0; /* Return the current SKB to the caller */
return rx_skb;
} }
static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv) static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
{ {
struct bcm_sysport_cb *cb; struct bcm_sysport_cb *cb;
int ret = 0; struct sk_buff *skb;
unsigned int i; unsigned int i;
for (i = 0; i < priv->num_rx_bds; i++) { for (i = 0; i < priv->num_rx_bds; i++) {
cb = &priv->rx_cbs[i]; cb = &priv->rx_cbs[i];
if (cb->skb) skb = bcm_sysport_rx_refill(priv, cb);
continue; if (skb)
dev_kfree_skb(skb);
ret = bcm_sysport_rx_refill(priv, cb); if (!cb->skb)
if (ret) return -ENOMEM;
break;
} }
return ret; return 0;
} }
/* Poll the hardware for up to budget packets to process */ /* Poll the hardware for up to budget packets to process */
static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
unsigned int budget) unsigned int budget)
{ {
struct device *kdev = &priv->pdev->dev;
struct net_device *ndev = priv->netdev; struct net_device *ndev = priv->netdev;
unsigned int processed = 0, to_process; unsigned int processed = 0, to_process;
struct bcm_sysport_cb *cb; struct bcm_sysport_cb *cb;
...@@ -587,7 +595,6 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, ...@@ -587,7 +595,6 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
unsigned int p_index; unsigned int p_index;
u16 len, status; u16 len, status;
struct bcm_rsb *rsb; struct bcm_rsb *rsb;
int ret;
/* Determine how much we should process since last call */ /* Determine how much we should process since last call */
p_index = rdma_readl(priv, RDMA_PROD_INDEX); p_index = rdma_readl(priv, RDMA_PROD_INDEX);
...@@ -605,13 +612,8 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, ...@@ -605,13 +612,8 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
while ((processed < to_process) && (processed < budget)) { while ((processed < to_process) && (processed < budget)) {
cb = &priv->rx_cbs[priv->rx_read_ptr]; cb = &priv->rx_cbs[priv->rx_read_ptr];
skb = cb->skb; skb = bcm_sysport_rx_refill(priv, cb);
processed++;
priv->rx_read_ptr++;
if (priv->rx_read_ptr == priv->num_rx_bds)
priv->rx_read_ptr = 0;
/* We do not have a backing SKB, so we do not have a corresponding /* We do not have a backing SKB, so we do not have a corresponding
* DMA mapping for this incoming packet since * DMA mapping for this incoming packet since
...@@ -622,12 +624,9 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, ...@@ -622,12 +624,9 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
netif_err(priv, rx_err, ndev, "out of memory!\n"); netif_err(priv, rx_err, ndev, "out of memory!\n");
ndev->stats.rx_dropped++; ndev->stats.rx_dropped++;
ndev->stats.rx_errors++; ndev->stats.rx_errors++;
goto refill; goto next;
} }
dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
RX_BUF_LENGTH, DMA_FROM_DEVICE);
/* Extract the Receive Status Block prepended */ /* Extract the Receive Status Block prepended */
rsb = (struct bcm_rsb *)skb->data; rsb = (struct bcm_rsb *)skb->data;
len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK; len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
...@@ -643,8 +642,8 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, ...@@ -643,8 +642,8 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
netif_err(priv, rx_status, ndev, "fragmented packet!\n"); netif_err(priv, rx_status, ndev, "fragmented packet!\n");
ndev->stats.rx_dropped++; ndev->stats.rx_dropped++;
ndev->stats.rx_errors++; ndev->stats.rx_errors++;
bcm_sysport_free_cb(cb); dev_kfree_skb_any(skb);
goto refill; goto next;
} }
if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) { if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) {
...@@ -653,8 +652,8 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, ...@@ -653,8 +652,8 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
ndev->stats.rx_over_errors++; ndev->stats.rx_over_errors++;
ndev->stats.rx_dropped++; ndev->stats.rx_dropped++;
ndev->stats.rx_errors++; ndev->stats.rx_errors++;
bcm_sysport_free_cb(cb); dev_kfree_skb_any(skb);
goto refill; goto next;
} }
skb_put(skb, len); skb_put(skb, len);
...@@ -681,10 +680,12 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, ...@@ -681,10 +680,12 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
ndev->stats.rx_bytes += len; ndev->stats.rx_bytes += len;
napi_gro_receive(&priv->napi, skb); napi_gro_receive(&priv->napi, skb);
refill: next:
ret = bcm_sysport_rx_refill(priv, cb); processed++;
if (ret) priv->rx_read_ptr++;
priv->mib.alloc_rx_buff_failed++;
if (priv->rx_read_ptr == priv->num_rx_bds)
priv->rx_read_ptr = 0;
} }
return processed; return processed;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment