Commit 5289e4a0 authored by David S. Miller's avatar David S. Miller

Merge branch 'systemport-next'

Florian Fainelli says:

====================
net: systemport: misc improvements

These patches are highly inspired by changes from Petri on bcmgenet, last patch
is a misc fix that I had pending for a while, but is not a candidate for 'net'
at this point.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 3d2f6d41 25977ac7
...@@ -524,67 +524,70 @@ static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb) ...@@ -524,67 +524,70 @@ static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
dma_unmap_addr_set(cb, dma_addr, 0); dma_unmap_addr_set(cb, dma_addr, 0);
} }
static int bcm_sysport_rx_refill(struct bcm_sysport_priv *priv, static struct sk_buff *bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
struct bcm_sysport_cb *cb) struct bcm_sysport_cb *cb)
{ {
struct device *kdev = &priv->pdev->dev; struct device *kdev = &priv->pdev->dev;
struct net_device *ndev = priv->netdev; struct net_device *ndev = priv->netdev;
struct sk_buff *skb, *rx_skb;
dma_addr_t mapping; dma_addr_t mapping;
int ret;
cb->skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH); /* Allocate a new SKB for a new packet */
if (!cb->skb) { skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH);
if (!skb) {
priv->mib.alloc_rx_buff_failed++;
netif_err(priv, rx_err, ndev, "SKB alloc failed\n"); netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
return -ENOMEM; return NULL;
} }
mapping = dma_map_single(kdev, cb->skb->data, mapping = dma_map_single(kdev, skb->data,
RX_BUF_LENGTH, DMA_FROM_DEVICE); RX_BUF_LENGTH, DMA_FROM_DEVICE);
ret = dma_mapping_error(kdev, mapping); if (dma_mapping_error(kdev, mapping)) {
if (ret) {
priv->mib.rx_dma_failed++; priv->mib.rx_dma_failed++;
bcm_sysport_free_cb(cb); dev_kfree_skb_any(skb);
netif_err(priv, rx_err, ndev, "DMA mapping failure\n"); netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
return ret; return NULL;
} }
dma_unmap_addr_set(cb, dma_addr, mapping); /* Grab the current SKB on the ring */
dma_desc_set_addr(priv, priv->rx_bd_assign_ptr, mapping); rx_skb = cb->skb;
if (likely(rx_skb))
dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
RX_BUF_LENGTH, DMA_FROM_DEVICE);
priv->rx_bd_assign_index++; /* Put the new SKB on the ring */
priv->rx_bd_assign_index &= (priv->num_rx_bds - 1); cb->skb = skb;
priv->rx_bd_assign_ptr = priv->rx_bds + dma_unmap_addr_set(cb, dma_addr, mapping);
(priv->rx_bd_assign_index * DESC_SIZE); dma_desc_set_addr(priv, cb->bd_addr, mapping);
netif_dbg(priv, rx_status, ndev, "RX refill\n"); netif_dbg(priv, rx_status, ndev, "RX refill\n");
return 0; /* Return the current SKB to the caller */
return rx_skb;
} }
static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv) static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
{ {
struct bcm_sysport_cb *cb; struct bcm_sysport_cb *cb;
int ret = 0; struct sk_buff *skb;
unsigned int i; unsigned int i;
for (i = 0; i < priv->num_rx_bds; i++) { for (i = 0; i < priv->num_rx_bds; i++) {
cb = &priv->rx_cbs[priv->rx_bd_assign_index]; cb = &priv->rx_cbs[i];
if (cb->skb) skb = bcm_sysport_rx_refill(priv, cb);
continue; if (skb)
dev_kfree_skb(skb);
ret = bcm_sysport_rx_refill(priv, cb); if (!cb->skb)
if (ret) return -ENOMEM;
break;
} }
return ret; return 0;
} }
/* Poll the hardware for up to budget packets to process */ /* Poll the hardware for up to budget packets to process */
static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
unsigned int budget) unsigned int budget)
{ {
struct device *kdev = &priv->pdev->dev;
struct net_device *ndev = priv->netdev; struct net_device *ndev = priv->netdev;
unsigned int processed = 0, to_process; unsigned int processed = 0, to_process;
struct bcm_sysport_cb *cb; struct bcm_sysport_cb *cb;
...@@ -592,7 +595,6 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, ...@@ -592,7 +595,6 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
unsigned int p_index; unsigned int p_index;
u16 len, status; u16 len, status;
struct bcm_rsb *rsb; struct bcm_rsb *rsb;
int ret;
/* Determine how much we should process since last call */ /* Determine how much we should process since last call */
p_index = rdma_readl(priv, RDMA_PROD_INDEX); p_index = rdma_readl(priv, RDMA_PROD_INDEX);
...@@ -610,13 +612,8 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, ...@@ -610,13 +612,8 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
while ((processed < to_process) && (processed < budget)) { while ((processed < to_process) && (processed < budget)) {
cb = &priv->rx_cbs[priv->rx_read_ptr]; cb = &priv->rx_cbs[priv->rx_read_ptr];
skb = cb->skb; skb = bcm_sysport_rx_refill(priv, cb);
processed++;
priv->rx_read_ptr++;
if (priv->rx_read_ptr == priv->num_rx_bds)
priv->rx_read_ptr = 0;
/* We do not have a backing SKB, so we do not have a corresponding /* We do not have a backing SKB, so we do not have a corresponding
* DMA mapping for this incoming packet since * DMA mapping for this incoming packet since
...@@ -627,12 +624,9 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, ...@@ -627,12 +624,9 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
netif_err(priv, rx_err, ndev, "out of memory!\n"); netif_err(priv, rx_err, ndev, "out of memory!\n");
ndev->stats.rx_dropped++; ndev->stats.rx_dropped++;
ndev->stats.rx_errors++; ndev->stats.rx_errors++;
goto refill; goto next;
} }
dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
RX_BUF_LENGTH, DMA_FROM_DEVICE);
/* Extract the Receive Status Block prepended */ /* Extract the Receive Status Block prepended */
rsb = (struct bcm_rsb *)skb->data; rsb = (struct bcm_rsb *)skb->data;
len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK; len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
...@@ -644,12 +638,20 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, ...@@ -644,12 +638,20 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
p_index, priv->rx_c_index, priv->rx_read_ptr, p_index, priv->rx_c_index, priv->rx_read_ptr,
len, status); len, status);
if (unlikely(len > RX_BUF_LENGTH)) {
netif_err(priv, rx_status, ndev, "oversized packet\n");
ndev->stats.rx_length_errors++;
ndev->stats.rx_errors++;
dev_kfree_skb_any(skb);
goto next;
}
if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) { if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
netif_err(priv, rx_status, ndev, "fragmented packet!\n"); netif_err(priv, rx_status, ndev, "fragmented packet!\n");
ndev->stats.rx_dropped++; ndev->stats.rx_dropped++;
ndev->stats.rx_errors++; ndev->stats.rx_errors++;
bcm_sysport_free_cb(cb); dev_kfree_skb_any(skb);
goto refill; goto next;
} }
if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) { if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) {
...@@ -658,8 +660,8 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, ...@@ -658,8 +660,8 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
ndev->stats.rx_over_errors++; ndev->stats.rx_over_errors++;
ndev->stats.rx_dropped++; ndev->stats.rx_dropped++;
ndev->stats.rx_errors++; ndev->stats.rx_errors++;
bcm_sysport_free_cb(cb); dev_kfree_skb_any(skb);
goto refill; goto next;
} }
skb_put(skb, len); skb_put(skb, len);
...@@ -686,10 +688,12 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, ...@@ -686,10 +688,12 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
ndev->stats.rx_bytes += len; ndev->stats.rx_bytes += len;
napi_gro_receive(&priv->napi, skb); napi_gro_receive(&priv->napi, skb);
refill: next:
ret = bcm_sysport_rx_refill(priv, cb); processed++;
if (ret) priv->rx_read_ptr++;
priv->mib.alloc_rx_buff_failed++;
if (priv->rx_read_ptr == priv->num_rx_bds)
priv->rx_read_ptr = 0;
} }
return processed; return processed;
...@@ -1330,14 +1334,14 @@ static inline int tdma_enable_set(struct bcm_sysport_priv *priv, ...@@ -1330,14 +1334,14 @@ static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv) static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
{ {
struct bcm_sysport_cb *cb;
u32 reg; u32 reg;
int ret; int ret;
int i;
/* Initialize SW view of the RX ring */ /* Initialize SW view of the RX ring */
priv->num_rx_bds = NUM_RX_DESC; priv->num_rx_bds = NUM_RX_DESC;
priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET; priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
priv->rx_bd_assign_ptr = priv->rx_bds;
priv->rx_bd_assign_index = 0;
priv->rx_c_index = 0; priv->rx_c_index = 0;
priv->rx_read_ptr = 0; priv->rx_read_ptr = 0;
priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct bcm_sysport_cb), priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct bcm_sysport_cb),
...@@ -1347,6 +1351,11 @@ static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv) ...@@ -1347,6 +1351,11 @@ static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
return -ENOMEM; return -ENOMEM;
} }
for (i = 0; i < priv->num_rx_bds; i++) {
cb = priv->rx_cbs + i;
cb->bd_addr = priv->rx_bds + i * DESC_SIZE;
}
ret = bcm_sysport_alloc_rx_bufs(priv); ret = bcm_sysport_alloc_rx_bufs(priv);
if (ret) { if (ret) {
netif_err(priv, hw, priv->netdev, "SKB allocation failed\n"); netif_err(priv, hw, priv->netdev, "SKB allocation failed\n");
......
...@@ -663,8 +663,6 @@ struct bcm_sysport_priv { ...@@ -663,8 +663,6 @@ struct bcm_sysport_priv {
/* Receive queue */ /* Receive queue */
void __iomem *rx_bds; void __iomem *rx_bds;
void __iomem *rx_bd_assign_ptr;
unsigned int rx_bd_assign_index;
struct bcm_sysport_cb *rx_cbs; struct bcm_sysport_cb *rx_cbs;
unsigned int num_rx_bds; unsigned int num_rx_bds;
unsigned int rx_read_ptr; unsigned int rx_read_ptr;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment