Commit 141a7dbb authored by Thomas Bogendoerfer, committed by David S. Miller

net: sgi: ioc3-eth: use defines for constants dealing with desc rings

Descriptor ring sizes of the IOC3 are more or less fixed. To make it
clearer where the code depends on the ring sizes, use defines for them.
Reviewed-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent c1b6a3d8
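Both rings touched by this patch have power-of-two sizes, so an index can wrap with a bitwise AND against SIZE - 1 rather than a hard-coded 511 or 127. The standalone sketch below (not driver code; the 8-entry ring and the names RING_ENTRIES/RING_MASK are illustrative only) shows that wrap-around:

    /* ring_wrap.c - minimal demo of power-of-two ring index wrapping */
    #include <stdio.h>

    #define RING_ENTRIES 8                  /* must be a power of two */
    #define RING_MASK    (RING_ENTRIES - 1) /* low bits only: 0x7 */

    int main(void)
    {
            unsigned int entry = 0;

            /* Advance twelve times; the index wraps back to 0 after
             * slot 7, just like "(n_entry + 1) & RX_RING_MASK" below.
             */
            for (int i = 0; i < 12; i++) {
                    printf("slot %u\n", entry);
                    entry = (entry + 1) & RING_MASK;
            }
            return 0;
    }

The patch names the driver's two rings accordingly: RX_RING_ENTRIES/RX_RING_MASK (512 entries, fixed in hardware) and TX_RING_ENTRIES/TX_RING_MASK (128 entries).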
@@ -61,10 +61,16 @@
 #include <asm/sn/ioc3.h>
 #include <asm/pci/bridge.h>
 
-/* 64 RX buffers. This is tunable in the range of 16 <= x < 512. The
- * value must be a power of two.
+/* Number of RX buffers. This is tunable in the range of 16 <= x < 512.
+ * The value must be a power of two.
  */
 #define RX_BUFFS 64
+#define RX_RING_ENTRIES 512     /* fixed in hardware */
+#define RX_RING_MASK    (RX_RING_ENTRIES - 1)
+
+/* 128 TX buffers (not tunable) */
+#define TX_RING_ENTRIES 128
+#define TX_RING_MASK    (TX_RING_ENTRIES - 1)
 
 #define ETCSR_FD ((17 << ETCSR_IPGR2_SHIFT) | (11 << ETCSR_IPGR1_SHIFT) | 21)
 #define ETCSR_HD ((21 << ETCSR_IPGR2_SHIFT) | (21 << ETCSR_IPGR1_SHIFT) | 21)
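As a side note (not part of the patch): the constraint stated in the comment above, that RX_BUFFS be a power of two in the range 16 <= x < 512, could also be enforced at compile time. A minimal sketch using C11 _Static_assert, with the values repeated here only for illustration:

    /* check_rx_buffs.c - compile with: cc -std=c11 -c check_rx_buffs.c
     * Standalone illustration only; these defines mirror the driver's.
     */
    #define RX_BUFFS        64
    #define RX_RING_ENTRIES 512

    /* A power of two has exactly one bit set, so x & (x - 1) == 0. */
    _Static_assert(RX_BUFFS && (RX_BUFFS & (RX_BUFFS - 1)) == 0,
                   "RX_BUFFS must be a power of two");
    _Static_assert(RX_BUFFS >= 16 && RX_BUFFS < RX_RING_ENTRIES,
                   "RX_BUFFS must be in the range 16 <= x < 512");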
@@ -76,8 +82,8 @@ struct ioc3_private {
         u32 *ssram;
         unsigned long *rxr;             /* pointer to receiver ring */
         struct ioc3_etxd *txr;
-        struct sk_buff *rx_skbs[512];
-        struct sk_buff *tx_skbs[128];
+        struct sk_buff *rx_skbs[RX_RING_ENTRIES];
+        struct sk_buff *tx_skbs[TX_RING_ENTRIES];
         int rx_ci;                      /* RX consumer index */
         int rx_pi;                      /* RX producer index */
         int tx_ci;                      /* TX consumer index */
@@ -573,10 +579,10 @@ static inline void ioc3_rx(struct net_device *dev)
                 ip->rx_skbs[n_entry] = new_skb;
                 rxr[n_entry] = cpu_to_be64(ioc3_map(rxb, 1));
                 rxb->w0 = 0;                            /* Clear valid flag */
-                n_entry = (n_entry + 1) & 511;          /* Update erpir */
+                n_entry = (n_entry + 1) & RX_RING_MASK; /* Update erpir */
 
                 /* Now go on to the next ring entry. */
-                rx_entry = (rx_entry + 1) & 511;
+                rx_entry = (rx_entry + 1) & RX_RING_MASK;
                 skb = ip->rx_skbs[rx_entry];
                 rxb = (struct ioc3_erxbuf *)(skb->data - RX_OFFSET);
                 w0 = be32_to_cpu(rxb->w0);
@@ -598,7 +604,7 @@ static inline void ioc3_tx(struct net_device *dev)
         spin_lock(&ip->ioc3_lock);
 
         etcir = readl(&regs->etcir);
-        tx_entry = (etcir >> 7) & 127;
+        tx_entry = (etcir >> 7) & TX_RING_MASK;
         o_entry = ip->tx_ci;
         packets = 0;
         bytes = 0;
@@ -610,17 +616,17 @@ static inline void ioc3_tx(struct net_device *dev)
                 dev_consume_skb_irq(skb);
                 ip->tx_skbs[o_entry] = NULL;
 
-                o_entry = (o_entry + 1) & 127;          /* Next */
+                o_entry = (o_entry + 1) & TX_RING_MASK; /* Next */
 
                 etcir = readl(&regs->etcir);            /* More pkts sent? */
-                tx_entry = (etcir >> 7) & 127;
+                tx_entry = (etcir >> 7) & TX_RING_MASK;
         }
 
         dev->stats.tx_packets += packets;
         dev->stats.tx_bytes += bytes;
         ip->txqlen -= packets;
 
-        if (ip->txqlen < 128)
+        if (netif_queue_stopped(dev) && ip->txqlen < TX_RING_ENTRIES)
                 netif_wake_queue(dev);
 
         ip->tx_ci = o_entry;
@@ -765,10 +771,10 @@ static inline void ioc3_clean_rx_ring(struct ioc3_private *ip)
                 ip->rx_skbs[ip->rx_pi] = ip->rx_skbs[ip->rx_ci];
                 ip->rxr[ip->rx_pi++] = ip->rxr[ip->rx_ci++];
         }
-        ip->rx_pi &= 511;
-        ip->rx_ci &= 511;
+        ip->rx_pi &= RX_RING_MASK;
+        ip->rx_ci &= RX_RING_MASK;
 
-        for (i = ip->rx_ci; i != ip->rx_pi; i = (i + 1) & 511) {
+        for (i = ip->rx_ci; i != ip->rx_pi; i = (i + 1) & RX_RING_MASK) {
                 skb = ip->rx_skbs[i];
                 rxb = (struct ioc3_erxbuf *)(skb->data - RX_OFFSET);
                 rxb->w0 = 0;
@@ -780,7 +786,7 @@ static inline void ioc3_clean_tx_ring(struct ioc3_private *ip)
         struct sk_buff *skb;
         int i;
 
-        for (i = 0; i < 128; i++) {
+        for (i = 0; i < TX_RING_ENTRIES; i++) {
                 skb = ip->tx_skbs[i];
                 if (skb) {
                         ip->tx_skbs[i] = NULL;
@@ -812,7 +818,7 @@ static void ioc3_free_rings(struct ioc3_private *ip)
                         if (skb)
                                 dev_kfree_skb_any(skb);
 
-                        n_entry = (n_entry + 1) & 511;
+                        n_entry = (n_entry + 1) & RX_RING_MASK;
                 }
                 free_page((unsigned long)ip->rxr);
                 ip->rxr = NULL;
@@ -1425,13 +1431,13 @@ static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
         mb(); /* make sure all descriptor changes are visible */
 
         ip->tx_skbs[produce] = skb;                     /* Remember skb */
-        produce = (produce + 1) & 127;
+        produce = (produce + 1) & TX_RING_MASK;
         ip->tx_pi = produce;
         writel(produce << 7, &ip->regs->etpir);         /* Fire ... */
 
         ip->txqlen++;
 
-        if (ip->txqlen >= 127)
+        if (ip->txqlen >= (TX_RING_ENTRIES - 1))
                 netif_stop_queue(dev);
 
         spin_unlock_irq(&ip->ioc3_lock);
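To tie the TX hunks together: ioc3_start_xmit() stops the transmit queue once txqlen reaches TX_RING_ENTRIES - 1, and ioc3_tx() wakes it again when the queue is stopped and txqlen has dropped below TX_RING_ENTRIES. The userspace sketch below (not driver code; plain variables stand in for netif_stop_queue(), netif_wake_queue() and netif_queue_stopped()) walks through that threshold logic:

    /* tx_flow_control.c - simulate the stop/wake thresholds above */
    #include <stdbool.h>
    #include <stdio.h>

    #define TX_RING_ENTRIES 128

    static bool queue_stopped;  /* stands in for netif_queue_stopped() */
    static int txqlen;          /* packets queued but not yet completed */

    static void xmit_one(void)
    {
            txqlen++;
            if (txqlen >= (TX_RING_ENTRIES - 1))
                    queue_stopped = true;   /* driver: netif_stop_queue() */
    }

    static void complete_tx(int packets)
    {
            txqlen -= packets;
            if (queue_stopped && txqlen < TX_RING_ENTRIES)
                    queue_stopped = false;  /* driver: netif_wake_queue() */
    }

    int main(void)
    {
            for (int i = 0; i < TX_RING_ENTRIES; i++)
                    xmit_one();
            printf("after filling the ring: stopped=%d txqlen=%d\n",
                   queue_stopped, txqlen);

            complete_tx(16);
            printf("after 16 completions:   stopped=%d txqlen=%d\n",
                   queue_stopped, txqlen);
            return 0;
    }

Filling the ring trips the stop condition at txqlen == 127; a batch of completions drops txqlen below 128 and the queue is started again.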