Commit e2bbb966 authored by David S. Miller

Merge branch 'net_device_stats'

Tobias Klauser says:

====================
Use net_device_stats from struct net_device

Along the lines of previous patches, switch (almost) all remaining net
drivers to use net_device_stats from net_device instead of including a
copy of it in their netdev_priv struct.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 7db4c0dd 82d82938
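
The conversions below all follow the same shape: drop the private net_device_stats copy from the driver's netdev_priv struct and count directly into the stats embedded in struct net_device. The following is a minimal illustrative sketch of that pattern, assuming a hypothetical driver ("foo") -- the names are not taken from any file in this series:

/*
 * Hypothetical driver, for illustration only: counters go straight into
 * dev->stats instead of a private struct net_device_stats copy.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct foo_priv {
	void __iomem *regs;
	/* struct net_device_stats stats;  <-- removed: use dev->stats instead */
};

static void foo_rx_complete(struct net_device *dev, struct sk_buff *skb)
{
	/* Update the counters embedded in struct net_device directly. */
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;
	netif_rx(skb);
}

Where a driver's ndo_get_stats callback did nothing but return the private copy, the callback can be removed entirely, as several of the diffs below do.
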
......@@ -283,7 +283,6 @@ struct typhoon {
spinlock_t command_lock ____cacheline_aligned;
struct basic_ring cmdRing;
struct basic_ring respRing;
struct net_device_stats stats;
struct net_device_stats stats_saved;
struct typhoon_shared * shared;
dma_addr_t shared_dma;
......@@ -898,7 +897,7 @@ typhoon_set_rx_mode(struct net_device *dev)
static int
typhoon_do_get_stats(struct typhoon *tp)
{
struct net_device_stats *stats = &tp->stats;
struct net_device_stats *stats = &tp->dev->stats;
struct net_device_stats *saved = &tp->stats_saved;
struct cmd_desc xp_cmd;
struct resp_desc xp_resp[7];
......@@ -951,7 +950,7 @@ static struct net_device_stats *
typhoon_get_stats(struct net_device *dev)
{
struct typhoon *tp = netdev_priv(dev);
struct net_device_stats *stats = &tp->stats;
struct net_device_stats *stats = &tp->dev->stats;
struct net_device_stats *saved = &tp->stats_saved;
smp_rmb();
......@@ -1991,7 +1990,7 @@ typhoon_stop_runtime(struct typhoon *tp, int wait_type)
tp->card_state = Sleeping;
smp_wmb();
typhoon_do_get_stats(tp);
memcpy(&tp->stats_saved, &tp->stats, sizeof(struct net_device_stats));
memcpy(&tp->stats_saved, &tp->dev->stats, sizeof(struct net_device_stats));
INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_HALT);
typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
......
......@@ -359,7 +359,6 @@ typedef struct _mace_statistics {
typedef struct _mace_private {
struct pcmcia_device *p_dev;
struct net_device_stats linux_stats; /* Linux statistics counters */
mace_statistics mace_stats; /* MACE chip statistics counters */
/* restore_multicast_list() state variables */
......@@ -879,7 +878,7 @@ static netdev_tx_t mace_start_xmit(struct sk_buff *skb,
service a transmit interrupt while we are in here.
*/
lp->linux_stats.tx_bytes += skb->len;
dev->stats.tx_bytes += skb->len;
lp->tx_free_frames--;
/* WARNING: Write the _exact_ number of bytes written in the header! */
......@@ -967,7 +966,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
fifofc = inb(ioaddr + AM2150_MACE_BASE + MACE_FIFOFC);
if ((fifofc & MACE_FIFOFC_XMTFC)==0) {
lp->linux_stats.tx_errors++;
dev->stats.tx_errors++;
outb(0xFF, ioaddr + AM2150_XMT_SKIP);
}
......@@ -1016,7 +1015,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
} /* if (xmtfs & MACE_XMTFS_XMTSV) */
lp->linux_stats.tx_packets++;
dev->stats.tx_packets++;
lp->tx_free_frames++;
netif_wake_queue(dev);
} /* if (status & MACE_IR_XMTINT) */
......@@ -1077,7 +1076,7 @@ static int mace_rx(struct net_device *dev, unsigned char RxCnt)
" 0x%X.\n", dev->name, rx_framecnt, rx_status);
if (rx_status & MACE_RCVFS_RCVSTS) { /* Error, update stats. */
lp->linux_stats.rx_errors++;
dev->stats.rx_errors++;
if (rx_status & MACE_RCVFS_OFLO) {
lp->mace_stats.oflo++;
}
......@@ -1114,14 +1113,14 @@ static int mace_rx(struct net_device *dev, unsigned char RxCnt)
netif_rx(skb); /* Send the packet to the upper (protocol) layers. */
lp->linux_stats.rx_packets++;
lp->linux_stats.rx_bytes += pkt_len;
dev->stats.rx_packets++;
dev->stats.rx_bytes += pkt_len;
outb(0xFF, ioaddr + AM2150_RCV_NEXT); /* skip to next frame */
continue;
} else {
pr_debug("%s: couldn't allocate a sk_buff of size"
" %d.\n", dev->name, pkt_len);
lp->linux_stats.rx_dropped++;
dev->stats.rx_dropped++;
}
}
outb(0xFF, ioaddr + AM2150_RCV_NEXT); /* skip to next frame */
......@@ -1231,13 +1230,13 @@ static void update_stats(unsigned int ioaddr, struct net_device *dev)
lp->mace_stats.rntpc += mace_read(lp, ioaddr, MACE_RNTPC);
lp->mace_stats.mpc += mace_read(lp, ioaddr, MACE_MPC);
/* At this point, mace_stats is fully updated for this call.
We may now update the linux_stats. */
We may now update the netdev stats. */
/* The MACE has no equivalent for linux_stats field which are commented
/* The MACE has no equivalent for netdev stats field which are commented
out. */
/* lp->linux_stats.multicast; */
lp->linux_stats.collisions =
/* dev->stats.multicast; */
dev->stats.collisions =
lp->mace_stats.rcvcco * 256 + lp->mace_stats.rcvcc;
/* Collision: The MACE may retry sending a packet 15 times
before giving up. The retry count is in XMTRC.
......@@ -1245,22 +1244,22 @@ static void update_stats(unsigned int ioaddr, struct net_device *dev)
If so, why doesn't the RCVCC record these collisions? */
/* detailed rx_errors: */
lp->linux_stats.rx_length_errors =
dev->stats.rx_length_errors =
lp->mace_stats.rntpco * 256 + lp->mace_stats.rntpc;
/* lp->linux_stats.rx_over_errors */
lp->linux_stats.rx_crc_errors = lp->mace_stats.fcs;
lp->linux_stats.rx_frame_errors = lp->mace_stats.fram;
lp->linux_stats.rx_fifo_errors = lp->mace_stats.oflo;
lp->linux_stats.rx_missed_errors =
/* dev->stats.rx_over_errors */
dev->stats.rx_crc_errors = lp->mace_stats.fcs;
dev->stats.rx_frame_errors = lp->mace_stats.fram;
dev->stats.rx_fifo_errors = lp->mace_stats.oflo;
dev->stats.rx_missed_errors =
lp->mace_stats.mpco * 256 + lp->mace_stats.mpc;
/* detailed tx_errors */
lp->linux_stats.tx_aborted_errors = lp->mace_stats.rtry;
lp->linux_stats.tx_carrier_errors = lp->mace_stats.lcar;
dev->stats.tx_aborted_errors = lp->mace_stats.rtry;
dev->stats.tx_carrier_errors = lp->mace_stats.lcar;
/* LCAR usually results from bad cabling. */
lp->linux_stats.tx_fifo_errors = lp->mace_stats.uflo;
lp->linux_stats.tx_heartbeat_errors = lp->mace_stats.cerr;
/* lp->linux_stats.tx_window_errors; */
dev->stats.tx_fifo_errors = lp->mace_stats.uflo;
dev->stats.tx_heartbeat_errors = lp->mace_stats.cerr;
/* dev->stats.tx_window_errors; */
} /* update_stats */
/* ----------------------------------------------------------------------------
......@@ -1274,10 +1273,10 @@ static struct net_device_stats *mace_get_stats(struct net_device *dev)
update_stats(dev->base_addr, dev);
pr_debug("%s: updating the statistics.\n", dev->name);
pr_linux_stats(&lp->linux_stats);
pr_linux_stats(&dev->stats);
pr_mace_stats(&lp->mace_stats);
return &lp->linux_stats;
return &dev->stats;
} /* net_device_stats */
/* ----------------------------------------------------------------------------
......
......@@ -684,8 +684,8 @@ static void macb_tx_error_task(struct work_struct *work)
netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
macb_tx_ring_wrap(bp, tail),
skb->data);
bp->stats.tx_packets++;
bp->stats.tx_bytes += skb->len;
bp->dev->stats.tx_packets++;
bp->dev->stats.tx_bytes += skb->len;
}
} else {
/* "Buffers exhausted mid-frame" errors may only happen
......@@ -778,8 +778,8 @@ static void macb_tx_interrupt(struct macb_queue *queue)
netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
macb_tx_ring_wrap(bp, tail),
skb->data);
bp->stats.tx_packets++;
bp->stats.tx_bytes += skb->len;
bp->dev->stats.tx_packets++;
bp->dev->stats.tx_bytes += skb->len;
}
/* Now we can safely release resources */
......@@ -911,14 +911,14 @@ static int gem_rx(struct macb *bp, int budget)
if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
netdev_err(bp->dev,
"not whole frame pointed by descriptor\n");
bp->stats.rx_dropped++;
bp->dev->stats.rx_dropped++;
break;
}
skb = bp->rx_skbuff[entry];
if (unlikely(!skb)) {
netdev_err(bp->dev,
"inconsistent Rx descriptor chain\n");
bp->stats.rx_dropped++;
bp->dev->stats.rx_dropped++;
break;
}
/* now everything is ready for receiving packet */
......@@ -938,8 +938,8 @@ static int gem_rx(struct macb *bp, int budget)
GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK)
skb->ip_summed = CHECKSUM_UNNECESSARY;
bp->stats.rx_packets++;
bp->stats.rx_bytes += skb->len;
bp->dev->stats.rx_packets++;
bp->dev->stats.rx_bytes += skb->len;
#if defined(DEBUG) && defined(VERBOSE_DEBUG)
netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
......@@ -984,7 +984,7 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
*/
skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
if (!skb) {
bp->stats.rx_dropped++;
bp->dev->stats.rx_dropped++;
for (frag = first_frag; ; frag++) {
desc = macb_rx_desc(bp, frag);
desc->addr &= ~MACB_BIT(RX_USED);
......@@ -1030,8 +1030,8 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
__skb_pull(skb, NET_IP_ALIGN);
skb->protocol = eth_type_trans(skb, bp->dev);
bp->stats.rx_packets++;
bp->stats.rx_bytes += skb->len;
bp->dev->stats.rx_packets++;
bp->dev->stats.rx_bytes += skb->len;
netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
skb->len, skb->csum);
netif_receive_skb(skb);
......@@ -2210,7 +2210,7 @@ static void gem_update_stats(struct macb *bp)
static struct net_device_stats *gem_get_stats(struct macb *bp)
{
struct gem_stats *hwstat = &bp->hw_stats.gem;
struct net_device_stats *nstat = &bp->stats;
struct net_device_stats *nstat = &bp->dev->stats;
gem_update_stats(bp);
......@@ -2281,7 +2281,7 @@ static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
static struct net_device_stats *macb_get_stats(struct net_device *dev)
{
struct macb *bp = netdev_priv(dev);
struct net_device_stats *nstat = &bp->stats;
struct net_device_stats *nstat = &bp->dev->stats;
struct macb_stats *hwstat = &bp->hw_stats.macb;
if (macb_is_gem(bp))
......@@ -2993,15 +2993,15 @@ static void at91ether_rx(struct net_device *dev)
memcpy(skb_put(skb, pktlen), p_recv, pktlen);
skb->protocol = eth_type_trans(skb, dev);
lp->stats.rx_packets++;
lp->stats.rx_bytes += pktlen;
dev->stats.rx_packets++;
dev->stats.rx_bytes += pktlen;
netif_rx(skb);
} else {
lp->stats.rx_dropped++;
dev->stats.rx_dropped++;
}
if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH))
lp->stats.multicast++;
dev->stats.multicast++;
/* reset ownership bit */
desc->addr &= ~MACB_BIT(RX_USED);
......@@ -3036,15 +3036,15 @@ static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
if (intstatus & MACB_BIT(TCOMP)) {
/* The TCOM bit is set even if the transmission failed */
if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
lp->stats.tx_errors++;
dev->stats.tx_errors++;
if (lp->skb) {
dev_kfree_skb_irq(lp->skb);
lp->skb = NULL;
dma_unmap_single(NULL, lp->skb_physaddr,
lp->skb_length, DMA_TO_DEVICE);
lp->stats.tx_packets++;
lp->stats.tx_bytes += lp->skb_length;
dev->stats.tx_packets++;
dev->stats.tx_bytes += lp->skb_length;
}
netif_wake_queue(dev);
}
......
......@@ -919,7 +919,6 @@ struct macb {
struct clk *rx_clk;
struct net_device *dev;
struct napi_struct napi;
struct net_device_stats stats;
union {
struct macb_stats macb;
struct gem_stats gem;
......
......@@ -223,7 +223,6 @@ struct port_info {
struct cmac *mac;
struct cphy *phy;
struct link_config link_config;
struct net_device_stats netstats;
};
struct sge;
......
......@@ -296,7 +296,7 @@ static struct net_device_stats *t1_get_stats(struct net_device *dev)
{
struct adapter *adapter = dev->ml_priv;
struct port_info *p = &adapter->port[dev->if_port];
struct net_device_stats *ns = &p->netstats;
struct net_device_stats *ns = &dev->stats;
const struct cmac_statistics *pstats;
/* Do a full update of the MAC stats */
......
......@@ -72,7 +72,6 @@ struct port_info {
struct cphy phy;
struct cmac mac;
struct link_config link_config;
struct net_device_stats netstats;
int activity;
__be32 iscsi_ipv4addr;
struct iscsi_config iscsic;
......
......@@ -1489,7 +1489,7 @@ static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
struct net_device_stats *ns = &pi->netstats;
struct net_device_stats *ns = &dev->stats;
const struct mac_stats *pstats;
spin_lock(&adapter->stats_lock);
......
......@@ -312,8 +312,6 @@ struct de_private {
u32 msg_enable;
struct net_device_stats net_stats;
struct pci_dev *pdev;
u16 setup_frame[DE_SETUP_FRAME_WORDS];
......@@ -388,14 +386,14 @@ static void de_rx_err_acct (struct de_private *de, unsigned rx_tail,
netif_warn(de, rx_err, de->dev,
"Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
status);
de->net_stats.rx_length_errors++;
de->dev->stats.rx_length_errors++;
}
} else if (status & RxError) {
/* There was a fatal error. */
de->net_stats.rx_errors++; /* end of a packet.*/
if (status & 0x0890) de->net_stats.rx_length_errors++;
if (status & RxErrCRC) de->net_stats.rx_crc_errors++;
if (status & RxErrFIFO) de->net_stats.rx_fifo_errors++;
de->dev->stats.rx_errors++; /* end of a packet.*/
if (status & 0x0890) de->dev->stats.rx_length_errors++;
if (status & RxErrCRC) de->dev->stats.rx_crc_errors++;
if (status & RxErrFIFO) de->dev->stats.rx_fifo_errors++;
}
}
......@@ -423,7 +421,7 @@ static void de_rx (struct de_private *de)
mapping = de->rx_skb[rx_tail].mapping;
if (unlikely(drop)) {
de->net_stats.rx_dropped++;
de->dev->stats.rx_dropped++;
goto rx_next;
}
......@@ -441,7 +439,7 @@ static void de_rx (struct de_private *de)
buflen = copying_skb ? (len + RX_OFFSET) : de->rx_buf_sz;
copy_skb = netdev_alloc_skb(de->dev, buflen);
if (unlikely(!copy_skb)) {
de->net_stats.rx_dropped++;
de->dev->stats.rx_dropped++;
drop = 1;
rx_work = 100;
goto rx_next;
......@@ -470,8 +468,8 @@ static void de_rx (struct de_private *de)
skb->protocol = eth_type_trans (skb, de->dev);
de->net_stats.rx_packets++;
de->net_stats.rx_bytes += skb->len;
de->dev->stats.rx_packets++;
de->dev->stats.rx_bytes += skb->len;
rc = netif_rx (skb);
if (rc == NET_RX_DROP)
drop = 1;
......@@ -572,18 +570,18 @@ static void de_tx (struct de_private *de)
netif_dbg(de, tx_err, de->dev,
"tx err, status 0x%x\n",
status);
de->net_stats.tx_errors++;
de->dev->stats.tx_errors++;
if (status & TxOWC)
de->net_stats.tx_window_errors++;
de->dev->stats.tx_window_errors++;
if (status & TxMaxCol)
de->net_stats.tx_aborted_errors++;
de->dev->stats.tx_aborted_errors++;
if (status & TxLinkFail)
de->net_stats.tx_carrier_errors++;
de->dev->stats.tx_carrier_errors++;
if (status & TxFIFOUnder)
de->net_stats.tx_fifo_errors++;
de->dev->stats.tx_fifo_errors++;
} else {
de->net_stats.tx_packets++;
de->net_stats.tx_bytes += skb->len;
de->dev->stats.tx_packets++;
de->dev->stats.tx_bytes += skb->len;
netif_dbg(de, tx_done, de->dev,
"tx done, slot %d\n", tx_tail);
}
......@@ -814,9 +812,9 @@ static void de_set_rx_mode (struct net_device *dev)
static inline void de_rx_missed(struct de_private *de, u32 rx_missed)
{
if (unlikely(rx_missed & RxMissedOver))
de->net_stats.rx_missed_errors += RxMissedMask;
de->dev->stats.rx_missed_errors += RxMissedMask;
else
de->net_stats.rx_missed_errors += (rx_missed & RxMissedMask);
de->dev->stats.rx_missed_errors += (rx_missed & RxMissedMask);
}
static void __de_get_stats(struct de_private *de)
......@@ -836,7 +834,7 @@ static struct net_device_stats *de_get_stats(struct net_device *dev)
__de_get_stats(de);
spin_unlock_irq(&de->lock);
return &de->net_stats;
return &dev->stats;
}
static inline int de_is_running (struct de_private *de)
......@@ -1348,7 +1346,7 @@ static void de_clean_rings (struct de_private *de)
struct sk_buff *skb = de->tx_skb[i].skb;
if ((skb) && (skb != DE_DUMMY_SKB)) {
if (skb != DE_SETUP_SKB) {
de->net_stats.tx_dropped++;
de->dev->stats.tx_dropped++;
pci_unmap_single(de->pdev,
de->tx_skb[i].mapping,
skb->len, PCI_DMA_TODEVICE);
......
......@@ -878,10 +878,10 @@ tx_error (struct net_device *dev, int tx_status)
frame_id = (tx_status & 0xffff0000);
printk (KERN_ERR "%s: Transmit error, TxStatus %4.4x, FrameId %d.\n",
dev->name, tx_status, frame_id);
np->stats.tx_errors++;
dev->stats.tx_errors++;
/* Ttransmit Underrun */
if (tx_status & 0x10) {
np->stats.tx_fifo_errors++;
dev->stats.tx_fifo_errors++;
dw16(TxStartThresh, dr16(TxStartThresh) + 0x10);
/* Transmit Underrun need to set TxReset, DMARest, FIFOReset */
dw16(ASICCtrl + 2,
......@@ -903,7 +903,7 @@ tx_error (struct net_device *dev, int tx_status)
}
/* Late Collision */
if (tx_status & 0x04) {
np->stats.tx_fifo_errors++;
dev->stats.tx_fifo_errors++;
/* TxReset and clear FIFO */
dw16(ASICCtrl + 2, TxReset | FIFOReset);
/* Wait reset done */
......@@ -916,13 +916,8 @@ tx_error (struct net_device *dev, int tx_status)
/* Let TxStartThresh stay default value */
}
/* Maximum Collisions */
#ifdef ETHER_STATS
if (tx_status & 0x08)
np->stats.collisions16++;
#else
if (tx_status & 0x08)
np->stats.collisions++;
#endif
dev->stats.collisions++;
/* Restart the Tx */
dw32(MACCtrl, dr16(MACCtrl) | TxEnable);
}
......@@ -952,15 +947,15 @@ receive_packet (struct net_device *dev)
break;
/* Update rx error statistics, drop packet. */
if (frame_status & RFS_Errors) {
np->stats.rx_errors++;
dev->stats.rx_errors++;
if (frame_status & (RxRuntFrame | RxLengthError))
np->stats.rx_length_errors++;
dev->stats.rx_length_errors++;
if (frame_status & RxFCSError)
np->stats.rx_crc_errors++;
dev->stats.rx_crc_errors++;
if (frame_status & RxAlignmentError && np->speed != 1000)
np->stats.rx_frame_errors++;
dev->stats.rx_frame_errors++;
if (frame_status & RxFIFOOverrun)
np->stats.rx_fifo_errors++;
dev->stats.rx_fifo_errors++;
} else {
struct sk_buff *skb;
......@@ -1096,23 +1091,23 @@ get_stats (struct net_device *dev)
/* All statistics registers need to be acknowledged,
else statistic overflow could cause problems */
np->stats.rx_packets += dr32(FramesRcvOk);
np->stats.tx_packets += dr32(FramesXmtOk);
np->stats.rx_bytes += dr32(OctetRcvOk);
np->stats.tx_bytes += dr32(OctetXmtOk);
dev->stats.rx_packets += dr32(FramesRcvOk);
dev->stats.tx_packets += dr32(FramesXmtOk);
dev->stats.rx_bytes += dr32(OctetRcvOk);
dev->stats.tx_bytes += dr32(OctetXmtOk);
np->stats.multicast = dr32(McstFramesRcvdOk);
np->stats.collisions += dr32(SingleColFrames)
dev->stats.multicast = dr32(McstFramesRcvdOk);
dev->stats.collisions += dr32(SingleColFrames)
+ dr32(MultiColFrames);
/* detailed tx errors */
stat_reg = dr16(FramesAbortXSColls);
np->stats.tx_aborted_errors += stat_reg;
np->stats.tx_errors += stat_reg;
dev->stats.tx_aborted_errors += stat_reg;
dev->stats.tx_errors += stat_reg;
stat_reg = dr16(CarrierSenseErrors);
np->stats.tx_carrier_errors += stat_reg;
np->stats.tx_errors += stat_reg;
dev->stats.tx_carrier_errors += stat_reg;
dev->stats.tx_errors += stat_reg;
/* Clear all other statistic register. */
dr32(McstOctetXmtOk);
......@@ -1142,7 +1137,7 @@ get_stats (struct net_device *dev)
dr16(TCPCheckSumErrors);
dr16(UDPCheckSumErrors);
dr16(IPCheckSumErrors);
return &np->stats;
return &dev->stats;
}
static int
......
......@@ -377,7 +377,6 @@ struct netdev_private {
void __iomem *eeprom_addr;
spinlock_t tx_lock;
spinlock_t rx_lock;
struct net_device_stats stats;
unsigned int rx_buf_sz; /* Based on MTU+slack. */
unsigned int speed; /* Operating speed */
unsigned int vlan; /* VLAN Id */
......
......@@ -1929,7 +1929,7 @@ static struct net_device_stats *emac_stats(struct net_device *ndev)
struct emac_instance *dev = netdev_priv(ndev);
struct emac_stats *st = &dev->stats;
struct emac_error_stats *est = &dev->estats;
struct net_device_stats *nst = &dev->nstats;
struct net_device_stats *nst = &ndev->stats;
unsigned long flags;
DBG2(dev, "stats" NL);
......
......@@ -265,7 +265,6 @@ struct emac_instance {
/* Stats
*/
struct emac_error_stats estats;
struct net_device_stats nstats;
struct emac_stats stats;
/* Misc
......
......@@ -228,8 +228,8 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
if (desc0 & (RX_DESC0_ERR | RX_DESC0_CRC_ERR | RX_DESC0_FTL |
RX_DESC0_RUNT | RX_DESC0_ODD_NB)) {
net_dbg_ratelimited("packet error\n");
priv->stats.rx_dropped++;
priv->stats.rx_errors++;
ndev->stats.rx_dropped++;
ndev->stats.rx_errors++;
goto rx_next;
}
......@@ -245,8 +245,8 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
if (unlikely(!skb)) {
net_dbg_ratelimited("netdev_alloc_skb_ip_align failed\n");
priv->stats.rx_dropped++;
priv->stats.rx_errors++;
ndev->stats.rx_dropped++;
ndev->stats.rx_errors++;
goto rx_next;
}
......@@ -256,10 +256,10 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
napi_gro_receive(&priv->napi, skb);
rx++;
priv->stats.rx_packets++;
priv->stats.rx_bytes += len;
ndev->stats.rx_packets++;
ndev->stats.rx_bytes += len;
if (desc0 & RX_DESC0_MULTICAST)
priv->stats.multicast++;
ndev->stats.multicast++;
rx_next:
wmb(); /* prevent setting ownership back too early */
......@@ -296,8 +296,8 @@ static void moxart_tx_finished(struct net_device *ndev)
dma_unmap_single(&ndev->dev, priv->tx_mapping[tx_tail],
priv->tx_len[tx_tail], DMA_TO_DEVICE);
priv->stats.tx_packets++;
priv->stats.tx_bytes += priv->tx_skb[tx_tail]->len;
ndev->stats.tx_packets++;
ndev->stats.tx_bytes += priv->tx_skb[tx_tail]->len;
dev_kfree_skb_irq(priv->tx_skb[tx_tail]);
priv->tx_skb[tx_tail] = NULL;
......@@ -349,7 +349,7 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (moxart_desc_read(desc + TX_REG_OFFSET_DESC0) & TX_DESC0_DMA_OWN) {
net_dbg_ratelimited("no TX space for packet\n");
priv->stats.tx_dropped++;
ndev->stats.tx_dropped++;
goto out_unlock;
}
rmb(); /* ensure data is only read that had TX_DESC0_DMA_OWN cleared */
......@@ -400,13 +400,6 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
return ret;
}
static struct net_device_stats *moxart_mac_get_stats(struct net_device *ndev)
{
struct moxart_mac_priv_t *priv = netdev_priv(ndev);
return &priv->stats;
}
static void moxart_mac_setmulticast(struct net_device *ndev)
{
struct moxart_mac_priv_t *priv = netdev_priv(ndev);
......@@ -456,7 +449,6 @@ static const struct net_device_ops moxart_netdev_ops = {
.ndo_open = moxart_mac_open,
.ndo_stop = moxart_mac_stop,
.ndo_start_xmit = moxart_mac_start_xmit,
.ndo_get_stats = moxart_mac_get_stats,
.ndo_set_rx_mode = moxart_mac_set_rx_mode,
.ndo_set_mac_address = moxart_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
......
......@@ -293,7 +293,6 @@
struct moxart_mac_priv_t {
void __iomem *base;
struct net_device_stats stats;
unsigned int reg_maccr;
unsigned int reg_imr;
struct napi_struct napi;
......
......@@ -152,7 +152,6 @@ struct w90p910_ether {
struct tran_pdesc *tdesc;
dma_addr_t rdesc_phys;
dma_addr_t tdesc_phys;
struct net_device_stats stats;
struct platform_device *pdev;
struct resource *res;
struct sk_buff *skb;
......@@ -584,15 +583,6 @@ static int w90p910_ether_close(struct net_device *dev)
return 0;
}
static struct net_device_stats *w90p910_ether_stats(struct net_device *dev)
{
struct w90p910_ether *ether;
ether = netdev_priv(dev);
return &ether->stats;
}
static int w90p910_send_frame(struct net_device *dev,
unsigned char *data, int length)
{
......@@ -671,10 +661,10 @@ static irqreturn_t w90p910_tx_interrupt(int irq, void *dev_id)
ether->finish_tx = 0;
if (txbd->sl & TXDS_TXCP) {
ether->stats.tx_packets++;
ether->stats.tx_bytes += txbd->sl & 0xFFFF;
dev->stats.tx_packets++;
dev->stats.tx_bytes += txbd->sl & 0xFFFF;
} else {
ether->stats.tx_errors++;
dev->stats.tx_errors++;
}
txbd->sl = 0x0;
......@@ -730,7 +720,7 @@ static void netdev_rx(struct net_device *dev)
data = ether->rdesc->recv_buf[ether->cur_rx];
skb = netdev_alloc_skb(dev, length + 2);
if (!skb) {
ether->stats.rx_dropped++;
dev->stats.rx_dropped++;
return;
}
......@@ -738,24 +728,24 @@ static void netdev_rx(struct net_device *dev)
skb_put(skb, length);
skb_copy_to_linear_data(skb, data, length);
skb->protocol = eth_type_trans(skb, dev);
ether->stats.rx_packets++;
ether->stats.rx_bytes += length;
dev->stats.rx_packets++;
dev->stats.rx_bytes += length;
netif_rx(skb);
} else {
ether->stats.rx_errors++;
dev->stats.rx_errors++;
if (status & RXDS_RP) {
dev_err(&pdev->dev, "rx runt err\n");
ether->stats.rx_length_errors++;
dev->stats.rx_length_errors++;
} else if (status & RXDS_CRCE) {
dev_err(&pdev->dev, "rx crc err\n");
ether->stats.rx_crc_errors++;
dev->stats.rx_crc_errors++;
} else if (status & RXDS_ALIE) {
dev_err(&pdev->dev, "rx alignment err\n");
ether->stats.rx_frame_errors++;
dev->stats.rx_frame_errors++;
} else if (status & RXDS_PTLE) {
dev_err(&pdev->dev, "rx longer err\n");
ether->stats.rx_over_errors++;
dev->stats.rx_over_errors++;
}
}
......@@ -912,7 +902,6 @@ static const struct net_device_ops w90p910_ether_netdev_ops = {
.ndo_open = w90p910_ether_open,
.ndo_stop = w90p910_ether_close,
.ndo_start_xmit = w90p910_ether_start_xmit,
.ndo_get_stats = w90p910_ether_stats,
.ndo_set_rx_mode = w90p910_ether_set_multicast_list,
.ndo_set_mac_address = w90p910_set_mac_address,
.ndo_do_ioctl = w90p910_ether_ioctl,
......
......@@ -169,7 +169,7 @@ static void bigmac_stop(struct bigmac *bp)
static void bigmac_get_counters(struct bigmac *bp, void __iomem *bregs)
{
struct net_device_stats *stats = &bp->enet_stats;
struct net_device_stats *stats = &bp->dev->stats;
stats->rx_crc_errors += sbus_readl(bregs + BMAC_RCRCECTR);
sbus_writel(0, bregs + BMAC_RCRCECTR);
......@@ -774,8 +774,8 @@ static void bigmac_tx(struct bigmac *bp)
if (this->tx_flags & TXD_OWN)
break;
skb = bp->tx_skbs[elem];
bp->enet_stats.tx_packets++;
bp->enet_stats.tx_bytes += skb->len;
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
dma_unmap_single(&bp->bigmac_op->dev,
this->tx_addr, skb->len,
DMA_TO_DEVICE);
......@@ -811,12 +811,12 @@ static void bigmac_rx(struct bigmac *bp)
/* Check for errors. */
if (len < ETH_ZLEN) {
bp->enet_stats.rx_errors++;
bp->enet_stats.rx_length_errors++;
bp->dev->stats.rx_errors++;
bp->dev->stats.rx_length_errors++;
drop_it:
/* Return it to the BigMAC. */
bp->enet_stats.rx_dropped++;
bp->dev->stats.rx_dropped++;
this->rx_flags =
(RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));
goto next;
......@@ -875,8 +875,8 @@ static void bigmac_rx(struct bigmac *bp)
/* No checksums done by the BigMAC ;-( */
skb->protocol = eth_type_trans(skb, bp->dev);
netif_rx(skb);
bp->enet_stats.rx_packets++;
bp->enet_stats.rx_bytes += len;
bp->dev->stats.rx_packets++;
bp->dev->stats.rx_bytes += len;
next:
elem = NEXT_RX(elem);
this = &rxbase[elem];
......@@ -987,7 +987,7 @@ static struct net_device_stats *bigmac_get_stats(struct net_device *dev)
struct bigmac *bp = netdev_priv(dev);
bigmac_get_counters(bp, bp->bregs);
return &bp->enet_stats;
return &dev->stats;
}
static void bigmac_set_multicast(struct net_device *dev)
......
......@@ -311,7 +311,6 @@ struct bigmac {
enum bigmac_timer_state timer_state;
unsigned int timer_ticks;
struct net_device_stats enet_stats;
struct platform_device *qec_op;
struct platform_device *bigmac_op;
struct net_device *dev;
......
......@@ -933,7 +933,7 @@ static void happy_meal_stop(struct happy_meal *hp, void __iomem *gregs)
/* hp->happy_lock must be held */
static void happy_meal_get_counters(struct happy_meal *hp, void __iomem *bregs)
{
struct net_device_stats *stats = &hp->net_stats;
struct net_device_stats *stats = &hp->dev->stats;
stats->rx_crc_errors += hme_read32(hp, bregs + BMAC_RCRCECTR);
hme_write32(hp, bregs + BMAC_RCRCECTR, 0);
......@@ -1947,7 +1947,7 @@ static void happy_meal_tx(struct happy_meal *hp)
break;
}
hp->tx_skbs[elem] = NULL;
hp->net_stats.tx_bytes += skb->len;
dev->stats.tx_bytes += skb->len;
for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
dma_addr = hme_read_desc32(hp, &this->tx_addr);
......@@ -1964,7 +1964,7 @@ static void happy_meal_tx(struct happy_meal *hp)
}
dev_kfree_skb_irq(skb);
hp->net_stats.tx_packets++;
dev->stats.tx_packets++;
}
hp->tx_old = elem;
TXD((">"));
......@@ -2009,17 +2009,17 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
/* Check for errors. */
if ((len < ETH_ZLEN) || (flags & RXFLAG_OVERFLOW)) {
RXD(("ERR(%08x)]", flags));
hp->net_stats.rx_errors++;
dev->stats.rx_errors++;
if (len < ETH_ZLEN)
hp->net_stats.rx_length_errors++;
dev->stats.rx_length_errors++;
if (len & (RXFLAG_OVERFLOW >> 16)) {
hp->net_stats.rx_over_errors++;
hp->net_stats.rx_fifo_errors++;
dev->stats.rx_over_errors++;
dev->stats.rx_fifo_errors++;
}
/* Return it to the Happy meal. */
drop_it:
hp->net_stats.rx_dropped++;
dev->stats.rx_dropped++;
hme_write_rxd(hp, this,
(RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
dma_addr);
......@@ -2084,8 +2084,8 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
hp->net_stats.rx_packets++;
hp->net_stats.rx_bytes += len;
dev->stats.rx_packets++;
dev->stats.rx_bytes += len;
next:
elem = NEXT_RX(elem);
this = &rxbase[elem];
......@@ -2396,7 +2396,7 @@ static struct net_device_stats *happy_meal_get_stats(struct net_device *dev)
happy_meal_get_counters(hp, hp->bigmacregs);
spin_unlock_irq(&hp->happy_lock);
return &hp->net_stats;
return &dev->stats;
}
static void happy_meal_set_multicast(struct net_device *dev)
......
......@@ -418,8 +418,6 @@ struct happy_meal {
int rx_new, tx_new, rx_old, tx_old;
struct net_device_stats net_stats; /* Statistical counters */
#if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
u32 (*read32)(void __iomem *);
void (*write32)(void __iomem *, u32);
......
......@@ -245,8 +245,6 @@ struct kaweth_device
__u16 packet_filter_bitmap;
struct kaweth_ethernet_configuration configuration;
struct net_device_stats stats;
};
/****************************************************************
......@@ -598,7 +596,7 @@ static void kaweth_usb_receive(struct urb *urb)
struct sk_buff *skb;
if (unlikely(status == -EPIPE)) {
kaweth->stats.rx_errors++;
net->stats.rx_errors++;
kaweth->end = 1;
wake_up(&kaweth->term_wait);
dev_dbg(dev, "Status was -EPIPE.\n");
......@@ -613,12 +611,12 @@ static void kaweth_usb_receive(struct urb *urb)
}
if (unlikely(status == -EPROTO || status == -ETIME ||
status == -EILSEQ)) {
kaweth->stats.rx_errors++;
net->stats.rx_errors++;
dev_dbg(dev, "Status was -EPROTO, -ETIME, or -EILSEQ.\n");
return;
}
if (unlikely(status == -EOVERFLOW)) {
kaweth->stats.rx_errors++;
net->stats.rx_errors++;
dev_dbg(dev, "Status was -EOVERFLOW.\n");
}
spin_lock(&kaweth->device_lock);
......@@ -663,8 +661,8 @@ static void kaweth_usb_receive(struct urb *urb)
netif_rx(skb);
kaweth->stats.rx_packets++;
kaweth->stats.rx_bytes += pkt_len;
net->stats.rx_packets++;
net->stats.rx_bytes += pkt_len;
}
kaweth_resubmit_rx_urb(kaweth, GFP_ATOMIC);
......@@ -810,7 +808,7 @@ static netdev_tx_t kaweth_start_xmit(struct sk_buff *skb,
dev_kfree_skb_irq(skb);
skb = copied_skb;
if (!copied_skb) {
kaweth->stats.tx_errors++;
net->stats.tx_errors++;
netif_start_queue(net);
spin_unlock_irq(&kaweth->device_lock);
return NETDEV_TX_OK;
......@@ -834,15 +832,15 @@ static netdev_tx_t kaweth_start_xmit(struct sk_buff *skb,
{
dev_warn(&net->dev, "kaweth failed tx_urb %d\n", res);
skip:
kaweth->stats.tx_errors++;
net->stats.tx_errors++;
netif_start_queue(net);
dev_kfree_skb_irq(skb);
}
else
{
kaweth->stats.tx_packets++;
kaweth->stats.tx_bytes += skb->len;
net->stats.tx_packets++;
net->stats.tx_bytes += skb->len;
}
spin_unlock_irq(&kaweth->device_lock);
......@@ -911,15 +909,6 @@ static void kaweth_async_set_rx_mode(struct kaweth_device *kaweth)
}
}
/****************************************************************
* kaweth_netdev_stats
****************************************************************/
static struct net_device_stats *kaweth_netdev_stats(struct net_device *dev)
{
struct kaweth_device *kaweth = netdev_priv(dev);
return &kaweth->stats;
}
/****************************************************************
* kaweth_tx_timeout
****************************************************************/
......@@ -928,7 +917,7 @@ static void kaweth_tx_timeout(struct net_device *net)
struct kaweth_device *kaweth = netdev_priv(net);
dev_warn(&net->dev, "%s: Tx timed out. Resetting.\n", net->name);
kaweth->stats.tx_errors++;
net->stats.tx_errors++;
netif_trans_update(net);
usb_unlink_urb(kaweth->tx_urb);
......@@ -981,7 +970,6 @@ static const struct net_device_ops kaweth_netdev_ops = {
.ndo_start_xmit = kaweth_start_xmit,
.ndo_tx_timeout = kaweth_tx_timeout,
.ndo_set_rx_mode = kaweth_set_rx_mode,
.ndo_get_stats = kaweth_netdev_stats,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
......
......@@ -501,13 +501,13 @@ static void read_bulk_callback(struct urb *urb)
if (rx_status & 0x1e) {
netif_dbg(pegasus, rx_err, net,
"RX packet error %x\n", rx_status);
pegasus->stats.rx_errors++;
net->stats.rx_errors++;
if (rx_status & 0x06) /* long or runt */
pegasus->stats.rx_length_errors++;
net->stats.rx_length_errors++;
if (rx_status & 0x08)
pegasus->stats.rx_crc_errors++;
net->stats.rx_crc_errors++;
if (rx_status & 0x10) /* extra bits */
pegasus->stats.rx_frame_errors++;
net->stats.rx_frame_errors++;
goto goon;
}
if (pegasus->chip == 0x8513) {
......@@ -535,8 +535,8 @@ static void read_bulk_callback(struct urb *urb)
skb_put(pegasus->rx_skb, pkt_len);
pegasus->rx_skb->protocol = eth_type_trans(pegasus->rx_skb, net);
netif_rx(pegasus->rx_skb);
pegasus->stats.rx_packets++;
pegasus->stats.rx_bytes += pkt_len;
net->stats.rx_packets++;
net->stats.rx_bytes += pkt_len;
if (pegasus->flags & PEGASUS_UNPLUG)
return;
......@@ -670,13 +670,13 @@ static void intr_callback(struct urb *urb)
/* byte 0 == tx_status1, reg 2B */
if (d[0] & (TX_UNDERRUN|EXCESSIVE_COL
|LATE_COL|JABBER_TIMEOUT)) {
pegasus->stats.tx_errors++;
net->stats.tx_errors++;
if (d[0] & TX_UNDERRUN)
pegasus->stats.tx_fifo_errors++;
net->stats.tx_fifo_errors++;
if (d[0] & (EXCESSIVE_COL | JABBER_TIMEOUT))
pegasus->stats.tx_aborted_errors++;
net->stats.tx_aborted_errors++;
if (d[0] & LATE_COL)
pegasus->stats.tx_window_errors++;
net->stats.tx_window_errors++;
}
/* d[5].LINK_STATUS lies on some adapters.
......@@ -685,7 +685,7 @@ static void intr_callback(struct urb *urb)
*/
/* bytes 3-4 == rx_lostpkt, reg 2E/2F */
pegasus->stats.rx_missed_errors += ((d[3] & 0x7f) << 8) | d[4];
net->stats.rx_missed_errors += ((d[3] & 0x7f) << 8) | d[4];
}
res = usb_submit_urb(urb, GFP_ATOMIC);
......@@ -701,7 +701,7 @@ static void pegasus_tx_timeout(struct net_device *net)
pegasus_t *pegasus = netdev_priv(net);
netif_warn(pegasus, timer, net, "tx timeout\n");
usb_unlink_urb(pegasus->tx_urb);
pegasus->stats.tx_errors++;
net->stats.tx_errors++;
}
static netdev_tx_t pegasus_start_xmit(struct sk_buff *skb,
......@@ -731,23 +731,18 @@ static netdev_tx_t pegasus_start_xmit(struct sk_buff *skb,
netif_device_detach(pegasus->net);
break;
default:
pegasus->stats.tx_errors++;
net->stats.tx_errors++;
netif_start_queue(net);
}
} else {
pegasus->stats.tx_packets++;
pegasus->stats.tx_bytes += skb->len;
net->stats.tx_packets++;
net->stats.tx_bytes += skb->len;
}
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
static struct net_device_stats *pegasus_netdev_stats(struct net_device *dev)
{
return &((pegasus_t *) netdev_priv(dev))->stats;
}
static inline void disable_net_traffic(pegasus_t *pegasus)
{
__le16 tmp = cpu_to_le16(0);
......@@ -1294,7 +1289,6 @@ static const struct net_device_ops pegasus_netdev_ops = {
.ndo_do_ioctl = pegasus_ioctl,
.ndo_start_xmit = pegasus_start_xmit,
.ndo_set_rx_mode = pegasus_set_multicast,
.ndo_get_stats = pegasus_netdev_stats,
.ndo_tx_timeout = pegasus_tx_timeout,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
......
......@@ -83,7 +83,6 @@ typedef struct pegasus {
struct usb_device *usb;
struct usb_interface *intf;
struct net_device *net;
struct net_device_stats stats;
struct mii_if_info mii;
unsigned flags;
unsigned features;
......
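
A note on the drivers (moxart, w90p910, kaweth, pegasus) whose trivial ndo_get_stats callbacks are deleted outright: this is safe because the core already falls back to the stats embedded in struct net_device when a driver provides no stats hook. Below is a simplified, paraphrased sketch of that fallback in dev_get_stats() (net/core/dev.c) -- not the verbatim upstream code, and the function name is changed to make that clear:

/* Paraphrased sketch of the dev_get_stats() fallback chain; simplified,
 * not the exact kernel source. */
static struct rtnl_link_stats64 *
dev_get_stats_sketch(struct net_device *dev, struct rtnl_link_stats64 *storage)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_get_stats64) {
		ops->ndo_get_stats64(dev, storage);
	} else if (ops->ndo_get_stats) {
		netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
	} else {
		/* No driver hook: read the counters in net_device itself,
		 * which is exactly what the converted drivers update. */
		netdev_stats_to_stats64(storage, &dev->stats);
	}
	return storage;
}
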