Commit 038403f6 authored by Linus Torvalds

Merge master.kernel.org:/home/davem/BK/net-2.5

into home.transmeta.com:/home/torvalds/v2.5/linux
parents 1fcab22d 282d9b16
@@ -176,6 +176,16 @@
* Steve Mead <steve.mead at comdev dot cc>
* - Port Gleb Natapov's multicast support patches from 2.4.12
*   to 2.4.18 adding support for multicast.
*
* 2002/06/17 - Tony Cureington <tony.cureington * hp_com>
* - corrected uninitialized pointer (ifr.ifr_data) in bond_check_dev_link;
* actually changed function to use ETHTOOL, then MIIPHY, and finally
* MIIREG to determine the link status
* - fixed bad ifr_data pointer assignments in bond_ioctl
* - corrected mode 1 being reported as active-backup in bond_get_info;
* also added text to distinguish type of load balancing (rr or xor)
* - change arp_ip_target module param from "1-12s" (array of 12 ptrs)
* to "s" (a single ptr)
*/
#include <linux/config.h>
@@ -210,6 +220,9 @@
#include <linux/smp.h>
#include <linux/if_ether.h>
#include <linux/if_arp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
/* monitor all links that often (in milliseconds). <=0 disables monitoring */
#ifndef BOND_LINK_MON_INTERV
@@ -253,7 +266,7 @@ MODULE_PARM_DESC(miimon, "Link check interval in milliseconds");
MODULE_PARM(mode, "i");
MODULE_PARM(arp_interval, "i");
MODULE_PARM_DESC(arp_interval, "arp interval in milliseconds");
-MODULE_PARM(arp_ip_target, "1-12s");
+MODULE_PARM(arp_ip_target, "s");
MODULE_PARM_DESC(arp_ip_target, "arp target in n.n.n.n form");
MODULE_PARM_DESC(mode, "Mode of operation : 0 for round robin, 1 for active-backup, 2 for xor");
MODULE_PARM(updelay, "i");
@@ -386,21 +399,51 @@ static u16 bond_check_dev_link(struct net_device *dev)
 {
 	static int (* ioctl)(struct net_device *, struct ifreq *, int);
 	struct ifreq ifr;
-	u16 *data = (u16 *)&ifr.ifr_data;
-
-	/* data[0] automagically filled by the ioctl */
-	data[1] = 1;	/* MII location 1 reports Link Status */
-
-	if (((ioctl = dev->do_ioctl) != NULL) &&	/* ioctl to access MII */
-	    (ioctl(dev, &ifr, SIOCGMIIPHY) == 0)) {
-		/* now, data[3] contains info about link status:
-		   - data[3] & 0x04 means link up
-		   - data[3] & 0x20 means end of auto-negotiation
-		*/
-		return data[3];
-	} else {
-		return MII_LINK_READY;	/* spoof link up (we can't check it) */
+	struct mii_ioctl_data mii;
+	struct ethtool_value etool;
+
+	if ((ioctl = dev->do_ioctl) != NULL) {	/* ioctl to access MII */
+		/* TODO: set pointer to correct ioctl on a per team member */
+		/*       basis to make this more efficient. that is, once  */
+		/*       we determine the correct ioctl, we will always    */
+		/*       call it and not the others for that team member.  */
+
+		/* try the SIOCETHTOOL ioctl first; some drivers cache     */
+		/* ETHTOOL_GLINK for a period of time, and we need link    */
+		/* status reported in real time; if the value is cached,   */
+		/* the miimon module parm may have no effect...            */
+		etool.cmd = ETHTOOL_GLINK;
+		ifr.ifr_data = (char *)&etool;
+		if (ioctl(dev, &ifr, SIOCETHTOOL) == 0) {
+			if (etool.data == 1) {
+				return MII_LINK_READY;
+			} else {
+				return 0;
+			}
+		}
+
+		ifr.ifr_data = (char *)&mii;
+		/* try MIIPHY first then, if that doesn't work, try MIIREG */
+		if (ioctl(dev, &ifr, SIOCGMIIPHY) == 0) {
+			/* now, mii.phy_id contains info about link status:
+			   - mii.phy_id & 0x04 means link up
+			   - mii.phy_id & 0x20 means end of auto-negotiation
+			*/
+			return mii.phy_id;
+		}
+
+		mii.reg_num = 1;	/* the MII register we want to read */
+		if (ioctl(dev, &ifr, SIOCGMIIREG) == 0) {
+			/* mii.val_out contains the same link info as
+			   mii.phy_id above */
+			return mii.val_out;
+		}
 	}
+
+	return MII_LINK_READY;	/* spoof link up (we can't check it) */
 }

static u16 bond_check_mii_link(bonding_t *bond)
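A note on the new link-check path: the fallback order above is SIOCETHTOOL (ETHTOOL_GLINK), then SIOCGMIIPHY, then SIOCGMIIREG, and in every case ifr.ifr_data must point at a caller-supplied buffer, which is exactly the uninitialized-pointer bug the changelog mentions. The same ETHTOOL_GLINK query can be issued from user space to see what a slave driver actually reports; a minimal sketch follows (not part of this patch, and the interface name "eth0" is only an example):

/*
 * User-space sketch of the ETHTOOL_GLINK query used above; the
 * interface name "eth0" is just an example.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_value edata;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

	edata.cmd = ETHTOOL_GLINK;
	ifr.ifr_data = (char *)&edata;	/* must point at a real buffer,
					 * the bug fixed in bond_check_dev_link */

	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("link is %s\n", edata.data ? "up" : "down");
	else
		perror("SIOCETHTOOL");

	close(fd);
	return 0;
}

If the driver does not implement ETHTOOL_GLINK the ioctl simply fails, and a caller would fall back to the MII ioctls, as the kernel code above does.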
@@ -1707,7 +1750,7 @@ static int bond_ioctl(struct net_device *master_dev, struct ifreq *ifr, int cmd)
	switch (cmd) {
	case SIOCGMIIPHY:
-		data = (u16 *)&ifr->ifr_data;
+		data = (u16 *)ifr->ifr_data;
		if (data == NULL) {
			return -EINVAL;
		}
@@ -1718,7 +1761,7 @@ static int bond_ioctl(struct net_device *master_dev, struct ifreq *ifr, int cmd)
		 * We do this again just in case we were called by SIOCGMIIREG
		 * instead of SIOCGMIIPHY.
		 */
-		data = (u16 *)&ifr->ifr_data;
+		data = (u16 *)ifr->ifr_data;
		if (data == NULL) {
			return -EINVAL;
		}
@@ -2035,7 +2078,28 @@ static int bond_get_info(char *buf, char **start, off_t offset, int length)
link = bond_check_mii_link(bond);
len += sprintf(buf + len, "Bonding Mode: ");
len += sprintf(buf + len, "%s\n", mode ? "active-backup" : "load balancing");
switch (mode) {
case BOND_MODE_ACTIVEBACKUP:
len += sprintf(buf + len, "%s\n",
"active-backup");
break;
case BOND_MODE_ROUNDROBIN:
len += sprintf(buf + len, "%s\n",
"load balancing (round-robin)");
break;
case BOND_MODE_XOR:
len += sprintf(buf + len, "%s\n",
"load balancing (xor)");
break;
default:
len += sprintf(buf + len, "%s\n",
"unknown");
break;
}
if (mode == BOND_MODE_ACTIVEBACKUP) {
read_lock_irqsave(&bond->lock, flags);
@@ -2282,7 +2346,32 @@ static int __init bonding_init(void)
}
memset(dev_bonds, 0, max_bonds*sizeof(struct net_device));
if (updelay < 0) {
printk(KERN_WARNING
"bonding_init(): updelay module parameter (%d), "
"not in range 0-%d, so it was reset to 0\n",
updelay, INT_MAX);
updelay = 0;
}
if (downdelay < 0) {
printk(KERN_WARNING
"bonding_init(): downdelay module parameter (%d), "
"not in range 0-%d, so it was reset to 0\n",
downdelay, INT_MAX);
downdelay = 0;
}
if (arp_interval < 0) {
printk(KERN_WARNING
"bonding_init(): arp_interval module parameter (%d), "
"not in range 0-%d, so it was reset to %d\n",
arp_interval, INT_MAX, BOND_LINK_ARP_INTERV);
arp_interval = BOND_LINK_ARP_INTERV;
}
if (arp_ip_target) {
/* TODO: check and log bad ip address */
if (my_inet_aton(arp_ip_target, &arp_target) == 0) {
arp_interval = 0;
}
......
@@ -2201,7 +2201,7 @@ static int happy_meal_open(struct net_device *dev)
	 */
	if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO) {
		if (request_irq(dev->irq, &happy_meal_interrupt,
-				SA_SHIRQ, "HAPPY MEAL", (void *)dev)) {
+				SA_SHIRQ, dev->name, (void *)dev)) {
			HMD(("EAGAIN\n"));
#ifdef __sparc__
			printk(KERN_ERR "happy_meal(SBUS): Can't order irq %s to go.\n",
......
...@@ -52,8 +52,8 @@ ...@@ -52,8 +52,8 @@
#define DRV_MODULE_NAME "tg3" #define DRV_MODULE_NAME "tg3"
#define PFX DRV_MODULE_NAME ": " #define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION	"0.99"
+#define DRV_MODULE_VERSION	"0.99-NAPI"
-#define DRV_MODULE_RELDATE	"Jun 11, 2002"
+#define DRV_MODULE_RELDATE	"Jun 20, 2002"
#define TG3_DEF_MAC_MODE 0 #define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0 #define TG3_DEF_RX_MODE 0
...@@ -404,7 +404,8 @@ static int tg3_set_power_state(struct tg3 *tp, int state) ...@@ -404,7 +404,8 @@ static int tg3_set_power_state(struct tg3 *tp, int state)
break; break;
default: default:
printk(KERN_WARNING "%s: Invalid power state (%d) requested.\n", printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
"requested.\n",
tp->dev->name, state); tp->dev->name, state);
return -EINVAL; return -EINVAL;
}; };
...@@ -490,9 +491,9 @@ static int tg3_set_power_state(struct tg3 *tp, int state) ...@@ -490,9 +491,9 @@ static int tg3_set_power_state(struct tg3 *tp, int state)
static void tg3_link_report(struct tg3 *tp) static void tg3_link_report(struct tg3 *tp)
{ {
if (!netif_carrier_ok(tp->dev)) { if (!netif_carrier_ok(tp->dev)) {
printk("%s: Link is down.\n", tp->dev->name); printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
} else { } else {
printk("%s: Link is up at %d Mbps, %s duplex.\n", printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
tp->dev->name, tp->dev->name,
(tp->link_config.active_speed == SPEED_1000 ? (tp->link_config.active_speed == SPEED_1000 ?
1000 : 1000 :
...@@ -501,7 +502,8 @@ static void tg3_link_report(struct tg3 *tp) ...@@ -501,7 +502,8 @@ static void tg3_link_report(struct tg3 *tp)
(tp->link_config.active_duplex == DUPLEX_FULL ? (tp->link_config.active_duplex == DUPLEX_FULL ?
"full" : "half")); "full" : "half"));
printk("%s: Flow control is %s for TX and %s for RX.\n", printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
"%s for RX.\n",
tp->dev->name, tp->dev->name,
(tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off", (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
(tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off"); (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
...@@ -1725,7 +1727,7 @@ static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key, ...@@ -1725,7 +1727,7 @@ static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
#if TG3_VLAN_TAG_USED #if TG3_VLAN_TAG_USED
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag) static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{ {
-	return vlan_hwaccel_rx(skb, tp->vlgrp, vlan_tag);
+	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
} }
#endif #endif
...@@ -1753,16 +1755,18 @@ static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag) ...@@ -1753,16 +1755,18 @@ static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
* If both the host and chip were to write into the same ring, cache line * If both the host and chip were to write into the same ring, cache line
* eviction could occur since both entities want it in an exclusive state. * eviction could occur since both entities want it in an exclusive state.
*/ */
static void tg3_rx(struct tg3 *tp) static int tg3_rx(struct tg3 *tp, int budget)
{ {
u32 work_mask; u32 work_mask;
u32 rx_rcb_ptr = tp->rx_rcb_ptr; u32 rx_rcb_ptr = tp->rx_rcb_ptr;
u16 hw_idx, sw_idx; u16 hw_idx, sw_idx;
int received;
hw_idx = tp->hw_status->idx[0].rx_producer; hw_idx = tp->hw_status->idx[0].rx_producer;
sw_idx = rx_rcb_ptr % TG3_RX_RCB_RING_SIZE; sw_idx = rx_rcb_ptr % TG3_RX_RCB_RING_SIZE;
work_mask = 0; work_mask = 0;
while (sw_idx != hw_idx) { received = 0;
while (sw_idx != hw_idx && budget > 0) {
struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx]; struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
unsigned int len; unsigned int len;
struct sk_buff *skb; struct sk_buff *skb;
...@@ -1860,9 +1864,11 @@ static void tg3_rx(struct tg3 *tp) ...@@ -1860,9 +1864,11 @@ static void tg3_rx(struct tg3 *tp)
desc->err_vlan & RXD_VLAN_MASK); desc->err_vlan & RXD_VLAN_MASK);
} else } else
#endif #endif
netif_rx(skb); netif_receive_skb(skb);
tp->dev->last_rx = jiffies; tp->dev->last_rx = jiffies;
received++;
budget--;
next_pkt: next_pkt:
(*post_ptr)++; (*post_ptr)++;
...@@ -1894,110 +1900,17 @@ static void tg3_rx(struct tg3 *tp) ...@@ -1894,110 +1900,17 @@ static void tg3_rx(struct tg3 *tp)
sw_idx); sw_idx);
} }
#endif #endif
}
#define PKT_RATE_LOW 22000
#define PKT_RATE_HIGH 61000
static void tg3_rate_sample(struct tg3 *tp, unsigned long ticks)
{
u32 delta, rx_now, tx_now;
int new_vals, do_tx, do_rx;
rx_now = tp->hw_stats->rx_ucast_packets.low;
tx_now = tp->hw_stats->COS_out_packets[0].low;
delta = (rx_now - tp->last_rx_count);
delta += (tx_now - tp->last_tx_count);
delta /= (ticks / tp->coalesce_config.rate_sample_jiffies);
tp->last_rx_count = rx_now;
tp->last_tx_count = tx_now;
new_vals = 0;
do_tx = (tp->tg3_flags & TG3_FLAG_ADAPTIVE_TX) != 0;
do_rx = (tp->tg3_flags & TG3_FLAG_ADAPTIVE_RX) != 0;
if (delta < tp->coalesce_config.pkt_rate_low) {
if (do_rx &&
tp->coalesce_config.rx_max_coalesced_frames !=
tp->coalesce_config.rx_max_coalesced_frames_low) {
tp->coalesce_config.rx_max_coalesced_frames =
LOW_RXMAX_FRAMES;
tp->coalesce_config.rx_coalesce_ticks =
LOW_RXCOL_TICKS;
new_vals = 1;
}
if (do_tx &&
tp->coalesce_config.tx_max_coalesced_frames !=
tp->coalesce_config.tx_max_coalesced_frames_low) {
tp->coalesce_config.tx_max_coalesced_frames =
tp->coalesce_config.tx_max_coalesced_frames_low;
tp->coalesce_config.tx_coalesce_ticks =
tp->coalesce_config.tx_coalesce_ticks_low;
new_vals = 1;
}
} else if (delta < tp->coalesce_config.pkt_rate_high) {
if (do_rx &&
tp->coalesce_config.rx_max_coalesced_frames !=
tp->coalesce_config.rx_max_coalesced_frames_def) {
tp->coalesce_config.rx_max_coalesced_frames =
tp->coalesce_config.rx_max_coalesced_frames_def;
tp->coalesce_config.rx_coalesce_ticks =
tp->coalesce_config.rx_coalesce_ticks_def;
new_vals = 1;
}
if (do_tx &&
tp->coalesce_config.tx_max_coalesced_frames !=
tp->coalesce_config.tx_max_coalesced_frames_def) {
tp->coalesce_config.tx_max_coalesced_frames =
tp->coalesce_config.tx_max_coalesced_frames_def;
tp->coalesce_config.tx_coalesce_ticks =
tp->coalesce_config.tx_coalesce_ticks_def;
new_vals = 1;
}
} else {
if (do_rx &&
tp->coalesce_config.rx_max_coalesced_frames !=
tp->coalesce_config.rx_max_coalesced_frames_high) {
tp->coalesce_config.rx_max_coalesced_frames =
tp->coalesce_config.rx_max_coalesced_frames_high;
tp->coalesce_config.rx_coalesce_ticks =
tp->coalesce_config.rx_coalesce_ticks_high;
new_vals = 1;
}
if (do_tx &&
tp->coalesce_config.tx_max_coalesced_frames !=
tp->coalesce_config.tx_max_coalesced_frames_high) {
tp->coalesce_config.tx_max_coalesced_frames =
tp->coalesce_config.tx_max_coalesced_frames_high;
tp->coalesce_config.tx_coalesce_ticks =
tp->coalesce_config.tx_coalesce_ticks_high;
new_vals = 1;
}
}
if (new_vals) {
if (do_rx) {
tw32(HOSTCC_RXCOL_TICKS,
tp->coalesce_config.rx_coalesce_ticks);
tw32(HOSTCC_RXMAX_FRAMES,
tp->coalesce_config.rx_max_coalesced_frames);
}
if (do_tx) {
tw32(HOSTCC_TXCOL_TICKS,
tp->coalesce_config.tx_coalesce_ticks);
tw32(HOSTCC_TXMAX_FRAMES,
tp->coalesce_config.tx_max_coalesced_frames);
}
}
tp->last_rate_sample = jiffies; return received;
} }
static void tg3_interrupt_main_work(struct tg3 *tp) static int tg3_poll(struct net_device *netdev, int *budget)
{ {
struct tg3 *tp = netdev->priv;
struct tg3_hw_status *sblk = tp->hw_status; struct tg3_hw_status *sblk = tp->hw_status;
int did_pkts; int done;
spin_lock_irq(&tp->lock);
if (!(tp->tg3_flags & if (!(tp->tg3_flags &
(TG3_FLAG_USE_LINKCHG_REG | (TG3_FLAG_USE_LINKCHG_REG |
...@@ -2009,50 +1922,65 @@ static void tg3_interrupt_main_work(struct tg3 *tp) ...@@ -2009,50 +1922,65 @@ static void tg3_interrupt_main_work(struct tg3 *tp)
} }
} }
did_pkts = 0; if (sblk->idx[0].tx_consumer != tp->tx_cons)
tg3_tx(tp);
done = 1;
if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) { if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
tg3_rx(tp); int orig_budget = *budget;
did_pkts = 1; int work_done;
if (orig_budget > netdev->quota)
orig_budget = netdev->quota;
work_done = tg3_rx(tp, orig_budget);
*budget -= work_done;
netdev->quota -= work_done;
if (work_done >= orig_budget)
done = 0;
} }
if (sblk->idx[0].tx_consumer != tp->tx_cons) { if (done) {
tg3_tx(tp); netif_rx_complete(netdev);
did_pkts = 1; tg3_enable_ints(tp);
} }
if (did_pkts && spin_unlock_irq(&tp->lock);
(tp->tg3_flags & (TG3_FLAG_ADAPTIVE_RX | TG3_FLAG_ADAPTIVE_TX))) {
unsigned long ticks = jiffies - tp->last_rate_sample;
if (ticks >= tp->coalesce_config.rate_sample_jiffies) return (done ? 0 : 1);
tg3_rate_sample(tp, ticks);
}
} }
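The tg3_poll() above, together with the reworked interrupt handler below, follows the NAPI contract used throughout this conversion: the interrupt path masks the chip's interrupt and schedules polling, and the ->poll() callback consumes at most the smaller of *budget and dev->quota packets via netif_receive_skb(), re-enabling interrupts with netif_rx_complete() only once the ring is drained. A stripped-down sketch of that contract for a hypothetical driver (struct mydrv_priv and the mydrv_* helpers are illustrative, not tg3 code; the netif_* calls and the budget/quota fields are the ones tg3 uses in this diff):

/*
 * Hedged sketch of the 2.5-era NAPI receive pattern used by the tg3
 * conversion above; mydrv_* names are hypothetical.
 */
struct mydrv_priv;				/* opaque per-device state */
extern int  mydrv_rx(struct mydrv_priv *mp, int limit);	/* packets handed to netif_receive_skb() */
extern void mydrv_mask_irq(struct mydrv_priv *mp);
extern void mydrv_unmask_irq(struct mydrv_priv *mp);

static int mydrv_poll(struct net_device *dev, int *budget)
{
	struct mydrv_priv *mp = dev->priv;
	int limit = *budget;
	int work_done, done;

	if (limit > dev->quota)
		limit = dev->quota;		/* same clamping as tg3_poll() */

	work_done = mydrv_rx(mp, limit);

	*budget -= work_done;
	dev->quota -= work_done;
	done = (work_done < limit);

	if (done) {
		netif_rx_complete(dev);		/* off the poll list ... */
		mydrv_unmask_irq(mp);		/* ... then let the chip interrupt again */
	}

	return done ? 0 : 1;			/* non-zero: poll me again */
}

static void mydrv_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;

	if (netif_rx_schedule_prep(dev)) {
		mydrv_mask_irq(dev->priv);	/* keep the chip quiet until polling finishes */
		__netif_rx_schedule(dev);
	}
}

Returning non-zero from ->poll() keeps the device on the poll list, which is why tg3_poll() returns (done ? 0 : 1).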
static void tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs) static __inline__ void tg3_interrupt_main_work(struct net_device *dev, struct tg3 *tp)
{ {
struct net_device *dev = dev_id;
struct tg3 *tp = dev->priv;
struct tg3_hw_status *sblk = tp->hw_status; struct tg3_hw_status *sblk = tp->hw_status;
int work_exists = 0;
spin_lock(&tp->lock); if (!(tp->tg3_flags &
(TG3_FLAG_USE_LINKCHG_REG |
while (sblk->status & SD_STATUS_UPDATED) { TG3_FLAG_POLL_SERDES))) {
tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, if (sblk->status & SD_STATUS_LINK_CHG)
0x00000001); work_exists = 1;
sblk->status &= ~SD_STATUS_UPDATED; }
if (sblk->idx[0].tx_consumer != tp->tx_cons ||
sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
work_exists = 1;
tg3_interrupt_main_work(tp); if (!work_exists)
return;
tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, if (netif_rx_schedule_prep(dev)) {
0x00000000); tw32(TG3PCI_MISC_HOST_CTRL,
tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW); (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
__netif_rx_schedule(dev);
} else {
printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
dev->name);
} }
spin_unlock(&tp->lock);
} }
static void tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs) static void tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{ {
struct net_device *dev = dev_id; struct net_device *dev = dev_id;
struct tg3 *tp = dev->priv; struct tg3 *tp = dev->priv;
...@@ -2061,29 +1989,15 @@ static void tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs) ...@@ -2061,29 +1989,15 @@ static void tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
spin_lock(&tp->lock); spin_lock(&tp->lock);
if (sblk->status & SD_STATUS_UPDATED) { if (sblk->status & SD_STATUS_UPDATED) {
u32 oldtag;
tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
0x00000001); 0x00000001);
oldtag = sblk->status_tag; sblk->status &= ~SD_STATUS_UPDATED;
while (1) {
u32 newtag;
sblk->status &= ~SD_STATUS_UPDATED;
barrier();
tg3_interrupt_main_work(tp); tg3_interrupt_main_work(dev, tp);
newtag = sblk->status_tag; tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
if (newtag == oldtag) { 0x00000000);
tw32_mailbox(MAILBOX_INTERRUPT_0 + tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
TG3_64BIT_REG_LOW,
newtag << 24);
break;
}
oldtag = newtag;
}
} }
spin_unlock(&tp->lock); spin_unlock(&tp->lock);
...@@ -2096,7 +2010,7 @@ static void tg3_tx_timeout(struct net_device *dev) ...@@ -2096,7 +2010,7 @@ static void tg3_tx_timeout(struct net_device *dev)
{ {
struct tg3 *tp = dev->priv; struct tg3 *tp = dev->priv;
printk(KERN_ERR "%s: transmit timed out, resetting\n", printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
dev->name); dev->name);
spin_lock_irq(&tp->lock); spin_lock_irq(&tp->lock);
...@@ -3638,24 +3552,16 @@ static int tg3_reset_hw(struct tg3 *tp) ...@@ -3638,24 +3552,16 @@ static int tg3_reset_hw(struct tg3 *tp)
udelay(10); udelay(10);
} }
tw32(HOSTCC_RXCOL_TICKS, tw32(HOSTCC_RXCOL_TICKS, 0);
tp->coalesce_config.rx_coalesce_ticks); tw32(HOSTCC_RXMAX_FRAMES, 1);
tw32(HOSTCC_RXMAX_FRAMES, tw32(HOSTCC_RXCOAL_TICK_INT, 0);
tp->coalesce_config.rx_max_coalesced_frames); tw32(HOSTCC_RXCOAL_MAXF_INT, 1);
tw32(HOSTCC_RXCOAL_TICK_INT, tw32(HOSTCC_TXCOL_TICKS, LOW_TXCOL_TICKS);
tp->coalesce_config.rx_coalesce_ticks_during_int); tw32(HOSTCC_TXMAX_FRAMES, LOW_RXMAX_FRAMES);
tw32(HOSTCC_RXCOAL_MAXF_INT, tw32(HOSTCC_TXCOAL_TICK_INT, 0);
tp->coalesce_config.rx_max_coalesced_frames_during_int); tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
tw32(HOSTCC_TXCOL_TICKS,
tp->coalesce_config.tx_coalesce_ticks);
tw32(HOSTCC_TXMAX_FRAMES,
tp->coalesce_config.tx_max_coalesced_frames);
tw32(HOSTCC_TXCOAL_TICK_INT,
tp->coalesce_config.tx_coalesce_ticks_during_int);
tw32(HOSTCC_TXCOAL_MAXF_INT,
tp->coalesce_config.tx_max_coalesced_frames_during_int);
tw32(HOSTCC_STAT_COAL_TICKS, tw32(HOSTCC_STAT_COAL_TICKS,
tp->coalesce_config.stats_coalesce_ticks); DEFAULT_STAT_COAL_TICKS);
/* Status/statistics block address. */ /* Status/statistics block address. */
tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH, tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
...@@ -3806,24 +3712,22 @@ static void tg3_timer(unsigned long __opaque) ...@@ -3806,24 +3712,22 @@ static void tg3_timer(unsigned long __opaque)
spin_lock_irq(&tp->lock); spin_lock_irq(&tp->lock);
if (!(tp->tg3_flags & TG3_FLAG_TAGGED_IRQ_STATUS)) { /* All of this garbage is because when using non-tagged
/* All of this garbage is because on the 5700 the * IRQ status the mailbox/status_block protocol the chip
* mailbox/status_block protocol the chip uses with * uses with the cpu is race prone.
* the cpu is race prone. */
*/ if (tp->hw_status->status & SD_STATUS_UPDATED) {
if (tp->hw_status->status & SD_STATUS_UPDATED) { tw32(GRC_LOCAL_CTRL,
tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
tp->grc_local_ctrl | GRC_LCLCTRL_SETINT); } else {
} else { tw32(HOSTCC_MODE,
tw32(HOSTCC_MODE, (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
(HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW)); }
}
if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
tg3_halt(tp); tg3_halt(tp);
tg3_init_rings(tp); tg3_init_rings(tp);
tg3_init_hw(tp); tg3_init_hw(tp);
}
} }
/* This part only runs once per second. */ /* This part only runs once per second. */
...@@ -3893,12 +3797,8 @@ static int tg3_open(struct net_device *dev) ...@@ -3893,12 +3797,8 @@ static int tg3_open(struct net_device *dev)
if (err) if (err)
return err; return err;
if (tp->tg3_flags & TG3_FLAG_TAGGED_IRQ_STATUS) err = request_irq(dev->irq, tg3_interrupt,
err = request_irq(dev->irq, tg3_interrupt_tagged, SA_SHIRQ, dev->name, dev);
SA_SHIRQ, dev->name, dev);
else
err = request_irq(dev->irq, tg3_interrupt,
SA_SHIRQ, dev->name, dev);
if (err) { if (err) {
tg3_free_consistent(tp); tg3_free_consistent(tp);
...@@ -3914,13 +3814,8 @@ static int tg3_open(struct net_device *dev) ...@@ -3914,13 +3814,8 @@ static int tg3_open(struct net_device *dev)
tg3_halt(tp); tg3_halt(tp);
tg3_free_rings(tp); tg3_free_rings(tp);
} else { } else {
if (tp->tg3_flags & TG3_FLAG_TAGGED_IRQ_STATUS) { tp->timer_offset = HZ / 10;
tp->timer_offset = HZ; tp->timer_counter = tp->timer_multiplier = 10;
tp->timer_counter = tp->timer_multiplier = 1;
} else {
tp->timer_offset = HZ / 10;
tp->timer_counter = tp->timer_multiplier = 10;
}
init_timer(&tp->timer); init_timer(&tp->timer);
tp->timer.expires = jiffies + tp->timer_offset; tp->timer.expires = jiffies + tp->timer_offset;
...@@ -4240,10 +4135,7 @@ static inline unsigned long get_stat64(tg3_stat64_t *val) ...@@ -4240,10 +4135,7 @@ static inline unsigned long get_stat64(tg3_stat64_t *val)
unsigned long ret; unsigned long ret;
#if (BITS_PER_LONG == 32) #if (BITS_PER_LONG == 32)
if (val->high != 0) ret = val->low;
ret = ~0UL;
else
ret = val->low;
#else #else
ret = ((u64)val->high << 32) | ((u64)val->low); ret = ((u64)val->high << 32) | ((u64)val->low);
#endif #endif
...@@ -4486,177 +4378,6 @@ do { p = orig_p + (reg); \ ...@@ -4486,177 +4378,6 @@ do { p = orig_p + (reg); \
return orig_p; return orig_p;
} }
static void tg3_to_ethtool_coal(struct tg3 *tp,
struct ethtool_coalesce *ecoal)
{
ecoal->rx_coalesce_usecs =
tp->coalesce_config.rx_coalesce_ticks_def;
ecoal->rx_max_coalesced_frames =
tp->coalesce_config.rx_max_coalesced_frames_def;
ecoal->rx_coalesce_usecs_irq =
tp->coalesce_config.rx_coalesce_ticks_during_int_def;
ecoal->rx_max_coalesced_frames_irq =
tp->coalesce_config.rx_max_coalesced_frames_during_int_def;
ecoal->tx_coalesce_usecs =
tp->coalesce_config.tx_coalesce_ticks_def;
ecoal->tx_max_coalesced_frames =
tp->coalesce_config.tx_max_coalesced_frames_def;
ecoal->tx_coalesce_usecs_irq =
tp->coalesce_config.tx_coalesce_ticks_during_int_def;
ecoal->tx_max_coalesced_frames_irq =
tp->coalesce_config.tx_max_coalesced_frames_during_int_def;
ecoal->stats_block_coalesce_usecs =
tp->coalesce_config.stats_coalesce_ticks_def;
ecoal->use_adaptive_rx_coalesce =
(tp->tg3_flags & TG3_FLAG_ADAPTIVE_RX) != 0;
ecoal->use_adaptive_tx_coalesce =
(tp->tg3_flags & TG3_FLAG_ADAPTIVE_TX) != 0;
ecoal->pkt_rate_low =
tp->coalesce_config.pkt_rate_low;
ecoal->rx_coalesce_usecs_low =
tp->coalesce_config.rx_coalesce_ticks_low;
ecoal->rx_max_coalesced_frames_low =
tp->coalesce_config.rx_max_coalesced_frames_low;
ecoal->tx_coalesce_usecs_low =
tp->coalesce_config.tx_coalesce_ticks_low;
ecoal->tx_max_coalesced_frames_low =
tp->coalesce_config.tx_max_coalesced_frames_low;
ecoal->pkt_rate_high =
tp->coalesce_config.pkt_rate_high;
ecoal->rx_coalesce_usecs_high =
tp->coalesce_config.rx_coalesce_ticks_high;
ecoal->rx_max_coalesced_frames_high =
tp->coalesce_config.rx_max_coalesced_frames_high;
ecoal->tx_coalesce_usecs_high =
tp->coalesce_config.tx_coalesce_ticks_high;
ecoal->tx_max_coalesced_frames_high =
tp->coalesce_config.tx_max_coalesced_frames_high;
ecoal->rate_sample_interval =
tp->coalesce_config.rate_sample_jiffies / HZ;
}
static int tg3_from_ethtool_coal(struct tg3 *tp,
struct ethtool_coalesce *ecoal)
{
/* Make sure we are not getting garbage. */
if ((ecoal->rx_coalesce_usecs == 0 &&
ecoal->rx_max_coalesced_frames == 0) ||
(ecoal->tx_coalesce_usecs == 0 &&
ecoal->tx_max_coalesced_frames == 0) ||
ecoal->stats_block_coalesce_usecs == 0)
return -EINVAL;
if (ecoal->use_adaptive_rx_coalesce ||
ecoal->use_adaptive_tx_coalesce) {
if (ecoal->pkt_rate_low > ecoal->pkt_rate_high)
return -EINVAL;
if (ecoal->rate_sample_interval == 0)
return -EINVAL;
if (ecoal->use_adaptive_rx_coalesce &&
((ecoal->rx_coalesce_usecs_low == 0 &&
ecoal->rx_max_coalesced_frames_low == 0) ||
(ecoal->rx_coalesce_usecs_high == 0 &&
ecoal->rx_max_coalesced_frames_high == 0)))
return -EINVAL;
if (ecoal->use_adaptive_tx_coalesce &&
((ecoal->tx_coalesce_usecs_low == 0 &&
ecoal->tx_max_coalesced_frames_low == 0) ||
(ecoal->tx_coalesce_usecs_high == 0 &&
ecoal->tx_max_coalesced_frames_high == 0)))
return -EINVAL;
}
/* Looks good, let it rip. */
spin_lock_irq(&tp->lock);
tp->coalesce_config.rx_coalesce_ticks =
tp->coalesce_config.rx_coalesce_ticks_def =
ecoal->rx_coalesce_usecs;
tp->coalesce_config.rx_max_coalesced_frames =
tp->coalesce_config.rx_max_coalesced_frames_def =
ecoal->rx_max_coalesced_frames;
tp->coalesce_config.rx_coalesce_ticks_during_int =
tp->coalesce_config.rx_coalesce_ticks_during_int_def =
ecoal->rx_coalesce_usecs_irq;
tp->coalesce_config.rx_max_coalesced_frames_during_int =
tp->coalesce_config.rx_max_coalesced_frames_during_int_def =
ecoal->rx_max_coalesced_frames_irq;
tp->coalesce_config.tx_coalesce_ticks =
tp->coalesce_config.tx_coalesce_ticks_def =
ecoal->tx_coalesce_usecs;
tp->coalesce_config.tx_max_coalesced_frames =
tp->coalesce_config.tx_max_coalesced_frames_def =
ecoal->tx_max_coalesced_frames;
tp->coalesce_config.tx_coalesce_ticks_during_int =
tp->coalesce_config.tx_coalesce_ticks_during_int_def =
ecoal->tx_coalesce_usecs_irq;
tp->coalesce_config.tx_max_coalesced_frames_during_int =
tp->coalesce_config.tx_max_coalesced_frames_during_int_def =
ecoal->tx_max_coalesced_frames_irq;
tp->coalesce_config.stats_coalesce_ticks =
tp->coalesce_config.stats_coalesce_ticks_def =
ecoal->stats_block_coalesce_usecs;
if (ecoal->use_adaptive_rx_coalesce)
tp->tg3_flags |= TG3_FLAG_ADAPTIVE_RX;
else
tp->tg3_flags &= ~TG3_FLAG_ADAPTIVE_RX;
if (ecoal->use_adaptive_tx_coalesce)
tp->tg3_flags |= TG3_FLAG_ADAPTIVE_TX;
else
tp->tg3_flags &= ~TG3_FLAG_ADAPTIVE_TX;
tp->coalesce_config.pkt_rate_low = ecoal->pkt_rate_low;
tp->coalesce_config.pkt_rate_high = ecoal->pkt_rate_high;
tp->coalesce_config.rate_sample_jiffies =
ecoal->rate_sample_interval * HZ;
tp->coalesce_config.rx_coalesce_ticks_low =
ecoal->rx_coalesce_usecs_low;
tp->coalesce_config.rx_max_coalesced_frames_low =
ecoal->rx_max_coalesced_frames_low;
tp->coalesce_config.tx_coalesce_ticks_low =
ecoal->tx_coalesce_usecs_low;
tp->coalesce_config.tx_max_coalesced_frames_low =
ecoal->tx_max_coalesced_frames_low;
tp->coalesce_config.rx_coalesce_ticks_high =
ecoal->rx_coalesce_usecs_high;
tp->coalesce_config.rx_max_coalesced_frames_high =
ecoal->rx_max_coalesced_frames_high;
tp->coalesce_config.tx_coalesce_ticks_high =
ecoal->tx_coalesce_usecs_high;
tp->coalesce_config.tx_max_coalesced_frames_high =
ecoal->tx_max_coalesced_frames_high;
tw32(HOSTCC_RXCOL_TICKS,
tp->coalesce_config.rx_coalesce_ticks_def);
tw32(HOSTCC_RXMAX_FRAMES,
tp->coalesce_config.rx_max_coalesced_frames_def);
tw32(HOSTCC_RXCOAL_TICK_INT,
tp->coalesce_config.rx_coalesce_ticks_during_int_def);
tw32(HOSTCC_RXCOAL_MAXF_INT,
tp->coalesce_config.rx_max_coalesced_frames_during_int_def);
tw32(HOSTCC_TXCOL_TICKS,
tp->coalesce_config.tx_coalesce_ticks_def);
tw32(HOSTCC_TXMAX_FRAMES,
tp->coalesce_config.tx_max_coalesced_frames_def);
tw32(HOSTCC_TXCOAL_TICK_INT,
tp->coalesce_config.tx_coalesce_ticks_during_int_def);
tw32(HOSTCC_TXCOAL_MAXF_INT,
tp->coalesce_config.tx_max_coalesced_frames_during_int_def);
tw32(HOSTCC_STAT_COAL_TICKS,
tp->coalesce_config.stats_coalesce_ticks_def);
spin_unlock_irq(&tp->lock);
return 0;
}
static int tg3_ethtool_ioctl (struct net_device *dev, void *useraddr) static int tg3_ethtool_ioctl (struct net_device *dev, void *useraddr)
{ {
struct tg3 *tp = dev->priv; struct tg3 *tp = dev->priv;
...@@ -4708,8 +4429,8 @@ static int tg3_ethtool_ioctl (struct net_device *dev, void *useraddr) ...@@ -4708,8 +4429,8 @@ static int tg3_ethtool_ioctl (struct net_device *dev, void *useraddr)
cmd.phy_address = PHY_ADDR; cmd.phy_address = PHY_ADDR;
cmd.transceiver = 0; cmd.transceiver = 0;
cmd.autoneg = tp->link_config.autoneg; cmd.autoneg = tp->link_config.autoneg;
cmd.maxtxpkt = tp->coalesce_config.tx_max_coalesced_frames_def; cmd.maxtxpkt = 0;
cmd.maxrxpkt = tp->coalesce_config.rx_max_coalesced_frames_def; cmd.maxrxpkt = 0;
if (copy_to_user(useraddr, &cmd, sizeof(cmd))) if (copy_to_user(useraddr, &cmd, sizeof(cmd)))
return -EFAULT; return -EFAULT;
return 0; return 0;
...@@ -4761,22 +4482,6 @@ static int tg3_ethtool_ioctl (struct net_device *dev, void *useraddr) ...@@ -4761,22 +4482,6 @@ static int tg3_ethtool_ioctl (struct net_device *dev, void *useraddr)
tp->link_config.duplex = cmd.duplex; tp->link_config.duplex = cmd.duplex;
} }
if (cmd.maxtxpkt || cmd.maxrxpkt) {
tp->coalesce_config.tx_max_coalesced_frames_def =
tp->coalesce_config.tx_max_coalesced_frames =
cmd.maxtxpkt;
tp->coalesce_config.rx_max_coalesced_frames_def =
tp->coalesce_config.rx_max_coalesced_frames =
cmd.maxrxpkt;
/* Coalescing config bits can be updated without
* a full chip reset.
*/
tw32(HOSTCC_TXMAX_FRAMES,
tp->coalesce_config.tx_max_coalesced_frames);
tw32(HOSTCC_RXMAX_FRAMES,
tp->coalesce_config.rx_max_coalesced_frames);
}
tg3_setup_phy(tp); tg3_setup_phy(tp);
spin_unlock_irq(&tp->lock); spin_unlock_irq(&tp->lock);
...@@ -4873,22 +4578,6 @@ static int tg3_ethtool_ioctl (struct net_device *dev, void *useraddr) ...@@ -4873,22 +4578,6 @@ static int tg3_ethtool_ioctl (struct net_device *dev, void *useraddr)
return -EFAULT; return -EFAULT;
return 0; return 0;
} }
case ETHTOOL_GCOALESCE: {
struct ethtool_coalesce ecoal = { ETHTOOL_GCOALESCE };
tg3_to_ethtool_coal(tp, &ecoal);
if (copy_to_user(useraddr, &ecoal, sizeof(ecoal)))
return -EFAULT;
return 0;
}
case ETHTOOL_SCOALESCE: {
struct ethtool_coalesce ecoal;
if (copy_from_user(&ecoal, useraddr, sizeof(ecoal)))
return -EINVAL;
return tg3_from_ethtool_coal(tp, &ecoal);
}
case ETHTOOL_GRINGPARAM: { case ETHTOOL_GRINGPARAM: {
struct ethtool_ringparam ering = { ETHTOOL_GRINGPARAM }; struct ethtool_ringparam ering = { ETHTOOL_GRINGPARAM };
...@@ -5641,42 +5330,13 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) ...@@ -5641,42 +5330,13 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB; tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
} }
/* Only 5701 and later support tagged irq status mode. */ /* Only 5701 and later support tagged irq status mode.
if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) { *
tp->tg3_flags |= TG3_FLAG_TAGGED_IRQ_STATUS; * However, since we are using NAPI avoid tagged irq status
tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS; * because the interrupt condition is more difficult to
* fully clear in that mode.
/* ??? Due to a glitch Broadcom's driver ALWAYS sets */
* ??? these bits in coalesce_mode. Because MM_GetConfig tp->coalesce_mode = 0;
* ??? always sets pDevice->UseTaggedStatus correctly
* ??? the following test at tigon3.c:LM_GetAdapterInfo()
* ???
* ??? pDevice->UseTaggedStatus &&
* ??? (pDevice->ChipRevId == T3_CHIP_ID_5700_C0 ||
* ??? T3_CHIP_REV(pDevice->ChipRevId) == T3_CHIP_REV_5700_AX ||
* ??? T3_CHIP_REV(pDevice->ChipRevId) == T3_CHIP_REV_5700_BX)
* ???
* ??? will never pass and thus pDevice->CoalesceMode will never
* ??? get set to zero. For now I'll mirror what I believe is
* ??? the intention of their driver.
* ???
* ??? Update: This is fixed in Broadcom's 2.2.3 and later
* ??? drivers. All the current 2.0.x drivers still
* ??? have the bug.
*/
tp->coalesce_mode = (HOSTCC_MODE_CLRTICK_RXBD |
HOSTCC_MODE_CLRTICK_TXBD);
} else {
tp->coalesce_mode = 0;
/* If not using tagged status, set the *_during_int
* coalesce default config values to zero.
*/
tp->coalesce_config.rx_coalesce_ticks_during_int_def = 0;
tp->coalesce_config.rx_max_coalesced_frames_during_int_def = 0;
tp->coalesce_config.tx_coalesce_ticks_during_int_def = 0;
tp->coalesce_config.tx_max_coalesced_frames_during_int_def = 0;
}
if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX && if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX) GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
...@@ -6121,61 +5781,6 @@ static void __devinit tg3_init_link_config(struct tg3 *tp) ...@@ -6121,61 +5781,6 @@ static void __devinit tg3_init_link_config(struct tg3 *tp)
tp->link_config.orig_autoneg = AUTONEG_INVALID; tp->link_config.orig_autoneg = AUTONEG_INVALID;
} }
static void __devinit tg3_init_coalesce_config(struct tg3 *tp)
{
tp->coalesce_config.rx_coalesce_ticks_def = DEFAULT_RXCOL_TICKS;
tp->coalesce_config.rx_max_coalesced_frames_def = DEFAULT_RXMAX_FRAMES;
tp->coalesce_config.rx_coalesce_ticks_during_int_def =
DEFAULT_RXCOAL_TICK_INT;
tp->coalesce_config.rx_max_coalesced_frames_during_int_def =
DEFAULT_RXCOAL_MAXF_INT;
tp->coalesce_config.tx_coalesce_ticks_def = DEFAULT_TXCOL_TICKS;
tp->coalesce_config.tx_max_coalesced_frames_def = DEFAULT_TXMAX_FRAMES;
tp->coalesce_config.tx_coalesce_ticks_during_int_def =
DEFAULT_TXCOAL_TICK_INT;
tp->coalesce_config.tx_max_coalesced_frames_during_int_def =
DEFAULT_TXCOAL_MAXF_INT;
tp->coalesce_config.stats_coalesce_ticks_def =
DEFAULT_STAT_COAL_TICKS;
tp->coalesce_config.rx_coalesce_ticks_low =
LOW_RXCOL_TICKS;
tp->coalesce_config.rx_max_coalesced_frames_low =
LOW_RXMAX_FRAMES;
tp->coalesce_config.tx_coalesce_ticks_low =
LOW_TXCOL_TICKS;
tp->coalesce_config.tx_max_coalesced_frames_low =
LOW_TXMAX_FRAMES;
tp->coalesce_config.rx_coalesce_ticks_high =
HIGH_RXCOL_TICKS;
tp->coalesce_config.rx_max_coalesced_frames_high =
HIGH_RXMAX_FRAMES;
tp->coalesce_config.tx_coalesce_ticks_high =
HIGH_TXCOL_TICKS;
tp->coalesce_config.tx_max_coalesced_frames_high =
HIGH_TXMAX_FRAMES;
/* Active == default */
tp->coalesce_config.rx_coalesce_ticks =
tp->coalesce_config.rx_coalesce_ticks_def;
tp->coalesce_config.rx_max_coalesced_frames =
tp->coalesce_config.rx_max_coalesced_frames_def;
tp->coalesce_config.tx_coalesce_ticks =
tp->coalesce_config.tx_coalesce_ticks_def;
tp->coalesce_config.tx_max_coalesced_frames =
tp->coalesce_config.tx_max_coalesced_frames_def;
tp->coalesce_config.stats_coalesce_ticks =
tp->coalesce_config.stats_coalesce_ticks_def;
tp->coalesce_config.rate_sample_jiffies = (1 * HZ);
tp->coalesce_config.pkt_rate_low = 22000;
tp->coalesce_config.pkt_rate_high = 61000;
tp->tg3_flags |= TG3_FLAG_ADAPTIVE_RX;
tp->tg3_flags &= ~(TG3_FLAG_ADAPTIVE_TX);
}
static void __devinit tg3_init_bufmgr_config(struct tg3 *tp) static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
{ {
tp->bufmgr_config.mbuf_read_dma_low_water = tp->bufmgr_config.mbuf_read_dma_low_water =
...@@ -6333,8 +5938,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, ...@@ -6333,8 +5938,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
tg3_init_link_config(tp); tg3_init_link_config(tp);
tg3_init_coalesce_config(tp);
tg3_init_bufmgr_config(tp); tg3_init_bufmgr_config(tp);
tp->rx_pending = TG3_DEF_RX_RING_PENDING; tp->rx_pending = TG3_DEF_RX_RING_PENDING;
...@@ -6351,6 +5954,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, ...@@ -6351,6 +5954,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
dev->set_mac_address = tg3_set_mac_addr; dev->set_mac_address = tg3_set_mac_addr;
dev->do_ioctl = tg3_ioctl; dev->do_ioctl = tg3_ioctl;
dev->tx_timeout = tg3_tx_timeout; dev->tx_timeout = tg3_tx_timeout;
dev->poll = tg3_poll;
dev->weight = 64;
dev->watchdog_timeo = TG3_TX_TIMEOUT; dev->watchdog_timeo = TG3_TX_TIMEOUT;
dev->change_mtu = tg3_change_mtu; dev->change_mtu = tg3_change_mtu;
dev->irq = pdev->irq; dev->irq = pdev->irq;
......
...@@ -1687,45 +1687,6 @@ struct tg3_link_config { ...@@ -1687,45 +1687,6 @@ struct tg3_link_config {
u8 orig_autoneg; u8 orig_autoneg;
}; };
struct tg3_coalesce_config {
/* Current settings. */
u32 rx_coalesce_ticks;
u32 rx_max_coalesced_frames;
u32 rx_coalesce_ticks_during_int;
u32 rx_max_coalesced_frames_during_int;
u32 tx_coalesce_ticks;
u32 tx_max_coalesced_frames;
u32 tx_coalesce_ticks_during_int;
u32 tx_max_coalesced_frames_during_int;
u32 stats_coalesce_ticks;
/* Default settings. */
u32 rx_coalesce_ticks_def;
u32 rx_max_coalesced_frames_def;
u32 rx_coalesce_ticks_during_int_def;
u32 rx_max_coalesced_frames_during_int_def;
u32 tx_coalesce_ticks_def;
u32 tx_max_coalesced_frames_def;
u32 tx_coalesce_ticks_during_int_def;
u32 tx_max_coalesced_frames_during_int_def;
u32 stats_coalesce_ticks_def;
/* Adaptive RX/TX coalescing parameters. */
u32 rate_sample_jiffies;
u32 pkt_rate_low;
u32 pkt_rate_high;
u32 rx_coalesce_ticks_low;
u32 rx_max_coalesced_frames_low;
u32 tx_coalesce_ticks_low;
u32 tx_max_coalesced_frames_low;
u32 rx_coalesce_ticks_high;
u32 rx_max_coalesced_frames_high;
u32 tx_coalesce_ticks_high;
u32 tx_max_coalesced_frames_high;
};
struct tg3_bufmgr_config { struct tg3_bufmgr_config {
u32 mbuf_read_dma_low_water; u32 mbuf_read_dma_low_water;
u32 mbuf_mac_rx_low_water; u32 mbuf_mac_rx_low_water;
...@@ -1772,8 +1733,7 @@ struct tg3 { ...@@ -1772,8 +1733,7 @@ struct tg3 {
#define TG3_FLAG_POLL_SERDES 0x00000080 #define TG3_FLAG_POLL_SERDES 0x00000080
#define TG3_FLAG_PHY_RESET_ON_INIT 0x00000100 #define TG3_FLAG_PHY_RESET_ON_INIT 0x00000100
#define TG3_FLAG_PCIX_TARGET_HWBUG 0x00000200 #define TG3_FLAG_PCIX_TARGET_HWBUG 0x00000200
#define TG3_FLAG_TAGGED_IRQ_STATUS 0x00000400 #define TG3_FLAG_WOL_SPEED_100MB 0x00000400
#define TG3_FLAG_WOL_SPEED_100MB 0x00000800
#define TG3_FLAG_WOL_ENABLE 0x00001000 #define TG3_FLAG_WOL_ENABLE 0x00001000
#define TG3_FLAG_NVRAM 0x00002000 #define TG3_FLAG_NVRAM 0x00002000
#define TG3_FLAG_NVRAM_BUFFERED 0x00004000 #define TG3_FLAG_NVRAM_BUFFERED 0x00004000
...@@ -1802,7 +1762,6 @@ struct tg3 { ...@@ -1802,7 +1762,6 @@ struct tg3 {
u32 timer_offset; u32 timer_offset;
struct tg3_link_config link_config; struct tg3_link_config link_config;
struct tg3_coalesce_config coalesce_config;
struct tg3_bufmgr_config bufmgr_config; struct tg3_bufmgr_config bufmgr_config;
u32 rx_pending; u32 rx_pending;
......
...@@ -141,9 +141,10 @@ struct vlan_skb_tx_cookie { ...@@ -141,9 +141,10 @@ struct vlan_skb_tx_cookie {
(VLAN_TX_SKB_CB(__skb)->magic == VLAN_TX_COOKIE_MAGIC) (VLAN_TX_SKB_CB(__skb)->magic == VLAN_TX_COOKIE_MAGIC)
#define vlan_tx_tag_get(__skb) (VLAN_TX_SKB_CB(__skb)->vlan_tag) #define vlan_tx_tag_get(__skb) (VLAN_TX_SKB_CB(__skb)->vlan_tag)
/* VLAN rx hw acceleration helper. This acts like netif_rx(). */ /* VLAN rx hw acceleration helper. This acts like netif_{rx,receive_skb}(). */
static inline int vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp, static inline int __vlan_hwaccel_rx(struct sk_buff *skb,
unsigned short vlan_tag) struct vlan_group *grp,
unsigned short vlan_tag, int polling)
{ {
struct net_device_stats *stats; struct net_device_stats *stats;
...@@ -182,9 +183,22 @@ static inline int vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp, ...@@ -182,9 +183,22 @@ static inline int vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
break; break;
}; };
return netif_rx(skb); return (polling ? netif_receive_skb(skb) : netif_rx(skb));
} }
static inline int vlan_hwaccel_rx(struct sk_buff *skb,
struct vlan_group *grp,
unsigned short vlan_tag)
{
return __vlan_hwaccel_rx(skb, grp, vlan_tag, 0);
}
static inline int vlan_hwaccel_receive_skb(struct sk_buff *skb,
struct vlan_group *grp,
unsigned short vlan_tag)
{
return __vlan_hwaccel_rx(skb, grp, vlan_tag, 1);
}
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
/* VLAN IOCTLs are found in sockios.h */ /* VLAN IOCTLs are found in sockios.h */
......
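The helper split in if_vlan.h above exists because a NAPI driver delivering packets from its ->poll() routine must use netif_receive_skb(), while interrupt-context drivers keep using netif_rx(); vlan_hwaccel_receive_skb() is the tag-accelerated counterpart for the softirq path, and is what tg3_vlan_rx() now calls. A hedged sketch of how a polling RX path might use it (the mydrv_* names and fields are illustrative; only the two delivery calls are from this diff):

/*
 * Illustrative RX delivery for a NAPI driver with VLAN tag offload;
 * struct mydrv_priv, 'tagged' and 'vlan_tag' are hypothetical.
 */
struct mydrv_priv {
	struct net_device *dev;
	struct vlan_group *vlgrp;	/* set from the driver's vlan_rx_register hook */
};

static void mydrv_deliver(struct mydrv_priv *mp, struct sk_buff *skb,
			  int tagged, unsigned short vlan_tag)
{
	skb->protocol = eth_type_trans(skb, mp->dev);

	if (tagged && mp->vlgrp != NULL)
		/* hands the already-stripped frame to the VLAN device;
		 * the polling==1 path ends in netif_receive_skb() */
		vlan_hwaccel_receive_skb(skb, mp->vlgrp, vlan_tag);
	else
		netif_receive_skb(skb);
}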
...@@ -110,6 +110,8 @@ extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 pid, int ...@@ -110,6 +110,8 @@ extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 pid, int
extern void netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 pid, extern void netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 pid,
__u32 group, int allocation); __u32 group, int allocation);
extern void netlink_set_err(struct sock *ssk, __u32 pid, __u32 group, int code); extern void netlink_set_err(struct sock *ssk, __u32 pid, __u32 group, int code);
extern int netlink_register_notifier(struct notifier_block *nb);
extern int netlink_unregister_notifier(struct notifier_block *nb);
/* /*
* skb should fit one page. This choice is good for headerless malloc. * skb should fit one page. This choice is good for headerless malloc.
...@@ -129,6 +131,12 @@ struct netlink_callback ...@@ -129,6 +131,12 @@ struct netlink_callback
long args[4]; long args[4];
}; };
struct netlink_notify
{
int pid;
int protocol;
};
static __inline__ struct nlmsghdr * static __inline__ struct nlmsghdr *
__nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len) __nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len)
{ {
......
...@@ -58,5 +58,7 @@ extern int notifier_call_chain(struct notifier_block **n, unsigned long val, voi ...@@ -58,5 +58,7 @@ extern int notifier_call_chain(struct notifier_block **n, unsigned long val, voi
#define SYS_HALT 0x0002 /* Notify of system halt */ #define SYS_HALT 0x0002 /* Notify of system halt */
#define SYS_POWER_OFF 0x0003 /* Notify of system power off */ #define SYS_POWER_OFF 0x0003 /* Notify of system power off */
#define NETLINK_URELEASE 0x0001 /* Unicast netlink socket released */
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
#endif /* _LINUX_NOTIFIER_H */ #endif /* _LINUX_NOTIFIER_H */
...@@ -51,29 +51,4 @@ static inline void ipv6_change_dsfield(struct ipv6hdr *ipv6h,__u8 mask, ...@@ -51,29 +51,4 @@ static inline void ipv6_change_dsfield(struct ipv6hdr *ipv6h,__u8 mask,
} }
#if 0 /* put this later into asm-i386 or such ... */
static inline void ip_change_dsfield(struct iphdr *iph,__u16 dsfield)
{
__u16 check;
__asm__ __volatile__("
movw 10(%1),%0
xchg %b0,%h0
addb 1(%1),%b0
adcb $0,%h0
adcw $1,%0
cmc
sbbw %2,%0
sbbw $0,%0
movb %b2,1(%1)
xchg %b0,%h0
movw %0,10(%1)"
: "=&r" (check)
: "r" (iph), "r" (dsfield)
: "cc");
}
#endif
#endif #endif
...@@ -29,7 +29,9 @@ ...@@ -29,7 +29,9 @@
* 4) All operations modify state, so a spinlock is used. * 4) All operations modify state, so a spinlock is used.
*/ */
static struct dst_entry *dst_garbage_list; static struct dst_entry *dst_garbage_list;
#if RT_CACHE_DEBUG >= 2
static atomic_t dst_total = ATOMIC_INIT(0); static atomic_t dst_total = ATOMIC_INIT(0);
#endif
static spinlock_t dst_lock = SPIN_LOCK_UNLOCKED; static spinlock_t dst_lock = SPIN_LOCK_UNLOCKED;
static unsigned long dst_gc_timer_expires; static unsigned long dst_gc_timer_expires;
...@@ -108,7 +110,9 @@ void * dst_alloc(struct dst_ops * ops) ...@@ -108,7 +110,9 @@ void * dst_alloc(struct dst_ops * ops)
dst->lastuse = jiffies; dst->lastuse = jiffies;
dst->input = dst_discard; dst->input = dst_discard;
dst->output = dst_blackhole; dst->output = dst_blackhole;
#if RT_CACHE_DEBUG >= 2
atomic_inc(&dst_total); atomic_inc(&dst_total);
#endif
atomic_inc(&ops->entries); atomic_inc(&ops->entries);
return dst; return dst;
} }
...@@ -158,7 +162,9 @@ void dst_destroy(struct dst_entry * dst) ...@@ -158,7 +162,9 @@ void dst_destroy(struct dst_entry * dst)
dst->ops->destroy(dst); dst->ops->destroy(dst);
if (dst->dev) if (dst->dev)
dev_put(dst->dev); dev_put(dst->dev);
#if RT_CACHE_DEBUG >= 2
atomic_dec(&dst_total); atomic_dec(&dst_total);
#endif
kmem_cache_free(dst->ops->kmem_cachep, dst); kmem_cache_free(dst->ops->kmem_cachep, dst);
} }
......
...@@ -41,6 +41,7 @@ ...@@ -41,6 +41,7 @@
#include <linux/rtnetlink.h> #include <linux/rtnetlink.h>
#include <linux/proc_fs.h> #include <linux/proc_fs.h>
#include <linux/smp_lock.h> #include <linux/smp_lock.h>
#include <linux/notifier.h>
#include <net/sock.h> #include <net/sock.h>
#include <net/scm.h> #include <net/scm.h>
...@@ -81,6 +82,8 @@ atomic_t netlink_sock_nr; ...@@ -81,6 +82,8 @@ atomic_t netlink_sock_nr;
static rwlock_t nl_table_lock = RW_LOCK_UNLOCKED; static rwlock_t nl_table_lock = RW_LOCK_UNLOCKED;
static atomic_t nl_table_users = ATOMIC_INIT(0); static atomic_t nl_table_users = ATOMIC_INIT(0);
static struct notifier_block *netlink_chain;
static void netlink_sock_destruct(struct sock *sk) static void netlink_sock_destruct(struct sock *sk)
{ {
skb_queue_purge(&sk->receive_queue); skb_queue_purge(&sk->receive_queue);
...@@ -276,6 +279,12 @@ static int netlink_release(struct socket *sock) ...@@ -276,6 +279,12 @@ static int netlink_release(struct socket *sock)
skb_queue_purge(&sk->write_queue); skb_queue_purge(&sk->write_queue);
if (nlk->pid && !nlk->groups) {
struct netlink_notify n = { protocol:sk->protocol,
pid:nlk->pid };
notifier_call_chain(&netlink_chain, NETLINK_URELEASE, &n);
}
sock_put(sk); sock_put(sk);
return 0; return 0;
} }
...@@ -967,6 +976,16 @@ static int netlink_read_proc(char *buffer, char **start, off_t offset, ...@@ -967,6 +976,16 @@ static int netlink_read_proc(char *buffer, char **start, off_t offset,
} }
#endif #endif
int netlink_register_notifier(struct notifier_block *nb)
{
return notifier_chain_register(&netlink_chain, nb);
}
int netlink_unregister_notifier(struct notifier_block *nb)
{
return notifier_chain_unregister(&netlink_chain, nb);
}
struct proto_ops netlink_ops = { struct proto_ops netlink_ops = {
family: PF_NETLINK, family: PF_NETLINK,
......
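The netlink notifier chain added above lets other subsystems find out when a netlink socket that had bound a pid goes away: NETLINK_URELEASE carries a struct netlink_notify with the protocol and pid, so per-pid state can be cleaned up. A minimal, illustrative consumer built only on the interfaces this diff introduces (the callback body and my_* names are hypothetical):

/*
 * Illustrative NETLINK_URELEASE consumer; the registration calls and
 * struct netlink_notify are the ones added in this diff.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/netlink.h>

static int my_netlink_event(struct notifier_block *nb,
			    unsigned long event, void *ptr)
{
	struct netlink_notify *n = ptr;

	if (event == NETLINK_URELEASE)
		printk(KERN_DEBUG "netlink pid %d (proto %d) went away\n",
		       n->pid, n->protocol);

	return NOTIFY_DONE;
}

static struct notifier_block my_netlink_nb = {
	notifier_call:	my_netlink_event,
};

static int __init my_init(void)
{
	return netlink_register_notifier(&my_netlink_nb);
}

static void __exit my_exit(void)
{
	netlink_unregister_notifier(&my_netlink_nb);
}

module_init(my_init);
module_exit(my_exit);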
...@@ -402,6 +402,8 @@ EXPORT_SYMBOL(netlink_unicast); ...@@ -402,6 +402,8 @@ EXPORT_SYMBOL(netlink_unicast);
EXPORT_SYMBOL(netlink_kernel_create); EXPORT_SYMBOL(netlink_kernel_create);
EXPORT_SYMBOL(netlink_dump_start); EXPORT_SYMBOL(netlink_dump_start);
EXPORT_SYMBOL(netlink_ack); EXPORT_SYMBOL(netlink_ack);
EXPORT_SYMBOL(netlink_register_notifier);
EXPORT_SYMBOL(netlink_unregister_notifier);
#if defined(CONFIG_NETLINK_DEV) || defined(CONFIG_NETLINK_DEV_MODULE) #if defined(CONFIG_NETLINK_DEV) || defined(CONFIG_NETLINK_DEV_MODULE)
EXPORT_SYMBOL(netlink_attach); EXPORT_SYMBOL(netlink_attach);
EXPORT_SYMBOL(netlink_detach); EXPORT_SYMBOL(netlink_detach);
......