Commit 205c911d authored by Linus Torvalds

Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6

* 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6:
  sis900 warning fixes
  mv643xx_eth: Place explicit port number in mv643xx_eth_platform_data
  pcnet32: Fix PCnet32 performance bug on non-coherent architectures
  __devinit & __devexit cleanups for de2104x driver
  3c59x: Handle pci_enable_device() failure while resuming
  dmfe: Fix link detection
  dmfe: fix two bugs
  dmfe: trivial/spelling fixes
  revert "drivers/net/tulip/dmfe: support basic carrier detection"
  ucc_geth: returns NETDEV_TX_BUSY when BD ring is full
  ucc_geth: Fix BD processing
  natsemi: netpoll fixes
  bonding: Improve IGMP join processing
  bonding: only receive ARPs for us
  bonding: fix double dev_add_pack
parents c7276fde f3be9742
......@@ -48,6 +48,8 @@ static struct resource mv64x60_eth0_resources[] = {
};
static struct mv643xx_eth_platform_data eth0_pd = {
+	.port_number = 0,
.tx_sram_addr = MV_SRAM_BASE_ETH0,
.tx_sram_size = MV_SRAM_TXRING_SIZE,
.tx_queue_size = MV_SRAM_TXRING_SIZE / 16,
......@@ -77,6 +79,8 @@ static struct resource mv64x60_eth1_resources[] = {
};
static struct mv643xx_eth_platform_data eth1_pd = {
+	.port_number = 1,
.tx_sram_addr = MV_SRAM_BASE_ETH1,
.tx_sram_size = MV_SRAM_TXRING_SIZE,
.tx_queue_size = MV_SRAM_TXRING_SIZE / 16,
......@@ -105,7 +109,9 @@ static struct resource mv64x60_eth2_resources[] = {
},
};
-static struct mv643xx_eth_platform_data eth2_pd;
+static struct mv643xx_eth_platform_data eth2_pd = {
+	.port_number = 2,
+};
static struct platform_device eth2_device = {
.name = MV643XX_ETH_NAME,
......
......@@ -48,6 +48,8 @@ static struct resource mv64x60_eth0_resources[] = {
};
static struct mv643xx_eth_platform_data eth0_pd = {
+	.port_number = 0,
.tx_sram_addr = MV_SRAM_BASE_ETH0,
.tx_sram_size = MV_SRAM_TXRING_SIZE,
.tx_queue_size = MV_SRAM_TXRING_SIZE / 16,
......@@ -77,6 +79,8 @@ static struct resource mv64x60_eth1_resources[] = {
};
static struct mv643xx_eth_platform_data eth1_pd = {
+	.port_number = 1,
.tx_sram_addr = MV_SRAM_BASE_ETH1,
.tx_sram_size = MV_SRAM_TXRING_SIZE,
.tx_queue_size = MV_SRAM_TXRING_SIZE / 16,
......@@ -105,7 +109,9 @@ static struct resource mv64x60_eth2_resources[] = {
},
};
-static struct mv643xx_eth_platform_data eth2_pd;
+static struct mv643xx_eth_platform_data eth2_pd = {
+	.port_number = 2,
+};
static struct platform_device eth2_device = {
.name = MV643XX_ETH_NAME,
......
......@@ -47,6 +47,8 @@ static struct resource mv64x60_eth0_resources[] = {
};
static struct mv643xx_eth_platform_data eth0_pd = {
+	.port_number = 0,
.tx_sram_addr = MV_SRAM_BASE_ETH0,
.tx_sram_size = MV_SRAM_TXRING_SIZE,
.tx_queue_size = MV_SRAM_TXRING_SIZE / 16,
......@@ -76,6 +78,8 @@ static struct resource mv64x60_eth1_resources[] = {
};
static struct mv643xx_eth_platform_data eth1_pd = {
+	.port_number = 1,
.tx_sram_addr = MV_SRAM_BASE_ETH1,
.tx_sram_size = MV_SRAM_TXRING_SIZE,
.tx_queue_size = MV_SRAM_TXRING_SIZE / 16,
......
......@@ -58,6 +58,7 @@ static struct resource mv643xx_eth0_resources[] = {
static struct mv643xx_eth_platform_data eth0_pd = {
+	.port_number = 0,
.tx_sram_addr = PEGASOS2_SRAM_BASE_ETH0,
.tx_sram_size = PEGASOS2_SRAM_TXRING_SIZE,
.tx_queue_size = PEGASOS2_SRAM_TXRING_SIZE/16,
......@@ -87,6 +88,7 @@ static struct resource mv643xx_eth1_resources[] = {
};
static struct mv643xx_eth_platform_data eth1_pd = {
+	.port_number = 1,
.tx_sram_addr = PEGASOS2_SRAM_BASE_ETH1,
.tx_sram_size = PEGASOS2_SRAM_TXRING_SIZE,
.tx_queue_size = PEGASOS2_SRAM_TXRING_SIZE/16,
......
......@@ -339,7 +339,9 @@ static struct resource mv64x60_eth0_resources[] = {
},
};
-static struct mv643xx_eth_platform_data eth0_pd;
+static struct mv643xx_eth_platform_data eth0_pd = {
+	.port_number = 0,
+};
static struct platform_device eth0_device = {
.name = MV643XX_ETH_NAME,
......@@ -362,7 +364,9 @@ static struct resource mv64x60_eth1_resources[] = {
},
};
-static struct mv643xx_eth_platform_data eth1_pd;
+static struct mv643xx_eth_platform_data eth1_pd = {
+	.port_number = 1,
+};
static struct platform_device eth1_device = {
.name = MV643XX_ETH_NAME,
......@@ -385,7 +389,9 @@ static struct resource mv64x60_eth2_resources[] = {
},
};
-static struct mv643xx_eth_platform_data eth2_pd;
+static struct mv643xx_eth_platform_data eth2_pd = {
+	.port_number = 2,
+};
static struct platform_device eth2_device = {
.name = MV643XX_ETH_NAME,
......
......@@ -822,11 +822,17 @@ static int vortex_resume(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct vortex_private *vp = netdev_priv(dev);
+	int err;

	if (dev && vp) {
		pci_set_power_state(pdev, PCI_D0);
		pci_restore_state(pdev);
-		pci_enable_device(pdev);
+		err = pci_enable_device(pdev);
+		if (err) {
+			printk(KERN_WARNING "%s: Could not enable device \n",
+					dev->name);
+			return err;
+		}
pci_set_master(pdev);
if (request_irq(dev->irq, vp->full_bus_master_rx ?
&boomerang_interrupt : &vortex_interrupt, IRQF_SHARED, dev->name, dev)) {
......
......@@ -60,6 +60,7 @@
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
+#include <linux/igmp.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
......@@ -861,6 +862,28 @@ static void bond_mc_delete(struct bonding *bond, void *addr, int alen)
}
}
+/*
+ * Retrieve the list of registered multicast addresses for the bonding
+ * device and retransmit an IGMP JOIN request to the current active
+ * slave.
+ */
+static void bond_resend_igmp_join_requests(struct bonding *bond)
+{
+	struct in_device *in_dev;
+	struct ip_mc_list *im;
+
+	rcu_read_lock();
+	in_dev = __in_dev_get_rcu(bond->dev);
+	if (in_dev) {
+		for (im = in_dev->mc_list; im; im = im->next) {
+			ip_mc_rejoin_group(im);
+		}
+	}
+	rcu_read_unlock();
+}
/*
* Totally destroys the mc_list in bond
*/
......@@ -874,6 +897,7 @@ static void bond_mc_list_destroy(struct bonding *bond)
kfree(dmi);
dmi = bond->mc_list;
}
+	bond->mc_list = NULL;
}
/*
......@@ -967,6 +991,7 @@ static void bond_mc_swap(struct bonding *bond, struct slave *new_active, struct
for (dmi = bond->dev->mc_list; dmi; dmi = dmi->next) {
dev_mc_add(new_active->dev, dmi->dmi_addr, dmi->dmi_addrlen, 0);
}
+		bond_resend_igmp_join_requests(bond);
}
}
......@@ -3423,15 +3448,21 @@ void bond_register_arp(struct bonding *bond)
{
struct packet_type *pt = &bond->arp_mon_pt;
+	if (pt->type)
+		return;
+
	pt->type = htons(ETH_P_ARP);
-	pt->dev = NULL; /*bond->dev;XXX*/
+	pt->dev = bond->dev;
pt->func = bond_arp_rcv;
dev_add_pack(pt);
}
void bond_unregister_arp(struct bonding *bond)
{
-	dev_remove_pack(&bond->arp_mon_pt);
+	struct packet_type *pt = &bond->arp_mon_pt;
+
+	dev_remove_pack(pt);
+	pt->type = 0;
}
/*---------------------------- Hashing Policies -----------------------------*/
......@@ -4011,42 +4042,6 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev
return 0;
}
-static void bond_activebackup_xmit_copy(struct sk_buff *skb,
-					struct bonding *bond,
-					struct slave *slave)
-{
-	struct sk_buff *skb2 = skb_copy(skb, GFP_ATOMIC);
-	struct ethhdr *eth_data;
-	u8 *hwaddr;
-	int res;
-
-	if (!skb2) {
-		printk(KERN_ERR DRV_NAME ": Error: "
-		       "bond_activebackup_xmit_copy(): skb_copy() failed\n");
-		return;
-	}
-
-	skb2->mac.raw = (unsigned char *)skb2->data;
-	eth_data = eth_hdr(skb2);
-
-	/* Pick an appropriate source MAC address
-	 *	-- use slave's perm MAC addr, unless used by bond
-	 *	-- otherwise, borrow active slave's perm MAC addr
-	 *	   since that will not be used
-	 */
-	hwaddr = slave->perm_hwaddr;
-	if (!memcmp(eth_data->h_source, hwaddr, ETH_ALEN))
-		hwaddr = bond->curr_active_slave->perm_hwaddr;
-
-	/* Set source MAC address appropriately */
-	memcpy(eth_data->h_source, hwaddr, ETH_ALEN);
-
-	res = bond_dev_queue_xmit(bond, skb2, slave->dev);
-	if (res)
-		dev_kfree_skb(skb2);
-
-	return;
-}
/*
* in active-backup mode, we know that bond->curr_active_slave is always valid if
......@@ -4067,21 +4062,6 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d
if (!bond->curr_active_slave)
goto out;
-	/* Xmit IGMP frames on all slaves to ensure rapid fail-over
-	   for multicast traffic on snooping switches */
-	if (skb->protocol == __constant_htons(ETH_P_IP) &&
-	    skb->nh.iph->protocol == IPPROTO_IGMP) {
-		struct slave *slave, *active_slave;
-		int i;
-
-		active_slave = bond->curr_active_slave;
-		bond_for_each_slave_from_to(bond, slave, i, active_slave->next,
-					    active_slave->prev)
-			if (IS_UP(slave->dev) &&
-			    (slave->link == BOND_LINK_UP))
-				bond_activebackup_xmit_copy(skb, bond, slave);
-	}
res = bond_dev_queue_xmit(bond, skb, bond->curr_active_slave->dev);
out:
......
......@@ -1309,7 +1309,7 @@ static void mv643xx_init_ethtool_cmd(struct net_device *dev, int phy_address,
static int mv643xx_eth_probe(struct platform_device *pdev)
{
struct mv643xx_eth_platform_data *pd;
-	int port_num = pdev->id;
+	int port_num;
struct mv643xx_private *mp;
struct net_device *dev;
u8 *p;
......@@ -1319,6 +1319,12 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
int duplex = DUPLEX_HALF;
int speed = 0; /* default to auto-negotiation */
+	pd = pdev->dev.platform_data;
+	if (pd == NULL) {
+		printk(KERN_ERR "No mv643xx_eth_platform_data\n");
+		return -ENODEV;
+	}
dev = alloc_etherdev(sizeof(struct mv643xx_private));
if (!dev)
return -ENOMEM;
......@@ -1331,8 +1337,6 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
BUG_ON(!res);
dev->irq = res->start;
-	mp->port_num = port_num;
dev->open = mv643xx_eth_open;
dev->stop = mv643xx_eth_stop;
dev->hard_start_xmit = mv643xx_eth_start_xmit;
......@@ -1373,39 +1377,40 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
spin_lock_init(&mp->lock);
+	port_num = pd->port_number;
/* set default config values */
eth_port_uc_addr_get(dev, dev->dev_addr);
mp->rx_ring_size = MV643XX_ETH_PORT_DEFAULT_RECEIVE_QUEUE_SIZE;
mp->tx_ring_size = MV643XX_ETH_PORT_DEFAULT_TRANSMIT_QUEUE_SIZE;
-	pd = pdev->dev.platform_data;
-	if (pd) {
-		if (is_valid_ether_addr(pd->mac_addr))
-			memcpy(dev->dev_addr, pd->mac_addr, 6);
-
-		if (pd->phy_addr || pd->force_phy_addr)
-			ethernet_phy_set(port_num, pd->phy_addr);
-
-		if (pd->rx_queue_size)
-			mp->rx_ring_size = pd->rx_queue_size;
-
-		if (pd->tx_queue_size)
-			mp->tx_ring_size = pd->tx_queue_size;
-
-		if (pd->tx_sram_size) {
-			mp->tx_sram_size = pd->tx_sram_size;
-			mp->tx_sram_addr = pd->tx_sram_addr;
-		}
-
-		if (pd->rx_sram_size) {
-			mp->rx_sram_size = pd->rx_sram_size;
-			mp->rx_sram_addr = pd->rx_sram_addr;
-		}
-
-		duplex = pd->duplex;
-		speed = pd->speed;
-	}
+	if (is_valid_ether_addr(pd->mac_addr))
+		memcpy(dev->dev_addr, pd->mac_addr, 6);
+
+	if (pd->phy_addr || pd->force_phy_addr)
+		ethernet_phy_set(port_num, pd->phy_addr);
+
+	if (pd->rx_queue_size)
+		mp->rx_ring_size = pd->rx_queue_size;
+
+	if (pd->tx_queue_size)
+		mp->tx_ring_size = pd->tx_queue_size;
+
+	if (pd->tx_sram_size) {
+		mp->tx_sram_size = pd->tx_sram_size;
+		mp->tx_sram_addr = pd->tx_sram_addr;
+	}
+
+	if (pd->rx_sram_size) {
+		mp->rx_sram_size = pd->rx_sram_size;
+		mp->rx_sram_addr = pd->rx_sram_addr;
+	}
+
+	duplex = pd->duplex;
+	speed = pd->speed;
+
+	mp->port_num = port_num;
/* Hook up MII support for ethtool */
mp->mii.dev = dev;
mp->mii.mdio_read = mv643xx_mdio_read;
......
......@@ -2024,6 +2024,7 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
struct netdev_private *np = netdev_priv(dev);
void __iomem * ioaddr = ns_ioaddr(dev);
unsigned entry;
+	unsigned long flags;
/* Note: Ordering is important here, set the field with the
"ownership" bit last, and only then increment cur_tx. */
......@@ -2037,7 +2038,7 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
np->tx_ring[entry].addr = cpu_to_le32(np->tx_dma[entry]);
-	spin_lock_irq(&np->lock);
+	spin_lock_irqsave(&np->lock, flags);
if (!np->hands_off) {
np->tx_ring[entry].cmd_status = cpu_to_le32(DescOwn | skb->len);
......@@ -2056,7 +2057,7 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
dev_kfree_skb_irq(skb);
np->stats.tx_dropped++;
}
-	spin_unlock_irq(&np->lock);
+	spin_unlock_irqrestore(&np->lock, flags);
dev->trans_start = jiffies;
......@@ -2222,6 +2223,8 @@ static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do)
pkt_len = (desc_status & DescSizeMask) - 4;
if ((desc_status&(DescMore|DescPktOK|DescRxLong)) != DescPktOK){
if (desc_status & DescMore) {
+				unsigned long flags;
if (netif_msg_rx_err(np))
printk(KERN_WARNING
"%s: Oversized(?) Ethernet "
......@@ -2236,12 +2239,12 @@ static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do)
* reset procedure documented in
* AN-1287. */
-				spin_lock_irq(&np->lock);
+				spin_lock_irqsave(&np->lock, flags);
reset_rx(dev);
reinit_rx(dev);
writel(np->ring_dma, ioaddr + RxRingPtr);
check_link(dev);
-				spin_unlock_irq(&np->lock);
+				spin_unlock_irqrestore(&np->lock, flags);
/* We'll enable RX on exit from this
* function. */
......@@ -2396,8 +2399,19 @@ static struct net_device_stats *get_stats(struct net_device *dev)
#ifdef CONFIG_NET_POLL_CONTROLLER
static void natsemi_poll_controller(struct net_device *dev)
{
+	struct netdev_private *np = netdev_priv(dev);

	disable_irq(dev->irq);
-	intr_handler(dev->irq, dev);
+	/*
+	 * A real interrupt might have already reached us at this point,
+	 * but NAPI might not have called us back yet.  As the interrupt
+	 * status register is cleared by reading, we would risk losing an
+	 * interrupt in that case...
+	 */
+	if (!np->intr_status)
+		intr_handler(dev->irq, dev);
enable_irq(dev->irq);
}
#endif
......
......@@ -1234,14 +1234,14 @@ static void pcnet32_rx_entry(struct net_device *dev,
skb_put(skb, pkt_len); /* Make room */
pci_dma_sync_single_for_cpu(lp->pci_dev,
lp->rx_dma_addr[entry],
-						    PKT_BUF_SZ - 2,
+						    pkt_len,
PCI_DMA_FROMDEVICE);
eth_copy_and_sum(skb,
(unsigned char *)(lp->rx_skbuff[entry]->data),
pkt_len, 0);
pci_dma_sync_single_for_device(lp->pci_dev,
lp->rx_dma_addr[entry],
-						       PKT_BUF_SZ - 2,
+						       pkt_len,
PCI_DMA_FROMDEVICE);
}
lp->stats.rx_bytes += skb->len;
......
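The reason this is a performance fix rather than a correctness fix is worth spelling out: on non-coherent architectures every pci_dma_sync_single_for_cpu()/..._for_device() call has to invalidate or write back the CPU cache lines covering the requested length, so syncing the full PKT_BUF_SZ - 2 receive buffer (roughly 1.5 KB) on every copied packet burns cycles even when only a 60-byte frame arrived. A condensed sketch of the rx-copybreak path after the change, reusing the driver's own lp/entry/pkt_len bookkeeping (skb allocation and error handling omitted):

	/* Sketch: make only the pkt_len bytes the chip actually wrote
	 * visible to the CPU, copy them into the freshly allocated skb,
	 * then hand the still-mapped ring buffer back to the device.
	 */
	pci_dma_sync_single_for_cpu(lp->pci_dev, lp->rx_dma_addr[entry],
				    pkt_len, PCI_DMA_FROMDEVICE);
	eth_copy_and_sum(skb, (unsigned char *)lp->rx_skbuff[entry]->data,
			 pkt_len, 0);
	pci_dma_sync_single_for_device(lp->pci_dev, lp->rx_dma_addr[entry],
				       pkt_len, PCI_DMA_FROMDEVICE);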
......@@ -968,10 +968,10 @@ static void mdio_write(struct net_device *net_dev, int phy_id, int location,
static u16 sis900_reset_phy(struct net_device *net_dev, int phy_addr)
{
-	int i = 0;
+	int i;
	u16 status;

-	while (i++ < 2)
+	for (i = 0; i < 2; i++)
status = mdio_read(net_dev, phy_addr, MII_STATUS);
mdio_write( net_dev, phy_addr, MII_CONTROL, MII_CNTL_RESET );
......@@ -1430,7 +1430,7 @@ static void sis900_auto_negotiate(struct net_device *net_dev, int phy_addr)
int i = 0;
u32 status;
-	while (i++ < 2)
+	for (i = 0; i < 2; i++)
status = mdio_read(net_dev, phy_addr, MII_STATUS);
if (!(status & MII_STAT_LINK)){
......@@ -1466,9 +1466,9 @@ static void sis900_read_mode(struct net_device *net_dev, int *speed, int *duplex
int phy_addr = sis_priv->cur_phy;
u32 status;
u16 autoadv, autorec;
-	int i = 0;
+	int i;

-	while (i++ < 2)
+	for (i = 0; i < 2; i++)
status = mdio_read(net_dev, phy_addr, MII_STATUS);
if (!(status & MII_STAT_LINK))
......
......@@ -1685,7 +1685,7 @@ static const struct ethtool_ops de_ethtool_ops = {
.get_regs = de_get_regs,
};
-static void __init de21040_get_mac_address (struct de_private *de)
+static void __devinit de21040_get_mac_address (struct de_private *de)
{
unsigned i;
......@@ -1703,7 +1703,7 @@ static void __init de21040_get_mac_address (struct de_private *de)
}
}
-static void __init de21040_get_media_info(struct de_private *de)
+static void __devinit de21040_get_media_info(struct de_private *de)
{
unsigned int i;
......@@ -1765,7 +1765,7 @@ static unsigned __devinit tulip_read_eeprom(void __iomem *regs, int location, in
return retval;
}
-static void __init de21041_get_srom_info (struct de_private *de)
+static void __devinit de21041_get_srom_info (struct de_private *de)
{
unsigned i, sa_offset = 0, ofs;
u8 ee_data[DE_EEPROM_SIZE + 6] = {};
......
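Background on why the annotation matters (this is context, not part of the patch): __init code is discarded once boot finishes, while __devinit code is kept whenever hotplug support is configured in. These helpers are reached from the driver's __devinit PCI probe routine, which can run long after boot for a hot-plugged card, so keeping them __init risks a section-mismatch warning and a call into freed memory. A hypothetical sketch of the convention, with made-up names:

/* Hypothetical example, not from this driver: helpers called from a
 * __devinit probe path must themselves be __devinit (or unannotated)
 * so they survive the post-boot freeing of .init.text.
 */
static void __devinit my_get_mac_address(struct pci_dev *pdev)
{
	/* ... read the MAC address from the board's EEPROM/SROM ... */
}

static int __devinit my_probe(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	my_get_mac_address(pdev);	/* safe: same section lifetime */
	return 0;
}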
......@@ -3598,17 +3598,20 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Move to next BD in the ring */
if (!(bd_status & T_W))
-		ugeth->txBd[txQ] = bd + sizeof(struct qe_bd);
+		bd += sizeof(struct qe_bd);
else
-		ugeth->txBd[txQ] = ugeth->p_tx_bd_ring[txQ];
+		bd = ugeth->p_tx_bd_ring[txQ];
/* If the next BD still needs to be cleaned up, then the bds
are full. We need to tell the kernel to stop sending us stuff. */
if (bd == ugeth->confBd[txQ]) {
if (!netif_queue_stopped(dev))
netif_stop_queue(dev);
+		return NETDEV_TX_BUSY;
}
+	ugeth->txBd[txQ] = bd;
if (ugeth->p_scheduler) {
ugeth->cpucount[txQ]++;
/* Indicate to QE that there are more Tx bds ready for
......@@ -3620,7 +3623,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irq(&ugeth->lock);
-	return 0;
+	return NETDEV_TX_OK;
}
static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit)
......@@ -3722,7 +3725,7 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
/* Handle the transmitted buffer and release */
/* the BD to be used with the current frame */
-		if ((bd = ugeth->txBd[txQ]) && (netif_queue_stopped(dev) == 0))
+		if ((bd == ugeth->txBd[txQ]) && (netif_queue_stopped(dev) == 0))
break;
ugeth->stats.tx_packets++;
......@@ -3741,10 +3744,12 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
/* Advance the confirmation BD pointer */
if (!(bd_status & T_W))
-			ugeth->confBd[txQ] += sizeof(struct qe_bd);
+			bd += sizeof(struct qe_bd);
else
-			ugeth->confBd[txQ] = ugeth->p_tx_bd_ring[txQ];
+			bd = ugeth->p_tx_bd_ring[txQ];
bd_status = in_be32((u32 *)bd);
}
+	ugeth->confBd[txQ] = bd;
return 0;
}
......
......@@ -218,5 +218,7 @@ extern void ip_mc_up(struct in_device *);
extern void ip_mc_down(struct in_device *);
extern void ip_mc_dec_group(struct in_device *in_dev, __be32 addr);
extern void ip_mc_inc_group(struct in_device *in_dev, __be32 addr);
+extern void ip_mc_rejoin_group(struct ip_mc_list *im);
#endif
#endif
......@@ -1288,6 +1288,7 @@ struct mv64xxx_i2c_pdata {
#define MV643XX_ETH_NAME "mv643xx_eth"
struct mv643xx_eth_platform_data {
+	int port_number;
u16 force_phy_addr; /* force override if phy_addr == 0 */
u16 phy_addr;
......
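Tying the pieces together: with port_number now part of the platform data, and mv643xx_eth_probe() above refusing to bind without platform data, a board port supplies something like the sketch below. This is condensed from the board-file hunks earlier in the diff rather than an extra file in the patch; resources and IRQs are omitted, and the SRAM macros are the ones those boards already define.

/* Condensed board-file sketch (resources/IRQ omitted). */
static struct mv643xx_eth_platform_data eth0_pd = {
	.port_number	= 0,	/* read by mv643xx_eth_probe() instead of pdev->id */
	.tx_sram_addr	= MV_SRAM_BASE_ETH0,
	.tx_sram_size	= MV_SRAM_TXRING_SIZE,
	.tx_queue_size	= MV_SRAM_TXRING_SIZE / 16,
};

static struct platform_device eth0_device = {
	.name	= MV643XX_ETH_NAME,
	.id	= 0,
	.dev	= {
		.platform_data = &eth0_pd,	/* probe returns -ENODEV if NULL */
	},
};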
......@@ -1250,6 +1250,28 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
return;
}
+/*
+ * Resend IGMP JOIN report; used for bonding.
+ */
+void ip_mc_rejoin_group(struct ip_mc_list *im)
+{
+	struct in_device *in_dev = im->interface;
+
+#ifdef CONFIG_IP_MULTICAST
+	if (im->multiaddr == IGMP_ALL_HOSTS)
+		return;
+
+	if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) {
+		igmp_mod_timer(im, IGMP_Initial_Report_Delay);
+		return;
+	}
+	/* else, v3 */
+	im->crcount = in_dev->mr_qrv ? in_dev->mr_qrv :
+		IGMP_Unsolicited_Report_Count;
+	igmp_ifc_event(in_dev);
+#endif
+}
/*
* A socket has left a multicast group on device dev
*/
......@@ -2596,3 +2618,4 @@ int __init igmp_mc_proc_init(void)
EXPORT_SYMBOL(ip_mc_dec_group);
EXPORT_SYMBOL(ip_mc_inc_group);
EXPORT_SYMBOL(ip_mc_join_group);
+EXPORT_SYMBOL(ip_mc_rejoin_group);