Commit fa8d24b9 authored by Linus Torvalds

Merge master.kernel.org:/home/davem/BK/net-2.5

into home.transmeta.com:/home/torvalds/v2.5/linux
parents 50da7d0e 15fea123
@@ -2779,6 +2779,10 @@ N: Christopher Smith
 E: x@xman.org
 D: Tulip net driver hacker
 
+N: Mark Smith
+E: mark.smith@comdev.cc
+D: Multicast support in bonding driver
+
 N: Miquel van Smoorenburg
 E: miquels@cistron.nl
 D: Kernel and net hacker. Sysvinit, minicom. doing Debian stuff.
......
@@ -161,6 +161,21 @@
  * - Remove possibility of calling bond_sethwaddr with NULL slave_dev ptr
  * - Handle hot swap ethernet interface deregistration events to remove
  *   kernel oops following hot swap of enslaved interface
+ *
+ * 2002/1/2 - Chad N. Tindel <ctindel at ieee dot org>
+ *     - Restore original slave flags at release time.
+ *
+ * 2002/02/18 - Erik Habbinga <erik_habbinga at hp dot com>
+ *     - bond_release(): calling kfree on our_slave after call to
+ *       bond_restore_slave_flags, not before
+ *     - bond_enslave(): saving slave flags into original_flags before
+ *       call to netdev_set_master, so the IFF_SLAVE flag doesn't end
+ *       up in original_flags
+ *
+ * 2002/04/05 - Mark Smith <mark.smith at comdev dot cc> and
+ *              Steve Mead <steve.mead at comdev dot cc>
+ *     - Port Gleb Natapov's multicast support patches from 2.4.12
+ *       to 2.4.18 adding support for multicast.
  */
 
 #include <linux/config.h>
@@ -208,11 +223,8 @@
 #define MII_ENDOF_NWAY  0x20
 
 #undef  MII_LINK_READY
-/*#define MII_LINK_READY (MII_LINK_UP | MII_ENDOF_NWAY)*/
 #define MII_LINK_READY  (MII_LINK_UP)
 
-#define MAX_BOND_ADDR 256
-
 #ifndef BOND_LINK_ARP_INTERV
 #define BOND_LINK_ARP_INTERV    0
 #endif
@@ -223,7 +235,7 @@ static unsigned long arp_target = 0;
 static u32 my_ip = 0;
 char *arp_target_hw_addr = NULL;
 
-static int max_bonds = MAX_BONDS;
+static int max_bonds = BOND_DEFAULT_MAX_BONDS;
 static int miimon   = BOND_LINK_MON_INTERV;
 static int mode     = BOND_MODE_ROUNDROBIN;
 static int updelay  = 0;
@@ -234,7 +246,7 @@ int bond_cnt;
 static struct bonding *these_bonds = NULL;
 static struct net_device *dev_bonds = NULL;
 
-MODULE_PARM(max_bonds, "1-" __MODULE_STRING(INT_MAX) "i");
+MODULE_PARM(max_bonds, "i");
 MODULE_PARM_DESC(max_bonds, "Max number of bonded devices");
 MODULE_PARM(miimon, "i");
 MODULE_PARM_DESC(miimon, "Link check interval in milliseconds");
@@ -260,6 +272,15 @@ static struct net_device_stats *bond_get_stats(struct net_device *dev);
 static void bond_mii_monitor(struct net_device *dev);
 static void bond_arp_monitor(struct net_device *dev);
 static int bond_event(struct notifier_block *this, unsigned long event, void *ptr);
+static void bond_restore_slave_flags(slave_t *slave);
+static void bond_mc_list_destroy(struct bonding *bond);
+static void bond_mc_add(bonding_t *bond, void *addr, int alen);
+static void bond_mc_delete(bonding_t *bond, void *addr, int alen);
+static int bond_mc_list_copy (struct dev_mc_list *src, struct bonding *dst, int gpf_flag);
+static inline int dmi_same(struct dev_mc_list *dmi1, struct dev_mc_list *dmi2);
+static void bond_set_promiscuity(bonding_t *bond, int inc);
+static void bond_set_allmulti(bonding_t *bond, int inc);
+static struct dev_mc_list* bond_mc_list_find_dmi(struct dev_mc_list *dmi, struct dev_mc_list *mc_list);
 static void bond_set_slave_inactive_flags(slave_t *slave);
 static void bond_set_slave_active_flags(slave_t *slave);
 static int bond_enslave(struct net_device *master, struct net_device *slave);
@@ -282,6 +303,11 @@ static int bond_get_info(char *buf, char **start, off_t offset, int length);
 #define IS_UP(dev)  ((((dev)->flags & (IFF_UP)) == (IFF_UP)) && \
                      (netif_running(dev) && netif_carrier_ok(dev)))
 
+static void bond_restore_slave_flags(slave_t *slave)
+{
+        slave->dev->flags = slave->original_flags;
+}
+
 static void bond_set_slave_inactive_flags(slave_t *slave)
 {
         slave->state = BOND_STATE_BACKUP;
@@ -431,6 +457,7 @@ static int bond_close(struct net_device *master)
 
         /* Release the bonded slaves */
         bond_release_all(master);
+        bond_mc_list_destroy (bond);
 
         write_unlock_irqrestore(&bond->lock, flags);
@@ -438,19 +465,180 @@ static int bond_close(struct net_device *master)
         return 0;
 }
-static void set_multicast_list(struct net_device *master)
-{
-/*
-        bonding_t *bond = master->priv;
-        slave_t *slave;
-
-        for (slave = bond->next; slave != (slave_t*)bond; slave = slave->next) {
-                slave->dev->mc_list = master->mc_list;
-                slave->dev->mc_count = master->mc_count;
-                slave->dev->flags = master->flags;
-                slave->dev->set_multicast_list(slave->dev);
-        }
-*/
-}
+/*
+ * flush all members of flush->mc_list from device dev->mc_list
+ */
+static void bond_mc_list_flush(struct net_device *dev, struct net_device *flush)
+{
+        struct dev_mc_list *dmi;
+
+        for (dmi = flush->mc_list; dmi != NULL; dmi = dmi->next)
+                dev_mc_delete(dev, dmi->dmi_addr, dmi->dmi_addrlen, 0);
+}
+
+/*
+ * Totally destroys the mc_list in bond
+ */
+static void bond_mc_list_destroy(struct bonding *bond)
+{
+        struct dev_mc_list *dmi;
+
+        dmi = bond->mc_list;
+        while (dmi) {
+                bond->mc_list = dmi->next;
+                kfree(dmi);
+                dmi = bond->mc_list;
+        }
+}
+
+/*
+ * Add a Multicast address to every slave in the bonding group
+ */
+static void bond_mc_add(bonding_t *bond, void *addr, int alen)
+{
+        slave_t *slave;
+
+        for (slave = bond->prev; slave != (slave_t*)bond; slave = slave->prev) {
+                dev_mc_add(slave->dev, addr, alen, 0);
+        }
+}
+
+/*
+ * Remove a multicast address from every slave in the bonding group
+ */
+static void bond_mc_delete(bonding_t *bond, void *addr, int alen)
+{
+        slave_t *slave;
+
+        for (slave = bond->prev; slave != (slave_t*)bond; slave = slave->prev)
+                dev_mc_delete(slave->dev, addr, alen, 0);
+}
+
+/*
+ * Copy all the Multicast addresses from src to the bonding device dst
+ */
+static int bond_mc_list_copy (struct dev_mc_list *src, struct bonding *dst,
+                              int gpf_flag)
+{
+        struct dev_mc_list *dmi, *new_dmi;
+
+        for (dmi = src; dmi != NULL; dmi = dmi->next) {
+                new_dmi = kmalloc(sizeof(struct dev_mc_list), gpf_flag);
+
+                if (new_dmi == NULL) {
+                        return -ENOMEM;
+                }
+
+                new_dmi->next = dst->mc_list;
+                dst->mc_list = new_dmi;
+
+                new_dmi->dmi_addrlen = dmi->dmi_addrlen;
+                memcpy(new_dmi->dmi_addr, dmi->dmi_addr, dmi->dmi_addrlen);
+                new_dmi->dmi_users = dmi->dmi_users;
+                new_dmi->dmi_gusers = dmi->dmi_gusers;
+        }
+        return 0;
+}
+
+/*
+ * Returns non-0 if dmi1 and dmi2 are the same, 0 otherwise
+ */
+static inline int dmi_same(struct dev_mc_list *dmi1, struct dev_mc_list *dmi2)
+{
+        return memcmp(dmi1->dmi_addr, dmi2->dmi_addr, dmi1->dmi_addrlen) == 0 &&
+               dmi1->dmi_addrlen == dmi2->dmi_addrlen;
+}
+
+/*
+ * Push the promiscuity flag down to all slaves
+ */
+static void bond_set_promiscuity(bonding_t *bond, int inc)
+{
+        slave_t *slave;
+
+        for (slave = bond->prev; slave != (slave_t*)bond; slave = slave->prev)
+                dev_set_promiscuity(slave->dev, inc);
+}
+
+/*
+ * Push the allmulti flag down to all slaves
+ */
+static void bond_set_allmulti(bonding_t *bond, int inc)
+{
+        slave_t *slave;
+
+        for (slave = bond->prev; slave != (slave_t*)bond; slave = slave->prev)
+                dev_set_allmulti(slave->dev, inc);
+}
+
+/*
+ * returns dmi entry if found, NULL otherwise
+ */
+static struct dev_mc_list* bond_mc_list_find_dmi(struct dev_mc_list *dmi,
+                                                 struct dev_mc_list *mc_list)
+{
+        struct dev_mc_list *idmi;
+
+        for (idmi = mc_list; idmi != NULL; idmi = idmi->next) {
+                if (dmi_same(dmi, idmi)) {
+                        return idmi;
+                }
+        }
+        return NULL;
+}
+
+static void set_multicast_list(struct net_device *master)
+{
+        bonding_t *bond = master->priv;
+        struct dev_mc_list *dmi;
+        unsigned long flags = 0;
+
+        /*
+         * Lock the private data for the master
+         */
+        write_lock_irqsave(&bond->lock, flags);
+
+        /*
+         * Lock the master device so that no one tries to transmit
+         * while we're changing things
+         */
+        spin_lock_bh(&master->xmit_lock);
+
+        /* set promiscuity flag to slaves */
+        if ( (master->flags & IFF_PROMISC) && !(bond->flags & IFF_PROMISC) )
+                bond_set_promiscuity(bond, 1);
+
+        if ( !(master->flags & IFF_PROMISC) && (bond->flags & IFF_PROMISC) )
+                bond_set_promiscuity(bond, -1);
+
+        /* set allmulti flag to slaves */
+        if ( (master->flags & IFF_ALLMULTI) && !(bond->flags & IFF_ALLMULTI) )
+                bond_set_allmulti(bond, 1);
+
+        if ( !(master->flags & IFF_ALLMULTI) && (bond->flags & IFF_ALLMULTI) )
+                bond_set_allmulti(bond, -1);
+
+        bond->flags = master->flags;
+
+        /* looking for addresses to add to slaves' mc list */
+        for (dmi = master->mc_list; dmi != NULL; dmi = dmi->next) {
+                if (bond_mc_list_find_dmi(dmi, bond->mc_list) == NULL)
+                        bond_mc_add(bond, dmi->dmi_addr, dmi->dmi_addrlen);
+        }
+
+        /* looking for addresses to delete from slaves' list */
+        for (dmi = bond->mc_list; dmi != NULL; dmi = dmi->next) {
+                if (bond_mc_list_find_dmi(dmi, master->mc_list) == NULL)
+                        bond_mc_delete(bond, dmi->dmi_addr, dmi->dmi_addrlen);
+        }
+
+        /* save master's multicast list */
+        bond_mc_list_destroy (bond);
+        bond_mc_list_copy (master->mc_list, bond, GFP_KERNEL);
+
+        spin_unlock_bh(&master->xmit_lock);
+        write_unlock_irqrestore(&bond->lock, flags);
+}
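Aside (editorial illustration, not part of the patch): the heart of the new set_multicast_list() above is a two-pass reconciliation between the master's current list and the copy cached in bond->mc_list — add what is new, delete what is stale, then re-cache. A minimal user-space sketch of that pattern; the addr struct, array sizes and sample addresses are made up, not kernel structures:

    #include <stdio.h>
    #include <string.h>

    #define ALEN 6
    struct addr { unsigned char a[ALEN]; };

    /* cf. bond_mc_list_find_dmi()/dmi_same() above */
    static int in_list(const struct addr *list, int n, const struct addr *x)
    {
            int i;
            for (i = 0; i < n; i++)
                    if (memcmp(list[i].a, x->a, ALEN) == 0)
                            return 1;
            return 0;
    }

    int main(void)
    {
            struct addr want[] = { {{1,0,0x5e,0,0,1}}, {{1,0,0x5e,0,0,2}} }; /* master->mc_list */
            struct addr have[] = { {{1,0,0x5e,0,0,2}}, {{1,0,0x5e,0,0,3}} }; /* bond->mc_list   */
            int nw = 2, nh = 2, i;

            for (i = 0; i < nw; i++)        /* pass 1: new entries -> bond_mc_add() */
                    if (!in_list(have, nh, &want[i]))
                            printf("add    ...:%02x\n", want[i].a[ALEN - 1]);

            for (i = 0; i < nh; i++)        /* pass 2: stale entries -> bond_mc_delete() */
                    if (!in_list(want, nw, &have[i]))
                            printf("delete ...:%02x\n", have[i].a[ALEN - 1]);

            return 0;   /* the driver then re-caches: destroy + copy */
    }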
 /*
@@ -476,6 +664,7 @@ static int bond_enslave(struct net_device *master_dev,
         unsigned long flags = 0;
         int ndx = 0;
         int err = 0;
+        struct dev_mc_list *dmi;
 
         if (master_dev == NULL || slave_dev == NULL) {
                 return -ENODEV;
@@ -513,6 +702,8 @@ static int bond_enslave(struct net_device *master_dev,
         }
         memset(new_slave, 0, sizeof(slave_t));
 
+        /* save flags before call to netdev_set_master */
+        new_slave->original_flags = slave_dev->flags;
         err = netdev_set_master(slave_dev, master_dev);
         if (err) {
@@ -526,10 +717,38 @@ static int bond_enslave(struct net_device *master_dev,
 
         new_slave->dev = slave_dev;
 
+        /* set promiscuity level to new slave */
+        if (master_dev->flags & IFF_PROMISC)
+                dev_set_promiscuity(slave_dev, 1);
+
+        /* set allmulti level to new slave */
+        if (master_dev->flags & IFF_ALLMULTI)
+                dev_set_allmulti(slave_dev, 1);
+
+        /* upload master's mc_list to new slave */
+        for (dmi = master_dev->mc_list; dmi != NULL; dmi = dmi->next)
+                dev_mc_add (slave_dev, dmi->dmi_addr, dmi->dmi_addrlen, 0);
+
         /*
          * queue to the end of the slaves list, make the first element its
          * successor, the last one its predecessor, and make it the bond's
          * predecessor.
+         *
+         * Just to clarify, so future bonding driver hackers don't go through
+         * the same confusion stage I did trying to figure this out, the
+         * slaves are stored in a double linked circular list, sort of.
+         * In the ->next direction, the last slave points to the first slave,
+         * bypassing bond; only the slaves are in the ->next direction.
+         * In the ->prev direction, however, the first slave points to bond
+         * and bond points to the last slave.
+         *
+         * It looks like a circle with a little bubble hanging off one side
+         * in the ->prev direction only.
+         *
+         * When going through the list once, it's best to start at bond->prev
+         * and go in the ->prev direction, testing for bond.  Doing this
+         * in the ->next direction doesn't work.  Trust me, I know this now.
+         * :)  -mts 2002.03.14
          */
         new_slave->prev = bond->prev;
         new_slave->prev->next = new_slave;
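Aside (editorial illustration, not from the patch): a small user-space model of the list layout the comment above describes. Only the first two pointer assignments are visible in this hunk; the last two in enslave() below are assumed from the comment's wording, and the node ids are made up.

    #include <stdio.h>

    struct node { struct node *next, *prev; int id; };

    static void enslave(struct node *bond, struct node *s)
    {
            s->prev = bond->prev;       /* old last slave (or bond itself)   */
            s->prev->next = s;          /* ...now points at the new slave    */
            bond->prev = s;             /* bond's predecessor = newest slave */
            s->next = bond->next;       /* successor = first slave, not bond */
    }

    int main(void)
    {
            struct node bond = { &bond, &bond, 0 };     /* empty bond */
            struct node s1 = { 0 }, s2 = { 0 }, s3 = { 0 };
            struct node *p;

            s1.id = 1; s2.id = 2; s3.id = 3;
            enslave(&bond, &s1);
            enslave(&bond, &s2);
            enslave(&bond, &s3);

            /* one full walk, newest first: start at bond->prev, follow ->prev,
             * stop when we come back around to bond (the "bubble") */
            for (p = bond.prev; p != &bond; p = p->prev)
                    printf("slave %d\n", p->id);
            return 0;
    }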
@@ -838,10 +1057,20 @@ static int bond_release(struct net_device *master, struct net_device *slave)
                         } else {
                                 printk(".\n");
                         }
-                        kfree(our_slave);
 
                         /* release the slave from its bond */
 
+                        /* flush master's mc_list from slave */
+                        bond_mc_list_flush (slave, master);
+
+                        /* unset promiscuity level from slave */
+                        if (master->flags & IFF_PROMISC)
+                                dev_set_promiscuity(slave, -1);
+
+                        /* unset allmulti level from slave */
+                        if (master->flags & IFF_ALLMULTI)
+                                dev_set_allmulti(slave, -1);
+
                         netdev_set_master(slave, NULL);
 
                         /* only restore its RUNNING flag if monitoring set it down */
@@ -854,6 +1083,9 @@ static int bond_release(struct net_device *master, struct net_device *slave)
                                 dev_close(slave);
                         }
 
+                        bond_restore_slave_flags(our_slave);
+                        kfree(our_slave);
+
                         if (bond->current_slave == NULL) {
                                 printk(KERN_INFO
                                         "%s: now running without any active interface !\n",
@@ -1121,8 +1353,8 @@ static void bond_mii_monitor(struct net_device *master)
                                        master->name, bestslave->dev->name,
                                        (updelay - bestslave->delay) * miimon);
 
-                               bestslave->delay= 0;
+                               bestslave->delay = 0;
                                bestslave->link = BOND_LINK_UP;
                        }
         if (mode == BOND_MODE_ACTIVEBACKUP) {
@@ -1192,7 +1424,7 @@ static void bond_arp_monitor(struct net_device *master)
                         read_lock(&bond->ptrlock);
                         if ( (!(slave->link == BOND_LINK_UP))
-                            && (slave!= bond->current_slave) ) {
+                            && (slave != bond->current_slave) ) {
                                 read_unlock(&bond->ptrlock);
@@ -1207,7 +1439,7 @@ static void bond_arp_monitor(struct net_device *master)
                                 slave->state = BOND_STATE_ACTIVE;
                                 bond->current_slave = slave;
                         }
-                        if (slave!=bond->current_slave) {
+                        if (slave != bond->current_slave) {
                                 slave->dev->flags |= IFF_NOARP;
                         }
                         write_unlock(&bond->ptrlock);
@@ -1311,7 +1543,7 @@ static void bond_arp_monitor(struct net_device *master)
 #define isdigit(c) (c >= '0' && c <= '9')
 __inline static int atoi( char **s)
 {
-        int i=0;
+        int i = 0;
 
         while (isdigit(**s))
                 i = i*20 + *((*s)++) - '0';
         return i;
@@ -1388,7 +1620,7 @@ my_inet_aton(char *cp, unsigned long *the_addr) {
                         goto ret_0;
         }
 
-        if (the_addr!= NULL) {
+        if (the_addr != NULL) {
                 *the_addr = res.word | htonl (val);
         }
@@ -1420,7 +1652,7 @@ static int bond_info_query(struct net_device *master, struct ifbond *info)
         info->miimon = miimon;
 
         read_lock_irqsave(&bond->lock, flags);
-        for (slave = bond->prev; slave!=(slave_t *)bond; slave = slave->prev) {
+        for (slave = bond->prev; slave != (slave_t *)bond; slave = slave->prev) {
                 info->num_slaves++;
         }
         read_unlock_irqrestore(&bond->lock, flags);
@@ -1696,7 +1928,7 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *dev)
 
         /* if we are sending arp packets, try to at least
            identify our own ip address */
-        if ( (arp_interval > 0) && (my_ip==0) &&
+        if ( (arp_interval > 0) && (my_ip == 0) &&
                 (skb->protocol == __constant_htons(ETH_P_ARP) ) ) {
                 char *the_ip = (((char *)skb->data))
                         + sizeof(struct ethhdr)
@@ -1708,7 +1940,7 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *dev)
 
         /* if we are sending arp packets and don't know
            the target hw address, save it so we don't need
            to use a broadcast address */
-        if ( (arp_interval > 0) && (arp_target_hw_addr==NULL) &&
+        if ( (arp_interval > 0) && (arp_target_hw_addr == NULL) &&
                 (skb->protocol == __constant_htons(ETH_P_IP) ) ) {
                 struct ethhdr *eth_hdr =
                         (struct ethhdr *) (((char *)skb->data));
@@ -1751,7 +1983,7 @@ static struct net_device_stats *bond_get_stats(struct net_device *dev)
         read_lock_irqsave(&bond->lock, flags);
 
-        for (slave = bond->prev; slave!=(slave_t *)bond; slave = slave->prev) {
+        for (slave = bond->prev; slave != (slave_t *)bond; slave = slave->prev) {
                 sstats = slave->dev->get_stats(slave->dev);
 
                 stats->rx_packets += sstats->rx_packets;
@@ -1861,7 +2093,7 @@ static int bond_get_info(char *buf, char **start, off_t offset, int length)
 static int bond_event(struct notifier_block *this, unsigned long event,
                         void *ptr)
 {
-        struct bonding *this_bond=(struct bonding *)these_bonds;
+        struct bonding *this_bond = (struct bonding *)these_bonds;
         struct bonding *last_bond;
         struct net_device *event_dev = (struct net_device *)ptr;
@@ -1905,10 +2137,8 @@ static int bond_event(struct notifier_block *this, unsigned long event,
         return NOTIFY_DONE;
 }
 
-static struct notifier_block bond_netdev_notifier={
-        bond_event,
-        NULL,
-        0
+static struct notifier_block bond_netdev_notifier = {
+        notifier_call: bond_event,
 };
 static int __init bond_init(struct net_device *dev)
@@ -2038,6 +2268,13 @@ static int __init bonding_init(void)
         /* Find a name for this unit */
         static struct net_device *dev_bond = NULL;
 
+        if (max_bonds < 1 || max_bonds > INT_MAX) {
+                printk(KERN_WARNING
+                       "bonding_init(): max_bonds (%d) not in range %d-%d, "
+                       "so it was reset to BOND_DEFAULT_MAX_BONDS (%d)",
+                       max_bonds, 1, INT_MAX, BOND_DEFAULT_MAX_BONDS);
+                max_bonds = BOND_DEFAULT_MAX_BONDS;
+        }
         dev_bond = dev_bonds = kmalloc(max_bonds*sizeof(struct net_device),
                                        GFP_KERNEL);
         if (dev_bond == NULL) {
......
@@ -51,7 +51,7 @@
 #define BOND_STATE_ACTIVE 0   /* link is active */
 #define BOND_STATE_BACKUP 1   /* link is backup */
 
-#define MAX_BONDS 1             /* Maximum number of devices to support */
+#define BOND_DEFAULT_MAX_BONDS 1   /* Default maximum number of devices to support */
 
 typedef struct ifbond {
         __s32 bond_mode;
@@ -76,6 +76,7 @@ typedef struct slave {
         short  delay;
         char   link;    /* one of BOND_LINK_XXXX */
         char   state;   /* one of BOND_STATE_XXXX */
+        unsigned short original_flags;
         u32 link_failure_count;
 } slave_t;
@@ -104,6 +105,8 @@ typedef struct bonding {
 #endif /* CONFIG_PROC_FS */
         struct bonding *next_bond;
         struct net_device *device;
+        struct dev_mc_list *mc_list;
+        unsigned short flags;
 } bonding_t;
 
 #endif /* __KERNEL__ */
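Aside (editorial illustration, not from the patch): the new slave->original_flags field exists so the device's flags can be captured in bond_enslave() before netdev_set_master() (which sets IFF_SLAVE) and written back by bond_restore_slave_flags() at release. A user-space model with simplified stand-in structs; the flag constants mirror <linux/if.h> and the starting value 0x1043 is just a typical example:

    #include <stdio.h>

    #define IFF_SLAVE  0x0800
    #define IFF_NOARP  0x0080

    struct fake_dev   { unsigned short flags; };
    struct fake_slave { struct fake_dev *dev; unsigned short original_flags; };

    int main(void)
    {
            struct fake_dev   eth0  = { 0x1043 };
            struct fake_slave slave = { &eth0, 0 };

            slave.original_flags = eth0.flags;      /* before netdev_set_master() */
            eth0.flags |= IFF_SLAVE | IFF_NOARP;    /* while enslaved              */

            eth0.flags = slave.original_flags;      /* bond_restore_slave_flags()  */
            printf("restored flags: 0x%04x\n", eth0.flags);
            return 0;
    }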
......
@@ -8,6 +8,7 @@
 #define PSCHED_CLOCK_SOURCE     PSCHED_JIFFIES
 
 #include <linux/config.h>
+#include <linux/types.h>
 #include <linux/pkt_sched.h>
 #include <net/pkt_cls.h>
@@ -221,7 +222,7 @@ extern psched_time_t psched_time_base;
 #define PSCHED_EXPORTLIST_2
 
-#if ~0UL == 0xFFFFFFFF
+#if BITS_PER_LONG <= 32
 
 #define PSCHED_WATCHER unsigned long
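Aside (editorial illustration): both guards ask the same question — "is long 32 bits wide?" — but BITS_PER_LONG (hence the new <linux/types.h> include) states the intent directly instead of encoding it as preprocessor arithmetic on ~0UL. A standalone user-space check of the equivalence:

    #include <stdio.h>
    #include <limits.h>

    int main(void)
    {
            printf("~0UL == 0xFFFFFFFF   -> %d\n", ~0UL == 0xFFFFFFFFUL);
            printf("bits in long <= 32   -> %d\n", (int)(sizeof(long) * CHAR_BIT) <= 32);
            return 0;
    }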
......
...@@ -20,17 +20,18 @@ ...@@ -20,17 +20,18 @@
* Pekka Riikonen <priikone@poesidon.pspt.fi> * Pekka Riikonen <priikone@poesidon.pspt.fi>
* *
* Changes: * Changes:
* D.J. Barrow : Fixed bug where dev->refcnt gets set to 2 * D.J. Barrow : Fixed bug where dev->refcnt gets set
* if register_netdev gets called before * to 2 if register_netdev gets called
* net_dev_init & also removed a few lines * before net_dev_init & also removed a
* of code in the process. * few lines of code in the process.
* Alan Cox : device private ioctl copies fields back. * Alan Cox : device private ioctl copies fields back.
* Alan Cox : Transmit queue code does relevant stunts to * Alan Cox : Transmit queue code does relevant
* keep the queue safe. * stunts to keep the queue safe.
* Alan Cox : Fixed double lock. * Alan Cox : Fixed double lock.
* Alan Cox : Fixed promisc NULL pointer trap * Alan Cox : Fixed promisc NULL pointer trap
* ???????? : Support the full private ioctl range * ???????? : Support the full private ioctl range
* Alan Cox : Moved ioctl permission check into drivers * Alan Cox : Moved ioctl permission check into
* drivers
* Tim Kordas : SIOCADDMULTI/SIOCDELMULTI * Tim Kordas : SIOCADDMULTI/SIOCDELMULTI
* Alan Cox : 100 backlog just doesn't cut it when * Alan Cox : 100 backlog just doesn't cut it when
* you start doing multicast video 8) * you start doing multicast video 8)
...@@ -38,16 +39,19 @@ ...@@ -38,16 +39,19 @@
* Alan Cox : Fix ETH_P_ALL echoback lengths. * Alan Cox : Fix ETH_P_ALL echoback lengths.
* Alan Cox : Took out transmit every packet pass * Alan Cox : Took out transmit every packet pass
* Saved a few bytes in the ioctl handler * Saved a few bytes in the ioctl handler
* Alan Cox : Network driver sets packet type before calling netif_rx. Saves * Alan Cox : Network driver sets packet type before
* a function call a packet. * calling netif_rx. Saves a function
* call a packet.
* Alan Cox : Hashed net_bh() * Alan Cox : Hashed net_bh()
* Richard Kooijman: Timestamp fixes. * Richard Kooijman: Timestamp fixes.
* Alan Cox : Wrong field in SIOCGIFDSTADDR * Alan Cox : Wrong field in SIOCGIFDSTADDR
* Alan Cox : Device lock protection. * Alan Cox : Device lock protection.
* Alan Cox : Fixed nasty side effect of device close changes. * Alan Cox : Fixed nasty side effect of device close
* Rudi Cilibrasi : Pass the right thing to set_mac_address() * changes.
* Dave Miller : 32bit quantity for the device lock to make it work out * Rudi Cilibrasi : Pass the right thing to
* on a Sparc. * set_mac_address()
* Dave Miller : 32bit quantity for the device lock to
* make it work out on a Sparc.
* Bjorn Ekwall : Added KERNELD hack. * Bjorn Ekwall : Added KERNELD hack.
* Alan Cox : Cleaned up the backlog initialise. * Alan Cox : Cleaned up the backlog initialise.
* Craig Metz : SIOCGIFCONF fix if space for under * Craig Metz : SIOCGIFCONF fix if space for under
...@@ -62,9 +66,10 @@ ...@@ -62,9 +66,10 @@
* the backlog queue. * the backlog queue.
* Paul Rusty Russell : SIOCSIFNAME * Paul Rusty Russell : SIOCSIFNAME
* Pekka Riikonen : Netdev boot-time settings code * Pekka Riikonen : Netdev boot-time settings code
* Andrew Morton : Make unregister_netdevice wait indefinitely on dev->refcnt * Andrew Morton : Make unregister_netdevice wait
* indefinitely on dev->refcnt
* J Hadi Salim : - Backlog queue sampling * J Hadi Salim : - Backlog queue sampling
* - netif_rx() feedback * - netif_rx() feedback
*/ */
#include <asm/uaccess.h> #include <asm/uaccess.h>
...@@ -162,8 +167,8 @@ const char *if_port_text[] = { ...@@ -162,8 +167,8 @@ const char *if_port_text[] = {
* 86DD IPv6 * 86DD IPv6
*/ */
static struct packet_type *ptype_base[16]; /* 16 way hashed list */ static struct packet_type *ptype_base[16]; /* 16 way hashed list */
static struct packet_type *ptype_all = NULL; /* Taps */ static struct packet_type *ptype_all; /* Taps */
#ifdef OFFLINE_SAMPLE #ifdef OFFLINE_SAMPLE
static void sample_queue(unsigned long dummy); static void sample_queue(unsigned long dummy);
...@@ -179,8 +184,8 @@ static int net_run_sbin_hotplug(struct net_device *dev, char *action); ...@@ -179,8 +184,8 @@ static int net_run_sbin_hotplug(struct net_device *dev, char *action);
/* /*
* Our notifier list * Our notifier list
*/ */
static struct notifier_block *netdev_chain=NULL; static struct notifier_block *netdev_chain;
/* /*
* Device drivers call our routines to queue packets here. We empty the * Device drivers call our routines to queue packets here. We empty the
...@@ -194,17 +199,17 @@ int netdev_fastroute_obstacles; ...@@ -194,17 +199,17 @@ int netdev_fastroute_obstacles;
#endif #endif
/****************************************************************************************** /*******************************************************************************
Protocol management and registration routines Protocol management and registration routines
*******************************************************************************************/ *******************************************************************************/
/* /*
* For efficiency * For efficiency
*/ */
int netdev_nit=0; int netdev_nit;
/* /*
* Add a protocol ID to the list. Now that the input handler is * Add a protocol ID to the list. Now that the input handler is
...@@ -225,12 +230,12 @@ int netdev_nit=0; ...@@ -225,12 +230,12 @@ int netdev_nit=0;
/** /**
* dev_add_pack - add packet handler * dev_add_pack - add packet handler
* @pt: packet type declaration * @pt: packet type declaration
* *
* Add a protocol handler to the networking stack. The passed &packet_type * Add a protocol handler to the networking stack. The passed &packet_type
* is linked into kernel lists and may not be freed until it has been * is linked into kernel lists and may not be freed until it has been
* removed from the kernel lists. * removed from the kernel lists.
*/ */
void dev_add_pack(struct packet_type *pt) void dev_add_pack(struct packet_type *pt)
{ {
int hash; int hash;
...@@ -239,17 +244,17 @@ void dev_add_pack(struct packet_type *pt) ...@@ -239,17 +244,17 @@ void dev_add_pack(struct packet_type *pt)
#ifdef CONFIG_NET_FASTROUTE #ifdef CONFIG_NET_FASTROUTE
/* Hack to detect packet socket */ /* Hack to detect packet socket */
if ((pt->data) && ((int)(pt->data)!=1)) { if (pt->data && (long)(pt->data) != 1) {
netdev_fastroute_obstacles++; netdev_fastroute_obstacles++;
dev_clear_fastroute(pt->dev); dev_clear_fastroute(pt->dev);
} }
#endif #endif
if (pt->type == htons(ETH_P_ALL)) { if (pt->type == htons(ETH_P_ALL)) {
netdev_nit++; netdev_nit++;
pt->next=ptype_all; pt->next = ptype_all;
ptype_all=pt; ptype_all = pt;
} else { } else {
hash=ntohs(pt->type)&15; hash = ntohs(pt->type) & 15;
pt->next = ptype_base[hash]; pt->next = ptype_base[hash];
ptype_base[hash] = pt; ptype_base[hash] = pt;
} }
...@@ -260,13 +265,12 @@ void dev_add_pack(struct packet_type *pt) ...@@ -260,13 +265,12 @@ void dev_add_pack(struct packet_type *pt)
/** /**
* dev_remove_pack - remove packet handler * dev_remove_pack - remove packet handler
* @pt: packet type declaration * @pt: packet type declaration
* *
* Remove a protocol handler that was previously added to the kernel * Remove a protocol handler that was previously added to the kernel
* protocol handlers by dev_add_pack(). The passed &packet_type is removed * protocol handlers by dev_add_pack(). The passed &packet_type is removed
* from the kernel lists and can be freed or reused once this function * from the kernel lists and can be freed or reused once this function
* returns. * returns.
*/ */
void dev_remove_pack(struct packet_type *pt) void dev_remove_pack(struct packet_type *pt)
{ {
struct packet_type **pt1; struct packet_type **pt1;
@@ -275,24 +279,23 @@ void dev_remove_pack(struct packet_type *pt)
 
         if (pt->type == htons(ETH_P_ALL)) {
                 netdev_nit--;
-                pt1=&ptype_all;
-        } else {
-                pt1=&ptype_base[ntohs(pt->type)&15];
-        }
+                pt1 = &ptype_all;
+        } else
+                pt1 = &ptype_base[ntohs(pt->type) & 15];
 
-        for (; (*pt1) != NULL; pt1 = &((*pt1)->next)) {
-                if (pt == (*pt1)) {
+        for (; *pt1; pt1 = &((*pt1)->next)) {
+                if (pt == *pt1) {
                         *pt1 = pt->next;
 #ifdef CONFIG_NET_FASTROUTE
                         if (pt->data)
                                 netdev_fastroute_obstacles--;
 #endif
-                        br_write_unlock_bh(BR_NETPROTO_LOCK);
-                        return;
+                        goto out;
                 }
         }
-        br_write_unlock_bh(BR_NETPROTO_LOCK);
         printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
+out:
+        br_write_unlock_bh(BR_NETPROTO_LOCK);
 }
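Aside (editorial illustration, not from the patch): dev_add_pack()/dev_remove_pack() above maintain a 16-way hashed chain of protocol handlers (ptype_base), with ETH_P_ALL taps kept on the separate ptype_all list. A user-space model of just the hashed-bucket part; struct pt and the names are simplified stand-ins, and byte order (ntohs) is ignored:

    #include <stdio.h>

    struct pt { unsigned short type; const char *name; struct pt *next; };

    static struct pt *base[16];

    static void add_pack(struct pt *p)
    {
            int hash = p->type & 15;        /* kernel hashes ntohs(pt->type) & 15 */
            p->next = base[hash];
            base[hash] = p;
    }

    static void remove_pack(struct pt *p)
    {
            struct pt **pp;
            for (pp = &base[p->type & 15]; *pp; pp = &(*pp)->next)
                    if (*pp == p) {
                            *pp = p->next;
                            return;
                    }
            printf("remove_pack: %s not found\n", p->name);
    }

    int main(void)
    {
            struct pt ip = { 0x0800, "IPv4", 0 }, arp = { 0x0806, "ARP", 0 };
            struct pt *q;

            add_pack(&ip);
            add_pack(&arp);
            remove_pack(&ip);
            for (q = base[0x0806 & 15]; q; q = q->next)
                    printf("bucket %d: %s\n", 0x0806 & 15, q->name);
            return 0;
    }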
/****************************************************************************** /******************************************************************************
...@@ -328,10 +331,7 @@ int netdev_boot_setup_add(char *name, struct ifmap *map) ...@@ -328,10 +331,7 @@ int netdev_boot_setup_add(char *name, struct ifmap *map)
} }
} }
if (i >= NETDEV_BOOT_SETUP_MAX) return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
return 0;
return 1;
} }
/** /**
...@@ -345,10 +345,9 @@ int netdev_boot_setup_add(char *name, struct ifmap *map) ...@@ -345,10 +345,9 @@ int netdev_boot_setup_add(char *name, struct ifmap *map)
*/ */
int netdev_boot_setup_check(struct net_device *dev) int netdev_boot_setup_check(struct net_device *dev)
{ {
struct netdev_boot_setup *s; struct netdev_boot_setup *s = dev_boot_setup;
int i; int i;
s = dev_boot_setup;
for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) { for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
if (s[i].name[0] != '\0' && s[i].name[0] != ' ' && if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
!strncmp(dev->name, s[i].name, strlen(s[i].name))) { !strncmp(dev->name, s[i].name, strlen(s[i].name))) {
...@@ -385,20 +384,20 @@ int __init netdev_boot_setup(char *str) ...@@ -385,20 +384,20 @@ int __init netdev_boot_setup(char *str)
if (ints[0] > 3) if (ints[0] > 3)
map.mem_end = ints[4]; map.mem_end = ints[4];
/* Add new entry to the list */ /* Add new entry to the list */
return netdev_boot_setup_add(str, &map); return netdev_boot_setup_add(str, &map);
} }
__setup("netdev=", netdev_boot_setup); __setup("netdev=", netdev_boot_setup);
/***************************************************************************************** /*******************************************************************************
Device Interface Subroutines Device Interface Subroutines
******************************************************************************************/ *******************************************************************************/
/** /**
* __dev_get_by_name - find a device by its name * __dev_get_by_name - find a device by its name
* @name: name to find * @name: name to find
* *
* Find an interface by name. Must be called under RTNL semaphore * Find an interface by name. Must be called under RTNL semaphore
...@@ -407,24 +406,22 @@ __setup("netdev=", netdev_boot_setup); ...@@ -407,24 +406,22 @@ __setup("netdev=", netdev_boot_setup);
* reference counters are not incremented so the caller must be * reference counters are not incremented so the caller must be
* careful with locks. * careful with locks.
*/ */
 struct net_device *__dev_get_by_name(const char *name)
 {
         struct net_device *dev;
 
-        for (dev = dev_base; dev != NULL; dev = dev->next) {
-                if (strncmp(dev->name, name, IFNAMSIZ) == 0)
-                        return dev;
-        }
-        return NULL;
+        for (dev = dev_base; dev; dev = dev->next)
+                if (!strncmp(dev->name, name, IFNAMSIZ))
+                        break;
+        return dev;
 }
/** /**
* dev_get_by_name - find a device by its name * dev_get_by_name - find a device by its name
* @name: name to find * @name: name to find
* *
* Find an interface by name. This can be called from any * Find an interface by name. This can be called from any
* context and does its own locking. The returned handle has * context and does its own locking. The returned handle has
* the usage count incremented and the caller must use dev_put() to * the usage count incremented and the caller must use dev_put() to
* release it when it is no longer needed. %NULL is returned if no * release it when it is no longer needed. %NULL is returned if no
...@@ -443,7 +440,7 @@ struct net_device *dev_get_by_name(const char *name) ...@@ -443,7 +440,7 @@ struct net_device *dev_get_by_name(const char *name)
return dev; return dev;
} }
/* /*
Return value is changed to int to prevent illegal usage in future. Return value is changed to int to prevent illegal usage in future.
It is still legal to use to check for device existence. It is still legal to use to check for device existence.
...@@ -460,9 +457,8 @@ struct net_device *dev_get_by_name(const char *name) ...@@ -460,9 +457,8 @@ struct net_device *dev_get_by_name(const char *name)
* caller must hold the rtnl semaphore. * caller must hold the rtnl semaphore.
* *
* This function primarily exists for back compatibility with older * This function primarily exists for back compatibility with older
* drivers. * drivers.
*/ */
int dev_get(const char *name) int dev_get(const char *name)
{ {
struct net_device *dev; struct net_device *dev;
...@@ -484,15 +480,14 @@ int dev_get(const char *name) ...@@ -484,15 +480,14 @@ int dev_get(const char *name)
* or @dev_base_lock. * or @dev_base_lock.
*/ */
-struct net_device * __dev_get_by_index(int ifindex)
+struct net_device *__dev_get_by_index(int ifindex)
 {
         struct net_device *dev;
 
-        for (dev = dev_base; dev != NULL; dev = dev->next) {
+        for (dev = dev_base; dev; dev = dev->next)
                 if (dev->ifindex == ifindex)
-                        return dev;
-        }
-        return NULL;
+                        break;
+        return dev;
 }
...@@ -501,12 +496,12 @@ struct net_device * __dev_get_by_index(int ifindex) ...@@ -501,12 +496,12 @@ struct net_device * __dev_get_by_index(int ifindex)
* @ifindex: index of device * @ifindex: index of device
* *
* Search for an interface by index. Returns NULL if the device * Search for an interface by index. Returns NULL if the device
* is not found or a pointer to the device. The device returned has * is not found or a pointer to the device. The device returned has
* had a reference added and the pointer is safe until the user calls * had a reference added and the pointer is safe until the user calls
* dev_put to indicate they have finished with it. * dev_put to indicate they have finished with it.
*/ */
struct net_device * dev_get_by_index(int ifindex) struct net_device *dev_get_by_index(int ifindex)
{ {
struct net_device *dev; struct net_device *dev;
...@@ -538,17 +533,16 @@ struct net_device *dev_getbyhwaddr(unsigned short type, char *ha) ...@@ -538,17 +533,16 @@ struct net_device *dev_getbyhwaddr(unsigned short type, char *ha)
ASSERT_RTNL(); ASSERT_RTNL();
for (dev = dev_base; dev != NULL; dev = dev->next) { for (dev = dev_base; dev; dev = dev->next)
if (dev->type == type && if (dev->type == type &&
memcmp(dev->dev_addr, ha, dev->addr_len) == 0) !memcmp(dev->dev_addr, ha, dev->addr_len))
return dev; break;
} return dev;
return NULL;
} }
/** /**
* dev_alloc_name - allocate a name for a device * dev_alloc_name - allocate a name for a device
* @dev: device * @dev: device
* @name: name format string * @name: name format string
* *
* Passed a format string - eg "lt%d" it will try and find a suitable * Passed a format string - eg "lt%d" it will try and find a suitable
...@@ -570,15 +564,15 @@ int dev_alloc_name(struct net_device *dev, const char *name) ...@@ -570,15 +564,15 @@ int dev_alloc_name(struct net_device *dev, const char *name)
* characters, or no "%" characters at all. * characters, or no "%" characters at all.
*/ */
p = strchr(name, '%'); p = strchr(name, '%');
if (p && (p[1] != 'd' || strchr(p+2, '%'))) if (p && (p[1] != 'd' || strchr(p + 2, '%')))
return -EINVAL; return -EINVAL;
/* /*
* If you need over 100 please also fix the algorithm... * If you need over 100 please also fix the algorithm...
*/ */
for (i = 0; i < 100; i++) { for (i = 0; i < 100; i++) {
snprintf(buf,sizeof(buf),name,i); snprintf(buf, sizeof(buf), name, i);
if (__dev_get_by_name(buf) == NULL) { if (!__dev_get_by_name(buf)) {
strcpy(dev->name, buf); strcpy(dev->name, buf);
return i; return i;
} }
...@@ -593,7 +587,7 @@ int dev_alloc_name(struct net_device *dev, const char *name) ...@@ -593,7 +587,7 @@ int dev_alloc_name(struct net_device *dev, const char *name)
* *
* Passed a format string, eg. "lt%d", it will allocate a network device * Passed a format string, eg. "lt%d", it will allocate a network device
* and space for the name. %NULL is returned if no memory is available. * and space for the name. %NULL is returned if no memory is available.
* If the allocation succeeds then the name is assigned and the * If the allocation succeeds then the name is assigned and the
* device pointer returned. %NULL is returned if the name allocation * device pointer returned. %NULL is returned if the name allocation
* failed. The cause of an error is returned as a negative errno code * failed. The cause of an error is returned as a negative errno code
* in the variable @err points to. * in the variable @err points to.
...@@ -604,16 +598,17 @@ int dev_alloc_name(struct net_device *dev, const char *name) ...@@ -604,16 +598,17 @@ int dev_alloc_name(struct net_device *dev, const char *name)
 struct net_device *dev_alloc(const char *name, int *err)
 {
-        struct net_device *dev=kmalloc(sizeof(struct net_device), GFP_KERNEL);
-        if (dev == NULL) {
-                *err = -ENOBUFS;
-                return NULL;
-        }
-        memset(dev, 0, sizeof(struct net_device));
-        *err = dev_alloc_name(dev, name);
-        if (*err < 0) {
-                kfree(dev);
-                return NULL;
+        struct net_device *dev = kmalloc(sizeof(*dev), GFP_KERNEL);
+
+        if (!dev)
+                *err = -ENOBUFS;
+        else {
+                memset(dev, 0, sizeof(*dev));
+                *err = dev_alloc_name(dev, name);
+                if (*err < 0) {
+                        kfree(dev);
+                        dev = NULL;
+                }
         }
         return dev;
 }
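Aside (editorial illustration): dev_alloc() defers naming to dev_alloc_name(), whose probe loop appears further up — substitute unit numbers into a "%d" template until an unused name is found. A user-space model of that loop; the "bond%d" template and the taken[] names are made up:

    #include <stdio.h>
    #include <string.h>

    static const char *taken[] = { "bond0", "bond1" };

    static int name_in_use(const char *name)
    {
            size_t i;
            for (i = 0; i < sizeof(taken) / sizeof(taken[0]); i++)
                    if (!strcmp(taken[i], name))
                            return 1;
            return 0;
    }

    int main(void)
    {
            char buf[16];
            int i;

            for (i = 0; i < 100; i++) {     /* same 0..99 bound as dev_alloc_name() */
                    snprintf(buf, sizeof(buf), "bond%d", i);
                    if (!name_in_use(buf)) {
                            printf("allocated %s (unit %d)\n", buf, i);
                            return 0;
                    }
            }
            return 1;
    }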
...@@ -626,10 +621,9 @@ struct net_device *dev_alloc(const char *name, int *err) ...@@ -626,10 +621,9 @@ struct net_device *dev_alloc(const char *name, int *err)
* the notifier chains for netdev_chain and sends a NEWLINK message * the notifier chains for netdev_chain and sends a NEWLINK message
* to the routing socket. * to the routing socket.
*/ */
void netdev_state_change(struct net_device *dev) void netdev_state_change(struct net_device *dev)
{ {
if (dev->flags&IFF_UP) { if (dev->flags & IFF_UP) {
notifier_call_chain(&netdev_chain, NETDEV_CHANGE, dev); notifier_call_chain(&netdev_chain, NETDEV_CHANGE, dev);
rtmsg_ifinfo(RTM_NEWLINK, dev, 0); rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
} }
...@@ -661,13 +655,14 @@ extern inline void dev_load(const char *unused){;} ...@@ -661,13 +655,14 @@ extern inline void dev_load(const char *unused){;}
static int default_rebuild_header(struct sk_buff *skb) static int default_rebuild_header(struct sk_buff *skb)
{ {
printk(KERN_DEBUG "%s: default_rebuild_header called -- BUG!\n", skb->dev ? skb->dev->name : "NULL!!!"); printk(KERN_DEBUG "%s: default_rebuild_header called -- BUG!\n",
skb->dev ? skb->dev->name : "NULL!!!");
kfree_skb(skb); kfree_skb(skb);
return 1; return 1;
} }
/** /**
* dev_open - prepare an interface for use. * dev_open - prepare an interface for use.
* @dev: device to open * @dev: device to open
* *
* Takes a device from down to up state. The device's private open * Takes a device from down to up state. The device's private open
...@@ -678,7 +673,6 @@ static int default_rebuild_header(struct sk_buff *skb) ...@@ -678,7 +673,6 @@ static int default_rebuild_header(struct sk_buff *skb)
* Calling this function on an active interface is a nop. On a failure * Calling this function on an active interface is a nop. On a failure
* a negative errno code is returned. * a negative errno code is returned.
*/ */
int dev_open(struct net_device *dev) int dev_open(struct net_device *dev)
{ {
int ret = 0; int ret = 0;
...@@ -687,7 +681,7 @@ int dev_open(struct net_device *dev) ...@@ -687,7 +681,7 @@ int dev_open(struct net_device *dev)
* Is it already up? * Is it already up?
*/ */
if (dev->flags&IFF_UP) if (dev->flags & IFF_UP)
return 0; return 0;
/* /*
...@@ -702,7 +696,7 @@ int dev_open(struct net_device *dev) ...@@ -702,7 +696,7 @@ int dev_open(struct net_device *dev)
if (try_inc_mod_count(dev->owner)) { if (try_inc_mod_count(dev->owner)) {
if (dev->open) { if (dev->open) {
ret = dev->open(dev); ret = dev->open(dev);
if (ret != 0 && dev->owner) if (ret && dev->owner)
__MOD_DEC_USE_COUNT(dev->owner); __MOD_DEC_USE_COUNT(dev->owner);
} }
} else { } else {
...@@ -712,9 +706,8 @@ int dev_open(struct net_device *dev) ...@@ -712,9 +706,8 @@ int dev_open(struct net_device *dev)
/* /*
* If it went open OK then: * If it went open OK then:
*/ */
if (ret == 0) if (!ret) {
{
/* /*
* Set the flags. * Set the flags.
*/ */
...@@ -723,7 +716,7 @@ int dev_open(struct net_device *dev) ...@@ -723,7 +716,7 @@ int dev_open(struct net_device *dev)
set_bit(__LINK_STATE_START, &dev->state); set_bit(__LINK_STATE_START, &dev->state);
/* /*
* Initialize multicasting status * Initialize multicasting status
*/ */
dev_mc_upload(dev); dev_mc_upload(dev);
...@@ -737,7 +730,7 @@ int dev_open(struct net_device *dev) ...@@ -737,7 +730,7 @@ int dev_open(struct net_device *dev)
*/ */
notifier_call_chain(&netdev_chain, NETDEV_UP, dev); notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
} }
return(ret); return ret;
} }
#ifdef CONFIG_NET_FASTROUTE #ifdef CONFIG_NET_FASTROUTE
...@@ -747,7 +740,7 @@ static void dev_do_clear_fastroute(struct net_device *dev) ...@@ -747,7 +740,7 @@ static void dev_do_clear_fastroute(struct net_device *dev)
if (dev->accept_fastpath) { if (dev->accept_fastpath) {
int i; int i;
for (i=0; i<=NETDEV_FASTROUTE_HMASK; i++) { for (i = 0; i <= NETDEV_FASTROUTE_HMASK; i++) {
struct dst_entry *dst; struct dst_entry *dst;
write_lock_irq(&dev->fastpath_lock); write_lock_irq(&dev->fastpath_lock);
...@@ -777,15 +770,14 @@ void dev_clear_fastroute(struct net_device *dev) ...@@ -777,15 +770,14 @@ void dev_clear_fastroute(struct net_device *dev)
* dev_close - shutdown an interface. * dev_close - shutdown an interface.
* @dev: device to shutdown * @dev: device to shutdown
* *
* This function moves an active device into down state. A * This function moves an active device into down state. A
* %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
* is then deactivated and finally a %NETDEV_DOWN is sent to the notifier * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
* chain. * chain.
*/ */
int dev_close(struct net_device *dev) int dev_close(struct net_device *dev)
{ {
if (!(dev->flags&IFF_UP)) if (!(dev->flags & IFF_UP))
return 0; return 0;
/* /*
...@@ -818,7 +810,6 @@ int dev_close(struct net_device *dev) ...@@ -818,7 +810,6 @@ int dev_close(struct net_device *dev)
* We allow it to be called even after a DETACH hot-plug * We allow it to be called even after a DETACH hot-plug
* event. * event.
*/ */
if (dev->stop) if (dev->stop)
dev->stop(dev); dev->stop(dev);
...@@ -842,7 +833,7 @@ int dev_close(struct net_device *dev) ...@@ -842,7 +833,7 @@ int dev_close(struct net_device *dev)
if (dev->owner) if (dev->owner)
__MOD_DEC_USE_COUNT(dev->owner); __MOD_DEC_USE_COUNT(dev->owner);
return(0); return 0;
} }
...@@ -850,7 +841,7 @@ int dev_close(struct net_device *dev) ...@@ -850,7 +841,7 @@ int dev_close(struct net_device *dev)
* Device change register/unregister. These are not inline or static * Device change register/unregister. These are not inline or static
* as we export them to the world. * as we export them to the world.
*/ */
/** /**
* register_netdevice_notifier - register a network notifier block * register_netdevice_notifier - register a network notifier block
* @nb: notifier * @nb: notifier
...@@ -878,7 +869,7 @@ int register_netdevice_notifier(struct notifier_block *nb) ...@@ -878,7 +869,7 @@ int register_netdevice_notifier(struct notifier_block *nb)
int unregister_netdevice_notifier(struct notifier_block *nb) int unregister_netdevice_notifier(struct notifier_block *nb)
{ {
return notifier_chain_unregister(&netdev_chain,nb); return notifier_chain_unregister(&netdev_chain, nb);
} }
/* /*
@@ -892,16 +883,14 @@ void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
         do_gettimeofday(&skb->stamp);
 
         br_read_lock(BR_NETPROTO_LOCK);
-        for (ptype = ptype_all; ptype!=NULL; ptype = ptype->next)
-        {
+        for (ptype = ptype_all; ptype; ptype = ptype->next) {
                 /* Never send packets back to the socket
                  * they originated from - MvS (miquels@drinkel.ow.org)
                  */
                 if ((ptype->dev == dev || !ptype->dev) &&
-                        ((struct sock *)ptype->data != skb->sk))
-                {
-                        struct sk_buff *skb2;
-                        if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL)
+                    (struct sock *)ptype->data != skb->sk) {
+                        struct sk_buff *skb2= skb_clone(skb, GFP_ATOMIC);
+                        if (!skb2)
                                 break;
 
                         /* skb->nh should be correctly
@@ -910,9 +899,12 @@ void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
                          */
                         skb2->mac.raw = skb2->data;
 
-                        if (skb2->nh.raw < skb2->data || skb2->nh.raw > skb2->tail) {
+                        if (skb2->nh.raw < skb2->data ||
+                            skb2->nh.raw > skb2->tail) {
                                 if (net_ratelimit())
-                                        printk(KERN_DEBUG "protocol %04x is buggy, dev %s\n", skb2->protocol, dev->name);
+                                        printk(KERN_DEBUG "protocol %04x is "
+                                               "buggy, dev %s\n",
+                                               skb2->protocol, dev->name);
                                 skb2->nh.raw = skb2->data;
                         }
@@ -928,12 +920,11 @@
  * If it failed by some reason, ignore and send skb with wrong
  * checksum.
  */
-struct sk_buff * skb_checksum_help(struct sk_buff *skb)
+struct sk_buff *skb_checksum_help(struct sk_buff *skb)
 {
-        int offset;
         unsigned int csum;
+        int offset = skb->h.raw - skb->data;
 
-        offset = skb->h.raw - skb->data;
         if (offset > (int)skb->len)
                 BUG();
         csum = skb_checksum(skb, offset, skb->len-offset, 0);
@@ -941,7 +932,7 @@ struct sk_buff * skb_checksum_help(struct sk_buff *skb)
         offset = skb->tail - skb->h.raw;
         if (offset <= 0)
                 BUG();
-        if (skb->csum+2 > offset)
+        if (skb->csum + 2 > offset)
                 BUG();
 
         *(u16*)(skb->h.raw + skb->csum) = csum_fold(csum);
@@ -955,15 +946,14 @@
  * 2. No high memory really exists on this machine.
  */
-static inline int
-illegal_highdma(struct net_device *dev, struct sk_buff *skb)
+static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
 {
         int i;
 
-        if (dev->features&NETIF_F_HIGHDMA)
+        if (dev->features & NETIF_F_HIGHDMA)
                 return 0;
 
-        for (i=0; i<skb_shinfo(skb)->nr_frags; i++)
+        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                 if (skb_shinfo(skb)->frags[i].page >= highmem_start_page)
                         return 1;
/** /**
* dev_queue_xmit - transmit a buffer * dev_queue_xmit - transmit a buffer
* @skb: buffer to transmit * @skb: buffer to transmit
* *
* Queue a buffer for transmission to a network device. The caller must * Queue a buffer for transmission to a network device. The caller must
* have set the device and priority and built the buffer before calling this * have set the device and priority and built the buffer before calling
* function. The function can be called from an interrupt. * this function. The function can be called from an interrupt.
* *
* A negative errno code is returned on a failure. A success does not * A negative errno code is returned on a failure. A success does not
* guarantee the frame will be transmitted as it may be dropped due * guarantee the frame will be transmitted as it may be dropped due
...@@ -989,61 +979,60 @@ illegal_highdma(struct net_device *dev, struct sk_buff *skb) ...@@ -989,61 +979,60 @@ illegal_highdma(struct net_device *dev, struct sk_buff *skb)
int dev_queue_xmit(struct sk_buff *skb) int dev_queue_xmit(struct sk_buff *skb)
{ {
struct net_device *dev = skb->dev; struct net_device *dev = skb->dev;
struct Qdisc *q; struct Qdisc *q;
int rc = -ENOMEM;
if (skb_shinfo(skb)->frag_list && if (skb_shinfo(skb)->frag_list &&
!(dev->features&NETIF_F_FRAGLIST) && !(dev->features & NETIF_F_FRAGLIST) &&
skb_linearize(skb, GFP_ATOMIC) != 0) { skb_linearize(skb, GFP_ATOMIC))
kfree_skb(skb); goto out_kfree_skb;
return -ENOMEM;
}
/* Fragmented skb is linearized if device does not support SG, /* Fragmented skb is linearized if device does not support SG,
* or if at least one of fragments is in highmem and device * or if at least one of fragments is in highmem and device
* does not support DMA from it. * does not support DMA from it.
*/ */
if (skb_shinfo(skb)->nr_frags && if (skb_shinfo(skb)->nr_frags &&
(!(dev->features&NETIF_F_SG) || illegal_highdma(dev, skb)) && (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
skb_linearize(skb, GFP_ATOMIC) != 0) { skb_linearize(skb, GFP_ATOMIC))
kfree_skb(skb); goto out_kfree_skb;
return -ENOMEM;
}
/* If packet is not checksummed and device does not support /* If packet is not checksummed and device does not support
* checksumming for this protocol, complete checksumming here. * checksumming for this protocol, complete checksumming here.
*/ */
if (skb->ip_summed == CHECKSUM_HW && if (skb->ip_summed == CHECKSUM_HW &&
(!(dev->features&(NETIF_F_HW_CSUM|NETIF_F_NO_CSUM)) && (!(dev->features & (NETIF_F_HW_CSUM | NETIF_F_NO_CSUM)) &&
(!(dev->features&NETIF_F_IP_CSUM) || (!(dev->features & NETIF_F_IP_CSUM) ||
skb->protocol != htons(ETH_P_IP)))) { skb->protocol != htons(ETH_P_IP)))) {
if ((skb = skb_checksum_help(skb)) == NULL) if ((skb = skb_checksum_help(skb)) == NULL)
return -ENOMEM; goto out;
} }
/* Grab device queue */ /* Grab device queue */
spin_lock_bh(&dev->queue_lock); spin_lock_bh(&dev->queue_lock);
q = dev->qdisc; q = dev->qdisc;
if (q->enqueue) { if (q->enqueue) {
int ret = q->enqueue(skb, q); rc = q->enqueue(skb, q);
qdisc_run(dev); qdisc_run(dev);
spin_unlock_bh(&dev->queue_lock); spin_unlock_bh(&dev->queue_lock);
return ret == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : ret; rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
goto out;
} }
/* The device has no queue. Common case for software devices: /* The device has no queue. Common case for software devices:
loopback, all the sorts of tunnels... loopback, all the sorts of tunnels...
Really, it is unlikely that xmit_lock protection is necessary here. Really, it is unlikely that xmit_lock protection is necessary here.
(f.e. loopback and IP tunnels are clean ignoring statistics counters.) (f.e. loopback and IP tunnels are clean ignoring statistics
counters.)
However, it is possible, that they rely on protection However, it is possible, that they rely on protection
made by us here. made by us here.
Check this and shot the lock. It is not prone from deadlocks. Check this and shot the lock. It is not prone from deadlocks.
Either shot noqueue qdisc, it is even simpler 8) Either shot noqueue qdisc, it is even simpler 8)
*/ */
if (dev->flags&IFF_UP) { if (dev->flags & IFF_UP) {
int cpu = smp_processor_id(); int cpu = smp_processor_id();
if (dev->xmit_lock_owner != cpu) { if (dev->xmit_lock_owner != cpu) {
...@@ -1059,30 +1048,36 @@ int dev_queue_xmit(struct sk_buff *skb) ...@@ -1059,30 +1048,36 @@ int dev_queue_xmit(struct sk_buff *skb)
if (!netif_queue_stopped(dev)) { if (!netif_queue_stopped(dev)) {
if (netdev_nit) if (netdev_nit)
dev_queue_xmit_nit(skb,dev); dev_queue_xmit_nit(skb, dev);
if (dev->hard_start_xmit(skb, dev) == 0) { rc = 0;
if (!dev->hard_start_xmit(skb, dev)) {
dev->xmit_lock_owner = -1; dev->xmit_lock_owner = -1;
spin_unlock_bh(&dev->xmit_lock); spin_unlock_bh(&dev->xmit_lock);
return 0; goto out;
} }
} }
dev->xmit_lock_owner = -1; dev->xmit_lock_owner = -1;
spin_unlock_bh(&dev->xmit_lock); spin_unlock_bh(&dev->xmit_lock);
if (net_ratelimit()) if (net_ratelimit())
printk(KERN_DEBUG "Virtual device %s asks to queue packet!\n", dev->name); printk(KERN_DEBUG "Virtual device %s asks to "
kfree_skb(skb); "queue packet!\n", dev->name);
return -ENETDOWN; goto out_enetdown;
} else { } else {
/* Recursion is detected! It is possible, unfortunately */ /* Recursion is detected! It is possible,
* unfortunately */
if (net_ratelimit()) if (net_ratelimit())
printk(KERN_DEBUG "Dead loop on virtual device %s, fix it urgently!\n", dev->name); printk(KERN_DEBUG "Dead loop on virtual device "
"%s, fix it urgently!\n", dev->name);
} }
} }
spin_unlock_bh(&dev->queue_lock); spin_unlock_bh(&dev->queue_lock);
out_enetdown:
rc = -ENETDOWN;
out_kfree_skb:
kfree_skb(skb); kfree_skb(skb);
return -ENETDOWN; out:
return rc;
} }
@@ -1107,7 +1102,7 @@ struct netif_rx_stats netdev_rx_stat[NR_CPUS];
#ifdef CONFIG_NET_HW_FLOWCONTROL #ifdef CONFIG_NET_HW_FLOWCONTROL
atomic_t netdev_dropping = ATOMIC_INIT(0); atomic_t netdev_dropping = ATOMIC_INIT(0);
static unsigned long netdev_fc_mask = 1; static unsigned long netdev_fc_mask = 1;
unsigned long netdev_fc_xoff = 0; unsigned long netdev_fc_xoff;
spinlock_t netdev_fc_lock = SPIN_LOCK_UNLOCKED; spinlock_t netdev_fc_lock = SPIN_LOCK_UNLOCKED;
static struct static struct
@@ -1116,7 +1111,8 @@ static struct
struct net_device *dev; struct net_device *dev;
} netdev_fc_slots[BITS_PER_LONG]; } netdev_fc_slots[BITS_PER_LONG];
int netdev_register_fc(struct net_device *dev, void (*stimul)(struct net_device *dev)) int netdev_register_fc(struct net_device *dev,
void (*stimul)(struct net_device *dev))
{ {
int bit = 0; int bit = 0;
unsigned long flags; unsigned long flags;
@@ -1156,7 +1152,7 @@ static void netdev_wakeup(void)
netdev_fc_xoff = 0; netdev_fc_xoff = 0;
while (xoff) { while (xoff) {
int i = ffz(~xoff); int i = ffz(~xoff);
xoff &= ~(1<<i); xoff &= ~(1 << i);
netdev_fc_slots[i].stimul(netdev_fc_slots[i].dev); netdev_fc_slots[i].stimul(netdev_fc_slots[i].dev);
} }
spin_unlock(&netdev_fc_lock); spin_unlock(&netdev_fc_lock);
@@ -1172,7 +1168,7 @@ static void get_sample_stats(int cpu)
int blog = softnet_data[cpu].input_pkt_queue.qlen; int blog = softnet_data[cpu].input_pkt_queue.qlen;
int avg_blog = softnet_data[cpu].avg_blog; int avg_blog = softnet_data[cpu].avg_blog;
avg_blog = (avg_blog >> 1)+ (blog >> 1); avg_blog = (avg_blog >> 1) + (blog >> 1);
if (avg_blog > mod_cong) { if (avg_blog > mod_cong) {
/* Above moderate congestion levels. */ /* Above moderate congestion levels. */
@@ -1191,7 +1187,7 @@ static void get_sample_stats(int cpu)
if (rq < avg_blog) /* unlucky bastard */ if (rq < avg_blog) /* unlucky bastard */
softnet_data[cpu].cng_level = NET_RX_CN_HIGH; softnet_data[cpu].cng_level = NET_RX_CN_HIGH;
#endif #endif
} else if (avg_blog > no_cong) } else if (avg_blog > no_cong)
softnet_data[cpu].cng_level = NET_RX_CN_LOW; softnet_data[cpu].cng_level = NET_RX_CN_LOW;
else /* no congestion */ else /* no congestion */
softnet_data[cpu].cng_level = NET_RX_SUCCESS; softnet_data[cpu].cng_level = NET_RX_SUCCESS;
@@ -1219,17 +1215,16 @@ static void sample_queue(unsigned long dummy)
* *
* This function receives a packet from a device driver and queues it for * This function receives a packet from a device driver and queues it for
* the upper (protocol) levels to process. It always succeeds. The buffer * the upper (protocol) levels to process. It always succeeds. The buffer
* may be dropped during processing for congestion control or by the * may be dropped during processing for congestion control or by the
* protocol layers. * protocol layers.
* *
* return values: * return values:
* NET_RX_SUCCESS (no congestion) * NET_RX_SUCCESS (no congestion)
* NET_RX_CN_LOW (low congestion) * NET_RX_CN_LOW (low congestion)
* NET_RX_CN_MOD (moderate congestion) * NET_RX_CN_MOD (moderate congestion)
* NET_RX_CN_HIGH (high congestion) * NET_RX_CN_HIGH (high congestion)
* NET_RX_DROP (packet was dropped) * NET_RX_DROP (packet was dropped)
* *
*
*/ */
int netif_rx(struct sk_buff *skb) int netif_rx(struct sk_buff *skb)
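The kernel-doc block above spells out the congestion codes netif_rx() may return. Purely for orientation (this sketch is not part of the patch; dev, priv, pkt_len and pkt_data are assumed driver details), a receive interrupt handler of this era would typically feed the function like so:

	struct sk_buff *skb = dev_alloc_skb(pkt_len + 2);

	if (!skb) {
		priv->stats.rx_dropped++;	/* assumed driver-private counter */
		return;
	}
	skb_reserve(skb, 2);			/* align the IP header */
	memcpy(skb_put(skb, pkt_len), pkt_data, pkt_len);
	skb->dev = dev;
	skb->protocol = eth_type_trans(skb, dev);
	if (netif_rx(skb) == NET_RX_DROP)	/* backlog full or throttled */
		priv->stats.rx_dropped++;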
@@ -1238,7 +1233,7 @@ int netif_rx(struct sk_buff *skb)
struct softnet_data *queue; struct softnet_data *queue;
unsigned long flags; unsigned long flags;
if (skb->stamp.tv_sec == 0) if (!skb->stamp.tv_sec)
do_gettimeofday(&skb->stamp); do_gettimeofday(&skb->stamp);
/* The code is rearranged so that the path is the most /* The code is rearranged so that the path is the most
@@ -1256,7 +1251,7 @@ int netif_rx(struct sk_buff *skb)
enqueue: enqueue:
dev_hold(skb->dev); dev_hold(skb->dev);
__skb_queue_tail(&queue->input_pkt_queue,skb); __skb_queue_tail(&queue->input_pkt_queue, skb);
local_irq_restore(flags); local_irq_restore(flags);
#ifndef OFFLINE_SAMPLE #ifndef OFFLINE_SAMPLE
get_sample_stats(this_cpu); get_sample_stats(this_cpu);
@@ -1276,7 +1271,7 @@ int netif_rx(struct sk_buff *skb)
goto enqueue; goto enqueue;
} }
if (queue->throttle == 0) { if (!queue->throttle) {
queue->throttle = 1; queue->throttle = 1;
netdev_rx_stat[this_cpu].throttled++; netdev_rx_stat[this_cpu].throttled++;
#ifdef CONFIG_NET_HW_FLOWCONTROL #ifdef CONFIG_NET_HW_FLOWCONTROL
@@ -1295,21 +1290,19 @@ int netif_rx(struct sk_buff *skb)
/* Deliver skb to an old protocol, which is not threaded well /* Deliver skb to an old protocol, which is not threaded well
or which do not understand shared skbs. or which do not understand shared skbs.
*/ */
static int deliver_to_old_ones(struct packet_type *pt, struct sk_buff *skb, int last) static int deliver_to_old_ones(struct packet_type *pt,
struct sk_buff *skb, int last)
{ {
static spinlock_t net_bh_lock = SPIN_LOCK_UNLOCKED; static spinlock_t net_bh_lock = SPIN_LOCK_UNLOCKED;
int ret = NET_RX_DROP; int ret = NET_RX_DROP;
if (!last) { if (!last) {
skb = skb_clone(skb, GFP_ATOMIC); skb = skb_clone(skb, GFP_ATOMIC);
if (skb == NULL) if (!skb)
return ret; goto out;
}
if (skb_is_nonlinear(skb) && skb_linearize(skb, GFP_ATOMIC) != 0) {
kfree_skb(skb);
return ret;
} }
if (skb_is_nonlinear(skb) && skb_linearize(skb, GFP_ATOMIC))
goto out_kfree;
/* The assumption (correct one) is that old protocols /* The assumption (correct one) is that old protocols
did not depened on BHs different of NET_BH and TIMER_BH. did not depened on BHs different of NET_BH and TIMER_BH.
@@ -1325,7 +1318,11 @@ static int deliver_to_old_ones(struct packet_type *pt, struct sk_buff *skb, int
tasklet_hi_enable(bh_task_vec+TIMER_BH); tasklet_hi_enable(bh_task_vec+TIMER_BH);
spin_unlock(&net_bh_lock); spin_unlock(&net_bh_lock);
out:
return ret; return ret;
out_kfree:
kfree_skb(skb);
goto out;
} }
static __inline__ void skb_bond(struct sk_buff *skb) static __inline__ void skb_bond(struct sk_buff *skb)
@@ -1348,11 +1345,11 @@ static void net_tx_action(struct softirq_action *h)
softnet_data[cpu].completion_queue = NULL; softnet_data[cpu].completion_queue = NULL;
local_irq_enable(); local_irq_enable();
while (clist != NULL) { while (clist) {
struct sk_buff *skb = clist; struct sk_buff *skb = clist;
clist = clist->next; clist = clist->next;
BUG_TRAP(atomic_read(&skb->users) == 0); BUG_TRAP(!atomic_read(&skb->users));
__kfree_skb(skb); __kfree_skb(skb);
} }
} }
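The completion queue drained above is filled by drivers that must release transmitted skbs from interrupt context. A minimal producer-side sketch, not part of this diff, with the driver structure and field names assumed:

	static void my_tx_complete(struct my_priv *priv)
	{
		struct sk_buff *skb = priv->tx_skb;	/* assumed bookkeeping */

		priv->tx_skb = NULL;
		/* Chains the skb onto this CPU's completion_queue and raises
		 * NET_TX_SOFTIRQ; net_tx_action() then does the __kfree_skb(). */
		dev_kfree_skb_irq(skb);
		netif_wake_queue(priv->dev);
	}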
@@ -1365,7 +1362,7 @@ static void net_tx_action(struct softirq_action *h)
softnet_data[cpu].output_queue = NULL; softnet_data[cpu].output_queue = NULL;
local_irq_enable(); local_irq_enable();
while (head != NULL) { while (head) {
struct net_device *dev = head; struct net_device *dev = head;
head = head->next_sched; head = head->next_sched;
@@ -1389,7 +1386,6 @@ static void net_tx_action(struct softirq_action *h)
* Make a function call that is atomic with respect to the protocol * Make a function call that is atomic with respect to the protocol
* layers. * layers.
*/ */
void net_call_rx_atomic(void (*fn)(void)) void net_call_rx_atomic(void (*fn)(void))
{ {
br_write_lock_bh(BR_NETPROTO_LOCK); br_write_lock_bh(BR_NETPROTO_LOCK);
@@ -1421,11 +1417,12 @@ static __inline__ int handle_bridge(struct sk_buff *skb,
#ifdef CONFIG_NET_DIVERT #ifdef CONFIG_NET_DIVERT
static inline void handle_diverter(struct sk_buff *skb) static inline int handle_diverter(struct sk_buff *skb)
{ {
/* if diversion is supported on device, then divert */ /* if diversion is supported on device, then divert */
if (skb->dev->divert && skb->dev->divert->divert) if (skb->dev->divert && skb->dev->divert->divert)
divert_frame(skb); divert_frame(skb);
return 0;
} }
#endif /* CONFIG_NET_DIVERT */ #endif /* CONFIG_NET_DIVERT */
@@ -1435,7 +1432,7 @@ int netif_receive_skb(struct sk_buff *skb)
int ret = NET_RX_DROP; int ret = NET_RX_DROP;
unsigned short type = skb->protocol; unsigned short type = skb->protocol;
if (skb->stamp.tv_sec == 0) if (!skb->stamp.tv_sec)
do_gettimeofday(&skb->stamp); do_gettimeofday(&skb->stamp);
skb_bond(skb); skb_bond(skb);
@@ -1456,10 +1453,12 @@ int netif_receive_skb(struct sk_buff *skb)
if (!ptype->dev || ptype->dev == skb->dev) { if (!ptype->dev || ptype->dev == skb->dev) {
if (pt_prev) { if (pt_prev) {
if (!pt_prev->data) { if (!pt_prev->data) {
ret = deliver_to_old_ones(pt_prev, skb, 0); ret = deliver_to_old_ones(pt_prev,
skb, 0);
} else { } else {
atomic_inc(&skb->users); atomic_inc(&skb->users);
ret = pt_prev->func(skb, skb->dev, pt_prev); ret = pt_prev->func(skb, skb->dev,
pt_prev);
} }
} }
pt_prev = ptype; pt_prev = ptype;
@@ -1470,23 +1469,24 @@ int netif_receive_skb(struct sk_buff *skb)
if (skb->dev->divert && skb->dev->divert->divert) if (skb->dev->divert && skb->dev->divert->divert)
ret = handle_diverter(skb); ret = handle_diverter(skb);
#endif /* CONFIG_NET_DIVERT */ #endif /* CONFIG_NET_DIVERT */
#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
if (skb->dev->br_port != NULL && if (skb->dev->br_port && br_handle_frame_hook) {
br_handle_frame_hook != NULL) {
return handle_bridge(skb, pt_prev); return handle_bridge(skb, pt_prev);
} }
#endif #endif
for (ptype=ptype_base[ntohs(type)&15];ptype;ptype=ptype->next) { for (ptype = ptype_base[ntohs(type) & 15]; ptype; ptype = ptype->next) {
if (ptype->type == type && if (ptype->type == type &&
(!ptype->dev || ptype->dev == skb->dev)) { (!ptype->dev || ptype->dev == skb->dev)) {
if (pt_prev) { if (pt_prev) {
if (!pt_prev->data) { if (!pt_prev->data) {
ret = deliver_to_old_ones(pt_prev, skb, 0); ret = deliver_to_old_ones(pt_prev,
skb, 0);
} else { } else {
atomic_inc(&skb->users); atomic_inc(&skb->users);
ret = pt_prev->func(skb, skb->dev, pt_prev); ret = pt_prev->func(skb, skb->dev,
pt_prev);
} }
} }
pt_prev = ptype; pt_prev = ptype;
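Both delivery loops in netif_receive_skb() walk lists that protocols populate with dev_add_pack(). For orientation only, a tap written in the pre-2.6 style that these loops would end up calling might look roughly like this; my_rcv and the init hook are assumptions:

	static int my_rcv(struct sk_buff *skb, struct net_device *dev,
			  struct packet_type *pt)
	{
		/* ... look at the frame ... */
		kfree_skb(skb);		/* we own the reference taken for us */
		return 0;
	}

	static struct packet_type my_ptype = {
		type:	__constant_htons(ETH_P_ALL),	/* see every protocol */
		func:	my_rcv,
	};

	static int __init my_tap_init(void)
	{
		dev_add_pack(&my_ptype);
		return 0;
	}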
@@ -1524,7 +1524,7 @@ static int process_backlog(struct net_device *backlog_dev, int *budget)
local_irq_disable(); local_irq_disable();
skb = __skb_dequeue(&queue->input_pkt_queue); skb = __skb_dequeue(&queue->input_pkt_queue);
if (skb == NULL) if (!skb)
goto job_done; goto job_done;
local_irq_enable(); local_irq_enable();
@@ -1540,7 +1540,8 @@ static int process_backlog(struct net_device *backlog_dev, int *budget)
break; break;
#ifdef CONFIG_NET_HW_FLOWCONTROL #ifdef CONFIG_NET_HW_FLOWCONTROL
if (queue->throttle && queue->input_pkt_queue.qlen < no_cong_thresh ) { if (queue->throttle &&
queue->input_pkt_queue.qlen < no_cong_thresh ) {
if (atomic_dec_and_test(&netdev_dropping)) { if (atomic_dec_and_test(&netdev_dropping)) {
queue->throttle = 0; queue->throttle = 0;
netdev_wakeup(); netdev_wakeup();
@@ -1590,7 +1591,8 @@ static void net_rx_action(struct softirq_action *h)
local_irq_enable(); local_irq_enable();
dev = list_entry(queue->poll_list.next, struct net_device, poll_list); dev = list_entry(queue->poll_list.next,
struct net_device, poll_list);
if (dev->quota <= 0 || dev->poll(dev, &budget)) { if (dev->quota <= 0 || dev->poll(dev, &budget)) {
local_irq_disable(); local_irq_disable();
@@ -1605,7 +1607,7 @@ static void net_rx_action(struct softirq_action *h)
local_irq_disable(); local_irq_disable();
} }
} }
out:
local_irq_enable(); local_irq_enable();
br_read_unlock(BR_NETPROTO_LOCK); br_read_unlock(BR_NETPROTO_LOCK);
return; return;
@@ -1613,9 +1615,7 @@ static void net_rx_action(struct softirq_action *h)
softnet_break: softnet_break:
netdev_rx_stat[this_cpu].time_squeeze++; netdev_rx_stat[this_cpu].time_squeeze++;
__cpu_raise_softirq(this_cpu, NET_RX_SOFTIRQ); __cpu_raise_softirq(this_cpu, NET_RX_SOFTIRQ);
goto out;
local_irq_enable();
br_read_unlock(BR_NETPROTO_LOCK);
} }
static gifconf_func_t * gifconf_list [NPROTO]; static gifconf_func_t * gifconf_list [NPROTO];
@@ -1629,10 +1629,9 @@ static gifconf_func_t * gifconf_list [NPROTO];
* that is passed must not be freed or reused until it has been replaced * that is passed must not be freed or reused until it has been replaced
* by another handler. * by another handler.
*/ */
int register_gifconf(unsigned int family, gifconf_func_t * gifconf) int register_gifconf(unsigned int family, gifconf_func_t * gifconf)
{ {
if (family>=NPROTO) if (family >= NPROTO)
return -EINVAL; return -EINVAL;
gifconf_list[family] = gifconf; gifconf_list[family] = gifconf;
return 0; return 0;
@@ -1656,9 +1655,9 @@ static int dev_ifname(struct ifreq *arg)
struct ifreq ifr; struct ifreq ifr;
/* /*
* Fetch the caller's info block. * Fetch the caller's info block.
*/ */
if (copy_from_user(&ifr, arg, sizeof(struct ifreq))) if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
return -EFAULT; return -EFAULT;
@@ -1693,9 +1692,9 @@ static int dev_ifconf(char *arg)
int i; int i;
/* /*
* Fetch the caller's info block. * Fetch the caller's info block.
*/ */
if (copy_from_user(&ifc, arg, sizeof(struct ifconf))) if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
return -EFAULT; return -EFAULT;
@@ -1703,39 +1702,35 @@ static int dev_ifconf(char *arg)
len = ifc.ifc_len; len = ifc.ifc_len;
/* /*
* Loop over the interfaces, and write an info block for each. * Loop over the interfaces, and write an info block for each.
*/ */
total = 0; total = 0;
for (dev = dev_base; dev != NULL; dev = dev->next) { for (dev = dev_base; dev; dev = dev->next) {
for (i=0; i<NPROTO; i++) { for (i = 0; i < NPROTO; i++) {
if (gifconf_list[i]) { if (gifconf_list[i]) {
int done; int done;
if (pos==NULL) { if (!pos)
done = gifconf_list[i](dev, NULL, 0); done = gifconf_list[i](dev, NULL, 0);
} else { else
done = gifconf_list[i](dev, pos+total, len-total); done = gifconf_list[i](dev, pos + total,
} len - total);
if (done<0) { if (done < 0)
return -EFAULT; return -EFAULT;
}
total += done; total += done;
} }
} }
} }
/* /*
* All done. Write the updated control block back to the caller. * All done. Write the updated control block back to the caller.
*/ */
ifc.ifc_len = total; ifc.ifc_len = total;
if (copy_to_user(arg, &ifc, sizeof(struct ifconf))) /*
return -EFAULT;
/*
* Both BSD and Solaris return 0 here, so we do too. * Both BSD and Solaris return 0 here, so we do too.
*/ */
return 0; return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
} }
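dev_ifconf() above is the kernel half of SIOCGIFCONF. The user-space half, roughly, looks like the following; the fixed 32-entry buffer is an arbitrary assumption for the sketch:

#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

int main(void)
{
	struct ifreq reqs[32];			/* arbitrary upper bound */
	struct ifconf ifc;
	int fd = socket(AF_INET, SOCK_DGRAM, 0), i;

	if (fd < 0)
		return 1;
	ifc.ifc_len = sizeof(reqs);
	ifc.ifc_req = reqs;
	if (ioctl(fd, SIOCGIFCONF, &ifc) == 0)	/* kernel fills names/addresses */
		for (i = 0; i < ifc.ifc_len / sizeof(struct ifreq); i++)
			printf("%s\n", reqs[i].ifr_name);
	close(fd);
	return 0;
}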
/* /*
@@ -1747,36 +1742,39 @@ static int dev_ifconf(char *arg)
static int sprintf_stats(char *buffer, struct net_device *dev) static int sprintf_stats(char *buffer, struct net_device *dev)
{ {
struct net_device_stats *stats = (dev->get_stats ? dev->get_stats(dev): NULL); struct net_device_stats *stats = dev->get_stats ? dev->get_stats(dev) :
NULL;
int size; int size;
if (stats) if (stats)
size = sprintf(buffer, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu %8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n", size = sprintf(buffer, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu "
"%10lu %9lu %8lu %7lu %4lu %4lu %4lu "
"%5lu %7lu %10lu\n",
dev->name, dev->name,
stats->rx_bytes, stats->rx_bytes,
stats->rx_packets, stats->rx_errors, stats->rx_packets, stats->rx_errors,
stats->rx_dropped + stats->rx_missed_errors, stats->rx_dropped + stats->rx_missed_errors,
stats->rx_fifo_errors, stats->rx_fifo_errors,
stats->rx_length_errors + stats->rx_over_errors stats->rx_length_errors + stats->rx_over_errors +
+ stats->rx_crc_errors + stats->rx_frame_errors, stats->rx_crc_errors + stats->rx_frame_errors,
stats->rx_compressed, stats->multicast, stats->rx_compressed, stats->multicast,
stats->tx_bytes, stats->tx_bytes,
stats->tx_packets, stats->tx_errors, stats->tx_dropped, stats->tx_packets, stats->tx_errors, stats->tx_dropped,
stats->tx_fifo_errors, stats->collisions, stats->tx_fifo_errors, stats->collisions,
stats->tx_carrier_errors + stats->tx_aborted_errors stats->tx_carrier_errors + stats->tx_aborted_errors +
+ stats->tx_window_errors + stats->tx_heartbeat_errors, stats->tx_window_errors + stats->tx_heartbeat_errors,
stats->tx_compressed); stats->tx_compressed);
else else
size = sprintf(buffer, "%6s: No statistics available.\n", dev->name); size = sprintf(buffer, "%6s: No statistics available.\n",
dev->name);
return size; return size;
} }
/* /*
* Called from the PROCfs module. This now uses the new arbitrary sized /proc/net interface * Called from the PROCfs module. This now uses the new arbitrary sized
* to create /proc/net/dev * /proc/net interface to create /proc/net/dev
*/ */
static int dev_get_info(char *buffer, char **start, off_t offset, int length) static int dev_get_info(char *buffer, char **start, off_t offset, int length)
{ {
int len = 0; int len = 0;
@@ -1785,21 +1783,19 @@ static int dev_get_info(char *buffer, char **start, off_t offset, int length)
int size; int size;
struct net_device *dev; struct net_device *dev;
size = sprintf(buffer,
size = sprintf(buffer,
"Inter-| Receive | Transmit\n" "Inter-| Receive | Transmit\n"
" face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed\n"); " face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed\n");
pos += size; pos += size;
len += size; len += size;
read_lock(&dev_base_lock); read_lock(&dev_base_lock);
for (dev = dev_base; dev != NULL; dev = dev->next) { for (dev = dev_base; dev; dev = dev->next) {
size = sprintf_stats(buffer+len, dev); size = sprintf_stats(buffer+len, dev);
len += size; len += size;
pos = begin + len; pos = begin + len;
if (pos < offset) { if (pos < offset) {
len = 0; len = 0;
begin = pos; begin = pos;
@@ -1810,7 +1806,7 @@ static int dev_get_info(char *buffer, char **start, off_t offset, int length)
read_unlock(&dev_base_lock); read_unlock(&dev_base_lock);
*start = buffer + (offset - begin); /* Start of wanted data */ *start = buffer + (offset - begin); /* Start of wanted data */
len -= (offset - begin); /* Start slop */ len -= offset - begin; /* Start slop */
if (len > length) if (len > length)
len = length; /* Ending slop */ len = length; /* Ending slop */
if (len < 0) if (len < 0)
@@ -1822,11 +1818,12 @@ static int dev_proc_stats(char *buffer, char **start, off_t offset,
int length, int *eof, void *data) int length, int *eof, void *data)
{ {
int i, lcpu; int i, lcpu;
int len=0; int len = 0;
for (lcpu=0; lcpu<smp_num_cpus; lcpu++) { for (lcpu = 0; lcpu < smp_num_cpus; lcpu++) {
i = cpu_logical_map(lcpu); i = cpu_logical_map(lcpu);
len += sprintf(buffer+len, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n", len += sprintf(buffer + len, "%08x %08x %08x %08x %08x %08x "
"%08x %08x %08x\n",
netdev_rx_stat[i].total, netdev_rx_stat[i].total,
netdev_rx_stat[i].dropped, netdev_rx_stat[i].dropped,
netdev_rx_stat[i].time_squeeze, netdev_rx_stat[i].time_squeeze,
@@ -1870,7 +1867,6 @@ static int dev_proc_stats(char *buffer, char **start, off_t offset,
* are adjusted, %RTM_NEWLINK is sent to the routing socket and the * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
* function returns zero. * function returns zero.
*/ */
int netdev_set_master(struct net_device *slave, struct net_device *master) int netdev_set_master(struct net_device *slave, struct net_device *master)
{ {
struct net_device *old = slave->master; struct net_device *old = slave->master;
@@ -1909,7 +1905,6 @@ int netdev_set_master(struct net_device *slave, struct net_device *master)
* the device reverts back to normal filtering operation. A negative inc * the device reverts back to normal filtering operation. A negative inc
* value is used to drop promiscuity on the device. * value is used to drop promiscuity on the device.
*/ */
void dev_set_promiscuity(struct net_device *dev, int inc) void dev_set_promiscuity(struct net_device *dev, int inc)
{ {
unsigned short old_flags = dev->flags; unsigned short old_flags = dev->flags;
@@ -1917,9 +1912,9 @@ void dev_set_promiscuity(struct net_device *dev, int inc)
dev->flags |= IFF_PROMISC; dev->flags |= IFF_PROMISC;
if ((dev->promiscuity += inc) == 0) if ((dev->promiscuity += inc) == 0)
dev->flags &= ~IFF_PROMISC; dev->flags &= ~IFF_PROMISC;
if (dev->flags^old_flags) { if (dev->flags ^ old_flags) {
#ifdef CONFIG_NET_FASTROUTE #ifdef CONFIG_NET_FASTROUTE
if (dev->flags&IFF_PROMISC) { if (dev->flags & IFF_PROMISC) {
netdev_fastroute_obstacles++; netdev_fastroute_obstacles++;
dev_clear_fastroute(dev); dev_clear_fastroute(dev);
} else } else
@@ -1927,7 +1922,8 @@ void dev_set_promiscuity(struct net_device *dev, int inc)
#endif #endif
dev_mc_upload(dev); dev_mc_upload(dev);
printk(KERN_INFO "device %s %s promiscuous mode\n", printk(KERN_INFO "device %s %s promiscuous mode\n",
dev->name, (dev->flags&IFF_PROMISC) ? "entered" : "left"); dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
"left");
} }
} }
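dev_set_promiscuity() is reference counted: every +1 has to be balanced by a later -1 or the interface never leaves promiscuous mode. A sketch of a well-behaved in-kernel user, with the function names assumed and the locking context left to whatever the caller already holds:

	static void my_tap_attach(struct net_device *dev)
	{
		dev_set_promiscuity(dev, 1);	/* bump count, may set IFF_PROMISC */
	}

	static void my_tap_detach(struct net_device *dev)
	{
		dev_set_promiscuity(dev, -1);	/* drop our reference */
	}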
@@ -1950,7 +1946,7 @@ void dev_set_allmulti(struct net_device *dev, int inc)
dev->flags |= IFF_ALLMULTI; dev->flags |= IFF_ALLMULTI;
if ((dev->allmulti += inc) == 0) if ((dev->allmulti += inc) == 0)
dev->flags &= ~IFF_ALLMULTI; dev->flags &= ~IFF_ALLMULTI;
if (dev->flags^old_flags) if (dev->flags ^ old_flags)
dev_mc_upload(dev); dev_mc_upload(dev);
} }
@@ -1963,13 +1959,15 @@ int dev_change_flags(struct net_device *dev, unsigned flags)
* Set the flags on our device. * Set the flags on our device.
*/ */
dev->flags = (flags & (IFF_DEBUG|IFF_NOTRAILERS|IFF_NOARP|IFF_DYNAMIC| dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
IFF_MULTICAST|IFF_PORTSEL|IFF_AUTOMEDIA)) | IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
(dev->flags & (IFF_UP|IFF_VOLATILE|IFF_PROMISC|IFF_ALLMULTI)); IFF_AUTOMEDIA)) |
(dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
IFF_ALLMULTI));
/* /*
* Load in the correct multicast list now the flags have changed. * Load in the correct multicast list now the flags have changed.
*/ */
dev_mc_upload(dev); dev_mc_upload(dev);
@@ -1980,20 +1978,20 @@ int dev_change_flags(struct net_device *dev, unsigned flags)
*/ */
ret = 0; ret = 0;
if ((old_flags^flags)&IFF_UP) /* Bit is different ? */ if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
{
ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev); ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
if (ret == 0) if (!ret)
dev_mc_upload(dev); dev_mc_upload(dev);
} }
if (dev->flags&IFF_UP && if (dev->flags & IFF_UP &&
((old_flags^dev->flags)&~(IFF_UP|IFF_PROMISC|IFF_ALLMULTI|IFF_VOLATILE))) ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
IFF_VOLATILE)))
notifier_call_chain(&netdev_chain, NETDEV_CHANGE, dev); notifier_call_chain(&netdev_chain, NETDEV_CHANGE, dev);
if ((flags^dev->gflags)&IFF_PROMISC) { if ((flags ^ dev->gflags) & IFF_PROMISC) {
int inc = (flags&IFF_PROMISC) ? +1 : -1; int inc = (flags & IFF_PROMISC) ? +1 : -1;
dev->gflags ^= IFF_PROMISC; dev->gflags ^= IFF_PROMISC;
dev_set_promiscuity(dev, inc); dev_set_promiscuity(dev, inc);
} }
@@ -2002,53 +2000,56 @@ int dev_change_flags(struct net_device *dev, unsigned flags)
is important. Some (broken) drivers set IFF_PROMISC, when is important. Some (broken) drivers set IFF_PROMISC, when
IFF_ALLMULTI is requested not asking us and not reporting. IFF_ALLMULTI is requested not asking us and not reporting.
*/ */
if ((flags^dev->gflags)&IFF_ALLMULTI) { if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
int inc = (flags&IFF_ALLMULTI) ? +1 : -1; int inc = (flags & IFF_ALLMULTI) ? +1 : -1;
dev->gflags ^= IFF_ALLMULTI; dev->gflags ^= IFF_ALLMULTI;
dev_set_allmulti(dev, inc); dev_set_allmulti(dev, inc);
} }
if (old_flags^dev->flags) if (old_flags ^ dev->flags)
rtmsg_ifinfo(RTM_NEWLINK, dev, old_flags^dev->flags); rtmsg_ifinfo(RTM_NEWLINK, dev, old_flags ^ dev->flags);
return ret; return ret;
} }
/* /*
* Perform the SIOCxIFxxx calls. * Perform the SIOCxIFxxx calls.
*/ */
static int dev_ifsioc(struct ifreq *ifr, unsigned int cmd) static int dev_ifsioc(struct ifreq *ifr, unsigned int cmd)
{ {
struct net_device *dev;
int err; int err;
struct net_device *dev = __dev_get_by_name(ifr->ifr_name);
if ((dev = __dev_get_by_name(ifr->ifr_name)) == NULL) if (!dev)
return -ENODEV; return -ENODEV;
switch(cmd) switch (cmd) {
{
case SIOCGIFFLAGS: /* Get interface flags */ case SIOCGIFFLAGS: /* Get interface flags */
ifr->ifr_flags = (dev->flags&~(IFF_PROMISC|IFF_ALLMULTI|IFF_RUNNING)) ifr->ifr_flags = (dev->flags & ~(IFF_PROMISC |
|(dev->gflags&(IFF_PROMISC|IFF_ALLMULTI)); IFF_ALLMULTI |
IFF_RUNNING)) |
(dev->gflags & (IFF_PROMISC |
IFF_ALLMULTI));
if (netif_running(dev) && netif_carrier_ok(dev)) if (netif_running(dev) && netif_carrier_ok(dev))
ifr->ifr_flags |= IFF_RUNNING; ifr->ifr_flags |= IFF_RUNNING;
return 0; return 0;
case SIOCSIFFLAGS: /* Set interface flags */ case SIOCSIFFLAGS: /* Set interface flags */
return dev_change_flags(dev, ifr->ifr_flags); return dev_change_flags(dev, ifr->ifr_flags);
case SIOCGIFMETRIC: /* Get the metric on the interface (currently unused) */ case SIOCGIFMETRIC: /* Get the metric on the interface
(currently unused) */
ifr->ifr_metric = 0; ifr->ifr_metric = 0;
return 0; return 0;
case SIOCSIFMETRIC: /* Set the metric on the interface (currently unused) */ case SIOCSIFMETRIC: /* Set the metric on the interface
(currently unused) */
return -EOPNOTSUPP; return -EOPNOTSUPP;
case SIOCGIFMTU: /* Get the MTU of a device */ case SIOCGIFMTU: /* Get the MTU of a device */
ifr->ifr_mtu = dev->mtu; ifr->ifr_mtu = dev->mtu;
return 0; return 0;
case SIOCSIFMTU: /* Set the MTU of a device */ case SIOCSIFMTU: /* Set the MTU of a device */
if (ifr->ifr_mtu == dev->mtu) if (ifr->ifr_mtu == dev->mtu)
return 0; return 0;
@@ -2056,80 +2057,85 @@ static int dev_ifsioc(struct ifreq *ifr, unsigned int cmd)
/* /*
* MTU must be positive. * MTU must be positive.
*/ */
if (ifr->ifr_mtu < 0)
if (ifr->ifr_mtu<0)
return -EINVAL; return -EINVAL;
if (!netif_device_present(dev)) if (!netif_device_present(dev))
return -ENODEV; return -ENODEV;
err = 0;
if (dev->change_mtu) if (dev->change_mtu)
err = dev->change_mtu(dev, ifr->ifr_mtu); err = dev->change_mtu(dev, ifr->ifr_mtu);
else { else
dev->mtu = ifr->ifr_mtu; dev->mtu = ifr->ifr_mtu;
err = 0; if (!err && dev->flags & IFF_UP)
} notifier_call_chain(&netdev_chain,
if (!err && dev->flags&IFF_UP) NETDEV_CHANGEMTU, dev);
notifier_call_chain(&netdev_chain, NETDEV_CHANGEMTU, dev);
return err; return err;
case SIOCGIFHWADDR: case SIOCGIFHWADDR:
memcpy(ifr->ifr_hwaddr.sa_data,dev->dev_addr, MAX_ADDR_LEN); memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
ifr->ifr_hwaddr.sa_family=dev->type; MAX_ADDR_LEN);
ifr->ifr_hwaddr.sa_family = dev->type;
return 0; return 0;
case SIOCSIFHWADDR: case SIOCSIFHWADDR:
if (dev->set_mac_address == NULL) if (!dev->set_mac_address)
return -EOPNOTSUPP; return -EOPNOTSUPP;
if (ifr->ifr_hwaddr.sa_family!=dev->type) if (ifr->ifr_hwaddr.sa_family != dev->type)
return -EINVAL; return -EINVAL;
if (!netif_device_present(dev)) if (!netif_device_present(dev))
return -ENODEV; return -ENODEV;
err = dev->set_mac_address(dev, &ifr->ifr_hwaddr); err = dev->set_mac_address(dev, &ifr->ifr_hwaddr);
if (!err) if (!err)
notifier_call_chain(&netdev_chain, NETDEV_CHANGEADDR, dev); notifier_call_chain(&netdev_chain,
NETDEV_CHANGEADDR, dev);
return err; return err;
case SIOCSIFHWBROADCAST: case SIOCSIFHWBROADCAST:
if (ifr->ifr_hwaddr.sa_family!=dev->type) if (ifr->ifr_hwaddr.sa_family != dev->type)
return -EINVAL; return -EINVAL;
memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data, MAX_ADDR_LEN); memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
notifier_call_chain(&netdev_chain, NETDEV_CHANGEADDR, dev); MAX_ADDR_LEN);
notifier_call_chain(&netdev_chain,
NETDEV_CHANGEADDR, dev);
return 0; return 0;
case SIOCGIFMAP: case SIOCGIFMAP:
ifr->ifr_map.mem_start=dev->mem_start; ifr->ifr_map.mem_start = dev->mem_start;
ifr->ifr_map.mem_end=dev->mem_end; ifr->ifr_map.mem_end = dev->mem_end;
ifr->ifr_map.base_addr=dev->base_addr; ifr->ifr_map.base_addr = dev->base_addr;
ifr->ifr_map.irq=dev->irq; ifr->ifr_map.irq = dev->irq;
ifr->ifr_map.dma=dev->dma; ifr->ifr_map.dma = dev->dma;
ifr->ifr_map.port=dev->if_port; ifr->ifr_map.port = dev->if_port;
return 0; return 0;
case SIOCSIFMAP: case SIOCSIFMAP:
if (dev->set_config) { if (dev->set_config) {
if (!netif_device_present(dev)) if (!netif_device_present(dev))
return -ENODEV; return -ENODEV;
return dev->set_config(dev,&ifr->ifr_map); return dev->set_config(dev, &ifr->ifr_map);
} }
return -EOPNOTSUPP; return -EOPNOTSUPP;
case SIOCADDMULTI: case SIOCADDMULTI:
if (dev->set_multicast_list == NULL || if (!dev->set_multicast_list ||
ifr->ifr_hwaddr.sa_family != AF_UNSPEC) ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
return -EINVAL; return -EINVAL;
if (!netif_device_present(dev)) if (!netif_device_present(dev))
return -ENODEV; return -ENODEV;
dev_mc_add(dev,ifr->ifr_hwaddr.sa_data, dev->addr_len, 1); dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
dev->addr_len, 1);
return 0; return 0;
case SIOCDELMULTI: case SIOCDELMULTI:
if (dev->set_multicast_list == NULL || if (!dev->set_multicast_list ||
ifr->ifr_hwaddr.sa_family!=AF_UNSPEC) ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
return -EINVAL; return -EINVAL;
if (!netif_device_present(dev)) if (!netif_device_present(dev))
return -ENODEV; return -ENODEV;
dev_mc_delete(dev,ifr->ifr_hwaddr.sa_data,dev->addr_len, 1); dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
dev->addr_len, 1);
return 0; return 0;
case SIOCGIFINDEX: case SIOCGIFINDEX:
@@ -2141,19 +2147,20 @@ static int dev_ifsioc(struct ifreq *ifr, unsigned int cmd)
return 0; return 0;
case SIOCSIFTXQLEN: case SIOCSIFTXQLEN:
if (ifr->ifr_qlen<0) if (ifr->ifr_qlen < 0)
return -EINVAL; return -EINVAL;
dev->tx_queue_len = ifr->ifr_qlen; dev->tx_queue_len = ifr->ifr_qlen;
return 0; return 0;
case SIOCSIFNAME: case SIOCSIFNAME:
if (dev->flags&IFF_UP) if (dev->flags & IFF_UP)
return -EBUSY; return -EBUSY;
if (__dev_get_by_name(ifr->ifr_newname)) if (__dev_get_by_name(ifr->ifr_newname))
return -EEXIST; return -EEXIST;
memcpy(dev->name, ifr->ifr_newname, IFNAMSIZ); memcpy(dev->name, ifr->ifr_newname, IFNAMSIZ);
dev->name[IFNAMSIZ-1] = 0; dev->name[IFNAMSIZ - 1] = 0;
notifier_call_chain(&netdev_chain, NETDEV_CHANGENAME, dev); notifier_call_chain(&netdev_chain,
NETDEV_CHANGENAME, dev);
return 0; return 0;
/* /*
@@ -2174,16 +2181,19 @@ static int dev_ifsioc(struct ifreq *ifr, unsigned int cmd)
cmd == SIOCGMIIREG || cmd == SIOCGMIIREG ||
cmd == SIOCSMIIREG || cmd == SIOCSMIIREG ||
cmd == SIOCWANDEV) { cmd == SIOCWANDEV) {
err = -EOPNOTSUPP;
if (dev->do_ioctl) { if (dev->do_ioctl) {
if (!netif_device_present(dev)) if (netif_device_present(dev))
return -ENODEV; err = dev->do_ioctl(dev, ifr,
return dev->do_ioctl(dev, ifr, cmd); cmd);
else
err = -ENODEV;
} }
return -EOPNOTSUPP; } else
} err = -EINVAL;
} }
return -EINVAL; return err;
} }
/* /*
@@ -2197,7 +2207,7 @@ static int dev_ifsioc(struct ifreq *ifr, unsigned int cmd)
* @arg: pointer to a struct ifreq in user space * @arg: pointer to a struct ifreq in user space
* *
* Issue ioctl functions to devices. This is normally called by the * Issue ioctl functions to devices. This is normally called by the
* user space syscall interfaces but can sometimes be useful for * user space syscall interfaces but can sometimes be useful for
* other purposes. The return value is the return from the syscall if * other purposes. The return value is the return from the syscall if
* positive or a negative errno code on error. * positive or a negative errno code on error.
*/ */
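As the comment notes, dev_ioctl() is normally entered through the ioctl() system call on a socket. A user-space snippet that exercises the SIOCGIFFLAGS branch handled above; "eth0" is an assumed interface name:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

int main(void)
{
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);	/* assumed interface */
	if (ioctl(fd, SIOCGIFFLAGS, &ifr) == 0)
		printf("eth0 is %s\n", (ifr.ifr_flags & IFF_UP) ? "up" : "down");
	close(fd);
	return 0;
}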
@@ -2212,16 +2222,15 @@ int dev_ioctl(unsigned int cmd, void *arg)
and requires shared lock, because it sleeps writing and requires shared lock, because it sleeps writing
to user space. to user space.
*/ */
if (cmd == SIOCGIFCONF) { if (cmd == SIOCGIFCONF) {
rtnl_shlock(); rtnl_shlock();
ret = dev_ifconf((char *) arg); ret = dev_ifconf((char *) arg);
rtnl_shunlock(); rtnl_shunlock();
return ret; return ret;
} }
if (cmd == SIOCGIFNAME) { if (cmd == SIOCGIFNAME)
return dev_ifname((struct ifreq *)arg); return dev_ifname((struct ifreq *)arg);
}
if (copy_from_user(&ifr, arg, sizeof(struct ifreq))) if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
return -EFAULT; return -EFAULT;
@@ -2233,18 +2242,16 @@ int dev_ioctl(unsigned int cmd, void *arg)
*colon = 0; *colon = 0;
/* /*
* See which interface the caller is talking about. * See which interface the caller is talking about.
*/ */
switch(cmd) switch (cmd) {
{
/* /*
* These ioctl calls: * These ioctl calls:
* - can be done by all. * - can be done by all.
* - atomic and do not require locking. * - atomic and do not require locking.
* - return a value * - return a value
*/ */
case SIOCGIFFLAGS: case SIOCGIFFLAGS:
case SIOCGIFMETRIC: case SIOCGIFMETRIC:
case SIOCGIFMTU: case SIOCGIFMTU:
@@ -2260,8 +2267,9 @@ int dev_ioctl(unsigned int cmd, void *arg)
if (!ret) { if (!ret) {
if (colon) if (colon)
*colon = ':'; *colon = ':';
if (copy_to_user(arg, &ifr, sizeof(struct ifreq))) if (copy_to_user(arg, &ifr,
return -EFAULT; sizeof(struct ifreq)))
ret = -EFAULT;
} }
return ret; return ret;
@@ -2271,7 +2279,6 @@ int dev_ioctl(unsigned int cmd, void *arg)
* - require strict serialization. * - require strict serialization.
* - return a value * - return a value
*/ */
case SIOCETHTOOL: case SIOCETHTOOL:
case SIOCGMIIPHY: case SIOCGMIIPHY:
case SIOCGMIIREG: case SIOCGMIIREG:
@@ -2286,8 +2293,9 @@ int dev_ioctl(unsigned int cmd, void *arg)
if (!ret) { if (!ret) {
if (colon) if (colon)
*colon = ':'; *colon = ':';
if (copy_to_user(arg, &ifr, sizeof(struct ifreq))) if (copy_to_user(arg, &ifr,
return -EFAULT; sizeof(struct ifreq)))
ret = -EFAULT;
} }
return ret; return ret;
@@ -2297,7 +2305,6 @@ int dev_ioctl(unsigned int cmd, void *arg)
* - require strict serialization. * - require strict serialization.
* - do not return a value * - do not return a value
*/ */
case SIOCSIFFLAGS: case SIOCSIFFLAGS:
case SIOCSIFMETRIC: case SIOCSIFMETRIC:
case SIOCSIFMTU: case SIOCSIFMTU:
@@ -2325,19 +2332,19 @@ int dev_ioctl(unsigned int cmd, void *arg)
rtnl_unlock(); rtnl_unlock();
dev_probe_unlock(); dev_probe_unlock();
return ret; return ret;
case SIOCGIFMEM: case SIOCGIFMEM:
/* Get the per device memory space. We can add this but currently /* Get the per device memory space. We can add this but
do not support it */ * currently do not support it */
case SIOCSIFMEM: case SIOCSIFMEM:
/* Set the per device memory buffer space. Not applicable in our case */ /* Set the per device memory buffer space.
* Not applicable in our case */
case SIOCSIFLINK: case SIOCSIFLINK:
return -EINVAL; return -EINVAL;
/* /*
* Unknown or private ioctl. * Unknown or private ioctl.
*/ */
default: default:
if (cmd == SIOCWANDEV || if (cmd == SIOCWANDEV ||
(cmd >= SIOCDEVPRIVATE && (cmd >= SIOCDEVPRIVATE &&
@@ -2348,8 +2355,9 @@ int dev_ioctl(unsigned int cmd, void *arg)
ret = dev_ifsioc(&ifr, cmd); ret = dev_ifsioc(&ifr, cmd);
rtnl_unlock(); rtnl_unlock();
dev_probe_unlock(); dev_probe_unlock();
if (!ret && copy_to_user(arg, &ifr, sizeof(struct ifreq))) if (!ret && copy_to_user(arg, &ifr,
return -EFAULT; sizeof(struct ifreq)))
ret = -EFAULT;
return ret; return ret;
} }
#ifdef WIRELESS_EXT #ifdef WIRELESS_EXT
@@ -2358,8 +2366,8 @@ int dev_ioctl(unsigned int cmd, void *arg)
/* If command is `set a parameter', or /* If command is `set a parameter', or
* `get the encoding parameters', check if * `get the encoding parameters', check if
* the user has the right to do it */ * the user has the right to do it */
if (IW_IS_SET(cmd) || (cmd == SIOCGIWENCODE)) { if (IW_IS_SET(cmd) || cmd == SIOCGIWENCODE) {
if(!capable(CAP_NET_ADMIN)) if (!capable(CAP_NET_ADMIN))
return -EPERM; return -EPERM;
} }
dev_load(ifr.ifr_name); dev_load(ifr.ifr_name);
@@ -2368,8 +2376,9 @@ int dev_ioctl(unsigned int cmd, void *arg)
ret = wireless_process_ioctl(&ifr, cmd); ret = wireless_process_ioctl(&ifr, cmd);
rtnl_unlock(); rtnl_unlock();
if (!ret && IW_IS_GET(cmd) && if (!ret && IW_IS_GET(cmd) &&
copy_to_user(arg, &ifr, sizeof(struct ifreq))) copy_to_user(arg, &ifr,
return -EFAULT; sizeof(struct ifreq)))
ret = -EFAULT;
return ret; return ret;
} }
#endif /* WIRELESS_EXT */ #endif /* WIRELESS_EXT */
@@ -2385,14 +2394,13 @@ int dev_ioctl(unsigned int cmd, void *arg)
* number. The caller must hold the rtnl semaphore or the * number. The caller must hold the rtnl semaphore or the
* dev_base_lock to be sure it remains unique. * dev_base_lock to be sure it remains unique.
*/ */
int dev_new_index(void) int dev_new_index(void)
{ {
static int ifindex; static int ifindex;
for (;;) { for (;;) {
if (++ifindex <= 0) if (++ifindex <= 0)
ifindex=1; ifindex = 1;
if (__dev_get_by_index(ifindex) == NULL) if (!__dev_get_by_index(ifindex))
return ifindex; return ifindex;
} }
} }
@@ -2402,7 +2410,7 @@ static int dev_boot_phase = 1;
/** /**
* register_netdevice - register a network device * register_netdevice - register a network device
* @dev: device to register * @dev: device to register
* *
* Take a completed network device structure and add it to the kernel * Take a completed network device structure and add it to the kernel
* interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
* chain. 0 is returned on success. A negative errno code is returned * chain. 0 is returned on success. A negative errno code is returned
@@ -2422,15 +2430,13 @@ int net_dev_init(void);
int register_netdevice(struct net_device *dev) int register_netdevice(struct net_device *dev)
{ {
struct net_device *d, **dp; struct net_device *d, **dp;
#ifdef CONFIG_NET_DIVERT
int ret; int ret;
#endif
spin_lock_init(&dev->queue_lock); spin_lock_init(&dev->queue_lock);
spin_lock_init(&dev->xmit_lock); spin_lock_init(&dev->xmit_lock);
dev->xmit_lock_owner = -1; dev->xmit_lock_owner = -1;
#ifdef CONFIG_NET_FASTROUTE #ifdef CONFIG_NET_FASTROUTE
dev->fastpath_lock=RW_LOCK_UNLOCKED; dev->fastpath_lock = RW_LOCK_UNLOCKED;
#endif #endif
if (dev_boot_phase) if (dev_boot_phase)
@@ -2439,38 +2445,32 @@ int register_netdevice(struct net_device *dev)
#ifdef CONFIG_NET_DIVERT #ifdef CONFIG_NET_DIVERT
ret = alloc_divert_blk(dev); ret = alloc_divert_blk(dev);
if (ret) if (ret)
return ret; goto out;
#endif /* CONFIG_NET_DIVERT */ #endif /* CONFIG_NET_DIVERT */
dev->iflink = -1; dev->iflink = -1;
/* Init, if this function is available */ /* Init, if this function is available */
if (dev->init && dev->init(dev) != 0) { ret = -EIO;
#ifdef CONFIG_NET_DIVERT if (dev->init && dev->init(dev))
free_divert_blk(dev); goto out_err;
#endif
return -EIO;
}
dev->ifindex = dev_new_index(); dev->ifindex = dev_new_index();
if (dev->iflink == -1) if (dev->iflink == -1)
dev->iflink = dev->ifindex; dev->iflink = dev->ifindex;
/* Check for existence, and append to tail of chain */ /* Check for existence, and append to tail of chain */
for (dp=&dev_base; (d=*dp) != NULL; dp=&d->next) { ret = -EEXIST;
if (d == dev || strcmp(d->name, dev->name) == 0) { for (dp = &dev_base; (d = *dp) != NULL; dp = &d->next) {
#ifdef CONFIG_NET_DIVERT if (d == dev || !strcmp(d->name, dev->name))
free_divert_blk(dev); goto out_err;
#endif
return -EEXIST;
}
} }
/* /*
* nil rebuild_header routine, * nil rebuild_header routine,
* that should be never called and used as just bug trap. * that should be never called and used as just bug trap.
*/ */
if (dev->rebuild_header == NULL) if (!dev->rebuild_header)
dev->rebuild_header = default_rebuild_header; dev->rebuild_header = default_rebuild_header;
/* /*
@@ -2492,8 +2492,15 @@ int register_netdevice(struct net_device *dev)
notifier_call_chain(&netdev_chain, NETDEV_REGISTER, dev); notifier_call_chain(&netdev_chain, NETDEV_REGISTER, dev);
net_run_sbin_hotplug(dev, "register"); net_run_sbin_hotplug(dev, "register");
ret = 0;
return 0; out:
return ret;
out_err:
#ifdef CONFIG_NET_DIVERT
free_divert_blk(dev);
#endif
goto out;
} }
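register_netdevice() assumes the caller already holds the rtnl semaphore, which is why drivers normally go through the register_netdev() wrapper instead. A rough sketch of a driver of this vintage doing so; the device name, init routine and xmit routine are assumptions, not part of this patch:

	static int my_dev_init(struct net_device *dev)
	{
		ether_setup(dev);			/* Ethernet defaults */
		dev->hard_start_xmit = my_start_xmit;	/* assumed driver routine */
		return 0;
	}

	static struct net_device my_dev;

	static int __init my_module_init(void)
	{
		strcpy(my_dev.name, "mydev0");	/* fixed name for the sketch */
		my_dev.init = my_dev_init;
		/* register_netdev() takes rtnl_lock() and then calls the
		 * register_netdevice() shown above. */
		return register_netdev(&my_dev);
	}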
/** /**
@@ -2503,15 +2510,15 @@ int register_netdevice(struct net_device *dev)
* Destroy and free a dead device. A value of zero is returned on * Destroy and free a dead device. A value of zero is returned on
* success. * success.
*/ */
int netdev_finish_unregister(struct net_device *dev) int netdev_finish_unregister(struct net_device *dev)
{ {
BUG_TRAP(dev->ip_ptr==NULL); BUG_TRAP(!dev->ip_ptr);
BUG_TRAP(dev->ip6_ptr==NULL); BUG_TRAP(!dev->ip6_ptr);
BUG_TRAP(dev->dn_ptr==NULL); BUG_TRAP(!dev->dn_ptr);
if (!dev->deadbeaf) { if (!dev->deadbeaf) {
printk(KERN_ERR "Freeing alive device %p, %s\n", dev, dev->name); printk(KERN_ERR "Freeing alive device %p, %s\n",
dev, dev->name);
return 0; return 0;
} }
#ifdef NET_REFCNT_DEBUG #ifdef NET_REFCNT_DEBUG
@@ -2547,11 +2554,11 @@ int unregister_netdevice(struct net_device *dev)
if (dev->flags & IFF_UP) if (dev->flags & IFF_UP)
dev_close(dev); dev_close(dev);
BUG_TRAP(dev->deadbeaf==0); BUG_TRAP(!dev->deadbeaf);
dev->deadbeaf = 1; dev->deadbeaf = 1;
/* And unlink it from device chain. */ /* And unlink it from device chain. */
for (dp = &dev_base; (d=*dp) != NULL; dp=&d->next) { for (dp = &dev_base; (d = *dp) != NULL; dp = &d->next) {
if (d == dev) { if (d == dev) {
write_lock_bh(&dev_base_lock); write_lock_bh(&dev_base_lock);
*dp = d->next; *dp = d->next;
@@ -2559,8 +2566,9 @@ int unregister_netdevice(struct net_device *dev)
break; break;
} }
} }
if (d == NULL) { if (!d) {
printk(KERN_DEBUG "unregister_netdevice: device %s/%p never was registered\n", dev->name, dev); printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
"was registered\n", dev->name, dev);
return -ENODEV; return -ENODEV;
} }
@@ -2568,7 +2576,7 @@ int unregister_netdevice(struct net_device *dev)
br_write_lock_bh(BR_NETPROTO_LOCK); br_write_lock_bh(BR_NETPROTO_LOCK);
br_write_unlock_bh(BR_NETPROTO_LOCK); br_write_unlock_bh(BR_NETPROTO_LOCK);
if (dev_boot_phase == 0) { if (!dev_boot_phase) {
#ifdef CONFIG_NET_FASTROUTE #ifdef CONFIG_NET_FASTROUTE
dev_clear_fastroute(dev); dev_clear_fastroute(dev);
#endif #endif
@@ -2593,7 +2601,7 @@ int unregister_netdevice(struct net_device *dev)
dev->uninit(dev); dev->uninit(dev);
/* Notifier chain MUST detach us from master device. */ /* Notifier chain MUST detach us from master device. */
BUG_TRAP(dev->master==NULL); BUG_TRAP(!dev->master);
#ifdef CONFIG_NET_DIVERT #ifdef CONFIG_NET_DIVERT
free_divert_blk(dev); free_divert_blk(dev);
@@ -2602,20 +2610,20 @@ int unregister_netdevice(struct net_device *dev)
if (dev->features & NETIF_F_DYNALLOC) { if (dev->features & NETIF_F_DYNALLOC) {
#ifdef NET_REFCNT_DEBUG #ifdef NET_REFCNT_DEBUG
if (atomic_read(&dev->refcnt) != 1) if (atomic_read(&dev->refcnt) != 1)
printk(KERN_DEBUG "unregister_netdevice: holding %s refcnt=%d\n", dev->name, atomic_read(&dev->refcnt)-1); printk(KERN_DEBUG "unregister_netdevice: holding %s "
"refcnt=%d\n",
dev->name, atomic_read(&dev->refcnt) - 1);
#endif #endif
dev_put(dev); goto out;
return 0;
} }
/* Last reference is our one */ /* Last reference is our one */
if (atomic_read(&dev->refcnt) == 1) { if (atomic_read(&dev->refcnt) == 1)
dev_put(dev); goto out;
return 0;
}
#ifdef NET_REFCNT_DEBUG #ifdef NET_REFCNT_DEBUG
printk("unregister_netdevice: waiting %s refcnt=%d\n", dev->name, atomic_read(&dev->refcnt)); printk(KERN_DEBUG "unregister_netdevice: waiting %s refcnt=%d\n",
dev->name, atomic_read(&dev->refcnt));
#endif #endif
/* EXPLANATION. If dev->refcnt is not now 1 (our own reference) /* EXPLANATION. If dev->refcnt is not now 1 (our own reference)
@@ -2623,14 +2631,15 @@ int unregister_netdevice(struct net_device *dev)
to this device and we cannot release it. to this device and we cannot release it.
"New style" devices have destructors, hence we can return from this "New style" devices have destructors, hence we can return from this
function and destructor will do all the work later. As of kernel 2.4.0 function and destructor will do all the work later. As of kernel
there are very few "New Style" devices. 2.4.0 there are very few "New Style" devices.
"Old style" devices expect that the device is free of any references "Old style" devices expect that the device is free of any references
upon exit from this function. upon exit from this function.
We cannot return from this function until all such references have We cannot return from this function until all such references have
fallen away. This is because the caller of this function will probably fallen away. This is because the caller of this function will
immediately kfree(*dev) and then be unloaded via sys_delete_module. probably immediately kfree(*dev) and then be unloaded via
sys_delete_module.
So, we linger until all references fall away. The duration of the So, we linger until all references fall away. The duration of the
linger is basically unbounded! It is driven by, for example, the linger is basically unbounded! It is driven by, for example, the
@@ -2643,20 +2652,22 @@ int unregister_netdevice(struct net_device *dev)
now = warning_time = jiffies; now = warning_time = jiffies;
while (atomic_read(&dev->refcnt) != 1) { while (atomic_read(&dev->refcnt) != 1) {
if ((jiffies - now) > 1*HZ) { if ((jiffies - now) > 1 * HZ) {
/* Rebroadcast unregister notification */ /* Rebroadcast unregister notification */
notifier_call_chain(&netdev_chain, NETDEV_UNREGISTER, dev); notifier_call_chain(&netdev_chain,
NETDEV_UNREGISTER, dev);
} }
current->state = TASK_INTERRUPTIBLE; current->state = TASK_INTERRUPTIBLE;
schedule_timeout(HZ/4); schedule_timeout(HZ / 4);
current->state = TASK_RUNNING; current->state = TASK_RUNNING;
if ((jiffies - warning_time) > 10*HZ) { if ((jiffies - warning_time) > 10 * HZ) {
printk(KERN_EMERG "unregister_netdevice: waiting for %s to " printk(KERN_EMERG "unregister_netdevice: waiting for "
"become free. Usage count = %d\n", "%s to become free. Usage count = %d\n",
dev->name, atomic_read(&dev->refcnt)); dev->name, atomic_read(&dev->refcnt));
warning_time = jiffies; warning_time = jiffies;
} }
} }
out:
dev_put(dev); dev_put(dev);
return 0; return 0;
} }
@@ -2664,7 +2675,7 @@ int unregister_netdevice(struct net_device *dev)
/* /*
* Initialize the DEV module. At boot time this walks the device list and * Initialize the DEV module. At boot time this walks the device list and
* unhooks any devices that fail to initialise (normally hardware not * unhooks any devices that fail to initialise (normally hardware not
* present) and leaves us with a valid list of present and active devices. * present) and leaves us with a valid list of present and active devices.
* *
*/ */
@@ -2692,7 +2703,7 @@ int __init net_dev_init(void)
#ifdef CONFIG_NET_DIVERT #ifdef CONFIG_NET_DIVERT
dv_init(); dv_init();
#endif /* CONFIG_NET_DIVERT */ #endif /* CONFIG_NET_DIVERT */
/* /*
* Initialise the packet receive queues. * Initialise the packet receive queues.
*/ */
@@ -2752,7 +2763,7 @@ int __init net_dev_init(void)
if (strchr(dev->name, '%')) if (strchr(dev->name, '%'))
dev_alloc_name(dev, dev->name); dev_alloc_name(dev, dev->name);
/* /*
* Check boot time settings for the device. * Check boot time settings for the device.
*/ */
netdev_boot_setup_check(dev); netdev_boot_setup_check(dev);
@@ -2770,7 +2781,7 @@ int __init net_dev_init(void)
dev->ifindex = dev_new_index(); dev->ifindex = dev_new_index();
if (dev->iflink == -1) if (dev->iflink == -1)
dev->iflink = dev->ifindex; dev->iflink = dev->ifindex;
if (dev->rebuild_header == NULL) if (!dev->rebuild_header)
dev->rebuild_header = default_rebuild_header; dev->rebuild_header = default_rebuild_header;
dev_init_scheduler(dev); dev_init_scheduler(dev);
set_bit(__LINK_STATE_PRESENT, &dev->state); set_bit(__LINK_STATE_PRESENT, &dev->state);
@@ -2815,7 +2826,7 @@ int __init net_dev_init(void)
/* /*
* Initialise network devices * Initialise network devices
*/ */
net_device_init(); net_device_init();
return 0; return 0;
@@ -2848,7 +2859,7 @@ static int net_run_sbin_hotplug(struct net_device *dev, char *action)
envp [i++] = ifname; envp [i++] = ifname;
envp [i++] = action_str; envp [i++] = action_str;
envp [i] = 0; envp [i] = 0;
return call_usermodehelper(argv [0], argv, envp); return call_usermodehelper(argv [0], argv, envp);
} }
#endif #endif
@@ -21,30 +21,34 @@
* so sockets that fail to connect * so sockets that fail to connect
* don't return -EINPROGRESS. * don't return -EINPROGRESS.
* Alan Cox : Asynchronous I/O support * Alan Cox : Asynchronous I/O support
* Alan Cox : Keep correct socket pointer on sock structures * Alan Cox : Keep correct socket pointer on sock
* structures
* when accept() ed * when accept() ed
* Alan Cox : Semantics of SO_LINGER aren't state moved * Alan Cox : Semantics of SO_LINGER aren't state
* to close when you look carefully. With * moved to close when you look carefully.
* this fixed and the accept bug fixed * With this fixed and the accept bug fixed
* some RPC stuff seems happier. * some RPC stuff seems happier.
* Niibe Yutaka : 4.4BSD style write async I/O * Niibe Yutaka : 4.4BSD style write async I/O
* Alan Cox, * Alan Cox,
* Tony Gale : Fixed reuse semantics. * Tony Gale : Fixed reuse semantics.
* Alan Cox : bind() shouldn't abort existing but dead * Alan Cox : bind() shouldn't abort existing but dead
* sockets. Stops FTP netin:.. I hope. * sockets. Stops FTP netin:.. I hope.
* Alan Cox : bind() works correctly for RAW sockets. Note * Alan Cox : bind() works correctly for RAW sockets.
* that FreeBSD at least was broken in this respect * Note that FreeBSD at least was broken
* so be careful with compatibility tests... * in this respect so be careful with
* compatibility tests...
* Alan Cox : routing cache support * Alan Cox : routing cache support
* Alan Cox : memzero the socket structure for compactness. * Alan Cox : memzero the socket structure for
* compactness.
* Matt Day : nonblock connect error handler * Matt Day : nonblock connect error handler
* Alan Cox : Allow large numbers of pending sockets * Alan Cox : Allow large numbers of pending sockets
* (eg for big web sites), but only if * (eg for big web sites), but only if
* specifically application requested. * specifically application requested.
* Alan Cox : New buffering throughout IP. Used dumbly. * Alan Cox : New buffering throughout IP. Used
* dumbly.
* Alan Cox : New buffering now used smartly. * Alan Cox : New buffering now used smartly.
* Alan Cox : BSD rather than common sense interpretation of * Alan Cox : BSD rather than common sense
* listen. * interpretation of listen.
* Germano Caronni : Assorted small races. * Germano Caronni : Assorted small races.
* Alan Cox : sendmsg/recvmsg basic support. * Alan Cox : sendmsg/recvmsg basic support.
* Alan Cox : Only sendmsg/recvmsg now supported. * Alan Cox : Only sendmsg/recvmsg now supported.
@@ -117,7 +121,7 @@
#include <linux/wireless.h> /* Note : will define WIRELESS_EXT */ #include <linux/wireless.h> /* Note : will define WIRELESS_EXT */
#endif /* CONFIG_NET_RADIO || CONFIG_NET_PCMCIA_RADIO */ #endif /* CONFIG_NET_RADIO || CONFIG_NET_PCMCIA_RADIO */
struct linux_mib net_statistics[NR_CPUS*2]; struct linux_mib net_statistics[NR_CPUS * 2];
#ifdef INET_REFCNT_DEBUG #ifdef INET_REFCNT_DEBUG
atomic_t inet_sock_nr; atomic_t inet_sock_nr;
@@ -132,7 +136,7 @@ extern int udp_get_info(char *, char **, off_t, int);
extern void ip_mc_drop_socket(struct sock *sk); extern void ip_mc_drop_socket(struct sock *sk);
#ifdef CONFIG_DLCI #ifdef CONFIG_DLCI
extern int dlci_ioctl(unsigned int, void*); extern int dlci_ioctl(unsigned int, void *);
#endif #endif
#ifdef CONFIG_DLCI_MODULE #ifdef CONFIG_DLCI_MODULE
@@ -177,17 +181,18 @@ void inet_sock_destruct(struct sock *sk)
return; return;
} }
BUG_TRAP(atomic_read(&sk->rmem_alloc) == 0); BUG_TRAP(!atomic_read(&sk->rmem_alloc));
BUG_TRAP(atomic_read(&sk->wmem_alloc) == 0); BUG_TRAP(!atomic_read(&sk->wmem_alloc));
BUG_TRAP(sk->wmem_queued == 0); BUG_TRAP(!sk->wmem_queued);
BUG_TRAP(sk->forward_alloc == 0); BUG_TRAP(!sk->forward_alloc);
if (inet->opt) if (inet->opt)
kfree(inet->opt); kfree(inet->opt);
dst_release(sk->dst_cache); dst_release(sk->dst_cache);
#ifdef INET_REFCNT_DEBUG #ifdef INET_REFCNT_DEBUG
atomic_dec(&inet_sock_nr); atomic_dec(&inet_sock_nr);
printk(KERN_DEBUG "INET socket %p released, %d are still alive\n", sk, atomic_read(&inet_sock_nr)); printk(KERN_DEBUG "INET socket %p released, %d are still alive\n",
sk, atomic_read(&inet_sock_nr));
#endif #endif
} }
...@@ -221,9 +226,9 @@ void inet_sock_release(struct sock *sk) ...@@ -221,9 +226,9 @@ void inet_sock_release(struct sock *sk)
sock_orphan(sk); sock_orphan(sk);
#ifdef INET_REFCNT_DEBUG #ifdef INET_REFCNT_DEBUG
if (atomic_read(&sk->refcnt) != 1) { if (atomic_read(&sk->refcnt) != 1)
printk(KERN_DEBUG "Destruction inet %p delayed, c=%d\n", sk, atomic_read(&sk->refcnt)); printk(KERN_DEBUG "Destruction inet %p delayed, c=%d\n",
} sk, atomic_read(&sk->refcnt));
#endif #endif
sock_put(sk); sock_put(sk);
} }
...@@ -234,18 +239,16 @@ void inet_sock_release(struct sock *sk) ...@@ -234,18 +239,16 @@ void inet_sock_release(struct sock *sk)
* socket object. Mostly it punts to the subprotocols of IP to do * socket object. Mostly it punts to the subprotocols of IP to do
* the work. * the work.
*/ */
/* /*
* Set socket options on an inet socket. * Set socket options on an inet socket.
*/ */
int inet_setsockopt(struct socket *sock, int level, int optname, int inet_setsockopt(struct socket *sock, int level, int optname,
char *optval, int optlen) char *optval, int optlen)
{ {
struct sock *sk=sock->sk; struct sock *sk = sock->sk;
return sk->prot->setsockopt(sk,level,optname,optval,optlen); return sk->prot->setsockopt(sk, level, optname, optval, optlen);
} }
/* /*
...@@ -259,9 +262,9 @@ int inet_setsockopt(struct socket *sock, int level, int optname, ...@@ -259,9 +262,9 @@ int inet_setsockopt(struct socket *sock, int level, int optname,
int inet_getsockopt(struct socket *sock, int level, int optname, int inet_getsockopt(struct socket *sock, int level, int optname,
char *optval, int *optlen) char *optval, int *optlen)
{ {
struct sock *sk=sock->sk; struct sock *sk = sock->sk;
return sk->prot->getsockopt(sk,level,optname,optval,optlen); return sk->prot->getsockopt(sk, level, optname, optval, optlen);
} }
/* /*
...@@ -270,11 +273,12 @@ int inet_getsockopt(struct socket *sock, int level, int optname, ...@@ -270,11 +273,12 @@ int inet_getsockopt(struct socket *sock, int level, int optname,
static int inet_autobind(struct sock *sk) static int inet_autobind(struct sock *sk)
{ {
struct inet_opt *inet = inet_sk(sk); struct inet_opt *inet;
/* We may need to bind the socket. */ /* We may need to bind the socket. */
lock_sock(sk); lock_sock(sk);
inet = inet_sk(sk);
if (!inet->num) { if (!inet->num) {
if (sk->prot->get_port(sk, 0) != 0) { if (sk->prot->get_port(sk, 0)) {
release_sock(sk); release_sock(sk);
return -EAGAIN; return -EAGAIN;
} }
...@@ -287,7 +291,6 @@ static int inet_autobind(struct sock *sk) ...@@ -287,7 +291,6 @@ static int inet_autobind(struct sock *sk)
/* /*
* Move a socket into listening state. * Move a socket into listening state.
*/ */
int inet_listen(struct socket *sock, int backlog) int inet_listen(struct socket *sock, int backlog)
{ {
struct sock *sk = sock->sk; struct sock *sk = sock->sk;
...@@ -301,7 +304,7 @@ int inet_listen(struct socket *sock, int backlog) ...@@ -301,7 +304,7 @@ int inet_listen(struct socket *sock, int backlog)
goto out; goto out;
old_state = sk->state; old_state = sk->state;
if (!((1<<old_state)&(TCPF_CLOSE|TCPF_LISTEN))) if (!((1 << old_state) & (TCPF_CLOSE | TCPF_LISTEN)))
goto out; goto out;
/* Really, if the socket is already in listen state /* Really, if the socket is already in listen state
...@@ -349,16 +352,17 @@ static __inline__ int inet_sk_size(int protocol) ...@@ -349,16 +352,17 @@ static __inline__ int inet_sk_size(int protocol)
static int inet_create(struct socket *sock, int protocol) static int inet_create(struct socket *sock, int protocol)
{ {
struct sock *sk; struct sock *sk;
struct list_head *p; struct list_head *p;
struct inet_protosw *answer; struct inet_protosw *answer;
struct inet_opt *inet; struct inet_opt *inet;
int err = -ENOBUFS;
sock->state = SS_UNCONNECTED; sock->state = SS_UNCONNECTED;
sk = sk_alloc(PF_INET, GFP_KERNEL, inet_sk_size(protocol), sk = sk_alloc(PF_INET, GFP_KERNEL, inet_sk_size(protocol),
inet_sk_slab(protocol)); inet_sk_slab(protocol));
if (sk == NULL) if (!sk)
goto do_oom; goto out;
/* Look for the requested type/protocol pair. */ /* Look for the requested type/protocol pair. */
answer = NULL; answer = NULL;
br_read_lock_bh(BR_NETPROTO_LOCK); br_read_lock_bh(BR_NETPROTO_LOCK);
...@@ -382,13 +386,16 @@ static int inet_create(struct socket *sock, int protocol) ...@@ -382,13 +386,16 @@ static int inet_create(struct socket *sock, int protocol)
} }
br_read_unlock_bh(BR_NETPROTO_LOCK); br_read_unlock_bh(BR_NETPROTO_LOCK);
err = -ESOCKTNOSUPPORT;
if (!answer) if (!answer)
goto free_and_badtype; goto out_sk_free;
err = -EPERM;
if (answer->capability > 0 && !capable(answer->capability)) if (answer->capability > 0 && !capable(answer->capability))
goto free_and_badperm; goto out_sk_free;
err = -EPROTONOSUPPORT;
if (!protocol) if (!protocol)
goto free_and_noproto; goto out_sk_free;
err = 0;
sock->ops = answer->ops; sock->ops = answer->ops;
sk->prot = answer->prot; sk->prot = answer->prot;
sk->no_check = answer->no_check; sk->no_check = answer->no_check;
...@@ -410,18 +417,15 @@ static int inet_create(struct socket *sock, int protocol) ...@@ -410,18 +417,15 @@ static int inet_create(struct socket *sock, int protocol)
inet->id = 0; inet->id = 0;
sock_init_data(sock,sk); sock_init_data(sock, sk);
sk->destruct = inet_sock_destruct; sk->destruct = inet_sock_destruct;
sk->zapped = 0; sk->zapped = 0;
sk->family = PF_INET; sk->family = PF_INET;
sk->protocol = protocol; sk->protocol = protocol;
sk->backlog_rcv = sk->prot->backlog_rcv; sk->backlog_rcv = sk->prot->backlog_rcv;
inet->ttl = sysctl_ip_default_ttl; inet->ttl = sysctl_ip_default_ttl;
inet->mc_loop = 1; inet->mc_loop = 1;
inet->mc_ttl = 1; inet->mc_ttl = 1;
inet->mc_index = 0; inet->mc_index = 0;
...@@ -438,34 +442,20 @@ static int inet_create(struct socket *sock, int protocol) ...@@ -438,34 +442,20 @@ static int inet_create(struct socket *sock, int protocol)
* shares. * shares.
*/ */
inet->sport = htons(inet->num); inet->sport = htons(inet->num);
/* Add to protocol hash chains. */ /* Add to protocol hash chains. */
sk->prot->hash(sk); sk->prot->hash(sk);
} }
if (sk->prot->init) { if (sk->prot->init) {
int err = sk->prot->init(sk); err = sk->prot->init(sk);
if (err != 0) { if (err)
inet_sock_release(sk); inet_sock_release(sk);
return err;
}
} }
return 0; out:
return err;
free_and_badtype: out_sk_free:
sk_free(sk);
return -ESOCKTNOSUPPORT;
free_and_badperm:
sk_free(sk);
return -EPERM;
free_and_noproto:
sk_free(sk); sk_free(sk);
return -EPROTONOSUPPORT; goto out;
do_oom:
return -ENOBUFS;
} }
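
A minimal sketch (not part of this patch, hypothetical names) of the single-exit error handling that the inet_create() hunk above converges on: one err variable set before each check, and one cleanup label that frees and falls through to the common exit. malloc/free stand in for sk_alloc/sk_free.

#include <errno.h>
#include <stdlib.h>

/* Hypothetical create_object(): mirrors the out / out_sk_free structure above. */
static int create_object(int type, void **out_obj)
{
	int err = -ENOBUFS;
	void *obj = malloc(64);		/* stand-in for sk_alloc() */

	if (!obj)
		goto out;
	err = -ESOCKTNOSUPPORT;
	if (type < 0)			/* stand-in for "no protocol match" */
		goto out_free;
	err = 0;
	*out_obj = obj;
out:
	return err;
out_free:
	free(obj);			/* stand-in for sk_free() */
	goto out;
}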
...@@ -474,7 +464,6 @@ static int inet_create(struct socket *sock, int protocol) ...@@ -474,7 +464,6 @@ static int inet_create(struct socket *sock, int protocol)
* function we are destroying the object and from then on nobody * function we are destroying the object and from then on nobody
* should refer to it. * should refer to it.
*/ */
int inet_release(struct socket *sock) int inet_release(struct socket *sock)
{ {
struct sock *sk = sock->sk; struct sock *sk = sock->sk;
...@@ -498,7 +487,7 @@ int inet_release(struct socket *sock) ...@@ -498,7 +487,7 @@ int inet_release(struct socket *sock)
sock->sk = NULL; sock->sk = NULL;
sk->prot->close(sk, timeout); sk->prot->close(sk, timeout);
} }
return(0); return 0;
} }
/* It is off by default, see below. */ /* It is off by default, see below. */
...@@ -506,19 +495,21 @@ int sysctl_ip_nonlocal_bind; ...@@ -506,19 +495,21 @@ int sysctl_ip_nonlocal_bind;
static int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) static int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{ {
struct sockaddr_in *addr=(struct sockaddr_in *)uaddr; struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
struct sock *sk=sock->sk; struct sock *sk = sock->sk;
struct inet_opt *inet = inet_sk(sk); struct inet_opt *inet = inet_sk(sk);
unsigned short snum; unsigned short snum;
int chk_addr_ret; int chk_addr_ret;
int err; int err;
/* If the socket has its own bind function then use it. (RAW) */ /* If the socket has its own bind function then use it. (RAW) */
if(sk->prot->bind) if (sk->prot->bind) {
return sk->prot->bind(sk, uaddr, addr_len); err = sk->prot->bind(sk, uaddr, addr_len);
goto out;
}
err = -EINVAL;
if (addr_len < sizeof(struct sockaddr_in)) if (addr_len < sizeof(struct sockaddr_in))
return -EINVAL; goto out;
chk_addr_ret = inet_addr_type(addr->sin_addr.s_addr); chk_addr_ret = inet_addr_type(addr->sin_addr.s_addr);
...@@ -529,17 +520,19 @@ static int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) ...@@ -529,17 +520,19 @@ static int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
* (ie. your servers still start up even if your ISDN link * (ie. your servers still start up even if your ISDN link
* is temporarily down) * is temporarily down)
*/ */
if (sysctl_ip_nonlocal_bind == 0 && err = -EADDRNOTAVAIL;
inet->freebind == 0 && if (!sysctl_ip_nonlocal_bind &&
!inet->freebind &&
addr->sin_addr.s_addr != INADDR_ANY && addr->sin_addr.s_addr != INADDR_ANY &&
chk_addr_ret != RTN_LOCAL && chk_addr_ret != RTN_LOCAL &&
chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_MULTICAST &&
chk_addr_ret != RTN_BROADCAST) chk_addr_ret != RTN_BROADCAST)
return -EADDRNOTAVAIL; goto out;
snum = ntohs(addr->sin_port); snum = ntohs(addr->sin_port);
err = -EACCES;
if (snum && snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE)) if (snum && snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
return -EACCES; goto out;
/* We keep a pair of addresses. rcv_saddr is the one /* We keep a pair of addresses. rcv_saddr is the one
* used by hash lookups, and saddr is used for transmit. * used by hash lookups, and saddr is used for transmit.
...@@ -553,17 +546,17 @@ static int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) ...@@ -553,17 +546,17 @@ static int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
/* Check these errors (active socket, double bind). */ /* Check these errors (active socket, double bind). */
err = -EINVAL; err = -EINVAL;
if (sk->state != TCP_CLOSE || inet->num) if (sk->state != TCP_CLOSE || inet->num)
goto out; goto out_release_sock;
inet->rcv_saddr = inet->saddr = addr->sin_addr.s_addr; inet->rcv_saddr = inet->saddr = addr->sin_addr.s_addr;
if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST) if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
inet->saddr = 0; /* Use device */ inet->saddr = 0; /* Use device */
/* Make sure we are allowed to bind here. */ /* Make sure we are allowed to bind here. */
if (sk->prot->get_port(sk, snum) != 0) { if (sk->prot->get_port(sk, snum)) {
inet->saddr = inet->rcv_saddr = 0; inet->saddr = inet->rcv_saddr = 0;
err = -EADDRINUSE; err = -EADDRINUSE;
goto out; goto out_release_sock;
} }
if (inet->rcv_saddr) if (inet->rcv_saddr)
...@@ -575,15 +568,16 @@ static int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) ...@@ -575,15 +568,16 @@ static int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
inet->dport = 0; inet->dport = 0;
sk_dst_reset(sk); sk_dst_reset(sk);
err = 0; err = 0;
out: out_release_sock:
release_sock(sk); release_sock(sk);
out:
return err; return err;
} }
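
For reference, a minimal user-space sketch of the bind(2) call that this inet_bind() path serves; the port number and INADDR_ANY are arbitrary example choices, not taken from the patch. A port below PROT_SOCK (1024) without CAP_NET_BIND_SERVICE returns EACCES, and a non-local source address returns EADDRNOTAVAIL unless ip_nonlocal_bind or the socket's freebind option is set.

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	struct sockaddr_in sin;

	if (fd < 0)
		return 1;
	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_port = htons(8080);			/* >= 1024: no capability needed */
	sin.sin_addr.s_addr = htonl(INADDR_ANY);	/* INADDR_ANY always passes the address check */

	if (bind(fd, (struct sockaddr *)&sin, sizeof(sin)) < 0)
		perror("bind");				/* EACCES / EADDRNOTAVAIL / EADDRINUSE */
	close(fd);
	return 0;
}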
int inet_dgram_connect(struct socket *sock, struct sockaddr * uaddr, int inet_dgram_connect(struct socket *sock, struct sockaddr * uaddr,
int addr_len, int flags) int addr_len, int flags)
{ {
struct sock *sk=sock->sk; struct sock *sk = sock->sk;
if (uaddr->sa_family == AF_UNSPEC) if (uaddr->sa_family == AF_UNSPEC)
return sk->prot->disconnect(sk, flags); return sk->prot->disconnect(sk, flags);
...@@ -605,7 +599,7 @@ static long inet_wait_for_connect(struct sock *sk, long timeo) ...@@ -605,7 +599,7 @@ static long inet_wait_for_connect(struct sock *sk, long timeo)
* Connect() does not allow one to get error notifications * Connect() does not allow one to get error notifications
* without closing the socket. * without closing the socket.
*/ */
while ((1<<sk->state)&(TCPF_SYN_SENT|TCPF_SYN_RECV)) { while ((1 << sk->state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
release_sock(sk); release_sock(sk);
timeo = schedule_timeout(timeo); timeo = schedule_timeout(timeo);
lock_sock(sk); lock_sock(sk);
...@@ -622,11 +616,10 @@ static long inet_wait_for_connect(struct sock *sk, long timeo) ...@@ -622,11 +616,10 @@ static long inet_wait_for_connect(struct sock *sk, long timeo)
* Connect to a remote host. There is regrettably still a little * Connect to a remote host. There is regrettably still a little
* TCP 'magic' in here. * TCP 'magic' in here.
*/ */
int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
int inet_stream_connect(struct socket *sock, struct sockaddr * uaddr,
int addr_len, int flags) int addr_len, int flags)
{ {
struct sock *sk=sock->sk; struct sock *sk = sock->sk;
int err; int err;
long timeo; long timeo;
...@@ -651,7 +644,7 @@ int inet_stream_connect(struct socket *sock, struct sockaddr * uaddr, ...@@ -651,7 +644,7 @@ int inet_stream_connect(struct socket *sock, struct sockaddr * uaddr,
break; break;
case SS_UNCONNECTED: case SS_UNCONNECTED:
err = -EISCONN; err = -EISCONN;
if (sk->state != TCP_CLOSE) if (sk->state != TCP_CLOSE)
goto out; goto out;
err = sk->prot->connect(sk, uaddr, addr_len); err = sk->prot->connect(sk, uaddr, addr_len);
...@@ -668,9 +661,9 @@ int inet_stream_connect(struct socket *sock, struct sockaddr * uaddr, ...@@ -668,9 +661,9 @@ int inet_stream_connect(struct socket *sock, struct sockaddr * uaddr,
break; break;
} }
timeo = sock_sndtimeo(sk, flags&O_NONBLOCK); timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
if ((1<<sk->state)&(TCPF_SYN_SENT|TCPF_SYN_RECV)) { if ((1 << sk->state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
/* Error code is set above */ /* Error code is set above */
if (!timeo || !inet_wait_for_connect(sk, timeo)) if (!timeo || !inet_wait_for_connect(sk, timeo))
goto out; goto out;
...@@ -712,22 +705,22 @@ int inet_stream_connect(struct socket *sock, struct sockaddr * uaddr, ...@@ -712,22 +705,22 @@ int inet_stream_connect(struct socket *sock, struct sockaddr * uaddr,
int inet_accept(struct socket *sock, struct socket *newsock, int flags) int inet_accept(struct socket *sock, struct socket *newsock, int flags)
{ {
struct sock *sk1 = sock->sk; struct sock *sk1 = sock->sk;
struct sock *sk2;
int err = -EINVAL; int err = -EINVAL;
struct sock *sk2 = sk1->prot->accept(sk1, flags, &err);
if((sk2 = sk1->prot->accept(sk1,flags,&err)) == NULL) if (!sk2)
goto do_err; goto do_err;
lock_sock(sk2); lock_sock(sk2);
BUG_TRAP((1<<sk2->state)&(TCPF_ESTABLISHED|TCPF_CLOSE_WAIT|TCPF_CLOSE)); BUG_TRAP((1 << sk2->state) &
(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_CLOSE));
sock_graft(sk2, newsock); sock_graft(sk2, newsock);
newsock->state = SS_CONNECTED; newsock->state = SS_CONNECTED;
err = 0;
release_sock(sk2); release_sock(sk2);
return 0;
do_err: do_err:
return err; return err;
} }
...@@ -736,19 +729,18 @@ int inet_accept(struct socket *sock, struct socket *newsock, int flags) ...@@ -736,19 +729,18 @@ int inet_accept(struct socket *sock, struct socket *newsock, int flags)
/* /*
* This does both peername and sockname. * This does both peername and sockname.
*/ */
static int inet_getname(struct socket *sock, struct sockaddr *uaddr, static int inet_getname(struct socket *sock, struct sockaddr *uaddr,
int *uaddr_len, int peer) int *uaddr_len, int peer)
{ {
struct sock *sk = sock->sk; struct sock *sk = sock->sk;
struct inet_opt *inet = inet_sk(sk); struct inet_opt *inet = inet_sk(sk);
struct sockaddr_in *sin = (struct sockaddr_in *)uaddr; struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
sin->sin_family = AF_INET; sin->sin_family = AF_INET;
if (peer) { if (peer) {
if (!inet->dport) if (!inet->dport ||
return -ENOTCONN; (((1 << sk->state) & (TCPF_CLOSE | TCPF_SYN_SENT)) &&
if (((1<<sk->state)&(TCPF_CLOSE|TCPF_SYN_SENT)) && peer == 1) peer == 1))
return -ENOTCONN; return -ENOTCONN;
sin->sin_port = inet->dport; sin->sin_port = inet->dport;
sin->sin_addr.s_addr = inet->daddr; sin->sin_addr.s_addr = inet->daddr;
...@@ -760,7 +752,7 @@ static int inet_getname(struct socket *sock, struct sockaddr *uaddr, ...@@ -760,7 +752,7 @@ static int inet_getname(struct socket *sock, struct sockaddr *uaddr,
sin->sin_addr.s_addr = addr; sin->sin_addr.s_addr = addr;
} }
*uaddr_len = sizeof(*sin); *uaddr_len = sizeof(*sin);
return(0); return 0;
} }
...@@ -770,10 +762,8 @@ int inet_recvmsg(struct socket *sock, struct msghdr *msg, int size, ...@@ -770,10 +762,8 @@ int inet_recvmsg(struct socket *sock, struct msghdr *msg, int size,
{ {
struct sock *sk = sock->sk; struct sock *sk = sock->sk;
int addr_len = 0; int addr_len = 0;
int err; int err = sk->prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT,
flags & ~MSG_DONTWAIT, &addr_len);
err = sk->prot->recvmsg(sk, msg, size, flags&MSG_DONTWAIT,
flags&~MSG_DONTWAIT, &addr_len);
if (err >= 0) if (err >= 0)
msg->msg_namelen = addr_len; msg->msg_namelen = addr_len;
return err; return err;
...@@ -803,12 +793,13 @@ int inet_shutdown(struct socket *sock, int how) ...@@ -803,12 +793,13 @@ int inet_shutdown(struct socket *sock, int how)
how++; /* maps 0->1 has the advantage of making bit 1 rcvs and how++; /* maps 0->1 has the advantage of making bit 1 rcvs and
1->2 bit 2 snds. 1->2 bit 2 snds.
2->3 */ 2->3 */
if ((how & ~SHUTDOWN_MASK) || how==0) /* MAXINT->0 */ if ((how & ~SHUTDOWN_MASK) || !how) /* MAXINT->0 */
return -EINVAL; return -EINVAL;
lock_sock(sk); lock_sock(sk);
if (sock->state == SS_CONNECTING) { if (sock->state == SS_CONNECTING) {
if ((1<<sk->state)&(TCPF_SYN_SENT|TCPF_SYN_RECV|TCPF_CLOSE)) if ((1 << sk->state) &
(TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE))
sock->state = SS_DISCONNECTING; sock->state = SS_DISCONNECTING;
else else
sock->state = SS_CONNECTED; sock->state = SS_CONNECTED;
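
An annotation of the how++ trick above (illustrative only, values as defined in the contemporary net/sock.h): shutdown(2) passes how in {0,1,2}, and incrementing it turns the constant into the kernel's sk->shutdown bit mask.

/*   SHUT_RD   (0) -> 1 == RCV_SHUTDOWN                   no more receives
 *   SHUT_WR   (1) -> 2 == SEND_SHUTDOWN                  no more sends
 *   SHUT_RDWR (2) -> 3 == RCV_SHUTDOWN | SEND_SHUTDOWN   both directions
 * e.g. shutdown(fd, SHUT_WR) on TCP sends a FIN but still allows reads. */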
...@@ -858,38 +849,42 @@ int inet_shutdown(struct socket *sock, int how) ...@@ -858,38 +849,42 @@ int inet_shutdown(struct socket *sock, int how)
static int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) static int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{ {
struct sock *sk = sock->sk; struct sock *sk = sock->sk;
int err; int err = 0;
int pid; int pid;
switch(cmd) { switch (cmd) {
case FIOSETOWN: case FIOSETOWN:
case SIOCSPGRP: case SIOCSPGRP:
err = get_user(pid, (int *) arg); if (get_user(pid, (int *)arg))
if (err) err = -EFAULT;
return err; else if (current->pid != pid &&
if (current->pid != pid && current->pgrp != -pid && current->pgrp != -pid &&
!capable(CAP_NET_ADMIN)) !capable(CAP_NET_ADMIN))
return -EPERM; err = -EPERM;
sk->proc = pid; else
return(0); sk->proc = pid;
break;
case FIOGETOWN: case FIOGETOWN:
case SIOCGPGRP: case SIOCGPGRP:
return put_user(sk->proc, (int *)arg); err = put_user(sk->proc, (int *)arg);
break;
case SIOCGSTAMP: case SIOCGSTAMP:
if(sk->stamp.tv_sec==0) if (!sk->stamp.tv_sec)
return -ENOENT; err = -ENOENT;
err = copy_to_user((void *)arg,&sk->stamp,sizeof(struct timeval)); else if (copy_to_user((void *)arg, &sk->stamp,
if (err) sizeof(struct timeval)))
err = -EFAULT; err = -EFAULT;
return err; break;
case SIOCADDRT: case SIOCADDRT:
case SIOCDELRT: case SIOCDELRT:
case SIOCRTMSG: case SIOCRTMSG:
return(ip_rt_ioctl(cmd,(void *) arg)); err = ip_rt_ioctl(cmd, (void *)arg);
break;
case SIOCDARP: case SIOCDARP:
case SIOCGARP: case SIOCGARP:
case SIOCSARP: case SIOCSARP:
return(arp_ioctl(cmd,(void *) arg)); err = arp_ioctl(cmd, (void *)arg);
break;
case SIOCGIFADDR: case SIOCGIFADDR:
case SIOCSIFADDR: case SIOCSIFADDR:
case SIOCGIFBRDADDR: case SIOCGIFBRDADDR:
...@@ -898,83 +893,82 @@ static int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) ...@@ -898,83 +893,82 @@ static int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
case SIOCSIFNETMASK: case SIOCSIFNETMASK:
case SIOCGIFDSTADDR: case SIOCGIFDSTADDR:
case SIOCSIFDSTADDR: case SIOCSIFDSTADDR:
case SIOCSIFPFLAGS: case SIOCSIFPFLAGS:
case SIOCGIFPFLAGS: case SIOCGIFPFLAGS:
case SIOCSIFFLAGS: case SIOCSIFFLAGS:
return(devinet_ioctl(cmd,(void *) arg)); err = devinet_ioctl(cmd, (void *)arg);
break;
case SIOCGIFBR: case SIOCGIFBR:
case SIOCSIFBR: case SIOCSIFBR:
#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
#ifdef CONFIG_KMOD #ifdef CONFIG_KMOD
if (br_ioctl_hook == NULL) if (!br_ioctl_hook)
request_module("bridge"); request_module("bridge");
#endif #endif
if (br_ioctl_hook != NULL) if (br_ioctl_hook)
return br_ioctl_hook(arg); err = br_ioctl_hook(arg);
else
#endif #endif
return -ENOPKG; err = -ENOPKG;
break;
case SIOCGIFVLAN: case SIOCGIFVLAN:
case SIOCSIFVLAN: case SIOCSIFVLAN:
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#ifdef CONFIG_KMOD #ifdef CONFIG_KMOD
if (vlan_ioctl_hook == NULL) if (!vlan_ioctl_hook)
request_module("8021q"); request_module("8021q");
#endif #endif
if (vlan_ioctl_hook != NULL) if (vlan_ioctl_hook)
return vlan_ioctl_hook(arg); err = vlan_ioctl_hook(arg);
else
#endif #endif
return -ENOPKG; err = -ENOPKG;
break;
case SIOCGIFDIVERT: case SIOCGIFDIVERT:
case SIOCSIFDIVERT: case SIOCSIFDIVERT:
#ifdef CONFIG_NET_DIVERT #ifdef CONFIG_NET_DIVERT
return divert_ioctl(cmd, (struct divert_cf *) arg); err = divert_ioctl(cmd, (struct divert_cf *)arg);
#else #else
return -ENOPKG; err = -ENOPKG;
#endif /* CONFIG_NET_DIVERT */ #endif /* CONFIG_NET_DIVERT */
break;
case SIOCADDDLCI: case SIOCADDDLCI:
case SIOCDELDLCI: case SIOCDELDLCI:
#ifdef CONFIG_DLCI #ifdef CONFIG_DLCI
lock_kernel(); lock_kernel();
err = dlci_ioctl(cmd, (void *) arg); err = dlci_ioctl(cmd, (void *)arg);
unlock_kernel(); unlock_kernel();
return err; break;
#endif #elif CONFIG_DLCI_MODULE
#ifdef CONFIG_DLCI_MODULE
#ifdef CONFIG_KMOD #ifdef CONFIG_KMOD
if (dlci_ioctl_hook == NULL) if (!dlci_ioctl_hook)
request_module("dlci"); request_module("dlci");
#endif #endif
if (dlci_ioctl_hook) { if (dlci_ioctl_hook) {
lock_kernel(); lock_kernel();
err = (*dlci_ioctl_hook)(cmd, (void *) arg); err = (*dlci_ioctl_hook)(cmd, (void *)arg);
unlock_kernel(); unlock_kernel();
return err; } else
}
#endif #endif
return -ENOPKG; err = -ENOPKG;
break;
default: default:
if ((cmd >= SIOCDEVPRIVATE) && if (cmd >= SIOCDEVPRIVATE &&
(cmd <= (SIOCDEVPRIVATE + 15))) cmd <= (SIOCDEVPRIVATE + 15))
return(dev_ioctl(cmd,(void *) arg)); err = dev_ioctl(cmd, (void *)arg);
else
#ifdef WIRELESS_EXT #ifdef WIRELESS_EXT
if((cmd >= SIOCIWFIRST) && (cmd <= SIOCIWLAST)) if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
return(dev_ioctl(cmd,(void *) arg)); err = dev_ioctl(cmd, (void *)arg);
else
#endif /* WIRELESS_EXT */ #endif /* WIRELESS_EXT */
if (!sk->prot->ioctl ||
if (sk->prot->ioctl==NULL || (err=sk->prot->ioctl(sk, cmd, arg))==-ENOIOCTLCMD) (err = sk->prot->ioctl(sk, cmd, arg)) ==
return(dev_ioctl(cmd,(void *) arg)); -ENOIOCTLCMD)
return err; err = dev_ioctl(cmd, (void *)arg);
break;
} }
/*NOTREACHED*/ return err;
return(0);
} }
struct proto_ops inet_stream_ops = { struct proto_ops inet_stream_ops = {
...@@ -985,7 +979,7 @@ struct proto_ops inet_stream_ops = { ...@@ -985,7 +979,7 @@ struct proto_ops inet_stream_ops = {
connect: inet_stream_connect, connect: inet_stream_connect,
socketpair: sock_no_socketpair, socketpair: sock_no_socketpair,
accept: inet_accept, accept: inet_accept,
getname: inet_getname, getname: inet_getname,
poll: tcp_poll, poll: tcp_poll,
ioctl: inet_ioctl, ioctl: inet_ioctl,
listen: inet_listen, listen: inet_listen,
...@@ -1006,7 +1000,7 @@ struct proto_ops inet_dgram_ops = { ...@@ -1006,7 +1000,7 @@ struct proto_ops inet_dgram_ops = {
connect: inet_dgram_connect, connect: inet_dgram_connect,
socketpair: sock_no_socketpair, socketpair: sock_no_socketpair,
accept: sock_no_accept, accept: sock_no_accept,
getname: inet_getname, getname: inet_getname,
poll: datagram_poll, poll: datagram_poll,
ioctl: inet_ioctl, ioctl: inet_ioctl,
listen: sock_no_listen, listen: sock_no_listen,
...@@ -1067,8 +1061,7 @@ static struct inet_protosw inetsw_array[] = ...@@ -1067,8 +1061,7 @@ static struct inet_protosw inetsw_array[] =
#define INETSW_ARRAY_LEN (sizeof(inetsw_array) / sizeof(struct inet_protosw)) #define INETSW_ARRAY_LEN (sizeof(inetsw_array) / sizeof(struct inet_protosw))
void void inet_register_protosw(struct inet_protosw *p)
inet_register_protosw(struct inet_protosw *p)
{ {
struct list_head *lh; struct list_head *lh;
struct inet_protosw *answer; struct inet_protosw *answer;
...@@ -1115,8 +1108,7 @@ inet_register_protosw(struct inet_protosw *p) ...@@ -1115,8 +1108,7 @@ inet_register_protosw(struct inet_protosw *p)
goto out; goto out;
} }
void void inet_unregister_protosw(struct inet_protosw *p)
inet_unregister_protosw(struct inet_protosw *p)
{ {
if (INET_PROTOSW_PERMANENT & p->flags) { if (INET_PROTOSW_PERMANENT & p->flags) {
printk(KERN_ERR printk(KERN_ERR
...@@ -1133,7 +1125,7 @@ inet_unregister_protosw(struct inet_protosw *p) ...@@ -1133,7 +1125,7 @@ inet_unregister_protosw(struct inet_protosw *p)
/* /*
* Called by socket.c on kernel startup. * Called by socket.c on kernel startup.
*/ */
static int __init inet_init(void) static int __init inet_init(void)
{ {
struct sk_buff *dummy_skb; struct sk_buff *dummy_skb;
...@@ -1157,32 +1149,32 @@ static int __init inet_init(void) ...@@ -1157,32 +1149,32 @@ static int __init inet_init(void)
raw4_sk_cachep = kmem_cache_create("raw4_sock", raw4_sk_cachep = kmem_cache_create("raw4_sock",
sizeof(struct raw_sock), 0, sizeof(struct raw_sock), 0,
SLAB_HWCACHE_ALIGN, 0, 0); SLAB_HWCACHE_ALIGN, 0, 0);
if (!tcp_sk_cachep || !udp_sk_cachep || !raw4_sk_cachep) if (!tcp_sk_cachep || !udp_sk_cachep || !raw4_sk_cachep)
printk(KERN_CRIT printk(KERN_CRIT
"inet_init: Can't create protocol sock SLAB caches!\n"); "inet_init: Can't create protocol sock SLAB caches!\n");
/* /*
* Tell SOCKET that we are alive... * Tell SOCKET that we are alive...
*/ */
(void) sock_register(&inet_family_ops); (void)sock_register(&inet_family_ops);
/* /*
* Add all the protocols. * Add all the protocols.
*/ */
printk(KERN_INFO "IP Protocols: "); printk(KERN_INFO "IP Protocols: ");
for (p = inet_protocol_base; p != NULL;) { for (p = inet_protocol_base; p;) {
struct inet_protocol *tmp = (struct inet_protocol *) p->next; struct inet_protocol *tmp = (struct inet_protocol *)p->next;
inet_add_protocol(p); inet_add_protocol(p);
printk("%s%s",p->name,tmp?", ":"\n"); printk("%s%s", p->name, tmp ? ", " : "\n");
p = tmp; p = tmp;
} }
/* Register the socket-side information for inet_create. */ /* Register the socket-side information for inet_create. */
for(r = &inetsw[0]; r < &inetsw[SOCK_MAX]; ++r) for (r = &inetsw[0]; r < &inetsw[SOCK_MAX]; ++r)
INIT_LIST_HEAD(r); INIT_LIST_HEAD(r);
for(q = inetsw_array; q < &inetsw_array[INETSW_ARRAY_LEN]; ++q) for (q = inetsw_array; q < &inetsw_array[INETSW_ARRAY_LEN]; ++q)
inet_register_protosw(q); inet_register_protosw(q);
/* /*
......
...@@ -18,16 +18,17 @@ ...@@ -18,16 +18,17 @@
* Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> * Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
* *
* Changes: * Changes:
* Alexey Kuznetsov: pa_* fields are replaced with ifaddr lists. * Alexey Kuznetsov: pa_* fields are replaced with ifaddr
* lists.
* Cyrus Durgin: updated for kmod * Cyrus Durgin: updated for kmod
* Matthias Andree: in devinet_ioctl, compare label and * Matthias Andree: in devinet_ioctl, compare label and
* address (4.4BSD alias style support), * address (4.4BSD alias style support),
* fall back to comparing just the label * fall back to comparing just the label
* if no match found. * if no match found.
*/ */
#include <linux/config.h> #include <linux/config.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/system.h> #include <asm/system.h>
#include <asm/bitops.h> #include <asm/bitops.h>
...@@ -60,15 +61,29 @@ ...@@ -60,15 +61,29 @@
#include <net/route.h> #include <net/route.h>
#include <net/ip_fib.h> #include <net/ip_fib.h>
struct ipv4_devconf ipv4_devconf = { 1, 1, 1, 1, 0, }; struct ipv4_devconf ipv4_devconf = {
static struct ipv4_devconf ipv4_devconf_dflt = { 1, 1, 1, 1, 1, }; accept_redirects: 1,
send_redirects: 1,
secure_redirects: 1,
shared_media: 1,
};
static struct ipv4_devconf ipv4_devconf_dflt = {
accept_redirects: 1,
send_redirects: 1,
secure_redirects: 1,
shared_media: 1,
accept_source_route: 1,
};
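
A small sketch of the initializer style change above, on a hypothetical struct rather than ipv4_devconf itself: the old form is positional and order-dependent, the new form uses gcc's labeled-element syntax (the pre-C99 spelling of designated initializers; C99 writes ".field = value"), and unnamed members are zero-filled.

struct conf {
	int accept_redirects, send_redirects, secure_redirects;
	int shared_media, accept_source_route;
};

static struct conf positional = { 1, 1, 1, 1, 0 };	/* relies on member order */
static struct conf labelled = {				/* gcc "label:" form */
	accept_redirects:	1,
	send_redirects:		1,
	secure_redirects:	1,
	shared_media:		1,
	/* accept_source_route is implicitly 0 */
};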
static void rtmsg_ifa(int event, struct in_ifaddr *); static void rtmsg_ifa(int event, struct in_ifaddr *);
static struct notifier_block *inetaddr_chain; static struct notifier_block *inetaddr_chain;
static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap, int destroy); static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
int destroy);
#ifdef CONFIG_SYSCTL #ifdef CONFIG_SYSCTL
static void devinet_sysctl_register(struct in_device *in_dev, struct ipv4_devconf *p); static void devinet_sysctl_register(struct in_device *in_dev,
struct ipv4_devconf *p);
static void devinet_sysctl_unregister(struct ipv4_devconf *p); static void devinet_sysctl_unregister(struct ipv4_devconf *p);
#endif #endif
...@@ -79,12 +94,10 @@ int inet_dev_count; ...@@ -79,12 +94,10 @@ int inet_dev_count;
rwlock_t inetdev_lock = RW_LOCK_UNLOCKED; rwlock_t inetdev_lock = RW_LOCK_UNLOCKED;
static struct in_ifaddr *inet_alloc_ifa(void)
static struct in_ifaddr * inet_alloc_ifa(void)
{ {
struct in_ifaddr *ifa; struct in_ifaddr *ifa = kmalloc(sizeof(*ifa), GFP_KERNEL);
ifa = kmalloc(sizeof(*ifa), GFP_KERNEL);
if (ifa) { if (ifa) {
memset(ifa, 0, sizeof(*ifa)); memset(ifa, 0, sizeof(*ifa));
inet_ifa_count++; inet_ifa_count++;
...@@ -105,18 +118,19 @@ void in_dev_finish_destroy(struct in_device *idev) ...@@ -105,18 +118,19 @@ void in_dev_finish_destroy(struct in_device *idev)
{ {
struct net_device *dev = idev->dev; struct net_device *dev = idev->dev;
BUG_TRAP(idev->ifa_list==NULL); BUG_TRAP(!idev->ifa_list);
BUG_TRAP(idev->mc_list==NULL); BUG_TRAP(!idev->mc_list);
#ifdef NET_REFCNT_DEBUG #ifdef NET_REFCNT_DEBUG
printk(KERN_DEBUG "in_dev_finish_destroy: %p=%s\n", idev, dev ? dev->name : "NIL"); printk(KERN_DEBUG "in_dev_finish_destroy: %p=%s\n",
idev, dev ? dev->name : "NIL");
#endif #endif
dev_put(dev); dev_put(dev);
if (!idev->dead) { if (!idev->dead)
printk("Freeing alive in_device %p\n", idev); printk("Freeing alive in_device %p\n", idev);
return; else {
inet_dev_count--;
kfree(idev);
} }
inet_dev_count--;
kfree(idev);
} }
struct in_device *inetdev_init(struct net_device *dev) struct in_device *inetdev_init(struct net_device *dev)
...@@ -127,21 +141,20 @@ struct in_device *inetdev_init(struct net_device *dev) ...@@ -127,21 +141,20 @@ struct in_device *inetdev_init(struct net_device *dev)
in_dev = kmalloc(sizeof(*in_dev), GFP_KERNEL); in_dev = kmalloc(sizeof(*in_dev), GFP_KERNEL);
if (!in_dev) if (!in_dev)
return NULL; goto out;
memset(in_dev, 0, sizeof(*in_dev)); memset(in_dev, 0, sizeof(*in_dev));
in_dev->lock = RW_LOCK_UNLOCKED; in_dev->lock = RW_LOCK_UNLOCKED;
memcpy(&in_dev->cnf, &ipv4_devconf_dflt, sizeof(in_dev->cnf)); memcpy(&in_dev->cnf, &ipv4_devconf_dflt, sizeof(in_dev->cnf));
in_dev->cnf.sysctl = NULL; in_dev->cnf.sysctl = NULL;
in_dev->dev = dev; in_dev->dev = dev;
if ((in_dev->arp_parms = neigh_parms_alloc(dev, &arp_tbl)) == NULL) { if ((in_dev->arp_parms = neigh_parms_alloc(dev, &arp_tbl)) == NULL)
kfree(in_dev); goto out_kfree;
return NULL;
}
inet_dev_count++; inet_dev_count++;
/* Reference in_dev->dev */ /* Reference in_dev->dev */
dev_hold(dev); dev_hold(dev);
#ifdef CONFIG_SYSCTL #ifdef CONFIG_SYSCTL
neigh_sysctl_register(dev, in_dev->arp_parms, NET_IPV4, NET_IPV4_NEIGH, "ipv4"); neigh_sysctl_register(dev, in_dev->arp_parms, NET_IPV4,
NET_IPV4_NEIGH, "ipv4");
#endif #endif
write_lock_bh(&inetdev_lock); write_lock_bh(&inetdev_lock);
dev->ip_ptr = in_dev; dev->ip_ptr = in_dev;
...@@ -151,9 +164,14 @@ struct in_device *inetdev_init(struct net_device *dev) ...@@ -151,9 +164,14 @@ struct in_device *inetdev_init(struct net_device *dev)
#ifdef CONFIG_SYSCTL #ifdef CONFIG_SYSCTL
devinet_sysctl_register(in_dev, &in_dev->cnf); devinet_sysctl_register(in_dev, &in_dev->cnf);
#endif #endif
if (dev->flags&IFF_UP) if (dev->flags & IFF_UP)
ip_mc_up(in_dev); ip_mc_up(in_dev);
out:
return in_dev; return in_dev;
out_kfree:
kfree(in_dev);
in_dev = NULL;
goto out;
} }
static void inetdev_destroy(struct in_device *in_dev) static void inetdev_destroy(struct in_device *in_dev)
...@@ -197,10 +215,10 @@ int inet_addr_onlink(struct in_device *in_dev, u32 a, u32 b) ...@@ -197,10 +215,10 @@ int inet_addr_onlink(struct in_device *in_dev, u32 a, u32 b)
} endfor_ifa(in_dev); } endfor_ifa(in_dev);
read_unlock(&in_dev->lock); read_unlock(&in_dev->lock);
return 0; return 0;
} }
static void static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap, int destroy) int destroy)
{ {
struct in_ifaddr *ifa1 = *ifap; struct in_ifaddr *ifa1 = *ifap;
...@@ -208,12 +226,12 @@ inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap, int destroy) ...@@ -208,12 +226,12 @@ inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap, int destroy)
/* 1. Deleting primary ifaddr forces deletion all secondaries */ /* 1. Deleting primary ifaddr forces deletion all secondaries */
if (!(ifa1->ifa_flags&IFA_F_SECONDARY)) { if (!(ifa1->ifa_flags & IFA_F_SECONDARY)) {
struct in_ifaddr *ifa; struct in_ifaddr *ifa;
struct in_ifaddr **ifap1 = &ifa1->ifa_next; struct in_ifaddr **ifap1 = &ifa1->ifa_next;
while ((ifa=*ifap1) != NULL) { while ((ifa = *ifap1) != NULL) {
if (!(ifa->ifa_flags&IFA_F_SECONDARY) || if (!(ifa->ifa_flags & IFA_F_SECONDARY) ||
ifa1->ifa_mask != ifa->ifa_mask || ifa1->ifa_mask != ifa->ifa_mask ||
!inet_ifa_match(ifa1->ifa_address, ifa)) { !inet_ifa_match(ifa1->ifa_address, ifa)) {
ifap1 = &ifa->ifa_next; ifap1 = &ifa->ifa_next;
...@@ -250,20 +268,19 @@ inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap, int destroy) ...@@ -250,20 +268,19 @@ inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap, int destroy)
if (destroy) { if (destroy) {
inet_free_ifa(ifa1); inet_free_ifa(ifa1);
if (in_dev->ifa_list == NULL) if (!in_dev->ifa_list)
inetdev_destroy(in_dev); inetdev_destroy(in_dev);
} }
} }
static int static int inet_insert_ifa(struct in_ifaddr *ifa)
inet_insert_ifa(struct in_ifaddr *ifa)
{ {
struct in_device *in_dev = ifa->ifa_dev; struct in_device *in_dev = ifa->ifa_dev;
struct in_ifaddr *ifa1, **ifap, **last_primary; struct in_ifaddr *ifa1, **ifap, **last_primary;
ASSERT_RTNL(); ASSERT_RTNL();
if (ifa->ifa_local == 0) { if (!ifa->ifa_local) {
inet_free_ifa(ifa); inet_free_ifa(ifa);
return 0; return 0;
} }
...@@ -271,10 +288,13 @@ inet_insert_ifa(struct in_ifaddr *ifa) ...@@ -271,10 +288,13 @@ inet_insert_ifa(struct in_ifaddr *ifa)
ifa->ifa_flags &= ~IFA_F_SECONDARY; ifa->ifa_flags &= ~IFA_F_SECONDARY;
last_primary = &in_dev->ifa_list; last_primary = &in_dev->ifa_list;
for (ifap=&in_dev->ifa_list; (ifa1=*ifap)!=NULL; ifap=&ifa1->ifa_next) { for (ifap = &in_dev->ifa_list; (ifa1 = *ifap) != NULL;
if (!(ifa1->ifa_flags&IFA_F_SECONDARY) && ifa->ifa_scope <= ifa1->ifa_scope) ifap = &ifa1->ifa_next) {
if (!(ifa1->ifa_flags & IFA_F_SECONDARY) &&
ifa->ifa_scope <= ifa1->ifa_scope)
last_primary = &ifa1->ifa_next; last_primary = &ifa1->ifa_next;
if (ifa1->ifa_mask == ifa->ifa_mask && inet_ifa_match(ifa1->ifa_address, ifa)) { if (ifa1->ifa_mask == ifa->ifa_mask &&
inet_ifa_match(ifa1->ifa_address, ifa)) {
if (ifa1->ifa_local == ifa->ifa_local) { if (ifa1->ifa_local == ifa->ifa_local) {
inet_free_ifa(ifa); inet_free_ifa(ifa);
return -EEXIST; return -EEXIST;
...@@ -287,7 +307,7 @@ inet_insert_ifa(struct in_ifaddr *ifa) ...@@ -287,7 +307,7 @@ inet_insert_ifa(struct in_ifaddr *ifa)
} }
} }
if (!(ifa->ifa_flags&IFA_F_SECONDARY)) { if (!(ifa->ifa_flags & IFA_F_SECONDARY)) {
net_srandom(ifa->ifa_local); net_srandom(ifa->ifa_local);
ifap = last_primary; ifap = last_primary;
} }
...@@ -306,24 +326,23 @@ inet_insert_ifa(struct in_ifaddr *ifa) ...@@ -306,24 +326,23 @@ inet_insert_ifa(struct in_ifaddr *ifa)
return 0; return 0;
} }
static int static int inet_set_ifa(struct net_device *dev, struct in_ifaddr *ifa)
inet_set_ifa(struct net_device *dev, struct in_ifaddr *ifa)
{ {
struct in_device *in_dev = __in_dev_get(dev); struct in_device *in_dev = __in_dev_get(dev);
ASSERT_RTNL(); ASSERT_RTNL();
if (in_dev == NULL) { if (!in_dev) {
in_dev = inetdev_init(dev); in_dev = inetdev_init(dev);
if (in_dev == NULL) { if (!in_dev) {
inet_free_ifa(ifa); inet_free_ifa(ifa);
return -ENOBUFS; return -ENOBUFS;
} }
} }
if (ifa->ifa_dev != in_dev) { if (ifa->ifa_dev != in_dev) {
BUG_TRAP(ifa->ifa_dev==NULL); BUG_TRAP(!ifa->ifa_dev);
in_dev_hold(in_dev); in_dev_hold(in_dev);
ifa->ifa_dev=in_dev; ifa->ifa_dev = in_dev;
} }
if (LOOPBACK(ifa->ifa_local)) if (LOOPBACK(ifa->ifa_local))
ifa->ifa_scope = RT_SCOPE_HOST; ifa->ifa_scope = RT_SCOPE_HOST;
...@@ -344,7 +363,8 @@ struct in_device *inetdev_by_index(int ifindex) ...@@ -344,7 +363,8 @@ struct in_device *inetdev_by_index(int ifindex)
/* Called only from RTNL semaphored context. No locks. */ /* Called only from RTNL semaphored context. No locks. */
struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, u32 prefix, u32 mask) struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, u32 prefix,
u32 mask)
{ {
ASSERT_RTNL(); ASSERT_RTNL();
...@@ -355,10 +375,9 @@ struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, u32 prefix, u32 ma ...@@ -355,10 +375,9 @@ struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, u32 prefix, u32 ma
return NULL; return NULL;
} }
int int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{ {
struct rtattr **rta = arg; struct rtattr **rta = arg;
struct in_device *in_dev; struct in_device *in_dev;
struct ifaddrmsg *ifm = NLMSG_DATA(nlh); struct ifaddrmsg *ifm = NLMSG_DATA(nlh);
struct in_ifaddr *ifa, **ifap; struct in_ifaddr *ifa, **ifap;
...@@ -366,93 +385,103 @@ inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) ...@@ -366,93 +385,103 @@ inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
ASSERT_RTNL(); ASSERT_RTNL();
if ((in_dev = inetdev_by_index(ifm->ifa_index)) == NULL) if ((in_dev = inetdev_by_index(ifm->ifa_index)) == NULL)
return -EADDRNOTAVAIL; goto out;
__in_dev_put(in_dev); __in_dev_put(in_dev);
for (ifap=&in_dev->ifa_list; (ifa=*ifap)!=NULL; ifap=&ifa->ifa_next) { for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
if ((rta[IFA_LOCAL-1] && memcmp(RTA_DATA(rta[IFA_LOCAL-1]), &ifa->ifa_local, 4)) || ifap = &ifa->ifa_next) {
(rta[IFA_LABEL-1] && strcmp(RTA_DATA(rta[IFA_LABEL-1]), ifa->ifa_label)) || if ((rta[IFA_LOCAL - 1] &&
(rta[IFA_ADDRESS-1] && memcmp(RTA_DATA(rta[IFA_LOCAL - 1]),
&ifa->ifa_local, 4)) ||
(rta[IFA_LABEL - 1] &&
strcmp(RTA_DATA(rta[IFA_LABEL - 1]), ifa->ifa_label)) ||
(rta[IFA_ADDRESS - 1] &&
(ifm->ifa_prefixlen != ifa->ifa_prefixlen || (ifm->ifa_prefixlen != ifa->ifa_prefixlen ||
!inet_ifa_match(*(u32*)RTA_DATA(rta[IFA_ADDRESS-1]), ifa)))) !inet_ifa_match(*(u32*)RTA_DATA(rta[IFA_ADDRESS - 1]),
ifa))))
continue; continue;
inet_del_ifa(in_dev, ifap, 1); inet_del_ifa(in_dev, ifap, 1);
return 0; return 0;
} }
out:
return -EADDRNOTAVAIL; return -EADDRNOTAVAIL;
} }
int int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{ {
struct rtattr **rta = arg; struct rtattr **rta = arg;
struct net_device *dev; struct net_device *dev;
struct in_device *in_dev; struct in_device *in_dev;
struct ifaddrmsg *ifm = NLMSG_DATA(nlh); struct ifaddrmsg *ifm = NLMSG_DATA(nlh);
struct in_ifaddr *ifa; struct in_ifaddr *ifa;
int rc = -EINVAL;
ASSERT_RTNL(); ASSERT_RTNL();
if (ifm->ifa_prefixlen > 32 || rta[IFA_LOCAL-1] == NULL) if (ifm->ifa_prefixlen > 32 || !rta[IFA_LOCAL - 1])
return -EINVAL; goto out;
rc = -ENODEV;
if ((dev = __dev_get_by_index(ifm->ifa_index)) == NULL) if ((dev = __dev_get_by_index(ifm->ifa_index)) == NULL)
return -ENODEV; goto out;
rc = -ENOBUFS;
if ((in_dev = __in_dev_get(dev)) == NULL) { if ((in_dev = __in_dev_get(dev)) == NULL) {
in_dev = inetdev_init(dev); in_dev = inetdev_init(dev);
if (!in_dev) if (!in_dev)
return -ENOBUFS; goto out;
} }
if ((ifa = inet_alloc_ifa()) == NULL) if ((ifa = inet_alloc_ifa()) == NULL)
return -ENOBUFS; goto out;
if (rta[IFA_ADDRESS-1] == NULL) if (!rta[IFA_ADDRESS - 1])
rta[IFA_ADDRESS-1] = rta[IFA_LOCAL-1]; rta[IFA_ADDRESS - 1] = rta[IFA_LOCAL - 1];
memcpy(&ifa->ifa_local, RTA_DATA(rta[IFA_LOCAL-1]), 4); memcpy(&ifa->ifa_local, RTA_DATA(rta[IFA_LOCAL - 1]), 4);
memcpy(&ifa->ifa_address, RTA_DATA(rta[IFA_ADDRESS-1]), 4); memcpy(&ifa->ifa_address, RTA_DATA(rta[IFA_ADDRESS - 1]), 4);
ifa->ifa_prefixlen = ifm->ifa_prefixlen; ifa->ifa_prefixlen = ifm->ifa_prefixlen;
ifa->ifa_mask = inet_make_mask(ifm->ifa_prefixlen); ifa->ifa_mask = inet_make_mask(ifm->ifa_prefixlen);
if (rta[IFA_BROADCAST-1]) if (rta[IFA_BROADCAST - 1])
memcpy(&ifa->ifa_broadcast, RTA_DATA(rta[IFA_BROADCAST-1]), 4); memcpy(&ifa->ifa_broadcast,
if (rta[IFA_ANYCAST-1]) RTA_DATA(rta[IFA_BROADCAST - 1]), 4);
memcpy(&ifa->ifa_anycast, RTA_DATA(rta[IFA_ANYCAST-1]), 4); if (rta[IFA_ANYCAST - 1])
memcpy(&ifa->ifa_anycast, RTA_DATA(rta[IFA_ANYCAST - 1]), 4);
ifa->ifa_flags = ifm->ifa_flags; ifa->ifa_flags = ifm->ifa_flags;
ifa->ifa_scope = ifm->ifa_scope; ifa->ifa_scope = ifm->ifa_scope;
in_dev_hold(in_dev); in_dev_hold(in_dev);
ifa->ifa_dev = in_dev; ifa->ifa_dev = in_dev;
if (rta[IFA_LABEL-1]) if (rta[IFA_LABEL - 1])
memcpy(ifa->ifa_label, RTA_DATA(rta[IFA_LABEL-1]), IFNAMSIZ); memcpy(ifa->ifa_label, RTA_DATA(rta[IFA_LABEL - 1]), IFNAMSIZ);
else else
memcpy(ifa->ifa_label, dev->name, IFNAMSIZ); memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
return inet_insert_ifa(ifa); rc = inet_insert_ifa(ifa);
out:
return rc;
} }
/* /*
* Determine a default network mask, based on the IP address. * Determine a default network mask, based on the IP address.
*/ */
static __inline__ int inet_abc_len(u32 addr) static __inline__ int inet_abc_len(u32 addr)
{ {
if (ZERONET(addr)) int rc = -1; /* Something else, probably a multicast. */
return 0;
addr = ntohl(addr); if (ZERONET(addr))
if (IN_CLASSA(addr)) rc = 0;
return 8; else {
if (IN_CLASSB(addr)) addr = ntohl(addr);
return 16;
if (IN_CLASSC(addr)) if (IN_CLASSA(addr))
return 24; rc = 8;
else if (IN_CLASSB(addr))
rc = 16;
else if (IN_CLASSC(addr))
rc = 24;
}
/* return rc;
* Something else, probably a multicast.
*/
return -1;
} }
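
The classful rule that inet_abc_len() encodes can be illustrated with a small user-space program (the IN_CLASSA/B/C macros come from <netinet/in.h>; the dotted-quad strings are arbitrary examples, not from the patch).

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>

static int abc_len(const char *dotted)
{
	unsigned int a = ntohl(inet_addr(dotted));	/* host byte order */

	if ((a & 0xff000000u) == 0)
		return 0;		/* 0.x.x.x, the ZERONET() case */
	if (IN_CLASSA(a))
		return 8;		/* e.g. 10.0.0.1    -> /8  */
	if (IN_CLASSB(a))
		return 16;		/* e.g. 172.16.0.1  -> /16 */
	if (IN_CLASSC(a))
		return 24;		/* e.g. 192.168.1.1 -> /24 */
	return -1;			/* class D/E, e.g. 224.0.0.1 */
}

int main(void)
{
	printf("%d %d %d\n", abc_len("10.0.0.1"),
	       abc_len("172.16.0.1"), abc_len("192.168.1.1"));	/* prints: 8 16 24 */
	return 0;
}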
...@@ -466,7 +495,7 @@ int devinet_ioctl(unsigned int cmd, void *arg) ...@@ -466,7 +495,7 @@ int devinet_ioctl(unsigned int cmd, void *arg)
struct in_ifaddr *ifa = NULL; struct in_ifaddr *ifa = NULL;
struct net_device *dev; struct net_device *dev;
char *colon; char *colon;
int ret = 0; int ret = -EFAULT;
int tryaddrmatch = 0; int tryaddrmatch = 0;
/* /*
...@@ -474,8 +503,8 @@ int devinet_ioctl(unsigned int cmd, void *arg) ...@@ -474,8 +503,8 @@ int devinet_ioctl(unsigned int cmd, void *arg)
*/ */
if (copy_from_user(&ifr, arg, sizeof(struct ifreq))) if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
return -EFAULT; goto out;
ifr.ifr_name[IFNAMSIZ-1] = 0; ifr.ifr_name[IFNAMSIZ - 1] = 0;
/* save original address for comparison */ /* save original address for comparison */
memcpy(&sin_orig, sin, sizeof(*sin)); memcpy(&sin_orig, sin, sizeof(*sin));
...@@ -503,215 +532,222 @@ int devinet_ioctl(unsigned int cmd, void *arg) ...@@ -503,215 +532,222 @@ int devinet_ioctl(unsigned int cmd, void *arg)
break; break;
case SIOCSIFFLAGS: case SIOCSIFFLAGS:
ret = -EACCES;
if (!capable(CAP_NET_ADMIN)) if (!capable(CAP_NET_ADMIN))
return -EACCES; goto out;
break; break;
case SIOCSIFADDR: /* Set interface address (and family) */ case SIOCSIFADDR: /* Set interface address (and family) */
case SIOCSIFBRDADDR: /* Set the broadcast address */ case SIOCSIFBRDADDR: /* Set the broadcast address */
case SIOCSIFDSTADDR: /* Set the destination address */ case SIOCSIFDSTADDR: /* Set the destination address */
case SIOCSIFNETMASK: /* Set the netmask for the interface */ case SIOCSIFNETMASK: /* Set the netmask for the interface */
ret = -EACCES;
if (!capable(CAP_NET_ADMIN)) if (!capable(CAP_NET_ADMIN))
return -EACCES; goto out;
ret = -EINVAL;
if (sin->sin_family != AF_INET) if (sin->sin_family != AF_INET)
return -EINVAL; goto out;
break; break;
default: default:
return -EINVAL; ret = -EINVAL;
goto out;
} }
dev_probe_lock(); dev_probe_lock();
rtnl_lock(); rtnl_lock();
if ((dev = __dev_get_by_name(ifr.ifr_name)) == NULL) { ret = -ENODEV;
ret = -ENODEV; if ((dev = __dev_get_by_name(ifr.ifr_name)) == NULL)
goto done; goto done;
}
if (colon) if (colon)
*colon = ':'; *colon = ':';
if ((in_dev=__in_dev_get(dev)) != NULL) { if ((in_dev = __in_dev_get(dev)) != NULL) {
if (tryaddrmatch) { if (tryaddrmatch) {
/* Matthias Andree */ /* Matthias Andree */
/* compare label and address (4.4BSD style) */ /* compare label and address (4.4BSD style) */
/* note: we only do this for a limited set of ioctls /* note: we only do this for a limited set of ioctls
and only if the original address family was AF_INET. and only if the original address family was AF_INET.
This is checked above. */ This is checked above. */
for (ifap=&in_dev->ifa_list; (ifa=*ifap) != NULL; ifap=&ifa->ifa_next) { for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
if ((strcmp(ifr.ifr_name, ifa->ifa_label) == 0) ifap = &ifa->ifa_next) {
&& (sin_orig.sin_addr.s_addr == ifa->ifa_address)) { if (!strcmp(ifr.ifr_name, ifa->ifa_label) &&
sin_orig.sin_addr.s_addr ==
ifa->ifa_address) {
break; /* found */ break; /* found */
} }
} }
} }
/* we didn't get a match, maybe the application is /* we didn't get a match, maybe the application is
4.3BSD-style and passed in junk so we fall back to 4.3BSD-style and passed in junk so we fall back to
comparing just the label */ comparing just the label */
if (ifa == NULL) { if (!ifa) {
for (ifap=&in_dev->ifa_list; (ifa=*ifap) != NULL; ifap=&ifa->ifa_next) for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
if (strcmp(ifr.ifr_name, ifa->ifa_label) == 0) ifap = &ifa->ifa_next)
if (!strcmp(ifr.ifr_name, ifa->ifa_label))
break; break;
} }
} }
if (ifa == NULL && cmd != SIOCSIFADDR && cmd != SIOCSIFFLAGS) { ret = -EADDRNOTAVAIL;
ret = -EADDRNOTAVAIL; if (!ifa && cmd != SIOCSIFADDR && cmd != SIOCSIFFLAGS)
goto done; goto done;
}
switch(cmd) { switch(cmd) {
case SIOCGIFADDR: /* Get interface address */ case SIOCGIFADDR: /* Get interface address */
sin->sin_addr.s_addr = ifa->ifa_local; sin->sin_addr.s_addr = ifa->ifa_local;
goto rarok; goto rarok;
case SIOCGIFBRDADDR: /* Get the broadcast address */ case SIOCGIFBRDADDR: /* Get the broadcast address */
sin->sin_addr.s_addr = ifa->ifa_broadcast; sin->sin_addr.s_addr = ifa->ifa_broadcast;
goto rarok; goto rarok;
case SIOCGIFDSTADDR: /* Get the destination address */ case SIOCGIFDSTADDR: /* Get the destination address */
sin->sin_addr.s_addr = ifa->ifa_address; sin->sin_addr.s_addr = ifa->ifa_address;
goto rarok; goto rarok;
case SIOCGIFNETMASK: /* Get the netmask for the interface */ case SIOCGIFNETMASK: /* Get the netmask for the interface */
sin->sin_addr.s_addr = ifa->ifa_mask; sin->sin_addr.s_addr = ifa->ifa_mask;
goto rarok; goto rarok;
case SIOCSIFFLAGS: case SIOCSIFFLAGS:
if (colon) { if (colon) {
if (ifa == NULL) { ret = -EADDRNOTAVAIL;
ret = -EADDRNOTAVAIL; if (!ifa)
break;
}
if (!(ifr.ifr_flags&IFF_UP))
inet_del_ifa(in_dev, ifap, 1);
break; break;
} ret = 0;
ret = dev_change_flags(dev, ifr.ifr_flags); if (!(ifr.ifr_flags & IFF_UP))
inet_del_ifa(in_dev, ifap, 1);
break;
}
ret = dev_change_flags(dev, ifr.ifr_flags);
break;
case SIOCSIFADDR: /* Set interface address (and family) */
ret = -EINVAL;
if (inet_abc_len(sin->sin_addr.s_addr) < 0)
break; break;
case SIOCSIFADDR: /* Set interface address (and family) */ if (!ifa) {
if (inet_abc_len(sin->sin_addr.s_addr) < 0) { ret = -ENOBUFS;
ret = -EINVAL; if ((ifa = inet_alloc_ifa()) == NULL)
break; break;
} if (colon)
memcpy(ifa->ifa_label, ifr.ifr_name, IFNAMSIZ);
else
memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
} else {
ret = 0;
if (ifa->ifa_local == sin->sin_addr.s_addr)
break;
inet_del_ifa(in_dev, ifap, 0);
ifa->ifa_broadcast = 0;
ifa->ifa_anycast = 0;
}
if (!ifa) { ifa->ifa_address = ifa->ifa_local = sin->sin_addr.s_addr;
if ((ifa = inet_alloc_ifa()) == NULL) {
ret = -ENOBUFS; if (!(dev->flags & IFF_POINTOPOINT)) {
break; ifa->ifa_prefixlen = inet_abc_len(ifa->ifa_address);
} ifa->ifa_mask = inet_make_mask(ifa->ifa_prefixlen);
if (colon) if ((dev->flags & IFF_BROADCAST) &&
memcpy(ifa->ifa_label, ifr.ifr_name, IFNAMSIZ); ifa->ifa_prefixlen < 31)
else ifa->ifa_broadcast = ifa->ifa_address |
memcpy(ifa->ifa_label, dev->name, IFNAMSIZ); ~ifa->ifa_mask;
} else { } else {
ret = 0; ifa->ifa_prefixlen = 32;
if (ifa->ifa_local == sin->sin_addr.s_addr) ifa->ifa_mask = inet_make_mask(32);
break; }
inet_del_ifa(in_dev, ifap, 0); ret = inet_set_ifa(dev, ifa);
ifa->ifa_broadcast = 0; break;
ifa->ifa_anycast = 0;
}
ifa->ifa_address = case SIOCSIFBRDADDR: /* Set the broadcast address */
ifa->ifa_local = sin->sin_addr.s_addr; ret = 0;
if (ifa->ifa_broadcast != sin->sin_addr.s_addr) {
if (!(dev->flags&IFF_POINTOPOINT)) { inet_del_ifa(in_dev, ifap, 0);
ifa->ifa_prefixlen = inet_abc_len(ifa->ifa_address); ifa->ifa_broadcast = sin->sin_addr.s_addr;
ifa->ifa_mask = inet_make_mask(ifa->ifa_prefixlen); inet_insert_ifa(ifa);
if ((dev->flags&IFF_BROADCAST) && ifa->ifa_prefixlen < 31) }
ifa->ifa_broadcast = ifa->ifa_address|~ifa->ifa_mask; break;
} else {
ifa->ifa_prefixlen = 32;
ifa->ifa_mask = inet_make_mask(32);
}
ret = inet_set_ifa(dev, ifa);
break;
case SIOCSIFBRDADDR: /* Set the broadcast address */ case SIOCSIFDSTADDR: /* Set the destination address */
if (ifa->ifa_broadcast != sin->sin_addr.s_addr) { ret = 0;
inet_del_ifa(in_dev, ifap, 0); if (ifa->ifa_address == sin->sin_addr.s_addr)
ifa->ifa_broadcast = sin->sin_addr.s_addr;
inet_insert_ifa(ifa);
}
break; break;
ret = -EINVAL;
case SIOCSIFDSTADDR: /* Set the destination address */ if (inet_abc_len(sin->sin_addr.s_addr) < 0)
if (ifa->ifa_address != sin->sin_addr.s_addr) {
if (inet_abc_len(sin->sin_addr.s_addr) < 0) {
ret = -EINVAL;
break;
}
inet_del_ifa(in_dev, ifap, 0);
ifa->ifa_address = sin->sin_addr.s_addr;
inet_insert_ifa(ifa);
}
break; break;
ret = 0;
inet_del_ifa(in_dev, ifap, 0);
ifa->ifa_address = sin->sin_addr.s_addr;
inet_insert_ifa(ifa);
break;
case SIOCSIFNETMASK: /* Set the netmask for the interface */ case SIOCSIFNETMASK: /* Set the netmask for the interface */
/*
* The mask we set must be legal.
*/
if (bad_mask(sin->sin_addr.s_addr, 0)) {
ret = -EINVAL;
break;
}
if (ifa->ifa_mask != sin->sin_addr.s_addr) { /*
inet_del_ifa(in_dev, ifap, 0); * The mask we set must be legal.
ifa->ifa_mask = sin->sin_addr.s_addr; */
ifa->ifa_prefixlen = inet_mask_len(ifa->ifa_mask); ret = -EINVAL;
inet_insert_ifa(ifa); if (bad_mask(sin->sin_addr.s_addr, 0))
}
break; break;
ret = 0;
if (ifa->ifa_mask != sin->sin_addr.s_addr) {
inet_del_ifa(in_dev, ifap, 0);
ifa->ifa_mask = sin->sin_addr.s_addr;
ifa->ifa_prefixlen = inet_mask_len(ifa->ifa_mask);
inet_insert_ifa(ifa);
}
break;
} }
done: done:
rtnl_unlock(); rtnl_unlock();
dev_probe_unlock(); dev_probe_unlock();
out:
return ret; return ret;
rarok: rarok:
rtnl_unlock(); rtnl_unlock();
dev_probe_unlock(); dev_probe_unlock();
if (copy_to_user(arg, &ifr, sizeof(struct ifreq))) ret = copy_to_user(arg, &ifr, sizeof(struct ifreq)) ? -EFAULT : 0;
return -EFAULT; goto out;
return 0;
} }
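
For orientation, a user-space sketch of the SIOCGIFADDR request that lands in devinet_ioctl() above; "eth0" is only an example interface label (an alias such as "eth0:1" also works, as the label matching shows).

#include <arpa/inet.h>
#include <net/if.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct ifreq ifr;

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

	if (ioctl(fd, SIOCGIFADDR, &ifr) == 0) {
		struct sockaddr_in *sin = (struct sockaddr_in *)&ifr.ifr_addr;
		printf("%s %s\n", ifr.ifr_name, inet_ntoa(sin->sin_addr));
	} else {
		perror("SIOCGIFADDR");	/* EADDRNOTAVAIL if no address is configured */
	}
	close(fd);
	return 0;
}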
static int static int inet_gifconf(struct net_device *dev, char *buf, int len)
inet_gifconf(struct net_device *dev, char *buf, int len)
{ {
struct in_device *in_dev = __in_dev_get(dev); struct in_device *in_dev = __in_dev_get(dev);
struct in_ifaddr *ifa; struct in_ifaddr *ifa;
struct ifreq ifr; struct ifreq ifr;
int done=0; int done = 0;
if (in_dev==NULL || (ifa=in_dev->ifa_list)==NULL) if (!in_dev || (ifa = in_dev->ifa_list) == NULL)
return 0; goto out;
for ( ; ifa; ifa = ifa->ifa_next) { for (; ifa; ifa = ifa->ifa_next) {
if (!buf) { if (!buf) {
done += sizeof(ifr); done += sizeof(ifr);
continue; continue;
} }
if (len < (int) sizeof(ifr)) if (len < (int) sizeof(ifr))
return done; break;
memset(&ifr, 0, sizeof(struct ifreq)); memset(&ifr, 0, sizeof(struct ifreq));
if (ifa->ifa_label) if (ifa->ifa_label)
strcpy(ifr.ifr_name, ifa->ifa_label); strcpy(ifr.ifr_name, ifa->ifa_label);
else else
strcpy(ifr.ifr_name, dev->name); strcpy(ifr.ifr_name, dev->name);
(*(struct sockaddr_in *) &ifr.ifr_addr).sin_family = AF_INET; (*(struct sockaddr_in *)&ifr.ifr_addr).sin_family = AF_INET;
(*(struct sockaddr_in *) &ifr.ifr_addr).sin_addr.s_addr = ifa->ifa_local; (*(struct sockaddr_in *)&ifr.ifr_addr).sin_addr.s_addr =
ifa->ifa_local;
if (copy_to_user(buf, &ifr, sizeof(struct ifreq))) if (copy_to_user(buf, &ifr, sizeof(struct ifreq))) {
return -EFAULT; done = -EFAULT;
buf += sizeof(struct ifreq); break;
len -= sizeof(struct ifreq); }
buf += sizeof(struct ifreq);
len -= sizeof(struct ifreq);
done += sizeof(struct ifreq); done += sizeof(struct ifreq);
} }
out:
return done; return done;
} }
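
The enumeration counterpart served by inet_gifconf() above looks like this from user space (again a sketch; the fixed 16-entry buffer is an arbitrary choice, and passing a NULL buffer instead asks only for the required length, matching the !buf branch above).

#include <arpa/inet.h>
#include <net/if.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct ifreq reqs[16];
	struct ifconf ifc;
	int i, n;

	if (fd < 0)
		return 1;
	memset(&ifc, 0, sizeof(ifc));
	ifc.ifc_len = sizeof(reqs);
	ifc.ifc_req = reqs;

	if (ioctl(fd, SIOCGIFCONF, &ifc) == 0) {
		n = ifc.ifc_len / sizeof(struct ifreq);
		for (i = 0; i < n; i++)
			printf("%s %s\n", reqs[i].ifr_name,
			       inet_ntoa(((struct sockaddr_in *)
					  &reqs[i].ifr_addr)->sin_addr));
	}
	close(fd);
	return 0;
}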
...@@ -722,10 +758,8 @@ u32 inet_select_addr(const struct net_device *dev, u32 dst, int scope) ...@@ -722,10 +758,8 @@ u32 inet_select_addr(const struct net_device *dev, u32 dst, int scope)
read_lock(&inetdev_lock); read_lock(&inetdev_lock);
in_dev = __in_dev_get(dev); in_dev = __in_dev_get(dev);
if (in_dev == NULL) { if (!in_dev)
read_unlock(&inetdev_lock); goto out_unlock_inetdev;
return 0;
}
read_lock(&in_dev->lock); read_lock(&in_dev->lock);
for_primary_ifa(in_dev) { for_primary_ifa(in_dev) {
...@@ -742,7 +776,7 @@ u32 inet_select_addr(const struct net_device *dev, u32 dst, int scope) ...@@ -742,7 +776,7 @@ u32 inet_select_addr(const struct net_device *dev, u32 dst, int scope)
read_unlock(&inetdev_lock); read_unlock(&inetdev_lock);
if (addr) if (addr)
return addr; goto out;
/* Not loopback addresses on loopback should be preferred /* Not loopback addresses on loopback should be preferred
in this case. It is important that lo is the first interface
...@@ -750,8 +784,8 @@ u32 inet_select_addr(const struct net_device *dev, u32 dst, int scope) ...@@ -750,8 +784,8 @@ u32 inet_select_addr(const struct net_device *dev, u32 dst, int scope)
*/ */
read_lock(&dev_base_lock); read_lock(&dev_base_lock);
read_lock(&inetdev_lock); read_lock(&inetdev_lock);
for (dev=dev_base; dev; dev=dev->next) { for (dev = dev_base; dev; dev = dev->next) {
if ((in_dev=__in_dev_get(dev)) == NULL) if ((in_dev = __in_dev_get(dev)) == NULL)
continue; continue;
read_lock(&in_dev->lock); read_lock(&in_dev->lock);
...@@ -759,17 +793,20 @@ u32 inet_select_addr(const struct net_device *dev, u32 dst, int scope) ...@@ -759,17 +793,20 @@ u32 inet_select_addr(const struct net_device *dev, u32 dst, int scope)
if (ifa->ifa_scope != RT_SCOPE_LINK && if (ifa->ifa_scope != RT_SCOPE_LINK &&
ifa->ifa_scope <= scope) { ifa->ifa_scope <= scope) {
read_unlock(&in_dev->lock); read_unlock(&in_dev->lock);
read_unlock(&inetdev_lock); addr = ifa->ifa_local;
read_unlock(&dev_base_lock); goto out_unlock_both;
return ifa->ifa_local;
} }
} endfor_ifa(in_dev); } endfor_ifa(in_dev);
read_unlock(&in_dev->lock); read_unlock(&in_dev->lock);
} }
out_unlock_both:
read_unlock(&inetdev_lock); read_unlock(&inetdev_lock);
read_unlock(&dev_base_lock); read_unlock(&dev_base_lock);
out:
return 0; return addr;
out_unlock_inetdev:
read_unlock(&inetdev_lock);
goto out;
} }
/* /*
...@@ -783,20 +820,21 @@ int register_inetaddr_notifier(struct notifier_block *nb) ...@@ -783,20 +820,21 @@ int register_inetaddr_notifier(struct notifier_block *nb)
int unregister_inetaddr_notifier(struct notifier_block *nb) int unregister_inetaddr_notifier(struct notifier_block *nb)
{ {
return notifier_chain_unregister(&inetaddr_chain,nb); return notifier_chain_unregister(&inetaddr_chain, nb);
} }
/* Called only under RTNL semaphore */ /* Called only under RTNL semaphore */
static int inetdev_event(struct notifier_block *this, unsigned long event, void *ptr) static int inetdev_event(struct notifier_block *this, unsigned long event,
void *ptr)
{ {
struct net_device *dev = ptr; struct net_device *dev = ptr;
struct in_device *in_dev = __in_dev_get(dev); struct in_device *in_dev = __in_dev_get(dev);
ASSERT_RTNL(); ASSERT_RTNL();
if (in_dev == NULL) if (!in_dev)
return NOTIFY_DONE; goto out;
switch (event) { switch (event) {
case NETDEV_REGISTER: case NETDEV_REGISTER:
...@@ -810,7 +848,7 @@ static int inetdev_event(struct notifier_block *this, unsigned long event, void ...@@ -810,7 +848,7 @@ static int inetdev_event(struct notifier_block *this, unsigned long event, void
struct in_ifaddr *ifa; struct in_ifaddr *ifa;
if ((ifa = inet_alloc_ifa()) != NULL) { if ((ifa = inet_alloc_ifa()) != NULL) {
ifa->ifa_local = ifa->ifa_local =
ifa->ifa_address = htonl(INADDR_LOOPBACK); ifa->ifa_address = htonl(INADDR_LOOPBACK);
ifa->ifa_prefixlen = 8; ifa->ifa_prefixlen = 8;
ifa->ifa_mask = inet_make_mask(8); ifa->ifa_mask = inet_make_mask(8);
in_dev_hold(in_dev); in_dev_hold(in_dev);
...@@ -843,7 +881,7 @@ static int inetdev_event(struct notifier_block *this, unsigned long event, void ...@@ -843,7 +881,7 @@ static int inetdev_event(struct notifier_block *this, unsigned long event, void
} }
break; break;
} }
out:
return NOTIFY_DONE; return NOTIFY_DONE;
} }
...@@ -887,15 +925,14 @@ static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa, ...@@ -887,15 +925,14 @@ static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb) static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
{ {
int idx, ip_idx; int idx, ip_idx;
int s_idx, s_ip_idx;
struct net_device *dev; struct net_device *dev;
struct in_device *in_dev; struct in_device *in_dev;
struct in_ifaddr *ifa; struct in_ifaddr *ifa;
int s_ip_idx, s_idx = cb->args[0];
s_idx = cb->args[0];
s_ip_idx = ip_idx = cb->args[1]; s_ip_idx = ip_idx = cb->args[1];
read_lock(&dev_base_lock); read_lock(&dev_base_lock);
for (dev=dev_base, idx=0; dev; dev = dev->next, idx++) { for (dev = dev_base, idx = 0; dev; dev = dev->next, idx++) {
if (idx < s_idx) if (idx < s_idx)
continue; continue;
if (idx > s_idx) if (idx > s_idx)
...@@ -911,7 +948,8 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb) ...@@ -911,7 +948,8 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
if (ip_idx < s_ip_idx) if (ip_idx < s_ip_idx)
continue; continue;
if (inet_fill_ifaddr(skb, ifa, NETLINK_CB(cb->skb).pid, if (inet_fill_ifaddr(skb, ifa, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq, RTM_NEWADDR) <= 0) { cb->nlh->nlmsg_seq,
RTM_NEWADDR) <= 0) {
read_unlock(&in_dev->lock); read_unlock(&in_dev->lock);
read_unlock(&inetdev_lock); read_unlock(&inetdev_lock);
goto done; goto done;
...@@ -929,65 +967,39 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb) ...@@ -929,65 +967,39 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
return skb->len; return skb->len;
} }
static void rtmsg_ifa(int event, struct in_ifaddr * ifa) static void rtmsg_ifa(int event, struct in_ifaddr* ifa)
{ {
struct sk_buff *skb; int size = NLMSG_SPACE(sizeof(struct ifaddrmsg) + 128);
int size = NLMSG_SPACE(sizeof(struct ifaddrmsg)+128); struct sk_buff *skb = alloc_skb(size, GFP_KERNEL);
skb = alloc_skb(size, GFP_KERNEL); if (!skb)
if (!skb) {
netlink_set_err(rtnl, 0, RTMGRP_IPV4_IFADDR, ENOBUFS); netlink_set_err(rtnl, 0, RTMGRP_IPV4_IFADDR, ENOBUFS);
return; else if (inet_fill_ifaddr(skb, ifa, 0, 0, event) < 0) {
}
if (inet_fill_ifaddr(skb, ifa, 0, 0, event) < 0) {
kfree_skb(skb); kfree_skb(skb);
netlink_set_err(rtnl, 0, RTMGRP_IPV4_IFADDR, EINVAL); netlink_set_err(rtnl, 0, RTMGRP_IPV4_IFADDR, EINVAL);
return; } else {
NETLINK_CB(skb).dst_groups = RTMGRP_IPV4_IFADDR;
netlink_broadcast(rtnl, skb, 0, RTMGRP_IPV4_IFADDR, GFP_KERNEL);
} }
NETLINK_CB(skb).dst_groups = RTMGRP_IPV4_IFADDR;
netlink_broadcast(rtnl, skb, 0, RTMGRP_IPV4_IFADDR, GFP_KERNEL);
} }
static struct rtnetlink_link inet_rtnetlink_table[RTM_MAX - RTM_BASE + 1] = {
static struct rtnetlink_link inet_rtnetlink_table[RTM_MAX-RTM_BASE+1] = [4] = { doit: inet_rtm_newaddr, },
{ [5] = { doit: inet_rtm_deladdr, },
{ NULL, NULL, }, [6] = { dumpit: inet_dump_ifaddr, },
{ NULL, NULL, }, [8] = { doit: inet_rtm_newroute, },
{ NULL, NULL, }, [9] = { doit: inet_rtm_delroute, },
{ NULL, NULL, }, [10] = { doit: inet_rtm_getroute, dumpit: inet_dump_fib, },
{ inet_rtm_newaddr, NULL, },
{ inet_rtm_deladdr, NULL, },
{ NULL, inet_dump_ifaddr, },
{ NULL, NULL, },
{ inet_rtm_newroute, NULL, },
{ inet_rtm_delroute, NULL, },
{ inet_rtm_getroute, inet_dump_fib, },
{ NULL, NULL, },
{ NULL, NULL, },
{ NULL, NULL, },
{ NULL, NULL, },
{ NULL, NULL, },
#ifdef CONFIG_IP_MULTIPLE_TABLES #ifdef CONFIG_IP_MULTIPLE_TABLES
{ inet_rtm_newrule, NULL, }, [16] = { doit: inet_rtm_newrule, },
{ inet_rtm_delrule, NULL, }, [17] = { doit: inet_rtm_delrule, },
{ NULL, inet_dump_rules, }, [18] = { dumpit: inet_dump_rules, },
{ NULL, NULL, },
#else
{ NULL, NULL, },
{ NULL, NULL, },
{ NULL, NULL, },
{ NULL, NULL, },
#endif #endif
}; };
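The designated initializers above index this table by rtnetlink message type relative to RTM_BASE, which is what makes the sparse [4], [5], ... entries line up with RTM_NEWADDR, RTM_DELADDR and friends. A minimal userspace sketch of that index arithmetic (illustration only, not part of the patch; it assumes nothing beyond the RTM_* constants from linux/rtnetlink.h):

#include <stdio.h>
#include <sys/socket.h>
#include <linux/rtnetlink.h>

int main(void)
{
	/* Each entry in inet_rtnetlink_table[] lives at RTM_type - RTM_BASE. */
	printf("RTM_NEWADDR  -> [%d]\n", RTM_NEWADDR - RTM_BASE);	/* 4  */
	printf("RTM_DELADDR  -> [%d]\n", RTM_DELADDR - RTM_BASE);	/* 5  */
	printf("RTM_GETADDR  -> [%d]\n", RTM_GETADDR - RTM_BASE);	/* 6  */
	printf("RTM_GETROUTE -> [%d]\n", RTM_GETROUTE - RTM_BASE);	/* 10 */
	return 0;
}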
#ifdef CONFIG_SYSCTL #ifdef CONFIG_SYSCTL
void inet_forward_change() void inet_forward_change(void)
{ {
struct net_device *dev; struct net_device *dev;
int on = ipv4_devconf.forwarding; int on = ipv4_devconf.forwarding;
...@@ -1009,15 +1021,13 @@ void inet_forward_change() ...@@ -1009,15 +1021,13 @@ void inet_forward_change()
rt_cache_flush(0); rt_cache_flush(0);
} }
static static int devinet_sysctl_forward(ctl_table *ctl, int write,
int devinet_sysctl_forward(ctl_table *ctl, int write, struct file * filp, struct file* filp, void *buffer,
void *buffer, size_t *lenp) size_t *lenp)
{ {
int *valp = ctl->data; int *valp = ctl->data;
int val = *valp; int val = *valp;
int ret; int ret = proc_dointvec(ctl, write, filp, buffer, lenp);
ret = proc_dointvec(ctl, write, filp, buffer, lenp);
if (write && *valp != val) { if (write && *valp != val) {
if (valp == &ipv4_devconf.forwarding) if (valp == &ipv4_devconf.forwarding)
...@@ -1026,81 +1036,179 @@ int devinet_sysctl_forward(ctl_table *ctl, int write, struct file * filp, ...@@ -1026,81 +1036,179 @@ int devinet_sysctl_forward(ctl_table *ctl, int write, struct file * filp,
rt_cache_flush(0); rt_cache_flush(0);
} }
return ret; return ret;
} }
static struct devinet_sysctl_table static struct devinet_sysctl_table {
{
struct ctl_table_header *sysctl_header; struct ctl_table_header *sysctl_header;
ctl_table devinet_vars[15]; ctl_table devinet_vars[15];
ctl_table devinet_dev[2]; ctl_table devinet_dev[2];
ctl_table devinet_conf_dir[2]; ctl_table devinet_conf_dir[2];
ctl_table devinet_proto_dir[2]; ctl_table devinet_proto_dir[2];
ctl_table devinet_root_dir[2]; ctl_table devinet_root_dir[2];
} devinet_sysctl = { } devinet_sysctl = {
NULL, devinet_vars: {
{{NET_IPV4_CONF_FORWARDING, "forwarding", {
&ipv4_devconf.forwarding, sizeof(int), 0644, NULL, ctl_name: NET_IPV4_CONF_FORWARDING,
&devinet_sysctl_forward}, procname: "forwarding",
{NET_IPV4_CONF_MC_FORWARDING, "mc_forwarding", data: &ipv4_devconf.forwarding,
&ipv4_devconf.mc_forwarding, sizeof(int), 0444, NULL, maxlen: sizeof(int),
&proc_dointvec}, mode: 0644,
{NET_IPV4_CONF_ACCEPT_REDIRECTS, "accept_redirects", proc_handler: &devinet_sysctl_forward,
&ipv4_devconf.accept_redirects, sizeof(int), 0644, NULL, },
&proc_dointvec}, {
{NET_IPV4_CONF_SECURE_REDIRECTS, "secure_redirects", ctl_name: NET_IPV4_CONF_MC_FORWARDING,
&ipv4_devconf.secure_redirects, sizeof(int), 0644, NULL, procname: "mc_forwarding",
&proc_dointvec}, data: &ipv4_devconf.mc_forwarding,
{NET_IPV4_CONF_SHARED_MEDIA, "shared_media", maxlen: sizeof(int),
&ipv4_devconf.shared_media, sizeof(int), 0644, NULL, mode: 0444,
&proc_dointvec}, proc_handler: &proc_dointvec,
{NET_IPV4_CONF_RP_FILTER, "rp_filter", },
&ipv4_devconf.rp_filter, sizeof(int), 0644, NULL, {
&proc_dointvec}, ctl_name: NET_IPV4_CONF_ACCEPT_REDIRECTS,
{NET_IPV4_CONF_SEND_REDIRECTS, "send_redirects", procname: "accept_redirects",
&ipv4_devconf.send_redirects, sizeof(int), 0644, NULL, data: &ipv4_devconf.accept_redirects,
&proc_dointvec}, maxlen: sizeof(int),
{NET_IPV4_CONF_ACCEPT_SOURCE_ROUTE, "accept_source_route", mode: 0644,
&ipv4_devconf.accept_source_route, sizeof(int), 0644, NULL, proc_handler: &proc_dointvec,
&proc_dointvec}, },
{NET_IPV4_CONF_PROXY_ARP, "proxy_arp", {
&ipv4_devconf.proxy_arp, sizeof(int), 0644, NULL, ctl_name: NET_IPV4_CONF_SECURE_REDIRECTS,
&proc_dointvec}, procname: "secure_redirects",
{NET_IPV4_CONF_MEDIUM_ID, "medium_id", data: &ipv4_devconf.secure_redirects,
&ipv4_devconf.medium_id, sizeof(int), 0644, NULL, maxlen: sizeof(int),
&proc_dointvec}, mode: 0644,
{NET_IPV4_CONF_BOOTP_RELAY, "bootp_relay", proc_handler: &proc_dointvec,
&ipv4_devconf.bootp_relay, sizeof(int), 0644, NULL, },
&proc_dointvec}, {
{NET_IPV4_CONF_LOG_MARTIANS, "log_martians", ctl_name: NET_IPV4_CONF_SHARED_MEDIA,
&ipv4_devconf.log_martians, sizeof(int), 0644, NULL, procname: "shared_media",
&proc_dointvec}, data: &ipv4_devconf.shared_media,
{NET_IPV4_CONF_TAG, "tag", maxlen: sizeof(int),
&ipv4_devconf.tag, sizeof(int), 0644, NULL, mode: 0644,
&proc_dointvec}, proc_handler: &proc_dointvec,
{NET_IPV4_CONF_ARPFILTER, "arp_filter", },
&ipv4_devconf.arp_filter, sizeof(int), 0644, NULL, {
&proc_dointvec}, ctl_name: NET_IPV4_CONF_RP_FILTER,
{0}}, procname: "rp_filter",
data: &ipv4_devconf.rp_filter,
{{NET_PROTO_CONF_ALL, "all", NULL, 0, 0555, devinet_sysctl.devinet_vars},{0}}, maxlen: sizeof(int),
{{NET_IPV4_CONF, "conf", NULL, 0, 0555, devinet_sysctl.devinet_dev},{0}}, mode: 0644,
{{NET_IPV4, "ipv4", NULL, 0, 0555, devinet_sysctl.devinet_conf_dir},{0}}, proc_handler: &proc_dointvec,
{{CTL_NET, "net", NULL, 0, 0555, devinet_sysctl.devinet_proto_dir},{0}} },
{
ctl_name: NET_IPV4_CONF_SEND_REDIRECTS,
procname: "send_redirects",
data: &ipv4_devconf.send_redirects,
maxlen: sizeof(int),
mode: 0644,
proc_handler: &proc_dointvec,
},
{
ctl_name: NET_IPV4_CONF_ACCEPT_SOURCE_ROUTE,
procname: "accept_source_route",
data: &ipv4_devconf.accept_source_route,
maxlen: sizeof(int),
mode: 0644,
proc_handler: &proc_dointvec,
},
{
ctl_name: NET_IPV4_CONF_PROXY_ARP,
procname: "proxy_arp",
data: &ipv4_devconf.proxy_arp,
maxlen: sizeof(int),
mode: 0644,
proc_handler: &proc_dointvec,
},
{
ctl_name: NET_IPV4_CONF_MEDIUM_ID,
procname: "medium_id",
data: &ipv4_devconf.medium_id,
maxlen: sizeof(int),
mode: 0644,
proc_handler: &proc_dointvec,
},
{
ctl_name: NET_IPV4_CONF_BOOTP_RELAY,
procname: "bootp_relay",
data: &ipv4_devconf.bootp_relay,
maxlen: sizeof(int),
mode: 0644,
proc_handler: &proc_dointvec,
},
{
ctl_name: NET_IPV4_CONF_LOG_MARTIANS,
procname: "log_martians",
data: &ipv4_devconf.log_martians,
maxlen: sizeof(int),
mode: 0644,
proc_handler: &proc_dointvec,
},
{
ctl_name: NET_IPV4_CONF_TAG,
procname: "tag",
data: &ipv4_devconf.tag,
maxlen: sizeof(int),
mode: 0644,
proc_handler: &proc_dointvec,
},
{
ctl_name: NET_IPV4_CONF_ARPFILTER,
procname: "arp_filter",
data: &ipv4_devconf.arp_filter,
maxlen: sizeof(int),
mode: 0644,
proc_handler: &proc_dointvec,
},
},
devinet_dev: {
{
ctl_name: NET_PROTO_CONF_ALL,
procname: "all",
mode: 0555,
child: devinet_sysctl.devinet_vars,
},
},
devinet_conf_dir: {
{
ctl_name: NET_IPV4_CONF,
procname: "conf",
mode: 0555,
child: devinet_sysctl.devinet_dev,
},
},
devinet_proto_dir: {
{
ctl_name: NET_IPV4,
procname: "ipv4",
mode: 0555,
child: devinet_sysctl.devinet_conf_dir,
},
},
devinet_root_dir: {
{
ctl_name: CTL_NET,
procname: "net",
mode: 0555,
child: devinet_sysctl.devinet_proto_dir,
},
},
}; };
static void devinet_sysctl_register(struct in_device *in_dev, struct ipv4_devconf *p) static void devinet_sysctl_register(struct in_device *in_dev,
struct ipv4_devconf *p)
{ {
int i; int i;
struct net_device *dev = in_dev ? in_dev->dev : NULL; struct net_device *dev = in_dev ? in_dev->dev : NULL;
struct devinet_sysctl_table *t; struct devinet_sysctl_table *t = kmalloc(sizeof(*t), GFP_KERNEL);
t = kmalloc(sizeof(*t), GFP_KERNEL); if (!t)
if (t == NULL)
return; return;
memcpy(t, &devinet_sysctl, sizeof(*t)); memcpy(t, &devinet_sysctl, sizeof(*t));
for (i=0; i<sizeof(t->devinet_vars)/sizeof(t->devinet_vars[0])-1; i++) { for (i = 0;
t->devinet_vars[i].data += (char*)p - (char*)&ipv4_devconf; i < sizeof(t->devinet_vars) / sizeof(t->devinet_vars[0]) - 1;
i++) {
t->devinet_vars[i].data += (char *)p - (char *)&ipv4_devconf;
t->devinet_vars[i].de = NULL; t->devinet_vars[i].de = NULL;
} }
if (dev) { if (dev) {
...@@ -1110,17 +1218,17 @@ static void devinet_sysctl_register(struct in_device *in_dev, struct ipv4_devcon ...@@ -1110,17 +1218,17 @@ static void devinet_sysctl_register(struct in_device *in_dev, struct ipv4_devcon
t->devinet_dev[0].procname = "default"; t->devinet_dev[0].procname = "default";
t->devinet_dev[0].ctl_name = NET_PROTO_CONF_DEFAULT; t->devinet_dev[0].ctl_name = NET_PROTO_CONF_DEFAULT;
} }
t->devinet_dev[0].child = t->devinet_vars; t->devinet_dev[0].child = t->devinet_vars;
t->devinet_dev[0].de = NULL; t->devinet_dev[0].de = NULL;
t->devinet_conf_dir[0].child = t->devinet_dev; t->devinet_conf_dir[0].child = t->devinet_dev;
t->devinet_conf_dir[0].de = NULL; t->devinet_conf_dir[0].de = NULL;
t->devinet_proto_dir[0].child = t->devinet_conf_dir; t->devinet_proto_dir[0].child = t->devinet_conf_dir;
t->devinet_proto_dir[0].de = NULL; t->devinet_proto_dir[0].de = NULL;
t->devinet_root_dir[0].child = t->devinet_proto_dir; t->devinet_root_dir[0].child = t->devinet_proto_dir;
t->devinet_root_dir[0].de = NULL; t->devinet_root_dir[0].de = NULL;
t->sysctl_header = register_sysctl_table(t->devinet_root_dir, 0); t->sysctl_header = register_sysctl_table(t->devinet_root_dir, 0);
if (t->sysctl_header == NULL) if (!t->sysctl_header)
kfree(t); kfree(t);
else else
p->sysctl = t; p->sysctl = t;
......
/* /*
* NET3: Implementation of the ICMP protocol layer. * NET3: Implementation of the ICMP protocol layer.
* *
* Alan Cox, <alan@redhat.com> * Alan Cox, <alan@redhat.com>
* *
* Version: $Id: icmp.c,v 1.85 2002/02/01 22:01:03 davem Exp $ * Version: $Id: icmp.c,v 1.85 2002/02/01 22:01:03 davem Exp $
...@@ -21,25 +21,25 @@ ...@@ -21,25 +21,25 @@
* of broken per type icmp timeouts. * of broken per type icmp timeouts.
* Mike Shaver : RFC1122 checks. * Mike Shaver : RFC1122 checks.
* Alan Cox : Multicast ping reply as self. * Alan Cox : Multicast ping reply as self.
* Alan Cox : Fix atomicity lockup in ip_build_xmit * Alan Cox : Fix atomicity lockup in ip_build_xmit
* call. * call.
* Alan Cox : Added 216,128 byte paths to the MTU * Alan Cox : Added 216,128 byte paths to the MTU
* code. * code.
* Martin Mares : RFC1812 checks. * Martin Mares : RFC1812 checks.
* Martin Mares : Can be configured to follow redirects * Martin Mares : Can be configured to follow redirects
* if acting as a router _without_ a * if acting as a router _without_ a
* routing protocol (RFC 1812). * routing protocol (RFC 1812).
* Martin Mares : Echo requests may be configured to * Martin Mares : Echo requests may be configured to
* be ignored (RFC 1812). * be ignored (RFC 1812).
* Martin Mares : Limitation of ICMP error message * Martin Mares : Limitation of ICMP error message
* transmit rate (RFC 1812). * transmit rate (RFC 1812).
* Martin Mares : TOS and Precedence set correctly * Martin Mares : TOS and Precedence set correctly
* (RFC 1812). * (RFC 1812).
* Martin Mares : Now copying as much data from the * Martin Mares : Now copying as much data from the
* original packet as we can without * original packet as we can without
* exceeding 576 bytes (RFC 1812). * exceeding 576 bytes (RFC 1812).
* Willy Konynenberg : Transparent proxying support. * Willy Konynenberg : Transparent proxying support.
* Keith Owens : RFC1191 correction for 4.2BSD based * Keith Owens : RFC1191 correction for 4.2BSD based
* path MTU bug. * path MTU bug.
* Thomas Quinot : ICMP Dest Unreach codes up to 15 are * Thomas Quinot : ICMP Dest Unreach codes up to 15 are
* valid (RFC 1812). * valid (RFC 1812).
...@@ -52,9 +52,10 @@ ...@@ -52,9 +52,10 @@
* the rates sysctl configurable. * the rates sysctl configurable.
* Yu Tianli : Fixed two ugly bugs in icmp_send * Yu Tianli : Fixed two ugly bugs in icmp_send
* - IP option length was accounted wrongly * - IP option length was accounted wrongly
* - ICMP header length was not accounted at all. * - ICMP header length was not accounted
* Tristan Greaves : Added sysctl option to ignore bogus broadcast * at all.
* responses from broken routers. * Tristan Greaves : Added sysctl option to ignore bogus
* broadcast responses from broken routers.
* *
* To Fix: * To Fix:
* *
...@@ -95,8 +96,7 @@ ...@@ -95,8 +96,7 @@
* Build xmit assembly blocks * Build xmit assembly blocks
*/ */
struct icmp_bxm struct icmp_bxm {
{
struct sk_buff *skb; struct sk_buff *skb;
int offset; int offset;
int data_len; int data_len;
...@@ -114,29 +114,76 @@ struct icmp_bxm ...@@ -114,29 +114,76 @@ struct icmp_bxm
/* /*
* Statistics * Statistics
*/ */
struct icmp_mib icmp_statistics[NR_CPUS * 2];
struct icmp_mib icmp_statistics[NR_CPUS*2];
/* An array of errno for error messages from dest unreach. */ /* An array of errno for error messages from dest unreach. */
/* RFC 1122: 3.2.2.1 states that NET_UNREACH, HOST_UNREACH and SR_FAILED MUST be considered 'transient errors'. */ /* RFC 1122: 3.2.2.1 states that NET_UNREACH, HOST_UNREACH and SR_FAILED MUST be considered 'transient errors'. */
struct icmp_err icmp_err_convert[] = { struct icmp_err icmp_err_convert[] = {
{ ENETUNREACH, 0 }, /* ICMP_NET_UNREACH */ {
{ EHOSTUNREACH, 0 }, /* ICMP_HOST_UNREACH */ errno: ENETUNREACH, /* ICMP_NET_UNREACH */
{ ENOPROTOOPT, 1 }, /* ICMP_PROT_UNREACH */ fatal: 0,
{ ECONNREFUSED, 1 }, /* ICMP_PORT_UNREACH */ },
{ EMSGSIZE, 0 }, /* ICMP_FRAG_NEEDED */ {
{ EOPNOTSUPP, 0 }, /* ICMP_SR_FAILED */ errno: EHOSTUNREACH, /* ICMP_HOST_UNREACH */
{ ENETUNREACH, 1 }, /* ICMP_NET_UNKNOWN */ fatal: 0,
{ EHOSTDOWN, 1 }, /* ICMP_HOST_UNKNOWN */ },
{ ENONET, 1 }, /* ICMP_HOST_ISOLATED */ {
{ ENETUNREACH, 1 }, /* ICMP_NET_ANO */ errno: ENOPROTOOPT /* ICMP_PROT_UNREACH */,
{ EHOSTUNREACH, 1 }, /* ICMP_HOST_ANO */ fatal: 1,
{ ENETUNREACH, 0 }, /* ICMP_NET_UNR_TOS */ },
{ EHOSTUNREACH, 0 }, /* ICMP_HOST_UNR_TOS */ {
{ EHOSTUNREACH, 1 }, /* ICMP_PKT_FILTERED */ errno: ECONNREFUSED, /* ICMP_PORT_UNREACH */
{ EHOSTUNREACH, 1 }, /* ICMP_PREC_VIOLATION */ fatal: 1,
{ EHOSTUNREACH, 1 } /* ICMP_PREC_CUTOFF */ },
{
errno: EMSGSIZE, /* ICMP_FRAG_NEEDED */
fatal: 0,
},
{
errno: EOPNOTSUPP, /* ICMP_SR_FAILED */
fatal: 0,
},
{
errno: ENETUNREACH, /* ICMP_NET_UNKNOWN */
fatal: 1,
},
{
errno: EHOSTDOWN, /* ICMP_HOST_UNKNOWN */
fatal: 1,
},
{
errno: ENONET, /* ICMP_HOST_ISOLATED */
fatal: 1,
},
{
errno: ENETUNREACH, /* ICMP_NET_ANO */
fatal: 1,
},
{
errno: EHOSTUNREACH, /* ICMP_HOST_ANO */
fatal: 1,
},
{
errno: ENETUNREACH, /* ICMP_NET_UNR_TOS */
fatal: 0,
},
{
errno: EHOSTUNREACH, /* ICMP_HOST_UNR_TOS */
fatal: 0,
},
{
errno: EHOSTUNREACH, /* ICMP_PKT_FILTERED */
fatal: 1,
},
{
errno: EHOSTUNREACH, /* ICMP_PREC_VIOLATION */
fatal: 1,
},
{
errno: EHOSTUNREACH, /* ICMP_PREC_CUTOFF */
fatal: 1,
},
}; };
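For context, the transport protocols index this table with the ICMP_DEST_UNREACH code to obtain an errno plus a soft/hard ("fatal") flag. A standalone sketch of that lookup with a trimmed copy of the first four entries (illustration only, not kernel code; the field is renamed errno_val here because userspace errno.h defines errno as a macro):

#include <stdio.h>
#include <errno.h>

struct toy_icmp_err {
	int errno_val;		/* the kernel field is simply called errno */
	unsigned fatal:1;	/* 1 = hard error, 0 = transient */
};

static const struct toy_icmp_err toy_err_convert[] = {
	[0] = { ENETUNREACH,  0 },	/* ICMP_NET_UNREACH  */
	[1] = { EHOSTUNREACH, 0 },	/* ICMP_HOST_UNREACH */
	[2] = { ENOPROTOOPT,  1 },	/* ICMP_PROT_UNREACH */
	[3] = { ECONNREFUSED, 1 },	/* ICMP_PORT_UNREACH */
};

int main(void)
{
	int code = 3;	/* ICMP_PORT_UNREACH, e.g. a closed UDP port */

	printf("code %d -> errno %d, fatal %u\n", code,
	       toy_err_convert[code].errno_val, toy_err_convert[code].fatal);
	return 0;
}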
extern int sysctl_ip_default_ttl; extern int sysctl_ip_default_ttl;
...@@ -148,19 +195,19 @@ int sysctl_icmp_echo_ignore_broadcasts; ...@@ -148,19 +195,19 @@ int sysctl_icmp_echo_ignore_broadcasts;
/* Control parameter - ignore bogus broadcast responses? */ /* Control parameter - ignore bogus broadcast responses? */
int sysctl_icmp_ignore_bogus_error_responses; int sysctl_icmp_ignore_bogus_error_responses;
/* /*
* Configurable global rate limit. * Configurable global rate limit.
* *
* ratelimit defines tokens/packet consumed for dst->rate_token bucket * ratelimit defines tokens/packet consumed for dst->rate_token bucket
* ratemask defines which icmp types are rate limited by setting * ratemask defines which icmp types are rate limited by setting
* its bit position. * its bit position.
* *
* default: * default:
* dest unreachable (3), source quench (4), * dest unreachable (3), source quench (4),
* time exceeded (11), parameter problem (12) * time exceeded (11), parameter problem (12)
*/ */
int sysctl_icmp_ratelimit = 1*HZ; int sysctl_icmp_ratelimit = 1 * HZ;
int sysctl_icmp_ratemask = 0x1818; int sysctl_icmp_ratemask = 0x1818;
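The default mask value can be reproduced from the type numbers listed in the comment above; a quick userspace check of the bit arithmetic (illustration only, not kernel code):

#include <stdio.h>

int main(void)
{
	/* dest unreachable (3), source quench (4),
	 * time exceeded (11), parameter problem (12)
	 */
	unsigned int mask = (1u << 3) | (1u << 4) | (1u << 11) | (1u << 12);

	printf("0x%x\n", mask);				/* prints 0x1818 */

	/* A type is rate limited when its bit is set in the mask. */
	printf("type 3 limited: %d\n", !!(mask & (1u << 3)));	/* 1 */
	printf("type 0 limited: %d\n", !!(mask & (1u << 0)));	/* 0 */
	return 0;
}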
/* /*
...@@ -182,7 +229,6 @@ static struct icmp_control icmp_pointers[NR_ICMP_TYPES+1]; ...@@ -182,7 +229,6 @@ static struct icmp_control icmp_pointers[NR_ICMP_TYPES+1];
* our ICMP output as well as maintain a clean interface throughout * our ICMP output as well as maintain a clean interface throughout
* all layers. All Socketless IP sends will soon be gone. * all layers. All Socketless IP sends will soon be gone.
*/ */
struct socket *icmp_socket; struct socket *icmp_socket;
/* ICMPv4 socket is only a bit non-reenterable (unlike ICMPv6, /* ICMPv4 socket is only a bit non-reenterable (unlike ICMPv6,
...@@ -194,13 +240,17 @@ static int icmp_xmit_holder = -1; ...@@ -194,13 +240,17 @@ static int icmp_xmit_holder = -1;
static int icmp_xmit_lock_bh(void) static int icmp_xmit_lock_bh(void)
{ {
int rc;
if (!spin_trylock(&icmp_socket->sk->lock.slock)) { if (!spin_trylock(&icmp_socket->sk->lock.slock)) {
rc = -EAGAIN;
if (icmp_xmit_holder == smp_processor_id()) if (icmp_xmit_holder == smp_processor_id())
return -EAGAIN; goto out;
spin_lock(&icmp_socket->sk->lock.slock); spin_lock(&icmp_socket->sk->lock.slock);
} }
rc = 0;
icmp_xmit_holder = smp_processor_id(); icmp_xmit_holder = smp_processor_id();
return 0; out:
return rc;
} }
static __inline__ int icmp_xmit_lock(void) static __inline__ int icmp_xmit_lock(void)
...@@ -236,14 +286,14 @@ static __inline__ void icmp_xmit_unlock(void) ...@@ -236,14 +286,14 @@ static __inline__ void icmp_xmit_unlock(void)
* This function is generic and could be used for other purposes * This function is generic and could be used for other purposes
* too. It uses a Token bucket filter as suggested by Alexey Kuznetsov. * too. It uses a Token bucket filter as suggested by Alexey Kuznetsov.
* *
* Note that the same dst_entry fields are modified by functions in * Note that the same dst_entry fields are modified by functions in
* route.c too, but these work for packet destinations while xrlim_allow * route.c too, but these work for packet destinations while xrlim_allow
* works for icmp destinations. This means the rate limiting information * works for icmp destinations. This means the rate limiting information
* for one "ip object" is shared - and these ICMPs are twice limited: * for one "ip object" is shared - and these ICMPs are twice limited:
* by source and by destination. * by source and by destination.
* *
* RFC 1812: 4.3.2.8 SHOULD be able to limit error message rate * RFC 1812: 4.3.2.8 SHOULD be able to limit error message rate
* SHOULD allow setting of rate limits * SHOULD allow setting of rate limits
* *
* Shared between ICMPv4 and ICMPv6. * Shared between ICMPv4 and ICMPv6.
*/ */
...@@ -251,68 +301,75 @@ static __inline__ void icmp_xmit_unlock(void) ...@@ -251,68 +301,75 @@ static __inline__ void icmp_xmit_unlock(void)
int xrlim_allow(struct dst_entry *dst, int timeout) int xrlim_allow(struct dst_entry *dst, int timeout)
{ {
unsigned long now; unsigned long now;
int rc = 0;
now = jiffies; now = jiffies;
dst->rate_tokens += now - dst->rate_last; dst->rate_tokens += now - dst->rate_last;
dst->rate_last = now; dst->rate_last = now;
if (dst->rate_tokens > XRLIM_BURST_FACTOR*timeout) if (dst->rate_tokens > XRLIM_BURST_FACTOR * timeout)
dst->rate_tokens = XRLIM_BURST_FACTOR*timeout; dst->rate_tokens = XRLIM_BURST_FACTOR * timeout;
if (dst->rate_tokens >= timeout) { if (dst->rate_tokens >= timeout) {
dst->rate_tokens -= timeout; dst->rate_tokens -= timeout;
return 1; rc = 1;
} }
return 0; return rc;
} }
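xrlim_allow() above is a classic token bucket: idle time earns tokens (capped at XRLIM_BURST_FACTOR * timeout) and each transmitted ICMP spends one timeout's worth. A standalone sketch of the same arithmetic, with plain variables standing in for dst->rate_tokens/rate_last and for jiffies (illustration only; the burst factor of 6 is assumed to match include/net/dst.h):

#include <stdio.h>

#define XRLIM_BURST_FACTOR 6

static unsigned long rate_tokens;
static unsigned long rate_last;

static int toy_xrlim_allow(unsigned long now, int timeout)
{
	int rc = 0;

	rate_tokens += now - rate_last;			/* credit elapsed time */
	rate_last = now;
	if (rate_tokens > XRLIM_BURST_FACTOR * timeout)
		rate_tokens = XRLIM_BURST_FACTOR * timeout; /* cap the burst */
	if (rate_tokens >= timeout) {
		rate_tokens -= timeout;			/* spend one packet's worth */
		rc = 1;					/* allowed */
	}
	return rc;
}

int main(void)
{
	unsigned long t;

	/* With timeout == 10, at most one packet per 10 "ticks" is allowed
	 * in steady state; after an idle stretch up to 6 may go back to back.
	 */
	for (t = 0; t < 100; t += 2)
		printf("t=%lu allow=%d\n", t, toy_xrlim_allow(t, 10));
	return 0;
}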
static inline int icmpv4_xrlim_allow(struct rtable *rt, int type, int code) static inline int icmpv4_xrlim_allow(struct rtable *rt, int type, int code)
{ {
struct dst_entry *dst = &rt->u.dst; struct dst_entry *dst = &rt->u.dst;
int rc = 1;
if (type > NR_ICMP_TYPES) if (type > NR_ICMP_TYPES)
return 1; goto out;
/* Don't limit PMTU discovery. */ /* Don't limit PMTU discovery. */
if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED)
return 1; goto out;
/* No rate limit on loopback */ /* No rate limit on loopback */
if (dst->dev && (dst->dev->flags&IFF_LOOPBACK)) if (dst->dev && (dst->dev->flags&IFF_LOOPBACK))
return 1; goto out;
/* Limit if icmp type is enabled in ratemask. */ /* Limit if icmp type is enabled in ratemask. */
if((1 << type) & sysctl_icmp_ratemask) if ((1 << type) & sysctl_icmp_ratemask)
return xrlim_allow(dst, sysctl_icmp_ratelimit); rc = xrlim_allow(dst, sysctl_icmp_ratelimit);
else out:
return 1; return rc;
} }
/* /*
* Maintain the counters used in the SNMP statistics for outgoing ICMP * Maintain the counters used in the SNMP statistics for outgoing ICMP
*/ */
static void icmp_out_count(int type) static void icmp_out_count(int type)
{ {
if (type>NR_ICMP_TYPES) if (type <= NR_ICMP_TYPES) {
return; (icmp_pointers[type].output)[(smp_processor_id() * 2 +
(icmp_pointers[type].output)[(smp_processor_id()*2+!in_softirq())*sizeof(struct icmp_mib)/sizeof(unsigned long)]++; !in_softirq()) *
ICMP_INC_STATS(IcmpOutMsgs); sizeof(struct icmp_mib) /
sizeof(unsigned long)]++;
ICMP_INC_STATS(IcmpOutMsgs);
}
} }
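The indexing above relies on icmp_statistics[] holding NR_CPUS * 2 copies of struct icmp_mib, two per CPU (softirq and process context): .output points at a field of copy 0, and the offset arithmetic steps to the same field in copy smp_processor_id() * 2 + !in_softirq(). A worked userspace sketch of that pointer arithmetic with made-up names (illustration only, not kernel code):

#include <stdio.h>

#define TOY_NR_CPUS 2

struct toy_mib {
	unsigned long in_msgs;
	unsigned long out_msgs;
};

static struct toy_mib stats[TOY_NR_CPUS * 2];

int main(void)
{
	int cpu = 1, in_softirq = 0;
	int slot = cpu * 2 + !in_softirq;		/* copy index: 3 */
	unsigned long *field = &stats[0].out_msgs;	/* like .output above */

	/* Step forward by "slot" whole struct toy_mib's, measured in longs,
	 * exactly as icmp_out_count() does for struct icmp_mib.
	 */
	field[slot * sizeof(struct toy_mib) / sizeof(unsigned long)]++;

	printf("stats[3].out_msgs = %lu\n", stats[3].out_msgs);	/* 1 */
	return 0;
}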
/* /*
* Checksum each fragment, and on the first include the headers and final checksum. * Checksum each fragment, and on the first include the headers and final
* checksum.
*/ */
static int icmp_glue_bits(const void *p, char *to, unsigned int offset,
static int icmp_glue_bits(const void *p, char *to, unsigned int offset, unsigned int fraglen) unsigned int fraglen)
{ {
struct icmp_bxm *icmp_param = (struct icmp_bxm *)p; struct icmp_bxm *icmp_param = (struct icmp_bxm *)p;
struct icmphdr *icmph; struct icmphdr *icmph;
unsigned int csum; unsigned int csum;
if (offset) { if (offset) {
icmp_param->csum=skb_copy_and_csum_bits(icmp_param->skb, icmp_param->csum =
icmp_param->offset+(offset-icmp_param->head_len), skb_copy_and_csum_bits(icmp_param->skb,
to, fraglen,icmp_param->csum); icmp_param->offset +
return 0; (offset - icmp_param->head_len),
to, fraglen, icmp_param->csum);
goto out;
} }
/* /*
...@@ -321,15 +378,14 @@ static int icmp_glue_bits(const void *p, char *to, unsigned int offset, unsigned ...@@ -321,15 +378,14 @@ static int icmp_glue_bits(const void *p, char *to, unsigned int offset, unsigned
* for the whole packet here. * for the whole packet here.
*/ */
csum = csum_partial_copy_nocheck((void *)&icmp_param->data, csum = csum_partial_copy_nocheck((void *)&icmp_param->data,
to, icmp_param->head_len, to, icmp_param->head_len,
icmp_param->csum); icmp_param->csum);
csum=skb_copy_and_csum_bits(icmp_param->skb, csum = skb_copy_and_csum_bits(icmp_param->skb, icmp_param->offset,
icmp_param->offset, to + icmp_param->head_len,
to+icmp_param->head_len, fraglen - icmp_param->head_len, csum);
fraglen-icmp_param->head_len, icmph = (struct icmphdr *)to;
csum);
icmph=(struct icmphdr *)to;
icmph->checksum = csum_fold(csum); icmph->checksum = csum_fold(csum);
out:
return 0; return 0;
} }
...@@ -339,20 +395,18 @@ static int icmp_glue_bits(const void *p, char *to, unsigned int offset, unsigned ...@@ -339,20 +395,18 @@ static int icmp_glue_bits(const void *p, char *to, unsigned int offset, unsigned
static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb) static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
{ {
struct sock *sk=icmp_socket->sk; struct sock *sk = icmp_socket->sk;
struct inet_opt *inet = inet_sk(sk); struct inet_opt *inet = inet_sk(sk);
struct ipcm_cookie ipc; struct ipcm_cookie ipc;
struct rtable *rt = (struct rtable*)skb->dst; struct rtable *rt = (struct rtable *)skb->dst;
u32 daddr; u32 daddr;
if (ip_options_echo(&icmp_param->replyopts, skb)) if (ip_options_echo(&icmp_param->replyopts, skb) ||
return; icmp_xmit_lock_bh())
goto out;
if (icmp_xmit_lock_bh())
return;
icmp_param->data.icmph.checksum=0; icmp_param->data.icmph.checksum = 0;
icmp_param->csum=0; icmp_param->csum = 0;
icmp_out_count(icmp_param->data.icmph.type); icmp_out_count(icmp_param->data.icmph.type);
inet->tos = skb->nh.iph->tos; inet->tos = skb->nh.iph->tos;
...@@ -364,24 +418,27 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb) ...@@ -364,24 +418,27 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
if (ipc.opt->srr) if (ipc.opt->srr)
daddr = icmp_param->replyopts.faddr; daddr = icmp_param->replyopts.faddr;
} }
if (ip_route_output(&rt, daddr, rt->rt_spec_dst, RT_TOS(skb->nh.iph->tos), 0)) if (ip_route_output(&rt, daddr, rt->rt_spec_dst,
goto out; RT_TOS(skb->nh.iph->tos), 0))
if (icmpv4_xrlim_allow(rt, icmp_param->data.icmph.type, goto out_unlock;
icmp_param->data.icmph.code)) { if (icmpv4_xrlim_allow(rt, icmp_param->data.icmph.type,
ip_build_xmit(sk, icmp_glue_bits, icmp_param, icmp_param->data.icmph.code)) {
ip_build_xmit(sk, icmp_glue_bits, icmp_param,
icmp_param->data_len+icmp_param->head_len, icmp_param->data_len+icmp_param->head_len,
&ipc, rt, MSG_DONTWAIT); &ipc, rt, MSG_DONTWAIT);
} }
ip_rt_put(rt); ip_rt_put(rt);
out: out_unlock:
icmp_xmit_unlock_bh(); icmp_xmit_unlock_bh();
out:;
} }
/* /*
* Send an ICMP message in response to a situation * Send an ICMP message in response to a situation
* *
* RFC 1122: 3.2.2 MUST send at least the IP header and 8 bytes of header. MAY send more (we do). * RFC 1122: 3.2.2 MUST send at least the IP header and 8 bytes of header.
* MAY send more (we do).
* MUST NOT change this header information. * MUST NOT change this header information.
* MUST NOT reply to a multicast/broadcast IP address. * MUST NOT reply to a multicast/broadcast IP address.
* MUST NOT reply to a multicast/broadcast MAC address. * MUST NOT reply to a multicast/broadcast MAC address.
...@@ -393,13 +450,13 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, u32 info) ...@@ -393,13 +450,13 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, u32 info)
struct iphdr *iph; struct iphdr *iph;
int room; int room;
struct icmp_bxm icmp_param; struct icmp_bxm icmp_param;
struct rtable *rt = (struct rtable*)skb_in->dst; struct rtable *rt = (struct rtable *)skb_in->dst;
struct ipcm_cookie ipc; struct ipcm_cookie ipc;
u32 saddr; u32 saddr;
u8 tos; u8 tos;
if (!rt) if (!rt)
return; goto out;
/* /*
* Find the original header. It is expected to be valid, of course. * Find the original header. It is expected to be valid, of course.
...@@ -408,66 +465,67 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, u32 info) ...@@ -408,66 +465,67 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, u32 info)
*/ */
iph = skb_in->nh.iph; iph = skb_in->nh.iph;
if ((u8*)iph < skb_in->head || (u8*)(iph+1) > skb_in->tail) if ((u8 *)iph < skb_in->head || (u8 *)(iph + 1) > skb_in->tail)
return; goto out;
/* /*
* No replies to physical multicast/broadcast * No replies to physical multicast/broadcast
*/ */
if (skb_in->pkt_type!=PACKET_HOST) if (skb_in->pkt_type != PACKET_HOST)
return; goto out;
/* /*
* Now check at the protocol level * Now check at the protocol level
*/ */
if (rt->rt_flags&(RTCF_BROADCAST|RTCF_MULTICAST)) if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
return; goto out;
/* /*
* Only reply to fragment 0. We byte re-order the constant * Only reply to fragment 0. We byte re-order the constant
* mask for efficiency. * mask for efficiency.
*/ */
if (iph->frag_off&htons(IP_OFFSET)) if (iph->frag_off & htons(IP_OFFSET))
return; goto out;
/* /*
* If we send an ICMP error to an ICMP error a mess would result.. * If we send an ICMP error to an ICMP error a mess would result..
*/ */
if (icmp_pointers[type].error) { if (icmp_pointers[type].error) {
/* /*
* We are an error, check if we are replying to an ICMP error * We are an error, check if we are replying to an
* ICMP error
*/ */
if (iph->protocol==IPPROTO_ICMP) { if (iph->protocol == IPPROTO_ICMP) {
u8 inner_type; u8 inner_type;
if (skb_copy_bits(skb_in, if (skb_copy_bits(skb_in,
skb_in->nh.raw + (iph->ihl<<2) skb_in->nh.raw + (iph->ihl << 2) +
+ offsetof(struct icmphdr, type) offsetof(struct icmphdr, type) -
- skb_in->data, skb_in->data, &inner_type, 1))
&inner_type, 1)) goto out;
return;
/* /*
* Assume any unknown ICMP type is an error. This isn't * Assume any unknown ICMP type is an error. This
* specified by the RFC, but think about it.. * isn't specified by the RFC, but think about it..
*/ */
if (inner_type>NR_ICMP_TYPES || icmp_pointers[inner_type].error) if (inner_type > NR_ICMP_TYPES ||
return; icmp_pointers[inner_type].error)
goto out;
} }
} }
if (icmp_xmit_lock()) if (icmp_xmit_lock())
return; goto out;
/* /*
* Construct source address and options. * Construct source address and options.
*/ */
#ifdef CONFIG_IP_ROUTE_NAT #ifdef CONFIG_IP_ROUTE_NAT
/* /*
* Restore original addresses if packet has been translated. * Restore original addresses if packet has been translated.
*/ */
if (rt->rt_flags&RTCF_NAT && IPCB(skb_in)->flags&IPSKB_TRANSLATED) { if (rt->rt_flags & RTCF_NAT && IPCB(skb_in)->flags & IPSKB_TRANSLATED) {
iph->daddr = rt->key.dst; iph->daddr = rt->key.dst;
iph->saddr = rt->key.src; iph->saddr = rt->key.src;
} }
...@@ -477,14 +535,14 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, u32 info) ...@@ -477,14 +535,14 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, u32 info)
if (!(rt->rt_flags & RTCF_LOCAL)) if (!(rt->rt_flags & RTCF_LOCAL))
saddr = 0; saddr = 0;
tos = icmp_pointers[type].error ? tos = icmp_pointers[type].error ? ((iph->tos & IPTOS_TOS_MASK) |
((iph->tos & IPTOS_TOS_MASK) | IPTOS_PREC_INTERNETCONTROL) : IPTOS_PREC_INTERNETCONTROL) :
iph->tos; iph->tos;
if (ip_route_output(&rt, iph->saddr, saddr, RT_TOS(tos), 0)) if (ip_route_output(&rt, iph->saddr, saddr, RT_TOS(tos), 0))
goto out; goto out_unlock;
if (ip_options_echo(&icmp_param.replyopts, skb_in)) if (ip_options_echo(&icmp_param.replyopts, skb_in))
goto ende; goto ende;
...@@ -492,13 +550,13 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, u32 info) ...@@ -492,13 +550,13 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, u32 info)
* Prepare data for ICMP header. * Prepare data for ICMP header.
*/ */
icmp_param.data.icmph.type=type; icmp_param.data.icmph.type = type;
icmp_param.data.icmph.code=code; icmp_param.data.icmph.code = code;
icmp_param.data.icmph.un.gateway = info; icmp_param.data.icmph.un.gateway = info;
icmp_param.data.icmph.checksum=0; icmp_param.data.icmph.checksum = 0;
icmp_param.csum=0; icmp_param.csum = 0;
icmp_param.skb=skb_in; icmp_param.skb = skb_in;
icmp_param.offset=skb_in->nh.raw - skb_in->data; icmp_param.offset = skb_in->nh.raw - skb_in->data;
icmp_out_count(icmp_param.data.icmph.type); icmp_out_count(icmp_param.data.icmph.type);
inet_sk(icmp_socket->sk)->tos = tos; inet_sk(icmp_socket->sk)->tos = tos;
inet_sk(icmp_socket->sk)->ttl = sysctl_ip_default_ttl; inet_sk(icmp_socket->sk)->ttl = sysctl_ip_default_ttl;
...@@ -506,8 +564,9 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, u32 info) ...@@ -506,8 +564,9 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, u32 info)
ipc.opt = &icmp_param.replyopts; ipc.opt = &icmp_param.replyopts;
if (icmp_param.replyopts.srr) { if (icmp_param.replyopts.srr) {
ip_rt_put(rt); ip_rt_put(rt);
if (ip_route_output(&rt, icmp_param.replyopts.faddr, saddr, RT_TOS(tos), 0)) if (ip_route_output(&rt, icmp_param.replyopts.faddr,
goto out; saddr, RT_TOS(tos), 0))
goto out_unlock;
} }
if (!icmpv4_xrlim_allow(rt, type, code)) if (!icmpv4_xrlim_allow(rt, type, code))
...@@ -521,24 +580,24 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, u32 info) ...@@ -521,24 +580,24 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, u32 info)
room -= sizeof(struct iphdr) + icmp_param.replyopts.optlen; room -= sizeof(struct iphdr) + icmp_param.replyopts.optlen;
room -= sizeof(struct icmphdr); room -= sizeof(struct icmphdr);
icmp_param.data_len=skb_in->len-icmp_param.offset; icmp_param.data_len = skb_in->len - icmp_param.offset;
if (icmp_param.data_len > room) if (icmp_param.data_len > room)
icmp_param.data_len = room; icmp_param.data_len = room;
icmp_param.head_len = sizeof(struct icmphdr); icmp_param.head_len = sizeof(struct icmphdr);
ip_build_xmit(icmp_socket->sk, icmp_glue_bits, &icmp_param, ip_build_xmit(icmp_socket->sk, icmp_glue_bits, &icmp_param,
icmp_param.data_len+sizeof(struct icmphdr), icmp_param.data_len + sizeof(struct icmphdr),
&ipc, rt, MSG_DONTWAIT); &ipc, rt, MSG_DONTWAIT);
ende: ende:
ip_rt_put(rt); ip_rt_put(rt);
out: out_unlock:
icmp_xmit_unlock(); icmp_xmit_unlock();
out:;
} }
/* /*
* Handle ICMP_DEST_UNREACH, ICMP_TIME_EXCEED, and ICMP_QUENCH. * Handle ICMP_DEST_UNREACH, ICMP_TIME_EXCEED, and ICMP_QUENCH.
*/ */
static void icmp_unreach(struct sk_buff *skb) static void icmp_unreach(struct sk_buff *skb)
...@@ -556,60 +615,59 @@ static void icmp_unreach(struct sk_buff *skb) ...@@ -556,60 +615,59 @@ static void icmp_unreach(struct sk_buff *skb)
* additional check for longer headers in upper levels. * additional check for longer headers in upper levels.
*/ */
if (!pskb_may_pull(skb, sizeof(struct iphdr))) { if (!pskb_may_pull(skb, sizeof(struct iphdr)))
ICMP_INC_STATS_BH(IcmpInErrors); goto out_err;
return;
}
icmph = skb->h.icmph; icmph = skb->h.icmph;
iph = (struct iphdr *) skb->data; iph = (struct iphdr *)skb->data;
if (iph->ihl<5) { if (iph->ihl < 5) /* Mangled header, drop. */
/* Mangled header, drop. */ goto out_err;
ICMP_INC_STATS_BH(IcmpInErrors);
return;
}
if(icmph->type==ICMP_DEST_UNREACH) { if (icmph->type == ICMP_DEST_UNREACH) {
switch(icmph->code & 15) { switch (icmph->code & 15) {
case ICMP_NET_UNREACH: case ICMP_NET_UNREACH:
break; case ICMP_HOST_UNREACH:
case ICMP_HOST_UNREACH: case ICMP_PROT_UNREACH:
break; case ICMP_PORT_UNREACH:
case ICMP_PROT_UNREACH: break;
break; case ICMP_FRAG_NEEDED:
case ICMP_PORT_UNREACH: if (ipv4_config.no_pmtu_disc) {
break;
case ICMP_FRAG_NEEDED:
if (ipv4_config.no_pmtu_disc) {
if (net_ratelimit())
printk(KERN_INFO "ICMP: %u.%u.%u.%u: fragmentation needed and DF set.\n",
NIPQUAD(iph->daddr));
} else {
info = ip_rt_frag_needed(iph, ntohs(icmph->un.frag.mtu));
if (!info)
goto out;
}
break;
case ICMP_SR_FAILED:
if (net_ratelimit()) if (net_ratelimit())
printk(KERN_INFO "ICMP: %u.%u.%u.%u: Source Route Failed.\n", NIPQUAD(iph->daddr)); printk(KERN_INFO "ICMP: %u.%u.%u.%u: "
break; "fragmentation needed "
default: "and DF set.\n",
break; NIPQUAD(iph->daddr));
} else {
info = ip_rt_frag_needed(iph,
ntohs(icmph->un.frag.mtu));
if (!info)
goto out;
}
break;
case ICMP_SR_FAILED:
if (net_ratelimit())
printk(KERN_INFO "ICMP: %u.%u.%u.%u: Source "
"Route Failed.\n",
NIPQUAD(iph->daddr));
break;
default:
break;
} }
if (icmph->code>NR_ICMP_UNREACH) if (icmph->code > NR_ICMP_UNREACH)
goto out; goto out;
} else if (icmph->type == ICMP_PARAMETERPROB) { } else if (icmph->type == ICMP_PARAMETERPROB)
info = ntohl(icmph->un.gateway)>>24; info = ntohl(icmph->un.gateway) >> 24;
}
/* /*
* Throw it at our lower layers * Throw it at our lower layers
* *
* RFC 1122: 3.2.2 MUST extract the protocol ID from the passed header. * RFC 1122: 3.2.2 MUST extract the protocol ID from the passed
* RFC 1122: 3.2.2.1 MUST pass ICMP unreach messages to the transport layer. * header.
* RFC 1122: 3.2.2.2 MUST pass ICMP time expired messages to transport layer. * RFC 1122: 3.2.2.1 MUST pass ICMP unreach messages to the
* transport layer.
* RFC 1122: 3.2.2.2 MUST pass ICMP time expired messages to
* transport layer.
*/ */
/* /*
...@@ -619,25 +677,22 @@ static void icmp_unreach(struct sk_buff *skb) ...@@ -619,25 +677,22 @@ static void icmp_unreach(struct sk_buff *skb)
* get the other vendor to fix their kit. * get the other vendor to fix their kit.
*/ */
if (!sysctl_icmp_ignore_bogus_error_responses) if (!sysctl_icmp_ignore_bogus_error_responses &&
{ inet_addr_type(iph->daddr) == RTN_BROADCAST) {
if (net_ratelimit())
if (inet_addr_type(iph->daddr) == RTN_BROADCAST) printk(KERN_WARNING "%u.%u.%u.%u sent an invalid ICMP "
{ "error to a broadcast.\n",
if (net_ratelimit()) NIPQUAD(skb->nh.iph->saddr));
printk(KERN_WARNING "%u.%u.%u.%u sent an invalid ICMP error to a broadcast.\n", goto out;
NIPQUAD(skb->nh.iph->saddr));
goto out;
}
} }
/* Pull in the full IP header plus 8 bytes of protocol data to /* Pull in the full IP header plus 8 bytes of protocol data to
* avoid additional coding at protocol handlers. * avoid additional coding at protocol handlers.
*/ */
if (!pskb_may_pull(skb, iph->ihl*4+8)) if (!pskb_may_pull(skb, iph->ihl * 4 + 8))
goto out; goto out;
iph = (struct iphdr *) skb->data; iph = (struct iphdr *)skb->data;
protocol = iph->protocol; protocol = iph->protocol;
/* /*
...@@ -647,10 +702,10 @@ static void icmp_unreach(struct sk_buff *skb) ...@@ -647,10 +702,10 @@ static void icmp_unreach(struct sk_buff *skb)
/* Note: See raw.c and net/raw.h, RAWV4_HTABLE_SIZE==MAX_INET_PROTOS */ /* Note: See raw.c and net/raw.h, RAWV4_HTABLE_SIZE==MAX_INET_PROTOS */
hash = protocol & (MAX_INET_PROTOS - 1); hash = protocol & (MAX_INET_PROTOS - 1);
read_lock(&raw_v4_lock); read_lock(&raw_v4_lock);
if ((raw_sk = raw_v4_htable[hash]) != NULL) if ((raw_sk = raw_v4_htable[hash]) != NULL) {
{
while ((raw_sk = __raw_v4_lookup(raw_sk, protocol, iph->daddr, while ((raw_sk = __raw_v4_lookup(raw_sk, protocol, iph->daddr,
iph->saddr, skb->dev->ifindex)) != NULL) { iph->saddr,
skb->dev->ifindex)) != NULL) {
raw_err(raw_sk, skb, info); raw_err(raw_sk, skb, info);
raw_sk = raw_sk->next; raw_sk = raw_sk->next;
iph = (struct iphdr *)skb->data; iph = (struct iphdr *)skb->data;
...@@ -659,19 +714,18 @@ static void icmp_unreach(struct sk_buff *skb) ...@@ -659,19 +714,18 @@ static void icmp_unreach(struct sk_buff *skb)
read_unlock(&raw_v4_lock); read_unlock(&raw_v4_lock);
/* /*
* This can't change while we are doing it. * This can't change while we are doing it.
* Callers have obtained BR_NETPROTO_LOCK so * Callers have obtained BR_NETPROTO_LOCK so
* we are OK. * we are OK.
*/ */
ipprot = (struct inet_protocol *) inet_protos[hash]; ipprot = (struct inet_protocol *)inet_protos[hash];
while (ipprot) { while (ipprot) {
struct inet_protocol *nextip; struct inet_protocol *nextip;
nextip = (struct inet_protocol *) ipprot->next; nextip = (struct inet_protocol *)ipprot->next;
/*
/* * Pass it off to everyone who wants it.
* Pass it off to everyone who wants it.
*/ */
/* RFC1122: OK. Passes appropriate ICMP errors to the */ /* RFC1122: OK. Passes appropriate ICMP errors to the */
...@@ -682,12 +736,16 @@ static void icmp_unreach(struct sk_buff *skb) ...@@ -682,12 +736,16 @@ static void icmp_unreach(struct sk_buff *skb)
ipprot = nextip; ipprot = nextip;
} }
out:; out:
return;
out_err:
ICMP_INC_STATS_BH(IcmpInErrors);
goto out;
} }
/* /*
* Handle ICMP_REDIRECT. * Handle ICMP_REDIRECT.
*/ */
static void icmp_redirect(struct sk_buff *skb) static void icmp_redirect(struct sk_buff *skb)
...@@ -695,18 +753,16 @@ static void icmp_redirect(struct sk_buff *skb) ...@@ -695,18 +753,16 @@ static void icmp_redirect(struct sk_buff *skb)
struct iphdr *iph; struct iphdr *iph;
unsigned long ip; unsigned long ip;
if (skb->len < sizeof(struct iphdr)) { if (skb->len < sizeof(struct iphdr))
ICMP_INC_STATS_BH(IcmpInErrors); goto out_err;
return;
}
/* /*
* Get the copied header of the packet that caused the redirect * Get the copied header of the packet that caused the redirect
*/ */
if (!pskb_may_pull(skb, sizeof(struct iphdr))) if (!pskb_may_pull(skb, sizeof(struct iphdr)))
return; goto out;
iph = (struct iphdr *) skb->data; iph = (struct iphdr *)skb->data;
ip = iph->daddr; ip = iph->daddr;
switch (skb->h.icmph->code & 7) { switch (skb->h.icmph->code & 7) {
...@@ -716,22 +772,31 @@ static void icmp_redirect(struct sk_buff *skb) ...@@ -716,22 +772,31 @@ static void icmp_redirect(struct sk_buff *skb)
* As per RFC recommendations now handle it as * As per RFC recommendations now handle it as
* a host redirect. * a host redirect.
*/ */
case ICMP_REDIR_HOST: case ICMP_REDIR_HOST:
case ICMP_REDIR_HOSTTOS: case ICMP_REDIR_HOSTTOS:
ip_rt_redirect(skb->nh.iph->saddr, ip, skb->h.icmph->un.gateway, iph->saddr, iph->tos, skb->dev); ip_rt_redirect(skb->nh.iph->saddr,
ip, skb->h.icmph->un.gateway,
iph->saddr, iph->tos, skb->dev);
break; break;
default: default:
break; break;
} }
out:
return;
out_err:
ICMP_INC_STATS_BH(IcmpInErrors);
goto out;
} }
/* /*
* Handle ICMP_ECHO ("ping") requests. * Handle ICMP_ECHO ("ping") requests.
* *
* RFC 1122: 3.2.2.6 MUST have an echo server that answers ICMP echo requests. * RFC 1122: 3.2.2.6 MUST have an echo server that answers ICMP echo
* RFC 1122: 3.2.2.6 Data received in the ICMP_ECHO request MUST be included in the reply. * requests.
* RFC 1812: 4.3.3.6 SHOULD have a config option for silently ignoring echo requests, MUST have default=NOT. * RFC 1122: 3.2.2.6 Data received in the ICMP_ECHO request MUST be
* included in the reply.
* RFC 1812: 4.3.3.6 SHOULD have a config option for silently ignoring
* echo requests, MUST have default=NOT.
* See also WRT handling of options once they are done and working. * See also WRT handling of options once they are done and working.
*/ */
...@@ -740,65 +805,66 @@ static void icmp_echo(struct sk_buff *skb) ...@@ -740,65 +805,66 @@ static void icmp_echo(struct sk_buff *skb)
if (!sysctl_icmp_echo_ignore_all) { if (!sysctl_icmp_echo_ignore_all) {
struct icmp_bxm icmp_param; struct icmp_bxm icmp_param;
icmp_param.data.icmph=*skb->h.icmph; icmp_param.data.icmph = *skb->h.icmph;
icmp_param.data.icmph.type=ICMP_ECHOREPLY; icmp_param.data.icmph.type = ICMP_ECHOREPLY;
icmp_param.skb=skb; icmp_param.skb = skb;
icmp_param.offset=0; icmp_param.offset = 0;
icmp_param.data_len=skb->len; icmp_param.data_len = skb->len;
icmp_param.head_len=sizeof(struct icmphdr); icmp_param.head_len = sizeof(struct icmphdr);
icmp_reply(&icmp_param, skb); icmp_reply(&icmp_param, skb);
} }
} }
/* /*
* Handle ICMP Timestamp requests. * Handle ICMP Timestamp requests.
* RFC 1122: 3.2.2.8 MAY implement ICMP timestamp requests. * RFC 1122: 3.2.2.8 MAY implement ICMP timestamp requests.
* SHOULD be in the kernel for minimum random latency. * SHOULD be in the kernel for minimum random latency.
* MUST be accurate to a few minutes. * MUST be accurate to a few minutes.
* MUST be updated at least at 15Hz. * MUST be updated at least at 15Hz.
*/ */
static void icmp_timestamp(struct sk_buff *skb) static void icmp_timestamp(struct sk_buff *skb)
{ {
struct timeval tv; struct timeval tv;
struct icmp_bxm icmp_param; struct icmp_bxm icmp_param;
/* /*
* Too short. * Too short.
*/ */
if (skb->len < 4)
if (skb->len < 4) { goto out_err;
ICMP_INC_STATS_BH(IcmpInErrors);
return;
}
/* /*
* Fill in the current time as ms since midnight UT: * Fill in the current time as ms since midnight UT:
*/ */
do_gettimeofday(&tv); do_gettimeofday(&tv);
icmp_param.data.times[1] = htonl((tv.tv_sec % 86400) * 1000 + tv.tv_usec / 1000); icmp_param.data.times[1] = htonl((tv.tv_sec % 86400) * 1000 +
tv.tv_usec / 1000);
icmp_param.data.times[2] = icmp_param.data.times[1]; icmp_param.data.times[2] = icmp_param.data.times[1];
if (skb_copy_bits(skb, 0, &icmp_param.data.times[0], 4)) if (skb_copy_bits(skb, 0, &icmp_param.data.times[0], 4))
BUG(); BUG();
icmp_param.data.icmph=*skb->h.icmph; icmp_param.data.icmph = *skb->h.icmph;
icmp_param.data.icmph.type=ICMP_TIMESTAMPREPLY; icmp_param.data.icmph.type = ICMP_TIMESTAMPREPLY;
icmp_param.data.icmph.code=0; icmp_param.data.icmph.code = 0;
icmp_param.skb=skb; icmp_param.skb = skb;
icmp_param.offset=0; icmp_param.offset = 0;
icmp_param.data_len=0; icmp_param.data_len = 0;
icmp_param.head_len=sizeof(struct icmphdr)+12; icmp_param.head_len = sizeof(struct icmphdr) + 12;
icmp_reply(&icmp_param, skb); icmp_reply(&icmp_param, skb);
out:
return;
out_err:
ICMP_INC_STATS_BH(IcmpInErrors);
goto out;
} }
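The value stored in data.times[1] and [2] above is milliseconds since midnight UT, which is what RFC 792 asks for in ICMP timestamp replies. The same arithmetic as a standalone program (illustration only, not kernel code):

#include <stdio.h>
#include <sys/time.h>
#include <arpa/inet.h>

int main(void)
{
	struct timeval tv;
	unsigned int msec;

	gettimeofday(&tv, NULL);
	/* 86400 seconds per day; convert the remainder to milliseconds. */
	msec = (tv.tv_sec % 86400) * 1000 + tv.tv_usec / 1000;

	/* The wire format is big endian, hence the htonl() in icmp_timestamp(). */
	printf("ms since midnight UT: %u (0x%08x on the wire)\n",
	       msec, htonl(msec));
	return 0;
}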
/* /*
* Handle ICMP_ADDRESS_MASK requests. (RFC950) * Handle ICMP_ADDRESS_MASK requests. (RFC950)
* *
* RFC1122 (3.2.2.9). A host MUST only send replies to * RFC1122 (3.2.2.9). A host MUST only send replies to
* ADDRESS_MASK requests if it's been configured as an address mask * ADDRESS_MASK requests if it's been configured as an address mask
* agent. Receiving a request doesn't constitute implicit permission to * agent. Receiving a request doesn't constitute implicit permission to
* act as one. Of course, implementing this correctly requires (SHOULD) * act as one. Of course, implementing this correctly requires (SHOULD)
* a way to turn the functionality on and off. Another one for sysctl(), * a way to turn the functionality on and off. Another one for sysctl(),
* I guess. -- MS * I guess. -- MS
* *
* RFC1812 (4.3.3.9). A router MUST implement it. * RFC1812 (4.3.3.9). A router MUST implement it.
...@@ -829,7 +895,7 @@ static void icmp_address(struct sk_buff *skb) ...@@ -829,7 +895,7 @@ static void icmp_address(struct sk_buff *skb)
#if 0 #if 0
if (net_ratelimit()) if (net_ratelimit())
printk(KERN_DEBUG "a guy asks for address mask. Who is it?\n"); printk(KERN_DEBUG "a guy asks for address mask. Who is it?\n");
#endif #endif
} }
/* /*
...@@ -839,57 +905,60 @@ static void icmp_address(struct sk_buff *skb) ...@@ -839,57 +905,60 @@ static void icmp_address(struct sk_buff *skb)
static void icmp_address_reply(struct sk_buff *skb) static void icmp_address_reply(struct sk_buff *skb)
{ {
struct rtable *rt = (struct rtable*)skb->dst; struct rtable *rt = (struct rtable *)skb->dst;
struct net_device *dev = skb->dev; struct net_device *dev = skb->dev;
struct in_device *in_dev; struct in_device *in_dev;
struct in_ifaddr *ifa; struct in_ifaddr *ifa;
u32 mask; u32 mask;
if (skb->len < 4 || !(rt->rt_flags&RTCF_DIRECTSRC)) if (skb->len < 4 || !(rt->rt_flags&RTCF_DIRECTSRC))
return; goto out;
in_dev = in_dev_get(dev); in_dev = in_dev_get(dev);
if (!in_dev) if (!in_dev)
return; goto out;
read_lock(&in_dev->lock); read_lock(&in_dev->lock);
if (in_dev->ifa_list && if (in_dev->ifa_list &&
IN_DEV_LOG_MARTIANS(in_dev) && IN_DEV_LOG_MARTIANS(in_dev) &&
IN_DEV_FORWARD(in_dev)) { IN_DEV_FORWARD(in_dev)) {
if (skb_copy_bits(skb, 0, &mask, 4)) if (skb_copy_bits(skb, 0, &mask, 4))
BUG(); BUG();
for (ifa=in_dev->ifa_list; ifa; ifa = ifa->ifa_next) { for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
if (mask == ifa->ifa_mask && inet_ifa_match(rt->rt_src, ifa)) if (mask == ifa->ifa_mask &&
inet_ifa_match(rt->rt_src, ifa))
break; break;
} }
if (!ifa && net_ratelimit()) { if (!ifa && net_ratelimit()) {
printk(KERN_INFO "Wrong address mask %u.%u.%u.%u from %s/%u.%u.%u.%u\n", printk(KERN_INFO "Wrong address mask %u.%u.%u.%u from "
"%s/%u.%u.%u.%u\n",
NIPQUAD(mask), dev->name, NIPQUAD(rt->rt_src)); NIPQUAD(mask), dev->name, NIPQUAD(rt->rt_src));
} }
} }
read_unlock(&in_dev->lock); read_unlock(&in_dev->lock);
in_dev_put(in_dev); in_dev_put(in_dev);
out:;
} }
static void icmp_discard(struct sk_buff *skb) static void icmp_discard(struct sk_buff *skb)
{ {
} }
/* /*
* Deal with incoming ICMP packets. * Deal with incoming ICMP packets.
*/ */
int icmp_rcv(struct sk_buff *skb) int icmp_rcv(struct sk_buff *skb)
{ {
struct icmphdr *icmph; struct icmphdr *icmph;
struct rtable *rt = (struct rtable*)skb->dst; struct rtable *rt = (struct rtable *)skb->dst;
ICMP_INC_STATS_BH(IcmpInMsgs); ICMP_INC_STATS_BH(IcmpInMsgs);
switch (skb->ip_summed) { switch (skb->ip_summed) {
case CHECKSUM_HW: case CHECKSUM_HW:
if ((u16)csum_fold(skb->csum) == 0) if (!(u16)csum_fold(skb->csum))
break; break;
NETDEBUG(if (net_ratelimit()) printk(KERN_DEBUG "icmp v4 hw csum failure\n")); NETDEBUG(if (net_ratelimit())
printk(KERN_DEBUG "icmp v4 hw csum failure\n"));
case CHECKSUM_NONE: case CHECKSUM_NONE:
if ((u16)csum_fold(skb_checksum(skb, 0, skb->len, 0))) if ((u16)csum_fold(skb_checksum(skb, 0, skb->len, 0)))
goto error; goto error;
...@@ -904,17 +973,18 @@ int icmp_rcv(struct sk_buff *skb) ...@@ -904,17 +973,18 @@ int icmp_rcv(struct sk_buff *skb)
/* /*
* 18 is the highest 'known' ICMP type. Anything else is a mystery * 18 is the highest 'known' ICMP type. Anything else is a mystery
* *
* RFC 1122: 3.2.2 Unknown ICMP message types MUST be silently discarded. * RFC 1122: 3.2.2 Unknown ICMP message types MUST be silently
* discarded.
*/ */
if (icmph->type > NR_ICMP_TYPES) if (icmph->type > NR_ICMP_TYPES)
goto error; goto error;
/* /*
* Parse the ICMP message * Parse the ICMP message
*/ */
if (rt->rt_flags&(RTCF_BROADCAST|RTCF_MULTICAST)) { if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
/* /*
* RFC 1122: 3.2.2.6 An ICMP_ECHO to broadcast MAY be * RFC 1122: 3.2.2.6 An ICMP_ECHO to broadcast MAY be
* silently ignored (we let user decide with a sysctl). * silently ignored (we let user decide with a sysctl).
...@@ -933,7 +1003,9 @@ int icmp_rcv(struct sk_buff *skb) ...@@ -933,7 +1003,9 @@ int icmp_rcv(struct sk_buff *skb)
} }
} }
icmp_pointers[icmph->type].input[smp_processor_id()*2*sizeof(struct icmp_mib)/sizeof(unsigned long)]++; icmp_pointers[icmph->type].input[smp_processor_id() * 2 *
sizeof(struct icmp_mib) /
sizeof(unsigned long)]++;
(icmp_pointers[icmph->type].handler)(skb); (icmp_pointers[icmph->type].handler)(skb);
drop: drop:
...@@ -947,40 +1019,127 @@ int icmp_rcv(struct sk_buff *skb) ...@@ -947,40 +1019,127 @@ int icmp_rcv(struct sk_buff *skb)
/* /*
* This table is the definition of how we handle ICMP. * This table is the definition of how we handle ICMP.
*/ */
static struct icmp_control icmp_pointers[NR_ICMP_TYPES + 1] = {
static struct icmp_control icmp_pointers[NR_ICMP_TYPES+1] = { /* ECHO REPLY (0) */
/* ECHO REPLY (0) */ [0] = {
{ &icmp_statistics[0].IcmpOutEchoReps, &icmp_statistics[0].IcmpInEchoReps, icmp_discard, 0 }, output: &icmp_statistics[0].IcmpOutEchoReps,
{ &icmp_statistics[0].dummy, &icmp_statistics[0].IcmpInErrors, icmp_discard, 1 }, input: &icmp_statistics[0].IcmpInEchoReps,
{ &icmp_statistics[0].dummy, &icmp_statistics[0].IcmpInErrors, icmp_discard, 1 }, handler: icmp_discard,
/* DEST UNREACH (3) */ },
{ &icmp_statistics[0].IcmpOutDestUnreachs, &icmp_statistics[0].IcmpInDestUnreachs, icmp_unreach, 1 }, [1] = {
/* SOURCE QUENCH (4) */ output: &icmp_statistics[0].dummy,
{ &icmp_statistics[0].IcmpOutSrcQuenchs, &icmp_statistics[0].IcmpInSrcQuenchs, icmp_unreach, 1 }, input: &icmp_statistics[0].IcmpInErrors,
/* REDIRECT (5) */ handler: icmp_discard,
{ &icmp_statistics[0].IcmpOutRedirects, &icmp_statistics[0].IcmpInRedirects, icmp_redirect, 1 }, error: 1,
{ &icmp_statistics[0].dummy, &icmp_statistics[0].IcmpInErrors, icmp_discard, 1 }, },
{ &icmp_statistics[0].dummy, &icmp_statistics[0].IcmpInErrors, icmp_discard, 1 }, [2] = {
/* ECHO (8) */ output: &icmp_statistics[0].dummy,
{ &icmp_statistics[0].IcmpOutEchos, &icmp_statistics[0].IcmpInEchos, icmp_echo, 0 }, input: &icmp_statistics[0].IcmpInErrors,
{ &icmp_statistics[0].dummy, &icmp_statistics[0].IcmpInErrors, icmp_discard, 1 }, handler: icmp_discard,
{ &icmp_statistics[0].dummy, &icmp_statistics[0].IcmpInErrors, icmp_discard, 1 }, error: 1,
/* TIME EXCEEDED (11) */ },
{ &icmp_statistics[0].IcmpOutTimeExcds, &icmp_statistics[0].IcmpInTimeExcds, icmp_unreach, 1 }, /* DEST UNREACH (3) */
/* PARAMETER PROBLEM (12) */ [3] = {
{ &icmp_statistics[0].IcmpOutParmProbs, &icmp_statistics[0].IcmpInParmProbs, icmp_unreach, 1 }, output: &icmp_statistics[0].IcmpOutDestUnreachs,
/* TIMESTAMP (13) */ input: &icmp_statistics[0].IcmpInDestUnreachs,
{ &icmp_statistics[0].IcmpOutTimestamps, &icmp_statistics[0].IcmpInTimestamps, icmp_timestamp, 0 }, handler: icmp_unreach,
/* TIMESTAMP REPLY (14) */ error: 1,
{ &icmp_statistics[0].IcmpOutTimestampReps, &icmp_statistics[0].IcmpInTimestampReps, icmp_discard, 0 }, },
/* INFO (15) */ /* SOURCE QUENCH (4) */
{ &icmp_statistics[0].dummy, &icmp_statistics[0].dummy, icmp_discard, 0 }, [4] = {
/* INFO REPLY (16) */ output: &icmp_statistics[0].IcmpOutSrcQuenchs,
{ &icmp_statistics[0].dummy, &icmp_statistics[0].dummy, icmp_discard, 0 }, input: &icmp_statistics[0].IcmpInSrcQuenchs,
/* ADDR MASK (17) */ handler: icmp_unreach,
{ &icmp_statistics[0].IcmpOutAddrMasks, &icmp_statistics[0].IcmpInAddrMasks, icmp_address, 0 }, error: 1,
/* ADDR MASK REPLY (18) */ },
{ &icmp_statistics[0].IcmpOutAddrMaskReps, &icmp_statistics[0].IcmpInAddrMaskReps, icmp_address_reply, 0 } /* REDIRECT (5) */
[5] = {
output: &icmp_statistics[0].IcmpOutRedirects,
input: &icmp_statistics[0].IcmpInRedirects,
handler: icmp_redirect,
error: 1,
},
[6] = {
output: &icmp_statistics[0].dummy,
input: &icmp_statistics[0].IcmpInErrors,
handler: icmp_discard,
error: 1,
},
[7] = {
output: &icmp_statistics[0].dummy,
input: &icmp_statistics[0].IcmpInErrors,
handler: icmp_discard,
error: 1,
},
/* ECHO (8) */
[8] = {
output: &icmp_statistics[0].IcmpOutEchos,
input: &icmp_statistics[0].IcmpInEchos,
handler: icmp_echo,
error: 0,
},
[9] = {
output: &icmp_statistics[0].dummy,
input: &icmp_statistics[0].IcmpInErrors,
handler: icmp_discard,
error: 1,
},
[10] = {
output: &icmp_statistics[0].dummy,
input: &icmp_statistics[0].IcmpInErrors,
handler: icmp_discard,
error: 1,
},
/* TIME EXCEEDED (11) */
[11] = {
output: &icmp_statistics[0].IcmpOutTimeExcds,
input: &icmp_statistics[0].IcmpInTimeExcds,
handler: icmp_unreach,
error: 1,
},
/* PARAMETER PROBLEM (12) */
[12] = {
output: &icmp_statistics[0].IcmpOutParmProbs,
input: &icmp_statistics[0].IcmpInParmProbs,
handler: icmp_unreach,
error: 1,
},
/* TIMESTAMP (13) */
[13] = {
output: &icmp_statistics[0].IcmpOutTimestamps,
input: &icmp_statistics[0].IcmpInTimestamps,
handler: icmp_timestamp,
},
/* TIMESTAMP REPLY (14) */
[14] = {
output: &icmp_statistics[0].IcmpOutTimestampReps,
input: &icmp_statistics[0].IcmpInTimestampReps,
handler: icmp_discard,
},
/* INFO (15) */
[15] = {
output: &icmp_statistics[0].dummy,
input: &icmp_statistics[0].dummy,
handler: icmp_discard,
},
/* INFO REPLY (16) */
[16] = {
output: &icmp_statistics[0].dummy,
input: &icmp_statistics[0].dummy,
handler: icmp_discard,
},
/* ADDR MASK (17) */
[17] = {
output: &icmp_statistics[0].IcmpOutAddrMasks,
input: &icmp_statistics[0].IcmpInAddrMasks,
handler: icmp_address,
},
/* ADDR MASK REPLY (18) */
[18] = {
output: &icmp_statistics[0].IcmpOutAddrMaskReps,
input: &icmp_statistics[0].IcmpInAddrMaskReps,
handler: icmp_address_reply,
}
}; };
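The rewritten table above uses GNU C designated initializers (the old `field:` spelling of what C99 writes as `.field =`), so every ICMP type is labelled by its array index and any member left out of an entry is zero-filled. A minimal standalone sketch of the same pattern, using a made-up struct and handlers rather than the kernel's icmp_control:

#include <stdio.h>

struct ctl {
	void (*handler)(int type);
	int error;			/* members omitted from an entry default to 0 */
};

static void handle_echo(int t)    { printf("echo, type %d\n", t); }
static void handle_discard(int t) { printf("discard, type %d\n", t); }

/* index-designated entries; indices not listed are zero-filled */
static struct ctl table[19] = {
	[0]  = { .handler = handle_discard },			/* echo reply   */
	[3]  = { .handler = handle_discard, .error = 1 },	/* dest unreach */
	[8]  = { .handler = handle_echo },			/* echo request */
};

int main(void)
{
	table[8].handler(8);
	printf("error flag for type 3: %d\n", table[3].error);
	return 0;
}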
void __init icmp_init(struct net_proto_family *ops) void __init icmp_init(struct net_proto_family *ops)
...@@ -990,8 +1149,8 @@ void __init icmp_init(struct net_proto_family *ops) ...@@ -990,8 +1149,8 @@ void __init icmp_init(struct net_proto_family *ops)
if (err < 0) if (err < 0)
panic("Failed to create the ICMP control socket.\n"); panic("Failed to create the ICMP control socket.\n");
icmp_socket->sk->allocation=GFP_ATOMIC; icmp_socket->sk->allocation = GFP_ATOMIC;
icmp_socket->sk->sndbuf = SK_WMEM_MAX*2; icmp_socket->sk->sndbuf = SK_WMEM_MAX * 2;
inet = inet_sk(icmp_socket->sk); inet = inet_sk(icmp_socket->sk);
inet->ttl = MAXTTL; inet->ttl = MAXTTL;
inet->pmtudisc = IP_PMTUDISC_DONT; inet->pmtudisc = IP_PMTUDISC_DONT;
......
...@@ -32,7 +32,8 @@ ...@@ -32,7 +32,8 @@
* and the rest go in the other half. * and the rest go in the other half.
* Andi Kleen : Add support for syncookies and fixed * Andi Kleen : Add support for syncookies and fixed
* some bugs: ip options weren't passed to * some bugs: ip options weren't passed to
* the TCP layer, missed a check for an ACK bit. * the TCP layer, missed a check for an
* ACK bit.
* Andi Kleen : Implemented fast path mtu discovery. * Andi Kleen : Implemented fast path mtu discovery.
* Fixed many serious bugs in the * Fixed many serious bugs in the
* open_request handling and moved * open_request handling and moved
...@@ -42,7 +43,8 @@ ...@@ -42,7 +43,8 @@
* Mike McLagan : Routing by source * Mike McLagan : Routing by source
* Juan Jose Ciarlante: ip_dynaddr bits * Juan Jose Ciarlante: ip_dynaddr bits
* Andi Kleen: various fixes. * Andi Kleen: various fixes.
* Vitaly E. Lavrov : Transparent proxy revived after year coma. * Vitaly E. Lavrov : Transparent proxy revived after year
* coma.
* Andi Kleen : Fix new listen. * Andi Kleen : Fix new listen.
* Andi Kleen : Fix accept error reporting. * Andi Kleen : Fix accept error reporting.
*/ */
...@@ -65,26 +67,18 @@ ...@@ -65,26 +67,18 @@
extern int sysctl_ip_dynaddr; extern int sysctl_ip_dynaddr;
extern int sysctl_ip_default_ttl; extern int sysctl_ip_default_ttl;
int sysctl_tcp_tw_reuse = 0; int sysctl_tcp_tw_reuse;
/* Check TCP sequence numbers in ICMP packets. */ /* Check TCP sequence numbers in ICMP packets. */
#define ICMP_MIN_LENGTH 8 #define ICMP_MIN_LENGTH 8
/* Socket used for sending RSTs */ /* Socket used for sending RSTs */
static struct socket *tcp_socket; static struct socket *tcp_socket;
void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len, void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len,
struct sk_buff *skb); struct sk_buff *skb);
/*
* ALL members must be initialised to prevent gcc-2.7.2.3 miscompilation
*/
struct tcp_hashinfo __cacheline_aligned tcp_hashinfo = { struct tcp_hashinfo __cacheline_aligned tcp_hashinfo = {
__tcp_ehash: NULL,
__tcp_bhash: NULL,
__tcp_bhash_size: 0,
__tcp_ehash_size: 0,
__tcp_listening_hash: { NULL, },
__tcp_lhash_lock: RW_LOCK_UNLOCKED, __tcp_lhash_lock: RW_LOCK_UNLOCKED,
__tcp_lhash_users: ATOMIC_INIT(0), __tcp_lhash_users: ATOMIC_INIT(0),
__tcp_lhash_wait: __tcp_lhash_wait:
...@@ -98,14 +92,14 @@ struct tcp_hashinfo __cacheline_aligned tcp_hashinfo = { ...@@ -98,14 +92,14 @@ struct tcp_hashinfo __cacheline_aligned tcp_hashinfo = {
* 32768-61000 * 32768-61000
*/ */
int sysctl_local_port_range[2] = { 1024, 4999 }; int sysctl_local_port_range[2] = { 1024, 4999 };
int tcp_port_rover = (1024 - 1); int tcp_port_rover = 1024 - 1;
static __inline__ int tcp_hashfn(__u32 laddr, __u16 lport, static __inline__ int tcp_hashfn(__u32 laddr, __u16 lport,
__u32 faddr, __u16 fport) __u32 faddr, __u16 fport)
{ {
int h = ((laddr ^ lport) ^ (faddr ^ fport)); int h = (laddr ^ lport) ^ (faddr ^ fport);
h ^= h>>16; h ^= h >> 16;
h ^= h>>8; h ^= h >> 8;
return h & (tcp_ehash_size - 1); return h & (tcp_ehash_size - 1);
} }
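tcp_hashfn() above folds the connection 4-tuple into a bucket index by xoring the address/port halves together, then xoring the result onto itself at 16- and 8-bit shifts so the high bits influence the low byte before masking with the table size minus one. A hedged userspace sketch of the same mixing, with an assumed power-of-two table size standing in for tcp_ehash_size:

#include <stdio.h>
#include <stdint.h>

#define EHASH_SIZE 512			/* assumed power of two, like tcp_ehash_size */

static int toy_hashfn(uint32_t laddr, uint16_t lport,
		      uint32_t faddr, uint16_t fport)
{
	int h = (laddr ^ lport) ^ (faddr ^ fport);

	h ^= h >> 16;			/* fold the high bits down */
	h ^= h >> 8;
	return h & (EHASH_SIZE - 1);	/* mask only works for power-of-two sizes */
}

int main(void)
{
	/* 10.0.0.1:33000 <-> 10.0.0.2:80, addresses as host-order u32s for the demo */
	printf("bucket = %d\n", toy_hashfn(0x0a000001, 33000, 0x0a000002, 80));
	return 0;
}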
...@@ -126,14 +120,13 @@ static __inline__ int tcp_sk_hashfn(struct sock *sk) ...@@ -126,14 +120,13 @@ static __inline__ int tcp_sk_hashfn(struct sock *sk)
struct tcp_bind_bucket *tcp_bucket_create(struct tcp_bind_hashbucket *head, struct tcp_bind_bucket *tcp_bucket_create(struct tcp_bind_hashbucket *head,
unsigned short snum) unsigned short snum)
{ {
struct tcp_bind_bucket *tb; struct tcp_bind_bucket *tb = kmem_cache_alloc(tcp_bucket_cachep,
SLAB_ATOMIC);
tb = kmem_cache_alloc(tcp_bucket_cachep, SLAB_ATOMIC); if (tb) {
if(tb != NULL) {
tb->port = snum; tb->port = snum;
tb->fastreuse = 0; tb->fastreuse = 0;
tb->owners = NULL; tb->owners = NULL;
if((tb->next = head->chain) != NULL) if ((tb->next = head->chain) != NULL)
tb->next->pprev = &tb->next; tb->next->pprev = &tb->next;
head->chain = tb; head->chain = tb;
tb->pprev = &head->chain; tb->pprev = &head->chain;
...@@ -152,9 +145,9 @@ static __inline__ void __tcp_inherit_port(struct sock *sk, struct sock *child) ...@@ -152,9 +145,9 @@ static __inline__ void __tcp_inherit_port(struct sock *sk, struct sock *child)
tb = (struct tcp_bind_bucket *)sk->prev; tb = (struct tcp_bind_bucket *)sk->prev;
if ((child->bind_next = tb->owners) != NULL) if ((child->bind_next = tb->owners) != NULL)
tb->owners->bind_pprev = &child->bind_next; tb->owners->bind_pprev = &child->bind_next;
tb->owners = child; tb->owners = child;
child->bind_pprev = &tb->owners; child->bind_pprev = &tb->owners;
child->prev = (struct sock *) tb; child->prev = (struct sock *)tb;
spin_unlock(&head->lock); spin_unlock(&head->lock);
} }
...@@ -165,14 +158,15 @@ __inline__ void tcp_inherit_port(struct sock *sk, struct sock *child) ...@@ -165,14 +158,15 @@ __inline__ void tcp_inherit_port(struct sock *sk, struct sock *child)
local_bh_enable(); local_bh_enable();
} }
static inline void tcp_bind_hash(struct sock *sk, struct tcp_bind_bucket *tb, unsigned short snum) static inline void tcp_bind_hash(struct sock *sk, struct tcp_bind_bucket *tb,
unsigned short snum)
{ {
inet_sk(sk)->num = snum; inet_sk(sk)->num = snum;
if ((sk->bind_next = tb->owners) != NULL) if ((sk->bind_next = tb->owners) != NULL)
tb->owners->bind_pprev = &sk->bind_next; tb->owners->bind_pprev = &sk->bind_next;
tb->owners = sk; tb->owners = sk;
sk->bind_pprev = &tb->owners; sk->bind_pprev = &tb->owners;
sk->prev = (struct sock *) tb; sk->prev = (struct sock *)tb;
} }
static inline int tcp_bind_conflict(struct sock *sk, struct tcp_bind_bucket *tb) static inline int tcp_bind_conflict(struct sock *sk, struct tcp_bind_bucket *tb)
...@@ -180,17 +174,14 @@ static inline int tcp_bind_conflict(struct sock *sk, struct tcp_bind_bucket *tb) ...@@ -180,17 +174,14 @@ static inline int tcp_bind_conflict(struct sock *sk, struct tcp_bind_bucket *tb)
struct inet_opt *inet = inet_sk(sk); struct inet_opt *inet = inet_sk(sk);
struct sock *sk2 = tb->owners; struct sock *sk2 = tb->owners;
int sk_reuse = sk->reuse; int sk_reuse = sk->reuse;
for( ; sk2 != NULL; sk2 = sk2->bind_next) { for ( ; sk2; sk2 = sk2->bind_next) {
if (sk != sk2 && if (sk != sk2 && sk->bound_dev_if == sk2->bound_dev_if) {
sk->bound_dev_if == sk2->bound_dev_if) { if (!sk_reuse || !sk2->reuse ||
if (!sk_reuse ||
!sk2->reuse ||
sk2->state == TCP_LISTEN) { sk2->state == TCP_LISTEN) {
struct inet_opt *inet2 = inet_sk(sk2); struct inet_opt *inet2 = inet_sk(sk2);
if (!inet2->rcv_saddr || if (!inet2->rcv_saddr || !inet->rcv_saddr ||
!inet->rcv_saddr || inet2->rcv_saddr == inet->rcv_saddr)
(inet2->rcv_saddr == inet->rcv_saddr))
break; break;
} }
} }
...@@ -208,7 +199,7 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum) ...@@ -208,7 +199,7 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
int ret; int ret;
local_bh_disable(); local_bh_disable();
if (snum == 0) { if (!snum) {
int low = sysctl_local_port_range[0]; int low = sysctl_local_port_range[0];
int high = sysctl_local_port_range[1]; int high = sysctl_local_port_range[1];
int remaining = (high - low) + 1; int remaining = (high - low) + 1;
...@@ -216,8 +207,9 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum) ...@@ -216,8 +207,9 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
spin_lock(&tcp_portalloc_lock); spin_lock(&tcp_portalloc_lock);
rover = tcp_port_rover; rover = tcp_port_rover;
do { rover++; do {
if ((rover < low) || (rover > high)) rover++;
if (rover < low || rover > high)
rover = low; rover = low;
head = &tcp_bhash[tcp_bhashfn(rover)]; head = &tcp_bhash[tcp_bhashfn(rover)];
spin_lock(&head->lock); spin_lock(&head->lock);
...@@ -244,14 +236,14 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum) ...@@ -244,14 +236,14 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
} else { } else {
head = &tcp_bhash[tcp_bhashfn(snum)]; head = &tcp_bhash[tcp_bhashfn(snum)];
spin_lock(&head->lock); spin_lock(&head->lock);
for (tb = head->chain; tb != NULL; tb = tb->next) for (tb = head->chain; tb; tb = tb->next)
if (tb->port == snum) if (tb->port == snum)
break; break;
} }
if (tb != NULL && tb->owners != NULL) { if (tb && tb->owners) {
if (sk->reuse > 1) if (sk->reuse > 1)
goto success; goto success;
if (tb->fastreuse > 0 && sk->reuse != 0 && sk->state != TCP_LISTEN) { if (tb->fastreuse > 0 && sk->reuse && sk->state != TCP_LISTEN) {
goto success; goto success;
} else { } else {
ret = 1; ret = 1;
...@@ -260,21 +252,19 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum) ...@@ -260,21 +252,19 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
} }
} }
ret = 1; ret = 1;
if (tb == NULL && if (!tb && (tb = tcp_bucket_create(head, snum)) == NULL)
(tb = tcp_bucket_create(head, snum)) == NULL) goto fail_unlock;
goto fail_unlock; if (!tb->owners) {
if (tb->owners == NULL) {
if (sk->reuse && sk->state != TCP_LISTEN) if (sk->reuse && sk->state != TCP_LISTEN)
tb->fastreuse = 1; tb->fastreuse = 1;
else else
tb->fastreuse = 0; tb->fastreuse = 0;
} else if (tb->fastreuse && } else if (tb->fastreuse && (!sk->reuse || sk->state == TCP_LISTEN))
((sk->reuse == 0) || (sk->state == TCP_LISTEN)))
tb->fastreuse = 0; tb->fastreuse = 0;
success: success:
if (sk->prev == NULL) if (!sk->prev)
tcp_bind_hash(sk, tb, snum); tcp_bind_hash(sk, tb, snum);
BUG_TRAP(sk->prev == (struct sock *) tb); BUG_TRAP(sk->prev == (struct sock *)tb);
ret = 0; ret = 0;
fail_unlock: fail_unlock:
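The !snum branch of tcp_v4_get_port() above walks a rover through sysctl_local_port_range, wrapping at the ends and skipping ports whose bind bucket already has owners; the rover position is remembered globally so successive binds spread across the range. A simplified sketch of that scan, with a plain array standing in for the bind hash and the default 1024-4999 range from this file:

#include <stdio.h>
#include <stdbool.h>

#define LOW  1024
#define HIGH 4999

static bool port_in_use[HIGH + 1];	/* toy stand-in for the bind hash */
static int port_rover = LOW - 1;	/* like tcp_port_rover */

/* Returns a free port in [LOW, HIGH], or -1 if the whole range is busy. */
static int pick_local_port(void)
{
	int remaining = HIGH - LOW + 1;
	int rover = port_rover;

	do {
		rover++;
		if (rover < LOW || rover > HIGH)
			rover = LOW;			/* wrap around */
		if (!port_in_use[rover]) {
			port_in_use[rover] = true;
			port_rover = rover;		/* remember where we stopped */
			return rover;
		}
	} while (--remaining > 0);

	return -1;					/* like -EADDRNOTAVAIL */
}

int main(void)
{
	printf("%d %d %d\n", pick_local_port(), pick_local_port(), pick_local_port());
	return 0;
}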
...@@ -298,9 +288,9 @@ __inline__ void __tcp_put_port(struct sock *sk) ...@@ -298,9 +288,9 @@ __inline__ void __tcp_put_port(struct sock *sk)
if (sk->bind_next) if (sk->bind_next)
sk->bind_next->bind_pprev = sk->bind_pprev; sk->bind_next->bind_pprev = sk->bind_pprev;
*(sk->bind_pprev) = sk->bind_next; *(sk->bind_pprev) = sk->bind_next;
sk->prev = NULL; sk->prev = NULL;
inet->num = 0; inet->num = 0;
if (tb->owners == NULL) { if (!tb->owners) {
if (tb->next) if (tb->next)
tb->next->pprev = tb->pprev; tb->next->pprev = tb->pprev;
*(tb->pprev) = tb->next; *(tb->pprev) = tb->next;
...@@ -333,7 +323,7 @@ void tcp_listen_wlock(void) ...@@ -333,7 +323,7 @@ void tcp_listen_wlock(void)
add_wait_queue_exclusive(&tcp_lhash_wait, &wait); add_wait_queue_exclusive(&tcp_lhash_wait, &wait);
for (;;) { for (;;) {
set_current_state(TASK_UNINTERRUPTIBLE); set_current_state(TASK_UNINTERRUPTIBLE);
if (atomic_read(&tcp_lhash_users) == 0) if (!atomic_read(&tcp_lhash_users))
break; break;
write_unlock_bh(&tcp_lhash_lock); write_unlock_bh(&tcp_lhash_lock);
schedule(); schedule();
...@@ -350,8 +340,8 @@ static __inline__ void __tcp_v4_hash(struct sock *sk, const int listen_possible) ...@@ -350,8 +340,8 @@ static __inline__ void __tcp_v4_hash(struct sock *sk, const int listen_possible)
struct sock **skp; struct sock **skp;
rwlock_t *lock; rwlock_t *lock;
BUG_TRAP(sk->pprev==NULL); BUG_TRAP(!sk->pprev);
if(listen_possible && sk->state == TCP_LISTEN) { if (listen_possible && sk->state == TCP_LISTEN) {
skp = &tcp_listening_hash[tcp_sk_listen_hashfn(sk)]; skp = &tcp_listening_hash[tcp_sk_listen_hashfn(sk)];
lock = &tcp_lhash_lock; lock = &tcp_lhash_lock;
tcp_listen_wlock(); tcp_listen_wlock();
...@@ -360,7 +350,7 @@ static __inline__ void __tcp_v4_hash(struct sock *sk, const int listen_possible) ...@@ -360,7 +350,7 @@ static __inline__ void __tcp_v4_hash(struct sock *sk, const int listen_possible)
lock = &tcp_ehash[sk->hashent].lock; lock = &tcp_ehash[sk->hashent].lock;
write_lock(lock); write_lock(lock);
} }
if((sk->next = *skp) != NULL) if ((sk->next = *skp) != NULL)
(*skp)->pprev = &sk->next; (*skp)->pprev = &sk->next;
*skp = sk; *skp = sk;
sk->pprev = skp; sk->pprev = skp;
...@@ -396,8 +386,8 @@ void tcp_unhash(struct sock *sk) ...@@ -396,8 +386,8 @@ void tcp_unhash(struct sock *sk)
write_lock_bh(&head->lock); write_lock_bh(&head->lock);
} }
if(sk->pprev) { if (sk->pprev) {
if(sk->next) if (sk->next)
sk->next->pprev = sk->pprev; sk->next->pprev = sk->pprev;
*sk->pprev = sk->next; *sk->pprev = sk->next;
sk->pprev = NULL; sk->pprev = NULL;
...@@ -416,20 +406,21 @@ void tcp_unhash(struct sock *sk) ...@@ -416,20 +406,21 @@ void tcp_unhash(struct sock *sk)
* connection. So always assume those are both wildcarded * connection. So always assume those are both wildcarded
* during the search since they can never be otherwise. * during the search since they can never be otherwise.
*/ */
static struct sock *__tcp_v4_lookup_listener(struct sock *sk, u32 daddr, unsigned short hnum, int dif) static struct sock *__tcp_v4_lookup_listener(struct sock *sk, u32 daddr,
unsigned short hnum, int dif)
{ {
struct sock *result = NULL; struct sock *result = NULL;
int score, hiscore; int score, hiscore;
hiscore=0; hiscore=0;
for(; sk; sk = sk->next) { for (; sk; sk = sk->next) {
struct inet_opt *inet = inet_sk(sk); struct inet_opt *inet = inet_sk(sk);
if(inet->num == hnum) { if (inet->num == hnum) {
__u32 rcv_saddr = inet->rcv_saddr; __u32 rcv_saddr = inet->rcv_saddr;
score = 1; score = 1;
if(rcv_saddr) { if (rcv_saddr) {
if (rcv_saddr != daddr) if (rcv_saddr != daddr)
continue; continue;
score++; score++;
...@@ -451,7 +442,8 @@ static struct sock *__tcp_v4_lookup_listener(struct sock *sk, u32 daddr, unsigne ...@@ -451,7 +442,8 @@ static struct sock *__tcp_v4_lookup_listener(struct sock *sk, u32 daddr, unsigne
} }
/* Optimize the common listener case. */ /* Optimize the common listener case. */
__inline__ struct sock *tcp_v4_lookup_listener(u32 daddr, unsigned short hnum, int dif) __inline__ struct sock *tcp_v4_lookup_listener(u32 daddr, unsigned short hnum,
int dif)
{ {
struct sock *sk; struct sock *sk;
...@@ -460,8 +452,7 @@ __inline__ struct sock *tcp_v4_lookup_listener(u32 daddr, unsigned short hnum, i ...@@ -460,8 +452,7 @@ __inline__ struct sock *tcp_v4_lookup_listener(u32 daddr, unsigned short hnum, i
if (sk) { if (sk) {
struct inet_opt *inet = inet_sk(sk); struct inet_opt *inet = inet_sk(sk);
if (inet->num == hnum && if (inet->num == hnum && !sk->next &&
sk->next == NULL &&
(!inet->rcv_saddr || inet->rcv_saddr == daddr) && (!inet->rcv_saddr || inet->rcv_saddr == daddr) &&
!sk->bound_dev_if) !sk->bound_dev_if)
goto sherry_cache; goto sherry_cache;
...@@ -482,53 +473,47 @@ __inline__ struct sock *tcp_v4_lookup_listener(u32 daddr, unsigned short hnum, i ...@@ -482,53 +473,47 @@ __inline__ struct sock *tcp_v4_lookup_listener(u32 daddr, unsigned short hnum, i
*/ */
static inline struct sock *__tcp_v4_lookup_established(u32 saddr, u16 sport, static inline struct sock *__tcp_v4_lookup_established(u32 saddr, u16 sport,
u32 daddr, u16 hnum, int dif) u32 daddr, u16 hnum,
int dif)
{ {
struct tcp_ehash_bucket *head; struct tcp_ehash_bucket *head;
TCP_V4_ADDR_COOKIE(acookie, saddr, daddr) TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)
__u32 ports = TCP_COMBINED_PORTS(sport, hnum); __u32 ports = TCP_COMBINED_PORTS(sport, hnum);
struct sock *sk; struct sock *sk;
int hash;
/* Optimize here for direct hit, only listening connections can /* Optimize here for direct hit, only listening connections can
* have wildcards anyways. * have wildcards anyways.
*/ */
hash = tcp_hashfn(daddr, hnum, saddr, sport); int hash = tcp_hashfn(daddr, hnum, saddr, sport);
head = &tcp_ehash[hash]; head = &tcp_ehash[hash];
read_lock(&head->lock); read_lock(&head->lock);
for(sk = head->chain; sk; sk = sk->next) { for (sk = head->chain; sk; sk = sk->next) {
if(TCP_IPV4_MATCH(sk, acookie, saddr, daddr, ports, dif)) if (TCP_IPV4_MATCH(sk, acookie, saddr, daddr, ports, dif))
goto hit; /* You sunk my battleship! */ goto hit; /* You sunk my battleship! */
} }
/* Must check for a TIME_WAIT'er before going to listener hash. */ /* Must check for a TIME_WAIT'er before going to listener hash. */
for(sk = (head + tcp_ehash_size)->chain; sk; sk = sk->next) for (sk = (head + tcp_ehash_size)->chain; sk; sk = sk->next)
if(TCP_IPV4_MATCH(sk, acookie, saddr, daddr, ports, dif)) if (TCP_IPV4_MATCH(sk, acookie, saddr, daddr, ports, dif))
goto hit; goto hit;
out:
read_unlock(&head->lock); read_unlock(&head->lock);
return sk;
return NULL;
hit: hit:
sock_hold(sk); sock_hold(sk);
read_unlock(&head->lock); goto out;
return sk;
} }
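__tcp_v4_lookup_established() above probes one bucket of the established hash and, on a miss, the matching bucket in the second half of the same table, which is where TIME_WAIT sockets live. A loose toy sketch of that split-table layout, with invented types and string keys instead of the real address/port cookie match:

#include <stdio.h>
#include <string.h>

#define EHASH_SIZE 4

struct toy_sock {
	const char *name;
	struct toy_sock *next;
};

/* first EHASH_SIZE buckets: established; next EHASH_SIZE: TIME_WAIT */
static struct toy_sock *ehash[2 * EHASH_SIZE];

static struct toy_sock *lookup(int hash, const char *name)
{
	struct toy_sock *sk;

	for (sk = ehash[hash]; sk; sk = sk->next)		/* established half */
		if (!strcmp(sk->name, name))
			return sk;
	for (sk = ehash[hash + EHASH_SIZE]; sk; sk = sk->next)	/* TIME_WAIT half */
		if (!strcmp(sk->name, name))
			return sk;
	return NULL;
}

int main(void)
{
	static struct toy_sock tw = { "old-conn", NULL };

	ehash[1 + EHASH_SIZE] = &tw;		/* park it in the TIME_WAIT half */
	printf("%s\n", lookup(1, "old-conn") ? "found in bucket 1" : "miss");
	return 0;
}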
static inline struct sock *__tcp_v4_lookup(u32 saddr, u16 sport, static inline struct sock *__tcp_v4_lookup(u32 saddr, u16 sport,
u32 daddr, u16 hnum, int dif) u32 daddr, u16 hnum, int dif)
{ {
struct sock *sk; struct sock *sk = __tcp_v4_lookup_established(saddr, sport,
daddr, hnum, dif);
sk = __tcp_v4_lookup_established(saddr, sport, daddr, hnum, dif); return sk ? : tcp_v4_lookup_listener(daddr, hnum, dif);
if (sk)
return sk;
return tcp_v4_lookup_listener(daddr, hnum, dif);
} }
__inline__ struct sock *tcp_v4_lookup(u32 saddr, u16 sport, u32 daddr, u16 dport, int dif) __inline__ struct sock *tcp_v4_lookup(u32 saddr, u16 sport, u32 daddr,
u16 dport, int dif)
{ {
struct sock *sk; struct sock *sk;
...@@ -565,11 +550,11 @@ static int __tcp_v4_check_established(struct sock *sk, __u16 lport, ...@@ -565,11 +550,11 @@ static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
write_lock(&head->lock); write_lock(&head->lock);
/* Check TIME-WAIT sockets first. */ /* Check TIME-WAIT sockets first. */
for(skp = &(head + tcp_ehash_size)->chain; (sk2=*skp) != NULL; for (skp = &(head + tcp_ehash_size)->chain; (sk2 = *skp) != NULL;
skp = &sk2->next) { skp = &sk2->next) {
tw = (struct tcp_tw_bucket*)sk2; tw = (struct tcp_tw_bucket *)sk2;
if(TCP_IPV4_MATCH(sk2, acookie, saddr, daddr, ports, dif)) { if (TCP_IPV4_MATCH(sk2, acookie, saddr, daddr, ports, dif)) {
struct tcp_opt *tp = tcp_sk(sk); struct tcp_opt *tp = tcp_sk(sk);
/* With PAWS, it is safe from the viewpoint /* With PAWS, it is safe from the viewpoint
...@@ -588,7 +573,8 @@ static int __tcp_v4_check_established(struct sock *sk, __u16 lport, ...@@ -588,7 +573,8 @@ static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
*/ */
if (tw->ts_recent_stamp && if (tw->ts_recent_stamp &&
(!twp || (sysctl_tcp_tw_reuse && (!twp || (sysctl_tcp_tw_reuse &&
xtime.tv_sec - tw->ts_recent_stamp > 1))) { xtime.tv_sec -
tw->ts_recent_stamp > 1))) {
if ((tp->write_seq = if ((tp->write_seq =
tw->snd_nxt + 65535 + 2) == 0) tw->snd_nxt + 65535 + 2) == 0)
tp->write_seq = 1; tp->write_seq = 1;
...@@ -604,8 +590,8 @@ static int __tcp_v4_check_established(struct sock *sk, __u16 lport, ...@@ -604,8 +590,8 @@ static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
tw = NULL; tw = NULL;
/* And established part... */ /* And established part... */
for(skp = &head->chain; (sk2=*skp)!=NULL; skp = &sk2->next) { for (skp = &head->chain; (sk2 = *skp) != NULL; skp = &sk2->next) {
if(TCP_IPV4_MATCH(sk2, acookie, saddr, daddr, ports, dif)) if (TCP_IPV4_MATCH(sk2, acookie, saddr, daddr, ports, dif))
goto not_unique; goto not_unique;
} }
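In the TIME_WAIT reuse branch above, when timestamps make recycling safe (either the caller passed no twp, or sysctl_tcp_tw_reuse allows it after more than a second), the new connection's write_seq is restarted just past the old connection's snd_nxt plus the largest unscaled window, with 0 avoided. A standalone sketch of that offset arithmetic, taken from the expression in the diff:

#include <stdio.h>
#include <stdint.h>

/* write_seq chosen when a TIME_WAIT port is reused: jump more than
 * 65535 past the old snd_nxt, and never use 0. */
static uint32_t reuse_write_seq(uint32_t old_snd_nxt)
{
	uint32_t seq = old_snd_nxt + 65535 + 2;

	if (seq == 0)
		seq = 1;
	return seq;
}

int main(void)
{
	printf("%u\n", reuse_write_seq(1000));		/* 66537 */
	printf("%u\n", reuse_write_seq(4294901759u));	/* wraps across 2^32, becomes 1 */
	return 0;
}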
...@@ -614,7 +600,7 @@ static int __tcp_v4_check_established(struct sock *sk, __u16 lport, ...@@ -614,7 +600,7 @@ static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
* in hash table socket with a funny identity. */ * in hash table socket with a funny identity. */
inet->num = lport; inet->num = lport;
inet->sport = htons(lport); inet->sport = htons(lport);
BUG_TRAP(sk->pprev==NULL); BUG_TRAP(!sk->pprev);
if ((sk->next = *skp) != NULL) if ((sk->next = *skp) != NULL)
(*skp)->pprev = &sk->next; (*skp)->pprev = &sk->next;
...@@ -651,16 +637,17 @@ static int tcp_v4_hash_connect(struct sock *sk) ...@@ -651,16 +637,17 @@ static int tcp_v4_hash_connect(struct sock *sk)
unsigned short snum = inet_sk(sk)->num; unsigned short snum = inet_sk(sk)->num;
struct tcp_bind_hashbucket *head; struct tcp_bind_hashbucket *head;
struct tcp_bind_bucket *tb; struct tcp_bind_bucket *tb;
int ret;
if (snum == 0) {
if (!snum) {
int rover; int rover;
int low = sysctl_local_port_range[0]; int low = sysctl_local_port_range[0];
int high = sysctl_local_port_range[1]; int high = sysctl_local_port_range[1];
int remaining = (high - low) + 1; int remaining = (high - low) + 1;
struct tcp_tw_bucket *tw = NULL; struct tcp_tw_bucket *tw = NULL;
local_bh_disable(); local_bh_disable();
/* TODO. Actually it is not so bad idea to remove /* TODO. Actually it is not so bad idea to remove
* tcp_portalloc_lock before next submission to Linus. * tcp_portalloc_lock before next submission to Linus.
* As soon as we touch this place at all it is time to think. * As soon as we touch this place at all it is time to think.
...@@ -676,29 +663,31 @@ static int tcp_v4_hash_connect(struct sock *sk) ...@@ -676,29 +663,31 @@ static int tcp_v4_hash_connect(struct sock *sk)
*/ */
spin_lock(&tcp_portalloc_lock); spin_lock(&tcp_portalloc_lock);
rover = tcp_port_rover; rover = tcp_port_rover;
do { do {
rover++; rover++;
if ((rover < low) || (rover > high)) if ((rover < low) || (rover > high))
rover = low; rover = low;
head = &tcp_bhash[tcp_bhashfn(rover)]; head = &tcp_bhash[tcp_bhashfn(rover)];
spin_lock(&head->lock); spin_lock(&head->lock);
/* Does not bother with rcv_saddr checks, /* Does not bother with rcv_saddr checks,
* because the established check is already * because the established check is already
* unique enough. * unique enough.
*/ */
for (tb = head->chain; tb; tb = tb->next) { for (tb = head->chain; tb; tb = tb->next) {
if (tb->port == rover) { if (tb->port == rover) {
BUG_TRAP(tb->owners != NULL); BUG_TRAP(tb->owners);
if (tb->fastreuse >= 0) if (tb->fastreuse >= 0)
goto next_port; goto next_port;
if (!__tcp_v4_check_established(sk, rover, &tw)) if (!__tcp_v4_check_established(sk,
rover,
&tw))
goto ok; goto ok;
goto next_port; goto next_port;
} }
} }
tb = tcp_bucket_create(head, rover); tb = tcp_bucket_create(head, rover);
if (!tb) { if (!tb) {
spin_unlock(&head->lock); spin_unlock(&head->lock);
...@@ -706,22 +695,22 @@ static int tcp_v4_hash_connect(struct sock *sk) ...@@ -706,22 +695,22 @@ static int tcp_v4_hash_connect(struct sock *sk)
} }
tb->fastreuse = -1; tb->fastreuse = -1;
goto ok; goto ok;
next_port: next_port:
spin_unlock(&head->lock); spin_unlock(&head->lock);
} while (--remaining > 0); } while (--remaining > 0);
tcp_port_rover = rover; tcp_port_rover = rover;
spin_unlock(&tcp_portalloc_lock); spin_unlock(&tcp_portalloc_lock);
local_bh_enable(); local_bh_enable();
return -EADDRNOTAVAIL; return -EADDRNOTAVAIL;
ok: ok:
/* All locks still held and bhs disabled */ /* All locks still held and bhs disabled */
tcp_port_rover = rover; tcp_port_rover = rover;
spin_unlock(&tcp_portalloc_lock); spin_unlock(&tcp_portalloc_lock);
tcp_bind_hash(sk, tb, rover); tcp_bind_hash(sk, tb, rover);
if (!sk->pprev) { if (!sk->pprev) {
inet_sk(sk)->sport = htons(rover); inet_sk(sk)->sport = htons(rover);
...@@ -734,23 +723,23 @@ static int tcp_v4_hash_connect(struct sock *sk) ...@@ -734,23 +723,23 @@ static int tcp_v4_hash_connect(struct sock *sk)
tcp_timewait_kill(tw); tcp_timewait_kill(tw);
tcp_tw_put(tw); tcp_tw_put(tw);
} }
local_bh_enable(); ret = 0;
return 0; goto out;
} }
head = &tcp_bhash[tcp_bhashfn(snum)]; head = &tcp_bhash[tcp_bhashfn(snum)];
tb = (struct tcp_bind_bucket *)sk->prev; tb = (struct tcp_bind_bucket *)sk->prev;
spin_lock_bh(&head->lock); spin_lock_bh(&head->lock);
if (tb->owners == sk && sk->bind_next == NULL) { if (tb->owners == sk && !sk->bind_next) {
__tcp_v4_hash(sk, 0); __tcp_v4_hash(sk, 0);
spin_unlock_bh(&head->lock); spin_unlock_bh(&head->lock);
return 0; return 0;
} else { } else {
int ret;
spin_unlock(&head->lock); spin_unlock(&head->lock);
/* No definite answer... Walk to established hash table */ /* No definite answer... Walk to established hash table */
ret = __tcp_v4_check_established(sk, snum, NULL); ret = __tcp_v4_check_established(sk, snum, NULL);
out:
local_bh_enable(); local_bh_enable();
return ret; return ret;
} }
...@@ -761,21 +750,21 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) ...@@ -761,21 +750,21 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{ {
struct inet_opt *inet = inet_sk(sk); struct inet_opt *inet = inet_sk(sk);
struct tcp_opt *tp = tcp_sk(sk); struct tcp_opt *tp = tcp_sk(sk);
struct sockaddr_in *usin = (struct sockaddr_in *) uaddr; struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
struct rtable *rt; struct rtable *rt;
u32 daddr, nexthop; u32 daddr, nexthop;
int tmp; int tmp;
int err; int err;
if (addr_len < sizeof(struct sockaddr_in)) if (addr_len < sizeof(struct sockaddr_in))
return(-EINVAL); return -EINVAL;
if (usin->sin_family != AF_INET) if (usin->sin_family != AF_INET)
return(-EAFNOSUPPORT); return -EAFNOSUPPORT;
nexthop = daddr = usin->sin_addr.s_addr; nexthop = daddr = usin->sin_addr.s_addr;
if (inet->opt && inet->opt->srr) { if (inet->opt && inet->opt->srr) {
if (daddr == 0) if (!daddr)
return -EINVAL; return -EINVAL;
nexthop = inet->opt->faddr; nexthop = inet->opt->faddr;
} }
...@@ -785,7 +774,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) ...@@ -785,7 +774,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
if (tmp < 0) if (tmp < 0)
return tmp; return tmp;
if (rt->rt_flags&(RTCF_MULTICAST|RTCF_BROADCAST)) { if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
ip_rt_put(rt); ip_rt_put(rt);
return -ENETUNREACH; return -ENETUNREACH;
} }
...@@ -808,8 +797,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) ...@@ -808,8 +797,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
} }
if (sysctl_tcp_tw_recycle && if (sysctl_tcp_tw_recycle &&
!tp->ts_recent_stamp && !tp->ts_recent_stamp && rt->rt_dst == daddr) {
rt->rt_dst == daddr) {
struct inet_peer *peer = rt_get_peer(rt); struct inet_peer *peer = rt_get_peer(rt);
/* VJ's idea. We save last timestamp seen from /* VJ's idea. We save last timestamp seen from
...@@ -866,24 +854,24 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) ...@@ -866,24 +854,24 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
static __inline__ int tcp_v4_iif(struct sk_buff *skb) static __inline__ int tcp_v4_iif(struct sk_buff *skb)
{ {
return ((struct rtable*)skb->dst)->rt_iif; return ((struct rtable *)skb->dst)->rt_iif;
} }
static __inline__ unsigned tcp_v4_synq_hash(u32 raddr, u16 rport) static __inline__ unsigned tcp_v4_synq_hash(u32 raddr, u16 rport)
{ {
unsigned h = raddr ^ rport; unsigned h = raddr ^ rport;
h ^= h>>16; h ^= h >> 16;
h ^= h>>8; h ^= h >> 8;
return h&(TCP_SYNQ_HSIZE-1); return h & (TCP_SYNQ_HSIZE - 1);
} }
static struct open_request *tcp_v4_search_req(struct tcp_opt *tp, static struct open_request *tcp_v4_search_req(struct tcp_opt *tp,
struct open_request ***prevp, struct open_request ***prevp,
__u16 rport, __u16 rport,
__u32 raddr, __u32 laddr) __u32 raddr, __u32 laddr)
{ {
struct tcp_listen_opt *lopt = tp->listen_opt; struct tcp_listen_opt *lopt = tp->listen_opt;
struct open_request *req, **prev; struct open_request *req, **prev;
for (prev = &lopt->syn_table[tcp_v4_synq_hash(raddr, rport)]; for (prev = &lopt->syn_table[tcp_v4_synq_hash(raddr, rport)];
(req = *prev) != NULL; (req = *prev) != NULL;
...@@ -892,13 +880,13 @@ static struct open_request *tcp_v4_search_req(struct tcp_opt *tp, ...@@ -892,13 +880,13 @@ static struct open_request *tcp_v4_search_req(struct tcp_opt *tp,
req->af.v4_req.rmt_addr == raddr && req->af.v4_req.rmt_addr == raddr &&
req->af.v4_req.loc_addr == laddr && req->af.v4_req.loc_addr == laddr &&
TCP_INET_FAMILY(req->class->family)) { TCP_INET_FAMILY(req->class->family)) {
BUG_TRAP(req->sk == NULL); BUG_TRAP(!req->sk);
*prevp = prev; *prevp = prev;
return req; break;
} }
} }
return NULL; return req;
} }
static void tcp_v4_synq_add(struct sock *sk, struct open_request *req) static void tcp_v4_synq_add(struct sock *sk, struct open_request *req)
...@@ -920,7 +908,7 @@ static void tcp_v4_synq_add(struct sock *sk, struct open_request *req) ...@@ -920,7 +908,7 @@ static void tcp_v4_synq_add(struct sock *sk, struct open_request *req)
} }
/* /*
* This routine does path mtu discovery as defined in RFC1191. * This routine does path mtu discovery as defined in RFC1191.
*/ */
static inline void do_pmtu_discovery(struct sock *sk, struct iphdr *iph, static inline void do_pmtu_discovery(struct sock *sk, struct iphdr *iph,
...@@ -935,7 +923,7 @@ static inline void do_pmtu_discovery(struct sock *sk, struct iphdr *iph, ...@@ -935,7 +923,7 @@ static inline void do_pmtu_discovery(struct sock *sk, struct iphdr *iph,
* unfragmented). * unfragmented).
*/ */
if (sk->state == TCP_LISTEN) if (sk->state == TCP_LISTEN)
return; return;
/* We don't check in the destentry if pmtu discovery is forbidden /* We don't check in the destentry if pmtu discovery is forbidden
* on this route. We just assume that no packet_to_big packets * on this route. We just assume that no packet_to_big packets
...@@ -958,7 +946,7 @@ static inline void do_pmtu_discovery(struct sock *sk, struct iphdr *iph, ...@@ -958,7 +946,7 @@ static inline void do_pmtu_discovery(struct sock *sk, struct iphdr *iph,
tp->pmtu_cookie > dst->pmtu) { tp->pmtu_cookie > dst->pmtu) {
tcp_sync_mss(sk, dst->pmtu); tcp_sync_mss(sk, dst->pmtu);
/* Resend the TCP packet because it's /* Resend the TCP packet because it's
* clear that the old packet has been * clear that the old packet has been
* dropped. This is the new "fast" path mtu * dropped. This is the new "fast" path mtu
* discovery. * discovery.
...@@ -985,8 +973,8 @@ static inline void do_pmtu_discovery(struct sock *sk, struct iphdr *iph, ...@@ -985,8 +973,8 @@ static inline void do_pmtu_discovery(struct sock *sk, struct iphdr *iph,
void tcp_v4_err(struct sk_buff *skb, u32 info) void tcp_v4_err(struct sk_buff *skb, u32 info)
{ {
struct iphdr *iph = (struct iphdr*)skb->data; struct iphdr *iph = (struct iphdr *)skb->data;
struct tcphdr *th = (struct tcphdr*)(skb->data+(iph->ihl<<2)); struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
struct tcp_opt *tp; struct tcp_opt *tp;
struct inet_opt *inet; struct inet_opt *inet;
int type = skb->h.icmph->type; int type = skb->h.icmph->type;
...@@ -996,17 +984,18 @@ void tcp_v4_err(struct sk_buff *skb, u32 info) ...@@ -996,17 +984,18 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
int err; int err;
if (skb->len < (iph->ihl << 2) + 8) { if (skb->len < (iph->ihl << 2) + 8) {
ICMP_INC_STATS_BH(IcmpInErrors); ICMP_INC_STATS_BH(IcmpInErrors);
return; return;
} }
sk = tcp_v4_lookup(iph->daddr, th->dest, iph->saddr, th->source, tcp_v4_iif(skb)); sk = tcp_v4_lookup(iph->daddr, th->dest, iph->saddr,
if (sk == NULL) { th->source, tcp_v4_iif(skb));
if (!sk) {
ICMP_INC_STATS_BH(IcmpInErrors); ICMP_INC_STATS_BH(IcmpInErrors);
return; return;
} }
if (sk->state == TCP_TIME_WAIT) { if (sk->state == TCP_TIME_WAIT) {
tcp_tw_put((struct tcp_tw_bucket*)sk); tcp_tw_put((struct tcp_tw_bucket *)sk);
return; return;
} }
...@@ -1014,7 +1003,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info) ...@@ -1014,7 +1003,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
/* If too many ICMPs get dropped on busy /* If too many ICMPs get dropped on busy
* servers this needs to be solved differently. * servers this needs to be solved differently.
*/ */
if (sk->lock.users != 0) if (sk->lock.users)
NET_INC_STATS_BH(LockDroppedIcmps); NET_INC_STATS_BH(LockDroppedIcmps);
if (sk->state == TCP_CLOSE) if (sk->state == TCP_CLOSE)
...@@ -1033,18 +1022,18 @@ void tcp_v4_err(struct sk_buff *skb, u32 info) ...@@ -1033,18 +1022,18 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
/* This is deprecated, but if someone generated it, /* This is deprecated, but if someone generated it,
* we have no reasons to ignore it. * we have no reasons to ignore it.
*/ */
if (sk->lock.users == 0) if (!sk->lock.users)
tcp_enter_cwr(tp); tcp_enter_cwr(tp);
goto out; goto out;
case ICMP_PARAMETERPROB: case ICMP_PARAMETERPROB:
err = EPROTO; err = EPROTO;
break; break;
case ICMP_DEST_UNREACH: case ICMP_DEST_UNREACH:
if (code > NR_ICMP_UNREACH) if (code > NR_ICMP_UNREACH)
goto out; goto out;
if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */ if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
if (sk->lock.users == 0) if (!sk->lock.users)
do_pmtu_discovery(sk, iph, info); do_pmtu_discovery(sk, iph, info);
goto out; goto out;
} }
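When the socket is unlocked and an ICMP_FRAG_NEEDED arrives, do_pmtu_discovery() lowers the cached MSS to fit the reported MTU and the blocked segment is retransmitted (the "fast" path MTU discovery noted in the comments above). A hedged back-of-the-envelope of the MSS arithmetic only, ignoring IP and TCP options and everything else tcp_sync_mss() accounts for:

#include <stdio.h>

/* Rough MSS that fits a given path MTU, assuming bare 20-byte IP and
 * 20-byte TCP headers; a simplification, not the kernel's calculation. */
static int mss_for_pmtu(int pmtu)
{
	return pmtu - 20 /* IP */ - 20 /* TCP */;
}

int main(void)
{
	printf("PMTU 1500 -> MSS %d\n", mss_for_pmtu(1500));	/* 1460 */
	printf("PMTU 576  -> MSS %d\n", mss_for_pmtu(576));	/* 536, the mss_clamp default later in this patch */
	return 0;
}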
...@@ -1061,39 +1050,38 @@ void tcp_v4_err(struct sk_buff *skb, u32 info) ...@@ -1061,39 +1050,38 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
switch (sk->state) { switch (sk->state) {
struct open_request *req, **prev; struct open_request *req, **prev;
case TCP_LISTEN: case TCP_LISTEN:
if (sk->lock.users != 0) if (sk->lock.users)
goto out; goto out;
req = tcp_v4_search_req(tp, &prev, req = tcp_v4_search_req(tp, &prev, th->dest,
th->dest, iph->daddr, iph->saddr);
iph->daddr, iph->saddr);
if (!req) if (!req)
goto out; goto out;
/* ICMPs are not backlogged, hence we cannot get /* ICMPs are not backlogged, hence we cannot get
an established socket here. an established socket here.
*/ */
BUG_TRAP(req->sk == NULL); BUG_TRAP(!req->sk);
if (seq != req->snt_isn) { if (seq != req->snt_isn) {
NET_INC_STATS_BH(OutOfWindowIcmps); NET_INC_STATS_BH(OutOfWindowIcmps);
goto out; goto out;
} }
/* /*
* Still in SYN_RECV, just remove it silently. * Still in SYN_RECV, just remove it silently.
* There is no good way to pass the error to the newly * There is no good way to pass the error to the newly
* created socket, and POSIX does not want network * created socket, and POSIX does not want network
* errors returned from accept(). * errors returned from accept().
*/ */
tcp_synq_drop(sk, req, prev); tcp_synq_drop(sk, req, prev);
goto out; goto out;
case TCP_SYN_SENT: case TCP_SYN_SENT:
case TCP_SYN_RECV: /* Cannot happen. case TCP_SYN_RECV: /* Cannot happen.
It can f.e. if SYNs crossed. It can f.e. if SYNs crossed.
*/ */
if (sk->lock.users == 0) { if (!sk->lock.users) {
TCP_INC_STATS_BH(TcpAttemptFails); TCP_INC_STATS_BH(TcpAttemptFails);
sk->err = err; sk->err = err;
...@@ -1123,7 +1111,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info) ...@@ -1123,7 +1111,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
*/ */
inet = inet_sk(sk); inet = inet_sk(sk);
if (sk->lock.users == 0 && inet->recverr) { if (!sk->lock.users && inet->recverr) {
sk->err = err; sk->err = err;
sk->error_report(sk); sk->error_report(sk);
} else { /* Only an error on timeout */ } else { /* Only an error on timeout */
...@@ -1136,7 +1124,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info) ...@@ -1136,7 +1124,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
} }
/* This routine computes an IPv4 TCP checksum. */ /* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len, void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len,
struct sk_buff *skb) struct sk_buff *skb)
{ {
struct inet_opt *inet = inet_sk(sk); struct inet_opt *inet = inet_sk(sk);
...@@ -1146,7 +1134,9 @@ void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len, ...@@ -1146,7 +1134,9 @@ void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len,
skb->csum = offsetof(struct tcphdr, check); skb->csum = offsetof(struct tcphdr, check);
} else { } else {
th->check = tcp_v4_check(th, len, inet->saddr, inet->daddr, th->check = tcp_v4_check(th, len, inet->saddr, inet->daddr,
csum_partial((char *)th, th->doff<<2, skb->csum)); csum_partial((char *)th,
th->doff << 2,
skb->csum));
} }
} }
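tcp_v4_send_check() above either leaves the work to hardware (CHECKSUM_HW, storing only the pseudo-header sum and the offset of the check field) or folds csum_partial() over the segment into tcp_v4_check(). A rough userspace reference of the 16-bit ones'-complement arithmetic involved; illustrative only, not the kernel's optimized implementation:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Plain ones'-complement sum over a buffer, folded to 16 bits and
 * inverted; the arithmetic behind csum_partial()/tcp_v4_check(). */
static uint16_t csum16(const uint8_t *buf, size_t len, uint32_t sum)
{
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)buf[i] << 8 | buf[i + 1];
	if (len & 1)
		sum += (uint32_t)buf[len - 1] << 8;	/* pad the odd byte */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);	/* fold carries back in */
	return (uint16_t)~sum;
}

int main(void)
{
	/* checksum of a trivial 4-byte "segment"; a real TCP checksum would
	 * first add the pseudo-header (saddr, daddr, IPPROTO_TCP, length). */
	uint8_t seg[4] = { 0x00, 0x01, 0xf2, 0x03 };

	printf("0x%04x\n", csum16(seg, sizeof(seg), 0));
	return 0;
}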
...@@ -1173,34 +1163,32 @@ static void tcp_v4_send_reset(struct sk_buff *skb) ...@@ -1173,34 +1163,32 @@ static void tcp_v4_send_reset(struct sk_buff *skb)
if (th->rst) if (th->rst)
return; return;
if (((struct rtable*)skb->dst)->rt_type != RTN_LOCAL) if (((struct rtable *)skb->dst)->rt_type != RTN_LOCAL)
return; return;
/* Swap the send and the receive. */ /* Swap the send and the receive. */
memset(&rth, 0, sizeof(struct tcphdr)); memset(&rth, 0, sizeof(struct tcphdr));
rth.dest = th->source; rth.dest = th->source;
rth.source = th->dest; rth.source = th->dest;
rth.doff = sizeof(struct tcphdr)/4; rth.doff = sizeof(struct tcphdr) / 4;
rth.rst = 1; rth.rst = 1;
if (th->ack) { if (th->ack) {
rth.seq = th->ack_seq; rth.seq = th->ack_seq;
} else { } else {
rth.ack = 1; rth.ack = 1;
rth.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin rth.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
+ skb->len - (th->doff<<2)); skb->len - (th->doff << 2));
} }
memset(&arg, 0, sizeof arg); memset(&arg, 0, sizeof arg);
arg.iov[0].iov_base = (unsigned char *)&rth; arg.iov[0].iov_base = (unsigned char *)&rth;
arg.iov[0].iov_len = sizeof rth; arg.iov[0].iov_len = sizeof rth;
arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr, arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
skb->nh.iph->saddr, /*XXX*/ skb->nh.iph->saddr, /*XXX*/
sizeof(struct tcphdr), sizeof(struct tcphdr), IPPROTO_TCP, 0);
IPPROTO_TCP,
0);
arg.n_iov = 1; arg.n_iov = 1;
arg.csumoffset = offsetof(struct tcphdr, check) / 2; arg.csumoffset = offsetof(struct tcphdr, check) / 2;
inet_sk(tcp_socket->sk)->ttl = sysctl_ip_default_ttl; inet_sk(tcp_socket->sk)->ttl = sysctl_ip_default_ttl;
ip_send_reply(tcp_socket->sk, skb, &arg, sizeof rth); ip_send_reply(tcp_socket->sk, skb, &arg, sizeof rth);
...@@ -1213,7 +1201,8 @@ static void tcp_v4_send_reset(struct sk_buff *skb) ...@@ -1213,7 +1201,8 @@ static void tcp_v4_send_reset(struct sk_buff *skb)
outside socket context is ugly, certainly. What can I do? outside socket context is ugly, certainly. What can I do?
*/ */
static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts) static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
u32 win, u32 ts)
{ {
struct tcphdr *th = skb->h.th; struct tcphdr *th = skb->h.th;
struct { struct {
...@@ -1225,34 +1214,31 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ...@@ -1225,34 +1214,31 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32
memset(&rep.th, 0, sizeof(struct tcphdr)); memset(&rep.th, 0, sizeof(struct tcphdr));
memset(&arg, 0, sizeof arg); memset(&arg, 0, sizeof arg);
arg.iov[0].iov_base = (unsigned char *)&rep; arg.iov[0].iov_base = (unsigned char *)&rep;
arg.iov[0].iov_len = sizeof(rep.th); arg.iov[0].iov_len = sizeof(rep.th);
arg.n_iov = 1; arg.n_iov = 1;
if (ts) { if (ts) {
rep.tsopt[0] = __constant_htonl((TCPOPT_NOP << 24) | rep.tsopt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
(TCPOPT_NOP << 16) | (TCPOPT_TIMESTAMP << 8) |
(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
TCPOLEN_TIMESTAMP);
rep.tsopt[1] = htonl(tcp_time_stamp); rep.tsopt[1] = htonl(tcp_time_stamp);
rep.tsopt[2] = htonl(ts); rep.tsopt[2] = htonl(ts);
arg.iov[0].iov_len = sizeof(rep); arg.iov[0].iov_len = sizeof(rep);
} }
/* Swap the send and the receive. */ /* Swap the send and the receive. */
rep.th.dest = th->source; rep.th.dest = th->source;
rep.th.source = th->dest; rep.th.source = th->dest;
rep.th.doff = arg.iov[0].iov_len/4; rep.th.doff = arg.iov[0].iov_len / 4;
rep.th.seq = htonl(seq); rep.th.seq = htonl(seq);
rep.th.ack_seq = htonl(ack); rep.th.ack_seq = htonl(ack);
rep.th.ack = 1; rep.th.ack = 1;
rep.th.window = htons(win); rep.th.window = htons(win);
arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr, arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
skb->nh.iph->saddr, /*XXX*/ skb->nh.iph->saddr, /*XXX*/
arg.iov[0].iov_len, arg.iov[0].iov_len, IPPROTO_TCP, 0);
IPPROTO_TCP, arg.csumoffset = offsetof(struct tcphdr, check) / 2;
0);
arg.csumoffset = offsetof(struct tcphdr, check) / 2;
ip_send_reply(tcp_socket->sk, skb, &arg, arg.iov[0].iov_len); ip_send_reply(tcp_socket->sk, skb, &arg, arg.iov[0].iov_len);
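When a timestamp echo is requested, the ACK built above carries the option as three 32-bit words: two NOPs plus the kind/length pair in the first word, then TSval and TSecr. A small sketch of how that first word is packed, using the standard option constants (NOP = 1, TIMESTAMP kind = 8, length = 10):

#include <stdio.h>
#include <stdint.h>

#define TCPOPT_NOP		1
#define TCPOPT_TIMESTAMP	8
#define TCPOLEN_TIMESTAMP	10

int main(void)
{
	/* First option word in host order: NOP, NOP, kind=8, len=10 */
	uint32_t w = (TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
		     (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP;

	printf("0x%08x\n", w);		/* 0x0101080a */
	return 0;
}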
...@@ -1264,26 +1250,25 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb) ...@@ -1264,26 +1250,25 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sk; struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sk;
tcp_v4_send_ack(skb, tw->snd_nxt, tw->rcv_nxt, tcp_v4_send_ack(skb, tw->snd_nxt, tw->rcv_nxt,
tw->rcv_wnd>>tw->rcv_wscale, tw->ts_recent); tw->rcv_wnd >> tw->rcv_wscale, tw->ts_recent);
tcp_tw_put(tw); tcp_tw_put(tw);
} }
static void tcp_v4_or_send_ack(struct sk_buff *skb, struct open_request *req) static void tcp_v4_or_send_ack(struct sk_buff *skb, struct open_request *req)
{ {
tcp_v4_send_ack(skb, req->snt_isn+1, req->rcv_isn+1, req->rcv_wnd, tcp_v4_send_ack(skb, req->snt_isn + 1, req->rcv_isn + 1, req->rcv_wnd,
req->ts_recent); req->ts_recent);
} }
static struct dst_entry* tcp_v4_route_req(struct sock *sk, struct open_request *req) static struct dst_entry* tcp_v4_route_req(struct sock *sk,
struct open_request *req)
{ {
struct rtable *rt; struct rtable *rt;
struct ip_options *opt; struct ip_options *opt = req->af.v4_req.opt;
opt = req->af.v4_req.opt; if (ip_route_output(&rt, ((opt && opt->srr) ? opt->faddr :
if(ip_route_output(&rt, ((opt && opt->srr) ? req->af.v4_req.rmt_addr),
opt->faddr :
req->af.v4_req.rmt_addr),
req->af.v4_req.loc_addr, req->af.v4_req.loc_addr,
RT_CONN_FLAGS(sk), sk->bound_dev_if)) { RT_CONN_FLAGS(sk), sk->bound_dev_if)) {
IP_INC_STATS_BH(IpOutNoRoutes); IP_INC_STATS_BH(IpOutNoRoutes);
...@@ -1298,10 +1283,10 @@ static struct dst_entry* tcp_v4_route_req(struct sock *sk, struct open_request * ...@@ -1298,10 +1283,10 @@ static struct dst_entry* tcp_v4_route_req(struct sock *sk, struct open_request *
} }
/* /*
* Send a SYN-ACK after having received an ACK. * Send a SYN-ACK after having received an ACK.
* This still operates on a open_request only, not on a big * This still operates on a open_request only, not on a big
* socket. * socket.
*/ */
static int tcp_v4_send_synack(struct sock *sk, struct open_request *req, static int tcp_v4_send_synack(struct sock *sk, struct open_request *req,
struct dst_entry *dst) struct dst_entry *dst)
{ {
...@@ -1309,8 +1294,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct open_request *req, ...@@ -1309,8 +1294,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct open_request *req,
struct sk_buff * skb; struct sk_buff * skb;
/* First, grab a route. */ /* First, grab a route. */
if (dst == NULL && if (!dst && (dst = tcp_v4_route_req(sk, req)) == NULL)
(dst = tcp_v4_route_req(sk, req)) == NULL)
goto out; goto out;
skb = tcp_make_synack(sk, dst, req); skb = tcp_make_synack(sk, dst, req);
...@@ -1319,11 +1303,14 @@ static int tcp_v4_send_synack(struct sock *sk, struct open_request *req, ...@@ -1319,11 +1303,14 @@ static int tcp_v4_send_synack(struct sock *sk, struct open_request *req,
struct tcphdr *th = skb->h.th; struct tcphdr *th = skb->h.th;
th->check = tcp_v4_check(th, skb->len, th->check = tcp_v4_check(th, skb->len,
req->af.v4_req.loc_addr, req->af.v4_req.rmt_addr, req->af.v4_req.loc_addr,
csum_partial((char *)th, skb->len, skb->csum)); req->af.v4_req.rmt_addr,
csum_partial((char *)th, skb->len,
skb->csum));
err = ip_build_and_send_pkt(skb, sk, req->af.v4_req.loc_addr, err = ip_build_and_send_pkt(skb, sk, req->af.v4_req.loc_addr,
req->af.v4_req.rmt_addr, req->af.v4_req.opt); req->af.v4_req.rmt_addr,
req->af.v4_req.opt);
if (err == NET_XMIT_CN) if (err == NET_XMIT_CN)
err = 0; err = 0;
} }
...@@ -1335,7 +1322,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct open_request *req, ...@@ -1335,7 +1322,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct open_request *req,
/* /*
* IPv4 open_request destructor. * IPv4 open_request destructor.
*/ */
static void tcp_v4_or_free(struct open_request *req) static void tcp_v4_or_free(struct open_request *req)
{ {
if (req->af.v4_req.opt) if (req->af.v4_req.opt)
...@@ -1345,26 +1332,26 @@ static void tcp_v4_or_free(struct open_request *req) ...@@ -1345,26 +1332,26 @@ static void tcp_v4_or_free(struct open_request *req)
static inline void syn_flood_warning(struct sk_buff *skb) static inline void syn_flood_warning(struct sk_buff *skb)
{ {
static unsigned long warntime; static unsigned long warntime;
if (jiffies - warntime > HZ*60) { if (jiffies - warntime > HZ * 60) {
warntime = jiffies; warntime = jiffies;
printk(KERN_INFO printk(KERN_INFO
"possible SYN flooding on port %d. Sending cookies.\n", "possible SYN flooding on port %d. Sending cookies.\n",
ntohs(skb->h.th->dest)); ntohs(skb->h.th->dest));
} }
} }
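The warning above is rate-limited by remembering the jiffy of the last message and printing again only after HZ * 60 ticks, i.e. about once a minute. A userspace sketch of the same pattern, with wall-clock seconds standing in for jiffies:

#include <stdio.h>
#include <time.h>

/* Print at most one warning per minute, mirroring syn_flood_warning(). */
static void warn_rate_limited(const char *msg)
{
	static time_t warntime;
	time_t now = time(NULL);

	if (now - warntime > 60) {
		warntime = now;
		printf("%s\n", msg);
	}
}

int main(void)
{
	warn_rate_limited("possible SYN flooding");	/* prints */
	warn_rate_limited("possible SYN flooding");	/* suppressed for a minute */
	return 0;
}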
/* /*
* Save and compile IPv4 options into the open_request if needed. * Save and compile IPv4 options into the open_request if needed.
*/ */
static inline struct ip_options * static inline struct ip_options *tcp_v4_save_options(struct sock *sk,
tcp_v4_save_options(struct sock *sk, struct sk_buff *skb) struct sk_buff *skb)
{ {
struct ip_options *opt = &(IPCB(skb)->opt); struct ip_options *opt = &(IPCB(skb)->opt);
struct ip_options *dopt = NULL; struct ip_options *dopt = NULL;
if (opt && opt->optlen) { if (opt && opt->optlen) {
int opt_size = optlength(opt); int opt_size = optlength(opt);
dopt = kmalloc(opt_size, GFP_ATOMIC); dopt = kmalloc(opt_size, GFP_ATOMIC);
if (dopt) { if (dopt) {
if (ip_options_echo(dopt, skb)) { if (ip_options_echo(dopt, skb)) {
...@@ -1376,7 +1363,7 @@ tcp_v4_save_options(struct sock *sk, struct sk_buff *skb) ...@@ -1376,7 +1363,7 @@ tcp_v4_save_options(struct sock *sk, struct sk_buff *skb)
return dopt; return dopt;
} }
/* /*
* Maximum number of SYN_RECV sockets in queue per LISTEN socket. * Maximum number of SYN_RECV sockets in queue per LISTEN socket.
* One SYN_RECV socket costs about 80bytes on a 32bit machine. * One SYN_RECV socket costs about 80bytes on a 32bit machine.
* It would be better to replace it with a global counter for all sockets * It would be better to replace it with a global counter for all sockets
...@@ -1389,14 +1376,14 @@ tcp_v4_save_options(struct sock *sk, struct sk_buff *skb) ...@@ -1389,14 +1376,14 @@ tcp_v4_save_options(struct sock *sk, struct sk_buff *skb)
* (<=32Mb of memory) and to 1024 on normal or better ones (>=256Mb). * (<=32Mb of memory) and to 1024 on normal or better ones (>=256Mb).
* Further increasing requires to change hash table size. * Further increasing requires to change hash table size.
*/ */
int sysctl_max_syn_backlog = 256; int sysctl_max_syn_backlog = 256;
struct or_calltable or_ipv4 = { struct or_calltable or_ipv4 = {
PF_INET, family: PF_INET,
tcp_v4_send_synack, rtx_syn_ack: tcp_v4_send_synack,
tcp_v4_or_send_ack, send_ack: tcp_v4_or_send_ack,
tcp_v4_or_free, destructor: tcp_v4_or_free,
tcp_v4_send_reset send_reset: tcp_v4_send_reset,
}; };
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
...@@ -1414,9 +1401,9 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) ...@@ -1414,9 +1401,9 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
#endif #endif
/* Never answer to SYNs send to broadcast or multicast */ /* Never answer to SYNs send to broadcast or multicast */
if (((struct rtable *)skb->dst)->rt_flags & if (((struct rtable *)skb->dst)->rt_flags &
(RTCF_BROADCAST|RTCF_MULTICAST)) (RTCF_BROADCAST | RTCF_MULTICAST))
goto drop; goto drop;
/* TW buckets are converted to open requests without /* TW buckets are converted to open requests without
* limitations, they conserve resources and peer is * limitations, they conserve resources and peer is
...@@ -1425,7 +1412,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) ...@@ -1425,7 +1412,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
if (tcp_synq_is_full(sk) && !isn) { if (tcp_synq_is_full(sk) && !isn) {
#ifdef CONFIG_SYN_COOKIES #ifdef CONFIG_SYN_COOKIES
if (sysctl_tcp_syncookies) { if (sysctl_tcp_syncookies) {
want_cookie = 1; want_cookie = 1;
} else } else
#endif #endif
goto drop; goto drop;
...@@ -1440,12 +1427,12 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) ...@@ -1440,12 +1427,12 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
goto drop; goto drop;
req = tcp_openreq_alloc(); req = tcp_openreq_alloc();
if (req == NULL) if (!req)
goto drop; goto drop;
tcp_clear_options(&tp); tcp_clear_options(&tp);
tp.mss_clamp = 536; tp.mss_clamp = 536;
tp.user_mss = tcp_sk(sk)->user_mss; tp.user_mss = tcp_sk(sk)->user_mss;
tcp_parse_options(skb, &tp, 0); tcp_parse_options(skb, &tp, 0);
...@@ -1454,14 +1441,14 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) ...@@ -1454,14 +1441,14 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
tp.saw_tstamp = 0; tp.saw_tstamp = 0;
} }
if (tp.saw_tstamp && tp.rcv_tsval == 0) { if (tp.saw_tstamp && !tp.rcv_tsval) {
/* Some OSes (unknown ones, but I see them on web server, which /* Some OSes (unknown ones, but I see them on web server, which
* contains information interesting only for windows' * contains information interesting only for windows'
* users) do not send their stamp in SYN. It is easy case. * users) do not send their stamp in SYN. It is easy case.
* We simply do not advertise TS support. * We simply do not advertise TS support.
*/ */
tp.saw_tstamp = 0; tp.saw_tstamp = 0;
tp.tstamp_ok = 0; tp.tstamp_ok = 0;
} }
tp.tstamp_ok = tp.saw_tstamp; tp.tstamp_ok = tp.saw_tstamp;
...@@ -1479,7 +1466,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) ...@@ -1479,7 +1466,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
syn_flood_warning(skb); syn_flood_warning(skb);
#endif #endif
isn = cookie_v4_init_sequence(sk, skb, &req->mss); isn = cookie_v4_init_sequence(sk, skb, &req->mss);
} else if (isn == 0) { } else if (!isn) {
struct inet_peer *peer = NULL; struct inet_peer *peer = NULL;
/* VJ's idea. We save last timestamp seen /* VJ's idea. We save last timestamp seen
...@@ -1494,10 +1481,11 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) ...@@ -1494,10 +1481,11 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
if (tp.saw_tstamp && if (tp.saw_tstamp &&
sysctl_tcp_tw_recycle && sysctl_tcp_tw_recycle &&
(dst = tcp_v4_route_req(sk, req)) != NULL && (dst = tcp_v4_route_req(sk, req)) != NULL &&
(peer = rt_get_peer((struct rtable*)dst)) != NULL && (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
peer->v4daddr == saddr) { peer->v4daddr == saddr) {
if (xtime.tv_sec < peer->tcp_ts_stamp + TCP_PAWS_MSL && if (xtime.tv_sec < peer->tcp_ts_stamp + TCP_PAWS_MSL &&
(s32)(peer->tcp_ts - req->ts_recent) > TCP_PAWS_WINDOW) { (s32)(peer->tcp_ts - req->ts_recent) >
TCP_PAWS_WINDOW) {
NET_INC_STATS_BH(PAWSPassiveRejected); NET_INC_STATS_BH(PAWSPassiveRejected);
dst_release(dst); dst_release(dst);
goto drop_and_free; goto drop_and_free;
...@@ -1505,19 +1493,23 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) ...@@ -1505,19 +1493,23 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
} }
/* Kill the following clause, if you dislike this way. */ /* Kill the following clause, if you dislike this way. */
else if (!sysctl_tcp_syncookies && else if (!sysctl_tcp_syncookies &&
(sysctl_max_syn_backlog - tcp_synq_len(sk) (sysctl_max_syn_backlog - tcp_synq_len(sk) <
< (sysctl_max_syn_backlog>>2)) && (sysctl_max_syn_backlog >> 2)) &&
(!peer || !peer->tcp_ts_stamp) && (!peer || !peer->tcp_ts_stamp) &&
(!dst || !dst->rtt)) { (!dst || !dst->rtt)) {
/* Without syncookies last quarter of /* Without syncookies last quarter of
* backlog is filled with destinations, proven to be alive. * backlog is filled with destinations,
* proven to be alive.
* It means that we continue to communicate * It means that we continue to communicate
* to destinations, already remembered * to destinations, already remembered
* to the moment of synflood. * to the moment of synflood.
*/ */
NETDEBUG(if (net_ratelimit()) \ NETDEBUG(if (net_ratelimit()) \
printk(KERN_DEBUG "TCP: drop open request from %u.%u.%u.%u/%u\n", \ printk(KERN_DEBUG "TCP: drop open "
NIPQUAD(saddr), ntohs(skb->h.th->source))); "request from %u.%u."
"%u.%u/%u\n", \
NIPQUAD(saddr),
ntohs(skb->h.th->source)));
dst_release(dst); dst_release(dst);
goto drop_and_free; goto drop_and_free;
} }
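The clause above drops a non-cookie SYN once fewer than a quarter of the SYN backlog slots remain free, unless the peer has already proven itself via a saved timestamp or a cached route RTT. With the default sysctl_max_syn_backlog of 256 declared just before or_ipv4, the sketch below shows where that threshold bites:

#include <stdio.h>
#include <stdbool.h>

/* Drop test from tcp_v4_conn_request(): refuse new open requests once
 * free backlog slots fall below a quarter of the maximum. */
static bool should_drop(int max_syn_backlog, int synq_len)
{
	return max_syn_backlog - synq_len < (max_syn_backlog >> 2);
}

int main(void)
{
	printf("%d\n", should_drop(256, 100));	/* 156 free >= 64 -> 0, keep */
	printf("%d\n", should_drop(256, 200));	/*  56 free <  64 -> 1, drop */
	return 0;
}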
...@@ -1530,27 +1522,27 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) ...@@ -1530,27 +1522,27 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
goto drop_and_free; goto drop_and_free;
if (want_cookie) { if (want_cookie) {
tcp_openreq_free(req); tcp_openreq_free(req);
} else { } else {
tcp_v4_synq_add(sk, req); tcp_v4_synq_add(sk, req);
} }
return 0; return 0;
drop_and_free: drop_and_free:
tcp_openreq_free(req); tcp_openreq_free(req);
drop: drop:
TCP_INC_STATS_BH(TcpAttemptFails); TCP_INC_STATS_BH(TcpAttemptFails);
return 0; return 0;
} }
/* /*
* The three way handshake has completed - we got a valid synack - * The three way handshake has completed - we got a valid synack -
* now create the new socket. * now create the new socket.
*/ */
struct sock * tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
struct open_request *req, struct open_request *req,
struct dst_entry *dst) struct dst_entry *dst)
{ {
struct inet_opt *newinet; struct inet_opt *newinet;
struct tcp_opt *newtp; struct tcp_opt *newtp;
...@@ -1559,8 +1551,7 @@ struct sock * tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, ...@@ -1559,8 +1551,7 @@ struct sock * tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
if (tcp_acceptq_is_full(sk)) if (tcp_acceptq_is_full(sk))
goto exit_overflow; goto exit_overflow;
if (dst == NULL && if (!dst && (dst = tcp_v4_route_req(sk, req)) == NULL)
(dst = tcp_v4_route_req(sk, req)) == NULL)
goto exit; goto exit;
newsk = tcp_create_openreq_child(sk, req, skb); newsk = tcp_create_openreq_child(sk, req, skb);
...@@ -1570,15 +1561,15 @@ struct sock * tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, ...@@ -1570,15 +1561,15 @@ struct sock * tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
newsk->dst_cache = dst; newsk->dst_cache = dst;
newsk->route_caps = dst->dev->features; newsk->route_caps = dst->dev->features;
newtp = tcp_sk(newsk); newtp = tcp_sk(newsk);
newinet = inet_sk(newsk); newinet = inet_sk(newsk);
newinet->daddr = req->af.v4_req.rmt_addr; newinet->daddr = req->af.v4_req.rmt_addr;
newinet->rcv_saddr = req->af.v4_req.loc_addr; newinet->rcv_saddr = req->af.v4_req.loc_addr;
newinet->saddr = req->af.v4_req.loc_addr; newinet->saddr = req->af.v4_req.loc_addr;
newinet->opt = req->af.v4_req.opt; newinet->opt = req->af.v4_req.opt;
req->af.v4_req.opt = NULL; req->af.v4_req.opt = NULL;
newinet->mc_index = tcp_v4_iif(skb); newinet->mc_index = tcp_v4_iif(skb);
newinet->mc_ttl = skb->nh.iph->ttl; newinet->mc_ttl = skb->nh.iph->ttl;
newtp->ext_header_len = 0; newtp->ext_header_len = 0;
if (newinet->opt) if (newinet->opt)
newtp->ext_header_len = newinet->opt->optlen; newtp->ext_header_len = newinet->opt->optlen;
...@@ -1601,18 +1592,16 @@ struct sock * tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, ...@@ -1601,18 +1592,16 @@ struct sock * tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
return NULL; return NULL;
} }
static struct sock *tcp_v4_hnd_req(struct sock *sk,struct sk_buff *skb) static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{ {
struct open_request *req, **prev;
struct tcphdr *th = skb->h.th; struct tcphdr *th = skb->h.th;
struct iphdr *iph = skb->nh.iph; struct iphdr *iph = skb->nh.iph;
struct tcp_opt *tp = tcp_sk(sk); struct tcp_opt *tp = tcp_sk(sk);
struct sock *nsk; struct sock *nsk;
struct open_request **prev;
/* Find possible connection requests. */ /* Find possible connection requests. */
req = tcp_v4_search_req(tp, &prev, struct open_request *req = tcp_v4_search_req(tp, &prev, th->source,
th->source, iph->saddr, iph->daddr);
iph->saddr, iph->daddr);
if (req) if (req)
return tcp_check_req(sk, skb, req, prev); return tcp_check_req(sk, skb, req, prev);
...@@ -1627,7 +1616,7 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk,struct sk_buff *skb) ...@@ -1627,7 +1616,7 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk,struct sk_buff *skb)
bh_lock_sock(nsk); bh_lock_sock(nsk);
return nsk; return nsk;
} }
tcp_tw_put((struct tcp_tw_bucket*)nsk); tcp_tw_put((struct tcp_tw_bucket *)nsk);
return NULL; return NULL;
} }
...@@ -1642,22 +1631,24 @@ static int tcp_v4_checksum_init(struct sk_buff *skb) ...@@ -1642,22 +1631,24 @@ static int tcp_v4_checksum_init(struct sk_buff *skb)
{ {
if (skb->ip_summed == CHECKSUM_HW) { if (skb->ip_summed == CHECKSUM_HW) {
skb->ip_summed = CHECKSUM_UNNECESSARY; skb->ip_summed = CHECKSUM_UNNECESSARY;
if (!tcp_v4_check(skb->h.th,skb->len,skb->nh.iph->saddr, if (!tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr,
skb->nh.iph->daddr,skb->csum)) skb->nh.iph->daddr, skb->csum))
return 0; return 0;
NETDEBUG(if (net_ratelimit()) printk(KERN_DEBUG "hw tcp v4 csum failed\n")); NETDEBUG(if (net_ratelimit())
printk(KERN_DEBUG "hw tcp v4 csum failed\n"));
skb->ip_summed = CHECKSUM_NONE; skb->ip_summed = CHECKSUM_NONE;
} }
if (skb->len <= 76) { if (skb->len <= 76) {
if (tcp_v4_check(skb->h.th,skb->len,skb->nh.iph->saddr, if (tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr,
skb->nh.iph->daddr, skb->nh.iph->daddr,
skb_checksum(skb, 0, skb->len, 0))) skb_checksum(skb, 0, skb->len, 0)))
return -1; return -1;
skb->ip_summed = CHECKSUM_UNNECESSARY; skb->ip_summed = CHECKSUM_UNNECESSARY;
} else { } else {
skb->csum = ~tcp_v4_check(skb->h.th,skb->len,skb->nh.iph->saddr, skb->csum = ~tcp_v4_check(skb->h.th, skb->len,
skb->nh.iph->daddr,0); skb->nh.iph->saddr,
skb->nh.iph->daddr, 0);
} }
return 0; return 0;
} }
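An aside on the cleaned-up tcp_v4_checksum_init() above: it verifies the checksum immediately when the hardware already summed the packet or when the packet is 76 bytes or less, and otherwise seeds skb->csum with a pseudo-header sum so the full check can be completed later. The helpers it calls ultimately reduce to the classic RFC 1071 16-bit ones'-complement sum; the following is a hedged, stand-alone illustration of that primitive (made-up names, not kernel code):

/* Hedged illustration (not kernel code): the RFC 1071-style 16-bit
 * ones'-complement sum that the checksum helpers above boil down to. */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

static uint16_t csum_rfc1071(const uint8_t *buf, size_t len)
{
	uint32_t sum = 0;

	while (len > 1) {
		sum += ((uint32_t)buf[0] << 8) | buf[1];
		buf += 2;
		len -= 2;
	}
	if (len)				/* odd trailing byte */
		sum += (uint32_t)buf[0] << 8;
	while (sum >> 16)			/* fold carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	const uint8_t data[] = { 0x45, 0x00, 0x00, 0x3c };

	printf("0x%04x\n", csum_rfc1071(data, sizeof(data)));
	return 0;
}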
...@@ -1686,13 +1677,13 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) ...@@ -1686,13 +1677,13 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
if (tcp_rcv_established(sk, skb, skb->h.th, skb->len)) if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
goto reset; goto reset;
TCP_CHECK_TIMER(sk); TCP_CHECK_TIMER(sk);
return 0; return 0;
} }
if (skb->len < (skb->h.th->doff<<2) || tcp_checksum_complete(skb)) if (skb->len < (skb->h.th->doff << 2) || tcp_checksum_complete(skb))
goto csum_err; goto csum_err;
if (sk->state == TCP_LISTEN) { if (sk->state == TCP_LISTEN) {
struct sock *nsk = tcp_v4_hnd_req(sk, skb); struct sock *nsk = tcp_v4_hnd_req(sk, skb);
if (!nsk) if (!nsk)
goto discard; goto discard;
...@@ -1715,7 +1706,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) ...@@ -1715,7 +1706,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
discard: discard:
kfree_skb(skb); kfree_skb(skb);
/* Be careful here. If this function gets more complicated and /* Be careful here. If this function gets more complicated and
* gcc suffers from register pressure on the x86, sk (in %ebx) * gcc suffers from register pressure on the x86, sk (in %ebx)
* might be destroyed here. This current version compiles correctly, * might be destroyed here. This current version compiles correctly,
* but you have been warned. * but you have been warned.
*/ */
...@@ -1736,7 +1727,7 @@ int tcp_v4_rcv(struct sk_buff *skb) ...@@ -1736,7 +1727,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
struct sock *sk; struct sock *sk;
int ret; int ret;
if (skb->pkt_type!=PACKET_HOST) if (skb->pkt_type != PACKET_HOST)
goto discard_it; goto discard_it;
/* Count it even if it's bad */ /* Count it even if it's bad */
...@@ -1747,9 +1738,9 @@ int tcp_v4_rcv(struct sk_buff *skb) ...@@ -1747,9 +1738,9 @@ int tcp_v4_rcv(struct sk_buff *skb)
th = skb->h.th; th = skb->h.th;
if (th->doff < sizeof(struct tcphdr)/4) if (th->doff < sizeof(struct tcphdr) / 4)
goto bad_packet; goto bad_packet;
if (!pskb_may_pull(skb, th->doff*4)) if (!pskb_may_pull(skb, th->doff * 4))
goto discard_it; goto discard_it;
/* An explanation is required here, I think. /* An explanation is required here, I think.
...@@ -1763,20 +1754,21 @@ int tcp_v4_rcv(struct sk_buff *skb) ...@@ -1763,20 +1754,21 @@ int tcp_v4_rcv(struct sk_buff *skb)
th = skb->h.th; th = skb->h.th;
TCP_SKB_CB(skb)->seq = ntohl(th->seq); TCP_SKB_CB(skb)->seq = ntohl(th->seq);
TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin + TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
skb->len - th->doff*4); skb->len - th->doff * 4);
TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq); TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
TCP_SKB_CB(skb)->when = 0; TCP_SKB_CB(skb)->when = 0;
TCP_SKB_CB(skb)->flags = skb->nh.iph->tos; TCP_SKB_CB(skb)->flags = skb->nh.iph->tos;
TCP_SKB_CB(skb)->sacked = 0; TCP_SKB_CB(skb)->sacked = 0;
sk = __tcp_v4_lookup(skb->nh.iph->saddr, th->source, sk = __tcp_v4_lookup(skb->nh.iph->saddr, th->source,
skb->nh.iph->daddr, ntohs(th->dest), tcp_v4_iif(skb)); skb->nh.iph->daddr, ntohs(th->dest),
tcp_v4_iif(skb));
if (!sk) if (!sk)
goto no_tcp_socket; goto no_tcp_socket;
process: process:
if(!ipsec_sk_policy(sk,skb)) if (!ipsec_sk_policy(sk, skb))
goto discard_and_relse; goto discard_and_relse;
if (sk->state == TCP_TIME_WAIT) if (sk->state == TCP_TIME_WAIT)
...@@ -1798,7 +1790,7 @@ int tcp_v4_rcv(struct sk_buff *skb) ...@@ -1798,7 +1790,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
return ret; return ret;
no_tcp_socket: no_tcp_socket:
if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) { if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
bad_packet: bad_packet:
TCP_INC_STATS_BH(TcpInErrs); TCP_INC_STATS_BH(TcpInErrs);
} else { } else {
...@@ -1815,18 +1807,17 @@ int tcp_v4_rcv(struct sk_buff *skb) ...@@ -1815,18 +1807,17 @@ int tcp_v4_rcv(struct sk_buff *skb)
goto discard_it; goto discard_it;
do_time_wait: do_time_wait:
if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) { if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
TCP_INC_STATS_BH(TcpInErrs); TCP_INC_STATS_BH(TcpInErrs);
goto discard_and_relse; goto discard_and_relse;
} }
switch(tcp_timewait_state_process((struct tcp_tw_bucket *)sk, switch (tcp_timewait_state_process((struct tcp_tw_bucket *)sk,
skb, th, skb->len)) { skb, th, skb->len)) {
case TCP_TW_SYN: case TCP_TW_SYN: {
{ struct sock *sk2 = tcp_v4_lookup_listener(skb->nh.iph->daddr,
struct sock *sk2; ntohs(th->dest),
tcp_v4_iif(skb));
sk2 = tcp_v4_lookup_listener(skb->nh.iph->daddr, ntohs(th->dest), tcp_v4_iif(skb)); if (sk2) {
if (sk2 != NULL) {
tcp_tw_deschedule((struct tcp_tw_bucket *)sk); tcp_tw_deschedule((struct tcp_tw_bucket *)sk);
tcp_timewait_kill((struct tcp_tw_bucket *)sk); tcp_timewait_kill((struct tcp_tw_bucket *)sk);
tcp_tw_put((struct tcp_tw_bucket *)sk); tcp_tw_put((struct tcp_tw_bucket *)sk);
...@@ -1884,7 +1875,7 @@ static int tcp_v4_reselect_saddr(struct sock *sk) ...@@ -1884,7 +1875,7 @@ static int tcp_v4_reselect_saddr(struct sock *sk)
if (sysctl_ip_dynaddr > 1) { if (sysctl_ip_dynaddr > 1) {
printk(KERN_INFO "tcp_v4_rebuild_header(): shifting inet->" printk(KERN_INFO "tcp_v4_rebuild_header(): shifting inet->"
"saddr from %d.%d.%d.%d to %d.%d.%d.%d\n", "saddr from %d.%d.%d.%d to %d.%d.%d.%d\n",
NIPQUAD(old_saddr), NIPQUAD(old_saddr),
NIPQUAD(new_saddr)); NIPQUAD(new_saddr));
} }
...@@ -1910,7 +1901,7 @@ int tcp_v4_rebuild_header(struct sock *sk) ...@@ -1910,7 +1901,7 @@ int tcp_v4_rebuild_header(struct sock *sk)
int err; int err;
/* Route is OK, nothing to do. */ /* Route is OK, nothing to do. */
if (rt != NULL) if (rt)
return 0; return 0;
/* Reroute. */ /* Reroute. */
...@@ -1958,15 +1949,15 @@ int tcp_v4_remember_stamp(struct sock *sk) ...@@ -1958,15 +1949,15 @@ int tcp_v4_remember_stamp(struct sock *sk)
{ {
struct inet_opt *inet = inet_sk(sk); struct inet_opt *inet = inet_sk(sk);
struct tcp_opt *tp = tcp_sk(sk); struct tcp_opt *tp = tcp_sk(sk);
struct rtable *rt = (struct rtable*)__sk_dst_get(sk); struct rtable *rt = (struct rtable *)__sk_dst_get(sk);
struct inet_peer *peer = NULL; struct inet_peer *peer = NULL;
int release_it = 0; int release_it = 0;
if (rt == NULL || rt->rt_dst != inet->daddr) { if (!rt || rt->rt_dst != inet->daddr) {
peer = inet_getpeer(inet->daddr, 1); peer = inet_getpeer(inet->daddr, 1);
release_it = 1; release_it = 1;
} else { } else {
if (rt->peer == NULL) if (!rt->peer)
rt_bind_peer(rt, 1); rt_bind_peer(rt, 1);
peer = rt->peer; peer = rt->peer;
} }
...@@ -2007,18 +1998,17 @@ int tcp_v4_tw_remember_stamp(struct tcp_tw_bucket *tw) ...@@ -2007,18 +1998,17 @@ int tcp_v4_tw_remember_stamp(struct tcp_tw_bucket *tw)
} }
struct tcp_func ipv4_specific = { struct tcp_func ipv4_specific = {
ip_queue_xmit, queue_xmit: ip_queue_xmit,
tcp_v4_send_check, send_check: tcp_v4_send_check,
tcp_v4_rebuild_header, rebuild_header: tcp_v4_rebuild_header,
tcp_v4_conn_request, conn_request: tcp_v4_conn_request,
tcp_v4_syn_recv_sock, syn_recv_sock: tcp_v4_syn_recv_sock,
tcp_v4_remember_stamp, remember_stamp: tcp_v4_remember_stamp,
sizeof(struct iphdr), net_header_len: sizeof(struct iphdr),
setsockopt: ip_setsockopt,
ip_setsockopt, getsockopt: ip_getsockopt,
ip_getsockopt, addr2sockaddr: v4_addr2sockaddr,
v4_addr2sockaddr, sockaddr_len: sizeof(struct sockaddr_in),
sizeof(struct sockaddr_in)
}; };
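The hunk above converts ipv4_specific from positional to labeled-field initialization (the gcc "name:" extension that was the prevailing kernel style at the time). For readers more used to standard C, here is a hedged stand-alone sketch of the equivalent C99 designated-initializer form, using a made-up ops structure rather than the real one:

/* Hypothetical example, not kernel code: designated initializers make the
 * field-to-value mapping explicit and tolerate fields being reordered. */
#include <stdio.h>

struct proto_ops_demo {
	int (*queue_xmit)(const char *pkt);
	int net_header_len;
	int sockaddr_len;
};

static int demo_xmit(const char *pkt)
{
	return printf("xmit: %s\n", pkt);
}

static struct proto_ops_demo demo_specific = {
	.queue_xmit	= demo_xmit,	/* C99 spelling of "queue_xmit:" */
	.net_header_len	= 20,		/* stand-in for sizeof(struct iphdr) */
	.sockaddr_len	= 16,		/* stand-in for sizeof(struct sockaddr_in) */
};

int main(void)
{
	return demo_specific.queue_xmit("hello") > 0 ? 0 : 1;
}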
/* NOTE: A lot of things set to zero explicitly by call to /* NOTE: A lot of things set to zero explicitly by call to
...@@ -2034,7 +2024,7 @@ static int tcp_v4_init_sock(struct sock *sk) ...@@ -2034,7 +2024,7 @@ static int tcp_v4_init_sock(struct sock *sk)
tp->rto = TCP_TIMEOUT_INIT; tp->rto = TCP_TIMEOUT_INIT;
tp->mdev = TCP_TIMEOUT_INIT; tp->mdev = TCP_TIMEOUT_INIT;
/* So many TCP implementations out there (incorrectly) count the /* So many TCP implementations out there (incorrectly) count the
* initial SYN frame in their delayed-ACK and congestion control * initial SYN frame in their delayed-ACK and congestion control
* algorithms that we must have the following bandaid to talk * algorithms that we must have the following bandaid to talk
...@@ -2082,7 +2072,7 @@ static int tcp_v4_destroy_sock(struct sock *sk) ...@@ -2082,7 +2072,7 @@ static int tcp_v4_destroy_sock(struct sock *sk)
__skb_queue_purge(&tp->ucopy.prequeue); __skb_queue_purge(&tp->ucopy.prequeue);
/* Clean up a referenced TCP bind bucket. */ /* Clean up a referenced TCP bind bucket. */
if(sk->prev != NULL) if (sk->prev)
tcp_put_port(sk); tcp_put_port(sk);
/* If sendmsg cached page exists, toss it. */ /* If sendmsg cached page exists, toss it. */
...@@ -2095,7 +2085,8 @@ static int tcp_v4_destroy_sock(struct sock *sk) ...@@ -2095,7 +2085,8 @@ static int tcp_v4_destroy_sock(struct sock *sk)
} }
/* Proc filesystem TCP sock list dumping. */ /* Proc filesystem TCP sock list dumping. */
static void get_openreq(struct sock *sk, struct open_request *req, char *tmpbuf, int i, int uid) static void get_openreq(struct sock *sk, struct open_request *req,
char *tmpbuf, int i, int uid)
{ {
int ttd = req->expires - jiffies; int ttd = req->expires - jiffies;
...@@ -2107,31 +2098,28 @@ static void get_openreq(struct sock *sk, struct open_request *req, char *tmpbuf, ...@@ -2107,31 +2098,28 @@ static void get_openreq(struct sock *sk, struct open_request *req, char *tmpbuf,
req->af.v4_req.rmt_addr, req->af.v4_req.rmt_addr,
ntohs(req->rmt_port), ntohs(req->rmt_port),
TCP_SYN_RECV, TCP_SYN_RECV,
0,0, /* could print option size, but that is af dependent. */ 0, 0, /* could print option size, but that is af dependent. */
1, /* timers active (only the expire timer) */ 1, /* timers active (only the expire timer) */
ttd, ttd,
req->retrans, req->retrans,
uid, uid,
0, /* non standard timer */ 0, /* non standard timer */
0, /* open_requests have no inode */ 0, /* open_requests have no inode */
atomic_read(&sk->refcnt), atomic_read(&sk->refcnt),
req req);
);
} }
static void get_tcp_sock(struct sock *sp, char *tmpbuf, int i) static void get_tcp_sock(struct sock *sp, char *tmpbuf, int i)
{ {
unsigned int dest, src;
__u16 destp, srcp;
int timer_active; int timer_active;
unsigned long timer_expires; unsigned long timer_expires;
struct tcp_opt *tp = tcp_sk(sp); struct tcp_opt *tp = tcp_sk(sp);
struct inet_opt *inet = inet_sk(sp); struct inet_opt *inet = inet_sk(sp);
unsigned int dest = inet->daddr;
unsigned int src = inet->rcv_saddr;
__u16 destp = ntohs(inet->dport);
__u16 srcp = ntohs(inet->sport);
dest = inet->daddr;
src = inet->rcv_saddr;
destp = ntohs(inet->dport);
srcp = ntohs(inet->sport);
if (tp->pending == TCP_TIME_RETRANS) { if (tp->pending == TCP_TIME_RETRANS) {
timer_active = 1; timer_active = 1;
timer_expires = tp->timeout; timer_expires = tp->timeout;
...@@ -2146,19 +2134,19 @@ static void get_tcp_sock(struct sock *sp, char *tmpbuf, int i) ...@@ -2146,19 +2134,19 @@ static void get_tcp_sock(struct sock *sp, char *tmpbuf, int i)
timer_expires = jiffies; timer_expires = jiffies;
} }
sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X" sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
" %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %u %u %u %u %d", "%08X %5d %8d %lu %d %p %u %u %u %u %d",
i, src, srcp, dest, destp, sp->state, i, src, srcp, dest, destp, sp->state,
tp->write_seq-tp->snd_una, tp->rcv_nxt-tp->copied_seq, tp->write_seq - tp->snd_una, tp->rcv_nxt - tp->copied_seq,
timer_active, timer_expires-jiffies, timer_active, timer_expires - jiffies,
tp->retransmits, tp->retransmits,
sock_i_uid(sp), sock_i_uid(sp),
tp->probes_out, tp->probes_out,
sock_i_ino(sp), sock_i_ino(sp),
atomic_read(&sp->refcnt), sp, atomic_read(&sp->refcnt), sp,
tp->rto, tp->ack.ato, (tp->ack.quick<<1)|tp->ack.pingpong, tp->rto, tp->ack.ato, (tp->ack.quick << 1) | tp->ack.pingpong,
tp->snd_cwnd, tp->snd_ssthresh>=0xFFFF?-1:tp->snd_ssthresh tp->snd_cwnd,
); tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh);
} }
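get_tcp_sock() above prints addresses with %08X straight from the socket's u32 fields, which is the raw hex that appears in /proc/net/tcp. As an illustrative aside (not part of this patch), decoding one such "address:port" pair on a little-endian host looks roughly like this:

/* Illustration only: decode a "%08X:%04X" pair as emitted by
 * get_tcp_sock() above, assuming a little-endian host. */
#include <stdio.h>

int main(void)
{
	const char *field = "0100007F:0016";	/* assumed sample: 127.0.0.1:22 */
	unsigned int addr, port;

	if (sscanf(field, "%x:%x", &addr, &port) != 2)
		return 1;

	/* The address was printed from a u32 holding network byte order,
	 * so on little-endian the low byte is the first octet. */
	printf("%u.%u.%u.%u:%u\n",
	       addr & 0xff, (addr >> 8) & 0xff,
	       (addr >> 16) & 0xff, (addr >> 24) & 0xff, port);
	return 0;
}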
static void get_timewait_sock(struct tcp_tw_bucket *tw, char *tmpbuf, int i) static void get_timewait_sock(struct tcp_tw_bucket *tw, char *tmpbuf, int i)
...@@ -2188,18 +2176,19 @@ int tcp_get_info(char *buffer, char **start, off_t offset, int length) ...@@ -2188,18 +2176,19 @@ int tcp_get_info(char *buffer, char **start, off_t offset, int length)
{ {
int len = 0, num = 0, i; int len = 0, num = 0, i;
off_t begin, pos = 0; off_t begin, pos = 0;
char tmpbuf[TMPSZ+1]; char tmpbuf[TMPSZ + 1];
if (offset < TMPSZ) if (offset < TMPSZ)
len += sprintf(buffer, "%-*s\n", TMPSZ-1, len += sprintf(buffer, "%-*s\n", TMPSZ - 1,
" sl local_address rem_address st tx_queue " " sl local_address rem_address st tx_queue "
"rx_queue tr tm->when retrnsmt uid timeout inode"); "rx_queue tr tm->when retrnsmt uid timeout "
"inode");
pos = TMPSZ; pos = TMPSZ;
/* First, walk listening socket table. */ /* First, walk listening socket table. */
tcp_listen_lock(); tcp_listen_lock();
for(i = 0; i < TCP_LHTABLE_SIZE; i++) { for (i = 0; i < TCP_LHTABLE_SIZE; i++) {
struct sock *sk; struct sock *sk;
struct tcp_listen_opt *lopt; struct tcp_listen_opt *lopt;
int k; int k;
...@@ -2215,7 +2204,8 @@ int tcp_get_info(char *buffer, char **start, off_t offset, int length) ...@@ -2215,7 +2204,8 @@ int tcp_get_info(char *buffer, char **start, off_t offset, int length)
pos += TMPSZ; pos += TMPSZ;
if (pos >= offset) { if (pos >= offset) {
get_tcp_sock(sk, tmpbuf, num); get_tcp_sock(sk, tmpbuf, num);
len += sprintf(buffer+len, "%-*s\n", TMPSZ-1, tmpbuf); len += sprintf(buffer + len, "%-*s\n",
TMPSZ - 1, tmpbuf);
if (pos >= offset + length) { if (pos >= offset + length) {
tcp_listen_unlock(); tcp_listen_unlock();
goto out_no_bh; goto out_no_bh;
...@@ -2226,17 +2216,22 @@ int tcp_get_info(char *buffer, char **start, off_t offset, int length) ...@@ -2226,17 +2216,22 @@ int tcp_get_info(char *buffer, char **start, off_t offset, int length)
uid = sock_i_uid(sk); uid = sock_i_uid(sk);
read_lock_bh(&tp->syn_wait_lock); read_lock_bh(&tp->syn_wait_lock);
lopt = tp->listen_opt; lopt = tp->listen_opt;
if (lopt && lopt->qlen != 0) { if (lopt && lopt->qlen) {
for (k=0; k<TCP_SYNQ_HSIZE; k++) { for (k = 0; k < TCP_SYNQ_HSIZE; k++) {
for (req = lopt->syn_table[k]; req; req = req->dl_next, num++) { for (req = lopt->syn_table[k];
req; req = req->dl_next, num++) {
if (!TCP_INET_FAMILY(req->class->family)) if (!TCP_INET_FAMILY(req->class->family))
continue; continue;
pos += TMPSZ; pos += TMPSZ;
if (pos <= offset) if (pos <= offset)
continue; continue;
get_openreq(sk, req, tmpbuf, num, uid); get_openreq(sk, req, tmpbuf,
len += sprintf(buffer+len, "%-*s\n", TMPSZ-1, tmpbuf); num, uid);
len += sprintf(buffer + len,
"%-*s\n",
TMPSZ - 1,
tmpbuf);
if (pos >= offset + length) { if (pos >= offset + length) {
read_unlock_bh(&tp->syn_wait_lock); read_unlock_bh(&tp->syn_wait_lock);
tcp_listen_unlock(); tcp_listen_unlock();
...@@ -2261,21 +2256,23 @@ int tcp_get_info(char *buffer, char **start, off_t offset, int length) ...@@ -2261,21 +2256,23 @@ int tcp_get_info(char *buffer, char **start, off_t offset, int length)
struct tcp_tw_bucket *tw; struct tcp_tw_bucket *tw;
read_lock(&head->lock); read_lock(&head->lock);
for(sk = head->chain; sk; sk = sk->next, num++) { for (sk = head->chain; sk; sk = sk->next, num++) {
if (!TCP_INET_FAMILY(sk->family)) if (!TCP_INET_FAMILY(sk->family))
continue; continue;
pos += TMPSZ; pos += TMPSZ;
if (pos <= offset) if (pos <= offset)
continue; continue;
get_tcp_sock(sk, tmpbuf, num); get_tcp_sock(sk, tmpbuf, num);
len += sprintf(buffer+len, "%-*s\n", TMPSZ-1, tmpbuf); len += sprintf(buffer + len, "%-*s\n",
TMPSZ - 1, tmpbuf);
if (pos >= offset + length) { if (pos >= offset + length) {
read_unlock(&head->lock); read_unlock(&head->lock);
goto out; goto out;
} }
} }
for (tw = (struct tcp_tw_bucket *)tcp_ehash[i+tcp_ehash_size].chain; for (tw = (struct tcp_tw_bucket *)tcp_ehash[i +
tw != NULL; tcp_ehash_size].chain;
tw;
tw = (struct tcp_tw_bucket *)tw->next, num++) { tw = (struct tcp_tw_bucket *)tw->next, num++) {
if (!TCP_INET_FAMILY(tw->family)) if (!TCP_INET_FAMILY(tw->family))
continue; continue;
...@@ -2283,7 +2280,8 @@ int tcp_get_info(char *buffer, char **start, off_t offset, int length) ...@@ -2283,7 +2280,8 @@ int tcp_get_info(char *buffer, char **start, off_t offset, int length)
if (pos <= offset) if (pos <= offset)
continue; continue;
get_timewait_sock(tw, tmpbuf, num); get_timewait_sock(tw, tmpbuf, num);
len += sprintf(buffer+len, "%-*s\n", TMPSZ-1, tmpbuf); len += sprintf(buffer + len, "%-*s\n",
TMPSZ - 1, tmpbuf);
if (pos >= offset + length) { if (pos >= offset + length) {
read_unlock(&head->lock); read_unlock(&head->lock);
goto out; goto out;
...@@ -2302,7 +2300,7 @@ int tcp_get_info(char *buffer, char **start, off_t offset, int length) ...@@ -2302,7 +2300,7 @@ int tcp_get_info(char *buffer, char **start, off_t offset, int length)
if (len > length) if (len > length)
len = length; len = length;
if (len < 0) if (len < 0)
len = 0; len = 0;
return len; return len;
} }
...@@ -2333,7 +2331,7 @@ void __init tcp_v4_init(struct net_proto_family *ops) ...@@ -2333,7 +2331,7 @@ void __init tcp_v4_init(struct net_proto_family *ops)
int err = sock_create(PF_INET, SOCK_RAW, IPPROTO_TCP, &tcp_socket); int err = sock_create(PF_INET, SOCK_RAW, IPPROTO_TCP, &tcp_socket);
if (err < 0) if (err < 0)
panic("Failed to create the TCP control socket.\n"); panic("Failed to create the TCP control socket.\n");
tcp_socket->sk->allocation=GFP_ATOMIC; tcp_socket->sk->allocation = GFP_ATOMIC;
inet_sk(tcp_socket->sk)->ttl = MAXTTL; inet_sk(tcp_socket->sk)->ttl = MAXTTL;
/* Unhash it so that IP input processing does not even /* Unhash it so that IP input processing does not even
......
...@@ -11,6 +11,7 @@ ...@@ -11,6 +11,7 @@
* *
* Fixes: * Fixes:
* Hideaki YOSHIFUJI : sin6_scope_id support * Hideaki YOSHIFUJI : sin6_scope_id support
* YOSHIFUJI,H.@USAGI : raw checksum (RFC2292(bis) compliance)
* *
* This program is free software; you can redistribute it and/or * This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License * modify it under the terms of the GNU General Public License
...@@ -487,11 +488,18 @@ static int rawv6_frag_cksum(const void *data, struct in6_addr *addr, ...@@ -487,11 +488,18 @@ static int rawv6_frag_cksum(const void *data, struct in6_addr *addr,
hdr->cksum = csum_ipv6_magic(addr, daddr, hdr->len, hdr->cksum = csum_ipv6_magic(addr, daddr, hdr->len,
hdr->proto, hdr->cksum); hdr->proto, hdr->cksum);
if (opt->offset < len) { if (opt->offset + 1 < len) {
__u16 *csum; __u16 *csum;
csum = (__u16 *) (buff + opt->offset); csum = (__u16 *) (buff + opt->offset);
*csum = hdr->cksum; if (*csum) {
/* in case cksum was not initialized */
__u32 sum = hdr->cksum;
sum += *csum;
*csum = hdr->cksum = (sum + (sum>>16));
} else {
*csum = hdr->cksum;
}
} else { } else {
if (net_ratelimit()) if (net_ratelimit())
printk(KERN_DEBUG "icmp: cksum offset too big\n"); printk(KERN_DEBUG "icmp: cksum offset too big\n");
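The new branch above no longer overwrites a caller-seeded checksum field; it adds the seed into the computed checksum with an end-around carry. A minimal stand-alone sketch of that same fold (an illustration, not kernel code):

/* Minimal user-space sketch (assumption: not kernel code) of the
 * end-around-carry fold used above when the caller has pre-seeded
 * the checksum field with its own partial sum. */
#include <stdint.h>
#include <stdio.h>

static uint16_t fold_add(uint16_t cksum, uint16_t seed)
{
	uint32_t sum = (uint32_t)cksum + seed;	/* may exceed 16 bits */

	return (uint16_t)(sum + (sum >> 16));	/* add the carry back in */
}

int main(void)
{
	printf("0x%04x\n", fold_add(0xfff0, 0x0020));	/* carry wraps: 0x0011 */
	return 0;
}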
...@@ -720,6 +728,10 @@ static int rawv6_setsockopt(struct sock *sk, int level, int optname, ...@@ -720,6 +728,10 @@ static int rawv6_setsockopt(struct sock *sk, int level, int optname,
switch (optname) { switch (optname) {
case IPV6_CHECKSUM: case IPV6_CHECKSUM:
/* You may get strange result with a positive odd offset;
RFC2292bis agrees with me. */
if (val > 0 && (val&1))
return(-EINVAL);
if (val < 0) { if (val < 0) {
opt->checksum = 0; opt->checksum = 0;
} else { } else {
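From user space, IPV6_CHECKSUM is set on a raw IPv6 socket via setsockopt(); with the check added above, a positive odd offset is rejected with EINVAL. A hedged sketch of such a call follows (SOL_RAW as the option level and protocol 253 are assumptions for illustration; running it needs raw-socket privileges):

/* Hedged user-space sketch: ask the kernel to fill in a 16-bit checksum
 * at byte offset 4 of the raw IPv6 payload.  The option level shown is
 * an assumption; an odd positive offset would now fail with EINVAL. */
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

int main(void)
{
	int fd = socket(AF_INET6, SOCK_RAW, 253);	/* 253: experimental proto */
	int offset = 4;					/* even offset: accepted */

	if (fd < 0) {
		perror("socket");			/* needs CAP_NET_RAW/root */
		return 1;
	}
	if (setsockopt(fd, SOL_RAW, IPV6_CHECKSUM, &offset, sizeof(offset)) < 0)
		perror("setsockopt(IPV6_CHECKSUM)");
	return 0;
}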
...@@ -817,6 +829,11 @@ static void rawv6_close(struct sock *sk, long timeout) ...@@ -817,6 +829,11 @@ static void rawv6_close(struct sock *sk, long timeout)
static int rawv6_init_sk(struct sock *sk) static int rawv6_init_sk(struct sock *sk)
{ {
if (inet_sk(sk)->num == IPPROTO_ICMPV6) {
struct raw6_opt *opt = raw6_sk(sk);
opt->checksum = 1;
opt->offset = 2;
}
return(0); return(0);
} }
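rawv6_init_sk() above enables checksumming by default for ICMPv6 raw sockets with an offset of 2, which is where the checksum field sits in the ICMPv6 header (after the one-byte type and code). An illustrative layout reminder, not taken from the patch:

/* Illustrative only: the ICMPv6 header starts with type (1 byte) and
 * code (1 byte), so the 16-bit checksum lands at byte offset 2. */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct icmp6_hdr_demo {
	uint8_t  icmp6_type;
	uint8_t  icmp6_code;
	uint16_t icmp6_cksum;	/* offsetof(...) == 2 */
};

int main(void)
{
	printf("checksum offset = %zu\n",
	       offsetof(struct icmp6_hdr_demo, icmp6_cksum));
	return 0;
}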
......
...@@ -1117,7 +1117,7 @@ static void psched_tick(unsigned long dummy) ...@@ -1117,7 +1117,7 @@ static void psched_tick(unsigned long dummy)
psched_timer.expires = jiffies + 1*HZ; psched_timer.expires = jiffies + 1*HZ;
#else #else
unsigned long now = jiffies; unsigned long now = jiffies;
psched_time_base = ((u64)now)<<PSCHED_JSCALE; psched_time_base += ((u64)(now-psched_time_mark))<<PSCHED_JSCALE;
psched_time_mark = now; psched_time_mark = now;
psched_timer.expires = now + 60*60*HZ; psched_timer.expires = now + 60*60*HZ;
#endif #endif
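The psched_tick() change above makes psched_time_base accumulate the jiffies elapsed since the previous mark rather than being rebuilt from the absolute jiffies value, so the scaled clock keeps advancing consistently from tick to tick. A stand-alone sketch of the accumulating update (made-up names, not kernel code):

/* Stand-alone sketch (not kernel code) of the accumulating update.
 * JSCALE and the 32-bit "jiffies" stand-in are illustrative values. */
#include <stdint.h>
#include <stdio.h>

#define JSCALE 10

static uint64_t time_base;	/* scaled clock base */
static uint32_t time_mark;	/* jiffies at the last tick */

static void tick_accumulate(uint32_t now)
{
	/* Add only the elapsed delta; the unsigned subtraction stays
	 * correct even after the 32-bit counter wraps around. */
	time_base += ((uint64_t)(now - time_mark)) << JSCALE;
	time_mark = now;
}

int main(void)
{
	time_mark = 0xfffffff0u;	/* just before wraparound */
	tick_accumulate(0x00000010u);	/* 0x20 jiffies later */
	printf("base advanced by %llu jiffies\n",
	       (unsigned long long)(time_base >> JSCALE));
	return 0;
}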
......