Commit ed651326 authored by Linus Torvalds

Import 2.1.120pre1

parent 8b109aa9
...@@ -1233,12 +1233,12 @@ S: 7546 JA Enschede ...@@ -1233,12 +1233,12 @@ S: 7546 JA Enschede
S: Netherlands S: Netherlands
N: David S. Miller N: David S. Miller
E: davem@caip.rutgers.edu E: davem@dm.cobaltmicro.com
D: Sparc hacker D: Sparc and blue box hacker
D: New Linux-Activists maintainer D: Vger Linux mailing list co-maintainer
D: Linux Emacs elf/qmagic support + other libc/gcc things D: Linux Emacs elf/qmagic support + other libc/gcc things
D: Yee bore de yee bore! ;-) D: Yee bore de yee bore! ;-)
S: 111 Alta Tierra Court S: 331 Santa Rosa Drive
S: Los Gatos, California 95032 S: Los Gatos, California 95032
S: USA S: USA
......
VERSION = 2 VERSION = 2
PATCHLEVEL = 1 PATCHLEVEL = 1
SUBLEVEL = 119 SUBLEVEL = 120
ARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ -e s/arm.*/arm/ -e s/sa110/arm/) ARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ -e s/arm.*/arm/ -e s/sa110/arm/)
......
...@@ -91,85 +91,90 @@ bitrev(int b) ...@@ -91,85 +91,90 @@ bitrev(int b)
int int
mace_probe(struct device *dev) mace_probe(struct device *dev)
{ {
int j, rev; int j, rev;
struct mace_data *mp; struct mace_data *mp;
struct device_node *maces; struct device_node *mace;
unsigned char *addr; unsigned char *addr;
static int maces_found = 0;
maces = find_devices("mace"); static struct device_node *next_mace;
if (maces == 0)
return ENODEV; if (!maces_found) {
next_mace = find_devices("mace");
do { maces_found = 1;
if (maces->n_addrs != 3 || maces->n_intrs != 3) { }
printk(KERN_ERR "can't use MACE %s: expect 3 addrs and 3 intrs\n", mace = next_mace;
maces->full_name); if (mace == 0)
continue; return -ENODEV;
next_mace = mace->next;
if (mace->n_addrs != 3 || mace->n_intrs != 3) {
printk(KERN_ERR "can't use MACE %s: expect 3 addrs and 3 intrs\n",
mace->full_name);
return -ENODEV;
} }
if (dev == NULL) if (dev == NULL)
dev = init_etherdev(0, PRIV_BYTES); dev = init_etherdev(0, PRIV_BYTES);
else { else {
/* XXX this doesn't look right (but it's never used :-) */ dev->priv = kmalloc(PRIV_BYTES, GFP_KERNEL);
dev->priv = kmalloc(PRIV_BYTES, GFP_KERNEL); if (dev->priv == 0)
if (dev->priv == 0) return -ENOMEM;
return -ENOMEM;
} }
mp = (struct mace_data *) dev->priv; mp = (struct mace_data *) dev->priv;
dev->base_addr = maces->addrs[0].address; dev->base_addr = mace->addrs[0].address;
mp->mace = (volatile struct mace *) mp->mace = (volatile struct mace *)
ioremap(maces->addrs[0].address, 0x1000); ioremap(mace->addrs[0].address, 0x1000);
dev->irq = maces->intrs[0].line; dev->irq = mace->intrs[0].line;
if (request_irq(dev->irq, mace_interrupt, 0, "MACE", dev)) { if (request_irq(dev->irq, mace_interrupt, 0, "MACE", dev)) {
printk(KERN_ERR "MACE: can't get irq %d\n", dev->irq); printk(KERN_ERR "MACE: can't get irq %d\n", dev->irq);
return -EAGAIN; return -EAGAIN;
} }
if (request_irq(maces->intrs[1].line, mace_txdma_intr, 0, "MACE-txdma", if (request_irq(mace->intrs[1].line, mace_txdma_intr, 0, "MACE-txdma",
dev)) { dev)) {
printk(KERN_ERR "MACE: can't get irq %d\n", maces->intrs[1].line); printk(KERN_ERR "MACE: can't get irq %d\n", mace->intrs[1].line);
return -EAGAIN; return -EAGAIN;
} }
if (request_irq(maces->intrs[2].line, mace_rxdma_intr, 0, "MACE-rxdma", if (request_irq(mace->intrs[2].line, mace_rxdma_intr, 0, "MACE-rxdma",
dev)) { dev)) {
printk(KERN_ERR "MACE: can't get irq %d\n", maces->intrs[2].line); printk(KERN_ERR "MACE: can't get irq %d\n", mace->intrs[2].line);
return -EAGAIN; return -EAGAIN;
} }
addr = get_property(maces, "mac-address", NULL); addr = get_property(mace, "mac-address", NULL);
if (addr == NULL) { if (addr == NULL) {
addr = get_property(maces, "local-mac-address", NULL); addr = get_property(mace, "local-mac-address", NULL);
if (addr == NULL) { if (addr == NULL) {
printk(KERN_ERR "Can't get mac-address for MACE at %lx\n", printk(KERN_ERR "Can't get mac-address for MACE at %lx\n",
dev->base_addr); dev->base_addr);
return -EAGAIN; return -EAGAIN;
} }
} }
printk(KERN_INFO "%s: MACE at", dev->name); printk(KERN_INFO "%s: MACE at", dev->name);
rev = addr[0] == 0 && addr[1] == 0xA0; rev = addr[0] == 0 && addr[1] == 0xA0;
for (j = 0; j < 6; ++j) { for (j = 0; j < 6; ++j) {
dev->dev_addr[j] = rev? bitrev(addr[j]): addr[j]; dev->dev_addr[j] = rev? bitrev(addr[j]): addr[j];
printk("%c%.2x", (j? ':': ' '), dev->dev_addr[j]); printk("%c%.2x", (j? ':': ' '), dev->dev_addr[j]);
} }
printk("\n"); printk("\n");
mp = (struct mace_data *) dev->priv; mp = (struct mace_data *) dev->priv;
mp->maccc = ENXMT | ENRCV; mp->maccc = ENXMT | ENRCV;
mp->tx_dma = (volatile struct dbdma_regs *) mp->tx_dma = (volatile struct dbdma_regs *)
ioremap(maces->addrs[1].address, 0x1000); ioremap(mace->addrs[1].address, 0x1000);
mp->tx_dma_intr = maces->intrs[1].line; mp->tx_dma_intr = mace->intrs[1].line;
mp->rx_dma = (volatile struct dbdma_regs *) mp->rx_dma = (volatile struct dbdma_regs *)
ioremap(maces->addrs[2].address, 0x1000); ioremap(mace->addrs[2].address, 0x1000);
mp->rx_dma_intr = maces->intrs[2].line; mp->rx_dma_intr = mace->intrs[2].line;
mp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(mp + 1); mp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(mp + 1);
mp->rx_cmds = mp->tx_cmds + NCMDS_TX * N_TX_RING + 1; mp->rx_cmds = mp->tx_cmds + NCMDS_TX * N_TX_RING + 1;
memset(&mp->stats, 0, sizeof(mp->stats)); memset(&mp->stats, 0, sizeof(mp->stats));
memset((char *) mp->tx_cmds, 0, memset((char *) mp->tx_cmds, 0,
(NCMDS_TX*N_TX_RING + N_RX_RING + 2) * sizeof(struct dbdma_cmd)); (NCMDS_TX*N_TX_RING + N_RX_RING + 2) * sizeof(struct dbdma_cmd));
init_timer(&mp->tx_timeout); init_timer(&mp->tx_timeout);
mp->timeout_active = 0; mp->timeout_active = 0;
...@@ -182,9 +187,7 @@ mace_probe(struct device *dev) ...@@ -182,9 +187,7 @@ mace_probe(struct device *dev)
ether_setup(dev); ether_setup(dev);
} while ((maces = maces->next) != 0); return 0;
return 0;
} }
static void mace_reset(struct device *dev) static void mace_reset(struct device *dev)
......
...@@ -244,8 +244,8 @@ plip_init_dev(struct device *dev, struct parport *pb)) ...@@ -244,8 +244,8 @@ plip_init_dev(struct device *dev, struct parport *pb))
plip_wakeup, plip_interrupt, plip_wakeup, plip_interrupt,
PARPORT_DEV_LURK, dev); PARPORT_DEV_LURK, dev);
printk(version); printk(KERN_INFO "%s", version);
printk("%s: Parallel port at %#3lx, using IRQ %d\n", dev->name, printk(KERN_INFO "%s: Parallel port at %#3lx, using IRQ %d\n", dev->name,
dev->base_addr, dev->irq); dev->base_addr, dev->irq);
/* Fill in the generic fields of the device structure. */ /* Fill in the generic fields of the device structure. */
...@@ -537,7 +537,7 @@ plip_receive_packet(struct device *dev, struct net_local *nl, ...@@ -537,7 +537,7 @@ plip_receive_packet(struct device *dev, struct net_local *nl,
/* Malloc up new buffer. */ /* Malloc up new buffer. */
rcv->skb = dev_alloc_skb(rcv->length.h); rcv->skb = dev_alloc_skb(rcv->length.h);
if (rcv->skb == NULL) { if (rcv->skb == NULL) {
printk(KERN_WARNING "%s: Memory squeeze.\n", dev->name); printk(KERN_ERR "%s: Memory squeeze.\n", dev->name);
return ERROR; return ERROR;
} }
skb_put(rcv->skb,rcv->length.h); skb_put(rcv->skb,rcv->length.h);
...@@ -662,7 +662,7 @@ plip_send_packet(struct device *dev, struct net_local *nl, ...@@ -662,7 +662,7 @@ plip_send_packet(struct device *dev, struct net_local *nl,
unsigned int cx; unsigned int cx;
if (snd->skb == NULL || (lbuf = snd->skb->data) == NULL) { if (snd->skb == NULL || (lbuf = snd->skb->data) == NULL) {
printk(KERN_ERR "%s: send skb lost\n", dev->name); printk(KERN_DEBUG "%s: send skb lost\n", dev->name);
snd->state = PLIP_PK_DONE; snd->state = PLIP_PK_DONE;
snd->skb = NULL; snd->skb = NULL;
return ERROR; return ERROR;
...@@ -817,7 +817,7 @@ plip_interrupt(int irq, void *dev_id, struct pt_regs * regs) ...@@ -817,7 +817,7 @@ plip_interrupt(int irq, void *dev_id, struct pt_regs * regs)
unsigned char c0; unsigned char c0;
if (dev == NULL) { if (dev == NULL) {
printk(KERN_ERR "plip_interrupt: irq %d for unknown device.\n", irq); printk(KERN_DEBUG "plip_interrupt: irq %d for unknown device.\n", irq);
return; return;
} }
...@@ -861,7 +861,7 @@ plip_interrupt(int irq, void *dev_id, struct pt_regs * regs) ...@@ -861,7 +861,7 @@ plip_interrupt(int irq, void *dev_id, struct pt_regs * regs)
case PLIP_CN_ERROR: case PLIP_CN_ERROR:
spin_unlock_irq(&nl->lock); spin_unlock_irq(&nl->lock);
printk(KERN_WARNING "%s: receive interrupt in error state\n", dev->name); printk(KERN_ERR "%s: receive interrupt in error state\n", dev->name);
break; break;
} }
} }
...@@ -1083,7 +1083,8 @@ plip_get_stats(struct device *dev) ...@@ -1083,7 +1083,8 @@ plip_get_stats(struct device *dev)
return r; return r;
} }
static int plip_config(struct device *dev, struct ifmap *map) static int
plip_config(struct device *dev, struct ifmap *map)
{ {
struct net_local *nl = (struct net_local *) dev->priv; struct net_local *nl = (struct net_local *) dev->priv;
struct pardevice *pardev = nl->pardev; struct pardevice *pardev = nl->pardev;
...@@ -1091,8 +1092,8 @@ static int plip_config(struct device *dev, struct ifmap *map) ...@@ -1091,8 +1092,8 @@ static int plip_config(struct device *dev, struct ifmap *map)
if (dev->flags & IFF_UP) if (dev->flags & IFF_UP)
return -EBUSY; return -EBUSY;
printk(KERN_INFO "plip: Warning, changing irq with ifconfig will be obsoleted.\n"); printk(KERN_WARNING "plip: Warning, changing irq with ifconfig will be obsoleted.\n");
printk(KERN_INFO "plip: Next time, please set with /proc/parport/*/irq instead.\n"); printk(KERN_WARNING "plip: Next time, please set with /proc/parport/*/irq instead.\n");
if (map->irq != (unsigned char)-1) { if (map->irq != (unsigned char)-1) {
pardev->port->irq = dev->irq = map->irq; pardev->port->irq = dev->irq = map->irq;
...@@ -1177,7 +1178,7 @@ void plip_setup(char *str, int *ints) ...@@ -1177,7 +1178,7 @@ void plip_setup(char *str, int *ints)
/* disable driver on "parport=" or "parport=0" */ /* disable driver on "parport=" or "parport=0" */
parport[0] = -2; parport[0] = -2;
} else { } else {
printk(KERN_WARNING "warning: 'plip=0x%x' ignored\n", printk(KERN_WARNINGING "warning: 'plip=0x%x' ignored\n",
ints[1]); ints[1]);
} }
} }
......
/* $Id: sunlance.c,v 1.79 1998/06/04 09:54:58 jj Exp $ /* $Id: sunlance.c,v 1.81 1998/08/10 09:08:23 jj Exp $
* lance.c: Linux/Sparc/Lance driver * lance.c: Linux/Sparc/Lance driver
* *
* Written 1995, 1996 by Miguel de Icaza * Written 1995, 1996 by Miguel de Icaza
...@@ -1135,9 +1135,10 @@ __initfunc(int sparc_lance_probe (struct device *dev)) ...@@ -1135,9 +1135,10 @@ __initfunc(int sparc_lance_probe (struct device *dev))
return ENODEV; return ENODEV;
called++; called++;
if (idprom->id_machtype == (SM_SUN4|SM_4_330)) { if ((idprom->id_machtype == (SM_SUN4|SM_4_330)) ||
(idprom->id_machtype == (SM_SUN4|SM_4_470))) {
memset (&sdev, 0, sizeof(sdev)); memset (&sdev, 0, sizeof(sdev));
sdev.reg_addrs[0].phys_addr = SUN4_300_ETH_PHYSADDR; sdev.reg_addrs[0].phys_addr = sun4_eth_physaddr;
sdev.irqs[0] = 6; sdev.irqs[0] = 6;
return sparc_lance_init(dev, &sdev, 0, 0); return sparc_lance_init(dev, &sdev, 0, 0);
} }
......
UPDATE NEWS: version 1.33 - 26 Aug 98
Interrupt management in this driver has become, over
time, increasingly odd and difficult to explain - this
has been mostly due to my own mental inadequacies. In
recent kernels, it has failed to function at all when
compiled for SMP. I've fixed that problem, and after
taking a fresh look at interrupts in general, greatly
reduced the number of places where they're fiddled
with. Done some heavy testing and it looks very good.
The driver now makes use of the __initfunc() and
__initdata macros to save about 4k of kernel memory.
Once again, the same code works for both 2.0.xx and
2.1.xx kernels.
UPDATE NEWS: version 1.32 - 28 Mar 98 UPDATE NEWS: version 1.32 - 28 Mar 98
Removed the check for legal IN2000 hardware versions: Removed the check for legal IN2000 hardware versions:
......
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
* in2000.h - Linux device driver definitions for the * in2000.h - Linux device driver definitions for the
* Always IN2000 ISA SCSI card. * Always IN2000 ISA SCSI card.
* *
* IMPORTANT: This file is for version 1.32 - 28/Mar/1998 * IMPORTANT: This file is for version 1.33 - 26/Aug/1998
* *
* Copyright (c) 1996 John Shifflett, GeoLog Consulting * Copyright (c) 1996 John Shifflett, GeoLog Consulting
* john@geolog.com * john@geolog.com
...@@ -377,10 +377,29 @@ struct IN2000_hostdata { ...@@ -377,10 +377,29 @@ struct IN2000_hostdata {
#define PR_STOP 1<<7 #define PR_STOP 1<<7
int in2000_detect(Scsi_Host_Template *); #include <linux/version.h>
#if LINUX_VERSION_CODE < 0x020100 /* 2.0.xx */
# define in2000__INITFUNC(function) function
# define in2000__INIT
# define in2000__INITDATA
# define CLISPIN_LOCK(flags) do { save_flags(flags); cli(); } while(0)
# define CLISPIN_UNLOCK(flags) restore_flags(flags)
#else /* 2.1.xxx */
# include <linux/init.h>
# include <asm/spinlock.h>
# define in2000__INITFUNC(function) __initfunc(function)
# define in2000__INIT __init
# define in2000__INITDATA __initdata
# define CLISPIN_LOCK(flags) spin_lock_irqsave(&io_request_lock, flags)
# define CLISPIN_UNLOCK(flags) spin_unlock_irqrestore(&io_request_lock, flags)
#endif
int in2000_detect(Scsi_Host_Template *) in2000__INIT;
int in2000_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *)); int in2000_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
int in2000_abort(Scsi_Cmnd *); int in2000_abort(Scsi_Cmnd *);
void in2000_setup(char *, int *); void in2000_setup(char *, int *) in2000__INIT;
int in2000_proc_info(char *, char **, off_t, int, int, int); int in2000_proc_info(char *, char **, off_t, int, int, int);
struct proc_dir_entry proc_scsi_in2000; struct proc_dir_entry proc_scsi_in2000;
int in2000_biosparam(struct scsi_disk *, kdev_t, int *); int in2000_biosparam(struct scsi_disk *, kdev_t, int *);
...@@ -392,6 +411,33 @@ int in2000_reset(Scsi_Cmnd *, unsigned int); ...@@ -392,6 +411,33 @@ int in2000_reset(Scsi_Cmnd *, unsigned int);
#define IN2000_CPL 2 #define IN2000_CPL 2
#define IN2000_HOST_ID 7 #define IN2000_HOST_ID 7
#if LINUX_VERSION_CODE < 0x020100 /* 2.0.xx */
#define IN2000 { NULL, /* link pointer for modules */ \
NULL, /* usage_count for modules */ \
&proc_scsi_in2000, /* pointer to /proc/scsi directory entry */ \
in2000_proc_info, /* pointer to proc info function */ \
"Always IN2000", /* device name */ \
in2000_detect, /* returns number of in2000's found */ \
NULL, /* optional unload function for modules */ \
NULL, /* optional misc info function */ \
NULL, /* send scsi command, wait for completion */ \
in2000_queuecommand, /* queue scsi command, don't wait */ \
in2000_abort, /* abort current command */ \
in2000_reset, /* reset scsi bus */ \
NULL, /* slave_attach - unused */ \
in2000_biosparam, /* figures out BIOS parameters for lilo, etc */ \
IN2000_CAN_Q, /* max commands we can queue up */ \
IN2000_HOST_ID, /* host-adapter scsi id */ \
IN2000_SG, /* scatter-gather table size */ \
IN2000_CPL, /* commands per lun */ \
0, /* board counter */ \
0, /* unchecked dma */ \
DISABLE_CLUSTERING \
}
#else /* 2.1.xxx */
#define IN2000 { proc_dir: &proc_scsi_in2000, /* pointer to /proc/scsi directory entry */ \ #define IN2000 { proc_dir: &proc_scsi_in2000, /* pointer to /proc/scsi directory entry */ \
proc_info: in2000_proc_info, /* pointer to proc info function */ \ proc_info: in2000_proc_info, /* pointer to proc info function */ \
name: "Always IN2000", /* device name */ \ name: "Always IN2000", /* device name */ \
...@@ -408,5 +454,7 @@ int in2000_reset(Scsi_Cmnd *, unsigned int); ...@@ -408,5 +454,7 @@ int in2000_reset(Scsi_Cmnd *, unsigned int);
use_new_eh_code: 0 /* new error code - not using it yet */ \ use_new_eh_code: 0 /* new error code - not using it yet */ \
} }
#endif
#endif /* IN2000_H */ #endif /* IN2000_H */
...@@ -569,6 +569,15 @@ int flush_old_exec(struct linux_binprm * bprm) ...@@ -569,6 +569,15 @@ int flush_old_exec(struct linux_binprm * bprm)
return retval; return retval;
} }
/*
* We mustn't allow tracing of suid binaries, unless
* the tracer has the capability to trace anything..
*/
static inline int must_not_trace_exec(struct task_struct * p)
{
return (p->flags & PF_PTRACED) && !cap_raised(p->p_pptr->cap_effective, CAP_SYS_PTRACE);
}
/* /*
* Fill the binprm structure from the inode. * Fill the binprm structure from the inode.
* Check permissions, then read the first 512 bytes * Check permissions, then read the first 512 bytes
...@@ -657,15 +666,12 @@ int prepare_binprm(struct linux_binprm *bprm) ...@@ -657,15 +666,12 @@ int prepare_binprm(struct linux_binprm *bprm)
} }
} }
if (id_change || cap_raised) { if (id_change || cap_raised) {
/* We can't suid-execute if we're sharing parts of the executable */ /* We can't suid-execute if we're sharing parts of the executable */
/* or if we're being traced (or if suid execs are not allowed) */ /* or if we're being traced (or if suid execs are not allowed) */
/* (current->mm->count > 1 is ok, as we'll get a new mm anyway) */ /* (current->mm->count > 1 is ok, as we'll get a new mm anyway) */
if (IS_NOSUID(inode) if (IS_NOSUID(inode)
|| (current->flags & PF_PTRACED) || must_not_trace_exec(current)
|| (atomic_read(&current->fs->count) > 1) || (atomic_read(&current->fs->count) > 1)
|| (atomic_read(&current->sig->count) > 1) || (atomic_read(&current->sig->count) > 1)
|| (atomic_read(&current->files->count) > 1)) { || (atomic_read(&current->files->count) > 1)) {
......
...@@ -114,6 +114,7 @@ struct ifreq ...@@ -114,6 +114,7 @@ struct ifreq
int ifru_mtu; int ifru_mtu;
struct ifmap ifru_map; struct ifmap ifru_map;
char ifru_slave[IFNAMSIZ]; /* Just fits the size */ char ifru_slave[IFNAMSIZ]; /* Just fits the size */
char ifru_newname[IFNAMSIZ];
__kernel_caddr_t ifru_data; __kernel_caddr_t ifru_data;
} ifr_ifru; } ifr_ifru;
}; };
...@@ -133,6 +134,7 @@ struct ifreq ...@@ -133,6 +134,7 @@ struct ifreq
#define ifr_ifindex ifr_ifru.ifru_ivalue /* interface index */ #define ifr_ifindex ifr_ifru.ifru_ivalue /* interface index */
#define ifr_bandwidth ifr_ifru.ifru_ivalue /* link bandwidth */ #define ifr_bandwidth ifr_ifru.ifru_ivalue /* link bandwidth */
#define ifr_qlen ifr_ifru.ifru_ivalue /* Queue length */ #define ifr_qlen ifr_ifru.ifru_ivalue /* Queue length */
#define ifr_newname ifr_ifru.ifru_newname /* New name */
/* /*
* Structure used in SIOCGIFCONF request. * Structure used in SIOCGIFCONF request.
......
...@@ -25,9 +25,10 @@ struct sockaddr_ll ...@@ -25,9 +25,10 @@ struct sockaddr_ll
#define PACKET_BROADCAST 1 /* To all */ #define PACKET_BROADCAST 1 /* To all */
#define PACKET_MULTICAST 2 /* To group */ #define PACKET_MULTICAST 2 /* To group */
#define PACKET_OTHERHOST 3 /* To someone else */ #define PACKET_OTHERHOST 3 /* To someone else */
#define PACKET_OUTGOING 4 /* Originated by us */ #define PACKET_OUTGOING 4 /* Outgoing of any type */
#define PACKET_LOOPBACK 5 /* These ones are invisible by user level */
#define PACKET_FASTROUTE 6 #define PACKET_LOOPBACK 5 /* MC/BRD frame looped back */
#define PACKET_FASTROUTE 6 /* Fastrouted frame */
/* Packet socket options */ /* Packet socket options */
......
...@@ -35,7 +35,14 @@ struct in6_addr ...@@ -35,7 +35,14 @@ struct in6_addr
__u16 u6_addr16[8]; __u16 u6_addr16[8];
__u32 u6_addr32[4]; __u32 u6_addr32[4];
#if (~0UL) > 0xffffffff #if (~0UL) > 0xffffffff
#ifndef __RELAX_IN6_ADDR_ALIGNMENT
/* Alas, protocols do not respect 64bit alignment.
rsvp/pim/... are broken. However, it is a good
idea to force correct alignment always, when
it is possible.
*/
__u64 u6_addr64[2]; __u64 u6_addr64[2];
#endif
#endif #endif
} in6_u; } in6_u;
#define s6_addr in6_u.u6_addr8 #define s6_addr in6_u.u6_addr8
...@@ -100,20 +107,35 @@ struct ipv6_mreq { ...@@ -100,20 +107,35 @@ struct ipv6_mreq {
#define IPPROTO_NONE 59 /* IPv6 no next header */ #define IPPROTO_NONE 59 /* IPv6 no next header */
#define IPPROTO_DSTOPTS 60 /* IPv6 destination options */ #define IPPROTO_DSTOPTS 60 /* IPv6 destination options */
/*
* IPv6 TLV options.
*/
#define IPV6_TLV_PAD0 0
#define IPV6_TLV_PADN 1
#define IPV6_TLV_ROUTERALERT 20
#define IPV6_TLV_JUMBO 194
/* /*
* IPV6 socket options * IPV6 socket options
*/ */
#define IPV6_ADDRFORM 1 #define IPV6_ADDRFORM 1
#define IPV6_PKTINFO 2 #define IPV6_PKTINFO 2
#define IPV6_RXHOPOPTS 3 /* obsolete name */ #define IPV6_HOPOPTS 3
#define IPV6_RXDSTOPTS 4 /* obsolete name */ #define IPV6_DSTOPTS 4
#define IPV6_HOPOPTS IPV6_RXHOPOPTS /* new name */ #define IPV6_RTHDR 5
#define IPV6_DSTOPTS IPV6_RXDSTOPTS /* new name */
#define IPV6_RXSRCRT 5
#define IPV6_PKTOPTIONS 6 #define IPV6_PKTOPTIONS 6
#define IPV6_CHECKSUM 7 #define IPV6_CHECKSUM 7
#define IPV6_HOPLIMIT 8 #define IPV6_HOPLIMIT 8
#define IPV6_NEXTHOP 9
#define IPV6_AUTHHDR 10
#if 0
/* Aliases for obsolete names */
#define IPV6_RXHOPOPTS IPV6_HOPOPTS
#define IPV6_RXDSTOPTS IPV6_DSTOPTS
#define IPV6_RXSRCRT IPV6_RTHDR
#endif
/* /*
* Alternative names * Alternative names
......
...@@ -4,6 +4,9 @@ ...@@ -4,6 +4,9 @@
#include <linux/in6.h> #include <linux/in6.h>
#include <asm/byteorder.h> #include <asm/byteorder.h>
/* The latest drafts declared increase in minimal mtu up to 1280. */
#define IPV6_MIN_MTU 1280
/* /*
* Advanced API * Advanced API
...@@ -58,8 +61,6 @@ struct ipv6_opt_hdr { ...@@ -58,8 +61,6 @@ struct ipv6_opt_hdr {
#define ipv6_optlen(p) (((p)->hdrlen+1) << 3) #define ipv6_optlen(p) (((p)->hdrlen+1) << 3)
#endif #endif
/* /*
* routing header type 0 (used in cmsghdr struct) * routing header type 0 (used in cmsghdr struct)
*/ */
...@@ -72,10 +73,11 @@ struct rt0_hdr { ...@@ -72,10 +73,11 @@ struct rt0_hdr {
#define rt0_type rt_hdr.type; #define rt0_type rt_hdr.type;
}; };
#ifdef __KERNEL__
/* /*
* IPv6 fixed header * IPv6 fixed header
*
* BEWARE, it is incorrect. The first 4 bits of flow_lbl
* are glued to priority now, forming "class".
*/ */
struct ipv6hdr { struct ipv6hdr {
...@@ -87,7 +89,7 @@ struct ipv6hdr { ...@@ -87,7 +89,7 @@ struct ipv6hdr {
priority:4; priority:4;
#else #else
#error "Please fix <asm/byteorder.h>" #error "Please fix <asm/byteorder.h>"
#endif #endif
__u8 flow_lbl[3]; __u8 flow_lbl[3];
__u16 payload_len; __u16 payload_len;
...@@ -98,28 +100,24 @@ struct ipv6hdr { ...@@ -98,28 +100,24 @@ struct ipv6hdr {
struct in6_addr daddr; struct in6_addr daddr;
}; };
/* #ifdef __KERNEL__
* The length of this struct cannot be greater than the length of
* the proto_priv field in a sk_buff which is currently
* defined to be 16 bytes.
* Pointers take upto 8 bytes (sizeof(void *) is 8 on the alpha).
*/
struct ipv6_options
{
/* length of extension headers */
__u16 opt_flen; /* after fragment hdr */
__u16 opt_nflen; /* before fragment hdr */
/* /*
* protocol options This structure contains results of exthdrs parsing
* usually carried in IPv6 extension headers as offsets from skb->nh.
*/ */
struct ipv6_rt_hdr *srcrt; /* Routing Header */ struct inet6_skb_parm
{
int iif;
__u16 ra;
__u16 hop;
__u16 auth;
__u16 dst0;
__u16 srcrt;
__u16 dst1;
}; };
#endif #endif
#endif #endif
...@@ -26,7 +26,6 @@ enum ...@@ -26,7 +26,6 @@ enum
#define RTF_ALLONLINK 0x00020000 /* fallback, no routers on link */ #define RTF_ALLONLINK 0x00020000 /* fallback, no routers on link */
#define RTF_ADDRCONF 0x00040000 /* addrconf route - RA */ #define RTF_ADDRCONF 0x00040000 /* addrconf route - RA */
#define RTF_LINKRT 0x00100000 /* link specific - device match */
#define RTF_NONEXTHOP 0x00200000 /* route with no nexthop */ #define RTF_NONEXTHOP 0x00200000 /* route with no nexthop */
#define RTF_EXPIRES 0x00400000 #define RTF_EXPIRES 0x00400000
......
...@@ -232,8 +232,7 @@ struct device ...@@ -232,8 +232,7 @@ struct device
unsigned short flags; /* interface flags (a la BSD) */ unsigned short flags; /* interface flags (a la BSD) */
unsigned short gflags; unsigned short gflags;
unsigned short metric; /* routing metric (not used) */ unsigned mtu; /* interface MTU value */
unsigned short mtu; /* interface MTU value */
unsigned short type; /* interface hardware type */ unsigned short type; /* interface hardware type */
unsigned short hard_header_len; /* hardware hdr length */ unsigned short hard_header_len; /* hardware hdr length */
void *priv; /* pointer to private data */ void *priv; /* pointer to private data */
......
...@@ -16,8 +16,8 @@ struct sockaddr_nl ...@@ -16,8 +16,8 @@ struct sockaddr_nl
{ {
sa_family_t nl_family; /* AF_NETLINK */ sa_family_t nl_family; /* AF_NETLINK */
unsigned short nl_pad; /* zero */ unsigned short nl_pad; /* zero */
__kernel_pid_t nl_pid; /* process pid */ __u32 nl_pid; /* process pid */
unsigned nl_groups; /* multicast groups mask */ __u32 nl_groups; /* multicast groups mask */
}; };
struct nlmsghdr struct nlmsghdr
...@@ -26,7 +26,7 @@ struct nlmsghdr ...@@ -26,7 +26,7 @@ struct nlmsghdr
__u16 nlmsg_type; /* Message content */ __u16 nlmsg_type; /* Message content */
__u16 nlmsg_flags; /* Additional flags */ __u16 nlmsg_flags; /* Additional flags */
__u32 nlmsg_seq; /* Sequence number */ __u32 nlmsg_seq; /* Sequence number */
__kernel_pid_t nlmsg_pid; /* Sending process PID */ __u32 nlmsg_pid; /* Sending process PID */
}; };
/* Flags values */ /* Flags values */
...@@ -64,7 +64,7 @@ struct nlmsghdr ...@@ -64,7 +64,7 @@ struct nlmsghdr
#define NLMSG_DATA(nlh) ((void*)(((char*)nlh) + NLMSG_LENGTH(0))) #define NLMSG_DATA(nlh) ((void*)(((char*)nlh) + NLMSG_LENGTH(0)))
#define NLMSG_NEXT(nlh,len) ((len) -= NLMSG_ALIGN((nlh)->nlmsg_len), \ #define NLMSG_NEXT(nlh,len) ((len) -= NLMSG_ALIGN((nlh)->nlmsg_len), \
(struct nlmsghdr*)(((char*)(nlh)) + NLMSG_ALIGN((nlh)->nlmsg_len))) (struct nlmsghdr*)(((char*)(nlh)) + NLMSG_ALIGN((nlh)->nlmsg_len)))
#define NLMSG_OK(nlh,len) ((nlh)->nlmsg_len >= sizeof(struct nlmsghdr) && \ #define NLMSG_OK(nlh,len) ((len) > 0 && (nlh)->nlmsg_len >= sizeof(struct nlmsghdr) && \
(nlh)->nlmsg_len <= (len)) (nlh)->nlmsg_len <= (len))
#define NLMSG_PAYLOAD(nlh,len) ((nlh)->nlmsg_len - NLMSG_SPACE((len))) #define NLMSG_PAYLOAD(nlh,len) ((nlh)->nlmsg_len - NLMSG_SPACE((len)))
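A minimal sketch (not part of this commit) of how these macros are normally used together to walk a buffer of netlink messages; "buf" and "buflen" are assumed names for the received buffer and its length. The extra (len) > 0 test added to NLMSG_OK above is what stops this loop once the remaining length is exhausted instead of reading past the end of the buffer.

	struct nlmsghdr *nlh = (struct nlmsghdr *) buf;	/* assumed: start of received buffer */
	int remaining = buflen;				/* assumed: number of bytes received */

	while (NLMSG_OK(nlh, remaining)) {
		void *payload = NLMSG_DATA(nlh);	/* data following the fixed header */
		/* ... dispatch on nlh->nlmsg_type, payload length NLMSG_PAYLOAD(nlh, 0) ... */
		nlh = NLMSG_NEXT(nlh, remaining);	/* advances nlh and decrements remaining */
	}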
...@@ -86,10 +86,11 @@ struct nlmsgerr ...@@ -86,10 +86,11 @@ struct nlmsgerr
struct netlink_skb_parms struct netlink_skb_parms
{ {
struct ucred creds; /* Skb credentials */ struct ucred creds; /* Skb credentials */
pid_t pid; __u32 pid;
unsigned groups; __u32 groups;
pid_t dst_pid; __u32 dst_pid;
unsigned dst_groups; __u32 dst_groups;
kernel_cap_t eff_cap;
}; };
#define NETLINK_CB(skb) (*(struct netlink_skb_parms*)&((skb)->cb)) #define NETLINK_CB(skb) (*(struct netlink_skb_parms*)&((skb)->cb))
...@@ -102,10 +103,10 @@ extern int netlink_post(int unit, struct sk_buff *skb); ...@@ -102,10 +103,10 @@ extern int netlink_post(int unit, struct sk_buff *skb);
extern int init_netlink(void); extern int init_netlink(void);
extern struct sock *netlink_kernel_create(int unit, void (*input)(struct sock *sk, int len)); extern struct sock *netlink_kernel_create(int unit, void (*input)(struct sock *sk, int len));
extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err); extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err);
extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, pid_t pid, int nonblock); extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 pid, int nonblock);
extern void netlink_broadcast(struct sock *ssk, struct sk_buff *skb, pid_t pid, extern void netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 pid,
unsigned group, int allocation); __u32 group, int allocation);
extern void netlink_set_err(struct sock *ssk, pid_t pid, unsigned group, int code); extern void netlink_set_err(struct sock *ssk, __u32 pid, __u32 group, int code);
/* /*
* skb should fit one page. This choice is good for headerless malloc. * skb should fit one page. This choice is good for headerless malloc.
...@@ -125,28 +126,8 @@ struct netlink_callback ...@@ -125,28 +126,8 @@ struct netlink_callback
long args[4]; long args[4];
}; };
#if 0
void* nlmsg_broadcast(struct sock*, unsigned long type, int len, unsigned groups);
struct skb_buff *nlmsg_alloc(unsigned long type, int len,
unsigned long seq, unsigned long pid, int allocation);
void __nlmsg_transmit(struct sock*, int allocation);
extern __inline__ void nlmsg_release(struct sk_buff *skb)
{
atomic_dec(skb->users);
}
extern __inline__ void nlmsg_transmit(struct sk_buff *sk, int allocation)
{
if (sk->write_queue.qlen)
__nlmsg_transmit(sk, allocation);
}
#endif
extern __inline__ struct nlmsghdr * extern __inline__ struct nlmsghdr *
__nlmsg_put(struct sk_buff *skb, pid_t pid, u32 seq, int type, int len) __nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len)
{ {
struct nlmsghdr *nlh; struct nlmsghdr *nlh;
int size = NLMSG_LENGTH(len); int size = NLMSG_LENGTH(len);
......
...@@ -98,6 +98,7 @@ extern __inline__ int notifier_call_chain(struct notifier_block **n, unsigned lo ...@@ -98,6 +98,7 @@ extern __inline__ int notifier_call_chain(struct notifier_block **n, unsigned lo
#define NETDEV_CHANGEMTU 0x0007 #define NETDEV_CHANGEMTU 0x0007
#define NETDEV_CHANGEADDR 0x0008 #define NETDEV_CHANGEADDR 0x0008
#define NETDEV_GOING_DOWN 0x0009 #define NETDEV_GOING_DOWN 0x0009
#define NETDEV_CHANGENAME 0x000A
#define SYS_DOWN 0x0001 /* Notify of system down */ #define SYS_DOWN 0x0001 /* Notify of system down */
#define SYS_RESTART SYS_DOWN #define SYS_RESTART SYS_DOWN
......
...@@ -119,7 +119,7 @@ enum net_directory_inos { ...@@ -119,7 +119,7 @@ enum net_directory_inos {
PROC_NET_AX25_BPQETHER, PROC_NET_AX25_BPQETHER,
PROC_NET_IP_MASQ_APP, PROC_NET_IP_MASQ_APP,
PROC_NET_RT6, PROC_NET_RT6,
PROC_NET_RT6_TREE, PROC_NET_SNMP6,
PROC_NET_RT6_STATS, PROC_NET_RT6_STATS,
PROC_NET_NDISC, PROC_NET_NDISC,
PROC_NET_STRIP_STATUS, PROC_NET_STRIP_STATUS,
......
...@@ -5,7 +5,6 @@ ...@@ -5,7 +5,6 @@
#include <linux/netlink.h> #include <linux/netlink.h>
#define RTNL_DEBUG 1 #define RTNL_DEBUG 1
/* #define CONFIG_RTNL_OLD_IFINFO 1 */
/**** /****
...@@ -66,14 +65,14 @@ struct rtattr ...@@ -66,14 +65,14 @@ struct rtattr
#define RTA_ALIGNTO 4 #define RTA_ALIGNTO 4
#define RTA_ALIGN(len) ( ((len)+RTA_ALIGNTO-1) & ~(RTA_ALIGNTO-1) ) #define RTA_ALIGN(len) ( ((len)+RTA_ALIGNTO-1) & ~(RTA_ALIGNTO-1) )
#define RTA_OK(rta,len) ((rta)->rta_len >= sizeof(struct rtattr) && \ #define RTA_OK(rta,len) ((len) > 0 && (rta)->rta_len >= sizeof(struct rtattr) && \
(rta)->rta_len <= (len)) (rta)->rta_len <= (len))
#define RTA_NEXT(rta,attrlen) ((attrlen) -= RTA_ALIGN((rta)->rta_len), \ #define RTA_NEXT(rta,attrlen) ((attrlen) -= RTA_ALIGN((rta)->rta_len), \
(struct rtattr*)(((char*)(rta)) + RTA_ALIGN((rta)->rta_len))) (struct rtattr*)(((char*)(rta)) + RTA_ALIGN((rta)->rta_len)))
#define RTA_LENGTH(len) (RTA_ALIGN(sizeof(struct rtattr)) + (len)) #define RTA_LENGTH(len) (RTA_ALIGN(sizeof(struct rtattr)) + (len))
#define RTA_SPACE(len) RTA_ALIGN(RTA_LENGTH(len)) #define RTA_SPACE(len) RTA_ALIGN(RTA_LENGTH(len))
#define RTA_DATA(rta) ((void*)(((char*)(rta)) + RTA_LENGTH(0))) #define RTA_DATA(rta) ((void*)(((char*)(rta)) + RTA_LENGTH(0)))
#define RTA_PAYLOAD(rta) ((rta)->rta_len - RTA_LENGTH(0)) #define RTA_PAYLOAD(rta) ((int)((rta)->rta_len) - RTA_LENGTH(0))
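The same pattern applies to route attributes; a hedged sketch of iterating them follows. "first_attr" and "bytes_left" are assumed to be the first struct rtattr after the fixed rtmsg header and the number of bytes remaining in the message; the (len) > 0 test added to RTA_OK above terminates the walk cleanly.

	struct rtattr *rta = first_attr;	/* assumed: first attribute in the message */
	int attrlen = bytes_left;		/* assumed: bytes remaining after struct rtmsg */
	u32 gw = 0;

	while (RTA_OK(rta, attrlen)) {
		if (rta->rta_type == RTA_GATEWAY && RTA_PAYLOAD(rta) == 4)
			gw = *(u32 *) RTA_DATA(rta);	/* IPv4 next hop, if present */
		rta = RTA_NEXT(rta, attrlen);		/* advances rta and decrements attrlen */
	}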
...@@ -91,18 +90,9 @@ struct rtmsg ...@@ -91,18 +90,9 @@ struct rtmsg
unsigned char rtm_table; /* Routing table id */ unsigned char rtm_table; /* Routing table id */
unsigned char rtm_protocol; /* Routing protocol; see below */ unsigned char rtm_protocol; /* Routing protocol; see below */
#ifdef CONFIG_RTNL_OLD_IFINFO
unsigned char rtm_nhs; /* Number of nexthops */
#else
unsigned char rtm_scope; /* See below */ unsigned char rtm_scope; /* See below */
#endif
unsigned char rtm_type; /* See below */ unsigned char rtm_type; /* See below */
#ifdef CONFIG_RTNL_OLD_IFINFO
unsigned short rtm_optlen; /* Byte length of rtm_opt */
unsigned char rtm_scope; /* See below */
unsigned char rtm_whatsit; /* Unused byte */
#endif
unsigned rtm_flags; unsigned rtm_flags;
}; };
...@@ -176,9 +166,6 @@ enum rt_scope_t ...@@ -176,9 +166,6 @@ enum rt_scope_t
#define RTM_F_NOTIFY 0x100 /* Notify user of route change */ #define RTM_F_NOTIFY 0x100 /* Notify user of route change */
#define RTM_F_CLONED 0x200 /* This route is cloned */ #define RTM_F_CLONED 0x200 /* This route is cloned */
#define RTM_F_EQUALIZE 0x400 /* Multipath equalizer: NI */ #define RTM_F_EQUALIZE 0x400 /* Multipath equalizer: NI */
#ifdef CONFIG_RTNL_OLD_IFINFO
#define RTM_F_NOPMTUDISC 0x800 /* Do not make PMTU discovery */
#endif
/* Reserved table identifiers */ /* Reserved table identifiers */
...@@ -206,17 +193,10 @@ enum rtattr_type_t ...@@ -206,17 +193,10 @@ enum rtattr_type_t
RTA_GATEWAY, RTA_GATEWAY,
RTA_PRIORITY, RTA_PRIORITY,
RTA_PREFSRC, RTA_PREFSRC,
#ifndef CONFIG_RTNL_OLD_IFINFO
RTA_METRICS, RTA_METRICS,
RTA_MULTIPATH, RTA_MULTIPATH,
RTA_PROTOINFO, RTA_PROTOINFO,
RTA_FLOW, RTA_FLOW,
#else
RTA_WINDOW,
RTA_RTT,
RTA_MTU,
RTA_IFNAME,
#endif
RTA_CACHEINFO RTA_CACHEINFO
}; };
...@@ -253,18 +233,12 @@ struct rtnexthop ...@@ -253,18 +233,12 @@ struct rtnexthop
#define RTNH_ALIGNTO 4 #define RTNH_ALIGNTO 4
#define RTNH_ALIGN(len) ( ((len)+RTNH_ALIGNTO-1) & ~(RTNH_ALIGNTO-1) ) #define RTNH_ALIGN(len) ( ((len)+RTNH_ALIGNTO-1) & ~(RTNH_ALIGNTO-1) )
#define RTNH_OK(rtnh,len) ((rtnh)->rtnh_len >= sizeof(struct rtnexthop) && \ #define RTNH_OK(rtnh,len) ((rtnh)->rtnh_len >= sizeof(struct rtnexthop) && \
(rtnh)->rtnh_len <= (len)) ((int)(rtnh)->rtnh_len) <= (len))
#define RTNH_NEXT(rtnh) ((struct rtnexthop*)(((char*)(rtnh)) + RTNH_ALIGN((rtnh)->rtnh_len))) #define RTNH_NEXT(rtnh) ((struct rtnexthop*)(((char*)(rtnh)) + RTNH_ALIGN((rtnh)->rtnh_len)))
#define RTNH_LENGTH(len) (RTNH_ALIGN(sizeof(struct rtnexthop)) + (len)) #define RTNH_LENGTH(len) (RTNH_ALIGN(sizeof(struct rtnexthop)) + (len))
#define RTNH_SPACE(len) RTNH_ALIGN(RTNH_LENGTH(len)) #define RTNH_SPACE(len) RTNH_ALIGN(RTNH_LENGTH(len))
#define RTNH_DATA(rtnh) ((struct rtattr*)(((char*)(rtnh)) + RTNH_LENGTH(0))) #define RTNH_DATA(rtnh) ((struct rtattr*)(((char*)(rtnh)) + RTNH_LENGTH(0)))
#ifdef CONFIG_RTNL_OLD_IFINFO
#define RTM_RTNH(r) ((struct rtnexthop*)(((char*)(r)) + NLMSG_ALIGN(sizeof(struct rtmsg)) \
+ NLMSG_ALIGN((r)->rtm_optlen)))
#define RTM_NHLEN(nlh,r) ((nlh)->nlmsg_len - NLMSG_SPACE(sizeof(struct rtmsg)) - NLMSG_ALIGN((r)->rtm_optlen))
#endif
/* RTM_CACHEINFO */ /* RTM_CACHEINFO */
struct rta_cacheinfo struct rta_cacheinfo
...@@ -424,35 +398,6 @@ struct rtgenmsg ...@@ -424,35 +398,6 @@ struct rtgenmsg
* on network protocol. * on network protocol.
*/ */
#ifdef CONFIG_RTNL_OLD_IFINFO
struct ifinfomsg
{
unsigned char ifi_family; /* Dummy */
unsigned char ifi_addrlen; /* Length of HW address */
unsigned short ifi_pad__;
int ifi_index; /* Link index */
int ifi_link; /* Physical device */
char ifi_name[IFNAMSIZ];
struct sockaddr ifi_address; /* HW address */
struct sockaddr ifi_broadcast; /* HW broadcast */
unsigned ifi_flags; /* IFF_* flags */
int ifi_mtu; /* Link mtu */
char ifi_qdiscname[IFNAMSIZ];/* Id of packet scheduler */
int ifi_qdisc; /* Packet scheduler handle */
};
enum
{
IFLA_UNSPEC,
IFLA_ADDRESS,
IFLA_BROADCAST,
IFLA_IFNAME,
IFLA_QDISC,
IFLA_STATS
};
#else
struct ifinfomsg struct ifinfomsg
{ {
unsigned char ifi_family; unsigned char ifi_family;
...@@ -475,8 +420,6 @@ enum ...@@ -475,8 +420,6 @@ enum
IFLA_STATS IFLA_STATS
}; };
#endif
#define IFLA_MAX IFLA_STATS #define IFLA_MAX IFLA_STATS
...@@ -588,7 +531,7 @@ struct rtnetlink_link ...@@ -588,7 +531,7 @@ struct rtnetlink_link
extern struct rtnetlink_link * rtnetlink_links[NPROTO]; extern struct rtnetlink_link * rtnetlink_links[NPROTO];
extern int rtnetlink_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb); extern int rtnetlink_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb);
extern int rtnetlink_send(struct sk_buff *skb, u32 pid, unsigned group, int echo); extern int rtnetlink_send(struct sk_buff *skb, u32 pid, u32 group, int echo);
extern void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const void *data); extern void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const void *data);
......
...@@ -537,6 +537,19 @@ extern __inline__ struct sk_buff *dev_alloc_skb(unsigned int length) ...@@ -537,6 +537,19 @@ extern __inline__ struct sk_buff *dev_alloc_skb(unsigned int length)
return skb; return skb;
} }
extern __inline__ struct sk_buff *
skb_cow(struct sk_buff *skb, unsigned int headroom)
{
headroom = (headroom+15)&~15;
if ((unsigned)skb_headroom(skb) < headroom || skb_cloned(skb)) {
struct sk_buff *skb2 = skb_realloc_headroom(skb, headroom);
kfree_skb(skb);
skb = skb2;
}
return skb;
}
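A hedged usage note, not taken from this commit: skb_cow() has to be used through its return value, because when it does reallocate (too little headroom, or a cloned skb) the original buffer has already been freed, and on allocation failure NULL comes back; the requested headroom is also rounded up to a multiple of 16. Something like:

	skb = skb_cow(skb, hh_len);	/* hh_len: assumed headroom requirement */
	if (skb == NULL)
		return;			/* original skb was freed inside skb_cow() */
	/* it is now safe to skb_push() up to hh_len bytes */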
extern struct sk_buff * skb_recv_datagram(struct sock *sk,unsigned flags,int noblock, int *err); extern struct sk_buff * skb_recv_datagram(struct sock *sk,unsigned flags,int noblock, int *err);
extern unsigned int datagram_poll(struct file *file, struct socket *sock, struct poll_table_struct *wait); extern unsigned int datagram_poll(struct file *file, struct socket *sock, struct poll_table_struct *wait);
extern int skb_copy_datagram(struct sk_buff *from, int offset, char *to,int size); extern int skb_copy_datagram(struct sk_buff *from, int offset, char *to,int size);
......
...@@ -88,20 +88,27 @@ struct cmsghdr { ...@@ -88,20 +88,27 @@ struct cmsghdr {
/* /*
* Get the next cmsg header * Get the next cmsg header
*
* PLEASE, do not touch this function. If you think, that it is
* incorrect, grep kernel sources and think about consequences
* before trying to improve it.
*
* Now it always returns valid, not truncated ancillary object
* HEADER. But caller still MUST check, that cmsg->cmsg_len is
* inside range, given by msg->msg_controllen before using
* ansillary object DATA. --ANK (980731)
*/ */
__KINLINE struct cmsghdr * __cmsg_nxthdr(void *__ctl, __kernel_size_t __size, __KINLINE struct cmsghdr * __cmsg_nxthdr(void *__ctl, __kernel_size_t __size,
struct cmsghdr *__cmsg) struct cmsghdr *__cmsg)
{ {
unsigned char * __ptr; struct cmsghdr * __ptr;
if (__cmsg->cmsg_len < sizeof(struct cmsghdr)) __ptr = (struct cmsghdr*)(((unsigned char *) __cmsg) + CMSG_ALIGN(__cmsg->cmsg_len));
return NULL; if ((unsigned long)((char*)(__ptr+1) - (char *) __ctl) > __size)
__ptr = ((unsigned char *) __cmsg) + CMSG_ALIGN(__cmsg->cmsg_len);
if (__ptr >= (unsigned char *) __ctl + __size)
return NULL; return NULL;
return (struct cmsghdr *) __ptr; return __ptr;
} }
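A minimal sketch of the receive-side loop this comment describes, assuming the usual CMSG_FIRSTHDR/CMSG_NXTHDR/CMSG_DATA macros from this header and "msg" as an assumed struct msghdr pointer; it is illustrative only. The header returned always lies inside the control buffer, but, as the comment insists, cmsg_len must still be validated before the ancillary DATA is used.

	struct cmsghdr *cmsg;

	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
		if (cmsg->cmsg_len < sizeof(struct cmsghdr) ||
		    (unsigned long)((char *) cmsg - (char *) msg->msg_control)
			+ cmsg->cmsg_len > msg->msg_controllen)
			return -EINVAL;		/* malformed ancillary object */
		/* cmsg->cmsg_level, cmsg->cmsg_type and CMSG_DATA(cmsg) are safe to use now */
	}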
__KINLINE struct cmsghdr * cmsg_nxthdr (struct msghdr *__msg, struct cmsghdr *__cmsg) __KINLINE struct cmsghdr * cmsg_nxthdr (struct msghdr *__msg, struct cmsghdr *__cmsg)
......
...@@ -45,6 +45,7 @@ ...@@ -45,6 +45,7 @@
#define SIOCSIFMEM 0x8920 /* set memory address (BSD) */ #define SIOCSIFMEM 0x8920 /* set memory address (BSD) */
#define SIOCGIFMTU 0x8921 /* get MTU size */ #define SIOCGIFMTU 0x8921 /* get MTU size */
#define SIOCSIFMTU 0x8922 /* set MTU size */ #define SIOCSIFMTU 0x8922 /* set MTU size */
#define SIOCSIFNAME 0x8923 /* set interface name */
#define SIOCSIFHWADDR 0x8924 /* set hardware address */ #define SIOCSIFHWADDR 0x8924 /* set hardware address */
#define SIOCGIFENCAP 0x8925 /* get/set encapsulations */ #define SIOCGIFENCAP 0x8925 /* get/set encapsulations */
#define SIOCSIFENCAP 0x8926 #define SIOCSIFENCAP 0x8926
......
...@@ -35,7 +35,6 @@ struct dst_entry ...@@ -35,7 +35,6 @@ struct dst_entry
atomic_t use; /* client references */ atomic_t use; /* client references */
struct device *dev; struct device *dev;
int obsolete; int obsolete;
__u32 priority;
unsigned long lastuse; unsigned long lastuse;
unsigned mxlock; unsigned mxlock;
unsigned window; unsigned window;
......
...@@ -21,6 +21,10 @@ struct flowi { ...@@ -21,6 +21,10 @@ struct flowi {
struct in6_addr * saddr; struct in6_addr * saddr;
} ip6_u; } ip6_u;
} nl_u; } nl_u;
#define fl6_dst nl_u.ip6_u.daddr
#define fl6_src nl_u.ip6_u.saddr
#define fl4_dst nl_u.ip4_u.daddr
#define fl4_src nl_u.ip4_u.saddr
int oif; int oif;
......
...@@ -23,7 +23,8 @@ ...@@ -23,7 +23,8 @@
struct rt6_info; struct rt6_info;
struct fib6_node { struct fib6_node
{
struct fib6_node *parent; struct fib6_node *parent;
struct fib6_node *left; struct fib6_node *left;
struct fib6_node *right; struct fib6_node *right;
...@@ -43,12 +44,14 @@ struct fib6_node { ...@@ -43,12 +44,14 @@ struct fib6_node {
* *
*/ */
struct rt6key { struct rt6key
{
struct in6_addr addr; struct in6_addr addr;
int plen; int plen;
}; };
struct rt6_info { struct rt6_info
{
union { union {
struct dst_entry dst; struct dst_entry dst;
struct rt6_info *next; struct rt6_info *next;
...@@ -56,21 +59,16 @@ struct rt6_info { ...@@ -56,21 +59,16 @@ struct rt6_info {
#define rt6i_dev u.dst.dev #define rt6i_dev u.dst.dev
#define rt6i_nexthop u.dst.neighbour #define rt6i_nexthop u.dst.neighbour
#define rt6i_use u.dst.use
#define rt6i_ref u.dst.refcnt
#define rt6i_tstamp u.dst.lastuse
struct fib6_node *rt6i_node; struct fib6_node *rt6i_node;
struct in6_addr rt6i_gateway; struct in6_addr rt6i_gateway;
int rt6i_keylen;
u32 rt6i_flags; u32 rt6i_flags;
u32 rt6i_metric; u32 rt6i_metric;
u8 rt6i_hoplimit; u8 rt6i_hoplimit;
unsigned long rt6i_expires; unsigned long rt6i_expires;
atomic_t rt6i_ref;
union { union {
struct flow_rule *rt6iu_flowr; struct flow_rule *rt6iu_flowr;
...@@ -84,6 +82,33 @@ struct rt6_info { ...@@ -84,6 +82,33 @@ struct rt6_info {
struct rt6key rt6i_src; struct rt6key rt6i_src;
}; };
struct fib6_walker_t
{
struct fib6_walker_t *prev, *next;
struct fib6_node *root, *node;
struct rt6_info *leaf;
unsigned char state;
unsigned char prune;
int (*func)(struct fib6_walker_t *);
void *args;
};
extern struct fib6_walker_t fib6_walker_list;
extern __inline__ void fib6_walker_link(struct fib6_walker_t *w)
{
w->next = fib6_walker_list.next;
w->prev = &fib6_walker_list;
w->next->prev = w;
w->prev->next = w;
}
extern __inline__ void fib6_walker_unlink(struct fib6_walker_t *w)
{
w->next->prev = w->prev;
w->prev->next = w->next;
w->prev = w->next = w;
}
struct rt6_statistics { struct rt6_statistics {
__u32 fib_nodes; __u32 fib_nodes;
...@@ -97,8 +122,6 @@ struct rt6_statistics { ...@@ -97,8 +122,6 @@ struct rt6_statistics {
#define RTN_ROOT 0x0002 /* tree root node */ #define RTN_ROOT 0x0002 /* tree root node */
#define RTN_RTINFO 0x0004 /* node with valid routing info */ #define RTN_RTINFO 0x0004 /* node with valid routing info */
#define RTN_TAG 0x0100
/* /*
* priority levels (or metrics) * priority levels (or metrics)
* *
...@@ -128,11 +151,16 @@ extern struct fib6_node *fib6_lookup(struct fib6_node *root, ...@@ -128,11 +151,16 @@ extern struct fib6_node *fib6_lookup(struct fib6_node *root,
struct in6_addr *daddr, struct in6_addr *daddr,
struct in6_addr *saddr); struct in6_addr *saddr);
#define RT6_FILTER_RTNODES 1 struct fib6_node *fib6_locate(struct fib6_node *root,
struct in6_addr *daddr, int dst_len,
struct in6_addr *saddr, int src_len);
extern void fib6_clean_tree(struct fib6_node *root,
int (*func)(struct rt6_info *, void *arg),
int prune, void *arg);
extern void fib6_walk_tree(struct fib6_node *root, extern int fib6_walk(struct fib6_walker_t *w);
f_pnode func, void *arg, extern int fib6_walk_continue(struct fib6_walker_t *w);
int filter);
extern int fib6_add(struct fib6_node *root, extern int fib6_add(struct fib6_node *root,
struct rt6_info *rt); struct rt6_info *rt);
......
...@@ -12,23 +12,6 @@ ...@@ -12,23 +12,6 @@
#include <net/flow.h> #include <net/flow.h>
#include <net/ip6_fib.h> #include <net/ip6_fib.h>
/*
* Structure for assync processing of operations on the routing
* table
*/
struct rt6_req {
int operation;
struct rt6_info *ptr;
struct rt6_req *next;
struct rt6_req *prev;
#define RT_OPER_ADD 1
#define RT_OPER_DEL 2
};
struct pol_chain { struct pol_chain {
int type; int type;
int priority; int priority;
...@@ -53,8 +36,7 @@ extern void ip6_route_cleanup(void); ...@@ -53,8 +36,7 @@ extern void ip6_route_cleanup(void);
extern int ipv6_route_ioctl(unsigned int cmd, void *arg); extern int ipv6_route_ioctl(unsigned int cmd, void *arg);
extern struct rt6_info * ip6_route_add(struct in6_rtmsg *rtmsg, extern int ip6_route_add(struct in6_rtmsg *rtmsg);
int *err);
extern int ip6_del_rt(struct rt6_info *); extern int ip6_del_rt(struct rt6_info *);
extern int ip6_rt_addr_add(struct in6_addr *addr, extern int ip6_rt_addr_add(struct in6_addr *addr,
...@@ -85,15 +67,15 @@ extern struct rt6_info * rt6_add_dflt_router(struct in6_addr *gwaddr, ...@@ -85,15 +67,15 @@ extern struct rt6_info * rt6_add_dflt_router(struct in6_addr *gwaddr,
extern void rt6_purge_dflt_routers(int lst_resort); extern void rt6_purge_dflt_routers(int lst_resort);
extern struct rt6_info * rt6_redirect(struct in6_addr *dest, extern void rt6_redirect(struct in6_addr *dest,
struct in6_addr *saddr, struct in6_addr *saddr,
struct in6_addr *target, struct neighbour *neigh,
struct device *dev,
int on_link); int on_link);
extern void rt6_pmtu_discovery(struct in6_addr *addr, extern void rt6_pmtu_discovery(struct in6_addr *daddr,
struct in6_addr *saddr,
struct device *dev, struct device *dev,
int pmtu); u32 pmtu);
struct nlmsghdr; struct nlmsghdr;
struct netlink_callback; struct netlink_callback;
...@@ -103,22 +85,25 @@ extern int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *a ...@@ -103,22 +85,25 @@ extern int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *a
extern int inet6_rtm_getroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg); extern int inet6_rtm_getroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg);
extern void rt6_ifdown(struct device *dev); extern void rt6_ifdown(struct device *dev);
extern void rt6_mtu_change(struct device *dev, unsigned mtu);
/* /*
* Store a destination cache entry in a socket * Store a destination cache entry in a socket
* For UDP/RAW sockets this is done on udp_connect. * For UDP/RAW sockets this is done on udp_connect.
*/ */
extern __inline__ void ip6_dst_store(struct sock *sk, struct dst_entry *dst) extern __inline__ void ip6_dst_store(struct sock *sk, struct dst_entry *dst,
struct in6_addr *daddr)
{ {
struct ipv6_pinfo *np; struct ipv6_pinfo *np;
struct rt6_info *rt; struct rt6_info *rt;
np = &sk->net_pinfo.af_inet6; np = &sk->net_pinfo.af_inet6;
dst_release(xchg(&sk->dst_cache,dst)); dst_release(xchg(&sk->dst_cache,dst));
rt = (struct rt6_info *) dst; rt = (struct rt6_info *) dst;
np->daddr_cache = daddr;
np->dst_cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0; np->dst_cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
} }
......
...@@ -212,7 +212,7 @@ extern int fib_semantic_match(int type, struct fib_info *, ...@@ -212,7 +212,7 @@ extern int fib_semantic_match(int type, struct fib_info *,
extern struct fib_info *fib_create_info(const struct rtmsg *r, struct kern_rta *rta, extern struct fib_info *fib_create_info(const struct rtmsg *r, struct kern_rta *rta,
const struct nlmsghdr *, int *err); const struct nlmsghdr *, int *err);
extern int fib_nh_match(struct rtmsg *r, struct nlmsghdr *, struct kern_rta *rta, struct fib_info *fi); extern int fib_nh_match(struct rtmsg *r, struct nlmsghdr *, struct kern_rta *rta, struct fib_info *fi);
extern int fib_dump_info(struct sk_buff *skb, pid_t pid, u32 seq, int event, extern int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
u8 tb_id, u8 type, u8 scope, void *dst, int dst_len, u8 tos, u8 tb_id, u8 type, u8 scope, void *dst, int dst_len, u8 tos,
struct fib_info *fi); struct fib_info *fi);
extern int fib_sync_down(u32 local, struct device *dev, int force); extern int fib_sync_down(u32 local, struct device *dev, int force);
......
...@@ -4,7 +4,7 @@ ...@@ -4,7 +4,7 @@
* Authors: * Authors:
* Pedro Roque <roque@di.fc.ul.pt> * Pedro Roque <roque@di.fc.ul.pt>
* *
* $Id: ipv6.h,v 1.12 1998/07/15 05:05:02 davem Exp $ * $Id: ipv6.h,v 1.13 1998/08/26 12:02:11 davem Exp $
* *
* This program is free software; you can redistribute it and/or * This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License * modify it under the terms of the GNU General Public License
...@@ -86,53 +86,44 @@ struct frag_hdr { ...@@ -86,53 +86,44 @@ struct frag_hdr {
#include <net/sock.h> #include <net/sock.h>
extern struct ipv6_mib ipv6_statistics; extern struct ipv6_mib ipv6_statistics;
extern struct icmpv6_mib icmpv6_statistics;
extern struct udp_mib udp_stats_in6;
struct ipv6_frag { struct ip6_ra_chain
__u16 offset; {
__u16 len; struct ip6_ra_chain *next;
struct sk_buff *skb; struct sock *sk;
int sel;
struct frag_hdr *fhdr; void (*destructor)(struct sock *);
struct ipv6_frag *next;
}; };
extern struct ip6_ra_chain *ip6_ra_chain;
/* /*
* Equivalent of ipv4 struct ipq This structure is prepared by protocol, when parsing
ancillary data and passed to IPv6.
*/ */
struct frag_queue { struct ipv6_txoptions
{
/* Length of this structure */
int tot_len;
struct frag_queue *next; /* length of extension headers */
struct frag_queue *prev;
__u32 id; /* fragment id */ __u16 opt_flen; /* after fragment hdr */
struct in6_addr saddr; __u16 opt_nflen; /* before fragment hdr */
struct in6_addr daddr;
struct timer_list timer; /* expire timer */
struct ipv6_frag *fragments;
struct device *dev;
__u8 last_in; /* has last segment arrived? */
__u8 nexthdr;
__u8 *nhptr;
};
struct ipv6_tlvtype struct ipv6_opt_hdr *hopopt;
{ struct ipv6_opt_hdr *dst0opt;
u8 type; struct ipv6_rt_hdr *srcrt; /* Routing Header */
u8 len; struct ipv6_opt_hdr *auth;
}; struct ipv6_opt_hdr *dst1opt;
struct ip6_ra_chain /* Option buffer, as read by IPV6_PKTOPTIONS, starts here. */
{
struct ip6_ra_chain *next;
struct sock *sk;
int sel;
void (*destructor)(struct sock *);
}; };
extern struct ip6_ra_chain *ip6_ra_chain;
extern int ip6_ra_control(struct sock *sk, int sel, extern int ip6_ra_control(struct sock *sk, int sel,
void (*destructor)(struct sock *)); void (*destructor)(struct sock *));
...@@ -140,18 +131,13 @@ extern int ip6_ra_control(struct sock *sk, int sel, ...@@ -140,18 +131,13 @@ extern int ip6_ra_control(struct sock *sk, int sel,
extern int ip6_call_ra_chain(struct sk_buff *skb, int sel); extern int ip6_call_ra_chain(struct sk_buff *skb, int sel);
extern int ip6_dstopt_unknown(struct sk_buff *skb, extern u8 * ipv6_reassembly(struct sk_buff **skb, u8 *nhptr);
struct ipv6_tlvtype *hdr);
extern int ipv6_routing_header(struct sk_buff **skb, extern u8 * ipv6_parse_hopopts(struct sk_buff *skb, u8 *nhptr);
struct device *dev,
__u8 *nhptr,
struct ipv6_options *opt);
extern int ipv6_reassembly(struct sk_buff **skb, extern u8 * ipv6_parse_exthdrs(struct sk_buff **skb, u8 *nhptr);
struct device *dev,
__u8 *nhptr, extern struct ipv6_txoptions * ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt);
struct ipv6_options *opt);
#define IPV6_FRAG_TIMEOUT (60*HZ) /* 60 seconds */ #define IPV6_FRAG_TIMEOUT (60*HZ) /* 60 seconds */
...@@ -226,7 +212,7 @@ extern int ipv6_rcv(struct sk_buff *skb, ...@@ -226,7 +212,7 @@ extern int ipv6_rcv(struct sk_buff *skb,
extern int ip6_xmit(struct sock *sk, extern int ip6_xmit(struct sock *sk,
struct sk_buff *skb, struct sk_buff *skb,
struct flowi *fl, struct flowi *fl,
struct ipv6_options *opt); struct ipv6_txoptions *opt);
extern int ip6_nd_hdr(struct sock *sk, extern int ip6_nd_hdr(struct sock *sk,
struct sk_buff *skb, struct sk_buff *skb,
...@@ -240,7 +226,7 @@ extern int ip6_build_xmit(struct sock *sk, ...@@ -240,7 +226,7 @@ extern int ip6_build_xmit(struct sock *sk,
const void *data, const void *data,
struct flowi *fl, struct flowi *fl,
unsigned length, unsigned length,
struct ipv6_options *opt, struct ipv6_txoptions *opt,
int hlimit, int flags); int hlimit, int flags);
/* /*
...@@ -256,28 +242,27 @@ extern int ip6_mc_input(struct sk_buff *skb); ...@@ -256,28 +242,27 @@ extern int ip6_mc_input(struct sk_buff *skb);
* Extension header (options) processing * Extension header (options) processing
*/ */
extern int ipv6opt_bld_rthdr(struct sk_buff *skb, extern u8 * ipv6_build_nfrag_opts(struct sk_buff *skb,
struct ipv6_options *opt, u8 *prev_hdr,
struct in6_addr *addr, struct ipv6_txoptions *opt,
int proto); struct in6_addr *daddr,
u32 jumbolen);
extern int ipv6opt_srcrt_co(struct sockaddr_in6 *sin6, extern u8 * ipv6_build_frag_opts(struct sk_buff *skb,
int len, u8 *prev_hdr,
struct ipv6_options *opt); struct ipv6_txoptions *opt);
extern void ipv6_push_nfrag_opts(struct sk_buff *skb,
extern int ipv6opt_srcrt_cl(struct sockaddr_in6 *sin6, struct ipv6_txoptions *opt,
int num_addrs, u8 *proto,
struct ipv6_options *opt); struct in6_addr **daddr_p);
extern void ipv6_push_frag_opts(struct sk_buff *skb,
extern int ipv6opt_srt_tosin(struct ipv6_options *opt, struct ipv6_txoptions *opt,
struct sockaddr_in6 *sin6, u8 *proto);
int len);
extern u8 * ipv6_skip_exthdr(struct ipv6_opt_hdr *hdr,
extern void ipv6opt_free(struct ipv6_options *opt);
extern struct ipv6_opt_hdr * ipv6_skip_exthdr(struct ipv6_opt_hdr *hdr,
u8 *nexthdrp, int len); u8 *nexthdrp, int len);
extern struct ipv6_txoptions * ipv6_invert_rthdr(struct sock *sk,
struct ipv6_rt_hdr *hdr);
/* /*
......
...@@ -60,12 +60,7 @@ extern int ndisc_init(struct net_proto_family *ops); ...@@ -60,12 +60,7 @@ extern int ndisc_init(struct net_proto_family *ops);
extern void ndisc_cleanup(void); extern void ndisc_cleanup(void);
extern int ndisc_rcv(struct sk_buff *skb, extern int ndisc_rcv(struct sk_buff *skb, unsigned long len);
struct device *dev,
struct in6_addr *saddr,
struct in6_addr *daddr,
struct ipv6_options *opt,
unsigned short len);
extern void ndisc_send_ns(struct device *dev, extern void ndisc_send_ns(struct device *dev,
struct neighbour *neigh, struct neighbour *neigh,
......
...@@ -147,12 +147,12 @@ extern psched_time_t psched_time_base; ...@@ -147,12 +147,12 @@ extern psched_time_t psched_time_base;
#if PSCHED_CLOCK_SOURCE == PSCHED_JIFFIES #if PSCHED_CLOCK_SOURCE == PSCHED_JIFFIES
#define PSCHED_WATCHER #define PSCHED_WATCHER unsigned long
extern unsigned long psched_time_mark; extern PSCHED_WATCHER psched_time_mark;
#if HZ == 100 #if HZ == 100
#define PSCHED_JSCALE 7 #define PSCHED_JSCALE 13
#elif HZ == 1024 #elif HZ == 1024
#define PSCHED_JSCALE 10 #define PSCHED_JSCALE 10
#else #else
...@@ -179,9 +179,9 @@ extern int psched_clock_scale; ...@@ -179,9 +179,9 @@ extern int psched_clock_scale;
#elif defined (__alpha__) #elif defined (__alpha__)
#define PSCHED_WATCHER #define PSCHED_WATCHER u32
extern u32 psched_time_mark; extern PSCHED_WATCHER psched_time_mark;
#define PSCHED_GET_TIME(stamp) \ #define PSCHED_GET_TIME(stamp) \
({ u32 __res; \ ({ u32 __res; \
......
...@@ -48,17 +48,13 @@ struct inet_protocol ...@@ -48,17 +48,13 @@ struct inet_protocol
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
struct inet6_protocol struct inet6_protocol
{ {
int (*handler)(struct sk_buff *skb, struct device *dev, int (*handler)(struct sk_buff *skb,
struct in6_addr *saddr, unsigned long len);
struct in6_addr *daddr,
struct ipv6_options *opt,
unsigned short len,
int redo, struct inet6_protocol *protocol);
void (*err_handler)(struct sk_buff *skb, int type, int code, unsigned char *buff, void (*err_handler)(struct sk_buff *skb, struct ipv6hdr *hdr,
__u32 info, struct in6_addr *saddr, struct inet6_skb_parm *opt,
struct in6_addr *daddr, int type, int code, unsigned char *buff,
struct inet6_protocol *protocol); __u32 info);
struct inet6_protocol *next; struct inet6_protocol *next;
unsigned char protocol; unsigned char protocol;
unsigned char copy:1; unsigned char copy:1;
......
...@@ -10,19 +10,17 @@ extern struct sock *raw_v6_htable[RAWV6_HTABLE_SIZE]; ...@@ -10,19 +10,17 @@ extern struct sock *raw_v6_htable[RAWV6_HTABLE_SIZE];
extern struct sock *raw_v6_lookup(struct sock *sk, unsigned short num, extern struct sock *raw_v6_lookup(struct sock *sk, unsigned short num,
struct in6_addr *loc_addr, struct in6_addr *rmt_addr); struct in6_addr *loc_addr, struct in6_addr *rmt_addr);
extern int rawv6_rcv(struct sk_buff *skb, extern int rawv6_rcv(struct sock *sk,
struct device *dev, struct sk_buff *skb,
struct in6_addr *saddr, unsigned long len);
struct in6_addr *daddr,
struct ipv6_options *opt,
unsigned short len);
extern void rawv6_err(struct sock *sk, extern void rawv6_err(struct sock *sk,
struct sk_buff *skb,
struct ipv6hdr *hdr,
struct inet6_skb_parm *opt,
int type, int code, int type, int code,
unsigned char *buff, unsigned char *buff, u32 info);
struct in6_addr *saddr,
struct in6_addr *daddr);
#endif #endif
......
...@@ -27,6 +27,11 @@ ...@@ -27,6 +27,11 @@
#include <net/dst.h> #include <net/dst.h>
#include <linux/in_route.h> #include <linux/in_route.h>
#include <linux/rtnetlink.h> #include <linux/rtnetlink.h>
#include <linux/route.h>
#ifndef __KERNEL__
#warning This file is not supposed to be used outside of kernel.
#endif
#define RT_HASH_DIVISOR 256 #define RT_HASH_DIVISOR 256
...@@ -36,7 +41,6 @@ ...@@ -36,7 +41,6 @@
*/ */
#define RT_CACHE_BUBBLE_THRESHOLD (5*HZ) #define RT_CACHE_BUBBLE_THRESHOLD (5*HZ)
#include <linux/route.h>
#define RTO_ONLINK 0x01 #define RTO_ONLINK 0x01
#define RTO_TPROXY 0x80000000 #define RTO_TPROXY 0x80000000
...@@ -87,7 +91,8 @@ struct rtable ...@@ -87,7 +91,8 @@ struct rtable
#endif #endif
}; };
#ifdef __KERNEL__ extern struct rtable *rt_hash_table[RT_HASH_DIVISOR];
extern void ip_rt_init(void); extern void ip_rt_init(void);
extern void ip_rt_redirect(u32 old_gw, u32 dst, u32 new_gw, extern void ip_rt_redirect(u32 old_gw, u32 dst, u32 new_gw,
u32 src, u8 tos, struct device *dev); u32 src, u8 tos, struct device *dev);
...@@ -131,7 +136,4 @@ extern __inline__ int ip_route_connect(struct rtable **rp, u32 dst, u32 src, u32 ...@@ -131,7 +136,4 @@ extern __inline__ int ip_route_connect(struct rtable **rp, u32 dst, u32 src, u32
return ip_route_output(rp, dst, src, tos, oif); return ip_route_output(rp, dst, src, tos, oif);
} }
#endif
#endif /* _ROUTE_H */ #endif /* _ROUTE_H */
...@@ -52,11 +52,14 @@ struct ipv6_mib ...@@ -52,11 +52,14 @@ struct ipv6_mib
{ {
unsigned long Ip6InReceives; unsigned long Ip6InReceives;
unsigned long Ip6InHdrErrors; unsigned long Ip6InHdrErrors;
unsigned long Ip6InTooBigErrors;
unsigned long Ip6InNoRoutes;
unsigned long Ip6InAddrErrors; unsigned long Ip6InAddrErrors;
unsigned long Ip6ForwDatagrams;
unsigned long Ip6InUnknownProtos; unsigned long Ip6InUnknownProtos;
unsigned long Ip6InTruncatedPkts;
unsigned long Ip6InDiscards; unsigned long Ip6InDiscards;
unsigned long Ip6InDelivers; unsigned long Ip6InDelivers;
unsigned long Ip6OutForwDatagrams;
unsigned long Ip6OutRequests; unsigned long Ip6OutRequests;
unsigned long Ip6OutDiscards; unsigned long Ip6OutDiscards;
unsigned long Ip6OutNoRoutes; unsigned long Ip6OutNoRoutes;
...@@ -67,6 +70,8 @@ struct ipv6_mib ...@@ -67,6 +70,8 @@ struct ipv6_mib
unsigned long Ip6FragOKs; unsigned long Ip6FragOKs;
unsigned long Ip6FragFails; unsigned long Ip6FragFails;
unsigned long Ip6FragCreates; unsigned long Ip6FragCreates;
unsigned long Ip6InMcastPkts;
unsigned long Ip6OutMcastPkts;
}; };
struct icmp_mib struct icmp_mib
...@@ -98,6 +103,43 @@ struct icmp_mib ...@@ -98,6 +103,43 @@ struct icmp_mib
unsigned long IcmpOutAddrMasks; unsigned long IcmpOutAddrMasks;
unsigned long IcmpOutAddrMaskReps; unsigned long IcmpOutAddrMaskReps;
}; };
struct icmpv6_mib
{
unsigned long Icmp6InMsgs;
unsigned long Icmp6InErrors;
unsigned long Icmp6InDestUnreachs;
unsigned long Icmp6InPktTooBigs;
unsigned long Icmp6InTimeExcds;
unsigned long Icmp6InParmProblems;
unsigned long Icmp6InEchos;
unsigned long Icmp6InEchoReplies;
unsigned long Icmp6InGroupMembQueries;
unsigned long Icmp6InGroupMembResponses;
unsigned long Icmp6InGroupMembReductions;
unsigned long Icmp6InRouterSolicits;
unsigned long Icmp6InRouterAdvertisements;
unsigned long Icmp6InNeighborSolicits;
unsigned long Icmp6InNeighborAdvertisements;
unsigned long Icmp6InRedirects;
unsigned long Icmp6OutMsgs;
unsigned long Icmp6OutDestUnreachs;
unsigned long Icmp6OutPktTooBigs;
unsigned long Icmp6OutTimeExcds;
unsigned long Icmp6OutParmProblems;
unsigned long Icmp6OutEchoReplies;
unsigned long Icmp6OutRouterSolicits;
unsigned long Icmp6OutNeighborSolicits;
unsigned long Icmp6OutNeighborAdvertisements;
unsigned long Icmp6OutRedirects;
unsigned long Icmp6OutGroupMembResponses;
unsigned long Icmp6OutGroupMembReductions;
};
struct tcp_mib struct tcp_mib
{ {
...@@ -131,6 +173,9 @@ struct linux_mib ...@@ -131,6 +173,9 @@ struct linux_mib
unsigned long SyncookiesRecv; unsigned long SyncookiesRecv;
unsigned long SyncookiesFailed; unsigned long SyncookiesFailed;
unsigned long EmbryonicRsts; unsigned long EmbryonicRsts;
unsigned long PruneCalled;
unsigned long RcvPruned;
unsigned long OfoPruned;
}; };
#endif #endif
...@@ -87,6 +87,8 @@ ...@@ -87,6 +87,8 @@
#include <asm/atomic.h> #include <asm/atomic.h>
#define MIN_WRITE_SPACE 2048
/* The AF_UNIX specific socket options */ /* The AF_UNIX specific socket options */
struct unix_opt { struct unix_opt {
int family; int family;
...@@ -134,6 +136,7 @@ struct ipv6_pinfo { ...@@ -134,6 +136,7 @@ struct ipv6_pinfo {
struct in6_addr saddr; struct in6_addr saddr;
struct in6_addr rcv_saddr; struct in6_addr rcv_saddr;
struct in6_addr daddr; struct in6_addr daddr;
struct in6_addr *daddr_cache;
__u32 flow_lbl; __u32 flow_lbl;
int hop_limit; int hop_limit;
...@@ -141,21 +144,28 @@ struct ipv6_pinfo { ...@@ -141,21 +144,28 @@ struct ipv6_pinfo {
int mcast_oif; int mcast_oif;
__u8 priority; __u8 priority;
/* pktoption flags */
/* sockopt flags */ union {
struct {
__u8 recvsrcrt:1, __u8 srcrt:2,
rxinfo:1, rxinfo:1,
rxhlim:1, rxhlim:1,
hopopts:1, hopopts:1,
dstopts:1, dstopts:1,
mc_loop:1, authhdr:1,
unused:2; unused:1;
} bits;
__u8 all;
} rxopt;
/* sockopt flags */
__u8 mc_loop:1;
struct ipv6_mc_socklist *ipv6_mc_list; struct ipv6_mc_socklist *ipv6_mc_list;
__u32 dst_cookie; __u32 dst_cookie;
struct ipv6_options *opt; struct ipv6_txoptions *opt;
struct sk_buff *pktoptions;
}; };
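The ipv6_pinfo hunk above splits the old sockopt bitfield: per-packet option requests now live in the rxopt union, whose .bits members are tested individually while .all answers "is any option requested?" in one load, and mc_loop moves out to a plain sockopt flag. A minimal, self-contained sketch of the same bits/all union pattern; the field names are copied from the hunk, everything else is illustrative.

/* Stand-alone illustration of the rxopt bits/all trick used above;
 * it mirrors the layout but is not the kernel structure itself.
 * It relies on the bitfield struct and the char sharing one byte. */
#include <stdio.h>

struct rxopt_demo {
        union {
                struct {
                        unsigned char srcrt:2,
                                      rxinfo:1,
                                      rxhlim:1,
                                      hopopts:1,
                                      dstopts:1,
                                      authhdr:1,
                                      unused:1;
                } bits;
                unsigned char all;
        } rxopt;
};

int main(void)
{
        struct rxopt_demo np;

        np.rxopt.all = 0;               /* clear every request bit at once */
        np.rxopt.bits.rxinfo = 1;       /* e.g. packet info requested      */
        np.rxopt.bits.rxhlim = 1;       /* e.g. hop limit requested        */

        /* Fast path: skip all per-packet option collection with one test. */
        if (np.rxopt.all)
                printf("some rx options requested (mask 0x%02x)\n", np.rxopt.all);
        return 0;
}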
struct raw6_opt { struct raw6_opt {
...@@ -207,6 +217,10 @@ struct tcp_opt { ...@@ -207,6 +217,10 @@ struct tcp_opt {
__u32 snd_wl2; /* Ack sequence for update */ __u32 snd_wl2; /* Ack sequence for update */
__u32 snd_wnd; /* The window we expect to receive */ __u32 snd_wnd; /* The window we expect to receive */
__u32 max_window; __u32 max_window;
__u32 pmtu_cookie; /* Last pmtu seen by socket */
__u16 mss_cache; /* Cached effective mss, not including SACKS */
__u16 mss_clamp; /* Maximal mss, negotiated at connection setup */
__u16 ext_header_len; /* Dave, do you allow mw to use this hole? 8) --ANK */
__u8 pending; /* pending events */ __u8 pending; /* pending events */
__u8 retransmits; __u8 retransmits;
__u32 last_ack_sent; /* last ack we sent */ __u32 last_ack_sent; /* last ack we sent */
...@@ -226,6 +240,7 @@ struct tcp_opt { ...@@ -226,6 +240,7 @@ struct tcp_opt {
__u32 snd_ssthresh; /* Slow start size threshold */ __u32 snd_ssthresh; /* Slow start size threshold */
__u8 dup_acks; /* Consequetive duplicate acks seen from other end */ __u8 dup_acks; /* Consequetive duplicate acks seen from other end */
__u8 delayed_acks; __u8 delayed_acks;
__u16 user_mss; /* mss requested by user in ioctl */
/* Two commonly used timers in both sender and receiver paths. */ /* Two commonly used timers in both sender and receiver paths. */
struct timer_list retransmit_timer; /* Resend (no ack) */ struct timer_list retransmit_timer; /* Resend (no ack) */
...@@ -252,7 +267,6 @@ struct tcp_opt { ...@@ -252,7 +267,6 @@ struct tcp_opt {
wscale_ok, /* Wscale seen on SYN packet */ wscale_ok, /* Wscale seen on SYN packet */
sack_ok; /* SACK seen on SYN packet */ sack_ok; /* SACK seen on SYN packet */
char saw_tstamp; /* Saw TIMESTAMP on last packet */ char saw_tstamp; /* Saw TIMESTAMP on last packet */
__u16 in_mss; /* MSS option received from sender */
__u8 snd_wscale; /* Window scaling received from sender */ __u8 snd_wscale; /* Window scaling received from sender */
__u8 rcv_wscale; /* Window scaling to send to receiver */ __u8 rcv_wscale; /* Window scaling to send to receiver */
__u32 rcv_tsval; /* Time stamp value */ __u32 rcv_tsval; /* Time stamp value */
...@@ -270,6 +284,9 @@ struct tcp_opt { ...@@ -270,6 +284,9 @@ struct tcp_opt {
__u32 urg_seq; __u32 urg_seq;
__u32 urg_data; __u32 urg_data;
__u32 last_seg_size; /* Size of last incoming segment */
__u32 rcv_mss; /* MSS used for delayed ACK decisions */
struct open_request *syn_wait_queue; struct open_request *syn_wait_queue;
struct open_request **syn_wait_last; struct open_request **syn_wait_last;
...@@ -390,12 +407,6 @@ struct sock { ...@@ -390,12 +407,6 @@ struct sock {
struct proto *prot; struct proto *prot;
/* mss is min(mtu, max_window)
* XXX Fix this, mtu only used in one TCP place and that is it -DaveM
*/
unsigned short mtu; /* mss negotiated in the syn's */
unsigned short mss; /* current eff. mss - can change */
unsigned short user_mss; /* mss requested by user in ioctl */
unsigned short shutdown; unsigned short shutdown;
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
...@@ -868,6 +879,26 @@ extern __inline__ int sock_error(struct sock *sk) ...@@ -868,6 +879,26 @@ extern __inline__ int sock_error(struct sock *sk)
return -err; return -err;
} }
extern __inline__ unsigned long sock_wspace(struct sock *sk)
{
int amt = 0;
if (!(sk->shutdown & SEND_SHUTDOWN)) {
amt = sk->sndbuf - atomic_read(&sk->wmem_alloc);
if (amt < 0)
amt = 0;
}
return amt;
}
/*
* Default write policy as shown to user space via poll/select/SIGIO
* Kernel internally doesn't use the MIN_WRITE_SPACE threshold.
*/
extern __inline__ int sock_writeable(struct sock *sk)
{
return sock_wspace(sk) >= MIN_WRITE_SPACE;
}
/* /*
* Declarations from timer.c * Declarations from timer.c
......
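With these sock.h hunks, sock_wspace() becomes an inline (its old sock.c body is removed further down) and sock_writeable() becomes the single definition of "writable" as seen by poll/select/SIGIO: at least MIN_WRITE_SPACE (2048) bytes of send buffer free and no SEND_SHUTDOWN. A hedged sketch of how a protocol's write-space callback is expected to use it, mirroring the sock_def_write_space() change later in this commit; the callback name is illustrative.

/* Sketch only: a write_space callback that agrees with the poll()
 * policy above, so async wakeups fire exactly when poll() would
 * report POLLOUT.  Mirrors sock_def_write_space() below. */
static void example_write_space(struct sock *sk)
{
        if (sk->dead)
                return;

        wake_up_interruptible(sk->sleep);

        /* Only signal async waiters once the socket is really writable,
         * otherwise select()-driven programs see spurious wakeups. */
        if (sock_writeable(sk))
                sock_wake_async(sk->socket, 2);
}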
...@@ -78,6 +78,7 @@ struct tcp_bind_bucket { ...@@ -78,6 +78,7 @@ struct tcp_bind_bucket {
unsigned short flags; unsigned short flags;
#define TCPB_FLAG_LOCKED 0x0001 #define TCPB_FLAG_LOCKED 0x0001
#define TCPB_FLAG_FASTREUSE 0x0002 #define TCPB_FLAG_FASTREUSE 0x0002
#define TCPB_FLAG_GOODSOCKNUM 0x0004
struct tcp_bind_bucket *next; struct tcp_bind_bucket *next;
struct sock *owners; struct sock *owners;
...@@ -230,11 +231,8 @@ static __inline__ int tcp_sk_listen_hashfn(struct sock *sk) ...@@ -230,11 +231,8 @@ static __inline__ int tcp_sk_listen_hashfn(struct sock *sk)
return tcp_lhashfn(sk->num); return tcp_lhashfn(sk->num);
} }
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) /* Note, that it is > than ipv6 header */
#define NETHDR_SIZE sizeof(struct ipv6hdr) #define NETHDR_SIZE (sizeof(struct iphdr) + 40)
#else
#define NETHDR_SIZE sizeof(struct iphdr) + 40
#endif
/* /*
* 40 is maximal IP options size * 40 is maximal IP options size
...@@ -257,7 +255,6 @@ static __inline__ int tcp_sk_listen_hashfn(struct sock *sk) ...@@ -257,7 +255,6 @@ static __inline__ int tcp_sk_listen_hashfn(struct sock *sk)
#define MIN_WINDOW 2048 #define MIN_WINDOW 2048
#define MAX_ACK_BACKLOG 2 #define MAX_ACK_BACKLOG 2
#define MAX_DELAY_ACK 2 #define MAX_DELAY_ACK 2
#define MIN_WRITE_SPACE 2048
#define TCP_WINDOW_DIFF 2048 #define TCP_WINDOW_DIFF 2048
/* urg_data states */ /* urg_data states */
...@@ -354,7 +351,7 @@ struct tcp_v4_open_req { ...@@ -354,7 +351,7 @@ struct tcp_v4_open_req {
struct tcp_v6_open_req { struct tcp_v6_open_req {
struct in6_addr loc_addr; struct in6_addr loc_addr;
struct in6_addr rmt_addr; struct in6_addr rmt_addr;
struct ipv6_options *opt; struct sk_buff *pktopts;
int iif; int iif;
}; };
#endif #endif
...@@ -400,6 +397,13 @@ extern kmem_cache_t *tcp_openreq_cachep; ...@@ -400,6 +397,13 @@ extern kmem_cache_t *tcp_openreq_cachep;
/* /*
* Pointers to address related TCP functions * Pointers to address related TCP functions
* (i.e. things that depend on the address family) * (i.e. things that depend on the address family)
*
* BUGGG_FUTURE: all the idea behind this struct is wrong.
* It mixes socket frontend with transport function.
* With port sharing between IPv6/v4 it gives the only advantage,
* only poor IPv6 needs to permanently recheck, that it
* is still IPv6 8)8) It must be cleaned up as soon as possible.
* --ANK (980802)
*/ */
struct tcp_func { struct tcp_func {
...@@ -414,7 +418,7 @@ struct tcp_func { ...@@ -414,7 +418,7 @@ struct tcp_func {
int (*conn_request) (struct sock *sk, int (*conn_request) (struct sock *sk,
struct sk_buff *skb, struct sk_buff *skb,
void *opt, __u32 isn); __u32 isn);
struct sock * (*syn_recv_sock) (struct sock *sk, struct sock * (*syn_recv_sock) (struct sock *sk,
struct sk_buff *skb, struct sk_buff *skb,
...@@ -424,6 +428,10 @@ struct tcp_func { ...@@ -424,6 +428,10 @@ struct tcp_func {
struct sock * (*get_sock) (struct sk_buff *skb, struct sock * (*get_sock) (struct sk_buff *skb,
struct tcphdr *th); struct tcphdr *th);
__u16 net_header_len;
int (*setsockopt) (struct sock *sk, int (*setsockopt) (struct sock *sk,
int level, int level,
int optname, int optname,
...@@ -490,22 +498,24 @@ extern int tcp_ioctl(struct sock *sk, ...@@ -490,22 +498,24 @@ extern int tcp_ioctl(struct sock *sk,
extern int tcp_rcv_state_process(struct sock *sk, extern int tcp_rcv_state_process(struct sock *sk,
struct sk_buff *skb, struct sk_buff *skb,
struct tcphdr *th, struct tcphdr *th,
void *opt, __u16 len); unsigned len);
extern int tcp_rcv_established(struct sock *sk, extern int tcp_rcv_established(struct sock *sk,
struct sk_buff *skb, struct sk_buff *skb,
struct tcphdr *th, struct tcphdr *th,
__u16 len); unsigned len);
extern int tcp_timewait_state_process(struct tcp_tw_bucket *tw, extern int tcp_timewait_state_process(struct tcp_tw_bucket *tw,
struct sk_buff *skb, struct sk_buff *skb,
struct tcphdr *th, struct tcphdr *th,
void *opt, __u16 len); unsigned len);
extern void tcp_close(struct sock *sk, extern void tcp_close(struct sock *sk,
unsigned long timeout); unsigned long timeout);
extern struct sock * tcp_accept(struct sock *sk, int flags); extern struct sock * tcp_accept(struct sock *sk, int flags);
extern unsigned int tcp_poll(struct file * file, struct socket *sock, struct poll_table_struct *wait); extern unsigned int tcp_poll(struct file * file, struct socket *sock, struct poll_table_struct *wait);
extern void tcp_write_space(struct sock *sk);
extern int tcp_getsockopt(struct sock *sk, int level, extern int tcp_getsockopt(struct sock *sk, int level,
int optname, char *optval, int optname, char *optval,
int *optlen); int *optlen);
...@@ -536,12 +546,11 @@ extern void tcp_v4_send_check(struct sock *sk, ...@@ -536,12 +546,11 @@ extern void tcp_v4_send_check(struct sock *sk,
extern int tcp_v4_conn_request(struct sock *sk, extern int tcp_v4_conn_request(struct sock *sk,
struct sk_buff *skb, struct sk_buff *skb,
void *ptr, __u32 isn); __u32 isn);
extern struct sock * tcp_create_openreq_child(struct sock *sk, extern struct sock * tcp_create_openreq_child(struct sock *sk,
struct open_request *req, struct open_request *req,
struct sk_buff *skb, struct sk_buff *skb);
int mss);
extern struct sock * tcp_v4_syn_recv_sock(struct sock *sk, extern struct sock * tcp_v4_syn_recv_sock(struct sock *sk,
struct sk_buff *skb, struct sk_buff *skb,
...@@ -628,30 +637,25 @@ struct tcp_sl_timer { ...@@ -628,30 +637,25 @@ struct tcp_sl_timer {
extern struct tcp_sl_timer tcp_slt_array[TCP_SLT_MAX]; extern struct tcp_sl_timer tcp_slt_array[TCP_SLT_MAX];
extern int tcp_sync_mss(struct sock *sk, u32 pmtu);
/* Compute the current effective MSS, taking SACKs and IP options, /* Compute the current effective MSS, taking SACKs and IP options,
* and even PMTU discovery events into account. * and even PMTU discovery events into account.
*/ */
static __inline__ unsigned int tcp_current_mss(struct sock *sk) static __inline__ unsigned int tcp_current_mss(struct sock *sk)
{ {
struct tcp_opt *tp = &sk->tp_pinfo.af_tcp; struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
struct dst_entry *dst = sk->dst_cache; struct dst_entry *dst = sk->dst_cache;
unsigned int mss_now = sk->mss; int mss_now = tp->mss_cache;
if(dst && (sk->mtu < dst->pmtu)) { if (dst && dst->pmtu != tp->pmtu_cookie)
unsigned int mss_distance = (sk->mtu - sk->mss); mss_now = tcp_sync_mss(sk, dst->pmtu);
/* PMTU discovery event has occurred. */
sk->mtu = dst->pmtu;
mss_now = sk->mss = sk->mtu - mss_distance;
}
if(tp->sack_ok && tp->num_sacks) if(tp->sack_ok && tp->num_sacks)
mss_now -= (TCPOLEN_SACK_BASE_ALIGNED + mss_now -= (TCPOLEN_SACK_BASE_ALIGNED +
(tp->num_sacks * TCPOLEN_SACK_PERBLOCK)); (tp->num_sacks * TCPOLEN_SACK_PERBLOCK));
if(sk->opt) return mss_now > 8 ? mss_now : 8;
mss_now -= sk->opt->optlen;
return mss_now;
} }
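tcp_current_mss() no longer re-derives the MSS from sk->mtu/sk->mss on every call: the last path MTU seen by the socket is kept in tp->pmtu_cookie, the resulting effective MSS in tp->mss_cache, and tcp_sync_mss() runs only when the route's pmtu disagrees with the cookie. A rough, self-contained sketch of that caching discipline; the names are borrowed from the hunk, and the recomputation body is a placeholder, not the real tcp_sync_mss().

/* Illustrative only: cache an expensive derivation behind a "cookie"
 * of its input, the way mss_cache/pmtu_cookie are used above. */
#include <stdio.h>

struct mini_tp {
        unsigned int pmtu_cookie;       /* last PMTU the cache was built from */
        unsigned int mss_cache;         /* cached effective MSS               */
};

static unsigned int sync_mss(struct mini_tp *tp, unsigned int pmtu)
{
        /* Placeholder for the real header/option accounting. */
        tp->pmtu_cookie = pmtu;
        tp->mss_cache = pmtu - 40;      /* pretend 40 bytes of headers */
        return tp->mss_cache;
}

static unsigned int current_mss(struct mini_tp *tp, unsigned int route_pmtu)
{
        unsigned int mss = tp->mss_cache;

        if (route_pmtu != tp->pmtu_cookie)      /* PMTU changed: recompute once */
                mss = sync_mss(tp, route_pmtu);
        return mss > 8 ? mss : 8;               /* same floor as the hunk */
}

int main(void)
{
        struct mini_tp tp = { 0, 0 };

        printf("%u\n", current_mss(&tp, 1500)); /* recomputes: 1460 */
        printf("%u\n", current_mss(&tp, 1500)); /* served from the cache */
        printf("%u\n", current_mss(&tp, 1400)); /* PMTU dropped: 1360 */
        return 0;
}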
/* Compute the actual receive window we are currently advertising. /* Compute the actual receive window we are currently advertising.
...@@ -715,7 +719,12 @@ extern __inline__ int tcp_raise_window(struct sock *sk) ...@@ -715,7 +719,12 @@ extern __inline__ int tcp_raise_window(struct sock *sk)
* skbuff.h:skbuff->cb[xxx] size appropriately. * skbuff.h:skbuff->cb[xxx] size appropriately.
*/ */
struct tcp_skb_cb { struct tcp_skb_cb {
struct inet_skb_parm header; /* For incoming frames */ union {
struct inet_skb_parm h4;
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
struct inet6_skb_parm h6;
#endif
} header; /* For incoming frames */
__u32 seq; /* Starting sequence number */ __u32 seq; /* Starting sequence number */
__u32 end_seq; /* SEQ + FIN + SYN + datalen */ __u32 end_seq; /* SEQ + FIN + SYN + datalen */
unsigned long when; /* used to compute rtt's */ unsigned long when; /* used to compute rtt's */
...@@ -787,7 +796,7 @@ static __inline__ int tcp_snd_test(struct sock *sk, struct sk_buff *skb) ...@@ -787,7 +796,7 @@ static __inline__ int tcp_snd_test(struct sock *sk, struct sk_buff *skb)
* *
* Don't use the nagle rule for urgent data. * Don't use the nagle rule for urgent data.
*/ */
if (!sk->nonagle && skb->len < (sk->mss >> 1) && tp->packets_out && if (!sk->nonagle && skb->len < (tp->mss_cache >> 1) && tp->packets_out &&
!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_URG)) !(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_URG))
nagle_check = 0; nagle_check = 0;
...@@ -913,8 +922,6 @@ extern __inline__ void tcp_syn_build_options(__u32 *ptr, int mss, int ts, int sa ...@@ -913,8 +922,6 @@ extern __inline__ void tcp_syn_build_options(__u32 *ptr, int mss, int ts, int sa
* SACKs don't matter, we never delay an ACK when we * SACKs don't matter, we never delay an ACK when we
* have any of those going out. * have any of those going out.
*/ */
if(ts)
mss += TCPOLEN_TSTAMP_ALIGNED;
*ptr++ = htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss); *ptr++ = htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss);
if (ts) { if (ts) {
if(sack) if(sack)
......
...@@ -28,7 +28,7 @@ extern int datagram_recv_ctl(struct sock *sk, ...@@ -28,7 +28,7 @@ extern int datagram_recv_ctl(struct sock *sk,
extern int datagram_send_ctl(struct msghdr *msg, extern int datagram_send_ctl(struct msghdr *msg,
int *oif, int *oif,
struct in6_addr **src_addr, struct in6_addr **src_addr,
struct ipv6_options *opt, struct ipv6_txoptions *opt,
int *hlimit); int *hlimit);
#define LOOPBACK4_IPV6 __constant_htonl(0x7f000006) #define LOOPBACK4_IPV6 __constant_htonl(0x7f000006)
...@@ -38,6 +38,8 @@ extern int datagram_send_ctl(struct msghdr *msg, ...@@ -38,6 +38,8 @@ extern int datagram_send_ctl(struct msghdr *msg,
*/ */
extern struct tcp_func ipv4_specific; extern struct tcp_func ipv4_specific;
extern int inet6_destroy_sock(struct sock *sk);
#endif #endif
#endif #endif
...@@ -1129,7 +1129,6 @@ asmlinkage void __init start_kernel(void) ...@@ -1129,7 +1129,6 @@ asmlinkage void __init start_kernel(void)
mtrr_init (); mtrr_init ();
#endif #endif
sock_init();
#ifdef CONFIG_SYSCTL #ifdef CONFIG_SYSCTL
sysctl_init(); sysctl_init();
#endif #endif
...@@ -1200,6 +1199,9 @@ static int init(void * unused) ...@@ -1200,6 +1199,9 @@ static int init(void * unused)
int real_root_mountflags; int real_root_mountflags;
#endif #endif
/* Networking initialization needs a process context */
sock_init();
/* Launch bdflush from here, instead of the old syscall way. */ /* Launch bdflush from here, instead of the old syscall way. */
kernel_thread(bdflush, NULL, CLONE_FS | CLONE_FILES | CLONE_SIGHAND); kernel_thread(bdflush, NULL, CLONE_FS | CLONE_FILES | CLONE_SIGHAND);
/* Start the background pageout daemon. */ /* Start the background pageout daemon. */
......
...@@ -17,7 +17,9 @@ bool 'TCP/IP networking' CONFIG_INET ...@@ -17,7 +17,9 @@ bool 'TCP/IP networking' CONFIG_INET
if [ "$CONFIG_INET" = "y" ]; then if [ "$CONFIG_INET" = "y" ]; then
source net/ipv4/Config.in source net/ipv4/Config.in
if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
tristate 'The IPv6 protocol (EXPERIMENTAL)' CONFIG_IPV6 # Sorry, but IPv6 as module is still invalid.
# tristate 'The IPv6 protocol (EXPERIMENTAL)' CONFIG_IPV6
bool 'The IPv6 protocol (EXPERIMENTAL)' CONFIG_IPV6
if [ "$CONFIG_IPV6" != "n" ]; then if [ "$CONFIG_IPV6" != "n" ]; then
source net/ipv6/Config.in source net/ipv6/Config.in
fi fi
......
...@@ -1017,7 +1017,6 @@ static int atalk_create(struct socket *sock, int protocol) ...@@ -1017,7 +1017,6 @@ static int atalk_create(struct socket *sock, int protocol)
sk->destruct = NULL; sk->destruct = NULL;
/* Checksums on by default */ /* Checksums on by default */
sk->mtu = DDP_MAXSZ;
sk->zapped = 1; sk->zapped = 1;
return (0); return (0);
......
...@@ -849,7 +849,6 @@ int ax25_create(struct socket *sock, int protocol) ...@@ -849,7 +849,6 @@ int ax25_create(struct socket *sock, int protocol)
sk->destruct = ax25_free_sock; sk->destruct = ax25_free_sock;
sock->ops = &ax25_proto_ops; sock->ops = &ax25_proto_ops;
sk->protocol = protocol; sk->protocol = protocol;
sk->mtu = AX25_MTU; /* 256 */
ax25->sk = sk; ax25->sk = sk;
sk->protinfo.ax25 = ax25; sk->protinfo.ax25 = ax25;
...@@ -892,7 +891,6 @@ struct sock *ax25_make_new(struct sock *osk, struct ax25_dev *ax25_dev) ...@@ -892,7 +891,6 @@ struct sock *ax25_make_new(struct sock *osk, struct ax25_dev *ax25_dev)
sk->sndbuf = osk->sndbuf; sk->sndbuf = osk->sndbuf;
sk->debug = osk->debug; sk->debug = osk->debug;
sk->state = TCP_ESTABLISHED; sk->state = TCP_ESTABLISHED;
sk->mtu = osk->mtu;
sk->sleep = osk->sleep; sk->sleep = osk->sleep;
sk->zapped = osk->zapped; sk->zapped = osk->zapped;
......
...@@ -54,15 +54,16 @@ ...@@ -54,15 +54,16 @@
static inline void wait_for_packet(struct sock * sk) static inline void wait_for_packet(struct sock * sk)
{ {
unsigned long flags; struct wait_queue wait = { current, NULL };
add_wait_queue(sk->sleep, &wait);
current->state = TASK_INTERRUPTIBLE;
release_sock(sk);
save_flags(flags);
cli();
if (skb_peek(&sk->receive_queue) == NULL) if (skb_peek(&sk->receive_queue) == NULL)
interruptible_sleep_on(sk->sleep); schedule();
restore_flags(flags);
lock_sock(sk); current->state = TASK_RUNNING;
remove_wait_queue(sk->sleep, &wait);
} }
/* /*
...@@ -84,6 +85,14 @@ static inline int connection_based(struct sock *sk) ...@@ -84,6 +85,14 @@ static inline int connection_based(struct sock *sk)
* This function will lock the socket if a skb is returned, so the caller * This function will lock the socket if a skb is returned, so the caller
* needs to unlock the socket in that case (usually by calling skb_free_datagram) * needs to unlock the socket in that case (usually by calling skb_free_datagram)
* *
* * It does not lock socket since today. This function is
* * free of race conditions. This measure should/can improve
* * significantly datagram socket latencies at high loads,
* * when data copying to user space takes lots of time.
* * (BTW I've just killed the last cli() in IP/IPv6/core/netlink/packet
* * 8) Great win.)
* * --ANK (980729)
*
* The order of the tests when we find no data waiting are specified * The order of the tests when we find no data waiting are specified
* quite explicitly by POSIX 1003.1g, don't change them without having * quite explicitly by POSIX 1003.1g, don't change them without having
* the standard around please. * the standard around please.
...@@ -94,7 +103,6 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock, ...@@ -94,7 +103,6 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
int error; int error;
struct sk_buff *skb; struct sk_buff *skb;
lock_sock(sk);
restart: restart:
while(skb_queue_empty(&sk->receive_queue)) /* No data */ while(skb_queue_empty(&sk->receive_queue)) /* No data */
{ {
...@@ -129,13 +137,24 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock, ...@@ -129,13 +137,24 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
will suddenly eat the receive_queue */ will suddenly eat the receive_queue */
if (flags & MSG_PEEK) if (flags & MSG_PEEK)
{ {
unsigned long flags; unsigned long cpu_flags;
save_flags(flags);
cli(); /* It is the only POTENTIAL race condition
in this function. skb may be stolen by
another receiver after peek, but before
incrementing use count, provided kernel
is reentrant (it is not) or this function

is called by interrupts.
Protect it with global skb spinlock,
though for now even this is overkill.
--ANK (980728)
*/
spin_lock_irqsave(&skb_queue_lock, cpu_flags);
skb = skb_peek(&sk->receive_queue); skb = skb_peek(&sk->receive_queue);
if(skb!=NULL) if(skb!=NULL)
atomic_inc(&skb->users); atomic_inc(&skb->users);
restore_flags(flags); spin_unlock_irqrestore(&skb_queue_lock, cpu_flags);
} else } else
skb = skb_dequeue(&sk->receive_queue); skb = skb_dequeue(&sk->receive_queue);
...@@ -144,7 +163,6 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock, ...@@ -144,7 +163,6 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
return skb; return skb;
no_packet: no_packet:
release_sock(sk);
*err = error; *err = error;
return NULL; return NULL;
} }
...@@ -152,7 +170,6 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock, ...@@ -152,7 +170,6 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
void skb_free_datagram(struct sock * sk, struct sk_buff *skb) void skb_free_datagram(struct sock * sk, struct sk_buff *skb)
{ {
kfree_skb(skb); kfree_skb(skb);
release_sock(sk);
} }
/* /*
...@@ -184,6 +201,10 @@ int skb_copy_datagram_iovec(struct sk_buff *skb, int offset, struct iovec *to, ...@@ -184,6 +201,10 @@ int skb_copy_datagram_iovec(struct sk_buff *skb, int offset, struct iovec *to,
* Datagram poll: Again totally generic. This also handles * Datagram poll: Again totally generic. This also handles
* sequenced packet sockets providing the socket receive queue * sequenced packet sockets providing the socket receive queue
* is only ever holding data ready to receive. * is only ever holding data ready to receive.
*
* Note: when you _don't_ use this routine for this protocol,
* and you use a different write policy from sock_writeable()
* then please supply your own write_space callback.
*/ */
unsigned int datagram_poll(struct file * file, struct socket *sock, poll_table *wait) unsigned int datagram_poll(struct file * file, struct socket *sock, poll_table *wait)
...@@ -199,7 +220,7 @@ unsigned int datagram_poll(struct file * file, struct socket *sock, poll_table * ...@@ -199,7 +220,7 @@ unsigned int datagram_poll(struct file * file, struct socket *sock, poll_table *
mask |= POLLERR; mask |= POLLERR;
if (sk->shutdown & RCV_SHUTDOWN) if (sk->shutdown & RCV_SHUTDOWN)
mask |= POLLHUP; mask |= POLLHUP;
/* readable? */ /* readable? */
if (!skb_queue_empty(&sk->receive_queue)) if (!skb_queue_empty(&sk->receive_queue))
mask |= POLLIN | POLLRDNORM; mask |= POLLIN | POLLRDNORM;
...@@ -214,15 +235,8 @@ unsigned int datagram_poll(struct file * file, struct socket *sock, poll_table * ...@@ -214,15 +235,8 @@ unsigned int datagram_poll(struct file * file, struct socket *sock, poll_table *
} }
/* writable? */ /* writable? */
if (!(sk->shutdown & SEND_SHUTDOWN)) { if (sock_writeable(sk))
if (sk->prot) { mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
if (sock_wspace(sk) >= MIN_WRITE_SPACE)
mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
} else {
if (sk->sndbuf - atomic_read(&sk->wmem_alloc) >= MIN_WRITE_SPACE)
mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
}
}
return mask; return mask;
} }
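The datagram.c changes above drop the socket lock from the receive path, replace the cli()-protected interruptible_sleep_on() with an explicit wait-queue entry, and take the MSG_PEEK reference under skb_queue_lock as the comment explains. The key ordering point in the new wait_for_packet() is that the task queues itself and goes TASK_INTERRUPTIBLE before testing the queue, so a data_ready wakeup that races with the test is not lost. A hedged restatement of that pattern with the reasoning spelled out; names are from the hunk, but this is not a drop-in replacement.

/* Sketch of the sleep pattern wait_for_packet() switches to above:
 * mark ourselves INTERRUPTIBLE and join the wait queue *before*
 * testing the receive queue, so a wakeup that arrives between the
 * test and schedule() merely makes schedule() return immediately. */
static void wait_for_data(struct sock *sk)
{
        struct wait_queue wait = { current, NULL };

        add_wait_queue(sk->sleep, &wait);
        current->state = TASK_INTERRUPTIBLE;

        if (skb_peek(&sk->receive_queue) == NULL)
                schedule();             /* woken by the socket's data_ready */

        current->state = TASK_RUNNING;
        remove_wait_queue(sk->sleep, &wait);
}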
...@@ -16,6 +16,7 @@ ...@@ -16,6 +16,7 @@
* Alan Cox <gw4pts@gw4pts.ampr.org> * Alan Cox <gw4pts@gw4pts.ampr.org>
* David Hinds <dhinds@allegro.stanford.edu> * David Hinds <dhinds@allegro.stanford.edu>
* Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
* Adam Sulmicki <adam@cfar.umd.edu>
* *
* Changes: * Changes:
* Alan Cox : device private ioctl copies fields back. * Alan Cox : device private ioctl copies fields back.
...@@ -51,7 +52,10 @@ ...@@ -51,7 +52,10 @@
* Andi Kleen : Fix error reporting for SIOCGIFCONF * Andi Kleen : Fix error reporting for SIOCGIFCONF
* Michael Chastain : Fix signed/unsigned for SIOCGIFCONF * Michael Chastain : Fix signed/unsigned for SIOCGIFCONF
* Cyrus Durgin : Cleaned for KMOD * Cyrus Durgin : Cleaned for KMOD
* * Adam Sulmicki : Bug Fix : Network Device Unload
* A network device unload needs to purge
* the backlog queue.
* Paul Rusty Russel : SIOCSIFNAME
*/ */
#include <asm/uaccess.h> #include <asm/uaccess.h>
...@@ -154,6 +158,8 @@ int netdev_fastroute_obstacles; ...@@ -154,6 +158,8 @@ int netdev_fastroute_obstacles;
struct net_fastroute_stats dev_fastroute_stat; struct net_fastroute_stats dev_fastroute_stat;
#endif #endif
static void dev_clear_backlog(struct device *dev);
/****************************************************************************************** /******************************************************************************************
...@@ -171,6 +177,16 @@ int netdev_nit=0; ...@@ -171,6 +177,16 @@ int netdev_nit=0;
* Add a protocol ID to the list. Now that the input handler is * Add a protocol ID to the list. Now that the input handler is
* smarter we can dispense with all the messy stuff that used to be * smarter we can dispense with all the messy stuff that used to be
* here. * here.
*
* BEWARE!!! Protocol handlers, mangling input packets,
* MUST BE last in hash buckets and checking protocol handlers
* MUST start from promiscuous ptype_all chain in net_bh.
* It is true now, do not change it.
* Explanation follows: if protocol handler, mangling packet, will
* be the first on list, it is not able to sense, that packet
* is cloned and should be copied-on-write, so that it will
* change it and subsequent readers will get broken packet.
* --ANK (980803)
*/ */
void dev_add_pack(struct packet_type *pt) void dev_add_pack(struct packet_type *pt)
...@@ -448,7 +464,8 @@ int dev_close(struct device *dev) ...@@ -448,7 +464,8 @@ int dev_close(struct device *dev)
/* /*
* Device is now down. * Device is now down.
*/ */
dev_clear_backlog(dev);
dev->flags&=~(IFF_UP|IFF_RUNNING); dev->flags&=~(IFF_UP|IFF_RUNNING);
#ifdef CONFIG_NET_FASTROUTE #ifdef CONFIG_NET_FASTROUTE
dev_clear_fastroute(dev); dev_clear_fastroute(dev);
...@@ -457,7 +474,6 @@ int dev_close(struct device *dev) ...@@ -457,7 +474,6 @@ int dev_close(struct device *dev)
/* /*
* Tell people we are going down * Tell people we are going down
*/ */
notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev); notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev);
return(0); return(0);
...@@ -685,6 +701,45 @@ static void netdev_wakeup(void) ...@@ -685,6 +701,45 @@ static void netdev_wakeup(void)
} }
#endif #endif
static void dev_clear_backlog(struct device *dev)
{
struct sk_buff *prev, *curr;
/*
*
* Let now clear backlog queue. -AS
*
* We are competing here both with netif_rx() and net_bh().
* We don't want either of those to mess with skb ptrs
* while we work on them, thus cli()/sti().
*
* It looks better to use net_bh trick, at least
* to be sure, that we keep interrupt latency really low. --ANK (980727)
*/
if (backlog.qlen) {
start_bh_atomic();
curr = backlog.next;
while ( curr != (struct sk_buff *)(&backlog) ) {
unsigned long flags;
curr=curr->next;
if ( curr->prev->dev == dev ) {
prev = curr->prev;
spin_lock_irqsave(&skb_queue_lock, flags);
__skb_unlink(prev, &backlog);
spin_unlock_irqrestore(&skb_queue_lock, flags);
kfree_skb(prev);
}
}
end_bh_atomic();
#ifdef CONFIG_NET_HW_FLOWCONTROL
if (netdev_dropping)
netdev_wakeup();
#else
netdev_dropping = 0;
#endif
}
}
/* /*
* Receive a packet from a device driver and queue it for the upper * Receive a packet from a device driver and queue it for the upper
...@@ -1320,7 +1375,7 @@ int dev_change_flags(struct device *dev, unsigned flags) ...@@ -1320,7 +1375,7 @@ int dev_change_flags(struct device *dev, unsigned flags)
*/ */
dev->flags = (flags & (IFF_DEBUG|IFF_NOTRAILERS|IFF_RUNNING|IFF_NOARP| dev->flags = (flags & (IFF_DEBUG|IFF_NOTRAILERS|IFF_RUNNING|IFF_NOARP|
IFF_SLAVE|IFF_MASTER| IFF_NODYNARP|IFF_SLAVE|IFF_MASTER|
IFF_MULTICAST|IFF_PORTSEL|IFF_AUTOMEDIA)) | IFF_MULTICAST|IFF_PORTSEL|IFF_AUTOMEDIA)) |
(dev->flags & (IFF_UP|IFF_VOLATILE|IFF_PROMISC|IFF_ALLMULTI)); (dev->flags & (IFF_UP|IFF_VOLATILE|IFF_PROMISC|IFF_ALLMULTI));
...@@ -1391,12 +1446,11 @@ static int dev_ifsioc(struct ifreq *ifr, unsigned int cmd) ...@@ -1391,12 +1446,11 @@ static int dev_ifsioc(struct ifreq *ifr, unsigned int cmd)
return dev_change_flags(dev, ifr->ifr_flags); return dev_change_flags(dev, ifr->ifr_flags);
case SIOCGIFMETRIC: /* Get the metric on the interface (currently unused) */ case SIOCGIFMETRIC: /* Get the metric on the interface (currently unused) */
ifr->ifr_metric = dev->metric; ifr->ifr_metric = 0;
return 0; return 0;
case SIOCSIFMETRIC: /* Set the metric on the interface (currently unused) */ case SIOCSIFMETRIC: /* Set the metric on the interface (currently unused) */
dev->metric = ifr->ifr_metric; return -EOPNOTSUPP;
return 0;
case SIOCGIFMTU: /* Get the MTU of a device */ case SIOCGIFMTU: /* Get the MTU of a device */
ifr->ifr_mtu = dev->mtu; ifr->ifr_mtu = dev->mtu;
...@@ -1419,10 +1473,8 @@ static int dev_ifsioc(struct ifreq *ifr, unsigned int cmd) ...@@ -1419,10 +1473,8 @@ static int dev_ifsioc(struct ifreq *ifr, unsigned int cmd)
dev->mtu = ifr->ifr_mtu; dev->mtu = ifr->ifr_mtu;
err = 0; err = 0;
} }
if (!err && dev->flags&IFF_UP) { if (!err && dev->flags&IFF_UP)
printk(KERN_DEBUG "SIFMTU %s(%s)\n", dev->name, current->comm);
notifier_call_chain(&netdev_chain, NETDEV_CHANGEMTU, dev); notifier_call_chain(&netdev_chain, NETDEV_CHANGEMTU, dev);
}
return err; return err;
case SIOCGIFHWADDR: case SIOCGIFHWADDR:
...@@ -1484,11 +1536,22 @@ static int dev_ifsioc(struct ifreq *ifr, unsigned int cmd) ...@@ -1484,11 +1536,22 @@ static int dev_ifsioc(struct ifreq *ifr, unsigned int cmd)
return 0; return 0;
case SIOCSIFTXQLEN: case SIOCSIFTXQLEN:
if(ifr->ifr_qlen<2 || ifr->ifr_qlen>1024) /* Why <2? 0 and 1 are valid values. --ANK (980807) */
if(/*ifr->ifr_qlen<2 ||*/ ifr->ifr_qlen>1024)
return -EINVAL; return -EINVAL;
dev->tx_queue_len = ifr->ifr_qlen; dev->tx_queue_len = ifr->ifr_qlen;
return 0; return 0;
case SIOCSIFNAME:
if (dev->flags&IFF_UP)
return -EBUSY;
if (dev_get(ifr->ifr_newname))
return -EEXIST;
memcpy(dev->name, ifr->ifr_newname, IFNAMSIZ);
dev->name[IFNAMSIZ-1] = 0;
notifier_call_chain(&netdev_chain, NETDEV_CHANGENAME, dev);
return 0;
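The new SIOCSIFNAME case above lets an administrator rename a (downed) interface and fires a NETDEV_CHANGENAME notifier so other layers can follow. A hedged userspace sketch of driving it; it assumes the matching header definitions (SIOCSIFNAME in <linux/sockios.h>, ifr_newname in <net/if.h>) are installed, and the interface names are made up. The kernel side above returns -EBUSY if the device is up and -EEXIST if the new name is taken, and CAP_NET_ADMIN is required.

/* Hedged userspace sketch: rename eth1 to dmz0 via SIOCSIFNAME. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>

int main(void)
{
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);  /* any socket works as an ioctl handle */

        if (fd < 0) {
                perror("socket");
                return 1;
        }
        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth1", IFNAMSIZ);     /* current name (example) */
        strncpy(ifr.ifr_newname, "dmz0", IFNAMSIZ);  /* desired name (example) */

        if (ioctl(fd, SIOCSIFNAME, &ifr) < 0)
                perror("SIOCSIFNAME");               /* EBUSY if the device is up */

        close(fd);
        return 0;
}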
/* /*
* Unknown or private ioctl * Unknown or private ioctl
*/ */
...@@ -1597,6 +1660,7 @@ int dev_ioctl(unsigned int cmd, void *arg) ...@@ -1597,6 +1660,7 @@ int dev_ioctl(unsigned int cmd, void *arg)
case SIOCDELMULTI: case SIOCDELMULTI:
case SIOCSIFHWBROADCAST: case SIOCSIFHWBROADCAST:
case SIOCSIFTXQLEN: case SIOCSIFTXQLEN:
case SIOCSIFNAME:
if (!capable(CAP_NET_ADMIN)) if (!capable(CAP_NET_ADMIN))
return -EPERM; return -EPERM;
dev_load(ifr.ifr_name); dev_load(ifr.ifr_name);
...@@ -1668,6 +1732,17 @@ int register_netdevice(struct device *dev) ...@@ -1668,6 +1732,17 @@ int register_netdevice(struct device *dev)
struct device *d, **dp; struct device *d, **dp;
if (dev_boot_phase) { if (dev_boot_phase) {
/* This is NOT bug, but I am not sure, that all the
devices, initialized before netdev module is started
are sane.
Now they are chained to device boot list
and probed later. If a module is initialized
before netdev, but assumes that dev->init
is really called by register_netdev(), it will fail.
So that this message should be printed for a while.
*/
printk(KERN_INFO "early initialization of device %s is deferred\n", dev->name); printk(KERN_INFO "early initialization of device %s is deferred\n", dev->name);
/* Check for existence, and append to tail of chain */ /* Check for existence, and append to tail of chain */
......
...@@ -215,7 +215,7 @@ int csum_partial_copy_fromiovecend(unsigned char *kdata, struct iovec *iov, ...@@ -215,7 +215,7 @@ int csum_partial_copy_fromiovecend(unsigned char *kdata, struct iovec *iov,
partial_cnt = 0; partial_cnt = 0;
} }
if (len - copy > 0) if (len > copy)
{ {
partial_cnt = copy % 4; partial_cnt = copy % 4;
if (partial_cnt) if (partial_cnt)
......
...@@ -9,6 +9,9 @@ ...@@ -9,6 +9,9 @@
* modify it under the terms of the GNU General Public License * modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version * as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version. * 2 of the License, or (at your option) any later version.
*
* Fixes:
* Vitaly E. Lavrov releasing NULL neighbor in neigh_add.
*/ */
#include <linux/config.h> #include <linux/config.h>
...@@ -1033,7 +1036,8 @@ int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) ...@@ -1033,7 +1036,8 @@ int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
ndm->ndm_state, ndm->ndm_state,
nlh->nlmsg_flags&NLM_F_REPLACE, 0); nlh->nlmsg_flags&NLM_F_REPLACE, 0);
} }
neigh_release(n); if (n)
neigh_release(n);
end_bh_atomic(); end_bh_atomic();
return err; return err;
} }
...@@ -1043,7 +1047,7 @@ int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) ...@@ -1043,7 +1047,7 @@ int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
static int neigh_fill_info(struct sk_buff *skb, struct neighbour *n, static int neigh_fill_info(struct sk_buff *skb, struct neighbour *n,
pid_t pid, u32 seq, int event) u32 pid, u32 seq, int event)
{ {
unsigned long now = jiffies; unsigned long now = jiffies;
struct ndmsg *ndm; struct ndmsg *ndm;
......
...@@ -12,6 +12,8 @@ ...@@ -12,6 +12,8 @@
* as published by the Free Software Foundation; either version * as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version. * 2 of the License, or (at your option) any later version.
* *
* Fixes:
* Vitaly E. Lavrov RTA_OK arithmetics was wrong.
*/ */
#include <linux/config.h> #include <linux/config.h>
...@@ -29,6 +31,7 @@ ...@@ -29,6 +31,7 @@
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/skbuff.h> #include <linux/skbuff.h>
#include <linux/init.h> #include <linux/init.h>
...@@ -135,47 +138,8 @@ int rtnetlink_send(struct sk_buff *skb, u32 pid, unsigned group, int echo) ...@@ -135,47 +138,8 @@ int rtnetlink_send(struct sk_buff *skb, u32 pid, unsigned group, int echo)
return err; return err;
} }
#ifdef CONFIG_RTNL_OLD_IFINFO
static int rtnetlink_fill_ifinfo(struct sk_buff *skb, struct device *dev, static int rtnetlink_fill_ifinfo(struct sk_buff *skb, struct device *dev,
int type, pid_t pid, u32 seq) int type, u32 pid, u32 seq)
{
struct ifinfomsg *r;
struct nlmsghdr *nlh;
unsigned char *b = skb->tail;
nlh = NLMSG_PUT(skb, pid, seq, type, sizeof(*r));
if (pid) nlh->nlmsg_flags |= NLM_F_MULTI;
r = NLMSG_DATA(nlh);
r->ifi_addrlen = dev->addr_len;
r->ifi_address.sa_family = dev->type;
memcpy(&r->ifi_address.sa_data, dev->dev_addr, dev->addr_len);
r->ifi_broadcast.sa_family = dev->type;
memcpy(&r->ifi_broadcast.sa_data, dev->broadcast, dev->addr_len);
r->ifi_flags = dev->flags;
r->ifi_mtu = dev->mtu;
r->ifi_index = dev->ifindex;
r->ifi_link = dev->iflink;
strncpy(r->ifi_name, dev->name, IFNAMSIZ-1);
r->ifi_qdiscname[0] = 0;
r->ifi_qdisc = dev->qdisc_sleeping->handle;
if (dev->qdisc_sleeping)
strcpy(r->ifi_qdiscname, dev->qdisc_sleeping->ops->id);
if (dev->get_stats) {
struct net_device_stats *stats = dev->get_stats(dev);
if (stats)
RTA_PUT(skb, IFLA_STATS, sizeof(*stats), stats);
}
nlh->nlmsg_len = skb->tail - b;
return skb->len;
nlmsg_failure:
rtattr_failure:
skb_trim(skb, b - skb->data);
return -1;
}
#else
static int rtnetlink_fill_ifinfo(struct sk_buff *skb, struct device *dev,
int type, pid_t pid, u32 seq)
{ {
struct ifinfomsg *r; struct ifinfomsg *r;
struct nlmsghdr *nlh; struct nlmsghdr *nlh;
...@@ -218,7 +182,6 @@ static int rtnetlink_fill_ifinfo(struct sk_buff *skb, struct device *dev, ...@@ -218,7 +182,6 @@ static int rtnetlink_fill_ifinfo(struct sk_buff *skb, struct device *dev,
skb_trim(skb, b - skb->data); skb_trim(skb, b - skb->data);
return -1; return -1;
} }
#endif
int rtnetlink_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) int rtnetlink_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
{ {
...@@ -266,12 +229,7 @@ int rtnetlink_dump_all(struct sk_buff *skb, struct netlink_callback *cb) ...@@ -266,12 +229,7 @@ int rtnetlink_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
void rtmsg_ifinfo(int type, struct device *dev) void rtmsg_ifinfo(int type, struct device *dev)
{ {
struct sk_buff *skb; struct sk_buff *skb;
#ifdef CONFIG_RTNL_OLD_IFINFO
int size = NLMSG_SPACE(sizeof(struct ifinfomsg)+
RTA_LENGTH(sizeof(struct net_device_stats)));
#else
int size = NLMSG_GOODSIZE; int size = NLMSG_GOODSIZE;
#endif
skb = alloc_skb(size, GFP_KERNEL); skb = alloc_skb(size, GFP_KERNEL);
if (!skb) if (!skb)
...@@ -287,7 +245,7 @@ void rtmsg_ifinfo(int type, struct device *dev) ...@@ -287,7 +245,7 @@ void rtmsg_ifinfo(int type, struct device *dev)
static int rtnetlink_done(struct netlink_callback *cb) static int rtnetlink_done(struct netlink_callback *cb)
{ {
if (NETLINK_CREDS(cb->skb)->uid == 0 && cb->nlh->nlmsg_flags&NLM_F_ATOMIC) if (cap_raised(NETLINK_CB(cb->skb).eff_cap, CAP_NET_ADMIN) && cb->nlh->nlmsg_flags&NLM_F_ATOMIC)
rtnl_shunlock(); rtnl_shunlock();
return 0; return 0;
} }
...@@ -342,13 +300,13 @@ rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int *errp) ...@@ -342,13 +300,13 @@ rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int *errp)
sz_idx = type>>2; sz_idx = type>>2;
kind = type&3; kind = type&3;
if (kind != 2 && NETLINK_CREDS(skb)->uid) { if (kind != 2 && !cap_raised(NETLINK_CB(skb).eff_cap, CAP_NET_ADMIN)) {
*errp = -EPERM; *errp = -EPERM;
return -1; return -1;
} }
if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) { if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) {
int rlen; u32 rlen;
if (link->dumpit == NULL) if (link->dumpit == NULL)
link = &(rtnetlink_links[PF_UNSPEC][type]); link = &(rtnetlink_links[PF_UNSPEC][type]);
...@@ -357,12 +315,13 @@ rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int *errp) ...@@ -357,12 +315,13 @@ rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int *errp)
goto err_inval; goto err_inval;
/* Super-user locks all the tables to get atomic snapshot */ /* Super-user locks all the tables to get atomic snapshot */
if (NETLINK_CREDS(skb)->uid == 0 && nlh->nlmsg_flags&NLM_F_ATOMIC) if (cap_raised(NETLINK_CB(skb).eff_cap, CAP_NET_ADMIN)
&& nlh->nlmsg_flags&NLM_F_ATOMIC)
atomic_inc(&rtnl_rlockct); atomic_inc(&rtnl_rlockct);
if ((*errp = netlink_dump_start(rtnl, skb, nlh, if ((*errp = netlink_dump_start(rtnl, skb, nlh,
link->dumpit, link->dumpit,
rtnetlink_done)) != 0) { rtnetlink_done)) != 0) {
if (NETLINK_CREDS(skb)->uid == 0 && nlh->nlmsg_flags&NLM_F_ATOMIC) if (cap_raised(NETLINK_CB(skb).eff_cap, CAP_NET_ADMIN) && nlh->nlmsg_flags&NLM_F_ATOMIC)
atomic_dec(&rtnl_rlockct); atomic_dec(&rtnl_rlockct);
return -1; return -1;
} }
...@@ -431,7 +390,7 @@ extern __inline__ int rtnetlink_rcv_skb(struct sk_buff *skb) ...@@ -431,7 +390,7 @@ extern __inline__ int rtnetlink_rcv_skb(struct sk_buff *skb)
struct nlmsghdr * nlh; struct nlmsghdr * nlh;
while (skb->len >= NLMSG_SPACE(0)) { while (skb->len >= NLMSG_SPACE(0)) {
int rlen; u32 rlen;
nlh = (struct nlmsghdr *)skb->data; nlh = (struct nlmsghdr *)skb->data;
if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len) if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len)
......
...@@ -138,11 +138,15 @@ int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p) ...@@ -138,11 +138,15 @@ int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p)
for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg))
{ {
err = -EINVAL;
if ((unsigned long)(((char*)cmsg - (char*)msg->msg_control)
+ cmsg->cmsg_len) > msg->msg_controllen)
goto error;
if (cmsg->cmsg_level != SOL_SOCKET) if (cmsg->cmsg_level != SOL_SOCKET)
continue; continue;
err = -EINVAL;
switch (cmsg->cmsg_type) switch (cmsg->cmsg_type)
{ {
case SCM_RIGHTS: case SCM_RIGHTS:
......
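The __scm_send() hunk above now rejects any cmsghdr whose cmsg_len would run past msg_controllen before even looking at the level, closing a hole where a malformed length could walk the kernel off the end of the control buffer. Userspace that builds control messages with the standard CMSG_* macros already satisfies the check; a hedged, self-contained example that passes a file descriptor with SCM_RIGHTS (the socketpair and the descriptor being passed are illustrative).

/* Hedged example: a well-formed SCM_RIGHTS control message built with
 * the CMSG_* macros, which keeps cmsg_len consistent with
 * msg_controllen and so passes the new __scm_send() validation. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/uio.h>
#include <sys/socket.h>

static int send_fd(int sock, int fd_to_pass)
{
        char dummy = 'x';
        struct iovec iov = { &dummy, 1 };
        char control[CMSG_SPACE(sizeof(int))];
        struct msghdr msg;
        struct cmsghdr *cmsg;

        memset(&msg, 0, sizeof(msg));
        memset(control, 0, sizeof(control));
        msg.msg_iov = &iov;
        msg.msg_iovlen = 1;
        msg.msg_control = control;
        msg.msg_controllen = sizeof(control);

        cmsg = CMSG_FIRSTHDR(&msg);
        cmsg->cmsg_level = SOL_SOCKET;
        cmsg->cmsg_type = SCM_RIGHTS;
        cmsg->cmsg_len = CMSG_LEN(sizeof(int)); /* never runs past msg_controllen */
        memcpy(CMSG_DATA(cmsg), &fd_to_pass, sizeof(int));

        return sendmsg(sock, &msg, 0) < 0 ? -1 : 0;
}

int main(void)
{
        int sv[2];

        if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) < 0) {
                perror("socketpair");
                return 1;
        }
        if (send_fd(sv[0], STDOUT_FILENO) < 0)
                perror("sendmsg");
        close(sv[0]);
        close(sv[1]);
        return 0;
}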
...@@ -4,6 +4,8 @@ ...@@ -4,6 +4,8 @@
* Authors: Alan Cox <iiitac@pyr.swan.ac.uk> * Authors: Alan Cox <iiitac@pyr.swan.ac.uk>
* Florian La Roche <rzsfl@rz.uni-sb.de> * Florian La Roche <rzsfl@rz.uni-sb.de>
* *
* Version: $Id: skbuff.c,v 1.53 1998/08/19 13:32:44 freitag Exp $
*
* Fixes: * Fixes:
* Alan Cox : Fixed the worst of the load balancer bugs. * Alan Cox : Fixed the worst of the load balancer bugs.
* Dave Platt : Interrupt stacking fix. * Dave Platt : Interrupt stacking fix.
...@@ -96,14 +98,14 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here) ...@@ -96,14 +98,14 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here)
void show_net_buffers(void) void show_net_buffers(void)
{ {
printk(KERN_INFO "Networking buffers in use : %u\n", printk("Networking buffers in use : %u\n",
atomic_read(&net_skbcount)); atomic_read(&net_skbcount));
printk(KERN_INFO "Total network buffer allocations : %u\n", printk("Total network buffer allocations : %u\n",
atomic_read(&net_allocs)); atomic_read(&net_allocs));
printk(KERN_INFO "Total failed network buffer allocs : %u\n", printk("Total failed network buffer allocs : %u\n",
atomic_read(&net_fails)); atomic_read(&net_fails));
#ifdef CONFIG_INET #ifdef CONFIG_INET
printk(KERN_INFO "IP fragment buffer size : %u\n", printk("IP fragment buffer size : %u\n",
atomic_read(&ip_frag_mem)); atomic_read(&ip_frag_mem));
#endif #endif
} }
...@@ -365,7 +367,7 @@ void skb_add_mtu(int mtu) ...@@ -365,7 +367,7 @@ void skb_add_mtu(int mtu)
} }
#endif #endif
__initfunc(void skb_init(void)) void __init skb_init(void)
{ {
skbuff_head_cache = kmem_cache_create("skbuff_head_cache", skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
sizeof(struct sk_buff), sizeof(struct sk_buff),
......
...@@ -7,7 +7,7 @@ ...@@ -7,7 +7,7 @@
* handler for protocols to use and generic option handler. * handler for protocols to use and generic option handler.
* *
* *
* Version: @(#)sock.c 1.0.17 06/02/93 * Version: $Id: sock.c,v 1.70 1998/08/26 12:03:07 davem Exp $
* *
* Authors: Ross Biro, <bir7@leland.Stanford.Edu> * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
...@@ -78,6 +78,7 @@ ...@@ -78,6 +78,7 @@
* Chris Evans : Call suser() check last on F_SETOWN * Chris Evans : Call suser() check last on F_SETOWN
* Jay Schulist : Added SO_ATTACH_FILTER and SO_DETACH_FILTER. * Jay Schulist : Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
* Andi Kleen : Add sock_kmalloc()/sock_kfree_s() * Andi Kleen : Add sock_kmalloc()/sock_kfree_s()
* Andi Kleen : Fix write_space callback
* *
* To Fix: * To Fix:
* *
...@@ -445,6 +446,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname, ...@@ -445,6 +446,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
case SO_RCVLOWAT: case SO_RCVLOWAT:
case SO_SNDLOWAT: case SO_SNDLOWAT:
v.val=1; v.val=1;
break;
case SO_PASSCRED: case SO_PASSCRED:
v.val = sock->passcred; v.val = sock->passcred;
...@@ -615,19 +617,6 @@ unsigned long sock_rspace(struct sock *sk) ...@@ -615,19 +617,6 @@ unsigned long sock_rspace(struct sock *sk)
} }
/* FIXME: this is also insane. See above comment */
unsigned long sock_wspace(struct sock *sk)
{
int amt = 0;
if (sk != NULL && !(sk->shutdown & SEND_SHUTDOWN)) {
amt = sk->sndbuf - atomic_read(&sk->wmem_alloc);
if (amt < 0)
amt = 0;
}
return amt;
}
/* It is almost wait_for_tcp_memory minus release_sock/lock_sock. /* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
I think, these locks should be removed for datagram sockets. I think, these locks should be removed for datagram sockets.
*/ */
...@@ -746,17 +735,15 @@ void __release_sock(struct sock *sk) ...@@ -746,17 +735,15 @@ void __release_sock(struct sock *sk)
void sklist_remove_socket(struct sock **list, struct sock *sk) void sklist_remove_socket(struct sock **list, struct sock *sk)
{ {
unsigned long flags;
struct sock *s; struct sock *s;
save_flags(flags); start_bh_atomic();
cli();
s= *list; s= *list;
if(s==sk) if(s==sk)
{ {
*list = s->next; *list = s->next;
restore_flags(flags); end_bh_atomic();
return; return;
} }
while(s && s->next) while(s && s->next)
...@@ -764,22 +751,19 @@ void sklist_remove_socket(struct sock **list, struct sock *sk) ...@@ -764,22 +751,19 @@ void sklist_remove_socket(struct sock **list, struct sock *sk)
if(s->next==sk) if(s->next==sk)
{ {
s->next=sk->next; s->next=sk->next;
restore_flags(flags); break;
return;
} }
s=s->next; s=s->next;
} }
restore_flags(flags); end_bh_atomic();
} }
void sklist_insert_socket(struct sock **list, struct sock *sk) void sklist_insert_socket(struct sock **list, struct sock *sk)
{ {
unsigned long flags; start_bh_atomic();
save_flags(flags);
cli();
sk->next= *list; sk->next= *list;
*list=sk; *list=sk;
restore_flags(flags); end_bh_atomic();
} }
/* /*
...@@ -914,6 +898,10 @@ int sock_no_getsockopt(struct socket *sock, int level, int optname, ...@@ -914,6 +898,10 @@ int sock_no_getsockopt(struct socket *sock, int level, int optname,
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
/*
* Note: if you add something that sleeps here then change sock_fcntl()
* to do proper fd locking.
*/
int sock_no_fcntl(struct socket *sock, unsigned int cmd, unsigned long arg) int sock_no_fcntl(struct socket *sock, unsigned int cmd, unsigned long arg)
{ {
struct sock *sk = sock->sk; struct sock *sk = sock->sk;
...@@ -971,12 +959,15 @@ void sock_def_callback2(struct sock *sk, int len) ...@@ -971,12 +959,15 @@ void sock_def_callback2(struct sock *sk, int len)
} }
} }
void sock_def_callback3(struct sock *sk) void sock_def_write_space(struct sock *sk)
{ {
if(!sk->dead) if(!sk->dead)
{ {
wake_up_interruptible(sk->sleep); wake_up_interruptible(sk->sleep);
sock_wake_async(sk->socket, 2);
/* Should agree with poll, otherwise some programs break */
if (sock_writeable(sk))
sock_wake_async(sk->socket, 2);
} }
} }
...@@ -1011,7 +1002,7 @@ void sock_init_data(struct socket *sock, struct sock *sk) ...@@ -1011,7 +1002,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
sk->state_change = sock_def_callback1; sk->state_change = sock_def_callback1;
sk->data_ready = sock_def_callback2; sk->data_ready = sock_def_callback2;
sk->write_space = sock_def_callback3; sk->write_space = sock_def_write_space;
sk->error_report = sock_def_callback1; sk->error_report = sock_def_callback1;
sk->destruct = sock_def_destruct; sk->destruct = sock_def_destruct;
......
This diff is collapsed.
This diff is collapsed.
/* /*
* NET3 IP device support routines. * NET3 IP device support routines.
* *
* Version: $Id: devinet.c,v 1.22 1998/05/08 21:06:26 davem Exp $ * Version: $Id: devinet.c,v 1.23 1998/08/26 12:03:21 davem Exp $
* *
* This program is free software; you can redistribute it and/or * This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License * modify it under the terms of the GNU General Public License
...@@ -533,8 +533,6 @@ int devinet_ioctl(unsigned int cmd, void *arg) ...@@ -533,8 +533,6 @@ int devinet_ioctl(unsigned int cmd, void *arg)
inet_del_ifa(in_dev, ifap, 0); inet_del_ifa(in_dev, ifap, 0);
ifa->ifa_broadcast = 0; ifa->ifa_broadcast = 0;
ifa->ifa_anycast = 0; ifa->ifa_anycast = 0;
ifa->ifa_prefixlen = 32;
ifa->ifa_mask = inet_make_mask(32);
} }
ifa->ifa_address = ifa->ifa_address =
...@@ -545,6 +543,9 @@ int devinet_ioctl(unsigned int cmd, void *arg) ...@@ -545,6 +543,9 @@ int devinet_ioctl(unsigned int cmd, void *arg)
ifa->ifa_mask = inet_make_mask(ifa->ifa_prefixlen); ifa->ifa_mask = inet_make_mask(ifa->ifa_prefixlen);
if ((dev->flags&IFF_BROADCAST) && ifa->ifa_prefixlen < 31) if ((dev->flags&IFF_BROADCAST) && ifa->ifa_prefixlen < 31)
ifa->ifa_broadcast = ifa->ifa_address|~ifa->ifa_mask; ifa->ifa_broadcast = ifa->ifa_address|~ifa->ifa_mask;
} else {
ifa->ifa_prefixlen = 32;
ifa->ifa_mask = inet_make_mask(32);
} }
ret = inet_set_ifa(dev, ifa); ret = inet_set_ifa(dev, ifa);
break; break;
...@@ -702,6 +703,16 @@ static int inetdev_event(struct notifier_block *this, unsigned long event, void ...@@ -702,6 +703,16 @@ static int inetdev_event(struct notifier_block *this, unsigned long event, void
case NETDEV_UNREGISTER: case NETDEV_UNREGISTER:
inetdev_destroy(in_dev); inetdev_destroy(in_dev);
break; break;
case NETDEV_CHANGENAME:
if (in_dev->ifa_list) {
struct in_ifaddr *ifa;
for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next)
memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
/* Do not notify about label change, this event is
not interesting to applications using netlink.
*/
}
break;
} }
return NOTIFY_DONE; return NOTIFY_DONE;
...@@ -716,7 +727,7 @@ struct notifier_block ip_netdev_notifier={ ...@@ -716,7 +727,7 @@ struct notifier_block ip_netdev_notifier={
#ifdef CONFIG_RTNETLINK #ifdef CONFIG_RTNETLINK
static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa, static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
pid_t pid, u32 seq, int event) u32 pid, u32 seq, int event)
{ {
struct ifaddrmsg *ifm; struct ifaddrmsg *ifm;
struct nlmsghdr *nlh; struct nlmsghdr *nlh;
...@@ -729,7 +740,7 @@ static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa, ...@@ -729,7 +740,7 @@ static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
ifm->ifa_flags = ifa->ifa_flags|IFA_F_PERMANENT; ifm->ifa_flags = ifa->ifa_flags|IFA_F_PERMANENT;
ifm->ifa_scope = ifa->ifa_scope; ifm->ifa_scope = ifa->ifa_scope;
ifm->ifa_index = ifa->ifa_dev->dev->ifindex; ifm->ifa_index = ifa->ifa_dev->dev->ifindex;
if (ifa->ifa_prefixlen) if (ifa->ifa_address)
RTA_PUT(skb, IFA_ADDRESS, 4, &ifa->ifa_address); RTA_PUT(skb, IFA_ADDRESS, 4, &ifa->ifa_address);
if (ifa->ifa_local) if (ifa->ifa_local)
RTA_PUT(skb, IFA_LOCAL, 4, &ifa->ifa_local); RTA_PUT(skb, IFA_LOCAL, 4, &ifa->ifa_local);
......
This diff is collapsed.