Commit 9c80ee75 authored by David S. Miller

Merge bk://212.42.230.204/netfilter-2.6

into nuts.davemloft.net:/disk1/BK/net-2.6
parents 3c417db9 e3ede754
...@@ -78,6 +78,7 @@ config PPC_PMAC ...@@ -78,6 +78,7 @@ config PPC_PMAC
bool " Apple G5 based machines" bool " Apple G5 based machines"
default y default y
select ADB_PMU select ADB_PMU
select U3_DART
config PPC config PPC
bool bool
...@@ -109,16 +110,10 @@ config PPC_SPLPAR ...@@ -109,16 +110,10 @@ config PPC_SPLPAR
processors, that is, which share physical processors between processors, that is, which share physical processors between
two or more partitions. two or more partitions.
config PMAC_DART config U3_DART
bool "Enable DART/IOMMU on PowerMac (allow >2G of RAM)" bool
depends on PPC_PMAC depends on PPC_MULTIPLATFORM
depends on EXPERIMENTAL
default n default n
help
Enabling DART makes it possible to boot a PowerMac G5 with more
than 2GB of memory. Note that the code is very new and untested
at this time, so it has to be considered experimental. Enabling
this might result in data loss.
config PPC_PMAC64 config PPC_PMAC64
bool bool
......
...@@ -49,7 +49,7 @@ obj-$(CONFIG_HVCS) += hvcserver.o ...@@ -49,7 +49,7 @@ obj-$(CONFIG_HVCS) += hvcserver.o
obj-$(CONFIG_PPC_PMAC) += pmac_setup.o pmac_feature.o pmac_pci.o \ obj-$(CONFIG_PPC_PMAC) += pmac_setup.o pmac_feature.o pmac_pci.o \
pmac_time.o pmac_nvram.o pmac_low_i2c.o \ pmac_time.o pmac_nvram.o pmac_low_i2c.o \
open_pic_u3.o open_pic_u3.o
obj-$(CONFIG_PMAC_DART) += pmac_iommu.o obj-$(CONFIG_U3_DART) += u3_iommu.o
ifdef CONFIG_SMP ifdef CONFIG_SMP
obj-$(CONFIG_PPC_PMAC) += pmac_smp.o smp-tbsync.o obj-$(CONFIG_PPC_PMAC) += pmac_smp.o smp-tbsync.o
......
...@@ -687,7 +687,7 @@ _GLOBAL(kernel_thread) ...@@ -687,7 +687,7 @@ _GLOBAL(kernel_thread)
ld r30,-16(r1) ld r30,-16(r1)
blr blr
#ifndef CONFIG_PPC_PSERIE /* hack hack hack */ #ifndef CONFIG_PPC_PSERIES /* hack hack hack */
#define ppc_rtas sys_ni_syscall #define ppc_rtas sys_ni_syscall
#endif #endif
......
...@@ -439,7 +439,7 @@ void hpte_init_lpar(void) ...@@ -439,7 +439,7 @@ void hpte_init_lpar(void)
ppc_md.hpte_insert = pSeries_lpar_hpte_insert; ppc_md.hpte_insert = pSeries_lpar_hpte_insert;
ppc_md.hpte_remove = pSeries_lpar_hpte_remove; ppc_md.hpte_remove = pSeries_lpar_hpte_remove;
ppc_md.flush_hash_range = pSeries_lpar_flush_hash_range; ppc_md.flush_hash_range = pSeries_lpar_flush_hash_range;
ppc_md.htpe_clear_all = pSeries_lpar_hptab_clear; ppc_md.hpte_clear_all = pSeries_lpar_hptab_clear;
htab_finish_init(); htab_finish_init();
} }
...@@ -29,6 +29,4 @@ extern void pmac_ide_init_hwif_ports(hw_regs_t *hw, ...@@ -29,6 +29,4 @@ extern void pmac_ide_init_hwif_ports(hw_regs_t *hw,
extern void pmac_nvram_init(void); extern void pmac_nvram_init(void);
extern void pmac_iommu_alloc(void);
#endif /* __PMAC_H__ */ #endif /* __PMAC_H__ */
...@@ -664,9 +664,7 @@ void __init pmac_pcibios_fixup(void) ...@@ -664,9 +664,7 @@ void __init pmac_pcibios_fixup(void)
pci_fix_bus_sysdata(); pci_fix_bus_sysdata();
#ifdef CONFIG_PMAC_DART iommu_setup_u3();
iommu_setup_pmac();
#endif /* CONFIG_PMAC_DART */
} }
......
...@@ -447,16 +447,6 @@ static int __init pmac_probe(int platform) ...@@ -447,16 +447,6 @@ static int __init pmac_probe(int platform)
if (platform != PLATFORM_POWERMAC) if (platform != PLATFORM_POWERMAC)
return 0; return 0;
#ifdef CONFIG_PMAC_DART
/*
* On U3, the DART (iommu) must be allocated now since it
* has an impact on htab_initialize (due to the large page it
* occupies having to be broken up so the DART itself is not
* part of the cacheable linar mapping
*/
pmac_iommu_alloc();
#endif /* CONFIG_PMAC_DART */
return 1; return 1;
} }
......
...@@ -423,13 +423,6 @@ static void __init early_cmdline_parse(void) ...@@ -423,13 +423,6 @@ static void __init early_cmdline_parse(void)
else if (!strncmp(opt, RELOC("force"), 5)) else if (!strncmp(opt, RELOC("force"), 5))
RELOC(iommu_force_on) = 1; RELOC(iommu_force_on) = 1;
} }
#ifndef CONFIG_PMAC_DART
if (RELOC(of_platform) == PLATFORM_POWERMAC) {
RELOC(ppc64_iommu_off) = 1;
prom_printf("DART disabled on PowerMac !\n");
}
#endif
} }
/* /*
......
...@@ -50,6 +50,7 @@ ...@@ -50,6 +50,7 @@
#include <asm/setup.h> #include <asm/setup.h>
#include <asm/system.h> #include <asm/system.h>
#include <asm/rtas.h> #include <asm/rtas.h>
#include <asm/iommu.h>
#ifdef DEBUG #ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt) #define DBG(fmt...) udbg_printf(fmt)
...@@ -405,6 +406,16 @@ void __init early_setup(unsigned long dt_ptr) ...@@ -405,6 +406,16 @@ void __init early_setup(unsigned long dt_ptr)
DBG("Found, Initializing memory management...\n"); DBG("Found, Initializing memory management...\n");
#ifdef CONFIG_U3_DART
/*
* On U3, the DART (iommu) must be allocated now since it
* has an impact on htab_initialize (due to the large page it
* occupies having to be broken up so the DART itself is not
* part of the cacheable linear mapping
*/
alloc_u3_dart_table();
#endif /* CONFIG_U3_DART */
/* /*
* Initialize stab / SLB management * Initialize stab / SLB management
*/ */
......
/* /*
* arch/ppc64/kernel/pmac_iommu.c * arch/ppc64/kernel/u3_iommu.c
* *
* Copyright (C) 2004 Olof Johansson <olof@austin.ibm.com>, IBM Corporation * Copyright (C) 2004 Olof Johansson <olof@austin.ibm.com>, IBM Corporation
* *
...@@ -7,7 +7,7 @@ ...@@ -7,7 +7,7 @@
* Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
* Copyright (C) 2004 Olof Johansson <olof@austin.ibm.com>, IBM Corporation * Copyright (C) 2004 Olof Johansson <olof@austin.ibm.com>, IBM Corporation
* *
* Dynamic DMA mapping support, PowerMac G5 (DART)-specific parts. * Dynamic DMA mapping support, Apple U3 & IBM CPC925 "DART" iommu.
* *
* *
* This program is free software; you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
...@@ -89,7 +89,7 @@ static unsigned int *dart; ...@@ -89,7 +89,7 @@ static unsigned int *dart;
/* Dummy val that entries are set to when unused */ /* Dummy val that entries are set to when unused */
static unsigned int dart_emptyval; static unsigned int dart_emptyval;
static struct iommu_table iommu_table_pmac; static struct iommu_table iommu_table_u3;
static int dart_dirty; static int dart_dirty;
#define DBG(...) #define DBG(...)
...@@ -141,7 +141,7 @@ static void dart_flush(struct iommu_table *tbl) ...@@ -141,7 +141,7 @@ static void dart_flush(struct iommu_table *tbl)
dart_dirty = 0; dart_dirty = 0;
} }
static void dart_build_pmac(struct iommu_table *tbl, long index, static void dart_build(struct iommu_table *tbl, long index,
long npages, unsigned long uaddr, long npages, unsigned long uaddr,
enum dma_data_direction direction) enum dma_data_direction direction)
{ {
...@@ -152,7 +152,7 @@ static void dart_build_pmac(struct iommu_table *tbl, long index, ...@@ -152,7 +152,7 @@ static void dart_build_pmac(struct iommu_table *tbl, long index,
dp = ((unsigned int*)tbl->it_base) + index; dp = ((unsigned int*)tbl->it_base) + index;
/* On pmac, all memory is contigous, so we can move this /* On U3, all memory is contigous, so we can move this
* out of the loop. * out of the loop.
*/ */
while (npages--) { while (npages--) {
...@@ -168,7 +168,7 @@ static void dart_build_pmac(struct iommu_table *tbl, long index, ...@@ -168,7 +168,7 @@ static void dart_build_pmac(struct iommu_table *tbl, long index,
} }
static void dart_free_pmac(struct iommu_table *tbl, long index, long npages) static void dart_free(struct iommu_table *tbl, long index, long npages)
{ {
unsigned int *dp; unsigned int *dp;
...@@ -239,32 +239,32 @@ static int dart_init(struct device_node *dart_node) ...@@ -239,32 +239,32 @@ static int dart_init(struct device_node *dart_node)
/* Invalidate DART to get rid of possible stale TLBs */ /* Invalidate DART to get rid of possible stale TLBs */
dart_tlb_invalidate_all(); dart_tlb_invalidate_all();
iommu_table_pmac.it_busno = 0; iommu_table_u3.it_busno = 0;
/* Units of tce entries */ /* Units of tce entries */
iommu_table_pmac.it_offset = 0; iommu_table_u3.it_offset = 0;
/* Set the tce table size - measured in pages */ /* Set the tce table size - measured in pages */
iommu_table_pmac.it_size = dart_tablesize >> PAGE_SHIFT; iommu_table_u3.it_size = dart_tablesize >> PAGE_SHIFT;
/* Initialize the common IOMMU code */ /* Initialize the common IOMMU code */
iommu_table_pmac.it_base = (unsigned long)dart_vbase; iommu_table_u3.it_base = (unsigned long)dart_vbase;
iommu_table_pmac.it_index = 0; iommu_table_u3.it_index = 0;
iommu_table_pmac.it_blocksize = 1; iommu_table_u3.it_blocksize = 1;
iommu_table_pmac.it_entrysize = sizeof(u32); iommu_table_u3.it_entrysize = sizeof(u32);
iommu_init_table(&iommu_table_pmac); iommu_init_table(&iommu_table_u3);
/* Reserve the last page of the DART to avoid possible prefetch /* Reserve the last page of the DART to avoid possible prefetch
* past the DART mapped area * past the DART mapped area
*/ */
set_bit(iommu_table_pmac.it_mapsize - 1, iommu_table_pmac.it_map); set_bit(iommu_table_u3.it_mapsize - 1, iommu_table_u3.it_map);
printk(KERN_INFO "U3-DART IOMMU initialized\n"); printk(KERN_INFO "U3/CPC925 DART IOMMU initialized\n");
return 0; return 0;
} }
void iommu_setup_pmac(void) void iommu_setup_u3(void)
{ {
struct pci_dev *dev = NULL; struct pci_dev *dev = NULL;
struct device_node *dn; struct device_node *dn;
...@@ -275,8 +275,8 @@ void iommu_setup_pmac(void) ...@@ -275,8 +275,8 @@ void iommu_setup_pmac(void)
return; return;
/* Setup low level TCE operations for the core IOMMU code */ /* Setup low level TCE operations for the core IOMMU code */
ppc_md.tce_build = dart_build_pmac; ppc_md.tce_build = dart_build;
ppc_md.tce_free = dart_free_pmac; ppc_md.tce_free = dart_free;
ppc_md.tce_flush = dart_flush; ppc_md.tce_flush = dart_flush;
/* Initialize the DART HW */ /* Initialize the DART HW */
...@@ -296,11 +296,11 @@ void iommu_setup_pmac(void) ...@@ -296,11 +296,11 @@ void iommu_setup_pmac(void)
*/ */
struct device_node *dn = pci_device_to_OF_node(dev); struct device_node *dn = pci_device_to_OF_node(dev);
if (dn) if (dn)
dn->iommu_table = &iommu_table_pmac; dn->iommu_table = &iommu_table_u3;
} }
} }
void __init pmac_iommu_alloc(void) void __init alloc_u3_dart_table(void)
{ {
/* Only reserve DART space if machine has more than 2GB of RAM /* Only reserve DART space if machine has more than 2GB of RAM
* or if requested with iommu=on on cmdline. * or if requested with iommu=on on cmdline.
......
...@@ -71,9 +71,9 @@ ...@@ -71,9 +71,9 @@
* *
*/ */
#ifdef CONFIG_PMAC_DART #ifdef CONFIG_U3_DART
extern unsigned long dart_tablebase; extern unsigned long dart_tablebase;
#endif /* CONFIG_PMAC_DART */ #endif /* CONFIG_U3_DART */
HTAB htab_data = {NULL, 0, 0, 0, 0}; HTAB htab_data = {NULL, 0, 0, 0, 0};
...@@ -203,7 +203,7 @@ void __init htab_initialize(void) ...@@ -203,7 +203,7 @@ void __init htab_initialize(void)
DBG("creating mapping for region: %lx : %lx\n", base, size); DBG("creating mapping for region: %lx : %lx\n", base, size);
#ifdef CONFIG_PMAC_DART #ifdef CONFIG_U3_DART
/* Do not map the DART space. Fortunately, it will be aligned /* Do not map the DART space. Fortunately, it will be aligned
* in such a way that it will not cross two lmb regions and will * in such a way that it will not cross two lmb regions and will
* fit within a single 16Mb page. * fit within a single 16Mb page.
...@@ -223,7 +223,7 @@ void __init htab_initialize(void) ...@@ -223,7 +223,7 @@ void __init htab_initialize(void)
mode_rw, use_largepages); mode_rw, use_largepages);
continue; continue;
} }
#endif /* CONFIG_PMAC_DART */ #endif /* CONFIG_U3_DART */
create_pte_mapping(base, base + size, mode_rw, use_largepages); create_pte_mapping(base, base + size, mode_rw, use_largepages);
} }
DBG(" <- htab_initialize()\n"); DBG(" <- htab_initialize()\n");
......
...@@ -88,7 +88,7 @@ obj-$(CONFIG_FB_68328) += 68328fb.o cfbfillrect.o cfbcopyarea.o cfbim ...@@ -88,7 +88,7 @@ obj-$(CONFIG_FB_68328) += 68328fb.o cfbfillrect.o cfbcopyarea.o cfbim
obj-$(CONFIG_FB_GBE) += gbefb.o cfbfillrect.o cfbcopyarea.o cfbimgblt.o obj-$(CONFIG_FB_GBE) += gbefb.o cfbfillrect.o cfbcopyarea.o cfbimgblt.o
obj-$(CONFIG_FB_CIRRUS) += cirrusfb.o cfbfillrect.o cfbimgblt.o cfbcopyarea.o obj-$(CONFIG_FB_CIRRUS) += cirrusfb.o cfbfillrect.o cfbimgblt.o cfbcopyarea.o
obj-$(CONFIG_FB_ASILIANT) += asiliantfb.o cfbfillrect.o cfbcopyarea.o cfbimgblt.o obj-$(CONFIG_FB_ASILIANT) += asiliantfb.o cfbfillrect.o cfbcopyarea.o cfbimgblt.o
obj-$(CONFIG_FB_PXA) += pxafb.o cfbimgblt.o cfbcopyarea.o cfbfillrect0.o obj-$(CONFIG_FB_PXA) += pxafb.o cfbimgblt.o cfbcopyarea.o cfbfillrect.o
# Platform or fallback drivers go here # Platform or fallback drivers go here
obj-$(CONFIG_FB_VESA) += vesafb.o cfbfillrect.o cfbcopyarea.o cfbimgblt.o obj-$(CONFIG_FB_VESA) += vesafb.o cfbfillrect.o cfbcopyarea.o cfbimgblt.o
......
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
* ldm - Support for Windows Logical Disk Manager (Dynamic Disks) * ldm - Support for Windows Logical Disk Manager (Dynamic Disks)
* *
* Copyright (C) 2001,2002 Richard Russon <ldm@flatcap.org> * Copyright (C) 2001,2002 Richard Russon <ldm@flatcap.org>
* Copyright (C) 2001 Anton Altaparmakov <aia21@cantab.net> * Copyright (c) 2001-2004 Anton Altaparmakov
* Copyright (C) 2001,2002 Jakob Kemi <jakob.kemi@telia.com> * Copyright (C) 2001,2002 Jakob Kemi <jakob.kemi@telia.com>
* *
* Documentation is available at http://linux-ntfs.sf.net/ldm * Documentation is available at http://linux-ntfs.sf.net/ldm
...@@ -517,9 +517,15 @@ static BOOL ldm_validate_vmdb (struct block_device *bdev, unsigned long base, ...@@ -517,9 +517,15 @@ static BOOL ldm_validate_vmdb (struct block_device *bdev, unsigned long base,
if (vm->vblk_offset != 512) if (vm->vblk_offset != 512)
ldm_info ("VBLKs start at offset 0x%04x.", vm->vblk_offset); ldm_info ("VBLKs start at offset 0x%04x.", vm->vblk_offset);
/* FIXME: How should we handle this situation? */ /*
if ((vm->vblk_size * vm->last_vblk_seq) != (toc->bitmap1_size << 9)) * The last_vblk_seq can be before the end of the vmdb, just make sure
ldm_info ("VMDB and TOCBLOCK don't agree on the database size."); * it is not out of bounds.
*/
if ((vm->vblk_size * vm->last_vblk_seq) > (toc->bitmap1_size << 9)) {
ldm_crit ("VMDB exceeds allowed size specified by TOCBLOCK. "
"Database is corrupt. Aborting.");
goto out;
}
result = TRUE; result = TRUE;
out: out:
......
...@@ -108,7 +108,7 @@ struct scatterlist; ...@@ -108,7 +108,7 @@ struct scatterlist;
/* Walks all buses and creates iommu tables */ /* Walks all buses and creates iommu tables */
extern void iommu_setup_pSeries(void); extern void iommu_setup_pSeries(void);
extern void iommu_setup_pmac(void); extern void iommu_setup_u3(void);
/* Creates table for an individual device node */ /* Creates table for an individual device node */
extern void iommu_devnode_init(struct device_node *dn); extern void iommu_devnode_init(struct device_node *dn);
...@@ -155,6 +155,8 @@ extern void tce_init_iSeries(void); ...@@ -155,6 +155,8 @@ extern void tce_init_iSeries(void);
extern void pci_iommu_init(void); extern void pci_iommu_init(void);
extern void pci_dma_init_direct(void); extern void pci_dma_init_direct(void);
extern void alloc_u3_dart_table(void);
extern int ppc64_iommu_off; extern int ppc64_iommu_off;
#endif /* _ASM_IOMMU_H */ #endif /* _ASM_IOMMU_H */
...@@ -58,7 +58,7 @@ struct machdep_calls { ...@@ -58,7 +58,7 @@ struct machdep_calls {
int local); int local);
/* special for kexec, to be called in real mode, linar mapping is /* special for kexec, to be called in real mode, linar mapping is
* destroyed as well */ * destroyed as well */
void (*htpe_clear_all)(void); void (*hpte_clear_all)(void);
void (*tce_build)(struct iommu_table * tbl, void (*tce_build)(struct iommu_table * tbl,
long index, long index,
......
...@@ -59,54 +59,7 @@ struct systemcfg { ...@@ -59,54 +59,7 @@ struct systemcfg {
#ifdef __KERNEL__ #ifdef __KERNEL__
extern struct systemcfg *systemcfg; extern struct systemcfg *systemcfg;
#else #endif
/* Processor Version Register (PVR) field extraction */
#define PVR_VER(pvr) (((pvr) >> 16) & 0xFFFF) /* Version field */
#define PVR_REV(pvr) (((pvr) >> 0) & 0xFFFF) /* Revison field */
/* Processor Version Numbers */
#define PV_NORTHSTAR 0x0033
#define PV_PULSAR 0x0034
#define PV_POWER4 0x0035
#define PV_ICESTAR 0x0036
#define PV_SSTAR 0x0037
#define PV_POWER4p 0x0038
#define PV_GPUL 0x0039
#define PV_POWER5 0x003a
#define PV_970FX 0x003c
#define PV_630 0x0040
#define PV_630p 0x0041
/* Platforms supported by PPC64 */
#define PLATFORM_PSERIES 0x0100
#define PLATFORM_PSERIES_LPAR 0x0101
#define PLATFORM_ISERIES_LPAR 0x0201
#define PLATFORM_POWERMAC 0x0400
/* Compatibility with drivers coming from PPC32 world */
#define _machine (systemcfg->platform)
#define _MACH_Pmac PLATFORM_POWERMAC
static inline volatile struct systemcfg *systemcfg_init(void)
{
int fd = open("/proc/ppc64/systemcfg", O_RDONLY);
volatile struct systemcfg *ret;
if (fd == -1)
return 0;
ret = mmap(0, sizeof(struct systemcfg), PROT_READ, MAP_SHARED, fd, 0);
close(fd);
if (!ret)
return 0;
if (ret->version.major != SYSTEMCFG_MAJOR || ret->version.minor < SYSTEMCFG_MINOR) {
munmap((void *)ret, sizeof(struct systemcfg));
return 0;
}
return ret;
}
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
......
...@@ -205,6 +205,13 @@ typedef struct tcp_pcount { ...@@ -205,6 +205,13 @@ typedef struct tcp_pcount {
__u32 val; __u32 val;
} tcp_pcount_t; } tcp_pcount_t;
enum tcp_congestion_algo {
TCP_RENO=0,
TCP_VEGAS,
TCP_WESTWOOD,
TCP_BIC,
};
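The per-connection `adv_cong` byte added here replaces scattered global-sysctl checks with a single selector. A minimal standalone sketch of the idea, assuming a hypothetical `choose_algo()` helper (the kernel's actual selection happens at connection setup via `tcp_ca_init()`, declared later in this diff):

```c
/* Standalone sketch of a per-connection congestion-algorithm selector,
 * mirroring the new adv_cong field and tcp_is_*() macros.  choose_algo()
 * is hypothetical; it only stands in for the kernel's setup-time choice. */
#include <stdio.h>

enum tcp_congestion_algo { TCP_RENO = 0, TCP_VEGAS, TCP_WESTWOOD, TCP_BIC };

struct conn { unsigned char adv_cong; };	/* one byte, like tp->adv_cong */

#define conn_is_vegas(c)    ((c)->adv_cong == TCP_VEGAS)
#define conn_is_westwood(c) ((c)->adv_cong == TCP_WESTWOOD)
#define conn_is_bic(c)      ((c)->adv_cong == TCP_BIC)

/* Hypothetical: pick exactly one algorithm from sysctl-style knobs. */
static void choose_algo(struct conn *c, int vegas, int westwood, int bic)
{
	if (bic)
		c->adv_cong = TCP_BIC;
	else if (westwood)
		c->adv_cong = TCP_WESTWOOD;
	else if (vegas)
		c->adv_cong = TCP_VEGAS;
	else
		c->adv_cong = TCP_RENO;	/* default: plain Reno */
}

int main(void)
{
	struct conn c;

	choose_algo(&c, 0, 1, 0);
	printf("westwood? %d  bic? %d\n", conn_is_westwood(&c), conn_is_bic(&c));
	return 0;	/* prints "westwood? 1  bic? 0" */
}
```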
struct tcp_opt { struct tcp_opt {
int tcp_header_len; /* Bytes of tcp header to send */ int tcp_header_len; /* Bytes of tcp header to send */
...@@ -265,7 +272,7 @@ struct tcp_opt { ...@@ -265,7 +272,7 @@ struct tcp_opt {
__u8 frto_counter; /* Number of new acks after RTO */ __u8 frto_counter; /* Number of new acks after RTO */
__u32 frto_highmark; /* snd_nxt when RTO occurred */ __u32 frto_highmark; /* snd_nxt when RTO occurred */
__u8 unused_pad; __u8 adv_cong; /* Using Vegas, Westwood, or BIC */
__u8 defer_accept; /* User waits for some data after accept() */ __u8 defer_accept; /* User waits for some data after accept() */
/* one byte hole, try to pack */ /* one byte hole, try to pack */
...@@ -412,7 +419,6 @@ struct tcp_opt { ...@@ -412,7 +419,6 @@ struct tcp_opt {
__u32 beg_snd_nxt; /* right edge during last RTT */ __u32 beg_snd_nxt; /* right edge during last RTT */
__u32 beg_snd_una; /* left edge during last RTT */ __u32 beg_snd_una; /* left edge during last RTT */
__u32 beg_snd_cwnd; /* saves the size of the cwnd */ __u32 beg_snd_cwnd; /* saves the size of the cwnd */
__u8 do_vegas; /* do vegas for this connection */
__u8 doing_vegas_now;/* if true, do vegas for this RTT */ __u8 doing_vegas_now;/* if true, do vegas for this RTT */
__u16 cntRTT; /* # of RTTs measured within last RTT */ __u16 cntRTT; /* # of RTTs measured within last RTT */
__u32 minRTT; /* min of RTTs measured within last RTT (in usec) */ __u32 minRTT; /* min of RTTs measured within last RTT (in usec) */
......
...@@ -156,6 +156,29 @@ do { \ ...@@ -156,6 +156,29 @@ do { \
__wait_event(wq, condition); \ __wait_event(wq, condition); \
} while (0) } while (0)
#define __wait_event_timeout(wq, condition, ret) \
do { \
DEFINE_WAIT(__wait); \
\
for (;;) { \
prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
if (condition) \
break; \
ret = schedule_timeout(ret); \
if (!ret) \
break; \
} \
finish_wait(&wq, &__wait); \
} while (0)
#define wait_event_timeout(wq, condition, timeout) \
({ \
long __ret = timeout; \
if (!(condition)) \
__wait_event_timeout(wq, condition, __ret); \
__ret; \
})
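A typical caller of the new `wait_event_timeout()` sleeps until either the condition becomes true or the timeout (in jiffies) expires, and gets back the remaining jiffies, or 0 on timeout. A hypothetical driver fragment, purely for illustration:

```c
/* Hypothetical driver fragment: wait up to one second for hardware to
 * signal completion ("done" is set and "wq" woken from the IRQ handler). */
#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static DECLARE_WAIT_QUEUE_HEAD(wq);
static int done;

static int wait_for_hw(void)
{
	long left = wait_event_timeout(wq, done != 0, HZ);

	if (!left)
		return -ETIMEDOUT;	/* timed out, condition still false */
	return 0;			/* woke up with done != 0, "left" jiffies to spare */
}
```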
#define __wait_event_interruptible(wq, condition, ret) \ #define __wait_event_interruptible(wq, condition, ret) \
do { \ do { \
DEFINE_WAIT(__wait); \ DEFINE_WAIT(__wait); \
......
...@@ -74,7 +74,7 @@ extern int ipv6_rcv_saddr_equal(const struct sock *sk, ...@@ -74,7 +74,7 @@ extern int ipv6_rcv_saddr_equal(const struct sock *sk,
const struct sock *sk2); const struct sock *sk2);
extern void addrconf_join_solict(struct net_device *dev, extern void addrconf_join_solict(struct net_device *dev,
struct in6_addr *addr); struct in6_addr *addr);
extern void addrconf_leave_solict(struct net_device *dev, extern void addrconf_leave_solict(struct inet6_dev *idev,
struct in6_addr *addr); struct in6_addr *addr);
/* /*
...@@ -89,6 +89,7 @@ extern int inet6_mc_check(struct sock *sk, struct in6_addr *mc_addr, ...@@ -89,6 +89,7 @@ extern int inet6_mc_check(struct sock *sk, struct in6_addr *mc_addr,
struct in6_addr *src_addr); struct in6_addr *src_addr);
extern int ipv6_dev_mc_inc(struct net_device *dev, struct in6_addr *addr); extern int ipv6_dev_mc_inc(struct net_device *dev, struct in6_addr *addr);
extern int __ipv6_dev_mc_dec(struct inet6_dev *idev, struct in6_addr *addr);
extern int ipv6_dev_mc_dec(struct net_device *dev, struct in6_addr *addr); extern int ipv6_dev_mc_dec(struct net_device *dev, struct in6_addr *addr);
extern void ipv6_mc_up(struct inet6_dev *idev); extern void ipv6_mc_up(struct inet6_dev *idev);
extern void ipv6_mc_down(struct inet6_dev *idev); extern void ipv6_mc_down(struct inet6_dev *idev);
...@@ -111,6 +112,7 @@ extern void ipv6_sock_ac_close(struct sock *sk); ...@@ -111,6 +112,7 @@ extern void ipv6_sock_ac_close(struct sock *sk);
extern int inet6_ac_check(struct sock *sk, struct in6_addr *addr, int ifindex); extern int inet6_ac_check(struct sock *sk, struct in6_addr *addr, int ifindex);
extern int ipv6_dev_ac_inc(struct net_device *dev, struct in6_addr *addr); extern int ipv6_dev_ac_inc(struct net_device *dev, struct in6_addr *addr);
extern int __ipv6_dev_ac_dec(struct inet6_dev *idev, struct in6_addr *addr);
extern int ipv6_dev_ac_dec(struct net_device *dev, struct in6_addr *addr); extern int ipv6_dev_ac_dec(struct net_device *dev, struct in6_addr *addr);
extern int ipv6_chk_acast_addr(struct net_device *dev, struct in6_addr *addr); extern int ipv6_chk_acast_addr(struct net_device *dev, struct in6_addr *addr);
......
...@@ -18,7 +18,6 @@ struct dn_neigh { ...@@ -18,7 +18,6 @@ struct dn_neigh {
extern void dn_neigh_init(void); extern void dn_neigh_init(void);
extern void dn_neigh_cleanup(void); extern void dn_neigh_cleanup(void);
extern struct neighbour *dn_neigh_lookup(struct neigh_table *tbl, const void *ptr);
extern int dn_neigh_router_hello(struct sk_buff *skb); extern int dn_neigh_router_hello(struct sk_buff *skb);
extern int dn_neigh_endnode_hello(struct sk_buff *skb); extern int dn_neigh_endnode_hello(struct sk_buff *skb);
extern void dn_neigh_pointopoint_hello(struct sk_buff *skb); extern void dn_neigh_pointopoint_hello(struct sk_buff *skb);
......
...@@ -47,6 +47,7 @@ ...@@ -47,6 +47,7 @@
#include <linux/skbuff.h> #include <linux/skbuff.h>
#include <linux/netdevice.h> #include <linux/netdevice.h>
#include <linux/rcupdate.h> #include <linux/rcupdate.h>
#include <linux/seq_file.h>
#include <linux/err.h> #include <linux/err.h>
#include <linux/sysctl.h> #include <linux/sysctl.h>
...@@ -139,9 +140,6 @@ struct pneigh_entry ...@@ -139,9 +140,6 @@ struct pneigh_entry
u8 key[0]; u8 key[0];
}; };
#define NEIGH_HASHMASK 0x1F
#define PNEIGH_HASHMASK 0xF
/* /*
* neighbour table manipulation * neighbour table manipulation
*/ */
...@@ -175,8 +173,11 @@ struct neigh_table ...@@ -175,8 +173,11 @@ struct neigh_table
struct neigh_parms *parms_list; struct neigh_parms *parms_list;
kmem_cache_t *kmem_cachep; kmem_cache_t *kmem_cachep;
struct neigh_statistics stats; struct neigh_statistics stats;
struct neighbour *hash_buckets[NEIGH_HASHMASK+1]; struct neighbour **hash_buckets;
struct pneigh_entry *phash_buckets[PNEIGH_HASHMASK+1]; unsigned int hash_mask;
__u32 hash_rnd;
unsigned int hash_chain_gc;
struct pneigh_entry **phash_buckets;
}; };
/* flags for neigh_update() */ /* flags for neigh_update() */
...@@ -191,6 +192,8 @@ extern int neigh_table_clear(struct neigh_table *tbl); ...@@ -191,6 +192,8 @@ extern int neigh_table_clear(struct neigh_table *tbl);
extern struct neighbour * neigh_lookup(struct neigh_table *tbl, extern struct neighbour * neigh_lookup(struct neigh_table *tbl,
const void *pkey, const void *pkey,
struct net_device *dev); struct net_device *dev);
extern struct neighbour * neigh_lookup_nodev(struct neigh_table *tbl,
const void *pkey);
extern struct neighbour * neigh_create(struct neigh_table *tbl, extern struct neighbour * neigh_create(struct neigh_table *tbl,
const void *pkey, const void *pkey,
struct net_device *dev); struct net_device *dev);
...@@ -224,6 +227,24 @@ extern int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg); ...@@ -224,6 +227,24 @@ extern int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
extern int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg); extern int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
extern void neigh_app_ns(struct neighbour *n); extern void neigh_app_ns(struct neighbour *n);
extern void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie);
extern void __neigh_for_each_release(struct neigh_table *tbl, int (*cb)(struct neighbour *));
extern void pneigh_for_each(struct neigh_table *tbl, void (*cb)(struct pneigh_entry *));
struct neigh_seq_state {
struct neigh_table *tbl;
void *(*neigh_sub_iter)(struct neigh_seq_state *state,
struct neighbour *n, loff_t *pos);
unsigned int bucket;
unsigned int flags;
#define NEIGH_SEQ_NEIGH_ONLY 0x00000001
#define NEIGH_SEQ_IS_PNEIGH 0x00000002
#define NEIGH_SEQ_SKIP_NOARP 0x00000004
};
extern void *neigh_seq_start(struct seq_file *, loff_t *, struct neigh_table *, unsigned int);
extern void *neigh_seq_next(struct seq_file *, void *, loff_t *);
extern void neigh_seq_stop(struct seq_file *, void *);
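The CLIP, DECnet and ARP conversions further below all wire these helpers up the same way. Distilled to a minimal sketch (the table `my_tbl`, `my_seq_show()` and the flag choice are placeholders, not part of this change):

```c
/* Minimal sketch of a /proc seq_file built on the shared neighbour
 * iterator.  The real users in this change are clip.c, dn_neigh.c
 * and arp.c. */
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/seq_file.h>
#include <net/neighbour.h>

extern struct neigh_table my_tbl;

static void *my_seq_start(struct seq_file *seq, loff_t *pos)
{
	/* Walk real neighbour entries only (no proxy entries). */
	return neigh_seq_start(seq, pos, &my_tbl, NEIGH_SEQ_NEIGH_ONLY);
}

static int my_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "neighbour entries:\n");
	else
		seq_printf(seq, "%p\n", v);	/* v is a struct neighbour * */
	return 0;
}

static struct seq_operations my_seq_ops = {
	.start = my_seq_start,
	.next  = neigh_seq_next,
	.stop  = neigh_seq_stop,
	.show  = my_seq_show,
};

static int my_seq_open(struct inode *inode, struct file *file)
{
	/* The private state must begin with struct neigh_seq_state. */
	struct neigh_seq_state *s = kmalloc(sizeof(*s), GFP_KERNEL);
	struct seq_file *seq;
	int rc;

	if (!s)
		return -ENOMEM;
	memset(s, 0, sizeof(*s));

	rc = seq_open(file, &my_seq_ops);
	if (rc) {
		kfree(s);
		return rc;
	}
	seq = file->private_data;
	seq->private = s;
	return 0;
}
```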
extern int neigh_sysctl_register(struct net_device *dev, extern int neigh_sysctl_register(struct net_device *dev,
struct neigh_parms *p, struct neigh_parms *p,
int p_id, int pdev_id, int p_id, int pdev_id,
......
...@@ -1271,6 +1271,13 @@ static __inline__ unsigned int tcp_packets_in_flight(struct tcp_opt *tp) ...@@ -1271,6 +1271,13 @@ static __inline__ unsigned int tcp_packets_in_flight(struct tcp_opt *tp)
tcp_get_pcount(&tp->retrans_out)); tcp_get_pcount(&tp->retrans_out));
} }
/*
* Which congestion algorithm is in use on the connection.
*/
#define tcp_is_vegas(__tp) ((__tp)->adv_cong == TCP_VEGAS)
#define tcp_is_westwood(__tp) ((__tp)->adv_cong == TCP_WESTWOOD)
#define tcp_is_bic(__tp) ((__tp)->adv_cong == TCP_BIC)
/* Recalculate snd_ssthresh, we want to set it to: /* Recalculate snd_ssthresh, we want to set it to:
* *
* Reno: * Reno:
...@@ -1283,7 +1290,7 @@ static __inline__ unsigned int tcp_packets_in_flight(struct tcp_opt *tp) ...@@ -1283,7 +1290,7 @@ static __inline__ unsigned int tcp_packets_in_flight(struct tcp_opt *tp)
*/ */
static inline __u32 tcp_recalc_ssthresh(struct tcp_opt *tp) static inline __u32 tcp_recalc_ssthresh(struct tcp_opt *tp)
{ {
if (sysctl_tcp_bic) { if (tcp_is_bic(tp)) {
if (sysctl_tcp_bic_fast_convergence && if (sysctl_tcp_bic_fast_convergence &&
tp->snd_cwnd < tp->bictcp.last_max_cwnd) tp->snd_cwnd < tp->bictcp.last_max_cwnd)
tp->bictcp.last_max_cwnd tp->bictcp.last_max_cwnd
...@@ -1303,11 +1310,6 @@ static inline __u32 tcp_recalc_ssthresh(struct tcp_opt *tp) ...@@ -1303,11 +1310,6 @@ static inline __u32 tcp_recalc_ssthresh(struct tcp_opt *tp)
/* Stop taking Vegas samples for now. */ /* Stop taking Vegas samples for now. */
#define tcp_vegas_disable(__tp) ((__tp)->vegas.doing_vegas_now = 0) #define tcp_vegas_disable(__tp) ((__tp)->vegas.doing_vegas_now = 0)
/* Is this TCP connection using Vegas (regardless of whether it is taking
* Vegas measurements at the current time)?
*/
#define tcp_is_vegas(__tp) ((__tp)->vegas.do_vegas)
static inline void tcp_vegas_enable(struct tcp_opt *tp) static inline void tcp_vegas_enable(struct tcp_opt *tp)
{ {
/* There are several situations when we must "re-start" Vegas: /* There are several situations when we must "re-start" Vegas:
...@@ -1340,7 +1342,7 @@ static inline void tcp_vegas_enable(struct tcp_opt *tp) ...@@ -1340,7 +1342,7 @@ static inline void tcp_vegas_enable(struct tcp_opt *tp)
/* Should we be taking Vegas samples right now? */ /* Should we be taking Vegas samples right now? */
#define tcp_vegas_enabled(__tp) ((__tp)->vegas.doing_vegas_now) #define tcp_vegas_enabled(__tp) ((__tp)->vegas.doing_vegas_now)
extern void tcp_vegas_init(struct tcp_opt *tp); extern void tcp_ca_init(struct tcp_opt *tp);
static inline void tcp_set_ca_state(struct tcp_opt *tp, u8 ca_state) static inline void tcp_set_ca_state(struct tcp_opt *tp, u8 ca_state)
{ {
...@@ -2024,7 +2026,7 @@ extern void tcp_proc_unregister(struct tcp_seq_afinfo *afinfo); ...@@ -2024,7 +2026,7 @@ extern void tcp_proc_unregister(struct tcp_seq_afinfo *afinfo);
static inline void tcp_westwood_update_rtt(struct tcp_opt *tp, __u32 rtt_seq) static inline void tcp_westwood_update_rtt(struct tcp_opt *tp, __u32 rtt_seq)
{ {
if (sysctl_tcp_westwood) if (tcp_is_westwood(tp))
tp->westwood.rtt = rtt_seq; tp->westwood.rtt = rtt_seq;
} }
...@@ -2033,13 +2035,13 @@ void __tcp_westwood_slow_bw(struct sock *, struct sk_buff *); ...@@ -2033,13 +2035,13 @@ void __tcp_westwood_slow_bw(struct sock *, struct sk_buff *);
static inline void tcp_westwood_fast_bw(struct sock *sk, struct sk_buff *skb) static inline void tcp_westwood_fast_bw(struct sock *sk, struct sk_buff *skb)
{ {
if (sysctl_tcp_westwood) if (tcp_is_westwood(tcp_sk(sk)))
__tcp_westwood_fast_bw(sk, skb); __tcp_westwood_fast_bw(sk, skb);
} }
static inline void tcp_westwood_slow_bw(struct sock *sk, struct sk_buff *skb) static inline void tcp_westwood_slow_bw(struct sock *sk, struct sk_buff *skb)
{ {
if (sysctl_tcp_westwood) if (tcp_is_westwood(tcp_sk(sk)))
__tcp_westwood_slow_bw(sk, skb); __tcp_westwood_slow_bw(sk, skb);
} }
...@@ -2052,14 +2054,14 @@ static inline __u32 __tcp_westwood_bw_rttmin(const struct tcp_opt *tp) ...@@ -2052,14 +2054,14 @@ static inline __u32 __tcp_westwood_bw_rttmin(const struct tcp_opt *tp)
static inline __u32 tcp_westwood_bw_rttmin(const struct tcp_opt *tp) static inline __u32 tcp_westwood_bw_rttmin(const struct tcp_opt *tp)
{ {
return sysctl_tcp_westwood ? __tcp_westwood_bw_rttmin(tp) : 0; return tcp_is_westwood(tp) ? __tcp_westwood_bw_rttmin(tp) : 0;
} }
static inline int tcp_westwood_ssthresh(struct tcp_opt *tp) static inline int tcp_westwood_ssthresh(struct tcp_opt *tp)
{ {
__u32 ssthresh = 0; __u32 ssthresh = 0;
if (sysctl_tcp_westwood) { if (tcp_is_westwood(tp)) {
ssthresh = __tcp_westwood_bw_rttmin(tp); ssthresh = __tcp_westwood_bw_rttmin(tp);
if (ssthresh) if (ssthresh)
tp->snd_ssthresh = ssthresh; tp->snd_ssthresh = ssthresh;
...@@ -2072,7 +2074,7 @@ static inline int tcp_westwood_cwnd(struct tcp_opt *tp) ...@@ -2072,7 +2074,7 @@ static inline int tcp_westwood_cwnd(struct tcp_opt *tp)
{ {
__u32 cwnd = 0; __u32 cwnd = 0;
if (sysctl_tcp_westwood) { if (tcp_is_westwood(tp)) {
cwnd = __tcp_westwood_bw_rttmin(tp); cwnd = __tcp_westwood_bw_rttmin(tp);
if (cwnd) if (cwnd)
tp->snd_cwnd = cwnd; tp->snd_cwnd = cwnd;
......
...@@ -27,6 +27,7 @@ ...@@ -27,6 +27,7 @@
#include <linux/proc_fs.h> #include <linux/proc_fs.h>
#include <linux/seq_file.h> #include <linux/seq_file.h>
#include <linux/rcupdate.h> #include <linux/rcupdate.h>
#include <linux/jhash.h>
#include <net/route.h> /* for struct rtable and routing */ #include <net/route.h> /* for struct rtable and routing */
#include <net/icmp.h> /* icmp_send */ #include <net/icmp.h> /* icmp_send */
#include <asm/param.h> /* for HZ */ #include <asm/param.h> /* for HZ */
...@@ -123,64 +124,49 @@ static void unlink_clip_vcc(struct clip_vcc *clip_vcc) ...@@ -123,64 +124,49 @@ static void unlink_clip_vcc(struct clip_vcc *clip_vcc)
spin_unlock_bh(&entry->neigh->dev->xmit_lock); spin_unlock_bh(&entry->neigh->dev->xmit_lock);
} }
/* The neighbour entry n->lock is held. */
static void idle_timer_check(unsigned long dummy) static int neigh_check_cb(struct neighbour *n)
{ {
int i;
/*DPRINTK("idle_timer_check\n");*/
write_lock(&clip_tbl.lock);
for (i = 0; i <= NEIGH_HASHMASK; i++) {
struct neighbour **np;
for (np = &clip_tbl.hash_buckets[i]; *np;) {
struct neighbour *n = *np;
struct atmarp_entry *entry = NEIGH2ENTRY(n); struct atmarp_entry *entry = NEIGH2ENTRY(n);
struct clip_vcc *clip_vcc; struct clip_vcc *cv;
for (cv = entry->vccs; cv; cv = cv->next) {
unsigned long exp = cv->last_use + cv->idle_timeout;
write_lock(&n->lock); if (cv->idle_timeout && time_after(jiffies, exp)) {
DPRINTK("releasing vcc %p->%p of entry %p\n",
for (clip_vcc = entry->vccs; clip_vcc; cv, cv->vcc, entry);
clip_vcc = clip_vcc->next) vcc_release_async(cv->vcc, -ETIMEDOUT);
if (clip_vcc->idle_timeout &&
time_after(jiffies, clip_vcc->last_use+
clip_vcc->idle_timeout)) {
DPRINTK("releasing vcc %p->%p of "
"entry %p\n",clip_vcc,clip_vcc->vcc,
entry);
vcc_release_async(clip_vcc->vcc,
-ETIMEDOUT);
} }
if (entry->vccs ||
time_before(jiffies, entry->expires)) {
np = &n->next;
write_unlock(&n->lock);
continue;
} }
if (entry->vccs || time_before(jiffies, entry->expires))
return 0;
if (atomic_read(&n->refcnt) > 1) { if (atomic_read(&n->refcnt) > 1) {
struct sk_buff *skb; struct sk_buff *skb;
DPRINTK("destruction postponed with ref %d\n", DPRINTK("destruction postponed with ref %d\n",
atomic_read(&n->refcnt)); atomic_read(&n->refcnt));
while ((skb = skb_dequeue(&n->arp_queue)) !=
NULL) while ((skb = skb_dequeue(&n->arp_queue)) != NULL)
dev_kfree_skb(skb); dev_kfree_skb(skb);
np = &n->next;
write_unlock(&n->lock); return 0;
continue;
} }
*np = n->next;
DPRINTK("expired neigh %p\n",n); DPRINTK("expired neigh %p\n",n);
n->dead = 1; return 1;
write_unlock(&n->lock); }
neigh_release(n);
} static void idle_timer_check(unsigned long dummy)
} {
write_lock(&clip_tbl.lock);
__neigh_for_each_release(&clip_tbl, neigh_check_cb);
mod_timer(&idle_timer, jiffies+CLIP_CHECK_INTERVAL*HZ); mod_timer(&idle_timer, jiffies+CLIP_CHECK_INTERVAL*HZ);
write_unlock(&clip_tbl.lock); write_unlock(&clip_tbl.lock);
} }
static int clip_arp_rcv(struct sk_buff *skb) static int clip_arp_rcv(struct sk_buff *skb)
{ {
struct atm_vcc *vcc; struct atm_vcc *vcc;
...@@ -343,15 +329,7 @@ static int clip_constructor(struct neighbour *neigh) ...@@ -343,15 +329,7 @@ static int clip_constructor(struct neighbour *neigh)
static u32 clip_hash(const void *pkey, const struct net_device *dev) static u32 clip_hash(const void *pkey, const struct net_device *dev)
{ {
u32 hash_val; return jhash_2words(*(u32 *)pkey, dev->ifindex, clip_tbl.hash_rnd);
hash_val = *(u32*)pkey;
hash_val ^= (hash_val>>16);
hash_val ^= hash_val>>8;
hash_val ^= hash_val>>3;
hash_val = (hash_val^dev->ifindex)&NEIGH_HASHMASK;
return hash_val;
} }
static struct neigh_table clip_tbl = { static struct neigh_table clip_tbl = {
...@@ -833,120 +811,126 @@ static void svc_addr(struct seq_file *seq, struct sockaddr_atmsvc *addr) ...@@ -833,120 +811,126 @@ static void svc_addr(struct seq_file *seq, struct sockaddr_atmsvc *addr)
} }
} }
/* This means the neighbour entry has no attached VCC objects. */
#define SEQ_NO_VCC_TOKEN ((void *) 2)
static void atmarp_info(struct seq_file *seq, struct net_device *dev, static void atmarp_info(struct seq_file *seq, struct net_device *dev,
struct atmarp_entry *entry, struct clip_vcc *clip_vcc) struct atmarp_entry *entry, struct clip_vcc *clip_vcc)
{ {
unsigned long exp;
char buf[17]; char buf[17];
int svc, off; int svc, llc, off;
svc = ((clip_vcc == SEQ_NO_VCC_TOKEN) ||
(clip_vcc->vcc->sk->sk_family == AF_ATMSVC));
svc = !clip_vcc || clip_vcc->vcc->sk->sk_family == AF_ATMSVC; llc = ((clip_vcc == SEQ_NO_VCC_TOKEN) ||
seq_printf(seq, "%-6s%-4s%-4s%5ld ", dev->name, svc ? "SVC" : "PVC", clip_vcc->encap);
!clip_vcc || clip_vcc->encap ? "LLC" : "NULL",
(jiffies-(clip_vcc ? clip_vcc->last_use : entry->neigh->used))/HZ);
off = scnprintf(buf, sizeof(buf) - 1, "%d.%d.%d.%d", NIPQUAD(entry->ip)); if (clip_vcc == SEQ_NO_VCC_TOKEN)
exp = entry->neigh->used;
else
exp = clip_vcc->last_use;
exp = (jiffies - exp) / HZ;
seq_printf(seq, "%-6s%-4s%-4s%5ld ",
dev->name,
svc ? "SVC" : "PVC",
llc ? "LLC" : "NULL",
exp);
off = scnprintf(buf, sizeof(buf) - 1, "%d.%d.%d.%d",
NIPQUAD(entry->ip));
while (off < 16) while (off < 16)
buf[off++] = ' '; buf[off++] = ' ';
buf[off] = '\0'; buf[off] = '\0';
seq_printf(seq, "%s", buf); seq_printf(seq, "%s", buf);
if (!clip_vcc) { if (clip_vcc == SEQ_NO_VCC_TOKEN) {
if (time_before(jiffies, entry->expires)) if (time_before(jiffies, entry->expires))
seq_printf(seq, "(resolving)\n"); seq_printf(seq, "(resolving)\n");
else else
seq_printf(seq, "(expired, ref %d)\n", seq_printf(seq, "(expired, ref %d)\n",
atomic_read(&entry->neigh->refcnt)); atomic_read(&entry->neigh->refcnt));
} else if (!svc) { } else if (!svc) {
seq_printf(seq, "%d.%d.%d\n", clip_vcc->vcc->dev->number, seq_printf(seq, "%d.%d.%d\n",
clip_vcc->vcc->vpi, clip_vcc->vcc->vci); clip_vcc->vcc->dev->number,
clip_vcc->vcc->vpi,
clip_vcc->vcc->vci);
} else { } else {
svc_addr(seq, &clip_vcc->vcc->remote); svc_addr(seq, &clip_vcc->vcc->remote);
seq_putc(seq, '\n'); seq_putc(seq, '\n');
} }
} }
struct arp_state { struct clip_seq_state {
int bucket; /* This member must be first. */
struct neighbour *n; struct neigh_seq_state ns;
/* Local to clip specific iteration. */
struct clip_vcc *vcc; struct clip_vcc *vcc;
}; };
static void *arp_vcc_walk(struct arp_state *state, static struct clip_vcc *clip_seq_next_vcc(struct atmarp_entry *e,
struct atmarp_entry *e, loff_t *l) struct clip_vcc *curr)
{ {
struct clip_vcc *vcc = state->vcc; if (!curr) {
curr = e->vccs;
if (!vcc) if (!curr)
vcc = e->vccs; return SEQ_NO_VCC_TOKEN;
if (vcc == (void *)1) { return curr;
vcc = e->vccs;
--*l;
}
for (; vcc; vcc = vcc->next) {
if (--*l < 0)
break;
} }
state->vcc = vcc; if (curr == SEQ_NO_VCC_TOKEN)
return (*l < 0) ? state : NULL; return NULL;
curr = curr->next;
return curr;
} }
static void *arp_get_idx(struct arp_state *state, loff_t l) static void *clip_seq_vcc_walk(struct clip_seq_state *state,
struct atmarp_entry *e, loff_t *pos)
{ {
void *v = NULL; struct clip_vcc *vcc = state->vcc;
for (; state->bucket <= NEIGH_HASHMASK; state->bucket++) { vcc = clip_seq_next_vcc(e, vcc);
for (; state->n; state->n = state->n->next) { if (vcc && pos != NULL) {
v = arp_vcc_walk(state, NEIGH2ENTRY(state->n), &l); while (*pos) {
if (v) vcc = clip_seq_next_vcc(e, vcc);
goto done; if (!vcc)
break;
--(*pos);
} }
state->n = clip_tbl.hash_buckets[state->bucket + 1];
} }
done: state->vcc = vcc;
return v;
}
static void *arp_seq_start(struct seq_file *seq, loff_t *pos) return vcc;
{
struct arp_state *state = seq->private;
void *ret = (void *)1;
read_lock_bh(&clip_tbl.lock);
state->bucket = 0;
state->n = clip_tbl.hash_buckets[0];
state->vcc = (void *)1;
if (*pos)
ret = arp_get_idx(state, *pos);
return ret;
} }
static void arp_seq_stop(struct seq_file *seq, void *v) static void *clip_seq_sub_iter(struct neigh_seq_state *_state,
struct neighbour *n, loff_t *pos)
{ {
struct arp_state *state = seq->private; struct clip_seq_state *state = (struct clip_seq_state *) _state;
if (state->bucket != -1) return clip_seq_vcc_walk(state, NEIGH2ENTRY(n), pos);
read_unlock_bh(&clip_tbl.lock);
} }
static void *arp_seq_next(struct seq_file *seq, void *v, loff_t *pos) static void *clip_seq_start(struct seq_file *seq, loff_t *pos)
{ {
struct arp_state *state = seq->private; return neigh_seq_start(seq, pos, &clip_tbl, NEIGH_SEQ_NEIGH_ONLY);
v = arp_get_idx(state, 1);
*pos += !!PTR_ERR(v);
return v;
} }
static int arp_seq_show(struct seq_file *seq, void *v) static int clip_seq_show(struct seq_file *seq, void *v)
{ {
static char atm_arp_banner[] = static char atm_arp_banner[] =
"IPitf TypeEncp Idle IP address ATM address\n"; "IPitf TypeEncp Idle IP address ATM address\n";
if (v == (void *)1) if (v == SEQ_START_TOKEN) {
seq_puts(seq, atm_arp_banner); seq_puts(seq, atm_arp_banner);
else { } else {
struct arp_state *state = seq->private; struct clip_seq_state *state = seq->private;
struct neighbour *n = state->n; struct neighbour *n = v;
struct clip_vcc *vcc = state->vcc; struct clip_vcc *vcc = state->vcc;
atmarp_info(seq, n->dev, NEIGH2ENTRY(n), vcc); atmarp_info(seq, n->dev, NEIGH2ENTRY(n), vcc);
...@@ -955,15 +939,15 @@ static int arp_seq_show(struct seq_file *seq, void *v) ...@@ -955,15 +939,15 @@ static int arp_seq_show(struct seq_file *seq, void *v)
} }
static struct seq_operations arp_seq_ops = { static struct seq_operations arp_seq_ops = {
.start = arp_seq_start, .start = clip_seq_start,
.next = arp_seq_next, .next = neigh_seq_next,
.stop = arp_seq_stop, .stop = neigh_seq_stop,
.show = arp_seq_show, .show = clip_seq_show,
}; };
static int arp_seq_open(struct inode *inode, struct file *file) static int arp_seq_open(struct inode *inode, struct file *file)
{ {
struct arp_state *state; struct clip_seq_state *state;
struct seq_file *seq; struct seq_file *seq;
int rc = -EAGAIN; int rc = -EAGAIN;
...@@ -972,6 +956,8 @@ static int arp_seq_open(struct inode *inode, struct file *file) ...@@ -972,6 +956,8 @@ static int arp_seq_open(struct inode *inode, struct file *file)
rc = -ENOMEM; rc = -ENOMEM;
goto out_kfree; goto out_kfree;
} }
memset(state, 0, sizeof(*state));
state->ns.neigh_sub_iter = clip_seq_sub_iter;
rc = seq_open(file, &arp_seq_ops); rc = seq_open(file, &arp_seq_ops);
if (rc) if (rc)
...@@ -987,16 +973,11 @@ static int arp_seq_open(struct inode *inode, struct file *file) ...@@ -987,16 +973,11 @@ static int arp_seq_open(struct inode *inode, struct file *file)
goto out; goto out;
} }
static int arp_seq_release(struct inode *inode, struct file *file)
{
return seq_release_private(inode, file);
}
static struct file_operations arp_seq_fops = { static struct file_operations arp_seq_fops = {
.open = arp_seq_open, .open = arp_seq_open,
.read = seq_read, .read = seq_read,
.llseek = seq_lseek, .llseek = seq_lseek,
.release = arp_seq_release, .release = seq_release_private,
.owner = THIS_MODULE .owner = THIS_MODULE
}; };
#endif #endif
......
...@@ -36,6 +36,7 @@ ...@@ -36,6 +36,7 @@
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/seq_file.h> #include <linux/seq_file.h>
#include <linux/rcupdate.h> #include <linux/rcupdate.h>
#include <linux/jhash.h>
#include <asm/atomic.h> #include <asm/atomic.h>
#include <net/neighbour.h> #include <net/neighbour.h>
#include <net/dst.h> #include <net/dst.h>
...@@ -122,13 +123,7 @@ struct neigh_table dn_neigh_table = { ...@@ -122,13 +123,7 @@ struct neigh_table dn_neigh_table = {
static u32 dn_neigh_hash(const void *pkey, const struct net_device *dev) static u32 dn_neigh_hash(const void *pkey, const struct net_device *dev)
{ {
u32 hash_val; return jhash_2words(*(dn_address *)pkey, 0, dn_neigh_table.hash_rnd);
hash_val = *(dn_address *)pkey;
hash_val ^= (hash_val >> 10);
hash_val ^= (hash_val >> 3);
return hash_val & NEIGH_HASHMASK;
} }
static int dn_neigh_construct(struct neighbour *neigh) static int dn_neigh_construct(struct neighbour *neigh)
...@@ -359,27 +354,6 @@ static int dn_phase3_output(struct sk_buff *skb) ...@@ -359,27 +354,6 @@ static int dn_phase3_output(struct sk_buff *skb)
* basically does a neigh_lookup(), but without comparing the device * basically does a neigh_lookup(), but without comparing the device
* field. This is required for the On-Ethernet cache * field. This is required for the On-Ethernet cache
*/ */
struct neighbour *dn_neigh_lookup(struct neigh_table *tbl, const void *ptr)
{
struct neighbour *neigh;
u32 hash_val;
hash_val = tbl->hash(ptr, NULL);
read_lock_bh(&tbl->lock);
for(neigh = tbl->hash_buckets[hash_val]; neigh != NULL; neigh = neigh->next) {
if (memcmp(neigh->primary_key, ptr, tbl->key_len) == 0) {
atomic_inc(&neigh->refcnt);
read_unlock_bh(&tbl->lock);
return neigh;
}
}
read_unlock_bh(&tbl->lock);
return NULL;
}
/* /*
* Any traffic on a pointopoint link causes the timer to be reset * Any traffic on a pointopoint link causes the timer to be reset
* for the entry in the neighbour table. * for the entry in the neighbour table.
...@@ -514,141 +488,66 @@ static char *dn_find_slot(char *base, int max, int priority) ...@@ -514,141 +488,66 @@ static char *dn_find_slot(char *base, int max, int priority)
return (*min < priority) ? (min - 6) : NULL; return (*min < priority) ? (min - 6) : NULL;
} }
int dn_neigh_elist(struct net_device *dev, unsigned char *ptr, int n) struct elist_cb_state {
struct net_device *dev;
unsigned char *ptr;
unsigned char *rs;
int t, n;
};
static void neigh_elist_cb(struct neighbour *neigh, void *_info)
{ {
int t = 0; struct elist_cb_state *s = _info;
int i; struct dn_dev *dn_db;
struct neighbour *neigh;
struct dn_neigh *dn; struct dn_neigh *dn;
struct neigh_table *tbl = &dn_neigh_table;
unsigned char *rs = ptr;
struct dn_dev *dn_db = (struct dn_dev *)dev->dn_ptr;
read_lock_bh(&tbl->lock); if (neigh->dev != s->dev)
return;
for(i = 0; i < NEIGH_HASHMASK; i++) { dn = (struct dn_neigh *) neigh;
for(neigh = tbl->hash_buckets[i]; neigh != NULL; neigh = neigh->next) {
if (neigh->dev != dev)
continue;
dn = (struct dn_neigh *)neigh;
if (!(dn->flags & (DN_NDFLAG_R1|DN_NDFLAG_R2))) if (!(dn->flags & (DN_NDFLAG_R1|DN_NDFLAG_R2)))
continue; return;
if (dn_db->parms.forwarding == 1 && (dn->flags & DN_NDFLAG_R2))
continue;
if (t == n)
rs = dn_find_slot(ptr, n, dn->priority);
else
t++;
if (rs == NULL)
continue;
dn_dn2eth(rs, dn->addr);
rs += 6;
*rs = neigh->nud_state & NUD_CONNECTED ? 0x80 : 0x0;
*rs |= dn->priority;
rs++;
}
}
read_unlock_bh(&tbl->lock);
return t;
}
#ifdef CONFIG_PROC_FS
struct dn_neigh_iter_state {
int bucket;
};
static struct neighbour *neigh_get_first(struct seq_file *seq)
{
struct dn_neigh_iter_state *state = seq->private;
struct neighbour *n = NULL;
for(state->bucket = 0;
state->bucket <= NEIGH_HASHMASK;
++state->bucket) {
n = dn_neigh_table.hash_buckets[state->bucket];
if (n)
break;
}
return n; dn_db = (struct dn_dev *) s->dev->dn_ptr;
} if (dn_db->parms.forwarding == 1 && (dn->flags & DN_NDFLAG_R2))
return;
static struct neighbour *neigh_get_next(struct seq_file *seq, if (s->t == s->n)
struct neighbour *n) s->rs = dn_find_slot(s->ptr, s->n, dn->priority);
{ else
struct dn_neigh_iter_state *state = seq->private; s->t++;
if (s->rs == NULL)
return;
n = n->next; dn_dn2eth(s->rs, dn->addr);
try_again: s->rs += 6;
if (n) *(s->rs) = neigh->nud_state & NUD_CONNECTED ? 0x80 : 0x0;
goto out; *(s->rs) |= dn->priority;
if (++state->bucket > NEIGH_HASHMASK) s->rs++;
goto out;
n = dn_neigh_table.hash_buckets[state->bucket];
goto try_again;
out:
return n;
} }
static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos) int dn_neigh_elist(struct net_device *dev, unsigned char *ptr, int n)
{ {
struct neighbour *n = neigh_get_first(seq); struct elist_cb_state state;
if (n) state.dev = dev;
while(*pos && (n = neigh_get_next(seq, n))) state.t = 0;
--*pos; state.n = n;
return *pos ? NULL : n; state.ptr = ptr;
} state.rs = ptr;
static void *dn_neigh_get_idx(struct seq_file *seq, loff_t pos) neigh_for_each(&dn_neigh_table, neigh_elist_cb, &state);
{
void *rc;
read_lock_bh(&dn_neigh_table.lock);
rc = neigh_get_idx(seq, &pos);
if (!rc) {
read_unlock_bh(&dn_neigh_table.lock);
}
return rc;
}
static void *dn_neigh_seq_start(struct seq_file *seq, loff_t *pos) return state.t;
{
return *pos ? dn_neigh_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
} }
static void *dn_neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
void *rc;
if (v == SEQ_START_TOKEN) {
rc = dn_neigh_get_idx(seq, 0);
goto out;
}
rc = neigh_get_next(seq, v); #ifdef CONFIG_PROC_FS
if (rc)
goto out;
read_unlock_bh(&dn_neigh_table.lock);
out:
++*pos;
return rc;
}
static void dn_neigh_seq_stop(struct seq_file *seq, void *v)
{
if (v && v != SEQ_START_TOKEN)
read_unlock_bh(&dn_neigh_table.lock);
}
static inline void dn_neigh_format_entry(struct seq_file *seq, static inline void dn_neigh_format_entry(struct seq_file *seq,
struct neighbour *n) struct neighbour *n)
{ {
struct dn_neigh *dn = (struct dn_neigh *)n; struct dn_neigh *dn = (struct dn_neigh *) n;
char buf[DN_ASCBUF_LEN]; char buf[DN_ASCBUF_LEN];
read_lock(&n->lock); read_lock(&n->lock);
...@@ -675,10 +574,16 @@ static int dn_neigh_seq_show(struct seq_file *seq, void *v) ...@@ -675,10 +574,16 @@ static int dn_neigh_seq_show(struct seq_file *seq, void *v)
return 0; return 0;
} }
static void *dn_neigh_seq_start(struct seq_file *seq, loff_t *pos)
{
return neigh_seq_start(seq, pos, &dn_neigh_table,
NEIGH_SEQ_NEIGH_ONLY);
}
static struct seq_operations dn_neigh_seq_ops = { static struct seq_operations dn_neigh_seq_ops = {
.start = dn_neigh_seq_start, .start = dn_neigh_seq_start,
.next = dn_neigh_seq_next, .next = neigh_seq_next,
.stop = dn_neigh_seq_stop, .stop = neigh_seq_stop,
.show = dn_neigh_seq_show, .show = dn_neigh_seq_show,
}; };
...@@ -686,11 +591,12 @@ static int dn_neigh_seq_open(struct inode *inode, struct file *file) ...@@ -686,11 +591,12 @@ static int dn_neigh_seq_open(struct inode *inode, struct file *file)
{ {
struct seq_file *seq; struct seq_file *seq;
int rc = -ENOMEM; int rc = -ENOMEM;
struct dn_neigh_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL); struct neigh_seq_state *s = kmalloc(sizeof(*s), GFP_KERNEL);
if (!s) if (!s)
goto out; goto out;
memset(s, 0, sizeof(*s));
rc = seq_open(file, &dn_neigh_seq_ops); rc = seq_open(file, &dn_neigh_seq_ops);
if (rc) if (rc)
goto out_kfree; goto out_kfree;
......
...@@ -996,7 +996,7 @@ static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *old ...@@ -996,7 +996,7 @@ static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *old
* here * here
*/ */
if (!try_hard) { if (!try_hard) {
neigh = dn_neigh_lookup(&dn_neigh_table, &fl.fld_dst); neigh = neigh_lookup_nodev(&dn_neigh_table, &fl.fld_dst);
if (neigh) { if (neigh) {
if ((oldflp->oif && if ((oldflp->oif &&
(neigh->dev->ifindex != oldflp->oif)) || (neigh->dev->ifindex != oldflp->oif)) ||
......
...@@ -71,6 +71,7 @@ ...@@ -71,6 +71,7 @@
* arp_xmit so intermediate drivers like * arp_xmit so intermediate drivers like
* bonding can change the skb before * bonding can change the skb before
* sending (e.g. insert 8021q tag). * sending (e.g. insert 8021q tag).
* Harald Welte : convert to make use of jenkins hash
*/ */
#include <linux/module.h> #include <linux/module.h>
...@@ -97,6 +98,7 @@ ...@@ -97,6 +98,7 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/net.h> #include <linux/net.h>
#include <linux/rcupdate.h> #include <linux/rcupdate.h>
#include <linux/jhash.h>
#ifdef CONFIG_SYSCTL #ifdef CONFIG_SYSCTL
#include <linux/sysctl.h> #include <linux/sysctl.h>
#endif #endif
...@@ -223,15 +225,7 @@ int arp_mc_map(u32 addr, u8 *haddr, struct net_device *dev, int dir) ...@@ -223,15 +225,7 @@ int arp_mc_map(u32 addr, u8 *haddr, struct net_device *dev, int dir)
static u32 arp_hash(const void *pkey, const struct net_device *dev) static u32 arp_hash(const void *pkey, const struct net_device *dev)
{ {
u32 hash_val; return jhash_2words(*(u32 *)pkey, dev->ifindex, arp_tbl.hash_rnd);
hash_val = *(u32*)pkey;
hash_val ^= (hash_val>>16);
hash_val ^= hash_val>>8;
hash_val ^= hash_val>>3;
hash_val = (hash_val^dev->ifindex)&NEIGH_HASHMASK;
return hash_val;
} }
static int arp_constructor(struct neighbour *neigh) static int arp_constructor(struct neighbour *neigh)
...@@ -1269,161 +1263,9 @@ static char *ax2asc2(ax25_address *a, char *buf) ...@@ -1269,161 +1263,9 @@ static char *ax2asc2(ax25_address *a, char *buf)
} }
#endif /* CONFIG_AX25 */ #endif /* CONFIG_AX25 */
struct arp_iter_state {
int is_pneigh, bucket;
};
static struct neighbour *neigh_get_first(struct seq_file *seq)
{
struct arp_iter_state* state = seq->private;
struct neighbour *n = NULL;
state->is_pneigh = 0;
for (state->bucket = 0;
state->bucket <= NEIGH_HASHMASK;
++state->bucket) {
n = arp_tbl.hash_buckets[state->bucket];
while (n && !(n->nud_state & ~NUD_NOARP))
n = n->next;
if (n)
break;
}
return n;
}
static struct neighbour *neigh_get_next(struct seq_file *seq,
struct neighbour *n)
{
struct arp_iter_state* state = seq->private;
do {
n = n->next;
/* Don't confuse "arp -a" w/ magic entries */
try_again:
;
} while (n && !(n->nud_state & ~NUD_NOARP));
if (n)
goto out;
if (++state->bucket > NEIGH_HASHMASK)
goto out;
n = arp_tbl.hash_buckets[state->bucket];
goto try_again;
out:
return n;
}
static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
{
struct neighbour *n = neigh_get_first(seq);
if (n)
while (*pos && (n = neigh_get_next(seq, n)))
--*pos;
return *pos ? NULL : n;
}
static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
{
struct arp_iter_state* state = seq->private;
struct pneigh_entry *pn;
state->is_pneigh = 1;
for (state->bucket = 0;
state->bucket <= PNEIGH_HASHMASK;
++state->bucket) {
pn = arp_tbl.phash_buckets[state->bucket];
if (pn)
break;
}
return pn;
}
static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
struct pneigh_entry *pn)
{
struct arp_iter_state* state = seq->private;
pn = pn->next;
while (!pn) {
if (++state->bucket > PNEIGH_HASHMASK)
break;
pn = arp_tbl.phash_buckets[state->bucket];
}
return pn;
}
static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t pos)
{
struct pneigh_entry *pn = pneigh_get_first(seq);
if (pn)
while (pos && (pn = pneigh_get_next(seq, pn)))
--pos;
return pos ? NULL : pn;
}
static void *arp_get_idx(struct seq_file *seq, loff_t pos)
{
void *rc;
read_lock_bh(&arp_tbl.lock);
rc = neigh_get_idx(seq, &pos);
if (!rc) {
read_unlock_bh(&arp_tbl.lock);
rc = pneigh_get_idx(seq, pos);
}
return rc;
}
static void *arp_seq_start(struct seq_file *seq, loff_t *pos)
{
struct arp_iter_state* state = seq->private;
state->is_pneigh = 0;
state->bucket = 0;
return *pos ? arp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
static void *arp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
void *rc;
struct arp_iter_state* state;
if (v == SEQ_START_TOKEN) {
rc = arp_get_idx(seq, 0);
goto out;
}
state = seq->private;
if (!state->is_pneigh) {
rc = neigh_get_next(seq, v);
if (rc)
goto out;
read_unlock_bh(&arp_tbl.lock);
rc = pneigh_get_first(seq);
} else
rc = pneigh_get_next(seq, v);
out:
++*pos;
return rc;
}
static void arp_seq_stop(struct seq_file *seq, void *v)
{
struct arp_iter_state* state = seq->private;
if (!state->is_pneigh && v != SEQ_START_TOKEN)
read_unlock_bh(&arp_tbl.lock);
}
#define HBUFFERLEN 30 #define HBUFFERLEN 30
static __inline__ void arp_format_neigh_entry(struct seq_file *seq, static void arp_format_neigh_entry(struct seq_file *seq,
struct neighbour *n) struct neighbour *n)
{ {
char hbuffer[HBUFFERLEN]; char hbuffer[HBUFFERLEN];
...@@ -1455,7 +1297,7 @@ static __inline__ void arp_format_neigh_entry(struct seq_file *seq, ...@@ -1455,7 +1297,7 @@ static __inline__ void arp_format_neigh_entry(struct seq_file *seq,
read_unlock(&n->lock); read_unlock(&n->lock);
} }
static __inline__ void arp_format_pneigh_entry(struct seq_file *seq, static void arp_format_pneigh_entry(struct seq_file *seq,
struct pneigh_entry *n) struct pneigh_entry *n)
{ {
struct net_device *dev = n->dev; struct net_device *dev = n->dev;
...@@ -1470,13 +1312,13 @@ static __inline__ void arp_format_pneigh_entry(struct seq_file *seq, ...@@ -1470,13 +1312,13 @@ static __inline__ void arp_format_pneigh_entry(struct seq_file *seq,
static int arp_seq_show(struct seq_file *seq, void *v) static int arp_seq_show(struct seq_file *seq, void *v)
{ {
if (v == SEQ_START_TOKEN) if (v == SEQ_START_TOKEN) {
seq_puts(seq, "IP address HW type Flags " seq_puts(seq, "IP address HW type Flags "
"HW address Mask Device\n"); "HW address Mask Device\n");
else { } else {
struct arp_iter_state* state = seq->private; struct neigh_seq_state *state = seq->private;
if (state->is_pneigh) if (state->flags & NEIGH_SEQ_IS_PNEIGH)
arp_format_pneigh_entry(seq, v); arp_format_pneigh_entry(seq, v);
else else
arp_format_neigh_entry(seq, v); arp_format_neigh_entry(seq, v);
...@@ -1485,12 +1327,20 @@ static int arp_seq_show(struct seq_file *seq, void *v) ...@@ -1485,12 +1327,20 @@ static int arp_seq_show(struct seq_file *seq, void *v)
return 0; return 0;
} }
static void *arp_seq_start(struct seq_file *seq, loff_t *pos)
{
/* Don't want to confuse "arp -a" w/ magic entries,
* so we tell the generic iterator to skip NUD_NOARP.
*/
return neigh_seq_start(seq, pos, &arp_tbl, NEIGH_SEQ_SKIP_NOARP);
}
/* ------------------------------------------------------------------------ */

static struct seq_operations arp_seq_ops = {
.start = arp_seq_start,
-.next = arp_seq_next,
+.next = neigh_seq_next,
-.stop = arp_seq_stop,
+.stop = neigh_seq_stop,
.show = arp_seq_show,
};
@@ -1498,11 +1348,12 @@ static int arp_seq_open(struct inode *inode, struct file *file)
{
struct seq_file *seq;
int rc = -ENOMEM;
-struct arp_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL);
+struct neigh_seq_state *s = kmalloc(sizeof(*s), GFP_KERNEL);
if (!s)
goto out;
+memset(s, 0, sizeof(*s));
rc = seq_open(file, &arp_seq_ops);
if (rc)
goto out_kfree;
...
@@ -438,17 +438,15 @@ static struct fib_alias *fib_find_alias(struct fib_node *fn, u8 tos, u32 prio)
{
if (fn) {
struct list_head *head = &fn->fn_alias;
-struct fib_alias *fa, *prev_fa;
-prev_fa = NULL;
+struct fib_alias *fa;
list_for_each_entry(fa, head, fa_list) {
-if (fa->fa_tos != tos)
+if (fa->fa_tos > tos)
continue;
-prev_fa = fa;
-if (prio <= fa->fa_info->fib_priority)
-break;
+if (fa->fa_info->fib_priority >= prio ||
+    fa->fa_tos < tos)
+return fa;
}
-return prev_fa;
}
return NULL;
}
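The new lookup only works because fn_hash_insert(), further down in this patch, switches to list_add_tail() and so keeps each node's alias list sorted by tos descending and, within one tos value, by priority ascending. A standalone sketch of that search over a plain sorted array, purely illustrative and not kernel code:

#include <stdio.h>

struct alias { unsigned char tos; unsigned int prio; };

/* First entry with a smaller tos, or the same tos and priority >= prio;
 * this doubles as the "already present?" check and the insertion point. */
static const struct alias *find_alias(const struct alias *a, int n,
                                      unsigned char tos, unsigned int prio)
{
	int i;

	for (i = 0; i < n; i++) {
		if (a[i].tos > tos)
			continue;
		if (a[i].prio >= prio || a[i].tos < tos)
			return &a[i];
	}
	return NULL;
}

int main(void)
{
	/* Sorted: tos descending, prio ascending within each tos. */
	static const struct alias table[] = {
		{ 0x10, 1 }, { 0x10, 5 }, { 0x08, 2 }, { 0x00, 3 },
	};
	const struct alias *fa = find_alias(table, 4, 0x08, 2);

	if (fa)
		printf("match/insertion point: tos=%#x prio=%u\n", fa->tos, fa->prio);
	return 0;
}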
@@ -505,7 +503,7 @@ fn_hash_insert(struct fib_table *tb, struct rtmsg *r, struct kern_rta *rta,
* and we need to allocate a new one of those as well.
*/
-if (fa &&
+if (fa && fa->fa_tos == tos &&
fa->fa_info->fib_priority == fi->fib_priority) {
struct fib_alias *fa_orig;
@@ -537,7 +535,8 @@ fn_hash_insert(struct fib_table *tb, struct rtmsg *r, struct kern_rta *rta,
* information.
*/
fa_orig = fa;
-list_for_each_entry(fa, fa_orig->fa_list.prev, fa_list) {
+fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list);
+list_for_each_entry_continue(fa, &f->fn_alias, fa_list) {
if (fa->fa_tos != tos)
break;
if (fa->fa_info->fib_priority != fi->fib_priority)
@@ -585,7 +584,7 @@ fn_hash_insert(struct fib_table *tb, struct rtmsg *r, struct kern_rta *rta,
write_lock_bh(&fib_hash_lock);
if (new_f)
fib_insert_node(fz, new_f);
-list_add(&new_fa->fa_list,
+list_add_tail(&new_fa->fa_list,
(fa ? &fa->fa_list : &f->fn_alias));
write_unlock_bh(&fib_hash_lock);
@@ -611,7 +610,6 @@ fn_hash_delete(struct fib_table *tb, struct rtmsg *r, struct kern_rta *rta,
struct fn_hash *table = (struct fn_hash*)tb->tb_data;
struct fib_node *f;
struct fib_alias *fa, *fa_to_delete;
-struct list_head *fa_head;
int z = r->rtm_dst_len;
struct fn_zone *fz;
u32 key;
@@ -637,8 +635,8 @@ fn_hash_delete(struct fib_table *tb, struct rtmsg *r, struct kern_rta *rta,
return -ESRCH;
fa_to_delete = NULL;
-fa_head = fa->fa_list.prev;
-list_for_each_entry(fa, fa_head, fa_list) {
+fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list);
+list_for_each_entry_continue(fa, &f->fn_alias, fa_list) {
struct fib_info *fi = fa->fa_info;
if (fa->fa_tos != tos)
...
@@ -41,6 +41,12 @@ static struct sock *tcpnl;
rta->rta_len = rtalen; \
RTA_DATA(rta); })
static inline unsigned int jiffies_to_usecs(const unsigned long j)
{
return 1000*jiffies_to_msecs(j);
}
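The new helper just scales the existing jiffies_to_msecs() by 1000, so the open-coded (1000000*x)/HZ conversions below collapse into one call. A quick sanity check, assuming HZ == 1000 purely for illustration:

/* Illustration only (HZ assumed to be 1000, so one jiffy is 1 ms). */
unsigned long srtt_jiffies = 8 * 40;			/* srtt is stored left-shifted by 3 */
unsigned int rto_us = jiffies_to_usecs(200);		/* 200000 microseconds */
unsigned int rtt_us = jiffies_to_usecs(srtt_jiffies) >> 3;	/* 40000, matching tcpi_rtt below */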
/* Return information about state of tcp endpoint in API format. */
void tcp_get_info(struct sock *sk, struct tcp_info *info)
{
@@ -68,8 +74,8 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
if (tp->ecn_flags&TCP_ECN_OK)
info->tcpi_options |= TCPI_OPT_ECN;
-info->tcpi_rto = (1000000*tp->rto)/HZ;
-info->tcpi_ato = (1000000*tp->ack.ato)/HZ;
+info->tcpi_rto = jiffies_to_usecs(tp->rto);
+info->tcpi_ato = jiffies_to_usecs(tp->ack.ato);
info->tcpi_snd_mss = tp->mss_cache_std;
info->tcpi_rcv_mss = tp->ack.rcv_mss;
@@ -79,20 +85,20 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
info->tcpi_retrans = tcp_get_pcount(&tp->retrans_out);
info->tcpi_fackets = tcp_get_pcount(&tp->fackets_out);
-info->tcpi_last_data_sent = ((now - tp->lsndtime)*1000)/HZ;
-info->tcpi_last_data_recv = ((now - tp->ack.lrcvtime)*1000)/HZ;
-info->tcpi_last_ack_recv = ((now - tp->rcv_tstamp)*1000)/HZ;
+info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
+info->tcpi_last_data_recv = jiffies_to_msecs(now - tp->ack.lrcvtime);
+info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);
info->tcpi_pmtu = tp->pmtu_cookie;
info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
-info->tcpi_rtt = ((1000000*tp->srtt)/HZ)>>3;
-info->tcpi_rttvar = ((1000000*tp->mdev)/HZ)>>2;
+info->tcpi_rtt = jiffies_to_usecs(tp->srtt)>>3;
+info->tcpi_rttvar = jiffies_to_usecs(tp->mdev)>>2;
info->tcpi_snd_ssthresh = tp->snd_ssthresh;
info->tcpi_snd_cwnd = tp->snd_cwnd;
info->tcpi_advmss = tp->advmss;
info->tcpi_reordering = tp->reordering;
-info->tcpi_rcv_rtt = ((1000000*tp->rcv_rtt_est.rtt)/HZ)>>3;
+info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt)>>3;
info->tcpi_rcv_space = tp->rcvq_space.space;
}
@@ -116,7 +122,8 @@ static int tcpdiag_fill(struct sk_buff *skb, struct sock *sk,
if (ext & (1<<(TCPDIAG_INFO-1)))
info = TCPDIAG_PUT(skb, TCPDIAG_INFO, sizeof(*info));
-if (tcp_is_vegas(tp) && (ext & (1<<(TCPDIAG_VEGASINFO-1))))
+if ((tcp_is_westwood(tp) || tcp_is_vegas(tp))
+    && (ext & (1<<(TCPDIAG_VEGASINFO-1))))
vinfo = TCPDIAG_PUT(skb, TCPDIAG_VEGASINFO, sizeof(*vinfo));
}
r->tcpdiag_family = sk->sk_family;
@@ -209,10 +216,17 @@ static int tcpdiag_fill(struct sk_buff *skb, struct sock *sk,
tcp_get_info(sk, info);
if (vinfo) {
+if (tcp_is_vegas(tp)) {
vinfo->tcpv_enabled = tp->vegas.doing_vegas_now;
vinfo->tcpv_rttcnt = tp->vegas.cntRTT;
-vinfo->tcpv_rtt = tp->vegas.baseRTT;
-vinfo->tcpv_minrtt = tp->vegas.minRTT;
+vinfo->tcpv_rtt = jiffies_to_usecs(tp->vegas.baseRTT);
+vinfo->tcpv_minrtt = jiffies_to_usecs(tp->vegas.minRTT);
+} else {
+vinfo->tcpv_enabled = 0;
+vinfo->tcpv_rttcnt = 0;
+vinfo->tcpv_rtt = jiffies_to_usecs(tp->westwood.rtt);
+vinfo->tcpv_minrtt = jiffies_to_usecs(tp->westwood.rtt_min);
+}
}
nlh->nlmsg_len = skb->tail - b;
...
@@ -555,17 +555,20 @@ static void tcp_event_data_recv(struct sock *sk, struct tcp_opt *tp, struct sk_b
tcp_grow_window(sk, tp, skb);
}

-/* Set up a new TCP connection, depending on whether it should be
- * using Vegas or not.
+/* When starting a new connection, pin down the current choice of
+ * congestion algorithm.
*/
-void tcp_vegas_init(struct tcp_opt *tp)
+void tcp_ca_init(struct tcp_opt *tp)
{
-if (sysctl_tcp_vegas_cong_avoid) {
-tp->vegas.do_vegas = 1;
+if (sysctl_tcp_westwood)
+tp->adv_cong = TCP_WESTWOOD;
+else if (sysctl_tcp_bic)
+tp->adv_cong = TCP_BIC;
+else if (sysctl_tcp_vegas_cong_avoid) {
+tp->adv_cong = TCP_VEGAS;
tp->vegas.baseRTT = 0x7fffffff;
tcp_vegas_enable(tp);
-} else
-tcp_vegas_disable(tp);
+}
}
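tcp_ca_init() pins one of the compiled-in congestion choices into tp->adv_cong, with Westwood taking precedence over BIC and BIC over Vegas; anything else falls back to plain Reno. The tcp_is_*() predicates used elsewhere in this patch are presumably simple comparisons against that field, roughly like the sketch below (this is not the actual include/net/tcp.h hunk, which is not shown on this page):

static inline int tcp_is_westwood(const struct tcp_opt *tp)
{
	return tp->adv_cong == TCP_WESTWOOD;
}

static inline int tcp_is_bic(const struct tcp_opt *tp)
{
	return tp->adv_cong == TCP_BIC;
}

static inline int tcp_is_vegas(const struct tcp_opt *tp)
{
	return tp->adv_cong == TCP_VEGAS;
}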
/* Do RTT sampling needed for Vegas.
@@ -2039,7 +2042,7 @@ tcp_ack_update_rtt(struct tcp_opt *tp, int flag, s32 seq_rtt)
static inline __u32 bictcp_cwnd(struct tcp_opt *tp)
{
/* orignal Reno behaviour */
-if (!sysctl_tcp_bic)
+if (!tcp_is_bic(tp))
return tp->snd_cwnd;
if (tp->bictcp.last_cwnd == tp->snd_cwnd &&
@@ -2617,18 +2620,16 @@ static void westwood_filter(struct sock *sk, __u32 delta)
* WESTWOOD_RTT_MIN minimum bound since we could be on a LAN!
*/
-static inline __u32 westwood_update_rttmin(struct sock *sk)
+static inline __u32 westwood_update_rttmin(const struct sock *sk)
{
-struct tcp_opt *tp = tcp_sk(sk);
+const struct tcp_opt *tp = tcp_sk(sk);
__u32 rttmin = tp->westwood.rtt_min;
-if (tp->westwood.rtt == 0)
-return(rttmin);
-if (tp->westwood.rtt < tp->westwood.rtt_min || !rttmin)
+if (tp->westwood.rtt != 0 &&
+    (tp->westwood.rtt < tp->westwood.rtt_min || !rttmin))
rttmin = tp->westwood.rtt;
-return(rttmin);
+return rttmin;
}

/*
@@ -2636,11 +2637,11 @@ static inline __u32 westwood_update_rttmin(struct sock *sk)
* Evaluate increases for dk.
*/
-static inline __u32 westwood_acked(struct sock *sk)
+static inline __u32 westwood_acked(const struct sock *sk)
{
-struct tcp_opt *tp = tcp_sk(sk);
+const struct tcp_opt *tp = tcp_sk(sk);
-return ((tp->snd_una) - (tp->westwood.snd_una));
+return tp->snd_una - tp->westwood.snd_una;
}

/*
@@ -2652,9 +2653,9 @@ static inline __u32 westwood_acked(struct sock *sk)
* window, 1 if the sample has to be considered in the next window.
*/
-static int westwood_new_window(struct sock *sk)
+static int westwood_new_window(const struct sock *sk)
{
-struct tcp_opt *tp = tcp_sk(sk);
+const struct tcp_opt *tp = tcp_sk(sk);
__u32 left_bound;
__u32 rtt;
int ret = 0;
@@ -2688,14 +2689,13 @@ static void __westwood_update_window(struct sock *sk, __u32 now)
struct tcp_opt *tp = tcp_sk(sk);
__u32 delta = now - tp->westwood.rtt_win_sx;
-if (!delta)
-return;
+if (delta) {
if (tp->westwood.rtt)
westwood_filter(sk, delta);
tp->westwood.bk = 0;
tp->westwood.rtt_win_sx = tcp_time_stamp;
+}
}
@@ -2739,7 +2739,7 @@ static void westwood_dupack_update(struct sock *sk)
static inline int westwood_may_change_cumul(struct tcp_opt *tp)
{
-return ((tp->westwood.cumul_ack) > tp->mss_cache_std);
+return (tp->westwood.cumul_ack > tp->mss_cache_std);
}

static inline void westwood_partial_update(struct tcp_opt *tp)
@@ -2760,7 +2760,7 @@ static inline void westwood_complete_update(struct tcp_opt *tp)
* delayed or partial acks.
*/
-static __u32 westwood_acked_count(struct sock *sk)
+static inline __u32 westwood_acked_count(struct sock *sk)
{
struct tcp_opt *tp = tcp_sk(sk);
@@ -2774,7 +2774,7 @@ static __u32 westwood_acked_count(struct sock *sk)
if (westwood_may_change_cumul(tp)) {
/* Partial or delayed ack */
-if ((tp->westwood.accounted) >= (tp->westwood.cumul_ack))
+if (tp->westwood.accounted >= tp->westwood.cumul_ack)
westwood_partial_update(tp);
else
westwood_complete_update(tp);
...
@@ -841,7 +841,8 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req,
if (newtp->ecn_flags&TCP_ECN_OK)
newsk->sk_no_largesend = 1;
-tcp_vegas_init(newtp);
+tcp_ca_init(newtp);
TCP_INC_STATS_BH(TCP_MIB_PASSIVEOPENS);
}
return newsk;
...
@@ -1359,7 +1359,7 @@ static inline void tcp_connect_init(struct sock *sk)
tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
tp->advmss = dst_metric(dst, RTAX_ADVMSS);
tcp_initialize_rcv_mss(sk);
-tcp_vegas_init(tp);
+tcp_ca_init(tp);
tcp_select_initial_window(tcp_full_space(sk),
tp->advmss - (tp->ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
@@ -1411,7 +1411,7 @@ int tcp_connect(struct sock *sk)
TCP_SKB_CB(buff)->end_seq = tp->write_seq;
tp->snd_nxt = tp->write_seq;
tp->pushed_seq = tp->write_seq;
-tcp_vegas_init(tp);
+tcp_ca_init(tp);
/* Send it off. */
TCP_SKB_CB(buff)->when = tcp_time_stamp;
...
@@ -128,6 +128,9 @@ static struct timer_list addr_chk_timer =
TIMER_INITIALIZER(addrconf_verify, 0, 0);
static spinlock_t addrconf_verify_lock = SPIN_LOCK_UNLOCKED;
+static void addrconf_join_anycast(struct inet6_ifaddr *ifp);
+static void addrconf_leave_anycast(struct inet6_ifaddr *ifp);
static int addrconf_ifdown(struct net_device *dev, int how);
static void addrconf_dad_start(struct inet6_ifaddr *ifp, int flags);
@@ -419,32 +422,27 @@ static void dev_forward_change(struct inet6_dev *idev)
ipv6_dev_mc_dec(dev, &addr);
}
for (ifa=idev->addr_list; ifa; ifa=ifa->if_next) {
-ipv6_addr_prefix(&addr, &ifa->addr, ifa->prefix_len);
-if (ipv6_addr_any(&addr))
-continue;
if (idev->cnf.forwarding)
-ipv6_dev_ac_inc(idev->dev, &addr);
+addrconf_join_anycast(ifa);
else
-ipv6_dev_ac_dec(idev->dev, &addr);
+addrconf_leave_anycast(ifa);
}
}

-static void addrconf_forward_change(struct inet6_dev *idev)
+static void addrconf_forward_change(void)
{
struct net_device *dev;
+struct inet6_dev *idev;
-if (idev) {
-dev_forward_change(idev);
-return;
-}
read_lock(&dev_base_lock);
for (dev=dev_base; dev; dev=dev->next) {
read_lock(&addrconf_lock);
idev = __in6_dev_get(dev);
if (idev) {
+int changed = (!idev->cnf.forwarding) ^ (!ipv6_devconf.forwarding);
idev->cnf.forwarding = ipv6_devconf.forwarding;
+if (changed)
dev_forward_change(idev);
}
read_unlock(&addrconf_lock);
@@ -1062,17 +1060,34 @@ void addrconf_join_solict(struct net_device *dev, struct in6_addr *addr)
ipv6_dev_mc_inc(dev, &maddr);
}

-void addrconf_leave_solict(struct net_device *dev, struct in6_addr *addr)
+void addrconf_leave_solict(struct inet6_dev *idev, struct in6_addr *addr)
{
struct in6_addr maddr;
-if (dev->flags&(IFF_LOOPBACK|IFF_NOARP))
+if (idev->dev->flags&(IFF_LOOPBACK|IFF_NOARP))
return;
addrconf_addr_solict_mult(addr, &maddr);
-ipv6_dev_mc_dec(dev, &maddr);
+__ipv6_dev_mc_dec(idev, &maddr);
}
void addrconf_join_anycast(struct inet6_ifaddr *ifp)
{
struct in6_addr addr;
ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
if (ipv6_addr_any(&addr))
return;
ipv6_dev_ac_inc(ifp->idev->dev, &addr);
}
void addrconf_leave_anycast(struct inet6_ifaddr *ifp)
{
struct in6_addr addr;
ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
if (ipv6_addr_any(&addr))
return;
__ipv6_dev_ac_dec(ifp->idev, &addr);
}
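Both helpers derive the subnet-router anycast address for the interface address, i.e. the prefix with a zero interface identifier (for 2001:db8:0:1::42/64 that is 2001:db8:0:1::), and bail out if the derived prefix is the unspecified address. A userspace illustration of the prefix derivation, not kernel code; addr_prefix() below is a hypothetical stand-in for the kernel's ipv6_addr_prefix():

#include <stdio.h>
#include <string.h>
#include <netinet/in.h>
#include <arpa/inet.h>

/* Copy the first plen bits of addr into pfx, zeroing the rest. */
static void addr_prefix(struct in6_addr *pfx, const struct in6_addr *addr,
			int plen)
{
	int o = plen >> 3, b = plen & 7;

	memset(pfx, 0, sizeof(*pfx));
	memcpy(pfx->s6_addr, addr->s6_addr, o);
	if (b)
		pfx->s6_addr[o] = addr->s6_addr[o] & (0xff00 >> b);
}

int main(void)
{
	struct in6_addr addr, pfx;
	char buf[INET6_ADDRSTRLEN];

	inet_pton(AF_INET6, "2001:db8:0:1::42", &addr);
	addr_prefix(&pfx, &addr, 64);
	printf("%s\n", inet_ntop(AF_INET6, &pfx, buf, sizeof(buf)));
	/* prints 2001:db8:0:1:: - the subnet-router anycast address */
	return 0;
}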
static int ipv6_generate_eui64(u8 *eui, struct net_device *dev)
{
@@ -2225,14 +2240,6 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp)
addrconf_mod_timer(ifp, AC_RS, ifp->idev->cnf.rtr_solicit_interval);
spin_unlock_bh(&ifp->lock);
}
-if (ifp->idev->cnf.forwarding) {
-struct in6_addr addr;
-ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
-if (!ipv6_addr_any(&addr))
-ipv6_dev_ac_inc(ifp->idev->dev, &addr);
-}
}

#ifdef CONFIG_PROC_FS
@@ -2994,16 +3001,13 @@ static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
dst_hold(&ifp->rt->u.dst);
if (ip6_ins_rt(ifp->rt, NULL, NULL))
dst_release(&ifp->rt->u.dst);
+if (ifp->idev->cnf.forwarding)
+addrconf_join_anycast(ifp);
break;
case RTM_DELADDR:
-addrconf_leave_solict(ifp->idev->dev, &ifp->addr);
-if (ifp->idev->cnf.forwarding) {
-struct in6_addr addr;
-ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
-if (!ipv6_addr_any(&addr))
-ipv6_dev_ac_dec(ifp->idev->dev, &addr);
-}
+if (ifp->idev->cnf.forwarding)
+addrconf_leave_anycast(ifp);
+addrconf_leave_solict(ifp->idev, &ifp->addr);
dst_hold(&ifp->rt->u.dst);
if (ip6_del_rt(ifp->rt, NULL, NULL))
dst_free(&ifp->rt->u.dst);
@@ -3025,18 +3029,18 @@ int addrconf_sysctl_forward(ctl_table *ctl, int write, struct file * filp,
ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
-if (write && *valp != val && valp != &ipv6_devconf_dflt.forwarding) {
-struct inet6_dev *idev = NULL;
+if (write && valp != &ipv6_devconf_dflt.forwarding) {
if (valp != &ipv6_devconf.forwarding) {
-idev = (struct inet6_dev *)ctl->extra1;
+if ((!*valp) ^ (!val)) {
+struct inet6_dev *idev = (struct inet6_dev *)ctl->extra1;
if (idev == NULL)
return ret;
-} else
+dev_forward_change(idev);
+}
+} else {
ipv6_devconf_dflt.forwarding = ipv6_devconf.forwarding;
-addrconf_forward_change(idev);
+addrconf_forward_change();
+}
if (*valp)
rt6_purge_dflt_routers(0);
}
@@ -3077,15 +3081,19 @@ static int addrconf_sysctl_forward_strategy(ctl_table *table,
}
if (valp != &ipv6_devconf_dflt.forwarding) {
-struct inet6_dev *idev;
if (valp != &ipv6_devconf.forwarding) {
-idev = (struct inet6_dev *)table->extra1;
+struct inet6_dev *idev = (struct inet6_dev *)table->extra1;
+int changed;
if (unlikely(idev == NULL))
return -ENODEV;
-} else
-idev = NULL;
+changed = (!*valp) ^ (!new);
+*valp = new;
+if (changed)
+dev_forward_change(idev);
+} else {
*valp = new;
-addrconf_forward_change(idev);
+addrconf_forward_change();
+}
if (*valp)
rt6_purge_dflt_routers(0);
...
@@ -377,15 +377,10 @@ int ipv6_dev_ac_inc(struct net_device *dev, struct in6_addr *addr)
/*
* device anycast group decrement
*/
-int ipv6_dev_ac_dec(struct net_device *dev, struct in6_addr *addr)
+int __ipv6_dev_ac_dec(struct inet6_dev *idev, struct in6_addr *addr)
{
-struct inet6_dev *idev;
struct ifacaddr6 *aca, *prev_aca;
-idev = in6_dev_get(dev);
-if (idev == NULL)
-return -ENODEV;
write_lock_bh(&idev->lock);
prev_aca = NULL;
for (aca = idev->ac_list; aca; aca = aca->aca_next) {
@@ -395,12 +390,10 @@ int ipv6_dev_ac_dec(struct net_device *dev, struct in6_addr *addr)
}
if (!aca) {
write_unlock_bh(&idev->lock);
-in6_dev_put(idev);
return -ENOENT;
}
if (--aca->aca_users > 0) {
write_unlock_bh(&idev->lock);
-in6_dev_put(idev);
return 0;
}
if (prev_aca)
@@ -408,7 +401,7 @@ int ipv6_dev_ac_dec(struct net_device *dev, struct in6_addr *addr)
else
idev->ac_list = aca->aca_next;
write_unlock_bh(&idev->lock);
-addrconf_leave_solict(dev, &aca->aca_addr);
+addrconf_leave_solict(idev, &aca->aca_addr);
dst_hold(&aca->aca_rt->u.dst);
if (ip6_del_rt(aca->aca_rt, NULL, NULL))
@@ -417,10 +410,20 @@ int ipv6_dev_ac_dec(struct net_device *dev, struct in6_addr *addr)
dst_release(&aca->aca_rt->u.dst);
aca_put(aca);
-in6_dev_put(idev);
return 0;
}
int ipv6_dev_ac_dec(struct net_device *dev, struct in6_addr *addr)
{
int ret;
struct inet6_dev *idev = in6_dev_get(dev);
if (idev == NULL)
return -ENODEV;
ret = __ipv6_dev_ac_dec(idev, addr);
in6_dev_put(idev);
return ret;
}
/*
* check if the interface has this anycast address
*/
...
@@ -128,6 +128,8 @@ static rwlock_t ipv6_sk_mc_lock = RW_LOCK_UNLOCKED;
static struct socket *igmp6_socket;
+int __ipv6_dev_mc_dec(struct inet6_dev *idev, struct in6_addr *addr);
static void igmp6_join_group(struct ifmcaddr6 *ma);
static void igmp6_leave_group(struct ifmcaddr6 *ma);
static void igmp6_timer_handler(unsigned long data);
@@ -256,9 +258,9 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, struct in6_addr *addr)
if (idev) {
(void) ip6_mc_leave_src(sk,mc_lst,idev);
+__ipv6_dev_mc_dec(idev, &mc_lst->addr);
in6_dev_put(idev);
}
-ipv6_dev_mc_dec(dev, &mc_lst->addr);
dev_put(dev);
}
sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
@@ -322,9 +324,9 @@ void ipv6_sock_mc_close(struct sock *sk)
if (idev) {
(void) ip6_mc_leave_src(sk, mc_lst, idev);
+__ipv6_dev_mc_dec(idev, &mc_lst->addr);
in6_dev_put(idev);
}
-ipv6_dev_mc_dec(dev, &mc_lst->addr);
dev_put(dev);
}
@@ -870,7 +872,7 @@ int ipv6_dev_mc_inc(struct net_device *dev, struct in6_addr *addr)
/*
* device multicast group del
*/
-static int __ipv6_dev_mc_dec(struct net_device *dev, struct inet6_dev *idev, struct in6_addr *addr)
+int __ipv6_dev_mc_dec(struct inet6_dev *idev, struct in6_addr *addr)
{
struct ifmcaddr6 *ma, **map;
@@ -903,7 +905,7 @@ int ipv6_dev_mc_dec(struct net_device *dev, struct in6_addr *addr)
if (!idev)
return -ENODEV;
-err = __ipv6_dev_mc_dec(dev, idev, addr);
+err = __ipv6_dev_mc_dec(idev, addr);
in6_dev_put(idev);
@@ -2108,7 +2110,12 @@ void ipv6_mc_destroy_dev(struct inet6_dev *idev)
* addrconf.c has NULL'd out dev->ip6_ptr so in6_dev_get() will
* fail.
*/
-__ipv6_dev_mc_dec(idev->dev, idev, &maddr);
+__ipv6_dev_mc_dec(idev, &maddr);
+if (idev->cnf.forwarding) {
+ipv6_addr_all_routers(&maddr);
+__ipv6_dev_mc_dec(idev, &maddr);
+}
write_lock_bh(&idev->lock);
while ((i = idev->mc_list) != NULL) {
...
@@ -66,6 +66,7 @@
#include <linux/if_arp.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
+#include <linux/jhash.h>
#include <net/sock.h>
#include <net/snmp.h>
@@ -270,15 +271,14 @@ int ndisc_mc_map(struct in6_addr *addr, char *buf, struct net_device *dev, int d
static u32 ndisc_hash(const void *pkey, const struct net_device *dev)
{
-u32 hash_val;
+const u32 *p32 = pkey;
+u32 addr_hash, i;
-hash_val = *(u32*)(pkey + sizeof(struct in6_addr) - 4);
-hash_val ^= (hash_val>>16);
-hash_val ^= hash_val>>8;
-hash_val ^= hash_val>>3;
-hash_val = (hash_val^dev->ifindex)&NEIGH_HASHMASK;
+addr_hash = 0;
+for (i = 0; i < (sizeof(struct in6_addr) / sizeof(u32)); i++)
+addr_hash ^= *p32++;
-return hash_val;
+return jhash_2words(addr_hash, dev->ifindex, nd_tbl.hash_rnd);
}
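The old hash used only the last 32 bits of the IPv6 address, folded with a few shifts and masked with NEIGH_HASHMASK; the new one XOR-folds all four 32-bit words and mixes the result with the interface index via jhash_2words(), keyed with the table's random hash_rnd so bucket placement is no longer predictable from the address alone. A userspace illustration of just the folding step (jhash_2words() itself comes from <linux/jhash.h> and is not reproduced here):

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <netinet/in.h>
#include <arpa/inet.h>

int main(void)
{
	struct in6_addr addr;
	uint32_t w[4], fold = 0;
	int i;

	inet_pton(AF_INET6, "2001:db8::1", &addr);
	memcpy(w, &addr, sizeof(w));
	for (i = 0; i < 4; i++)
		fold ^= w[i];		/* same XOR fold the new ndisc_hash() performs */
	printf("fold = %#x\n", fold);	/* kernel then does jhash_2words(fold, ifindex, hash_rnd) */
	return 0;
}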
static int ndisc_constructor(struct neighbour *neigh)
...