Commit e35a3e9a, authored Apr 04, 2003 by Linus Torvalds

    Merge bk://linux-pnp.bkbits.net/pnp-2.5
    into home.transmeta.com:/home/torvalds/v2.5/linux

Parents: e315468f, da334d91

Showing 46 changed files with 1894 additions and 1523 deletions (+1894, -1523)
arch/alpha/oprofile/common.c            +6    -0
arch/i386/Makefile                      +1    -1
arch/i386/kernel/smp.c                  +2    -1
arch/i386/oprofile/init.c               +9    -0
arch/i386/oprofile/nmi_int.c            +22   -2
arch/parisc/oprofile/init.c             +5    -0
arch/ppc64/oprofile/init.c              +5    -0
arch/sparc/kernel/module.c              +20   -1
arch/sparc64/kernel/module.c            +20   -1
arch/sparc64/oprofile/init.c            +5    -0
drivers/acpi/osl.c                      +2    -2
drivers/char/ipmi/ipmi_kcs_intf.c       +57   -12
drivers/char/ipmi/ipmi_msghandler.c     +23   -2
drivers/char/tty_io.c                   +2    -2
drivers/isdn/i4l/isdn_tty.c             +1    -1
drivers/oprofile/oprof.c                +1    -0
fs/fcntl.c                              +2    -2
include/linux/oprofile.h                +5    -0
include/linux/sched.h                   +6    -0
include/net/sctp/constants.h            +3    -5
include/net/sctp/sctp.h                 +58   -46
include/net/sctp/sm.h                   +6    -7
include/net/sctp/structs.h              +71   -66
include/net/sctp/ulpevent.h             +3    -3
kernel/exit.c                           +1    -1
kernel/itimer.c                         +1    -1
kernel/module.c                         +0    -5
kernel/signal.c                         +93   -78
net/ipv4/ip_output.c                    +4    -5
net/ipv4/netfilter/ip_nat_core.c        +0    -1
net/ipv6/ipv6_sockglue.c                +7    -9
net/ipv6/netfilter/ip6_queue.c          +1    -3
net/ipv6/proc.c                         +1    -1
net/sctp/associola.c                    +32   -40
net/sctp/bind_addr.c                    +15   -15
net/sctp/endpointola.c                  +7    -0
net/sctp/input.c                        +117  -71
net/sctp/ipv6.c                         +149  -40
net/sctp/output.c                       +45   -3
net/sctp/outqueue.c                     +27   -9
net/sctp/protocol.c                     +26   -7
net/sctp/sm_make_chunk.c                +53   -43
net/sctp/sm_sideeffect.c                +911  -1007
net/sctp/sm_statefuns.c                 +61   -25
net/sctp/socket.c                       +7    -5
net/sctp/ulpevent.c                     +1    -0
arch/alpha/oprofile/common.c
@@ -186,3 +186,9 @@ oprofile_arch_init(struct oprofile_operations **ops)
 	return 0;
 }
+
+void
+__exit
+oprofile_arch_exit(void)
+{
+}
arch/i386/Makefile
@@ -84,7 +84,7 @@ core-y += arch/i386/kernel/ \
 	   arch/i386/$(mcore-y)/
 drivers-$(CONFIG_MATH_EMULATION)	+= arch/i386/math-emu/
 drivers-$(CONFIG_PCI)			+= arch/i386/pci/
-# FIXME: is drivers- right ?
+# must be linked after kernel/
 drivers-$(CONFIG_OPROFILE)		+= arch/i386/oprofile/
 
 CFLAGS += $(mflags-y)
arch/i386/kernel/smp.c
@@ -522,7 +522,8 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
 	spin_lock(&call_lock);
 	call_data = &data;
-	wmb();
+	mb();
 
 	/* Send a message to all other CPUs and wait for them to respond */
 	send_IPI_allbutself(CALL_FUNCTION_VECTOR);
arch/i386/oprofile/init.c
@@ -17,6 +17,7 @@
  */
 
 extern int nmi_init(struct oprofile_operations ** ops);
+extern void nmi_exit(void);
 extern void timer_init(struct oprofile_operations ** ops);
 
 int __init oprofile_arch_init(struct oprofile_operations ** ops)
@@ -27,3 +28,11 @@ int __init oprofile_arch_init(struct oprofile_operations ** ops)
 	timer_init(ops);
 	return 0;
 }
+
+void __exit oprofile_arch_exit(void)
+{
+#ifdef CONFIG_X86_LOCAL_APIC
+	nmi_exit();
+#endif
+}
arch/i386/oprofile/nmi_int.c
@@ -67,15 +67,22 @@ static struct device device_nmi = {
 };
 
-static int __init init_nmi_driverfs(void)
+static int __init init_driverfs(void)
 {
 	driver_register(&nmi_driver);
 	return device_register(&device_nmi);
 }
 
-late_initcall(init_nmi_driverfs);
+static void __exit exit_driverfs(void)
+{
+	device_unregister(&device_nmi);
+	driver_unregister(&nmi_driver);
+}
+#else
+#define init_driverfs() do { } while (0)
+#define exit_driverfs() do { } while (0)
+#endif /* CONFIG_PM */
@@ -297,6 +304,10 @@ static int __init ppro_init(void)
 #endif /* !CONFIG_X86_64 */
 
+/* in order to get driverfs right */
+static int using_nmi;
+
 int __init nmi_init(struct oprofile_operations ** ops)
 {
 	__u8 vendor = current_cpu_data.x86_vendor;
@@ -339,7 +350,16 @@ int __init nmi_init(struct oprofile_operations ** ops)
 			return 0;
 	}
 
+	init_driverfs();
+	using_nmi = 1;
 	*ops = &nmi_ops;
 	printk(KERN_INFO "oprofile: using NMI interrupt.\n");
 	return 1;
 }
+
+void __exit nmi_exit(void)
+{
+	if (using_nmi)
+		exit_driverfs();
+}
arch/parisc/oprofile/init.c
@@ -18,3 +18,8 @@ int __init oprofile_arch_init(struct oprofile_operations ** ops)
 	timer_init(ops);
 	return 0;
 }
+
+void __exit oprofile_arch_exit()
+{
+}
arch/ppc64/oprofile/init.c
@@ -18,3 +18,8 @@ int __init oprofile_arch_init(struct oprofile_operations ** ops)
 	timer_init(ops);
 	return 0;
 }
+
+void __exit oprofile_arch_exit(void)
+{
+}
arch/sparc/kernel/module.c
@@ -36,12 +36,31 @@ void module_free(struct module *mod, void *module_region)
            table entries. */
 }
 
-/* We don't need anything special. */
+/* Make generic code ignore STT_REGISTER dummy undefined symbols. */
 int module_frob_arch_sections(Elf_Ehdr *hdr,
 			      Elf_Shdr *sechdrs,
 			      char *secstrings,
 			      struct module *mod)
 {
+	unsigned int symidx;
+	Elf32_Sym *sym;
+	const char *strtab;
+	int i;
+
+	for (symidx = 0; sechdrs[symidx].sh_type != SHT_SYMTAB; symidx++) {
+		if (symidx == hdr->e_shnum - 1) {
+			printk("%s: no symtab found.\n", mod->name);
+			return -ENOEXEC;
+		}
+	}
+	sym = (Elf32_Sym *)sechdrs[symidx].sh_addr;
+	strtab = (char *)sechdrs[sechdrs[symidx].sh_link].sh_addr;
+
+	for (i = 1; i < sechdrs[symidx].sh_size / sizeof(Elf_Sym); i++) {
+		if (sym[i].st_shndx == SHN_UNDEF &&
+		    ELF32_ST_TYPE(sym[i].st_info) == STT_REGISTER)
+			sym[i].st_shndx = SHN_ABS;
+	}
 	return 0;
 }
arch/sparc64/kernel/module.c
@@ -143,12 +143,31 @@ void module_free(struct module *mod, void *module_region)
            table entries. */
 }
 
-/* We don't need anything special. */
+/* Make generic code ignore STT_REGISTER dummy undefined symbols. */
 int module_frob_arch_sections(Elf_Ehdr *hdr,
 			      Elf_Shdr *sechdrs,
 			      char *secstrings,
 			      struct module *mod)
 {
+	unsigned int symidx;
+	Elf64_Sym *sym;
+	const char *strtab;
+	int i;
+
+	for (symidx = 0; sechdrs[symidx].sh_type != SHT_SYMTAB; symidx++) {
+		if (symidx == hdr->e_shnum - 1) {
+			printk("%s: no symtab found.\n", mod->name);
+			return -ENOEXEC;
+		}
+	}
+	sym = (Elf64_Sym *)sechdrs[symidx].sh_addr;
+	strtab = (char *)sechdrs[sechdrs[symidx].sh_link].sh_addr;
+
+	for (i = 1; i < sechdrs[symidx].sh_size / sizeof(Elf_Sym); i++) {
+		if (sym[i].st_shndx == SHN_UNDEF &&
+		    ELF64_ST_TYPE(sym[i].st_info) == STT_REGISTER)
+			sym[i].st_shndx = SHN_ABS;
+	}
 	return 0;
 }
arch/sparc64/oprofile/init.c
@@ -18,3 +18,8 @@ int __init oprofile_arch_init(struct oprofile_operations ** ops)
 	timer_init(ops);
 	return 0;
 }
+
+void __exit oprofile_arch_exit(void)
+{
+}
drivers/acpi/osl.c
@@ -736,7 +736,7 @@ acpi_os_acquire_lock (
 	if (flags & ACPI_NOT_ISR)
 		ACPI_DISABLE_IRQS();
-	spin_lock(handle);
+	spin_lock((spinlock_t *)handle);
 
 	return_VOID;
 }
@@ -755,7 +755,7 @@ acpi_os_release_lock (
 	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
 		"Releasing spinlock[%p] from %s level\n", handle,
 		((flags & ACPI_NOT_ISR) ? "non-interrupt" : "interrupt")));
 
-	spin_unlock(handle);
+	spin_unlock((spinlock_t *)handle);
 
 	if (flags & ACPI_NOT_ISR)
 		ACPI_ENABLE_IRQS();
drivers/char/ipmi/ipmi_kcs_intf.c
@@ -61,6 +61,14 @@
 /* Measure times between events in the driver. */
 #undef DEBUG_TIMING
 
+/* Timing parameters.  Call every 10 ms when not doing anything,
+   otherwise call every KCS_SHORT_TIMEOUT_USEC microseconds. */
+#define KCS_TIMEOUT_TIME_USEC	10000
+#define KCS_USEC_PER_JIFFY	(1000000/HZ)
+#define KCS_TIMEOUT_JIFFIES	(KCS_TIMEOUT_TIME_USEC/KCS_USEC_PER_JIFFY)
+#define KCS_SHORT_TIMEOUT_USEC  250 /* .25ms when the SM request a
+                                       short timeout */
+
 #ifdef CONFIG_IPMI_KCS
 /* This forces a dependency to the config file for this option. */
 #endif
@@ -132,6 +140,8 @@ struct kcs_info
 	int                 interrupt_disabled;
 };
 
+static void kcs_restart_short_timer(struct kcs_info *kcs_info);
+
 static void deliver_recv_msg(struct kcs_info *kcs_info, struct ipmi_smi_msg *msg)
 {
 	/* Deliver the message to the upper layer with the lock
@@ -309,6 +319,9 @@ static void handle_transaction_done(struct kcs_info *kcs_info)
 #endif
 	switch (kcs_info->kcs_state) {
 	case KCS_NORMAL:
+		if (!kcs_info->curr_msg)
+			break;
+
 		kcs_info->curr_msg->rsp_size
 			= kcs_get_result(kcs_info->kcs_sm,
 					 kcs_info->curr_msg->rsp,
@@ -563,8 +576,9 @@ static void sender(void *send_info,
 		spin_lock_irqsave(&(kcs_info->kcs_lock), flags);
 		result = kcs_event_handler(kcs_info, 0);
 		while (result != KCS_SM_IDLE) {
-			udelay(500);
-			result = kcs_event_handler(kcs_info, 500);
+			udelay(KCS_SHORT_TIMEOUT_USEC);
+			result = kcs_event_handler(kcs_info,
+						   KCS_SHORT_TIMEOUT_USEC);
 		}
 		spin_unlock_irqrestore(&(kcs_info->kcs_lock), flags);
 		return;
@@ -582,6 +596,7 @@ static void sender(void *send_info,
 	    && (kcs_info->curr_msg == NULL))
 	{
 		start_next_msg(kcs_info);
+		kcs_restart_short_timer(kcs_info);
 	}
 	spin_unlock_irqrestore(&(kcs_info->kcs_lock), flags);
 }
@@ -598,8 +613,9 @@ static void set_run_to_completion(void *send_info, int i_run_to_completion)
 	if (i_run_to_completion) {
 		result = kcs_event_handler(kcs_info, 0);
 		while (result != KCS_SM_IDLE) {
-			udelay(500);
-			result = kcs_event_handler(kcs_info, 500);
+			udelay(KCS_SHORT_TIMEOUT_USEC);
+			result = kcs_event_handler(kcs_info,
+						   KCS_SHORT_TIMEOUT_USEC);
 		}
 	}
@@ -613,14 +629,42 @@ static void request_events(void *send_info)
 	atomic_set(&kcs_info->req_events, 1);
 }
 
-/* Call every 10 ms. */
-#define KCS_TIMEOUT_TIME_USEC	10000
-#define KCS_USEC_PER_JIFFY	(1000000/HZ)
-#define KCS_TIMEOUT_JIFFIES	(KCS_TIMEOUT_TIME_USEC/KCS_USEC_PER_JIFFY)
-#define KCS_SHORT_TIMEOUT_USEC  500 /* .5ms when the SM request a
-                                       short timeout */
-
 static int initialized = 0;
 
+/* Must be called with interrupts off and with the kcs_lock held. */
+static void kcs_restart_short_timer(struct kcs_info *kcs_info)
+{
+	if (del_timer(&(kcs_info->kcs_timer))) {
+#ifdef CONFIG_HIGH_RES_TIMERS
+		unsigned long jiffies_now;
+
+		/* If we don't delete the timer, then it will go off
+		   immediately, anyway.  So we only process if we
+		   actually delete the timer. */
+
+		/* We already have irqsave on, so no need for it
+		   here. */
+		read_lock(&xtime_lock);
+		jiffies_now = jiffies;
+		kcs_info->kcs_timer.expires = jiffies_now;
+		kcs_info->kcs_timer.sub_expires
+			= quick_update_jiffies_sub(jiffies_now);
+		read_unlock(&xtime_lock);
+
+		kcs_info->kcs_timer.sub_expires
+			+= usec_to_arch_cycles(KCS_SHORT_TIMEOUT_USEC);
+		while (kcs_info->kcs_timer.sub_expires >= cycles_per_jiffies) {
+			kcs_info->kcs_timer.expires++;
+			kcs_info->kcs_timer.sub_expires -= cycles_per_jiffies;
+		}
+#else
+		kcs_info->kcs_timer.expires = jiffies + 1;
+#endif
+		add_timer(&(kcs_info->kcs_timer));
+	}
+}
+
 static void kcs_timeout(unsigned long data)
 {
 	struct kcs_info *kcs_info = (struct kcs_info *) data;
@@ -643,12 +687,11 @@ static void kcs_timeout(unsigned long data)
 		printk("**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec);
 #endif
 	jiffies_now = jiffies;
 	time_diff = ((jiffies_now - kcs_info->last_timeout_jiffies)
 		     * KCS_USEC_PER_JIFFY);
 	kcs_result = kcs_event_handler(kcs_info, time_diff);
-	spin_unlock_irqrestore(&(kcs_info->kcs_lock), flags);
 
 	kcs_info->last_timeout_jiffies = jiffies_now;
 
 	if ((kcs_info->irq) && (!kcs_info->interrupt_disabled)) {
@@ -669,6 +712,7 @@ static void kcs_timeout(unsigned long data)
 		}
 	} else {
 		kcs_info->kcs_timer.expires = jiffies + KCS_TIMEOUT_JIFFIES;
+		kcs_info->kcs_timer.sub_expires = 0;
 	}
 #else
 	/* If requested, take the shortest delay possible */
@@ -681,6 +725,7 @@ static void kcs_timeout(unsigned long data)
  do_add_timer:
 	add_timer(&(kcs_info->kcs_timer));
+	spin_unlock_irqrestore(&(kcs_info->kcs_lock), flags);
 }
 
 static void kcs_irq_handler(int irq, void *data, struct pt_regs *regs)
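A quick sanity check on the timing macros this hunk moves to the top of the file: KCS_TIMEOUT_JIFFIES is just the 10 ms poll interval expressed in jiffies. The standalone sketch below redoes that arithmetic outside the kernel; the HZ value is an assumption for illustration (the kernel uses its own configured HZ), and everything else mirrors the macros above.

    #include <stdio.h>

    /* Assumed HZ for the worked example; the kernel supplies its own. */
    #define HZ                     1000
    #define KCS_TIMEOUT_TIME_USEC  10000
    #define KCS_USEC_PER_JIFFY     (1000000 / HZ)
    #define KCS_TIMEOUT_JIFFIES    (KCS_TIMEOUT_TIME_USEC / KCS_USEC_PER_JIFFY)

    int main(void)
    {
    	/* 1000000/1000 = 1000 us per jiffy; 10000/1000 = 10 jiffies = 10 ms. */
    	printf("usec per jiffy:     %d\n", KCS_USEC_PER_JIFFY);
    	printf("long-poll interval: %d jiffies\n", KCS_TIMEOUT_JIFFIES);
    	return 0;
    }

With HZ = 100 the same expressions give 10000 us per jiffy and a 1-jiffy interval, so the macro form keeps the interval at 10 ms regardless of the tick rate.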
drivers/char/ipmi/ipmi_msghandler.c
@@ -1765,9 +1765,13 @@ static void ipmi_timeout(unsigned long data)
 }
 
+static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
+static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
+
 /* FIXME - convert these to slabs. */
 static void free_smi_msg(struct ipmi_smi_msg *msg)
 {
+	atomic_dec(&smi_msg_inuse_count);
 	kfree(msg);
 }
@@ -1775,13 +1779,16 @@ struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
 {
 	struct ipmi_smi_msg *rv;
 	rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
-	if (rv)
+	if (rv) {
 		rv->done = free_smi_msg;
+		atomic_inc(&smi_msg_inuse_count);
+	}
 	return rv;
 }
 
 static void free_recv_msg(struct ipmi_recv_msg *msg)
 {
+	atomic_dec(&recv_msg_inuse_count);
 	kfree(msg);
 }
@@ -1790,8 +1797,10 @@ struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
 	struct ipmi_recv_msg *rv;
 
 	rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
-	if (rv)
+	if (rv) {
 		rv->done = free_recv_msg;
+		atomic_inc(&recv_msg_inuse_count);
+	}
 	return rv;
 }
@@ -1924,6 +1933,8 @@ static __init int ipmi_init_msghandler(void)
 static __exit void cleanup_ipmi(void)
 {
+	int count;
+
 	if (!initialized)
 		return;
@@ -1940,6 +1951,16 @@ static __exit void cleanup_ipmi(void)
 	}
 
 	initialized = 0;
+
+	/* Check for buffer leaks. */
+	count = atomic_read(&smi_msg_inuse_count);
+	if (count != 0)
+		printk("ipmi_msghandler: SMI message count %d at exit\n",
+		       count);
+	count = atomic_read(&recv_msg_inuse_count);
+	if (count != 0)
+		printk("ipmi_msghandler: recv message count %d at exit\n",
+		       count);
 }
 module_exit(cleanup_ipmi);
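The inuse counters added above are a simple allocation canary: increment on every successful allocation, decrement in the destructor, and report any nonzero balance at module exit. A minimal userspace sketch of the same pattern follows; all names in it are illustrative stand-ins, not the kernel's.

    #include <stdio.h>
    #include <stdlib.h>

    static int msg_inuse_count;	/* stands in for the kernel's atomic_t */

    struct msg { int payload; };

    static struct msg *alloc_msg(void)
    {
    	struct msg *m = malloc(sizeof(*m));
    	if (m)
    		msg_inuse_count++;	/* count only successful allocations */
    	return m;
    }

    static void free_msg(struct msg *m)
    {
    	msg_inuse_count--;		/* balance the counter in the destructor */
    	free(m);
    }

    static void check_leaks(void)	/* analogous to the cleanup_ipmi() check */
    {
    	if (msg_inuse_count != 0)
    		fprintf(stderr, "msg count %d at exit\n", msg_inuse_count);
    }

    int main(void)
    {
    	struct msg *m = alloc_msg();
    	free_msg(m);
    	check_leaks();	/* prints nothing when every alloc was freed */
    	return 0;
    }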
drivers/char/tty_io.c
@@ -506,8 +506,8 @@ void do_tty_hangup(void *data)
 			p->tty = NULL;
 		if (!p->leader)
 			continue;
-		send_sig(SIGHUP, p, 1);
-		send_sig(SIGCONT, p, 1);
+		send_group_sig_info(SIGHUP, SEND_SIG_PRIV, p);
+		send_group_sig_info(SIGCONT, SEND_SIG_PRIV, p);
 		if (tty->pgrp > 0)
 			p->tty_old_pgrp = tty->pgrp;
 	}
drivers/isdn/i4l/isdn_tty.c
@@ -2036,7 +2036,7 @@ modem_write_profile(atemu * m)
 	memcpy(m->pmsn, m->msn, ISDN_MSNLEN);
 	memcpy(m->plmsn, m->lmsn, ISDN_LMSNLEN);
 	if (dev->profd)
-		send_sig(SIGIO, dev->profd, 1);
+		group_send_sig_info(SIGIO, SEND_SIG_PRIV, dev->profd);
 }
 
 int
drivers/oprofile/oprof.c
@@ -148,6 +148,7 @@ static int __init oprofile_init(void)
 static void __exit oprofile_exit(void)
 {
 	oprofilefs_unregister();
+	oprofile_arch_exit();
 }
fs/fcntl.c
@@ -465,7 +465,7 @@ static void send_sigio_to_task(struct task_struct *p,
 			break;
 		/* fall-through: fall back on the old plain SIGIO signal */
 		case 0:
-			send_sig(SIGIO, p, 1);
+			send_group_sig_info(SIGIO, SEND_SIG_PRIV, p);
 	}
 }
@@ -501,7 +501,7 @@ static void send_sigurg_to_task(struct task_struct *p,
 				struct fown_struct *fown)
 {
 	if (sigio_perm(p, fown))
-		send_sig(SIGURG, p, 1);
+		send_group_sig_info(SIGURG, SEND_SIG_PRIV, p);
 }
 
 int send_sigurg(struct fown_struct *fown)
include/linux/oprofile.h
@@ -45,6 +45,11 @@ struct oprofile_operations {
  */
 int oprofile_arch_init(struct oprofile_operations ** ops);
 
+/**
+ * One-time exit/cleanup for the arch.
+ */
+void oprofile_arch_exit(void);
+
 /**
  * Add a sample. This may be called from any context. Pass
  * smp_processor_id() as cpu.
include/linux/sched.h
@@ -541,6 +541,7 @@ extern void block_all_signals(int (*notifier)(void *priv), void *priv,
 extern void unblock_all_signals(void);
 extern void release_task(struct task_struct * p);
 extern int send_sig_info(int, struct siginfo *, struct task_struct *);
+extern int send_group_sig_info(int, struct siginfo *, struct task_struct *);
 extern int force_sig_info(int, struct siginfo *, struct task_struct *);
 extern int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp);
 extern int kill_pg_info(int, struct siginfo *, pid_t);
@@ -558,6 +559,11 @@ extern int kill_proc(pid_t, int, int);
 extern int do_sigaction(int, const struct k_sigaction *, struct k_sigaction *);
 extern int do_sigaltstack(const stack_t *, stack_t *, unsigned long);
 
+/* These can be the second arg to send_sig_info/send_group_sig_info.  */
+#define SEND_SIG_NOINFO ((struct siginfo *) 0)
+#define SEND_SIG_PRIV	((struct siginfo *) 1)
+#define SEND_SIG_FORCED	((struct siginfo *) 2)
+
 /* True if we are on the alternate signal stack.  */
 static inline int on_sig_stack(unsigned long sp)
include/net/sctp/constants.h
@@ -138,12 +138,10 @@ typedef enum {
  */
 typedef union {
 	sctp_cid_t chunk;
 	sctp_event_timeout_t timeout;
 	sctp_event_other_t other;
 	sctp_event_primitive_t primitive;
 } sctp_subtype_t;
 
 #define SCTP_SUBTYPE_CONSTRUCTOR(_name, _type, _elt) \
@@ -421,9 +419,9 @@ typedef enum {
 /* Reasons to retransmit. */
 typedef enum {
-	SCTP_RETRANSMIT_T3_RTX,
-	SCTP_RETRANSMIT_FAST_RTX,
-	SCTP_RETRANSMIT_PMTU_DISCOVERY,
+	SCTP_RTXR_T3_RTX,
+	SCTP_RTXR_FAST_RTX,
+	SCTP_RTXR_PMTUD,
 } sctp_retransmit_reason_t;
 
 /* Reasons to lower cwnd. */
include/net/sctp/sctp.h
@@ -123,14 +123,14 @@
  */
 extern struct sctp_protocol sctp_proto;
 extern struct sock *sctp_get_ctl_sock(void);
 extern int sctp_copy_local_addr_list(struct sctp_protocol *,
 				     struct sctp_bind_addr *,
 				     sctp_scope_t, int priority, int flags);
 extern struct sctp_pf *sctp_get_pf_specific(sa_family_t family);
 extern int sctp_register_pf(struct sctp_pf *, sa_family_t);
 
-/* sctp_socket.c */
+/* sctp/socket.c */
 extern int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
 extern int sctp_inet_listen(struct socket *sock, int backlog);
@@ -139,7 +139,7 @@ extern unsigned int sctp_poll(struct file *file, struct socket *sock,
 			      poll_table *wait);
 
-/* sctp_primitive.c */
+/* sctp/primitive.c */
 extern int sctp_primitive_ASSOCIATE(sctp_association_t *, void *arg);
 extern int sctp_primitive_SHUTDOWN(sctp_association_t *, void *arg);
@@ -148,14 +148,14 @@ extern int sctp_primitive_SEND(sctp_association_t *, void *arg);
 extern int sctp_primitive_REQUESTHEARTBEAT(sctp_association_t *, void *arg);
 
-/* sctp_crc32c.c */
+/* sctp/crc32c.c */
 extern __u32 sctp_start_cksum(__u8 *ptr, __u16 count);
 extern __u32 sctp_update_cksum(__u8 *ptr, __u16 count, __u32 cksum);
 extern __u32 sctp_end_cksum(__u32 cksum);
 
-/* sctp_input.c */
+/* sctp/input.c */
 extern int sctp_rcv(struct sk_buff *skb);
 extern void sctp_v4_err(struct sk_buff *skb, u32 info);
@@ -170,9 +170,16 @@ extern void __sctp_unhash_endpoint(sctp_endpoint_t *);
 extern sctp_association_t *__sctp_lookup_association(const union sctp_addr *,
 						     const union sctp_addr *,
 						     struct sctp_transport **);
+extern struct sock *sctp_err_lookup(int family, struct sk_buff *,
+				    struct sctphdr *, struct sctp_endpoint **,
+				    struct sctp_association **,
+				    struct sctp_transport **);
+extern void sctp_err_finish(struct sock *, struct sctp_endpoint *,
+			    struct sctp_association *);
+extern void sctp_icmp_frag_needed(struct sock *, struct sctp_association *,
+				  struct sctp_transport *t, __u32 pmtu);
 
-/* sctp_hashdriver.c */
+/* sctp/hashdriver.c */
 extern void sctp_hash_digest(const char *secret, const int secret_len,
 			     const char *text, const int text_len,
@@ -184,9 +191,7 @@ extern void sctp_hash_digest(const char *secret, const int secret_len,
 #ifdef TEST_FRAME
 #include <test_frame.h>
 #else
 
 /* spin lock wrappers. */
@@ -194,28 +199,28 @@ extern void sctp_hash_digest(const char *secret, const int secret_len,
 #define sctp_spin_unlock_irqrestore(lock, flags)  \
 	spin_unlock_irqrestore(lock, flags)
 #define sctp_local_bh_disable()  local_bh_disable()
 #define sctp_local_bh_enable()   local_bh_enable()
 #define sctp_spin_lock(lock)     spin_lock(lock)
 #define sctp_spin_unlock(lock)   spin_unlock(lock)
 #define sctp_write_lock(lock)    write_lock(lock)
 #define sctp_write_unlock(lock)  write_unlock(lock)
 #define sctp_read_lock(lock)     read_lock(lock)
 #define sctp_read_unlock(lock)   read_unlock(lock)
 
 /* sock lock wrappers. */
 #define sctp_lock_sock(sk)       lock_sock(sk)
 #define sctp_release_sock(sk)    release_sock(sk)
 #define sctp_bh_lock_sock(sk)    bh_lock_sock(sk)
 #define sctp_bh_unlock_sock(sk)  bh_unlock_sock(sk)
 #define SCTP_SOCK_SLEEP_PRE(sk)  SOCK_SLEEP_PRE(sk)
 #define SCTP_SOCK_SLEEP_POST(sk) SOCK_SLEEP_POST(sk)
 
 /* SCTP SNMP MIB stats handlers */
 DECLARE_SNMP_STAT(struct sctp_mib, sctp_statistics);
 #define SCTP_INC_STATS(field)      SNMP_INC_STATS(sctp_statistics, field)
 #define SCTP_INC_STATS_BH(field)   SNMP_INC_STATS_BH(sctp_statistics, field)
 #define SCTP_INC_STATS_USER(field) SNMP_INC_STATS_USER(sctp_statistics, field)
 #define SCTP_DEC_STATS(field)      SNMP_DEC_STATS(sctp_statistics, field)
 
 /* Determine if this is a valid kernel address. */
 static inline int sctp_is_valid_kaddr(unsigned long addr)
@@ -312,7 +317,6 @@ static inline void sctp_sysctl_register(void) { return; }
 static inline void sctp_sysctl_unregister(void) { return; }
 #endif
 
 /* Size of Supported Address Parameter for 'x' address types. */
 #define SCTP_SAT_LEN(x) (sizeof(struct sctp_paramhdr) + (x) * sizeof(__u16))
@@ -320,19 +324,15 @@ static inline void sctp_sysctl_unregister(void) { return; }
 extern int sctp_v6_init(void);
 extern void sctp_v6_exit(void);
-
-static inline int sctp_ipv6_addr_type(const struct in6_addr *addr)
-{
-	return ipv6_addr_type((struct in6_addr *)addr);
-}
+extern void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+			int type, int code, int offset, __u32 info);
 
-#else /* #ifdef defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
+#else /* #ifdef defined(CONFIG_IPV6) */
 
-#define sctp_ipv6_addr_type(a) 0
 static inline int sctp_v6_init(void) { return 0; }
 static inline void sctp_v6_exit(void) { return; }
 
-#endif /* #ifdef defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
+#endif /* #if defined(CONFIG_IPV6) */
 
 /* Map an association to an assoc_id. */
 static inline sctp_assoc_t sctp_assoc2id(const sctp_association_t *asoc)
@@ -414,13 +414,22 @@ static inline __s32 sctp_jitter(__u32 rto)
 	sctp_rand ^= (sctp_rand << 12);
 	sctp_rand ^= (sctp_rand >> 20);
 
 	/* Choose random number from 0 to rto, then move to -50% ~ +50%
 	 * of rto.
 	 */
 	ret = sctp_rand % rto - (rto >> 1);
 	return ret;
 }
 
+/* Break down data chunks at this point.  */
+static inline int sctp_frag_point(int pmtu)
+{
+	pmtu -= SCTP_IP_OVERHEAD + sizeof(struct sctp_data_chunk);
+	pmtu -= sizeof(struct sctp_sack_chunk);
+
+	return pmtu;
+}
+
 /* Walk through a list of TLV parameters.  Don't trust the
  * individual parameter lengths and instead depend on
  * the chunk length to indicate when to stop.  Make sure
@@ -479,21 +488,24 @@ static inline struct sctp_protocol *sctp_get_protocol(void)
 /* Convert from an IP version number to an Address Family symbol. */
 static inline int ipver2af(__u8 ipver)
 {
-	int family;
-
 	switch (ipver) {
 	case 4:
-		family = AF_INET;
-		break;
+		return AF_INET;
 	case 6:
-		family = AF_INET6;
-		break;
+		return AF_INET6;
 	default:
-		family = 0;
-		break;
+		return 0;
 	};
-	return family;
 }
 
+/* Perform some sanity checks. */
+static inline int sctp_sanity_check(void)
+{
+	SCTP_ASSERT(sizeof(struct sctp_ulpevent) <=
+		    sizeof(((struct sk_buff *)0)->cb),
+		    "SCTP: ulpevent does not fit in skb!\n", return 0);
+
+	return 1;
+}
+
 /* Warning: The following hash functions assume a power of two 'size'. */
@@ -537,7 +549,7 @@ struct sctp_sock {
 	struct sock	  sk;
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 	struct ipv6_pinfo *pinet6;
-#endif /* CONFIG_IPV6 || CONFIG_IPV6_MODULE */
+#endif /* CONFIG_IPV6 */
 	struct inet_opt	  inet;
 	struct sctp_opt	  sctp;
 };
@@ -550,7 +562,7 @@ struct sctp6_sock {
 	struct sctp_opt   sctp;
 	struct ipv6_pinfo inet6;
 };
-#endif /* CONFIG_IPV6 || CONFIG_IPV6_MODULE */
+#endif /* CONFIG_IPV6 */
 
 #define sctp_sk(__sk) (&((struct sctp_sock *)__sk)->sctp)
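For a feel of the new sctp_frag_point() helper above: it subtracts the IP-plus-SCTP packet overhead and room for a data chunk header, then reserves space for a bundled SACK, leaving the largest payload that fits one path MTU. The standalone sketch below redoes that computation; SCTP_IP_OVERHEAD and the two chunk sizes are kernel-internal, so the numeric values here are assumptions for illustration only.

    #include <stdio.h>

    /* Illustrative stand-ins; real values come from the SCTP headers. */
    #define SCTP_IP_OVERHEAD          48  /* assumed: IP + SCTP common header */
    #define SCTP_DATA_CHUNK_OVERHEAD  16  /* assumed sizeof(struct sctp_data_chunk) */
    #define SCTP_SACK_CHUNK_OVERHEAD  16  /* assumed sizeof(struct sctp_sack_chunk) */

    static int sctp_frag_point(int pmtu)
    {
    	pmtu -= SCTP_IP_OVERHEAD + SCTP_DATA_CHUNK_OVERHEAD;
    	pmtu -= SCTP_SACK_CHUNK_OVERHEAD;
    	return pmtu;
    }

    int main(void)
    {
    	/* With a 1500-byte Ethernet MTU: 1500 - 64 - 16 = 1420 bytes. */
    	printf("frag point for pmtu 1500: %d\n", sctp_frag_point(1500));
    	return 0;
    }

Centralizing this in one inline is what lets the associola.c hunks later in this commit replace two hand-coded copies of the subtraction.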
include/net/sctp/sm.h
@@ -197,15 +197,14 @@ sctp_state_fn_t sctp_addip_do_asconf;
 sctp_state_fn_t sctp_addip_do_asconf_ack;
 
 /* Prototypes for utility support functions.  */
-__u8 sctp_get_chunk_type(sctp_chunk_t *chunk);
+__u8 sctp_get_chunk_type(struct sctp_chunk *chunk);
 sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
 					    sctp_state_t state,
 					    sctp_subtype_t event_subtype);
 time_t timeval_sub(struct timeval *, struct timeval *);
-sctp_association_t *sctp_make_temp_asoc(const sctp_endpoint_t *,
-					sctp_chunk_t *,
-					const int priority);
+int sctp_chunk_iif(const struct sctp_chunk *);
+struct sctp_association *sctp_make_temp_asoc(const struct sctp_endpoint *,
+					     struct sctp_chunk *, int gfp);
 __u32 sctp_generate_verification_tag(void);
 void sctp_populate_tie_tags(__u8 *cookie, __u32 curTag, __u32 hisTag);
@@ -344,7 +343,7 @@ __u32 sctp_generate_tsn(const sctp_endpoint_t *);
 /* 4th level prototypes */
 void sctp_param2sockaddr(union sctp_addr *addr, sctp_addr_param_t *,
-			 __u16 port);
+			 __u16 port, int iif);
 int sctp_addr2sockaddr(const union sctp_params, union sctp_addr *);
 int sockaddr2sctp_addr(const union sctp_addr *, sctp_addr_param_t *);
include/net/sctp/structs.h
@@ -242,6 +242,7 @@ struct sctp_af {
 	void		(*inaddr_any)	 (union sctp_addr *, unsigned short);
 	int		(*is_any)	 (const union sctp_addr *);
 	int		(*available)	 (const union sctp_addr *);
+	int		(*skb_iif)	 (const struct sk_buff *sk);
 	__u16		net_header_len;
 	int		sockaddr_len;
 	sa_family_t	sa_family;
@@ -260,6 +261,7 @@ struct sctp_pf {
 			  const union sctp_addr *,
 			  struct sctp_opt *);
 	int  (*bind_verify) (struct sctp_opt *, union sctp_addr *);
 	int  (*send_verify) (struct sctp_opt *, union sctp_addr *);
+	int  (*supported_addrs)(const struct sctp_opt *, __u16 *);
 	struct sock *(*create_accept_sk) (struct sock *sk,
 					  struct sctp_association *asoc);
@@ -430,7 +432,7 @@ struct sctp_ssnmap {
 };
 
 struct sctp_ssnmap *sctp_ssnmap_init(struct sctp_ssnmap *, __u16, __u16);
-struct sctp_ssnmap *sctp_ssnmap_new(__u16 in, __u16 out, int priority);
+struct sctp_ssnmap *sctp_ssnmap_new(__u16 in, __u16 out, int gfp);
 void sctp_ssnmap_free(struct sctp_ssnmap *map);
 void sctp_ssnmap_clear(struct sctp_ssnmap *map);
@@ -509,7 +511,7 @@ struct sctp_chunk {
 	struct sctp_sndrcvinfo sinfo;
 
 	/* Which association does this belong to?  */
-	sctp_association_t *asoc;
+	struct sctp_association *asoc;
 
 	/* What endpoint received this chunk? */
 	sctp_endpoint_common_t *rcvr;
@@ -541,11 +543,11 @@ struct sctp_chunk {
 	struct sctp_transport *transport;
 };
 
-sctp_chunk_t *sctp_make_chunk(const sctp_association_t *, __u8 type,
+sctp_chunk_t *sctp_make_chunk(const struct sctp_association *, __u8 type,
 			      __u8 flags, int size);
 void sctp_free_chunk(sctp_chunk_t *);
 void *sctp_addto_chunk(sctp_chunk_t *chunk, int len, const void *data);
-sctp_chunk_t *sctp_chunkify(struct sk_buff *, const sctp_association_t *,
+sctp_chunk_t *sctp_chunkify(struct sk_buff *, const struct sctp_association *,
 			    struct sock *);
 void sctp_init_addrs(sctp_chunk_t *, union sctp_addr *, union sctp_addr *);
 const union sctp_addr *sctp_source(const sctp_chunk_t *chunk);
@@ -560,7 +562,7 @@ struct sockaddr_storage_list {
 	union sctp_addr a;
 };
 
-typedef sctp_chunk_t *(sctp_packet_phandler_t)(sctp_association_t *);
+typedef sctp_chunk_t *(sctp_packet_phandler_t)(struct sctp_association *);
 
 /* This structure holds lists of chunks as we are assembling for
  * transmission.
@@ -590,13 +592,16 @@ struct sctp_packet {
 	/* This packet should advertise ECN capability to the network
 	 * via the ECT bit.
 	 */
-	int ecn_capable;
+	char ecn_capable;
 
 	/* This packet contains a COOKIE-ECHO chunk. */
-	int has_cookie_echo;
+	char has_cookie_echo;
+
+	/* This packet containsa SACK chunk. */
+	char has_sack;
 
 	/* SCTP cannot fragment this packet. So let ip fragment it. */
-	int ipfragok;
+	char ipfragok;
 
 	int malloced;
 };
@@ -660,7 +665,7 @@ struct sctp_transport {
 	struct sctp_af *af_specific;
 
 	/* Which association do we belong to?  */
-	sctp_association_t *asoc;
+	struct sctp_association *asoc;
 
 	/* RFC2960
 	 *
@@ -799,7 +804,8 @@ struct sctp_transport {
 struct sctp_transport *sctp_transport_new(const union sctp_addr *, int);
 struct sctp_transport *sctp_transport_init(struct sctp_transport *,
 					   const union sctp_addr *, int);
-void sctp_transport_set_owner(struct sctp_transport *, sctp_association_t *);
+void sctp_transport_set_owner(struct sctp_transport *,
+			      struct sctp_association *);
 void sctp_transport_route(struct sctp_transport *, union sctp_addr *,
 			  struct sctp_opt *);
 void sctp_transport_pmtu(struct sctp_transport *);
@@ -862,13 +868,16 @@ void sctp_inq_set_th_handler(struct sctp_inq *, void (*)(void *), void *);
  * When free()'d, it empties itself out via output_handler().
  */
 struct sctp_outq {
-	sctp_association_t *asoc;
+	struct sctp_association *asoc;
 
 	/* Data pending that has never been transmitted.  */
 	struct sk_buff_head out;
 
+	unsigned out_qlen;	/* Total length of queued data chunks. */
+
+	/* Error of send failed, may used in SCTP_SEND_FAILED event. */
+	unsigned error;
+
 	/* These are control chunks we want to send.  */
 	struct sk_buff_head control;
@@ -902,8 +911,8 @@ struct sctp_outq {
 	int malloced;
 };
 
-struct sctp_outq *sctp_outq_new(sctp_association_t *);
-void sctp_outq_init(sctp_association_t *, struct sctp_outq *);
+struct sctp_outq *sctp_outq_new(struct sctp_association *);
+void sctp_outq_init(struct sctp_association *, struct sctp_outq *);
 void sctp_outq_teardown(struct sctp_outq *);
 void sctp_outq_free(struct sctp_outq*);
 int sctp_outq_tail(struct sctp_outq *, sctp_chunk_t *chunk);
@@ -947,20 +956,16 @@ sctp_bind_addr_t *sctp_bind_addr_new(int gfp_mask);
 void sctp_bind_addr_init(sctp_bind_addr_t *, __u16 port);
 void sctp_bind_addr_free(sctp_bind_addr_t *);
 int sctp_bind_addr_copy(sctp_bind_addr_t *dest, const sctp_bind_addr_t *src,
-			sctp_scope_t scope, int priority, int flags);
-int sctp_add_bind_addr(sctp_bind_addr_t *, union sctp_addr *, int priority);
+			sctp_scope_t scope, int gfp, int flags);
+int sctp_add_bind_addr(sctp_bind_addr_t *, union sctp_addr *, int gfp);
 int sctp_del_bind_addr(sctp_bind_addr_t *, union sctp_addr *);
 int sctp_bind_addr_match(sctp_bind_addr_t *, const union sctp_addr *,
 			 struct sctp_opt *);
-union sctp_params sctp_bind_addrs_to_raw(const sctp_bind_addr_t *bp,
-					 int *addrs_len, int priority);
-int sctp_raw_to_bind_addrs(sctp_bind_addr_t *bp, __u8 *raw_addr_list,
-			   int addrs_len, unsigned short port, int priority);
+union sctp_params sctp_bind_addrs_to_raw(const struct sctp_bind_addr *bp,
+					 int *addrs_len, int gfp);
+int sctp_raw_to_bind_addrs(struct sctp_bind_addr *bp, __u8 *raw, int len,
+			   __u16 port, int gfp);
 sctp_scope_t sctp_scope(const union sctp_addr *);
 int sctp_in_scope(const union sctp_addr *addr, const sctp_scope_t scope);
@@ -1063,7 +1068,7 @@ struct sctp_endpoint {
 	 * pointer, or table pointers dependent on how SCTP
 	 * is implemented.
 	 */
-	/* This is really a list of sctp_association_t entries. */
+	/* This is really a list of struct sctp_association entries. */
 	struct list_head asocs;
 
 	/* Secret Key: A secret key used by this endpoint to compute
@@ -1099,12 +1104,12 @@ static inline sctp_endpoint_t *sctp_ep(sctp_endpoint_common_t *base)
 sctp_endpoint_t *sctp_endpoint_new(struct sctp_protocol *, struct sock *, int);
 sctp_endpoint_t *sctp_endpoint_init(struct sctp_endpoint *,
 				    struct sctp_protocol *,
-				    struct sock *, int priority);
+				    struct sock *, int gfp);
 void sctp_endpoint_free(sctp_endpoint_t *);
 void sctp_endpoint_put(sctp_endpoint_t *);
 void sctp_endpoint_hold(sctp_endpoint_t *);
-void sctp_endpoint_add_asoc(sctp_endpoint_t *, sctp_association_t *asoc);
-sctp_association_t *sctp_endpoint_lookup_assoc(const sctp_endpoint_t *ep,
+void sctp_endpoint_add_asoc(sctp_endpoint_t *, struct sctp_association *asoc);
+struct sctp_association *sctp_endpoint_lookup_assoc(const sctp_endpoint_t *ep,
 					       const union sctp_addr *paddr,
 					       struct sctp_transport **);
 int sctp_endpoint_is_peeled_off(sctp_endpoint_t *, const union sctp_addr *);
@@ -1113,18 +1118,16 @@ sctp_endpoint_t *sctp_endpoint_is_match(sctp_endpoint_t *,
 int sctp_has_association(const union sctp_addr *laddr,
 			 const union sctp_addr *paddr);
 
-int sctp_verify_init(const sctp_association_t *asoc, sctp_cid_t cid,
-		     sctp_init_chunk_t *peer_init, sctp_chunk_t *chunk,
-		     sctp_chunk_t **err_chunk);
-int sctp_process_init(sctp_association_t *asoc, sctp_cid_t cid,
-		      const union sctp_addr *peer_addr,
-		      sctp_init_chunk_t *peer_init, int priority);
-int sctp_process_param(sctp_association_t *asoc, union sctp_params param,
-		       const union sctp_addr *peer_addr, int priority);
-__u32 sctp_generate_tag(const sctp_endpoint_t *ep);
-__u32 sctp_generate_tsn(const sctp_endpoint_t *ep);
+int sctp_verify_init(const struct sctp_association *asoc, sctp_cid_t,
+		     sctp_init_chunk_t *peer_init, struct sctp_chunk *chunk,
+		     struct sctp_chunk **err_chunk);
+int sctp_process_init(struct sctp_association *, sctp_cid_t cid,
+		      const union sctp_addr *peer,
+		      sctp_init_chunk_t *init, int gfp);
+int sctp_process_param(struct sctp_association *, union sctp_params param,
+		       const union sctp_addr *from, int gfp);
+__u32 sctp_generate_tag(const sctp_endpoint_t *);
+__u32 sctp_generate_tsn(const sctp_endpoint_t *);
 
 /* RFC2960
@@ -1153,7 +1156,7 @@ struct sctp_association {
 	struct list_head asocs;
 
 	/* This is a signature that lets us know that this is a
-	 * sctp_association_t data structure.  Used for mapping an
+	 * struct sctp_association data structure.  Used for mapping an
 	 * association id to an association.
 	 */
 	__u32 eyecatcher;
@@ -1556,44 +1559,46 @@ enum {
 };
 
 /* Recover the outter association structure. */
-static inline sctp_association_t *sctp_assoc(sctp_endpoint_common_t *base)
+static inline struct sctp_association *sctp_assoc(sctp_endpoint_common_t *base)
 {
-	sctp_association_t *asoc;
+	struct sctp_association *asoc;
 
-	asoc = container_of(base, sctp_association_t, base);
+	asoc = container_of(base, struct sctp_association, base);
 	return asoc;
 }
 
 /* These are function signatures for manipulating associations.  */
-sctp_association_t *
+struct sctp_association *
 sctp_association_new(const sctp_endpoint_t *, const struct sock *,
-		     sctp_scope_t scope, int priority);
-sctp_association_t *
-sctp_association_init(sctp_association_t *, const sctp_endpoint_t *,
-		      const struct sock *, sctp_scope_t scope,
-		      int priority);
-void sctp_association_free(sctp_association_t *);
-void sctp_association_put(sctp_association_t *);
-void sctp_association_hold(sctp_association_t *);
-struct sctp_transport *sctp_assoc_choose_shutdown_transport(sctp_association_t *);
-void sctp_assoc_update_retran_path(sctp_association_t *);
-struct sctp_transport *sctp_assoc_lookup_paddr(const sctp_association_t *,
+		     sctp_scope_t scope, int gfp);
+struct sctp_association *
+sctp_association_init(struct sctp_association *, const sctp_endpoint_t *,
+		      const struct sock *, sctp_scope_t scope, int gfp);
+void sctp_association_free(struct sctp_association *);
+void sctp_association_put(struct sctp_association *);
+void sctp_association_hold(struct sctp_association *);
+struct sctp_transport *sctp_assoc_choose_shutdown_transport(
+	struct sctp_association *);
+void sctp_assoc_update_retran_path(struct sctp_association *);
+struct sctp_transport *sctp_assoc_lookup_paddr(const struct sctp_association *,
 					       const union sctp_addr *);
-struct sctp_transport *sctp_assoc_add_peer(sctp_association_t *,
+struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *,
 				     const union sctp_addr *address,
-				     const int priority);
+				     const int gfp);
 void sctp_assoc_control_transport(struct sctp_association *,
 				  struct sctp_transport *,
 				  sctp_transport_cmd_t, sctp_sn_error_t);
-struct sctp_transport *sctp_assoc_lookup_tsn(sctp_association_t *, __u32);
-struct sctp_transport *sctp_assoc_is_match(sctp_association_t *,
+struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *, __u32);
+struct sctp_transport *sctp_assoc_is_match(struct sctp_association *,
 					   const union sctp_addr *,
 					   const union sctp_addr *);
-void sctp_assoc_migrate(sctp_association_t *, struct sock *);
-void sctp_assoc_update(sctp_association_t *dst, sctp_association_t *src);
+void sctp_assoc_migrate(struct sctp_association *, struct sock *);
+void sctp_assoc_update(struct sctp_association *old,
+		       struct sctp_association *new);
 
 __u32 sctp_association_get_next_tsn(struct sctp_association *);
 __u32 sctp_association_get_tsn_block(struct sctp_association *, int);
@@ -1603,14 +1608,14 @@ void sctp_assoc_rwnd_increase(struct sctp_association *, int);
 void sctp_assoc_rwnd_decrease(struct sctp_association *, int);
 void sctp_assoc_set_primary(struct sctp_association *,
 			    struct sctp_transport *);
-int sctp_assoc_set_bind_addr_from_ep(sctp_association_t *, int);
-int sctp_assoc_set_bind_addr_from_cookie(sctp_association_t *,
-					 sctp_cookie_t *, int);
+int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *, int);
+int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *,
+					 sctp_cookie_t *, int gfp);
 int sctp_cmp_addr_exact(const union sctp_addr *ss1,
 			const union sctp_addr *ss2);
-sctp_chunk_t *sctp_get_ecne_prepend(sctp_association_t *asoc);
-sctp_chunk_t *sctp_get_no_prepend(sctp_association_t *asoc);
+sctp_chunk_t *sctp_get_ecne_prepend(struct sctp_association *asoc);
+sctp_chunk_t *sctp_get_no_prepend(struct sctp_association *asoc);
 
 /* A convenience structure to parse out SCTP specific CMSGs. */
 typedef struct sctp_cmsgs {
include/net/sctp/ulpevent.h
@@ -38,7 +38,6 @@
  * be incorporated into the next SCTP release.
  */
-
 #ifndef __sctp_ulpevent_h__
 #define __sctp_ulpevent_h__
@@ -50,6 +49,7 @@ struct sctp_ulpevent {
 	struct sctp_association *asoc;
 	struct sctp_sndrcvinfo sndrcvinfo;
 	int msg_flags;
+	int iif;
 };
 
 /* Retrieve the skb this event sits inside of. */
@@ -61,9 +61,9 @@ static inline struct sk_buff *sctp_event2skb(struct sctp_ulpevent *ev)
 /* Retrieve & cast the event sitting inside the skb. */
 static inline struct sctp_ulpevent *sctp_skb2event(struct sk_buff *skb)
 {
 	return (struct sctp_ulpevent *)skb->cb;
 }
 
 struct sctp_ulpevent *sctp_ulpevent_new(int size, int flags, int priority);
 struct sctp_ulpevent *sctp_ulpevent_init(struct sctp_ulpevent *, int flags);
 void sctp_ulpevent_free(struct sctp_ulpevent *);
kernel/exit.c
@@ -488,7 +488,7 @@ static inline void reparent_thread(task_t *p, task_t *father, int traced)
 	p->self_exec_id++;
 
 	if (p->pdeath_signal)
-		send_sig(p->pdeath_signal, p, 0);
+		send_group_sig_info(p->pdeath_signal, 0, p);
 
 	/* Move the child from its dying parent to the new one. */
 	if (unlikely(traced)) {
kernel/itimer.c
@@ -67,7 +67,7 @@ void it_real_fn(unsigned long __data)
 	struct task_struct * p = (struct task_struct *) __data;
 	unsigned long interval;
 
-	send_sig(SIGALRM, p, 1);
+	send_group_sig_info(SIGALRM, SEND_SIG_PRIV, p);
 	interval = p->it_real_incr;
 	if (interval) {
 		if (interval > (unsigned long) LONG_MAX)
kernel/module.c
@@ -974,11 +974,6 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
 			/* Ok if weak.  */
 			if (ELF_ST_BIND(sym[i].st_info) == STB_WEAK)
 				break;
 
-#if defined(CONFIG_SPARC32) || defined(CONFIG_SPARC64)
-			/* Ok if Sparc register directive.  */
-			if (ELF_ST_TYPE(sym[i].st_info) == STT_REGISTER)
-				break;
-#endif
-
 			printk(KERN_WARNING "%s: Unknown symbol %s\n",
 			       mod->name, strtab + sym[i].st_name);
kernel/signal.c
@@ -33,64 +33,79 @@ static kmem_cache_t *sigqueue_cachep;
 atomic_t nr_queued_signals;
 int max_queued_signals = 1024;
 
-/*********************************************************
- POSIX thread group signal behavior:
-----------------------------------------------------------
-|                    |  userspace   |     kernel       |
-----------------------------------------------------------
-| SIGHUP             | load-balance |    kill-all      |
-| SIGINT             | load-balance |    kill-all      |
-| SIGQUIT            | load-balance |    kill-all+core |
-| SIGILL             | specific     |    kill-all+core |
-| SIGTRAP            | specific     |    kill-all+core |
-| SIGABRT/SIGIOT     | specific     |    kill-all+core |
-| SIGBUS             | specific     |    kill-all+core |
-| SIGFPE             | specific     |    kill-all+core |
-| SIGKILL            | n/a          |    kill-all      |
-| SIGUSR1            | load-balance |    kill-all      |
-| SIGSEGV            | specific     |    kill-all+core |
-| SIGUSR2            | load-balance |    kill-all      |
-| SIGPIPE            | specific     |    kill-all      |
-| SIGALRM            | load-balance |    kill-all      |
-| SIGTERM            | load-balance |    kill-all      |
-| SIGCHLD            | load-balance |    ignore        |
-| SIGCONT            | load-balance |    ignore        |
-| SIGSTOP            | n/a          |    stop-all      |
-| SIGTSTP            | load-balance |    stop-all      |
-| SIGTTIN            | load-balance |    stop-all      |
-| SIGTTOU            | load-balance |    stop-all      |
-| SIGURG             | load-balance |    ignore        |
-| SIGXCPU            | specific     |    kill-all+core |
-| SIGXFSZ            | specific     |    kill-all+core |
-| SIGVTALRM          | load-balance |    kill-all      |
-| SIGPROF            | specific     |    kill-all      |
-| SIGPOLL/SIGIO      | load-balance |    kill-all      |
-| SIGSYS/SIGUNUSED   | specific     |    kill-all+core |
-| SIGSTKFLT          | specific     |    kill-all      |
-| SIGWINCH           | load-balance |    ignore        |
-| SIGPWR             | load-balance |    kill-all      |
-| SIGRTMIN-SIGRTMAX  | load-balance |    kill-all      |
-----------------------------------------------------------
- non-POSIX signal thread group behavior:
-----------------------------------------------------------
-|                    |  userspace   |     kernel       |
-----------------------------------------------------------
-| SIGEMT             | specific     |    kill-all+core |
-----------------------------------------------------------
-*/
-
-/* Some systems do not have a SIGSTKFLT and the kernel never
- * generates such signals anyways.
- */
+/*
+ * In POSIX a signal is sent either to a specific thread (Linux task)
+ * or to the process as a whole (Linux thread group).  How the signal
+ * is sent determines whether it's to one thread or the whole group,
+ * which determines which signal mask(s) are involved in blocking it
+ * from being delivered until later.  When the signal is delivered,
+ * either it's caught or ignored by a user handler or it has a default
+ * effect that applies to the whole thread group (POSIX process).
+ *
+ * The possible effects an unblocked signal set to SIG_DFL can have are:
+ *   ignore	- Nothing Happens
+ *   terminate	- kill the process, i.e. all threads in the group,
+ *		  similar to exit_group.  The group leader (only) reports
+ *		  WIFSIGNALED status to its parent.
+ *   coredump	- write a core dump file describing all threads using
+ *		  the same mm and then kill all those threads
+ *   stop	- stop all the threads in the group, i.e. TASK_STOPPED state
+ *
+ * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored.
+ * Other signals when not blocked and set to SIG_DFL behaves as follows.
+ * The job control signals also have other special effects.
+ *
+ *	+--------------------+------------------+
+ *	|  POSIX signal      |  default action  |
+ *	+--------------------+------------------+
+ *	|  SIGHUP            |  terminate       |
+ *	|  SIGINT            |  terminate       |
+ *	|  SIGQUIT           |  coredump        |
+ *	|  SIGILL            |  coredump        |
+ *	|  SIGTRAP           |  coredump        |
+ *	|  SIGABRT/SIGIOT    |  coredump        |
+ *	|  SIGBUS            |  coredump        |
+ *	|  SIGFPE            |  coredump        |
+ *	|  SIGKILL           |  terminate(+)    |
+ *	|  SIGUSR1           |  terminate       |
+ *	|  SIGSEGV           |  coredump        |
+ *	|  SIGUSR2           |  terminate       |
+ *	|  SIGPIPE           |  terminate       |
+ *	|  SIGALRM           |  terminate       |
+ *	|  SIGTERM           |  terminate       |
+ *	|  SIGCHLD           |  ignore          |
+ *	|  SIGCONT           |  ignore(*)       |
+ *	|  SIGSTOP           |  stop(*)(+)      |
+ *	|  SIGTSTP           |  stop(*)         |
+ *	|  SIGTTIN           |  stop(*)         |
+ *	|  SIGTTOU           |  stop(*)         |
+ *	|  SIGURG            |  ignore          |
+ *	|  SIGXCPU           |  coredump        |
+ *	|  SIGXFSZ           |  coredump        |
+ *	|  SIGVTALRM         |  terminate       |
+ *	|  SIGPROF           |  terminate       |
+ *	|  SIGPOLL/SIGIO     |  terminate       |
+ *	|  SIGSYS/SIGUNUSED  |  coredump        |
+ *	|  SIGSTKFLT         |  terminate       |
+ *	|  SIGWINCH          |  ignore          |
+ *	|  SIGPWR            |  terminate       |
+ *	|  SIGRTMIN-SIGRTMAX |  terminate       |
+ *	+--------------------+------------------+
+ *	|  non-POSIX signal  |  default action  |
+ *	+--------------------+------------------+
+ *	|  SIGEMT            |  coredump        |
+ *	+--------------------+------------------+
+ *
+ * (+) For SIGKILL and SIGSTOP the action is "always", not just "default".
+ * (*) Special job control effects:
+ * When SIGCONT is sent, it resumes the process (all threads in the group)
+ * from TASK_STOPPED state and also clears any pending/queued stop signals
+ * (any of those marked with "stop(*)").  This happens regardless of blocking,
+ * catching, or ignoring SIGCONT.  When any stop signal is sent, it clears
+ * any pending/queued SIGCONT signals; this happens regardless of blocking,
+ * catching, or ignored the stop signal, though (except for SIGSTOP) the
+ * default action of stopping the process may happen later or never.
+ */
 
 #ifdef SIGSTKFLT
 #define M_SIGSTKFLT	M(SIGSTKFLT)
 #else
 #define M_SIGSTKFLT	0
 #endif
 
 #ifdef SIGEMT
 #define M_SIGEMT	M(SIGEMT)
@@ -105,16 +120,6 @@ int max_queued_signals = 1024;
 #endif
 
 #define T(sig, mask) (M(sig) & (mask))
 
-#define SIG_KERNEL_BROADCAST_MASK (\
-	M(SIGHUP)  | M(SIGINT)    | M(SIGQUIT) | M(SIGILL)  | \
-	M(SIGTRAP) | M(SIGABRT)   | M(SIGBUS)  | M(SIGFPE)  | \
-	M(SIGKILL) | M(SIGUSR1)   | M(SIGSEGV) | M(SIGUSR2) | \
-	M(SIGPIPE) | M(SIGALRM)   | M(SIGTERM) | M(SIGXCPU) | \
-	M(SIGXFSZ) | M(SIGVTALRM) | M(SIGPROF) | M(SIGPOLL) | \
-	M(SIGSYS)  | M_SIGSTKFLT  | M(SIGPWR)  | M(SIGCONT) | \
-	M(SIGSTOP) | M(SIGTSTP)   | M(SIGTTIN) | M(SIGTTOU) | \
-	M_SIGEMT )
-
 #define SIG_KERNEL_ONLY_MASK (\
 	M(SIGKILL) | M(SIGSTOP) )
@@ -599,7 +604,7 @@ static void do_notify_parent_cldstop(struct task_struct *tsk,
 				     struct task_struct *parent);
 
 /*
- * Handle magic process-wide effects of stop/continue signals, and SIGKILL.
+ * Handle magic process-wide effects of stop/continue signals.
  * Unlike the signal actions, these happen immediately at signal-generation
  * time regardless of blocking, ignoring, or handling.  This does the
  * actual continuing for SIGCONT, but not the actual stopping for stop
@@ -1134,9 +1139,8 @@ static int kill_something_info(int sig, struct siginfo *info, int pid)
  */
 
 /*
- * XXX should probably nix these interfaces and update the kernel
- * to specify explicitly whether the signal is a group signal or
- * specific to a thread.
+ * These two are the most common entry points.  They send a signal
+ * just to the specific thread.
  */
 int
 send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
@@ -1150,13 +1154,9 @@ send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
 	 * going away or changing from under us.
 	 */
 	read_lock(&tasklist_lock);
-	if (T(sig, SIG_KERNEL_BROADCAST_MASK)) {
-		ret = group_send_sig_info(sig, info, p);
-	} else {
-		spin_lock_irq(&p->sighand->siglock);
-		ret = specific_send_sig_info(sig, info, p);
-		spin_unlock_irq(&p->sighand->siglock);
-	}
+	spin_lock_irq(&p->sighand->siglock);
+	ret = specific_send_sig_info(sig, info, p);
+	spin_unlock_irq(&p->sighand->siglock);
 	read_unlock(&tasklist_lock);
 	return ret;
 }
@@ -1167,6 +1167,20 @@ send_sig(int sig, struct task_struct *p, int priv)
 	return send_sig_info(sig, (void*)(long)(priv != 0), p);
 }
 
+/*
+ * This is the entry point for "process-wide" signals.
+ * They will go to an appropriate thread in the thread group.
+ */
+int
+send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
+{
+	int ret;
+
+	read_lock(&tasklist_lock);
+	ret = group_send_sig_info(sig, info, p);
+	read_unlock(&tasklist_lock);
+	return ret;
+}
+
 void
 force_sig(int sig, struct task_struct *p)
 {
@@ -1642,6 +1656,7 @@ EXPORT_SYMBOL(kill_sl_info);
 EXPORT_SYMBOL(notify_parent);
 EXPORT_SYMBOL(send_sig);
 EXPORT_SYMBOL(send_sig_info);
+EXPORT_SYMBOL(send_group_sig_info);
 EXPORT_SYMBOL(sigprocmask);
 EXPORT_SYMBOL(block_all_signals);
 EXPORT_SYMBOL(unblock_all_signals);
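The new comment block above distinguishes thread-directed from process-directed signals. From userspace the same distinction shows up as pthread_kill() (deliverable only to the named thread) versus kill() (deliverable to any one thread in the group that hasn't blocked it). A minimal sketch, offered only as an illustration of that semantic and not of this commit's kernel internals (compile with -lpthread; delivery timing between the two threads is inherently racy in a demo this small):

    #include <pthread.h>
    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    static void handler(int sig)
    {
    	write(1, "signal caught\n", 14);	/* async-signal-safe */
    }

    static void *worker(void *arg)
    {
    	sleep(1);	/* stay alive long enough to receive the signal */
    	return NULL;
    }

    int main(void)
    {
    	pthread_t tid;

    	signal(SIGUSR1, handler);
    	pthread_create(&tid, NULL, worker, NULL);

    	pthread_kill(tid, SIGUSR1);	/* thread-directed: only 'tid' is eligible */
    	kill(getpid(), SIGUSR1);	/* process-directed: any unblocked thread
    					   in the group may be chosen */
    	pthread_join(tid, NULL);
    	return 0;
    }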
net/ipv4/ip_output.c
@@ -1293,11 +1293,10 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg,
 static struct packet_type ip_packet_type = {
-	__constant_htons(ETH_P_IP),
-	NULL,	/* All devices */
-	ip_rcv,
-	(void *)1,
-	NULL,
+	.type = __constant_htons(ETH_P_IP),
+	.dev = NULL,	/* All devices */
+	.func = ip_rcv,
+	.data = (void *)1,
 };
 
 /*
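This hunk (and the matching one in net/ipv6/ipv6_sockglue.c below) swaps positional struct initialization for C99 designated initializers, so the initializer no longer depends on field order and any unmentioned member (like the old trailing NULL) is zero-initialized automatically. The same idea in a self-contained sketch; the struct here is hypothetical, not the kernel's packet_type:

    #include <stdio.h>

    /* A hypothetical handler-table entry, for illustration only. */
    struct handler {
    	unsigned short type;
    	void *dev;		/* NULL means "all devices" */
    	int (*func)(int);
    	void *data;
    };

    static int rcv(int x) { return x; }

    /* Positional form: every field in declaration order. */
    static struct handler positional = { 0x0800, NULL, rcv, (void *)1 };

    /* Designated form: order-independent; omitted fields are zeroed. */
    static struct handler designated = {
    	.type = 0x0800,
    	.func = rcv,
    	.data = (void *)1,
    };

    int main(void)
    {
    	printf("%d %d\n", positional.func(1), designated.func(2));
    	return 0;
    }

The designated form also keeps such initializers working if fields are later reordered or new ones are inserted, which is presumably why the conversion was worth a tree-wide sweep.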
net/ipv4/netfilter/ip_nat_core.c
@@ -8,7 +8,6 @@
 #include <linux/timer.h>
 #include <linux/skbuff.h>
 #include <linux/netfilter_ipv4.h>
-#include <linux/brlock.h>
 #include <linux/vmalloc.h>
 #include <net/checksum.h>
 #include <net/icmp.h>
net/ipv6/ipv6_sockglue.c
@@ -55,20 +55,18 @@ DEFINE_SNMP_STAT(struct ipv6_mib, ipv6_statistics);
 static struct packet_type ipv6_packet_type = {
-	__constant_htons(ETH_P_IPV6),
-	NULL,				/* All devices */
-	ipv6_rcv,
-	(void *) 1,
-	NULL
+	.type = __constant_htons(ETH_P_IPV6),
+	.dev = NULL,			/* All devices */
+	.func = ipv6_rcv,
+	.data = (void *) 1,
 };
 
 /*
- *	addrconf module should be notifyed of a device going up
+ *	addrconf module should be notified of a device going up
  */
 static struct notifier_block ipv6_dev_notf = {
-	addrconf_notify,
-	NULL,
-	0
+	.notifier_call = addrconf_notify,
+	.priority = 0
 };
 
 struct ip6_ra_chain *ip6_ra_chain;
net/ipv6/netfilter/ip6_queue.c
@@ -26,7 +26,6 @@
 #include <linux/netfilter.h>
 #include <linux/netlink.h>
 #include <linux/spinlock.h>
-#include <linux/brlock.h>
 #include <linux/sysctl.h>
 #include <linux/proc_fs.h>
 #include <net/sock.h>
@@ -682,8 +681,7 @@ init_or_cleanup(int init)
 cleanup:
 	nf_unregister_queue_handler(PF_INET6);
-	br_write_lock_bh(BR_NETPROTO_LOCK);
-	br_write_unlock_bh(BR_NETPROTO_LOCK);
+	synchronize_net();
 	ipq_flush(NF_DROP);
 	
 cleanup_sysctl:
net/ipv6/proc.c
@@ -156,7 +156,7 @@ static int snmp6_seq_show(struct seq_file *seq, void *v)
 	int i;
 
 	for (i=0; i<sizeof(snmp6_list)/sizeof(snmp6_list[0]); i++)
-		seq_printf(seq, "%-32s\t%ld\n", snmp6_list[i].name,
+		seq_printf(seq, "%-32s\t%lu\n", snmp6_list[i].name,
 			   fold_field(snmp6_list[i].mib, snmp6_list[i].offset));
 
 	return 0;
net/sctp/associola.c
@@ -290,14 +290,18 @@ sctp_association_t *sctp_association_init(sctp_association_t *asoc,
  */
 void sctp_association_free(sctp_association_t *asoc)
 {
+	struct sock *sk = asoc->base.sk;
 	struct sctp_transport *transport;
 	sctp_endpoint_t *ep;
 	struct list_head *pos, *temp;
 	int i;
 
 	ep = asoc->ep;
 	list_del(&asoc->asocs);
 
+	/* Decrement the backlog value for a TCP-style listening socket. */
+	if ((SCTP_SOCKET_TCP == sctp_sk(sk)->type) &&
+	    (SCTP_SS_LISTENING == sk->state))
+		sk->ack_backlog--;
+
 	/* Mark as dead, so other users can know this structure is
 	 * going away.
 	 */
@@ -421,8 +425,7 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
 	SCTP_DEBUG_PRINTK("sctp_assoc_add_peer:association %p PMTU set to "
 			  "%d\n", asoc, asoc->pmtu);
 
-	asoc->frag_point = asoc->pmtu;
-	asoc->frag_point -= SCTP_IP_OVERHEAD + sizeof(struct sctp_data_chunk);
+	asoc->frag_point = sctp_frag_point(asoc->pmtu);
 
 	/* The asoc->peer.port might not be meaningful yet, but
 	 * initialize the packet structure anyway.
@@ -642,8 +645,6 @@ __u32 sctp_association_get_tsn_block(sctp_association_t *asoc, int num)
 /* Compare two addresses to see if they match.  Wildcard addresses
  * only match themselves.
- *
- * FIXME: We do not match address scopes correctly.
  */
 int sctp_cmp_addr_exact(const union sctp_addr *ss1,
 			const union sctp_addr *ss2)
@@ -651,38 +652,27 @@ int sctp_cmp_addr_exact(const union sctp_addr *ss1,
 	struct sctp_af *af;
 
 	af = sctp_get_af_specific(ss1->sa.sa_family);
-	if (!af)
+	if (unlikely(!af))
 		return 0;
 
 	return af->cmp_addr(ss1, ss2);
 }
 
 /* Return an ecne chunk to get prepended to a packet.
- * Note: We are sly and return a shared, prealloced chunk.
+ * Note: We are sly and return a shared, prealloced chunk.  FIXME:
+ * No we don't, but we could/should.
  */
-sctp_chunk_t *sctp_get_ecne_prepend(sctp_association_t *asoc)
+sctp_chunk_t *sctp_get_ecne_prepend(struct sctp_association *asoc)
 {
-	sctp_chunk_t *chunk;
-	int need_ecne;
-	__u32 lowest_tsn;
+	struct sctp_chunk *chunk;
 
-	/* Can be called from task or bh.  Both need_ecne and
-	 * last_ecne_tsn are written during bh.
+	/* Send ECNE if needed.
+	 * Not being able to allocate a chunk here is not deadly.
 	 */
-	need_ecne = asoc->need_ecne;
-	lowest_tsn = asoc->last_ecne_tsn;
-
-	if (need_ecne) {
-		chunk = sctp_make_ecne(asoc, lowest_tsn);
-
-		/* ECNE is not mandatory to the flow.  Being unable to
-		 * alloc mem is not deadly.  We are just unable to help
-		 * out the network.  If we run out of memory, just return
-		 * NULL.
-		 */
-	} else {
-		chunk = NULL;
-	}
+	if (asoc->need_ecne)
+		chunk = sctp_make_ecne(asoc, asoc->last_ecne_tsn);
+	else
+		chunk = NULL;
 
 	return chunk;
 }
@@ -832,12 +822,17 @@ static void sctp_assoc_bh_rcv(sctp_association_t *asoc)
 void sctp_assoc_migrate(sctp_association_t *assoc, struct sock *newsk)
 {
 	struct sctp_opt *newsp = sctp_sk(newsk);
+	struct sock *oldsk = assoc->base.sk;
 
 	/* Delete the association from the old endpoint's list of
 	 * associations.
 	 */
 	list_del(&assoc->asocs);
 
+	/* Decrement the backlog value for a TCP-style socket. */
+	if (SCTP_SOCKET_TCP == sctp_sk(oldsk)->type)
+		oldsk->ack_backlog--;
+
 	/* Release references to the old endpoint and the sock.  */
 	sctp_endpoint_put(assoc->ep);
 	sock_put(assoc->base.sk);
@@ -986,8 +981,7 @@ void sctp_assoc_sync_pmtu(sctp_association_t *asoc)
 	if (pmtu) {
 		asoc->pmtu = pmtu;
-		asoc->frag_point = pmtu - (SCTP_IP_OVERHEAD +
-					   sizeof(sctp_data_chunk_t));
+		asoc->frag_point = sctp_frag_point(pmtu);
 	}
 
 	SCTP_DEBUG_PRINTK("%s: asoc:%p, pmtu:%d, frag_point:%d\n",
@@ -1001,9 +995,9 @@ static inline int sctp_peer_needs_update(struct sctp_association *asoc)
 	case SCTP_STATE_ESTABLISHED:
 	case SCTP_STATE_SHUTDOWN_PENDING:
 	case SCTP_STATE_SHUTDOWN_RECEIVED:
 		if ((asoc->rwnd > asoc->a_rwnd) &&
 		    ((asoc->rwnd - asoc->a_rwnd) >=
 		     min_t(__u32, (asoc->base.sk->rcvbuf >> 1), asoc->pmtu)))
 			return 1;
 		break;
 	default:
@@ -1070,14 +1064,14 @@ void sctp_assoc_rwnd_decrease(sctp_association_t *asoc, int len)
 		asoc->rwnd = 0;
 	}
 
 	SCTP_DEBUG_PRINTK("%s: asoc %p rwnd decreased by %d to (%u, %u)\n",
 			  __FUNCTION__, asoc, len, asoc->rwnd,
 			  asoc->rwnd_over);
 }
 
 /* Build the bind address list for the association based on info from the
  * local endpoint and the remote peer.
  */
-int sctp_assoc_set_bind_addr_from_ep(sctp_association_t *asoc, int priority)
+int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc, int gfp)
 {
 	sctp_scope_t scope;
 	int flags;
@@ -1094,19 +1088,17 @@ int sctp_assoc_set_bind_addr_from_ep(sctp_association_t *asoc, int priority)
 	return sctp_bind_addr_copy(&asoc->base.bind_addr,
 				   &asoc->ep->base.bind_addr,
-				   scope, priority, flags);
+				   scope, gfp, flags);
 }
 
 /* Build the association's bind address list from the cookie. */
 int sctp_assoc_set_bind_addr_from_cookie(sctp_association_t *asoc,
-					 sctp_cookie_t *cookie, int priority)
+					 sctp_cookie_t *cookie, int gfp)
 {
 	int var_size2 = ntohs(cookie->peer_init->chunk_hdr.length);
 	int var_size3 = cookie->raw_addr_list_len;
-	__u8 *raw_addr_list = (__u8 *)cookie + sizeof(sctp_cookie_t) +
-			      var_size2;
+	__u8 *raw = (__u8 *)cookie + sizeof(sctp_cookie_t) + var_size2;
 
-	return sctp_raw_to_bind_addrs(&asoc->base.bind_addr, raw_addr_list,
-				      var_size3, asoc->ep->base.bind_addr.port,
-				      priority);
+	return sctp_raw_to_bind_addrs(&asoc->base.bind_addr, raw, var_size3,
				      asoc->ep->base.bind_addr.port, gfp);
 }
net/sctp/bind_addr.c
View file @
e35a3e9a
...
...
@@ -53,7 +53,7 @@
 /* Forward declarations for internal helpers. */
 static int sctp_copy_one_addr(sctp_bind_addr_t *, union sctp_addr *,
-			      sctp_scope_t scope, int priority, int flags);
+			      sctp_scope_t scope, int gfp, int flags);
 static void sctp_bind_addr_clean(sctp_bind_addr_t *);
 
 /* First Level Abstractions. */
...
...
@@ -62,7 +62,7 @@ static void sctp_bind_addr_clean(sctp_bind_addr_t *);
  * in 'src' which have a broader scope than 'scope'.
  */
 int sctp_bind_addr_copy(sctp_bind_addr_t *dest, const sctp_bind_addr_t *src,
-			sctp_scope_t scope, int priority, int flags)
+			sctp_scope_t scope, int gfp, int flags)
 {
 	struct sockaddr_storage_list *addr;
 	struct list_head *pos;
...
...
@@ -75,7 +75,7 @@ int sctp_bind_addr_copy(sctp_bind_addr_t *dest, const sctp_bind_addr_t *src,
 	list_for_each(pos, &src->address_list) {
 		addr = list_entry(pos, struct sockaddr_storage_list, list);
 		error = sctp_copy_one_addr(dest, &addr->a, scope,
-					   priority, flags);
+					   gfp, flags);
 		if (error < 0)
 			goto out;
 	}
...
...
@@ -88,11 +88,11 @@ int sctp_bind_addr_copy(sctp_bind_addr_t *dest, const sctp_bind_addr_t *src,
 }
 
 /* Create a new SCTP_bind_addr from nothing. */
-sctp_bind_addr_t *sctp_bind_addr_new(int priority)
+sctp_bind_addr_t *sctp_bind_addr_new(int gfp)
 {
 	sctp_bind_addr_t *retval;
 
-	retval = t_new(sctp_bind_addr_t, priority);
+	retval = t_new(sctp_bind_addr_t, gfp);
 	if (!retval)
 		goto nomem;
...
...
@@ -144,12 +144,12 @@ void sctp_bind_addr_free(sctp_bind_addr_t *bp)
 /* Add an address to the bind address list in the SCTP_bind_addr structure. */
 int sctp_add_bind_addr(sctp_bind_addr_t *bp, union sctp_addr *new,
-		       int priority)
+		       int gfp)
 {
 	struct sockaddr_storage_list *addr;
 
 	/* Add the address to the bind address list. */
-	addr = t_new(struct sockaddr_storage_list, priority);
+	addr = t_new(struct sockaddr_storage_list, gfp);
 	if (!addr)
 		return -ENOMEM;
...
...
@@ -197,7 +197,7 @@ int sctp_del_bind_addr(sctp_bind_addr_t *bp, union sctp_addr *del_addr)
  * The second argument is the return value for the length.
  */
 union sctp_params sctp_bind_addrs_to_raw(const sctp_bind_addr_t *bp,
-					 int *addrs_len, int priority)
+					 int *addrs_len, int gfp)
 {
 	union sctp_params addrparms;
 	union sctp_params retval;
...
...
@@ -214,7 +214,7 @@ union sctp_params sctp_bind_addrs_to_raw(const sctp_bind_addr_t *bp,
 		len += sizeof(sctp_addr_param_t);
 	}
 
-	retval.v = kmalloc(len, priority);
+	retval.v = kmalloc(len, gfp);
 	if (!retval.v)
 		goto end_raw;
...
...
@@ -238,7 +238,7 @@ union sctp_params sctp_bind_addrs_to_raw(const sctp_bind_addr_t *bp,
  * address parameters).
  */
 int sctp_raw_to_bind_addrs(sctp_bind_addr_t *bp, __u8 *raw_addr_list,
-			   int addrs_len, __u16 port, int priority)
+			   int addrs_len, __u16 port, int gfp)
 {
 	sctp_addr_param_t *rawaddr;
 	sctp_paramhdr_t *param;
...
...
@@ -254,8 +254,8 @@ int sctp_raw_to_bind_addrs(sctp_bind_addr_t *bp, __u8 *raw_addr_list,
 		switch (param->type) {
 		case SCTP_PARAM_IPV4_ADDRESS:
 		case SCTP_PARAM_IPV6_ADDRESS:
-			sctp_param2sockaddr(&addr, rawaddr, port);
-			retval = sctp_add_bind_addr(bp, &addr, priority);
+			sctp_param2sockaddr(&addr, rawaddr, port, 0);
+			retval = sctp_add_bind_addr(bp, &addr, gfp);
 			if (retval) {
 				/* Can't finish building the list, clean up. */
 				sctp_bind_addr_clean(bp);
...
...
@@ -300,14 +300,14 @@ int sctp_bind_addr_match(sctp_bind_addr_t *bp, const union sctp_addr *addr,
 /* Copy out addresses from the global local address list. */
 static int sctp_copy_one_addr(sctp_bind_addr_t *dest, union sctp_addr *addr,
-			      sctp_scope_t scope, int priority, int flags)
+			      sctp_scope_t scope, int gfp, int flags)
 {
 	struct sctp_protocol *proto = sctp_get_protocol();
 	int error = 0;
 
 	if (sctp_is_any(addr)) {
 		error = sctp_copy_local_addr_list(proto, dest, scope,
-						  priority, flags);
+						  gfp, flags);
 	} else if (sctp_in_scope(addr, scope)) {
 		/* Now that the address is in scope, check to see if
 		 * the address type is supported by local sock as
...
...
@@ -318,7 +318,7 @@ static int sctp_copy_one_addr(sctp_bind_addr_t *dest, union sctp_addr *addr,
 		    (((AF_INET6 == addr->sa.sa_family) &&
 		      (flags & SCTP_ADDR6_ALLOWED) &&
 		      (flags & SCTP_ADDR6_PEERSUPP))))
-			error = sctp_add_bind_addr(dest, addr, priority);
+			error = sctp_add_bind_addr(dest, addr, gfp);
 	}
 
 	return error;
...
...
net/sctp/endpointola.c
View file @
e35a3e9a
...
...
@@ -177,8 +177,15 @@ sctp_endpoint_t *sctp_endpoint_init(sctp_endpoint_t *ep,
 /* Add an association to an endpoint.  */
 void sctp_endpoint_add_asoc(sctp_endpoint_t *ep, sctp_association_t *asoc)
 {
+	struct sock *sk = ep->base.sk;
+
 	/* Now just add it to our list of asocs */
 	list_add_tail(&asoc->asocs, &ep->asocs);
+
+	/* Increment the backlog value for a TCP-style listening socket. */
+	if ((SCTP_SOCKET_TCP == sctp_sk(sk)->type) &&
+	    (SCTP_SS_LISTENING == sk->state))
+		sk->ack_backlog++;
 }
 
 /* Free the endpoint structure.  Delay cleanup until
...
...
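Together with the sctp_association_free()/sctp_assoc_migrate() hunks in associola.c above, this keeps a TCP-style listening socket's accept backlog in step with its association list. The invariant, condensed into a sketch (not a function in this patch):

	/* +1 when an association is added to a listening TCP-style
	 * endpoint, -1 when it is freed or migrated away.
	 */
	static void backlog_track(struct sock *sk, int delta)
	{
		if ((SCTP_SOCKET_TCP == sctp_sk(sk)->type) &&
		    (SCTP_SS_LISTENING == sk->state))
			sk->ack_backlog += delta;
	}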
net/sctp/input.c
View file @
e35a3e9a
...
...
@@ -207,21 +207,19 @@ int sctp_rcv(struct sk_buff *skb)
 	 */
 	sctp_bh_lock_sock(sk);
 
-	if (sock_owned_by_user(sk)) {
+	if (sock_owned_by_user(sk))
 		sk_add_backlog(sk, (struct sk_buff *)chunk);
-	} else {
+	else
 		sctp_backlog_rcv(sk, (struct sk_buff *)chunk);
-	}
 
 	/* Release the sock and any reference counts we took in the
 	 * lookup calls.
 	 */
 	sctp_bh_unlock_sock(sk);
-	if (asoc) {
+	if (asoc)
 		sctp_association_put(asoc);
-	} else {
+	else
 		sctp_endpoint_put(ep);
-	}
 	sock_put(sk);
 	return ret;
...
...
@@ -268,10 +266,8 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 }
 
 /* Handle icmp frag needed error. */
-static inline void sctp_icmp_frag_needed(struct sock *sk,
-					 sctp_association_t *asoc,
-					 struct sctp_transport *transport,
-					 __u32 pmtu)
+void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
+			   struct sctp_transport *t, __u32 pmtu)
 {
 	if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) {
 		printk(KERN_WARNING "%s: Reported pmtu %d too low, "
...
...
@@ -280,54 +276,38 @@ static inline void sctp_icmp_frag_needed(struct sock *sk,
 		pmtu = SCTP_DEFAULT_MINSEGMENT;
 	}
 
-	if (!sock_owned_by_user(sk) && transport &&
-	    (transport->pmtu != pmtu)) {
-		transport->pmtu = pmtu;
+	if (!sock_owned_by_user(sk) && t && (t->pmtu != pmtu)) {
+		t->pmtu = pmtu;
 		sctp_assoc_sync_pmtu(asoc);
-		sctp_retransmit(&asoc->outqueue, transport,
-				SCTP_RETRANSMIT_PMTU_DISCOVERY);
+		sctp_retransmit(&asoc->outqueue, t, SCTP_RTXR_PMTUD);
 	}
 }
 
-/*
- * This routine is called by the ICMP module when it gets some
- * sort of error condition.  If err < 0 then the socket should
- * be closed and the error returned to the user.  If err > 0
- * it's just the icmp type << 8 | icmp code.  After adjustment
- * header points to the first 8 bytes of the sctp header.  We need
- * to find the appropriate port.
- *
- * The locking strategy used here is very "optimistic".  When
- * someone else accesses the socket the ICMP is just dropped
- * and for some paths there is no check at all.
- * A more general error queue to queue errors for later handling
- * is probably better.
- *
- */
-void sctp_v4_err(struct sk_buff *skb, __u32 info)
+/* Common lookup code for icmp/icmpv6 error handler. */
+struct sock *sctp_err_lookup(int family, struct sk_buff *skb,
+			     struct sctphdr *sctphdr,
+			     struct sctp_endpoint **epp,
+			     struct sctp_association **app,
+			     struct sctp_transport **tpp)
 {
-	struct iphdr *iph = (struct iphdr *)skb->data;
-	struct sctphdr *sh = (struct sctphdr *)(skb->data + (iph->ihl << 2));
-	int type = skb->h.icmph->type;
-	int code = skb->h.icmph->code;
-	union sctp_addr saddr, daddr;
-	struct inet_opt *inet;
+	union sctp_addr saddr;
+	union sctp_addr daddr;
+	struct sctp_af *af;
 	struct sock *sk = NULL;
-	sctp_endpoint_t *ep = NULL;
-	sctp_association_t *asoc = NULL;
-	struct sctp_transport *transport;
-	int err;
-
-	if (skb->len < ((iph->ihl << 2) + 8)) {
-		ICMP_INC_STATS_BH(IcmpInErrors);
-		return;
-	}
-
-	saddr.v4.sin_family = AF_INET;
-	saddr.v4.sin_port = ntohs(sh->source);
-	memcpy(&saddr.v4.sin_addr.s_addr, &iph->saddr, sizeof(struct in_addr));
-	daddr.v4.sin_family = AF_INET;
-	daddr.v4.sin_port = ntohs(sh->dest);
-	memcpy(&daddr.v4.sin_addr.s_addr, &iph->daddr, sizeof(struct in_addr));
+	struct sctp_endpoint *ep = NULL;
+	struct sctp_association *asoc = NULL;
+	struct sctp_transport *transport = NULL;
+
+	*app = NULL; *epp = NULL; *tpp = NULL;
+
+	af = sctp_get_af_specific(family);
+	if (unlikely(!af)) {
+		return NULL;
+	}
+
+	/* Initialize local addresses for lookups. */
+	af->from_skb(&saddr, skb, 1);
+	af->from_skb(&daddr, skb, 0);
 
 	/* Look for an association that matches the incoming ICMP error
 	 * packet.
...
...
@@ -340,13 +320,12 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info)
 	 */
 		ep = __sctp_rcv_lookup_endpoint(&daddr);
 		if (!ep) {
-			ICMP_INC_STATS_BH(IcmpInErrors);
-			return;
+			return NULL;
 		}
 	}
 
 	if (asoc) {
-		if (ntohl(sh->vtag) != asoc->c.peer_vtag) {
+		if (ntohl(sctphdr->vtag) != asoc->c.peer_vtag) {
 			ICMP_INC_STATS_BH(IcmpInErrors);
 			goto out;
 		}
...
...
@@ -355,12 +334,90 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info)
 	sk = ep->base.sk;
 	sctp_bh_lock_sock(sk);
 
 	/* If too many ICMPs get dropped on busy
 	 * servers this needs to be solved differently.
 	 */
 	if (sock_owned_by_user(sk))
 		NET_INC_STATS_BH(LockDroppedIcmps);
 
+	*epp = ep;
+	*app = asoc;
+	*tpp = transport;
+	return sk;
+
+out:
+	sock_put(sk);
+	if (asoc)
+		sctp_association_put(asoc);
+	if (ep)
+		sctp_endpoint_put(ep);
+	return NULL;
+}
+
+/* Common cleanup code for icmp/icmpv6 error handler. */
+void sctp_err_finish(struct sock *sk, struct sctp_endpoint *ep,
+		     struct sctp_association *asoc)
+{
+	sctp_bh_unlock_sock(sk);
+	sock_put(sk);
+	if (asoc)
+		sctp_association_put(asoc);
+	if (ep)
+		sctp_endpoint_put(ep);
+}
+
+/*
+ * This routine is called by the ICMP module when it gets some
+ * sort of error condition.  If err < 0 then the socket should
+ * be closed and the error returned to the user.  If err > 0
+ * it's just the icmp type << 8 | icmp code.  After adjustment
+ * header points to the first 8 bytes of the sctp header.  We need
+ * to find the appropriate port.
+ *
+ * The locking strategy used here is very "optimistic".  When
+ * someone else accesses the socket the ICMP is just dropped
+ * and for some paths there is no check at all.
+ * A more general error queue to queue errors for later handling
+ * is probably better.
+ *
+ */
+void sctp_v4_err(struct sk_buff *skb, __u32 info)
+{
+	struct iphdr *iph = (struct iphdr *)skb->data;
+	struct sctphdr *sh = (struct sctphdr *)(skb->data + (iph->ihl << 2));
+	int type = skb->h.icmph->type;
+	int code = skb->h.icmph->code;
+	struct sock *sk;
+	sctp_endpoint_t *ep;
+	sctp_association_t *asoc;
+	struct sctp_transport *transport;
+	struct inet_opt *inet;
+	char *saveip, *savesctp;
+	int err;
+
+	if (skb->len < ((iph->ihl << 2) + 8)) {
+		ICMP_INC_STATS_BH(IcmpInErrors);
+		return;
+	}
+
+	/* Fix up skb to look at the embedded net header. */
+	saveip = skb->nh.raw;
+	savesctp = skb->h.raw;
+	skb->nh.iph = iph;
+	skb->h.raw = (char *)sh;
+	sk = sctp_err_lookup(AF_INET, skb, sh, &ep, &asoc, &transport);
+	/* Put back, the original pointers. */
+	skb->nh.raw = saveip;
+	skb->h.raw = savesctp;
+	if (!sk) {
+		ICMP_INC_STATS_BH(IcmpInErrors);
+		return;
+	}
+	/* Warning:  The sock lock is held.  Remember to call
+	 * sctp_err_finish!
+	 */
+
 	switch (type) {
 	case ICMP_PARAMETERPROB:
 		err = EPROTO;
...
...
@@ -399,13 +456,7 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info)
 	}
 
 out_unlock:
-	sctp_bh_unlock_sock(sk);
-out:
-	sock_put(sk);
-	if (asoc)
-		sctp_association_put(asoc);
-	if (ep)
-		sctp_endpoint_put(ep);
+	sctp_err_finish(sk, ep, asoc);
 }
 
 /*
...
...
@@ -623,9 +674,9 @@ void __sctp_unhash_established(sctp_association_t *asoc)
 }
 
 /* Look up an association. */
-sctp_association_t *__sctp_lookup_association(const union sctp_addr *laddr,
-					      const union sctp_addr *paddr,
-					      struct sctp_transport **transportp)
+sctp_association_t *__sctp_lookup_association(const union sctp_addr *local,
+					      const union sctp_addr *peer,
+					      struct sctp_transport **pt)
 {
 	sctp_hashbucket_t *head;
 	sctp_endpoint_common_t *epb;
...
...
@@ -636,12 +687,12 @@ sctp_association_t *__sctp_lookup_association(const union sctp_addr *laddr,
 	/* Optimize here for direct hit, only listening connections can
 	 * have wildcards anyways.
 	 */
-	hash = sctp_assoc_hashfn(laddr->v4.sin_port, paddr->v4.sin_port);
+	hash = sctp_assoc_hashfn(local->v4.sin_port, peer->v4.sin_port);
 	head = &sctp_proto.assoc_hashbucket[hash];
 	read_lock(&head->lock);
 	for (epb = head->chain; epb; epb = epb->next) {
 		asoc = sctp_assoc(epb);
-		transport = sctp_assoc_is_match(asoc, laddr, paddr);
+		transport = sctp_assoc_is_match(asoc, local, peer);
 		if (transport)
 			goto hit;
 	}
...
...
@@ -651,7 +702,7 @@ sctp_association_t *__sctp_lookup_association(const union sctp_addr *laddr,
 	return NULL;
 
 hit:
-	*transportp = transport;
+	*pt = transport;
 	sctp_association_hold(asoc);
 	sock_hold(epb->sk);
 	read_unlock(&head->lock);
...
...
@@ -754,7 +805,7 @@ static sctp_association_t *__sctp_rcv_init_lookup(struct sk_buff *skb,
 		    (SCTP_PARAM_IPV6_ADDRESS != params.p->type))
 			continue;
 
-		sctp_param2sockaddr(paddr, params.addr, ntohs(sh->source));
+		sctp_param2sockaddr(paddr, params.addr, ntohs(sh->source), 0);
 		asoc = __sctp_lookup_association(laddr, paddr, transportp);
 		if (asoc)
 			return asoc;
...
...
@@ -782,8 +833,3 @@ sctp_association_t *__sctp_rcv_lookup(struct sk_buff *skb,
 	return asoc;
 }
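Both address families now funnel ICMP errors through the shared sctp_err_lookup()/sctp_err_finish() pair introduced above. The calling pattern, condensed from the two handlers in this diff (a sketch, not a separate kernel helper):

	static struct sock *err_lookup_pattern(int family, struct sk_buff *skb,
					       struct sctphdr *sh,
					       struct sctp_endpoint **ep,
					       struct sctp_association **asoc,
					       struct sctp_transport **t)
	{
		char *saveip = skb->nh.raw, *savesctp = skb->h.raw;
		struct sock *sk;

		/* Aim the header pointers at the ICMP-embedded packet. */
		skb->h.raw = (char *)sh;
		sk = sctp_err_lookup(family, skb, sh, ep, asoc, t);
		/* Restore the originals; on success the sock lock is held
		 * and must be dropped later via sctp_err_finish().
		 */
		skb->nh.raw = saveip;
		skb->h.raw = savesctp;
		return sk;
	}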
net/sctp/ipv6.c
View file @
e35a3e9a
/* SCTP kernel reference Implementation
 * Copyright (c) 2001 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
- * Copyright (c) 2002 International Business Machines, Corp.
+ * Copyright (c) 2002-2003 International Business Machines, Corp.
 *
 * This file is part of the SCTP kernel reference Implementation
 *
...
...
@@ -88,17 +88,62 @@ extern struct notifier_block sctp_inetaddr_notifier;
 		ntohs((addr)->s6_addr16[6]), \
 		ntohs((addr)->s6_addr16[7])
 
-/* FIXME: Comments. */
-static inline void sctp_v6_err(struct sk_buff *skb,
-			       struct inet6_skb_parm *opt,
-			       int type, int code, int offset, __u32 info)
+/* ICMP error handler. */
+void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+		 int type, int code, int offset, __u32 info)
 {
-	/* BUG.  WRITE ME. */
+	struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
+	struct sctphdr *sh = (struct sctphdr *)(skb->data + offset);
+	struct sock *sk;
+	sctp_endpoint_t *ep;
+	sctp_association_t *asoc;
+	struct sctp_transport *transport;
+	struct ipv6_pinfo *np;
+	char *saveip, *savesctp;
+	int err;
+
+	/* Fix up skb to look at the embedded net header. */
+	saveip = skb->nh.raw;
+	savesctp = skb->h.raw;
+	skb->nh.ipv6h = iph;
+	skb->h.raw = (char *)sh;
+	sk = sctp_err_lookup(AF_INET6, skb, sh, &ep, &asoc, &transport);
+	/* Put back, the original pointers. */
+	skb->nh.raw = saveip;
+	skb->h.raw = savesctp;
+	if (!sk) {
+		ICMP6_INC_STATS_BH(Icmp6InErrors);
+		return;
+	}
+
+	/* Warning:  The sock lock is held.  Remember to call
+	 * sctp_err_finish!
+	 */
+
+	switch (type) {
+	case ICMPV6_PKT_TOOBIG:
+		sctp_icmp_frag_needed(sk, asoc, transport, ntohl(info));
+		goto out_unlock;
+	default:
+		break;
+	}
+
+	np = inet6_sk(sk);
+	icmpv6_err_convert(type, code, &err);
+	if (!sock_owned_by_user(sk) && np->recverr) {
+		sk->err = err;
+		sk->error_report(sk);
+	} else {	/* Only an error on timeout */
+		sk->err_soft = err;
+	}
+
+out_unlock:
+	sctp_err_finish(sk, ep, asoc);
 }
 
 /* Based on tcp_v6_xmit() in tcp_ipv6.c. */
-static inline int sctp_v6_xmit(struct sk_buff *skb,
-			       struct sctp_transport *transport, int ipfragok)
+static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport,
+			int ipfragok)
 {
 	struct sock *sk = skb->sk;
 	struct ipv6_pinfo *np = inet6_sk(sk);
...
...
@@ -110,15 +155,18 @@ static inline int sctp_v6_xmit(struct sk_buff *skb,
 	/* Fill in the dest address from the route entry passed with the skb
 	 * and the source address from the transport.
 	 */
 	fl.fl6_dst = &rt6->rt6i_dst.addr;
 	fl.fl6_src = &transport->saddr.v6.sin6_addr;
 
 	fl.fl6_flowlabel = np->flow_label;
 	IP6_ECN_flow_xmit(sk, fl.fl6_flowlabel);
-	fl.oif = sk->bound_dev_if;
+	if (ipv6_addr_type(fl.fl6_src) & IPV6_ADDR_LINKLOCAL)
+		fl.oif = transport->saddr.v6.sin6_scope_id;
+	else
+		fl.oif = sk->bound_dev_if;
 	fl.uli_u.ports.sport = inet_sk(sk)->sport;
-	fl.uli_u.ports.dport = inet_sk(sk)->dport;
+	fl.uli_u.ports.dport = transport->ipaddr.v6.sin6_port;
 
 	if (np->opt && np->opt->srcrt) {
 		struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
...
...
@@ -174,7 +222,7 @@ struct dst_entry *sctp_v6_get_dst(sctp_association_t *asoc,
 /* Returns the number of consecutive initial bits that match in the 2 ipv6
  * addresses.
  */
 static inline int sctp_v6_addr_match_len(union sctp_addr *s1,
 					 union sctp_addr *s2)
 {
...
...
@@ -186,7 +234,7 @@ static inline int sctp_v6_addr_match_len(union sctp_addr *s1,
 		__u32 a1xora2;
 
 		a1xora2 = a1->s6_addr32[i] ^ a2->s6_addr32[i];
 
 		if ((j = fls(ntohl(a1xora2))))
 			return (i * 32 + 32 - j);
 	}
...
...
@@ -196,7 +244,7 @@ static inline int sctp_v6_addr_match_len(union sctp_addr *s1,
 /* Fills in the source address(saddr) based on the destination address(daddr)
  * and asoc's bind address list.
  */
 void sctp_v6_get_saddr(sctp_association_t *asoc, struct dst_entry *dst,
 		       union sctp_addr *daddr, union sctp_addr *saddr)
 {
...
...
@@ -214,7 +262,7 @@ void sctp_v6_get_saddr(sctp_association_t *asoc, struct dst_entry *dst,
 			  __FUNCTION__, asoc, dst, NIP6(&daddr->v6.sin6_addr));
 
 	if (!asoc) {
-		ipv6_get_saddr(dst, &daddr->v6.sin6_addr, &saddr->v6.sin6_addr);
+		ipv6_get_saddr(dst, &daddr->v6.sin6_addr,
+			       &saddr->v6.sin6_addr);
 		SCTP_DEBUG_PRINTK("saddr from ipv6_get_saddr: "
 				  "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
 				  NIP6(&saddr->v6.sin6_addr));
...
...
@@ -279,6 +327,7 @@ static void sctp_v6_copy_addrlist(struct list_head *addrlist,
 		addr->a.v6.sin6_family = AF_INET6;
 		addr->a.v6.sin6_port = 0;
 		addr->a.v6.sin6_addr = ifp->addr;
+		addr->a.v6.sin6_scope_id = dev->ifindex;
 		INIT_LIST_HEAD(&addr->list);
 		list_add_tail(&addr->list, addrlist);
 	}
...
...
@@ -299,7 +348,7 @@ static void sctp_v6_from_skb(union sctp_addr *addr,struct sk_buff *skb,
 	port = &addr->v6.sin6_port;
 	addr->v6.sin6_family = AF_INET6;
 	addr->v6.sin6_flowinfo = 0; /* FIXME */
-	addr->v6.sin6_scope_id = 0; /* FIXME */
+	addr->v6.sin6_scope_id = ((struct inet6_skb_parm *)skb->cb)->iif;
 
 	sh = (struct sctphdr *)skb->h.raw;
 	if (is_saddr) {
...
...
@@ -336,19 +385,25 @@ static void sctp_v6_dst_saddr(union sctp_addr *addr, struct dst_entry *dst,
 	ipv6_addr_copy(&addr->v6.sin6_addr, &rt->rt6i_src.addr);
 }
 
-/* Compare addresses exactly.  Well.. almost exactly; ignore scope_id
- * for now.  FIXME: v4-mapped-v6.
+/* Compare addresses exactly.
+ * FIXME: v4-mapped-v6.
  */
 static int sctp_v6_cmp_addr(const union sctp_addr *addr1,
 			    const union sctp_addr *addr2)
 {
-	int match;
-
 	if (addr1->sa.sa_family != addr2->sa.sa_family)
 		return 0;
-	match = !ipv6_addr_cmp((struct in6_addr *)&addr1->v6.sin6_addr,
-			       (struct in6_addr *)&addr2->v6.sin6_addr);
+	if (ipv6_addr_cmp(&addr1->v6.sin6_addr, &addr2->v6.sin6_addr))
+		return 0;
+
+	/* If this is a linklocal address, compare the scope_id. */
+	if (ipv6_addr_type(&addr1->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) {
+		if (addr1->v6.sin6_scope_id && addr2->v6.sin6_scope_id &&
+		    (addr1->v6.sin6_scope_id != addr2->v6.sin6_scope_id)) {
+			return 0;
+		}
+	}
 
-	return match;
+	return 1;
 }
 
 /* Initialize addr struct to INADDR_ANY. */
...
...
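The new sctp_v6_cmp_addr() above consults sin6_scope_id only for link-local addresses and treats a zero scope id as a wildcard. The rule in isolation (a sketch distilled from the hunk, not a kernel helper):

	/* Equal link-local address bits still mismatch when both sides
	 * carry a scope id and the ids disagree; a zero id matches anything.
	 */
	static int linklocal_scope_match(__u32 id1, __u32 id2)
	{
		return !(id1 && id2 && (id1 != id2));
	}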
@@ -382,7 +437,6 @@ static int sctp_v6_available(const union sctp_addr *addr)
 	return ipv6_chk_addr(in6, NULL);
 }
 
-
 /* This function checks if the address is a valid address to be used for
  * SCTP.
  *
...
...
@@ -432,7 +486,7 @@ static sctp_scope_t sctp_v6_scope(union sctp_addr *addr)
 	return retval;
 }
 
 /* Create and initialize a new sk for the socket to be returned by accept(). */
 struct sock *sctp_v6_create_accept_sk(struct sock *sk,
 				      struct sctp_association *asoc)
 {
...
...
@@ -469,11 +523,11 @@ struct sock *sctp_v6_create_accept_sk(struct sock *sk,
 	memcpy(newnp, np, sizeof(struct ipv6_pinfo));
 
-	ipv6_addr_copy(&newnp->daddr, &asoc->peer.primary_addr.v6.sin6_addr);
+	ipv6_addr_copy(&newnp->daddr,
+		       &asoc->peer.primary_addr.v6.sin6_addr);
 
 	newinet->sport = inet->sport;
 	newinet->dport = asoc->peer.port;
 
 #ifdef INET_REFCNT_DEBUG
 	atomic_inc(&inet6_sock_nr);
 	atomic_inc(&inet_sock_nr);
...
...
@@ -488,6 +542,13 @@ struct sock *sctp_v6_create_accept_sk(struct sock *sk,
 	return newsk;
 }
 
+/* Where did this skb come from?  */
+static int sctp_v6_skb_iif(const struct sk_buff *skb)
+{
+	struct inet6_skb_parm *opt = (struct inet6_skb_parm *)skb->cb;
+	return opt->iif;
+}
+
 /* Initialize a PF_INET6 socket msg_name. */
 static void sctp_inet6_msgname(char *msgname, int *addr_len)
 {
...
...
@@ -496,13 +557,13 @@ static void sctp_inet6_msgname(char *msgname, int *addr_len)
 	sin6 = (struct sockaddr_in6 *)msgname;
 	sin6->sin6_family = AF_INET6;
 	sin6->sin6_flowinfo = 0;
-	sin6->sin6_scope_id = 0;
+	sin6->sin6_scope_id = 0; /*FIXME */
 	*addr_len = sizeof(struct sockaddr_in6);
 }
 
 /* Initialize a PF_INET msgname from a ulpevent. */
-static void sctp_inet6_event_msgname(struct sctp_ulpevent *event, char *msgname,
-				     int *addrlen)
+static void sctp_inet6_event_msgname(struct sctp_ulpevent *event,
+				     char *msgname, int *addrlen)
 {
 	struct sockaddr_in6 *sin6, *sin6from;
...
...
@@ -528,6 +589,8 @@ static void sctp_inet6_event_msgname(struct sctp_ulpevent *event, char *msgname,
 		sin6from = &event->asoc->peer.primary_addr.v6;
 		ipv6_addr_copy(&sin6->sin6_addr, &sin6from->sin6_addr);
+		if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
+			sin6->sin6_scope_id = sin6from->sin6_scope_id;
 	}
 }
...
...
@@ -546,8 +609,8 @@ static void sctp_inet6_skb_msgname(struct sk_buff *skb, char *msgname,
 	/* FIXME: Map ipv4 address into v4-mapped-on-v6 address. */
 	if (__constant_htons(ETH_P_IP) == skb->protocol) {
-		/* FIXME: Easy, but there was no way to test this
-		 * yet.
+		/* FIXME: The latest I-D added options for two
+		 * behaviors.
 		 */
 		return;
 	}
...
...
@@ -556,9 +619,8 @@ static void sctp_inet6_skb_msgname(struct sk_buff *skb, char *msgname,
 	ipv6_addr_copy(&sin6->sin6_addr, &skb->nh.ipv6h->saddr);
 	if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) {
-		struct inet6_skb_parm *opt =
-			(struct inet6_skb_parm *)skb->cb;
-		sin6->sin6_scope_id = opt->iif;
+		struct sctp_ulpevent *ev = sctp_skb2event(skb);
+		sin6->sin6_scope_id = ev->iif;
 	}
 }
...
...
@@ -612,22 +674,67 @@ static int sctp_inet6_bind_verify(struct sctp_opt *opt, union sctp_addr *addr)
 	struct sctp_af *af;
 
 	/* ASSERT: address family has already been verified. */
-	if (addr->sa.sa_family != AF_INET6) {
+	if (addr->sa.sa_family != AF_INET6)
 		af = sctp_get_af_specific(addr->sa.sa_family);
-	} else
-		af = opt->pf->af;
+	else {
+		struct sock *sk;
+		int type = ipv6_addr_type(&addr->v6.sin6_addr);
+		sk = &container_of(opt, struct sctp6_sock, sctp)->sk;
+		if (type & IPV6_ADDR_LINKLOCAL) {
+			/* Note: Behavior similar to af_inet6.c:
+			 *  1) Overrides previous bound_dev_if
+			 *  2) Destructive even if bind isn't successful.
+			 */
+			if (addr->v6.sin6_scope_id)
+				sk->bound_dev_if = addr->v6.sin6_scope_id;
+			if (!sk->bound_dev_if)
+				return 0;
+		}
+		af = opt->pf->af;
+	}
 	return af->available(addr);
 }
 
+/* Verify that the provided sockaddr looks bindable.  Common verification,
+ * has already been taken care of.
+ */
+static int sctp_inet6_send_verify(struct sctp_opt *opt, union sctp_addr *addr)
+{
+	struct sctp_af *af = NULL;
+
+	/* ASSERT: address family has already been verified. */
+	if (addr->sa.sa_family != AF_INET6)
+		af = sctp_get_af_specific(addr->sa.sa_family);
+	else {
+		struct sock *sk;
+		int type = ipv6_addr_type(&addr->v6.sin6_addr);
+		sk = &container_of(opt, struct sctp6_sock, sctp)->sk;
+		if (type & IPV6_ADDR_LINKLOCAL) {
+			/* Note: Behavior similar to af_inet6.c:
+			 *  1) Overrides previous bound_dev_if
+			 *  2) Destructive even if bind isn't successful.
+			 */
+			if (addr->v6.sin6_scope_id)
+				sk->bound_dev_if = addr->v6.sin6_scope_id;
+			if (!sk->bound_dev_if)
+				return 0;
+		}
+		af = opt->pf->af;
+	}
+	return af != NULL;
+}
+
 /* Fill in Supported Address Type information for INIT and INIT-ACK
  * chunks.   Note: In the future, we may want to look at sock options
  * to determine whether a PF_INET6 socket really wants to have IPV4
  * addresses.
  * Returns number of addresses supported.
  */
 static int sctp_inet6_supported_addrs(const struct sctp_opt *opt,
 				      __u16 *types)
 {
 	types[0] = SCTP_PARAM_IPV4_ADDRESS;
 	types[1] = SCTP_PARAM_IPV6_ADDRESS;
...
...
@@ -700,6 +807,7 @@ static struct sctp_af sctp_ipv6_specific = {
 	.inaddr_any      = sctp_v6_inaddr_any,
 	.is_any          = sctp_v6_is_any,
 	.available       = sctp_v6_available,
+	.skb_iif         = sctp_v6_skb_iif,
 	.net_header_len  = sizeof(struct ipv6hdr),
 	.sockaddr_len    = sizeof(struct sockaddr_in6),
 	.sa_family       = AF_INET6,
...
...
@@ -711,6 +819,7 @@ static struct sctp_pf sctp_pf_inet6_specific = {
 	.af_supported    = sctp_inet6_af_supported,
 	.cmp_addr        = sctp_inet6_cmp_addr,
 	.bind_verify     = sctp_inet6_bind_verify,
+	.send_verify     = sctp_inet6_send_verify,
 	.supported_addrs = sctp_inet6_supported_addrs,
 	.create_accept_sk = sctp_v6_create_accept_sk,
 	.af              = &sctp_ipv6_specific,
...
...
net/sctp/output.c
View file @
e35a3e9a
...
...
@@ -79,6 +79,7 @@ struct sctp_packet *sctp_packet_config(struct sctp_packet *packet,
 	packet->ecn_capable = ecn_capable;
 	packet->get_prepend_chunk = prepend_handler;
 	packet->has_cookie_echo = 0;
+	packet->has_sack = 0;
 	packet->ipfragok = 0;
 
 	/* We might need to call the prepend_handler right away.  */
...
...
@@ -100,6 +101,7 @@ struct sctp_packet *sctp_packet_init(struct sctp_packet *packet,
 	packet->ecn_capable = 0;
 	packet->get_prepend_chunk = NULL;
 	packet->has_cookie_echo = 0;
+	packet->has_sack = 0;
 	packet->ipfragok = 0;
 	packet->malloced = 0;
 	sctp_packet_reset(packet);
...
...
@@ -155,6 +157,37 @@ sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *packet,
 	return retval;
 }
 
+/* Try to bundle a SACK with the packet. */
+static sctp_xmit_t sctp_packet_bundle_sack(struct sctp_packet *pkt,
+					   struct sctp_chunk *chunk)
+{
+	sctp_xmit_t retval = SCTP_XMIT_OK;
+
+	/* If sending DATA and haven't aleady bundled a SACK, try to
+	 * bundle one in to the packet.
+	 */
+	if (sctp_chunk_is_data(chunk) && !pkt->has_sack &&
+	    !pkt->has_cookie_echo) {
+		struct sctp_association *asoc;
+		asoc = pkt->transport->asoc;
+
+		if (asoc->a_rwnd > asoc->rwnd) {
+			struct sctp_chunk *sack;
+			asoc->a_rwnd = asoc->rwnd;
+			sack = sctp_make_sack(asoc);
+			if (sack) {
+				struct timer_list *timer;
+				retval = sctp_packet_append_chunk(pkt, sack);
+				asoc->peer.sack_needed = 0;
+				timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];
+				if (timer_pending(timer) && del_timer(timer))
+					sctp_association_put(asoc);
+			}
+		}
+	}
+	return retval;
+}
+
 /* Append a chunk to the offered packet reporting back any inability to do
  * so.
  */
...
...
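A note on when sctp_packet_bundle_sack() above fires: only in front of outgoing DATA, only when neither a SACK nor a COOKIE ECHO is already in the packet, and only while the advertised window (a_rwnd) overstates the real receive window (rwnd), i.e. exactly when the peer's view is stale. The condition isolated as a sketch (not a helper in this patch):

	static int sack_bundle_wanted(struct sctp_packet *pkt,
				      struct sctp_chunk *chunk)
	{
		struct sctp_association *asoc = pkt->transport->asoc;

		return sctp_chunk_is_data(chunk) &&
		       !pkt->has_sack && !pkt->has_cookie_echo &&
		       (asoc->a_rwnd > asoc->rwnd);
	}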
@@ -163,10 +196,16 @@ sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet,
 {
 	sctp_xmit_t retval = SCTP_XMIT_OK;
 	__u16 chunk_len = WORD_ROUND(ntohs(chunk->chunk_hdr->length));
-	size_t psize = packet->size;
+	size_t psize;
 	size_t pmtu;
 	int too_big;
 
+	retval = sctp_packet_bundle_sack(packet, chunk);
+	psize = packet->size;
+
+	if (retval != SCTP_XMIT_OK)
+		goto finish;
+
 	pmtu  = ((packet->transport->asoc) ?
 		 (packet->transport->asoc->pmtu) :
 		 (packet->transport->pmtu));
...
...
@@ -214,11 +253,14 @@ sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet,
 	 */
 	if (sctp_chunk_is_data(chunk)) {
 		retval = sctp_packet_append_data(packet, chunk);
+		/* Disallow SACK bundling after DATA. */
+		packet->has_sack = 1;
 		if (SCTP_XMIT_OK != retval)
 			goto finish;
-	} else if (SCTP_CID_COOKIE_ECHO == chunk->chunk_hdr->type) {
+	} else if (SCTP_CID_COOKIE_ECHO == chunk->chunk_hdr->type)
 		packet->has_cookie_echo = 1;
-	}
+	else if (SCTP_CID_SACK == chunk->chunk_hdr->type)
+		packet->has_sack = 1;
 
 	/* It is OK to send this chunk.  */
 	__skb_queue_tail(&packet->chunks, (struct sk_buff *)chunk);
...
...
net/sctp/outqueue.c
View file @
e35a3e9a
...
...
@@ -138,13 +138,13 @@ void sctp_outq_init(sctp_association_t *asoc, struct sctp_outq *q)
 }
 
 /* Free the outqueue structure and any related pending chunks.
- * FIXME: Add SEND_FAILED support.
  */
 void sctp_outq_teardown(struct sctp_outq *q)
 {
 	struct sctp_transport *transport;
 	struct list_head *lchunk, *pos, *temp;
 	sctp_chunk_t *chunk;
+	struct sctp_ulpevent *ev;
 
 	/* Throw away unacknowledged chunks. */
 	list_for_each(pos, &q->asoc->peer.transport_addr_list) {
...
...
@@ -152,6 +152,14 @@ void sctp_outq_teardown(struct sctp_outq *q)
 		while ((lchunk = sctp_list_dequeue(&transport->transmitted))) {
 			chunk = list_entry(lchunk, sctp_chunk_t,
 					   transmitted_list);
+
+			/* Generate a SEND FAILED event. */
+			ev = sctp_ulpevent_make_send_failed(q->asoc, chunk,
+					SCTP_DATA_SENT, q->error, GFP_ATOMIC);
+			if (ev)
+				sctp_ulpq_tail_event(&q->asoc->ulpq, ev);
+
 			sctp_free_chunk(chunk);
 		}
...
...
@@ -171,8 +179,19 @@ void sctp_outq_teardown(struct sctp_outq *q)
 	}
 
 	/* Throw away any leftover data chunks. */
-	while ((chunk = sctp_outq_dequeue_data(q)))
+	while ((chunk = sctp_outq_dequeue_data(q))) {
+
+		/* Generate a SEND FAILED event. */
+		ev = sctp_ulpevent_make_send_failed(q->asoc, chunk,
+				SCTP_DATA_UNSENT, q->error, GFP_ATOMIC);
+		if (ev)
+			sctp_ulpq_tail_event(&q->asoc->ulpq, ev);
+
 		sctp_free_chunk(chunk);
+	}
+
+	q->error = 0;
 
 	/* Throw away any leftover control chunks. */
 	while ((chunk = (sctp_chunk_t *)skb_dequeue(&q->control)))
...
...
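The teardown above now raises SEND FAILED notifications instead of silently dropping data, distinguishing chunks that had reached a transport's transmitted list (SCTP_DATA_SENT) from those still queued (SCTP_DATA_UNSENT). The status choice, isolated as a sketch (not a helper that exists in this patch):

	static inline int send_failed_status(int was_transmitted)
	{
		return was_transmitted ? SCTP_DATA_SENT : SCTP_DATA_UNSENT;
	}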
@@ -357,7 +376,7 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
 	__u8 fast_retransmit = 0;
 
 	switch (reason) {
-	case SCTP_RETRANSMIT_T3_RTX:
+	case SCTP_RTXR_T3_RTX:
 		sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_T3_RTX);
 		/* Update the retran path if the T3-rtx timer has expired for
 		 * the current retran path.
...
...
@@ -365,10 +384,11 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
 		if (transport == transport->asoc->peer.retran_path)
 			sctp_assoc_update_retran_path(transport->asoc);
 		break;
-	case SCTP_RETRANSMIT_FAST_RTX:
+	case SCTP_RTXR_FAST_RTX:
 		sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX);
 		fast_retransmit = 1;
 		break;
+	case SCTP_RTXR_PMTUD:
 	default:
 		break;
 	}
...
...
@@ -876,7 +896,7 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
 		start_timer = 0;
 		queue = &q->out;
 
-		while (NULL != (chunk = sctp_outq_dequeue_data(q))) {
+		while ((chunk = sctp_outq_dequeue_data(q))) {
 			/* RFC 2960 6.5 Every DATA chunk MUST carry a valid
 			 * stream identifier.
 			 */
...
...
@@ -891,9 +911,7 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
 				if (ev)
 					sctp_ulpq_tail_event(&asoc->ulpq, ev);
 
-				/* Free the chunk.  This chunk is not on any
-				 * list yet, just free it.
-				 */
+				/* Free the chunk. */
 				sctp_free_chunk(chunk);
 				continue;
 			}
...
...
@@ -1572,7 +1590,7 @@ static void sctp_check_transmitted(struct sctp_outq *q,
 	if (transport) {
 		if (do_fast_retransmit)
-			sctp_retransmit(q, transport, SCTP_RETRANSMIT_FAST_RTX);
+			sctp_retransmit(q, transport, SCTP_RTXR_FAST_RTX);
 
 		SCTP_DEBUG_PRINTK("%s: transport: %p, cwnd: %d, "
 				  "ssthresh: %d, flight_size: %d, pba: %d\n",
...
...
net/sctp/protocol.c
View file @
e35a3e9a
...
...
@@ -170,7 +170,7 @@ static void __sctp_get_local_addr_list(struct sctp_protocol *proto)
 static void sctp_get_local_addr_list(struct sctp_protocol *proto)
 {
-	long flags __attribute__ ((unused));
+	unsigned long flags;
 
 	sctp_spin_lock_irqsave(&sctp_proto.local_addr_lock, flags);
 	__sctp_get_local_addr_list(&sctp_proto);
...
...
@@ -193,7 +193,7 @@ static void __sctp_free_local_addr_list(struct sctp_protocol *proto)
 /* Free the existing local addresses. */
 static void sctp_free_local_addr_list(struct sctp_protocol *proto)
 {
-	long flags __attribute__ ((unused));
+	unsigned long flags;
 
 	sctp_spin_lock_irqsave(&proto->local_addr_lock, flags);
 	__sctp_free_local_addr_list(proto);
...
...
@@ -208,7 +208,7 @@ int sctp_copy_local_addr_list(struct sctp_protocol *proto,
 	struct sockaddr_storage_list *addr;
 	int error = 0;
 	struct list_head *pos;
-	long flags __attribute__ ((unused));
+	unsigned long flags;
 
 	sctp_spin_lock_irqsave(&proto->local_addr_lock, flags);
 	list_for_each(pos, &proto->local_addr_list) {
...
...
@@ -233,7 +233,6 @@ int sctp_copy_local_addr_list(struct sctp_protocol *proto,
 end_copy:
 	sctp_spin_unlock_irqrestore(&proto->local_addr_lock, flags);
-
 	return error;
 }
...
...
@@ -383,7 +382,7 @@ static sctp_scope_t sctp_v4_scope(union sctp_addr *addr)
 * addresses. If an association is passed, trys to get a dst entry with a
 * source adddress that matches an address in the bind address list.
 */
-struct dst_entry *sctp_v4_get_dst(sctp_association_t *asoc,
+struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
 				  union sctp_addr *daddr,
 				  union sctp_addr *saddr)
 {
...
...
@@ -480,6 +479,12 @@ void sctp_v4_get_saddr(sctp_association_t *asoc,
 }
 
+/* What interface did this skb arrive on? */
+int sctp_v4_skb_iif(const struct sk_buff *skb)
+{
+	return ((struct rtable *)skb->dst)->rt_iif;
+}
+
 /* Create and initialize a new sk for the socket returned by accept(). */
 struct sock *sctp_v4_create_accept_sk(struct sock *sk,
 				      struct sctp_association *asoc)
...
...
@@ -538,10 +543,10 @@ struct sock *sctp_v4_create_accept_sk(struct sock *sk,
 /* Event handler for inet address addition/deletion events.
 * Basically, whenever there is an event, we re-build our local address list.
 */
-static int sctp_inetaddr_event(struct notifier_block *this, unsigned long event,
+static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
 			       void *ptr)
 {
-	long flags __attribute__ ((unused));
+	unsigned long flags;
 
 	sctp_spin_lock_irqsave(&sctp_proto.local_addr_lock, flags);
 	__sctp_free_local_addr_list(&sctp_proto);
...
...
@@ -689,6 +694,14 @@ static int sctp_inet_bind_verify(struct sctp_opt *opt, union sctp_addr *addr)
 	return sctp_v4_available(addr);
 }
 
+/* Verify that sockaddr looks sendable.  Common verification has already
+ * been taken care of.
+ */
+static int sctp_inet_send_verify(struct sctp_opt *opt, union sctp_addr *addr)
+{
+	return 1;
+}
+
 /* Fill in Supported Address Type information for INIT and INIT-ACK
 * chunks.  Returns number of addresses supported.
 */
...
...
@@ -721,6 +734,7 @@ static struct sctp_pf sctp_pf_inet = {
 	.af_supported  = sctp_inet_af_supported,
 	.cmp_addr      = sctp_inet_cmp_addr,
 	.bind_verify   = sctp_inet_bind_verify,
+	.send_verify   = sctp_inet_send_verify,
 	.supported_addrs = sctp_inet_supported_addrs,
 	.create_accept_sk = sctp_v4_create_accept_sk,
 	.af            = &sctp_ipv4_specific,
...
...
@@ -797,6 +811,7 @@ struct sctp_af sctp_ipv4_specific = {
 	.is_any        = sctp_v4_is_any,
 	.available     = sctp_v4_available,
 	.scope         = sctp_v4_scope,
+	.skb_iif       = sctp_v4_skb_iif,
 	.net_header_len = sizeof(struct iphdr),
 	.sockaddr_len  = sizeof(struct sockaddr_in),
 	.sa_family     = AF_INET,
...
...
@@ -874,6 +889,10 @@ __init int sctp_init(void)
 	int i;
 	int status = 0;
 
+	/* SCTP_DEBUG sanity check. */
+	if (!sctp_sanity_check())
+		return -EINVAL;
+
 	/* Add SCTP to inet_protos hash table.  */
 	if (inet_add_protocol(&sctp_protocol, IPPROTO_SCTP) < 0)
 		return -EAGAIN;
...
...
net/sctp/sm_make_chunk.c
View file @
e35a3e9a
...
...
@@ -66,6 +66,19 @@
 #include <net/sctp/sctp.h>
 #include <net/sctp/sm.h>
 
+/* What was the inbound interface for this chunk? */
+int sctp_chunk_iif(const struct sctp_chunk *chunk)
+{
+	struct sctp_af *af;
+	int iif = 0;
+
+	af = sctp_get_af_specific(ipver2af(chunk->skb->nh.iph->version));
+	if (af)
+		iif = af->skb_iif(chunk->skb);
+
+	return iif;
+}
+
 /* RFC 2960 3.3.2 Initiation (INIT) (1)
  *
  * Note 2: The ECN capable field is reserved for future use of
...
...
@@ -145,7 +158,7 @@ void sctp_init_cause(sctp_chunk_t *chunk, __u16 cause_code,
  */
 sctp_chunk_t *sctp_make_init(const sctp_association_t *asoc,
 			     const sctp_bind_addr_t *bp,
-			     int priority, int vparam_len)
+			     int gfp, int vparam_len)
 {
 	sctp_inithdr_t init;
 	union sctp_params addrs;
...
...
@@ -165,7 +178,7 @@ sctp_chunk_t *sctp_make_init(const sctp_association_t *asoc,
 	addrs.v = NULL;
 
 	/* Convert the provided bind address list to raw format */
-	addrs = sctp_bind_addrs_to_raw(bp, &addrs_len, priority);
+	addrs = sctp_bind_addrs_to_raw(bp, &addrs_len, gfp);
 	if (!addrs.v)
 		goto nodata;
...
...
@@ -225,7 +238,7 @@ sctp_chunk_t *sctp_make_init(const sctp_association_t *asoc,
 sctp_chunk_t *sctp_make_init_ack(const sctp_association_t *asoc,
 				 const sctp_chunk_t *chunk,
-				 int priority, int unkparam_len)
+				 int gfp, int unkparam_len)
 {
 	sctp_inithdr_t initack;
 	sctp_chunk_t *retval;
...
...
@@ -237,8 +250,7 @@ sctp_chunk_t *sctp_make_init_ack(const sctp_association_t *asoc,
 	retval = NULL;
 
-	addrs = sctp_bind_addrs_to_raw(&asoc->base.bind_addr, &addrs_len,
-				       priority);
+	addrs = sctp_bind_addrs_to_raw(&asoc->base.bind_addr, &addrs_len, gfp);
 	if (!addrs.v)
 		goto nomem_rawaddr;
...
...
@@ -1019,21 +1031,18 @@ sctp_chunk_t *sctp_make_chunk(const sctp_association_t *asoc,
 	struct sk_buff *skb;
 	struct sock *sk;
 
-	skb = dev_alloc_skb(WORD_ROUND(sizeof(sctp_chunkhdr_t) + paylen));
+	/* No need to allocate LL here, as this is only a chunk. */
+	skb = alloc_skb(WORD_ROUND(sizeof(sctp_chunkhdr_t) + paylen),
+			GFP_ATOMIC);
 	if (!skb)
 		goto nodata;
 
 	/* Make room for the chunk header.  */
 	chunk_hdr = (sctp_chunkhdr_t *)skb_put(skb, sizeof(sctp_chunkhdr_t));
-	skb_pull(skb, sizeof(sctp_chunkhdr_t));
-
 	chunk_hdr->type	  = type;
 	chunk_hdr->flags  = flags;
 	chunk_hdr->length = htons(sizeof(sctp_chunkhdr_t));
 
-	/* Move the data pointer back up to the start of the chunk.  */
-	skb_push(skb, sizeof(sctp_chunkhdr_t));
-
 	sk = asoc ? asoc->base.sk : NULL;
 	retval = sctp_chunkify(skb, asoc, sk);
 	if (!retval) {
...
...
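The allocation hunk above drops dev_alloc_skb() for a plain alloc_skb(): a chunk buffer never traverses a device queue, so the link-level headroom dev_alloc_skb() reserves is wasted. Roughly what the dropped helper does, as a sketch of the 2.5-era behavior (not its exact source):

	static inline struct sk_buff *dev_alloc_skb_sketch(unsigned int length)
	{
		/* alloc_skb() plus reserved headroom for a link-layer header. */
		struct sk_buff *skb = alloc_skb(length + 16, GFP_ATOMIC);

		if (skb)
			skb_reserve(skb, 16);
		return skb;
	}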
@@ -1162,7 +1171,7 @@ int sctp_datachunks_from_user(sctp_association_t *asoc,
 			msg_len -= first_len;
 			whole = 1;
 		}
-		}
+	}
 
 	/* How many full sized?  How many bytes leftover? */
 	whole += msg_len / max;
...
...
@@ -1198,7 +1207,7 @@ int sctp_datachunks_from_user(sctp_association_t *asoc,
 		__skb_queue_tail(chunks, (struct sk_buff *)chunk);
 
-		/* The first chunk, the first chunk was likely short
+		/* The first chunk, the first chunk was likely short
 		 * to allow bundling, so reset to full size.
 		 */
 		if (0 == i)
...
...
@@ -1282,26 +1291,26 @@ void sctp_chunk_assign_tsn(sctp_chunk_t *chunk)
 }
 
 /* Create a CLOSED association to use with an incoming packet. */
-sctp_association_t *sctp_make_temp_asoc(const sctp_endpoint_t *ep,
-					sctp_chunk_t *chunk, int priority)
+sctp_association_t *sctp_make_temp_asoc(const struct sctp_endpoint *ep,
+					struct sctp_chunk *chunk, int gfp)
 {
 	sctp_association_t *asoc;
+	struct sk_buff *skb;
 	sctp_scope_t scope;
 
 	/* Create the bare association.  */
 	scope = sctp_scope(sctp_source(chunk));
-	asoc = sctp_association_new(ep, ep->base.sk, scope, priority);
+	asoc = sctp_association_new(ep, ep->base.sk, scope, gfp);
 	if (!asoc)
 		goto nodata;
+	skb = chunk->skb;
 
 	/* Create an entry for the source address of the packet.  */
-	switch (chunk->skb->nh.iph->version) {
+	/* FIXME: Use the af specific helpers. */
+	switch (skb->nh.iph->version) {
 	case 4:
 		asoc->c.peer_addr.v4.sin_family     = AF_INET;
 		asoc->c.peer_addr.v4.sin_port = ntohs(chunk->sctp_hdr->source);
-		asoc->c.peer_addr.v4.sin_addr.s_addr = chunk->skb->nh.iph->saddr;
+		asoc->c.peer_addr.v4.sin_addr.s_addr = skb->nh.iph->saddr;
 		break;
 
 	case 6:
...
...
@@ -1309,8 +1318,9 @@ sctp_association_t *sctp_make_temp_asoc(const sctp_endpoint_t *ep,
 		asoc->c.peer_addr.v6.sin6_port = ntohs(chunk->sctp_hdr->source);
 		asoc->c.peer_addr.v6.sin6_flowinfo = 0; /* BUG BUG BUG */
-		asoc->c.peer_addr.v6.sin6_addr = chunk->skb->nh.ipv6h->saddr;
-		asoc->c.peer_addr.v6.sin6_scope_id = 0; /* BUG BUG BUG */
+		asoc->c.peer_addr.v6.sin6_addr = skb->nh.ipv6h->saddr;
+		asoc->c.peer_addr.v6.sin6_scope_id =
+			((struct inet6_skb_parm *)skb->cb)->iif;
 		break;
 
 	default:
...
...
@@ -1397,7 +1407,7 @@ sctp_cookie_param_t *sctp_pack_cookie(const sctp_endpoint_t *ep,
 /* Unpack the cookie from COOKIE ECHO chunk, recreating the association.  */
 sctp_association_t *sctp_unpack_cookie(const sctp_endpoint_t *ep,
 				       const sctp_association_t *asoc,
-				       sctp_chunk_t *chunk, int priority,
+				       sctp_chunk_t *chunk, int gfp,
 				       int *error, sctp_chunk_t **err_chk_p)
 {
 	sctp_association_t *retval = NULL;
...
...
@@ -1408,6 +1418,7 @@ sctp_association_t *sctp_unpack_cookie(const sctp_endpoint_t *ep,
 	__u8 digest_buf[SCTP_SIGNATURE_SIZE];
 	int secret;
 	sctp_scope_t scope;
+	struct sk_buff *skb = chunk->skb;
 
 	headersize = sizeof(sctp_chunkhdr_t) + SCTP_SECRET_SIZE;
 	bodysize = ntohs(chunk->chunk_hdr->length) - headersize;
...
...
@@ -1450,7 +1461,7 @@ sctp_association_t *sctp_unpack_cookie(const sctp_endpoint_t *ep,
 	 * an association, there is no need to check cookie's expiration
 	 * for init collision case of lost COOKIE ACK.
 	 */
-	if (!asoc && tv_lt(bear_cookie->expiration, chunk->skb->stamp)) {
+	if (!asoc && tv_lt(bear_cookie->expiration, skb->stamp)) {
 		__u16 len;
 		/*
 		 * Section 3.3.10.3 Stale Cookie Error (3)
...
...
@@ -1463,9 +1474,9 @@ sctp_association_t *sctp_unpack_cookie(const sctp_endpoint_t *ep,
 		len = ntohs(chunk->chunk_hdr->length);
 		*err_chk_p = sctp_make_op_error_space(asoc, chunk, len);
 		if (*err_chk_p) {
-			suseconds_t usecs = (chunk->skb->stamp.tv_sec -
+			suseconds_t usecs = (skb->stamp.tv_sec -
 				bear_cookie->expiration.tv_sec) * 1000000L +
-				chunk->skb->stamp.tv_usec -
+				skb->stamp.tv_usec -
 				bear_cookie->expiration.tv_usec;
 
 			usecs = htonl(usecs);
...
...
@@ -1480,7 +1491,7 @@ sctp_association_t *sctp_unpack_cookie(const sctp_endpoint_t *ep,
 	/* Make a new base association.  */
 	scope = sctp_scope(sctp_source(chunk));
-	retval = sctp_association_new(ep, ep->base.sk, scope, priority);
+	retval = sctp_association_new(ep, ep->base.sk, scope, gfp);
 	if (!retval) {
 		*error = -SCTP_IERROR_NOMEM;
 		goto fail;
...
...
@@ -1522,13 +1533,14 @@ sctp_association_t *sctp_unpack_cookie(const sctp_endpoint_t *ep,
  * 3rd Level Abstractions
  ********************************************************************/
 
+struct __sctp_missing {
+	__u32 num_missing;
+	__u16 type;
+}  __attribute__((packed));
+
 /*
  * Report a missing mandatory parameter.
  */
 static int sctp_process_missing_param(const sctp_association_t *asoc,
 				      sctp_param_t paramtype,
 				      sctp_chunk_t *chunk,
...
...
@@ -1774,8 +1786,7 @@ int sctp_verify_init(const sctp_association_t *asoc,
  */
 int sctp_process_init(sctp_association_t *asoc, sctp_cid_t cid,
 		      const union sctp_addr *peer_addr,
-		      sctp_init_chunk_t *peer_init,
-		      int priority)
+		      sctp_init_chunk_t *peer_init, int gfp)
 {
 	union sctp_params param;
 	struct sctp_transport *transport;
...
...
@@ -1793,14 +1804,14 @@ int sctp_process_init(sctp_association_t *asoc, sctp_cid_t cid,
 	 * be a a better choice than any of the embedded addresses.
 	 */
 	if (peer_addr)
-		if (!sctp_assoc_add_peer(asoc, peer_addr, priority))
+		if (!sctp_assoc_add_peer(asoc, peer_addr, gfp))
 			goto nomem;
 
 	/* Process the initialization parameters.  */
 	sctp_walk_params(param, peer_init, init_hdr.params) {
-		if (!sctp_process_param(asoc, param, peer_addr, priority))
+		if (!sctp_process_param(asoc, param, peer_addr, gfp))
 			goto clean_up;
 	}
...
...
@@ -1842,7 +1853,7 @@ int sctp_process_init(sctp_association_t *asoc, sctp_cid_t cid,
 	/* Copy cookie in case we need to resend COOKIE-ECHO. */
 	cookie = asoc->peer.cookie;
 	if (cookie) {
-		asoc->peer.cookie = kmalloc(asoc->peer.cookie_len, priority);
+		asoc->peer.cookie = kmalloc(asoc->peer.cookie_len, gfp);
 		if (!asoc->peer.cookie)
 			goto clean_up;
 		memcpy(asoc->peer.cookie, cookie, asoc->peer.cookie_len);
...
...
@@ -1871,8 +1882,7 @@ int sctp_process_init(sctp_association_t *asoc, sctp_cid_t cid,
 	/* Allocate storage for the negotiated streams. */
 	asoc->ssnmap = sctp_ssnmap_new(asoc->peer.i.num_outbound_streams,
-				       asoc->c.sinit_num_ostreams,
-				       priority);
+				       asoc->c.sinit_num_ostreams, gfp);
 	if (!asoc->ssnmap)
 		goto nomem_ssnmap;
...
...
@@ -1914,7 +1924,7 @@ int sctp_process_init(sctp_association_t *asoc, sctp_cid_t cid,
 * structures for the addresses.
 */
 int sctp_process_param(sctp_association_t *asoc, union sctp_params param,
-		       const union sctp_addr *peer_addr, int priority)
+		       const union sctp_addr *peer_addr, int gfp)
 {
 	union sctp_addr addr;
 	int i;
...
...
@@ -1933,10 +1943,10 @@ int sctp_process_param(sctp_association_t *asoc, union sctp_params param,
 		break;
 		/* Fall through. */
 	case SCTP_PARAM_IPV4_ADDRESS:
-		sctp_param2sockaddr(&addr, param.addr, asoc->peer.port);
+		sctp_param2sockaddr(&addr, param.addr, asoc->peer.port, 0);
 		scope = sctp_scope(peer_addr);
 		if (sctp_in_scope(&addr, scope))
-			if (!sctp_assoc_add_peer(asoc, &addr, priority))
+			if (!sctp_assoc_add_peer(asoc, &addr, gfp))
 				return 0;
 		break;
...
...
@@ -2051,7 +2061,7 @@ __u32 sctp_generate_tsn(const sctp_endpoint_t *ep)
 /* Convert from an SCTP IP parameter to a union sctp_addr.  */
 void sctp_param2sockaddr(union sctp_addr *addr, sctp_addr_param_t *param,
-			 __u16 port)
+			 __u16 port, int iif)
 {
 	switch (param->v4.param_hdr.type) {
 	case SCTP_PARAM_IPV4_ADDRESS:
...
...
@@ -2065,7 +2075,7 @@ void sctp_param2sockaddr(union sctp_addr *addr, sctp_addr_param_t *param,
 		addr->v6.sin6_port = port;
 		addr->v6.sin6_flowinfo = 0; /* BUG */
 		addr->v6.sin6_addr = param->v6.addr;
-		addr->v6.sin6_scope_id = 0; /* BUG */
+		addr->v6.sin6_scope_id = iif;
 		break;
 
 	default:
...
...
net/sctp/sm_sideeffect.c
View file @
e35a3e9a
...
...
@@ -55,1202 +55,1106 @@
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
/* Do forward declarations of static functions. */
static
void
sctp_do_ecn_ce_work
(
sctp_association_t
*
,
__u32
lowest_tsn
);
static
sctp_chunk_t
*
sctp_do_ecn_ecne_work
(
sctp_association_t
*
asoc
,
__u32
lowest_tsn
,
sctp_chunk_t
*
);
static
void
sctp_do_ecn_cwr_work
(
sctp_association_t
*
,
__u32
lowest_tsn
);
static
void
sctp_do_8_2_transport_strike
(
sctp_association_t
*
,
struct
sctp_transport
*
);
static
void
sctp_cmd_init_failed
(
sctp_cmd_seq_t
*
,
sctp_association_t
*
);
static
void
sctp_cmd_assoc_failed
(
sctp_cmd_seq_t
*
,
sctp_association_t
*
,
sctp_event_t
,
sctp_subtype_t
,
sctp_chunk_t
*
chunk
);
static
int
sctp_cmd_process_init
(
sctp_cmd_seq_t
*
,
sctp_association_t
*
,
sctp_chunk_t
*
chunk
,
sctp_init_chunk_t
*
peer_init
,
int
priority
);
static
void
sctp_cmd_hb_timers_start
(
sctp_cmd_seq_t
*
,
sctp_association_t
*
);
static
void
sctp_cmd_hb_timers_stop
(
sctp_cmd_seq_t
*
,
sctp_association_t
*
);
static
void
sctp_cmd_hb_timer_update
(
sctp_cmd_seq_t
*
,
sctp_association_t
*
,
struct
sctp_transport
*
);
static
void
sctp_cmd_transport_reset
(
sctp_cmd_seq_t
*
,
sctp_association_t
*
,
struct
sctp_transport
*
);
static
void
sctp_cmd_transport_on
(
sctp_cmd_seq_t
*
,
sctp_association_t
*
,
struct
sctp_transport
*
,
sctp_chunk_t
*
);
static
int
sctp_cmd_process_sack
(
sctp_cmd_seq_t
*
,
sctp_association_t
*
,
sctp_sackhdr_t
*
);
static
void
sctp_cmd_setup_t2
(
sctp_cmd_seq_t
*
,
sctp_association_t
*
,
sctp_chunk_t
*
);
static
void
sctp_cmd_new_state
(
sctp_cmd_seq_t
*
,
sctp_association_t
*
,
sctp_state_t
);
/* These three macros allow us to pull the debugging code out of the
* main flow of sctp_do_sm() to keep attention focused on the real
* functionality there.
*/
#define DEBUG_PRE \
SCTP_DEBUG_PRINTK("sctp_do_sm prefn: " \
"ep %p, %s, %s, asoc %p[%s], %s\n", \
ep, sctp_evttype_tbl[event_type], \
(*debug_fn)(subtype), asoc, \
sctp_state_tbl[state], state_fn->name)
/********************************************************************
* Helper functions
********************************************************************/
#define DEBUG_POST \
SCTP_DEBUG_PRINTK("sctp_do_sm postfn: " \
"asoc %p, status: %s\n", \
asoc, sctp_status_tbl[status])
/* A helper function for delayed processing of INET ECN CE bit. */
static
void
sctp_do_ecn_ce_work
(
sctp_association_t
*
asoc
,
__u32
lowest_tsn
)
{
/* Save the TSN away for comparison when we receive CWR */
#define DEBUG_POST_SFX \
SCTP_DEBUG_PRINTK("sctp_do_sm post sfx: error %d, asoc %p[%s]\n", \
error, asoc, \
sctp_state_tbl[(asoc && sctp_id2assoc(ep->base.sk, \
sctp_assoc2id(asoc)))?asoc->state:SCTP_STATE_CLOSED])
asoc
->
last_ecne_tsn
=
lowest_tsn
;
asoc
->
need_ecne
=
1
;
}
/*
* This is the master state machine processing function.
/*
Helper function for delayed processing of SCTP ECNE chunk. */
/* RFC 2960 Appendix A
*
* If you want to understand all of lksctp, this is a
* good place to start.
* RFC 2481 details a specific bit for a sender to send in
* the header of its next outbound TCP segment to indicate to
* its peer that it has reduced its congestion window. This
* is termed the CWR bit. For SCTP the same indication is made
* by including the CWR chunk. This chunk contains one data
* element, i.e. the TSN number that was sent in the ECNE chunk.
* This element represents the lowest TSN number in the datagram
* that was originally marked with the CE bit.
*/
int
sctp_do_sm
(
sctp_event_t
event_type
,
sctp_subtype_t
subtype
,
sctp_state_t
state
,
sctp_endpoint_t
*
ep
,
sctp_association_t
*
asoc
,
void
*
event_arg
,
int
priority
)
static
sctp_chunk_t
*
sctp_do_ecn_ecne_work
(
sctp_association_t
*
asoc
,
__u32
lowest_tsn
,
sctp_chunk_t
*
chunk
)
{
sctp_cmd_seq_t
commands
;
sctp_sm_table_entry_t
*
state_fn
;
sctp_disposition_t
status
;
int
error
=
0
;
typedef
const
char
*
(
printfn_t
)(
sctp_subtype_t
);
sctp_chunk_t
*
repl
;
static
printfn_t
*
table
[]
=
{
NULL
,
sctp_cname
,
sctp_tname
,
sctp_oname
,
sctp_pname
,
};
printfn_t
*
debug_fn
__attribute__
((
unused
))
=
table
[
event_type
];
/* Our previously transmitted packet ran into some congestion
* so we should take action by reducing cwnd and ssthresh
* and then ACK our peer that we we've done so by
* sending a CWR.
*/
/* Look up the state function, run it, and then process the
* side effects. These three steps are the heart of lksctp.
/* First, try to determine if we want to actually lower
* our cwnd variables. Only lower them if the ECNE looks more
* recent than the last response.
*/
state_fn
=
sctp_sm_lookup_event
(
event_type
,
state
,
subtype
);
if
(
TSN_lt
(
asoc
->
last_cwr_tsn
,
lowest_tsn
))
{
struct
sctp_transport
*
transport
;
sctp_init_cmd_seq
(
&
commands
);
/* Find which transport's congestion variables
* need to be adjusted.
*/
transport
=
sctp_assoc_lookup_tsn
(
asoc
,
lowest_tsn
);
DEBUG_PRE
;
status
=
(
*
state_fn
->
fn
)(
ep
,
asoc
,
subtype
,
event_arg
,
&
commands
);
DEBUG_POST
;
/* Update the congestion variables. */
if
(
transport
)
sctp_transport_lower_cwnd
(
transport
,
SCTP_LOWER_CWND_ECNE
);
asoc
->
last_cwr_tsn
=
lowest_tsn
;
}
error
=
sctp_side_effects
(
event_type
,
subtype
,
state
,
ep
,
asoc
,
event_arg
,
status
,
&
commands
,
priority
);
DEBUG_POST_SFX
;
/* Always try to quiet the other end. In case of lost CWR,
* resend last_cwr_tsn.
*/
repl
=
sctp_make_cwr
(
asoc
,
asoc
->
last_cwr_tsn
,
chunk
);
return
error
;
/* If we run out of memory, it will look like a lost CWR. We'll
* get back in sync eventually.
*/
return
repl
;
}
#undef DEBUG_PRE
#undef DEBUG_POST

/*****************************************************************
 * This is the master state function side effect processing function.
 *****************************************************************/
int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
		      sctp_state_t state, sctp_endpoint_t *ep,
		      sctp_association_t *asoc, void *event_arg,
		      sctp_disposition_t status, sctp_cmd_seq_t *commands,
		      int priority)
{
	int error;

	/* FIXME - Most of the dispositions left today would be categorized
	 * as "exceptional" dispositions.  For those dispositions, it
	 * may not be proper to run through any of the commands at all.
	 * For example, the command interpreter might be run only with
	 * disposition SCTP_DISPOSITION_CONSUME.
	 */
	if (0 != (error = sctp_cmd_interpreter(event_type, subtype, state,
					       ep, asoc, event_arg, status,
					       commands, priority)))
		goto bail;

	switch (status) {
	case SCTP_DISPOSITION_DISCARD:
		SCTP_DEBUG_PRINTK("Ignored sctp protocol event - state %d, "
				  "event_type %d, event_id %d\n",
				  state, event_type, subtype.chunk);
		break;

	case SCTP_DISPOSITION_NOMEM:
		/* We ran out of memory, so we need to discard this
		 * packet.
		 */
		/* BUG--we should now recover some memory, probably by
		 * reneging...
		 */
		error = -ENOMEM;
		break;

	case SCTP_DISPOSITION_DELETE_TCB:
		/* This should now be a command. */
		break;

	case SCTP_DISPOSITION_CONSUME:
	case SCTP_DISPOSITION_ABORT:
		/*
		 * We should no longer have much work to do here as the
		 * real work has been done as explicit commands above.
		 */
		break;

	case SCTP_DISPOSITION_VIOLATION:
		printk(KERN_ERR "sctp protocol violation state %d "
		       "chunkid %d\n", state, subtype.chunk);
		break;

	case SCTP_DISPOSITION_NOT_IMPL:
		printk(KERN_WARNING "sctp unimplemented feature in state %d, "
		       "event_type %d, event_id %d\n",
		       state, event_type, subtype.chunk);
		break;

	case SCTP_DISPOSITION_BUG:
		printk(KERN_ERR "sctp bug in state %d, "
		       "event_type %d, event_id %d\n",
		       state, event_type, subtype.chunk);
		BUG();
		break;

	default:
		printk(KERN_ERR "sctp impossible disposition %d "
		       "in state %d, event_type %d, event_id %d\n",
		       status, state, event_type, subtype.chunk);
		BUG();
		break;
	};

bail:
	return error;
}

/* Helper function to do delayed processing of ECN CWR chunk.  */
static void sctp_do_ecn_cwr_work(sctp_association_t *asoc, __u32 lowest_tsn)
{
	/* Turn off ECNE getting auto-prepended to every outgoing
	 * packet
	 */
	asoc->need_ecne = 0;
}

/* Generate SACK if necessary.  We call this at the end of a packet.  */
int sctp_gen_sack(struct sctp_association *asoc, int force,
		  sctp_cmd_seq_t *commands)
{
	__u32 ctsn, max_tsn_seen;
	struct sctp_chunk *sack;
	int error = 0;

	if (force)
		asoc->peer.sack_needed = 1;

	ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map);
	max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map);

	/* From 12.2 Parameters necessary per association (i.e. the TCB):
	 *
	 * Ack State : This flag indicates if the next received packet
	 *           : is to be responded to with a SACK. ...
	 *           : When DATA chunks are out of order, SACK's
	 *           : are not delayed (see Section 6).
	 *
	 * [This is actually not mentioned in Section 6, but we
	 * implement it here anyway. --piggy]
	 */
	if (max_tsn_seen != ctsn)
		asoc->peer.sack_needed = 1;

	/* From 6.2  Acknowledgement on Reception of DATA Chunks:
	 *
	 * Section 4.2 of [RFC2581] SHOULD be followed.  Specifically,
	 * an acknowledgement SHOULD be generated for at least every
	 * second packet (not every second DATA chunk) received, and
	 * SHOULD be generated within 200 ms of the arrival of any
	 * unacknowledged DATA chunk. ...
	 */
	if (!asoc->peer.sack_needed) {
		/* We will need a SACK for the next packet.  */
		asoc->peer.sack_needed = 1;
		goto out;
	} else {
		if (asoc->a_rwnd > asoc->rwnd)
			asoc->a_rwnd = asoc->rwnd;
		sack = sctp_make_sack(asoc);
		if (!sack)
			goto nomem;

		asoc->peer.sack_needed = 0;

		error = sctp_outq_tail(&asoc->outqueue, sack);

		/* Stop the SACK timer.  */
		sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
				SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
	}
out:
	return error;
nomem:
	error = -ENOMEM;
	return error;
}
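sctp_gen_sack() encodes the RFC 2960 6.2 policy: acknowledge at least every second packet, but never delay a SACK when TSNs arrive out of order or when the state machine forces one. A toy user-space model of just that decision, assuming a single sack_needed flag as in the code above (the toy_* names are illustrative):

#include <stdio.h>

/* Toy model of the sctp_gen_sack() decision: sack_needed flips on
 * every second in-order packet, but out-of-order or forced input
 * sends a SACK immediately.  Purely illustrative state, not kernel
 * code.
 */
struct toy_peer { int sack_needed; };

static const char *rx_packet(struct toy_peer *p, int out_of_order, int force)
{
	if (force || out_of_order)
		p->sack_needed = 1;

	if (!p->sack_needed) {
		p->sack_needed = 1;	/* SACK the next packet instead */
		return "delay";
	}
	p->sack_needed = 0;
	return "SACK now";
}

int main(void)
{
	struct toy_peer p = { 0 };

	printf("%s\n", rx_packet(&p, 0, 0));	/* delay                    */
	printf("%s\n", rx_packet(&p, 0, 0));	/* SACK now: second packet  */
	printf("%s\n", rx_packet(&p, 1, 0));	/* SACK now: out of order   */
	return 0;
}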
/********************************************************************
 * 2nd Level Abstractions
 ********************************************************************/

/* When the T3-RTX timer expires, it calls this function to create the
 * relevant state machine event.
 */
void sctp_generate_t3_rtx_event(unsigned long peer)
{
	int error;
	struct sctp_transport *transport = (struct sctp_transport *) peer;
	sctp_association_t *asoc = transport->asoc;

	/* Check whether a task is in the sock.  */
	sctp_bh_lock_sock(asoc->base.sk);
	if (sock_owned_by_user(asoc->base.sk)) {
		SCTP_DEBUG_PRINTK("%s:Sock is busy.\n", __FUNCTION__);

		/* Try again later.  */
		if (!mod_timer(&transport->T3_rtx_timer, jiffies + (HZ/20)))
			sctp_transport_hold(transport);
		goto out_unlock;
	}

	/* Is this transport really dead and just waiting around for
	 * the timer to let go of the reference?
	 */
	if (transport->dead)
		goto out_unlock;

	/* Run through the state machine.  */
	error = sctp_do_sm(SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_T3_RTX),
			   asoc->state, asoc->ep, asoc,
			   transport, GFP_ATOMIC);

	if (error)
		asoc->base.sk->err = -error;

out_unlock:
	sctp_bh_unlock_sock(asoc->base.sk);
	sctp_transport_put(transport);
}

/* This is a sa interface for producing timeout events.  It works
 * for timeouts which use the association as their parameter.
 */
static void sctp_generate_timeout_event(sctp_association_t *asoc,
					sctp_event_timeout_t timeout_type)
{
	int error = 0;

	sctp_bh_lock_sock(asoc->base.sk);
	if (sock_owned_by_user(asoc->base.sk)) {
		SCTP_DEBUG_PRINTK("%s:Sock is busy: timer %d\n",
				  __FUNCTION__, timeout_type);

		/* Try again later.  */
		if (!mod_timer(&asoc->timers[timeout_type], jiffies + (HZ/20)))
			sctp_association_hold(asoc);
		goto out_unlock;
	}

	/* Is this association really dead and just waiting around for
	 * the timer to let go of the reference?
	 */
	if (asoc->base.dead)
		goto out_unlock;

	/* Run through the state machine.  */
	error = sctp_do_sm(SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(timeout_type),
			   asoc->state, asoc->ep, asoc,
			   (void *)timeout_type, GFP_ATOMIC);

	if (error)
		asoc->base.sk->err = -error;

out_unlock:
	sctp_bh_unlock_sock(asoc->base.sk);
	sctp_association_put(asoc);
}

void sctp_generate_t1_cookie_event(unsigned long data)
{
	sctp_association_t *asoc = (sctp_association_t *) data;
	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_COOKIE);
}

void sctp_generate_t1_init_event(unsigned long data)
{
	sctp_association_t *asoc = (sctp_association_t *) data;
	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_INIT);
}

void sctp_generate_t2_shutdown_event(unsigned long data)
{
	sctp_association_t *asoc = (sctp_association_t *) data;
	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T2_SHUTDOWN);
}

void sctp_generate_t5_shutdown_guard_event(unsigned long data)
{
	sctp_association_t *asoc = (sctp_association_t *) data;
	sctp_generate_timeout_event(asoc,
				    SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD);
} /* sctp_generate_t5_shutdown_guard_event() */

void sctp_generate_autoclose_event(unsigned long data)
{
	sctp_association_t *asoc = (sctp_association_t *) data;
	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_AUTOCLOSE);
}

/* Generate a heart beat event.  If the sock is busy, reschedule.  Make
 * sure that the transport is still valid.
 */
void sctp_generate_heartbeat_event(unsigned long data)
{
	int error = 0;
	struct sctp_transport *transport = (struct sctp_transport *) data;
	sctp_association_t *asoc = transport->asoc;

	sctp_bh_lock_sock(asoc->base.sk);
	if (sock_owned_by_user(asoc->base.sk)) {
		SCTP_DEBUG_PRINTK("%s:Sock is busy.\n", __FUNCTION__);

		/* Try again later.  */
		if (!mod_timer(&transport->hb_timer, jiffies + (HZ/20)))
			sctp_transport_hold(transport);
		goto out_unlock;
	}

	/* Is this structure just waiting around for us to actually
	 * get destroyed?
	 */
	if (transport->dead)
		goto out_unlock;

	error = sctp_do_sm(SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_HEARTBEAT),
			   asoc->state, asoc->ep, asoc,
			   transport, GFP_ATOMIC);

	if (error)
		asoc->base.sk->err = -error;

out_unlock:
	sctp_bh_unlock_sock(asoc->base.sk);
	sctp_transport_put(transport);
}

/* Inject a SACK Timeout event into the state machine.  */
void sctp_generate_sack_event(unsigned long data)
{
	sctp_association_t *asoc = (sctp_association_t *) data;
	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_SACK);
}

sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = {
	NULL,
	sctp_generate_t1_cookie_event,
	sctp_generate_t1_init_event,
	sctp_generate_t2_shutdown_event,
	NULL,
	sctp_generate_t5_shutdown_guard_event,
	sctp_generate_heartbeat_event,
	sctp_generate_sack_event,
	sctp_generate_autoclose_event,
};
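All of these timeout generators share one rearm idiom: when the socket is busy they push the timer out by HZ/20, and they take a reference only when mod_timer() reports the timer was not already pending, so each armed timer owns exactly one reference. A toy sketch of that invariant (the toy_* names are made up for illustration):

#include <stdio.h>

/* Toy version of the rearm-and-hold pattern used above: each *armed*
 * timer owns one reference, so a reference is taken only when arming
 * a timer that was not already pending (mod_timer() returned 0), and
 * dropped only when a pending timer is successfully deleted.
 */
struct toy_obj { int refcnt; int timer_pending; };

static int toy_mod_timer(struct toy_obj *o)
{
	int was_pending = o->timer_pending;
	o->timer_pending = 1;
	return was_pending;		/* mirrors mod_timer()'s return */
}

static void toy_hold(struct toy_obj *o) { o->refcnt++; }

int main(void)
{
	struct toy_obj t = { .refcnt = 1, .timer_pending = 0 };

	if (!toy_mod_timer(&t))		/* first arm: take a reference  */
		toy_hold(&t);
	if (!toy_mod_timer(&t))		/* rearm: already pending, no ref */
		toy_hold(&t);

	printf("refcnt=%d\n", t.refcnt);	/* 2: base ref + timer ref */
	return 0;
}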
/* RFC 2960 8.2 Path Failure Detection
 *
 * When its peer endpoint is multi-homed, an endpoint should keep a
 * error counter for each of the destination transport addresses of the
 * peer endpoint.
 *
 * Each time the T3-rtx timer expires on any address, or when a
 * HEARTBEAT sent to an idle address is not acknowledged within a RTO,
 * the error counter of that destination address will be incremented.
 * When the value in the error counter exceeds the protocol parameter
 * 'Path.Max.Retrans' of that destination address, the endpoint should
 * mark the destination transport address as inactive, and a
 * notification SHOULD be sent to the upper layer.
 *
 */
static void sctp_do_8_2_transport_strike(sctp_association_t *asoc,
					 struct sctp_transport *transport)
{
	/* The check for association's overall error counter exceeding the
	 * threshold is done in the state function.
	 */
	asoc->overall_error_count++;

	if (transport->active &&
	    (transport->error_count++ >= transport->error_threshold)) {
		SCTP_DEBUG_PRINTK("transport_strike: transport "
				  "IP:%d.%d.%d.%d failed.\n",
				  NIPQUAD(transport->ipaddr.v4.sin_addr));
		sctp_assoc_control_transport(asoc, transport,
					     SCTP_TRANSPORT_DOWN,
					     SCTP_FAILED_THRESHOLD);
	}

	/* E2) For the destination address for which the timer
	 * expires, set RTO <- RTO * 2 ("back off the timer").  The
	 * maximum value discussed in rule C7 above (RTO.max) may be
	 * used to provide an upper bound to this doubling operation.
	 */
	transport->rto = min((transport->rto * 2), transport->asoc->rto_max);
}

/* Worker routine to handle INIT command failure.  */
static void sctp_cmd_init_failed(sctp_cmd_seq_t *commands,
				 sctp_association_t *asoc, unsigned error)
{
	struct sctp_ulpevent *event;

	event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_CANT_STR_ASSOC,
						0, 0, 0, GFP_ATOMIC);

	if (event)
		sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
				SCTP_ULPEVENT(event));

	/* SEND_FAILED sent later when cleaning up the association.  */
	asoc->outqueue.error = error;
	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
}

/* Worker routine to handle SCTP_CMD_ASSOC_FAILED.  */
static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *commands,
				  struct sctp_association *asoc,
				  sctp_event_t event_type,
				  sctp_subtype_t subtype,
				  struct sctp_chunk *chunk, unsigned error)
{
	struct sctp_ulpevent *event;

	/* Cancel any partial delivery in progress.  */
	sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);

	event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST,
						(__u16)error, 0, 0,
						GFP_ATOMIC);
	if (event)
		sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
				SCTP_ULPEVENT(event));

	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
			SCTP_STATE(SCTP_STATE_CLOSED));

	/* SEND_FAILED sent later when cleaning up the association.  */
	asoc->outqueue.error = error;
	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
}

/* Process an init chunk (may be real INIT/INIT-ACK or an embedded INIT
 * inside the cookie.  In reality, this is only used for INIT-ACK processing
 * since all other cases use "temporary" associations and can do all
 * their work in statefuns directly.
 */
static int sctp_cmd_process_init(sctp_cmd_seq_t *commands,
				 struct sctp_association *asoc,
				 struct sctp_chunk *chunk,
				 sctp_init_chunk_t *peer_init, int gfp)
{
	int error;

	/* We only process the init as a sideeffect in a single
	 * case.   This is when we process the INIT-ACK.   If we
	 * fail during INIT processing (due to malloc problems),
	 * just return the error and stop processing the stack.
	 */
	if (!sctp_process_init(asoc, chunk->chunk_hdr->type,
			       sctp_source(chunk), peer_init, gfp))
		error = -ENOMEM;
	else
		error = 0;

	return error;
}

/* Helper function to break out starting up of heartbeat timers.  */
static void sctp_cmd_hb_timers_start(sctp_cmd_seq_t *cmds,
				     sctp_association_t *asoc)
{
	struct sctp_transport *t;
	struct list_head *pos;

	/* Start a heartbeat timer for each transport on the association.
	 * hold a reference on the transport to make sure none of
	 * the needed data structures go away.
	 */
	list_for_each(pos, &asoc->peer.transport_addr_list) {
		t = list_entry(pos, struct sctp_transport, transports);

		if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
			sctp_transport_hold(t);
	}
}

static void sctp_cmd_hb_timers_stop(sctp_cmd_seq_t *cmds,
				    sctp_association_t *asoc)
{
	struct sctp_transport *t;
	struct list_head *pos;

	/* Stop all heartbeat timers. */
	list_for_each(pos, &asoc->peer.transport_addr_list) {
		t = list_entry(pos, struct sctp_transport, transports);
		if (del_timer(&t->hb_timer))
			sctp_transport_put(t);
	}
}

/* This is the side-effect interpreter.  */
int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
			 sctp_state_t state, sctp_endpoint_t *ep,
			 sctp_association_t *asoc, void *event_arg,
			 sctp_disposition_t status, sctp_cmd_seq_t *commands,
			 int priority)
{
	int error = 0;
	int force;
	sctp_cmd_t *cmd;
	sctp_chunk_t *new_obj;
	sctp_chunk_t *chunk = NULL;
	struct sctp_packet *packet;
	struct list_head *pos;
	struct timer_list *timer;
	unsigned long timeout;
	struct sctp_transport *t;
	sctp_sackhdr_t sackh;

	if (SCTP_EVENT_T_TIMEOUT != event_type)
		chunk = (sctp_chunk_t *) event_arg;

	/* Note:  This whole file is a huge candidate for rework.
	 * For example, each command could either have its own handler, so
	 * the loop would look like:
	 *     while (cmds)
	 *         cmd->handle(x, y, z)
	 * --jgrimm
	 */
	while (NULL != (cmd = sctp_next_cmd(commands))) {
		switch (cmd->verb) {
		case SCTP_CMD_NOP:
			/* Do nothing. */
			break;

		case SCTP_CMD_NEW_ASOC:
			/* Register a new association.  */
			asoc = cmd->obj.ptr;
			/* Register with the endpoint.  */
			sctp_endpoint_add_asoc(ep, asoc);
			sctp_hash_established(asoc);
			break;

		case SCTP_CMD_UPDATE_ASSOC:
			sctp_assoc_update(asoc, cmd->obj.ptr);
			break;

		case SCTP_CMD_PURGE_OUTQUEUE:
			sctp_outq_teardown(&asoc->outqueue);
			break;

		case SCTP_CMD_DELETE_TCB:
			/* Delete the current association.  */
			sctp_unhash_established(asoc);
			sctp_association_free(asoc);
			asoc = NULL;
			break;

		case SCTP_CMD_NEW_STATE:
			/* Enter a new state.  */
			sctp_cmd_new_state(commands, asoc, cmd->obj.state);
			break;

		case SCTP_CMD_REPORT_TSN:
			/* Record the arrival of a TSN.  */
			sctp_tsnmap_mark(&asoc->peer.tsn_map, cmd->obj.u32);
			break;

		case SCTP_CMD_GEN_SACK:
			/* Generate a Selective ACK.
			 * The argument tells us whether to just count
			 * the packet and MAYBE generate a SACK, or
			 * force a SACK out.
			 */
			force = cmd->obj.i32;
			error = sctp_gen_sack(asoc, force, commands);
			break;

		case SCTP_CMD_PROCESS_SACK:
			/* Process an inbound SACK.  */
			error = sctp_cmd_process_sack(commands, asoc,
						      cmd->obj.ptr);
			break;

		case SCTP_CMD_GEN_INIT_ACK:
			/* Generate an INIT ACK chunk.  */
			new_obj = sctp_make_init_ack(asoc, chunk, GFP_ATOMIC,
						     0);
			if (!new_obj)
				goto nomem;

			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(new_obj));
			break;

		case SCTP_CMD_PEER_INIT:
			/* Process a unified INIT from the peer.
			 * Note: Only used during INIT-ACK processing.  If
			 * there is an error just return to the outer
			 * layer which will bail.
			 */
			error = sctp_cmd_process_init(commands, asoc, chunk,
						      cmd->obj.ptr, priority);
			break;

		case SCTP_CMD_GEN_COOKIE_ECHO:
			/* Generate a COOKIE ECHO chunk.  */
			new_obj = sctp_make_cookie_echo(asoc, chunk);
			if (!new_obj) {
				if (cmd->obj.ptr)
					sctp_free_chunk(cmd->obj.ptr);
				goto nomem;
			}
			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(new_obj));

			/* If there is an ERROR chunk to be sent along with
			 * the COOKIE_ECHO, send it, too.
			 */
			if (cmd->obj.ptr)
				sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
						SCTP_CHUNK(cmd->obj.ptr));
			break;

		case SCTP_CMD_GEN_SHUTDOWN:
			/* Generate SHUTDOWN when in SHUTDOWN_SENT state.
			 * Reset error counts.
			 */
			asoc->overall_error_count = 0;

			/* Generate a SHUTDOWN chunk.  */
			new_obj = sctp_make_shutdown(asoc);
			if (!new_obj)
				goto nomem;
			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(new_obj));
			break;

		case SCTP_CMD_CHUNK_ULP:
			/* Send a chunk to the sockets layer.  */
			SCTP_DEBUG_PRINTK("sm_sideff: %s %p, %s %p.\n",
					  "chunk_up:", cmd->obj.ptr,
					  "ulpq:", &asoc->ulpq);
			sctp_ulpq_tail_data(&asoc->ulpq, cmd->obj.ptr,
					    GFP_ATOMIC);
			break;

		case SCTP_CMD_EVENT_ULP:
			/* Send a notification to the sockets layer.  */
			SCTP_DEBUG_PRINTK("sm_sideff: %s %p, %s %p.\n",
					  "event_up:", cmd->obj.ptr,
					  "ulpq:", &asoc->ulpq);
			sctp_ulpq_tail_event(&asoc->ulpq, cmd->obj.ptr);
			break;

		case SCTP_CMD_REPLY:
			/* Send a chunk to our peer.  */
			error = sctp_outq_tail(&asoc->outqueue, cmd->obj.ptr);
			break;

		case SCTP_CMD_SEND_PKT:
			/* Send a full packet to our peer.  */
			packet = cmd->obj.ptr;
			sctp_packet_transmit(packet);
			sctp_ootb_pkt_free(packet);
			break;

		case SCTP_CMD_RETRAN:
			/* Mark a transport for retransmission.  */
			sctp_retransmit(&asoc->outqueue, cmd->obj.transport,
					SCTP_RTXR_T3_RTX);
			break;

		case SCTP_CMD_TRANSMIT:
			/* Kick start transmission. */
			error = sctp_outq_flush(&asoc->outqueue, 0);
			break;

		case SCTP_CMD_ECN_CE:
			/* Do delayed CE processing.   */
			sctp_do_ecn_ce_work(asoc, cmd->obj.u32);
			break;

		case SCTP_CMD_ECN_ECNE:
			/* Do delayed ECNE processing. */
			new_obj = sctp_do_ecn_ecne_work(asoc, cmd->obj.u32,
							chunk);
			if (new_obj)
				sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
						SCTP_CHUNK(new_obj));
			break;

		case SCTP_CMD_ECN_CWR:
			/* Do delayed CWR processing.  */
			sctp_do_ecn_cwr_work(asoc, cmd->obj.u32);
			break;

		case SCTP_CMD_SETUP_T2:
			sctp_cmd_setup_t2(commands, asoc, cmd->obj.ptr);
			break;

		case SCTP_CMD_TIMER_START:
			timer = &asoc->timers[cmd->obj.to];
			timeout = asoc->timeouts[cmd->obj.to];
			if (!timeout)
				BUG();

			timer->expires = jiffies + timeout;
			sctp_association_hold(asoc);
			add_timer(timer);
			break;

		case SCTP_CMD_TIMER_RESTART:
			timer = &asoc->timers[cmd->obj.to];
			timeout = asoc->timeouts[cmd->obj.to];
			if (!mod_timer(timer, jiffies + timeout))
				sctp_association_hold(asoc);
			break;

		case SCTP_CMD_TIMER_STOP:
			timer = &asoc->timers[cmd->obj.to];
			if (timer_pending(timer) && del_timer(timer))
				sctp_association_put(asoc);
			break;

		case SCTP_CMD_INIT_RESTART:
			/* Do the needed accounting and updates
			 * associated with restarting an initialization
			 * timer.
			 */
			asoc->counters[SCTP_COUNTER_INIT_ERROR]++;
			asoc->timeouts[cmd->obj.to] *= 2;
			if (asoc->timeouts[cmd->obj.to] >
			    asoc->max_init_timeo) {
				asoc->timeouts[cmd->obj.to] =
					asoc->max_init_timeo;
			}

			/* If we've sent any data bundled with
			 * COOKIE-ECHO we need to resend.
			 */
			list_for_each(pos, &asoc->peer.transport_addr_list) {
				t = list_entry(pos, struct sctp_transport,
					       transports);
				sctp_retransmit_mark(&asoc->outqueue, t, 0);
			}

			sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
					SCTP_TO(cmd->obj.to));
			break;

		case SCTP_CMD_INIT_FAILED:
			sctp_cmd_init_failed(commands, asoc, cmd->obj.u32);
			break;

		case SCTP_CMD_ASSOC_FAILED:
			sctp_cmd_assoc_failed(commands, asoc, event_type,
					      subtype, chunk, cmd->obj.u32);
			break;

		case SCTP_CMD_COUNTER_INC:
			asoc->counters[cmd->obj.counter]++;
			break;

		case SCTP_CMD_COUNTER_RESET:
			asoc->counters[cmd->obj.counter] = 0;
			break;

		case SCTP_CMD_REPORT_DUP:
			sctp_tsnmap_mark_dup(&asoc->peer.tsn_map,
					     cmd->obj.u32);
			break;

		case SCTP_CMD_REPORT_BAD_TAG:
			SCTP_DEBUG_PRINTK("vtag mismatch!\n");
			break;

		case SCTP_CMD_STRIKE:
			/* Mark one strike against a transport.  */
			sctp_do_8_2_transport_strike(asoc,
						     cmd->obj.transport);
			break;

		case SCTP_CMD_TRANSPORT_RESET:
			t = cmd->obj.transport;
			sctp_cmd_transport_reset(commands, asoc, t);
			break;

		case SCTP_CMD_TRANSPORT_ON:
			t = cmd->obj.transport;
			sctp_cmd_transport_on(commands, asoc, t, chunk);
			break;

		case SCTP_CMD_HB_TIMERS_START:
			sctp_cmd_hb_timers_start(commands, asoc);
			break;

		case SCTP_CMD_HB_TIMER_UPDATE:
			t = cmd->obj.transport;
			sctp_cmd_hb_timer_update(commands, asoc, t);
			break;

		case SCTP_CMD_HB_TIMERS_STOP:
			sctp_cmd_hb_timers_stop(commands, asoc);
			break;

		case SCTP_CMD_REPORT_ERROR:
			error = cmd->obj.error;
			break;

		case SCTP_CMD_PROCESS_CTSN:
			/* Dummy up a SACK for processing. */
			sackh.cum_tsn_ack = cmd->obj.u32;
			sackh.a_rwnd = 0;
			sackh.num_gap_ack_blocks = 0;
			sackh.num_dup_tsns = 0;
			sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK,
					SCTP_SACKH(&sackh));
			break;

		case SCTP_CMD_DISCARD_PACKET:
			/* We need to discard the whole packet.  */
			chunk->pdiscard = 1;
			break;

		case SCTP_CMD_RTO_PENDING:
			t = cmd->obj.transport;
			t->rto_pending = 1;
			break;

		case SCTP_CMD_PART_DELIVER:
			sctp_ulpq_partial_delivery(&asoc->ulpq, cmd->obj.ptr,
						   GFP_ATOMIC);
			break;

		case SCTP_CMD_RENEGE:
			sctp_ulpq_renege(&asoc->ulpq, cmd->obj.ptr,
					 GFP_ATOMIC);
			break;

		default:
			printk(KERN_WARNING "Impossible command: %u, %p\n",
			       cmd->verb, cmd->obj.ptr);
			break;
		};
		if (error)
			return error;
	}

	return error;

nomem:
	error = -ENOMEM;
	return error;
}
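sctp_do_8_2_transport_strike() applies rule E2 of RFC 2960: each strike doubles the RTO, bounded by RTO.max. A small worked example of that backoff, using millisecond values for readability (the kernel keeps these in jiffies):

#include <stdio.h>

/* RFC 2960 rule E2 as applied above: RTO <- min(RTO * 2, RTO.max).
 * The millisecond values below are only an illustration.
 */
static unsigned long rto_backoff(unsigned long rto, unsigned long rto_max)
{
	unsigned long doubled = rto * 2;
	return doubled < rto_max ? doubled : rto_max;
}

int main(void)
{
	unsigned long rto = 1000, rto_max = 60000; /* RTO.initial = 1 s, RTO.max = 60 s */
	int strike;

	for (strike = 1; strike <= 8; strike++) {
		rto = rto_backoff(rto, rto_max);
		printf("strike %d: rto=%lums\n", strike, rto);
	}
	/* 2000, 4000, 8000, 16000, 32000, then pinned at 60000 */
	return 0;
}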
/* A helper function for delayed processing of INET ECN CE bit. */
static void sctp_do_ecn_ce_work(sctp_association_t *asoc, __u32 lowest_tsn)
{
	/* Save the TSN away for comparison when we receive CWR */
	asoc->last_ecne_tsn = lowest_tsn;
	asoc->need_ecne = 1;
}

/* Helper function to update the heartbeat timer. */
static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds,
				     sctp_association_t *asoc,
				     struct sctp_transport *t)
{
	/* Update the heartbeat timer.  */
	if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
		sctp_transport_hold(t);
}

/* Helper function to handle the reception of an HEARTBEAT ACK.  */
static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
				  sctp_association_t *asoc,
				  struct sctp_transport *t,
				  sctp_chunk_t *chunk)
{
	sctp_sender_hb_info_t *hbinfo;

	/* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of the
	 * HEARTBEAT should clear the error counter of the destination
	 * transport address to which the HEARTBEAT was sent.
	 * The association's overall error count is also cleared.
	 */
	t->error_count = 0;
	t->asoc->overall_error_count = 0;

	/* Mark the destination transport address as active if it is not so
	 * marked.
	 */
	if (!t->active)
		sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
					     SCTP_HEARTBEAT_SUCCESS);

	/* The receiver of the HEARTBEAT ACK should also perform an
	 * RTT measurement for that destination transport address
	 * using the time value carried in the HEARTBEAT ACK chunk.
	 */
	hbinfo = (sctp_sender_hb_info_t *) chunk->skb->data;
	sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at));
}

/* Helper function to do a transport reset at the expiry of the heartbeat
 * timer.
 */
static void sctp_cmd_transport_reset(sctp_cmd_seq_t *cmds,
				     sctp_association_t *asoc,
				     struct sctp_transport *t)
{
	sctp_transport_lower_cwnd(t, SCTP_LOWER_CWND_INACTIVE);

	/* Mark one strike against a transport.  */
	sctp_do_8_2_transport_strike(asoc, t);
}

/* Helper function to process the process SACK command.  */
static int sctp_cmd_process_sack(sctp_cmd_seq_t *cmds,
				 sctp_association_t *asoc,
				 sctp_sackhdr_t *sackh)
{
	int err;

	if (sctp_outq_sack(&asoc->outqueue, sackh)) {
		/* There are no more TSNs awaiting SACK.  */
		err = sctp_do_sm(SCTP_EVENT_T_OTHER,
				 SCTP_ST_OTHER(SCTP_EVENT_NO_PENDING_TSN),
				 asoc->state, asoc->ep, asoc, NULL,
				 GFP_ATOMIC);
	} else {
		/* Windows may have opened, so we need
		 * to check if we have DATA to transmit
		 */
		err = sctp_outq_flush(&asoc->outqueue, 0);
	}

	return err;
}

/* Handle a duplicate TSN. */
void sctp_do_TSNdup(sctp_association_t *asoc, sctp_chunk_t *chunk, long gap)
{
#if 0
	sctp_chunk_t *sack;

	/* Caution:  gap < 2 * SCTP_TSN_MAP_SIZE
	 * so gap can be negative.
	 *
	 * --xguo
	 */

	/* Count this TSN.  */
	if (gap < SCTP_TSN_MAP_SIZE) {
		asoc->peer.tsn_map[gap]++;
	} else {
		asoc->peer.tsn_map_overflow[gap - SCTP_TSN_MAP_SIZE]++;
	}

	/* From 6.2 Acknowledgement on Reception of DATA Chunks
	 *
	 * When a packet arrives with duplicate DATA chunk(s)
	 * and with no new DATA chunk(s), the endpoint MUST
	 * immediately send a SACK with no delay.  If a packet
	 * arrives with duplicate DATA chunk(s) bundled with
	 * new DATA chunks, the endpoint MAY immediately send a
	 * SACK.  Normally receipt of duplicate DATA chunks
	 * will occur when the original SACK chunk was lost and
	 * the peer's RTO has expired.  The duplicate TSN
	 * number(s) SHOULD be reported in the SACK as
	 * duplicate.
	 */
	asoc->counters[SctpCounterAckState] = 2;
#endif /* 0 */
} /* sctp_do_TSNdup() */

/* Helper function to set the timeout value for T2-SHUTDOWN timer and to set
 * the transport for a shutdown chunk.
 */
static void sctp_cmd_setup_t2(sctp_cmd_seq_t *cmds, sctp_association_t *asoc,
			      sctp_chunk_t *chunk)
{
	struct sctp_transport *t;

	t = sctp_assoc_choose_shutdown_transport(asoc);
	asoc->shutdown_last_sent_to = t;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = t->rto;
	chunk->transport = t;
}

/* Helper function to change the state of an association. */
static void sctp_cmd_new_state(sctp_cmd_seq_t *cmds, sctp_association_t *asoc,
			       sctp_state_t state)
{
	struct sock *sk = asoc->base.sk;
	struct sctp_opt *sp = sctp_sk(sk);

	asoc->state = state;
	asoc->state_timestamp = jiffies;

	if ((SCTP_STATE_ESTABLISHED == asoc->state) ||
	    (SCTP_STATE_CLOSED == asoc->state)) {
		/* Wake up any processes waiting in the asoc's wait queue in
		 * sctp_wait_for_connect() or sctp_wait_for_sndbuf().
		 */
		if (waitqueue_active(&asoc->wait))
			wake_up_interruptible(&asoc->wait);

		/* Wake up any processes waiting in the sk's sleep queue of
		 * a TCP-style or UDP-style peeled-off socket in
		 * sctp_wait_for_accept() or sctp_wait_for_packet().
		 * For a UDP-style socket, the waiters are woken up by the
		 * notifications.
		 */
		if (SCTP_SOCKET_UDP != sp->type)
			sk->state_change(sk);
	}

	/* Change the sk->state of a TCP-style socket that has successfully
	 * completed a connect() call.
	 */
	if ((SCTP_STATE_ESTABLISHED == asoc->state) &&
	    (SCTP_SOCKET_TCP == sp->type) && (SCTP_SS_CLOSED == sk->state))
		sk->state = SCTP_SS_ESTABLISHED;
}
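sctp_cmd_transport_on() can compute an RTT sample statelessly because the HEARTBEAT payload carries its own send timestamp and the HEARTBEAT ACK echoes it back; the sample is simply jiffies - hbinfo->sent_at. A user-space sketch of the same echo-the-timestamp idea, assuming CLOCK_MONOTONIC in place of jiffies (struct names are illustrative):

#include <stdio.h>
#include <time.h>

/* The HEARTBEAT chunk embeds its send time; the peer echoes the
 * payload back verbatim, so RTT = now - echoed timestamp and no
 * per-transport state is needed to time the probe.
 */
struct toy_hb_info { struct timespec sent_at; };

static struct timespec now(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts;
}

int main(void)
{
	struct toy_hb_info hb, echoed;
	struct timespec t;
	double rtt_ms;

	hb.sent_at = now();	/* stamped into the HEARTBEAT        */
	echoed = hb;		/* peer echoes the payload verbatim  */

	t = now();		/* HEARTBEAT ACK arrives             */
	rtt_ms = (t.tv_sec - echoed.sent_at.tv_sec) * 1e3 +
		 (t.tv_nsec - echoed.sent_at.tv_nsec) / 1e6;
	printf("rtt sample: %.3f ms\n", rtt_ms);
	return 0;
}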
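A recurring theme of this commit is that failure commands now carry their error cause as a command argument: the interpreter reads cmd->obj.u32 for SCTP_CMD_INIT_FAILED and SCTP_CMD_ASSOC_FAILED, and the sm_statefuns.c hunks below switch callers from SCTP_NULL() to SCTP_U32(...). A minimal sketch of that tagged-argument pattern; the toy_* names and the cause value are illustrative, not the kernel's definitions:

#include <stdio.h>
#include <stdint.h>

/* Sketch of the command-argument union idea behind SCTP_U32()/SCTP_NULL():
 * a state function packs the error cause into the command it queues, and
 * the interpreter just unpacks cmd.obj.u32.
 */
typedef union { void *ptr; uint32_t u32; } toy_arg_t;
typedef struct { int verb; toy_arg_t obj; } toy_cmd_t;

enum { TOY_CMD_INIT_FAILED = 1 };
#define TOY_U32(v)  ((toy_arg_t){ .u32 = (v) })

static void toy_interpret(const toy_cmd_t *cmd)
{
	if (cmd->verb == TOY_CMD_INIT_FAILED)
		printf("init failed, cause=0x%x\n", cmd->obj.u32);
}

int main(void)
{
	/* The caller supplies the cause, as the new statefuns below do;
	 * 0x17 is an arbitrary example value.
	 */
	toy_cmd_t cmd = { TOY_CMD_INIT_FAILED, TOY_U32(0x17) };

	toy_interpret(&cmd);
	return 0;
}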
net/sctp/sm_statefuns.c
View file @
e35a3e9a
...
...
@@ -191,14 +191,9 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const sctp_endpoint_t *ep,
sctp_chunk_t
*
err_chunk
;
struct
sctp_packet
*
packet
;
sctp_unrecognized_param_t
*
unk_param
;
struct
sock
*
sk
;
int
len
;
/* If the packet is an OOTB packet which is temporarily on the
* control endpoint, respond with an ABORT.
*/
if
(
ep
==
sctp_sk
((
sctp_get_ctl_sock
()))
->
ep
)
return
sctp_sf_ootb
(
ep
,
asoc
,
type
,
arg
,
commands
);
/* 6.10 Bundling
* An endpoint MUST NOT bundle INIT, INIT ACK or
* SHUTDOWN COMPLETE with any other chunks.
@@ -206,6 +201,22 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const sctp_endpoint_t *ep,
 	if (!chunk->singleton)
 		return SCTP_DISPOSITION_VIOLATION;
 
+	/* If the packet is an OOTB packet which is temporarily on the
+	 * control endpoint, respond with an ABORT.
+	 */
+	if (ep == sctp_sk((sctp_get_ctl_sock()))->ep)
+		return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
+
+	sk = ep->base.sk;
+
+	/* If the endpoint is not listening or if the number of associations
+	 * on the TCP-style socket exceed the max backlog, respond with an
+	 * ABORT.
+	 */
+	if ((SCTP_SS_LISTENING != sk->state) ||
+	    ((SCTP_SOCKET_TCP == sctp_sk(sk)->type) &&
+	     (sk->ack_backlog >= sk->max_ack_backlog)))
+		return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
+
 	/* Verify the INIT chunk before processing it. */
 	err_chunk = NULL;
 	if (!sctp_verify_init(asoc, chunk->chunk_hdr->type,
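The guard added above refuses new INITs on sockets that are not listening, and on TCP-style sockets whose association backlog is already full. A standalone sketch of that admission test follows; the struct below is a simplified stand-in for the handful of struct sock fields the check reads, not the kernel's definitions.

#include <stdbool.h>

struct listen_sock {
	bool listening;		/* sk->state == SCTP_SS_LISTENING */
	bool tcp_style;		/* sctp_sk(sk)->type == SCTP_SOCKET_TCP */
	int  ack_backlog;	/* associations already queued */
	int  max_ack_backlog;	/* backlog given to listen() */
};

/* Returns true when a new INIT may proceed, mirroring the test above. */
static bool may_accept_init(const struct listen_sock *sk)
{
	if (!sk->listening)
		return false;
	if (sk->tcp_style && sk->ack_backlog >= sk->max_ack_backlog)
		return false;
	return true;
}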
@@ -249,8 +260,8 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const sctp_endpoint_t *ep,
 	/* The call, sctp_process_init(), can fail on memory allocation. */
 	if (!sctp_process_init(new_asoc, chunk->chunk_hdr->type,
-			sctp_source(chunk),
-			(sctp_init_chunk_t *)chunk->chunk_hdr,
+			       sctp_source(chunk),
+			       (sctp_init_chunk_t *)chunk->chunk_hdr,
 			       GFP_ATOMIC))
 		goto nomem_init;
@@ -729,7 +740,8 @@ sctp_disposition_t sctp_sf_sendbeat_8_3(const sctp_endpoint_t *ep,
 	if (asoc->overall_error_count >= asoc->overall_error_threshold) {
 		/* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
-		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_NULL());
+		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
+				SCTP_U32(SCTP_ERROR_NO_ERROR));
 		SCTP_INC_STATS(SctpAborteds);
 		SCTP_DEC_STATS(SctpCurrEstab);
 		return SCTP_DISPOSITION_DELETE_TCB;
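This is the first of many hunks in this file replacing SCTP_NULL() with SCTP_U32(...): the ASSOC_FAILED and INIT_FAILED commands now carry the SCTP error cause, so the event delivered to the user can say why the association died. SCTP_NULL() and SCTP_U32() are constructor macros for the command argument union; here is a simplified stand-in for the idiom (names invented, not the kernel's sctp_arg_t):

#include <stdint.h>
#include <stddef.h>

/* One command slot can carry arguments of different types: a union plus
 * small constructor macros, in the style of SCTP_NULL()/SCTP_U32().
 */
typedef union {
	void *ptr;
	uint32_t u32;
} arg_t;

#define ARG_NULL()  ((arg_t){ .ptr = NULL })
#define ARG_U32(v)  ((arg_t){ .u32 = (uint32_t)(v) })

/* Usage: queue_cmd(CMD_ASSOC_FAILED, ARG_U32(cause)) instead of
 * queue_cmd(CMD_ASSOC_FAILED, ARG_NULL()), so the failure handler can
 * read arg.u32 back as the cause code.
 */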
@@ -1379,7 +1391,8 @@ static sctp_disposition_t sctp_sf_do_dupcook_a(const sctp_endpoint_t *ep,
 	peer_init = &chunk->subh.cookie_hdr->c.peer_init[0];
 	if (!sctp_process_init(new_asoc, chunk->chunk_hdr->type,
-			       sctp_source(chunk), peer_init, GFP_ATOMIC))
+			       sctp_source(chunk), peer_init,
+			       GFP_ATOMIC))
 		goto nomem;
 
 	/* Make sure no new addresses are being added during the
@@ -1444,7 +1457,8 @@ static sctp_disposition_t sctp_sf_do_dupcook_b(const sctp_endpoint_t *ep,
 	 */
 	peer_init = &chunk->subh.cookie_hdr->c.peer_init[0];
 	if (!sctp_process_init(new_asoc, chunk->chunk_hdr->type,
-			       sctp_source(chunk), peer_init, GFP_ATOMIC))
+			       sctp_source(chunk), peer_init,
+			       GFP_ATOMIC))
 		goto nomem;
 
 	/* Update the content of current association. */
@@ -1772,14 +1786,16 @@ sctp_disposition_t sctp_sf_cookie_echoed_err(const sctp_endpoint_t *ep,
 	sctp_chunk_t *chunk = arg;
 	sctp_errhdr_t *err;
 
+	err = (sctp_errhdr_t *)(chunk->skb->data);
+
 	/* If we have gotten too many failures, give up. */
 	if (1 + asoc->counters[SCTP_COUNTER_INIT_ERROR] >
 	    asoc->max_init_attempts) {
 		/* INIT_FAILED will issue an ulpevent. */
-		sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, SCTP_NULL());
+		sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
+				SCTP_U32(err->cause));
 		return SCTP_DISPOSITION_DELETE_TCB;
 	}
 
-	err = (sctp_errhdr_t *)(chunk->skb->data);
-
 	/* Process the error here */
 	switch (err->cause) {
@@ -1834,7 +1850,8 @@ sctp_disposition_t sctp_sf_do_5_2_6_stale(const sctp_endpoint_t *ep,
 	attempts = asoc->counters[SCTP_COUNTER_INIT_ERROR] + 1;
 
 	if (attempts >= asoc->max_init_attempts) {
-		sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, SCTP_NULL());
+		sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
+				SCTP_U32(SCTP_ERROR_STALE_COOKIE));
 		return SCTP_DISPOSITION_DELETE_TCB;
 	}
@@ -1936,12 +1953,18 @@ sctp_disposition_t sctp_sf_do_9_1_abort(const sctp_endpoint_t *ep,
 					sctp_cmd_seq_t *commands)
 {
 	sctp_chunk_t *chunk = arg;
+	__u16 error = SCTP_ERROR_NO_ERROR;
 
 	if (!sctp_vtag_verify_either(chunk, asoc))
 		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
 
+	if (chunk && (ntohs(chunk->chunk_hdr->length) >=
+		      (sizeof(struct sctp_chunkhdr) +
+		       sizeof(struct sctp_errhdr))))
+		error = ((sctp_errhdr_t *)chunk->skb->data)->cause;
+
 	/* ASSOC_FAILED will DELETE_TCB. */
-	sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_NULL());
+	sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_U32(error));
 	SCTP_INC_STATS(SctpAborteds);
 	SCTP_DEC_STATS(SctpCurrEstab);
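Before reading a cause code out of a received ABORT, the new code checks that the chunk is long enough to hold an error header; an ABORT carrying zero error causes is legal, and dereferencing past the chunk header would read garbage. A user-space sketch of that length check, with simplified stand-ins for the on-wire sctp_chunkhdr/sctp_errhdr layouts (not the kernel's declarations):

#include <stdint.h>
#include <arpa/inet.h>

struct chunkhdr { uint8_t type, flags; uint16_t length; };
struct errhdr   { uint16_t cause, length; };

/* Return the first error cause (network byte order), or 0 when the
 * chunk carries only the bare chunk header.
 */
static uint16_t abort_cause(const struct chunkhdr *ch)
{
	const uint8_t *payload = (const uint8_t *)(ch + 1);

	if (ntohs(ch->length) >= sizeof(struct chunkhdr) + sizeof(struct errhdr))
		return ((const struct errhdr *)payload)->cause;
	return 0;
}

int main(void)
{
	struct {
		struct chunkhdr ch;
		struct errhdr err;
	} abort_chunk = {
		{ 6 /* ABORT */, 0,
		  htons(sizeof(struct chunkhdr) + sizeof(struct errhdr)) },
		{ htons(42) /* arbitrary example cause value */, htons(4) }
	};

	return abort_cause(&abort_chunk.ch) == htons(42) ? 0 : 1;
}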
@@ -1961,6 +1984,7 @@ sctp_disposition_t sctp_sf_cookie_wait_abort(const sctp_endpoint_t *ep,
 					     sctp_cmd_seq_t *commands)
 {
 	sctp_chunk_t *chunk = arg;
+	__u16 error = SCTP_ERROR_NO_ERROR;
 
 	if (!sctp_vtag_verify_either(chunk, asoc))
 		return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
@@ -1971,10 +1995,14 @@ sctp_disposition_t sctp_sf_cookie_wait_abort(const sctp_endpoint_t *ep,
 	sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
 			SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
 
+	if (chunk && (ntohs(chunk->chunk_hdr->length) >=
+		      (sizeof(struct sctp_chunkhdr) +
+		       sizeof(struct sctp_errhdr))))
+		error = ((sctp_errhdr_t *)chunk->skb->data)->cause;
+
 	/* CMD_INIT_FAILED will DELETE_TCB. */
-	sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, SCTP_NULL());
+	sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, SCTP_U32(error));
 
 	/* BUG? This does not look complete... */
 	return SCTP_DISPOSITION_ABORT;
 }
@@ -2381,7 +2409,8 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const sctp_endpoint_t *ep,
 	 * processing the rest of the chunks in the packet.
 	 */
 	sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
-	sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_NULL());
+	sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
+			SCTP_U32(SCTP_ERROR_NO_DATA));
 	SCTP_INC_STATS(SctpAborteds);
 	SCTP_DEC_STATS(SctpCurrEstab);
 	return SCTP_DISPOSITION_CONSUME;
@@ -2596,7 +2625,8 @@ sctp_disposition_t sctp_sf_eat_data_fast_4_4(const sctp_endpoint_t *ep,
 	 * processing the rest of the chunks in the packet.
 	 */
 	sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
-	sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_NULL());
+	sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
+			SCTP_U32(SCTP_ERROR_NO_DATA));
 	SCTP_INC_STATS(SctpAborteds);
 	SCTP_DEC_STATS(SctpCurrEstab);
 	return SCTP_DISPOSITION_CONSUME;
@@ -3547,7 +3577,8 @@ sctp_disposition_t sctp_sf_do_9_1_prm_abort(const sctp_endpoint_t *ep,
 	 */
 	/* Delete the established association. */
-	sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_NULL());
+	sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
+			SCTP_U32(SCTP_ERROR_USER_ABORT));
 	SCTP_INC_STATS(SctpAborteds);
 	SCTP_DEC_STATS(SctpCurrEstab);
@@ -3686,7 +3717,8 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_abort(const sctp_endpoint_t *ep,
 	 */
 	/* Delete the established association. */
-	sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, SCTP_NULL());
+	sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
+			SCTP_U32(SCTP_ERROR_USER_ABORT));
 
 	return retval;
 }
@@ -4012,7 +4044,8 @@ sctp_disposition_t sctp_sf_do_6_3_3_rtx(const sctp_endpoint_t *ep,
 	if (asoc->overall_error_count >= asoc->overall_error_threshold) {
 		/* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
-		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_NULL());
+		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
+				SCTP_U32(SCTP_ERROR_NO_ERROR));
 		SCTP_INC_STATS(SctpAborteds);
 		SCTP_DEC_STATS(SctpCurrEstab);
 		return SCTP_DISPOSITION_DELETE_TCB;
@@ -4147,7 +4180,8 @@ sctp_disposition_t sctp_sf_t1_timer_expire(const sctp_endpoint_t *ep,
 				SCTP_TO(timer));
 		sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
 	} else {
-		sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, SCTP_NULL());
+		sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
+				SCTP_U32(SCTP_ERROR_NO_ERROR));
 		return SCTP_DISPOSITION_DELETE_TCB;
 	}
@@ -4181,7 +4215,8 @@ sctp_disposition_t sctp_sf_t2_timer_expire(const sctp_endpoint_t *ep,
 	SCTP_DEBUG_PRINTK("Timer T2 expired.\n");
 	if (asoc->overall_error_count >= asoc->overall_error_threshold) {
 		/* Note: CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
-		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_NULL());
+		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
+				SCTP_U32(SCTP_ERROR_NO_ERROR));
 		SCTP_INC_STATS(SctpAborteds);
 		SCTP_DEC_STATS(SctpCurrEstab);
 		return SCTP_DISPOSITION_DELETE_TCB;
@@ -4244,7 +4279,8 @@ sctp_disposition_t sctp_sf_t5_timer_expire(const sctp_endpoint_t *ep,
 		goto nomem;
 
 	sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
-	sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_NULL());
+	sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
+			SCTP_U32(SCTP_ERROR_NO_ERROR));
 
 	return SCTP_DISPOSITION_DELETE_TCB;
 nomem:
net/sctp/socket.c (View file @ e35a3e9a)
@@ -244,9 +244,6 @@ SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
 	if (!snum)
 		snum = inet_sk(sk)->num;
 
 	/* Add the address to the bind address list. */
 	sctp_local_bh_disable();
 	sctp_write_lock(&ep->base.addr_lock);
@@ -257,7 +254,6 @@ SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
-	addr->v4.sin_port = htons(addr->v4.sin_port);
 	if (!ret && !bp->port)
 		bp->port = snum;
 	sctp_write_unlock(&ep->base.addr_lock);
 	sctp_local_bh_enable();
@@ -2750,6 +2746,9 @@ int sctp_inet_listen(struct socket *sock, int backlog)
 	err = -EINVAL;
 	if (sock->state != SS_UNCONNECTED)
 		goto out;
 
+	if (unlikely(backlog < 0))
+		goto out;
+
 	switch (sock->type) {
 	case SOCK_SEQPACKET:
 		err = sctp_seqpacket_listen(sk, backlog);
@@ -3152,7 +3151,10 @@ static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr,
 		return -EINVAL;
 
 	/* Is this a valid SCTP address? */
-	if (!af->addr_valid((union sctp_addr *)addr))
+	if (!af->addr_valid(addr))
+		return -EINVAL;
+
+	if (!sctp_sk(sk)->pf->send_verify(sctp_sk(sk), (addr)))
 		return -EINVAL;
 
 	return 0;
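Address verification now happens in two stages through per-family ops tables: af->addr_valid() checks that the address is well formed for its address family, and pf->send_verify() then applies socket-level policy that depends on the protocol family of the socket. A sketch of the dispatch shape follows; the types are invented stand-ins for the kernel's sctp_af/sctp_pf tables, which carry many more hooks.

#include <stdbool.h>

struct addr { int family; unsigned char bytes[16]; };

struct af_ops {
	bool (*addr_valid)(const struct addr *a);
};

struct pf_ops {
	bool (*send_verify)(void *sk_priv, const struct addr *a);
};

static int verify_addr(void *sk_priv, const struct af_ops *af,
		       const struct pf_ops *pf, const struct addr *a)
{
	if (!af->addr_valid(a))
		return -1;	/* -EINVAL in the kernel */
	if (!pf->send_verify(sk_priv, a))
		return -1;
	return 0;
}

The kernel reaches roughly the same shape through the sctp_af tables registered per address family and the sctp_pf chosen per socket, so adding a family means filling in a table rather than editing the verify path.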
net/sctp/ulpevent.c (View file @ e35a3e9a)
@@ -628,6 +628,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(sctp_association_t *asoc,
 	if (!event)
 		goto fail_init;
+	event->iif = sctp_chunk_iif(chunk);
 
 	/* Note: Not clearing the entire event struct as
 	 * this is just a fragment of the real event. However,
 	 * we still need to do rwnd accounting.