Commit 276ca536
Authored Nov 03, 2004 by Linus Torvalds

    Merge http://lia64.bkbits.net/linux-ia64-release-2.6.10
    into ppc970.osdl.org:/home/torvalds/v2.6/linux

Parents: 148b43d6, cde1321b
Changes: 28 changed files with 543 additions and 269 deletions (+543 / -269)
fs/ext2/dir.c               +2   -1
include/linux/divert.h      +2   -0
include/linux/filter.h      +6   -0
include/linux/icmp.h        +2   -0
include/linux/inet.h        +2   -0
include/linux/inetdevice.h  +3   -0
include/linux/rtnetlink.h   +1   -0
include/net/act_api.h       +11  -2
include/net/iw_handler.h    +3   -0
include/net/neighbour.h     +1   -1
include/net/pkt_act.h       +3   -4
net/core/dev.c              +14  -9
net/core/gen_stats.c        +1   -0
net/core/neighbour.c        +15  -10
net/ipv4/Kconfig            +4   -0
net/ipv4/tcp_diag.c         +10  -16
net/ipv6/ndisc.c            +0   -7
net/netlink/af_netlink.c    +374 -109
net/sched/act_api.c         +26  -3
net/sched/cls_fw.c          +2   -4
net/sched/cls_route.c       +2   -4
net/sched/cls_rsvp.h        +2   -4
net/sched/cls_u32.c         +2   -4
net/sched/gact.c            +4   -16
net/sched/ipt.c             +5   -18
net/sched/mirred.c          +5   -18
net/sched/pedit.c           +3   -15
net/sched/police.c          +38  -24
fs/ext2/dir.c
@@ -275,7 +275,8 @@ ext2_readdir (struct file * filp, void * dirent, filldir_t filldir)
 				   "bad page in #%lu",
 				   inode->i_ino);
 			filp->f_pos += PAGE_CACHE_SIZE - offset;
-			continue;
+			ret = -EIO;
+			goto done;
 		}
 		kaddr = page_address(page);
 		if (need_revalidate) {
include/linux/divert.h
@@ -109,6 +109,8 @@ struct divert_cf
+#include <linux/skbuff.h>
 
 #ifdef CONFIG_NET_DIVERT
+#include <linux/netdevice.h>
 
 int alloc_divert_blk(struct net_device *);
 void free_divert_blk(struct net_device *);
 int divert_ioctl(unsigned int cmd, struct divert_cf __user *arg);
include/linux/filter.h
@@ -6,6 +6,9 @@
 #define __LINUX_FILTER_H__
 
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <asm/atomic.h>
 
 /*
  * Current version of the filter code architecture.
@@ -135,6 +138,9 @@ static inline unsigned int sk_filter_len(struct sk_filter *fp)
 #define SKF_LL_OFF    (-0x200000)
 
 #ifdef __KERNEL__
+struct sk_buff;
+struct sock;
+
 extern int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen);
 extern int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
 extern int sk_chk_filter(struct sock_filter *filter, int flen);
include/linux/icmp.h
@@ -17,6 +17,8 @@
 #ifndef _LINUX_ICMP_H
 #define _LINUX_ICMP_H
 
+#include <linux/types.h>
+
 #define ICMP_ECHOREPLY		0	/* Echo Reply			*/
 #define ICMP_DEST_UNREACH	3	/* Destination Unreachable	*/
 #define ICMP_SOURCE_QUENCH	4	/* Source Quench		*/
include/linux/inet.h
@@ -43,6 +43,8 @@
 #define _LINUX_INET_H
 
 #ifdef __KERNEL__
+#include <linux/types.h>
+
 extern __u32 in_aton(const char *str);
 
 #endif
 #endif	/* _LINUX_INET_H */
include/linux/inetdevice.h
@@ -3,7 +3,10 @@
 #ifdef __KERNEL__
 
+#include <linux/if.h>
 #include <linux/netdevice.h>
 #include <linux/rcupdate.h>
+#include <linux/timer.h>
+
 struct ipv4_devconf
 {
include/linux/rtnetlink.h
@@ -699,6 +699,7 @@ enum
 	TCA_RATE,
 	TCA_FCNT,
 	TCA_STATS2,
+	TCA_ACT_STATS,
 	__TCA_MAX
 };
include/net/act_api.h
@@ -28,7 +28,9 @@ struct tcf_police
 	struct qdisc_rate_table	*R_tab;
 	struct qdisc_rate_table	*P_tab;
-	struct tc_stats		stats;
+	struct gnet_stats_basic	bstats;
+	struct gnet_stats_queue	qstats;
+	struct gnet_stats_rate_est rate_est;
 	spinlock_t		*stats_lock;
 };
@@ -44,10 +46,16 @@ struct tcf_##name *next; \
 	u32 capab; \
 	int action; \
 	struct tcf_t tm; \
-	struct tc_stats stats; \
+	struct gnet_stats_basic bstats; \
+	struct gnet_stats_queue qstats; \
+	struct gnet_stats_rate_est rate_est; \
 	spinlock_t *stats_lock; \
 	spinlock_t lock
 
+struct tcf_act_hdr
+{
+	tca_gen(act_hdr);
+};
+
 struct tc_action
 {
@@ -95,6 +103,7 @@ extern int qdisc_copy_stats(struct sk_buff *skb, struct tc_stats *st, spinlock_t
 extern void tcf_police_destroy(struct tcf_police *p);
 extern struct tcf_police *tcf_police_locate(struct rtattr *rta, struct rtattr *est);
 extern int tcf_police_dump(struct sk_buff *skb, struct tcf_police *p);
+extern int tcf_police_dump_stats(struct sk_buff *skb, struct tcf_police *p);
 static inline int
 tcf_police_release(struct tcf_police *p, int bind)
include/net/iw_handler.h
@@ -198,6 +198,7 @@
 /***************************** INCLUDES *****************************/
 
 #include <linux/wireless.h>		/* IOCTL user space API */
+#include <linux/if_ether.h>
 
 /***************************** VERSION *****************************/
 /*
@@ -294,6 +295,8 @@ struct iw_request_info
 	__u16		flags;		/* More to come ;-) */
 };
 
+struct net_device;
+
 /*
  * This is how a function handling a Wireless Extension should look
  * like (both get and set, standard and private).
include/net/neighbour.h
@@ -189,7 +189,7 @@ struct neigh_table
 	struct timer_list 	gc_timer;
 	struct timer_list 	proxy_timer;
 	struct sk_buff_head	proxy_queue;
-	int			entries;
+	atomic_t		entries;
 	rwlock_t		lock;
 	unsigned long		last_rand;
 	struct neigh_parms	*parms_list;
include/net/pkt_act.h
@@ -60,7 +60,7 @@ tcf_hash_destroy(struct tcf_st *p)
 			*p1p = p->next;
 			write_unlock_bh(&tcf_t_lock);
 #ifdef CONFIG_NET_ESTIMATOR
-			qdisc_kill_estimator(&p->stats);
+			gen_kill_estimator(&p->bstats, &p->rate_est);
 #endif
 			kfree(p);
 			return;
@@ -256,9 +256,8 @@ tcf_hash_create(struct tc_st *parm, struct rtattr *est, struct tc_action *a, int
 	p->tm.install = jiffies;
 	p->tm.lastuse = jiffies;
 #ifdef CONFIG_NET_ESTIMATOR
-	if (est) {
-		qdisc_new_estimator(&p->stats, p->stats_lock, est);
-	}
+	if (est)
+		gen_new_estimator(&p->bstats, &p->rate_est, p->stats_lock, est);
 #endif
 	h = tcf_hash(p->index);
 	write_lock_bh(&tcf_t_lock);
net/core/dev.c
@@ -1261,11 +1261,6 @@ int dev_queue_xmit(struct sk_buff *skb)
 	struct Qdisc *q;
 	int rc = -ENOMEM;
 
-	/* Disable soft irqs for various locks below. Also
-	 * stops preemption for RCU.
-	 */
-	local_bh_disable();
-
 	if (skb_shinfo(skb)->frag_list &&
 	    !(dev->features & NETIF_F_FRAGLIST) &&
 	    __skb_linearize(skb, GFP_ATOMIC))
@@ -1290,6 +1285,11 @@ int dev_queue_xmit(struct sk_buff *skb)
 		if (skb_checksum_help(skb, 0))
 			goto out_kfree_skb;
 
+	/* Disable soft irqs for various locks below. Also
+	 * stops preemption for RCU.
+	 */
+	local_bh_disable();
+
 	/* Updates of qdisc are serialized by queue_lock.
 	 * The struct Qdisc which is pointed to by qdisc is now a
 	 * rcu structure - it may be accessed without acquiring
@@ -1352,7 +1352,6 @@ int dev_queue_xmit(struct sk_buff *skb)
 			if (net_ratelimit())
 				printk(KERN_CRIT "Virtual device %s asks to "
 				       "queue packet!\n", dev->name);
-			goto out_enetdown;
 		} else {
 			/* Recursion is detected! It is possible,
 			 * unfortunately */
@@ -1361,10 +1360,13 @@ int dev_queue_xmit(struct sk_buff *skb)
 			       "%s, fix it urgently!\n", dev->name);
 		}
 	}
 
-out_enetdown:
 	rc = -ENETDOWN;
 	local_bh_enable();
+
 out_kfree_skb:
 	kfree_skb(skb);
 	return rc;
 out:
 	local_bh_enable();
 	return rc;
@@ -2374,6 +2376,9 @@ static int dev_ifsioc(struct ifreq *ifr, unsigned int cmd)
 			return dev_set_mtu(dev, ifr->ifr_mtu);
 
 		case SIOCGIFHWADDR:
+			if (!dev->addr_len)
+				memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
+			else
 				memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
 				       min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
 			ifr->ifr_hwaddr.sa_family = dev->type;
net/core/gen_stats.c
@@ -125,6 +125,7 @@ gnet_stats_finish_copy(struct gnet_dump *d)
 EXPORT_SYMBOL(gnet_stats_start_copy);
+EXPORT_SYMBOL(gnet_stats_start_copy_compat);
 EXPORT_SYMBOL(gnet_stats_copy_basic);
 EXPORT_SYMBOL(gnet_stats_copy_rate_est);
 EXPORT_SYMBOL(gnet_stats_copy_queue);
net/core/neighbour.c
@@ -254,18 +254,20 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl)
 {
 	struct neighbour *n = NULL;
 	unsigned long now = jiffies;
+	int entries;
 
-	if (tbl->entries > tbl->gc_thresh3 ||
-	    (tbl->entries > tbl->gc_thresh2 &&
+	entries = atomic_inc_return(&tbl->entries) - 1;
+	if (entries >= tbl->gc_thresh3 ||
+	    (entries >= tbl->gc_thresh2 &&
 	     time_after(now, tbl->last_flush + 5 * HZ))) {
 		if (!neigh_forced_gc(tbl) &&
-		    tbl->entries > tbl->gc_thresh3)
-			goto out;
+		    entries >= tbl->gc_thresh3)
+			goto out_entries;
 	}
 
 	n = kmem_cache_alloc(tbl->kmem_cachep, SLAB_ATOMIC);
 	if (!n)
-		goto out;
+		goto out_entries;
 
 	memset(n, 0, tbl->entry_size);
@@ -281,12 +283,15 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl)
 	NEIGH_CACHE_STAT_INC(tbl, allocs);
 	neigh_glbl_allocs++;
-	tbl->entries++;
 	n->tbl = tbl;
 	atomic_set(&n->refcnt, 1);
 	n->dead = 1;
 out:
 	return n;
+
+out_entries:
+	atomic_dec(&tbl->entries);
+	goto out;
 }
 
 static struct neighbour **neigh_hash_alloc(unsigned int entries)
@@ -427,7 +432,7 @@ struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
 	write_lock_bh(&tbl->lock);
-	if (tbl->entries > (tbl->hash_mask + 1))
+	if (atomic_read(&tbl->entries) > (tbl->hash_mask + 1))
 		neigh_hash_grow(tbl, (tbl->hash_mask + 1) << 1);
 
 	hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;
@@ -608,7 +613,7 @@ void neigh_destroy(struct neighbour *neigh)
 	NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);
 
 	neigh_glbl_allocs--;
-	neigh->tbl->entries--;
+	atomic_dec(&neigh->tbl->entries);
 	kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
 }
@@ -1394,7 +1399,7 @@ int neigh_table_clear(struct neigh_table *tbl)
 	del_timer_sync(&tbl->proxy_timer);
 	pneigh_queue_purge(&tbl->proxy_queue);
 	neigh_ifdown(tbl, NULL);
-	if (tbl->entries)
+	if (atomic_read(&tbl->entries))
 		printk(KERN_CRIT "neighbour leakage\n");
 	write_lock(&neigh_tbl_lock);
 	for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
@@ -1951,7 +1956,7 @@ static int neigh_stat_seq_show(struct seq_file *seq, void *v)
 	seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
 			"%08lx %08lx %08lx %08lx\n",
-		   tbl->entries,
+		   atomic_read(&tbl->entries),
 
 		   st->allocs,
 		   st->destroys,
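The neighbour-table hunks above replace the plain `int entries` counter with an atomic_t and base the garbage-collection threshold check on the value returned by atomic_inc_return(), undoing the reservation on the new out_entries: path. The following is a minimal userspace sketch of that reserve-then-check pattern; it is not part of the commit, and the threshold and names are hypothetical stand-ins for tbl->entries and tbl->gc_thresh3.

#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical stand-ins for tbl->entries and tbl->gc_thresh3. */
static atomic_int entries = 0;
static const int gc_thresh3 = 4;

/* Reserve a slot first, then decide; on failure give the slot back.
 * This mirrors the atomic_inc_return()/atomic_dec() pairing added to
 * neigh_alloc() above. */
static int try_alloc(void)
{
	int n = atomic_fetch_add(&entries, 1);  /* previous value, i.e. inc_return - 1 */
	if (n >= gc_thresh3) {
		atomic_fetch_sub(&entries, 1);  /* undo the reservation */
		return -1;
	}
	return 0;
}

int main(void)
{
	for (int i = 0; i < 6; i++)
		printf("alloc %d -> %s\n", i, try_alloc() ? "rejected" : "ok");
	printf("entries = %d\n", atomic_load(&entries));
	return 0;
}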
net/ipv4/Kconfig
@@ -351,6 +351,7 @@ config INET_TUNNEL
 config IP_TCPDIAG
 	tristate "IP: TCP socket monitoring interface"
 	depends on INET
+	default y
 	---help---
 	  Support for TCP socket monitoring interface used by native Linux
@@ -358,5 +359,8 @@ config IP_TCPDIAG
 	  If unsure, say Y.
 
+config IP_TCPDIAG_IPV6
+	def_bool (IP_TCPDIAG=y && IPV6=y) || (IP_TCPDIAG=m && IPV6)
+
 source "net/ipv4/ipvs/Kconfig"
net/ipv4/tcp_diag.c
@@ -103,14 +103,12 @@ static int tcpdiag_fill(struct sk_buff *skb, struct sock *sk,
 	r->tcpdiag_wqueue = 0;
 	r->tcpdiag_uid = 0;
 	r->tcpdiag_inode = 0;
 #ifdef CONFIG_IPV6
 	if (r->tcpdiag_family == AF_INET6) {
 		ipv6_addr_copy((struct in6_addr *)r->id.tcpdiag_src,
 			       &tw->tw_v6_rcv_saddr);
 		ipv6_addr_copy((struct in6_addr *)r->id.tcpdiag_dst,
 			       &tw->tw_v6_daddr);
 	}
 #endif
 	nlh->nlmsg_len = skb->tail - b;
 	return skb->len;
 }
@@ -120,7 +118,6 @@ static int tcpdiag_fill(struct sk_buff *skb, struct sock *sk,
 	r->id.tcpdiag_src[0] = inet->rcv_saddr;
 	r->id.tcpdiag_dst[0] = inet->daddr;
 
-#ifdef CONFIG_IPV6
 	if (r->tcpdiag_family == AF_INET6) {
 		struct ipv6_pinfo *np = inet6_sk(sk);
@@ -129,7 +126,6 @@ static int tcpdiag_fill(struct sk_buff *skb, struct sock *sk,
 		ipv6_addr_copy((struct in6_addr *)r->id.tcpdiag_dst,
 			       &np->daddr);
 	}
-#endif
 
 #define EXPIRES_IN_MS(tmo)	((tmo-jiffies)*1000+HZ-1)/HZ
@@ -188,11 +184,19 @@ static int tcpdiag_fill(struct sk_buff *skb, struct sock *sk,
 	return -1;
 }
 
-extern struct sock *tcp_v4_lookup(u32 saddr, u16 sport, u32 daddr, u16 dport, int dif);
-#ifdef CONFIG_IPV6
+extern struct sock *tcp_v4_lookup(u32 saddr, u16 sport, u32 daddr, u16 dport, int dif);
+#ifdef CONFIG_IP_TCPDIAG_IPV6
 extern struct sock *tcp_v6_lookup(struct in6_addr *saddr, u16 sport,
 				  struct in6_addr *daddr, u16 dport,
 				  int dif);
+#else
+static inline struct sock *tcp_v6_lookup(struct in6_addr *saddr, u16 sport,
+					  struct in6_addr *daddr, u16 dport,
+					  int dif)
+{
+	return NULL;
+}
 #endif
 
 static int tcpdiag_get_exact(struct sk_buff *in_skb, const struct nlmsghdr *nlh)
@@ -207,13 +211,11 @@ static int tcpdiag_get_exact(struct sk_buff *in_skb, const struct nlmsghdr *nlh)
 				   req->id.tcpdiag_src[0],
 				   req->id.tcpdiag_sport,
 				   req->id.tcpdiag_if);
 	}
-#ifdef CONFIG_IPV6
 	else if (req->tcpdiag_family == AF_INET6) {
 		sk = tcp_v6_lookup((struct in6_addr *)req->id.tcpdiag_dst,
 				   req->id.tcpdiag_dport,
 				   (struct in6_addr *)req->id.tcpdiag_src,
 				   req->id.tcpdiag_sport,
 				   req->id.tcpdiag_if);
 	}
-#endif
 	else {
 		return -EINVAL;
 	}
@@ -422,14 +424,12 @@ static int tcpdiag_dump_sock(struct sk_buff *skb, struct sock *sk,
 		struct inet_opt *inet = inet_sk(sk);
 
 		entry.family = sk->sk_family;
 #ifdef CONFIG_IPV6
 		if (entry.family == AF_INET6) {
 			struct ipv6_pinfo *np = inet6_sk(sk);
 
 			entry.saddr = np->rcv_saddr.s6_addr32;
 			entry.daddr = np->daddr.s6_addr32;
 		} else
 #endif
 		{
 			entry.saddr = &inet->rcv_saddr;
 			entry.daddr = &inet->daddr;
@@ -482,14 +482,12 @@ static int tcpdiag_fill_req(struct sk_buff *skb, struct sock *sk,
 	r->tcpdiag_wqueue = 0;
 	r->tcpdiag_uid = sock_i_uid(sk);
 	r->tcpdiag_inode = 0;
 #ifdef CONFIG_IPV6
 	if (r->tcpdiag_family == AF_INET6) {
 		ipv6_addr_copy((struct in6_addr *)r->id.tcpdiag_src,
 			       &req->af.v6_req.loc_addr);
 		ipv6_addr_copy((struct in6_addr *)r->id.tcpdiag_dst,
 			       &req->af.v6_req.rmt_addr);
 	}
 #endif
 	nlh->nlmsg_len = skb->tail - b;
 	return skb->len;
@@ -545,16 +543,12 @@ static int tcpdiag_dump_reqs(struct sk_buff *skb, struct sock *sk,
 		if (bc) {
 			entry.saddr =
 #ifdef CONFIG_IPV6
 				(entry.family == AF_INET6) ?
 				req->af.v6_req.loc_addr.s6_addr32 :
 #endif
 				&req->af.v4_req.loc_addr;
 			entry.daddr =
 #ifdef CONFIG_IPV6
 				(entry.family == AF_INET6) ?
 				req->af.v6_req.rmt_addr.s6_addr32 :
 #endif
 				&req->af.v4_req.rmt_addr;
 			entry.dport = ntohs(req->rmt_port);
net/ipv6/ndisc.c
@@ -1078,13 +1078,6 @@ static void ndisc_router_discovery(struct sk_buff *skb)
 			return;
 		}
 		neigh->flags |= NTF_ROUTER;
-
-		/*
-		 *	If we where using an "all destinations on link" route
-		 *	delete it
-		 */
-
-		rt6_purge_dflt_routers();
 	}
 
 	if (rt)
net/netlink/af_netlink.c
@@ -44,6 +44,12 @@
 #include <linux/smp_lock.h>
 #include <linux/notifier.h>
 #include <linux/security.h>
+#include <linux/jhash.h>
+#include <linux/jiffies.h>
+#include <linux/random.h>
+#include <linux/bitops.h>
+#include <linux/mm.h>
+#include <linux/types.h>
 #include <net/sock.h>
 #include <net/scm.h>
@@ -56,9 +62,9 @@ struct netlink_opt
 struct netlink_opt
 {
 	u32			pid;
-	unsigned		groups;
+	unsigned int		groups;
 	u32			dst_pid;
-	unsigned		dst_groups;
+	unsigned int		dst_groups;
 	unsigned long		state;
 	int			(*handler)(int unit, struct sk_buff *skb);
 	wait_queue_head_t	wait;
@@ -69,9 +75,28 @@ struct netlink_opt
 #define nlk_sk(__sk) ((struct netlink_opt *)(__sk)->sk_protinfo)
 
-static struct hlist_head nl_table[MAX_LINKS];
+struct nl_pid_hash {
+	struct hlist_head *table;
+	unsigned long rehash_time;
+
+	unsigned int mask;
+	unsigned int shift;
+
+	unsigned int entries;
+	unsigned int max_shift;
+
+	u32 rnd;
+};
+
+struct netlink_table {
+	struct nl_pid_hash hash;
+	struct hlist_head mc_list;
+};
+
+static struct netlink_table *nl_table;
+
 static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
-static unsigned nl_nonroot[MAX_LINKS];
+static unsigned int nl_nonroot[MAX_LINKS];
 
 #ifdef NL_EMULATE_DEV
 static struct socket *netlink_kernel[MAX_LINKS];
@@ -85,6 +110,11 @@ static atomic_t nl_table_users = ATOMIC_INIT(0);
 static struct notifier_block *netlink_chain;
 
+static struct hlist_head *nl_pid_hashfn(struct nl_pid_hash *hash, u32 pid)
+{
+	return &hash->table[jhash_1word(pid, hash->rnd) & hash->mask];
+}
+
 static void netlink_sock_destruct(struct sock *sk)
 {
 	skb_queue_purge(&sk->sk_receive_queue);
@@ -153,11 +183,14 @@ netlink_unlock_table(void)
 static __inline__ struct sock *netlink_lookup(int protocol, u32 pid)
 {
+	struct nl_pid_hash *hash = &nl_table[protocol].hash;
+	struct hlist_head *head;
 	struct sock *sk;
 	struct hlist_node *node;
 
 	read_lock(&nl_table_lock);
-	sk_for_each(sk, node, &nl_table[protocol]) {
+	head = nl_pid_hashfn(hash, pid);
+	sk_for_each(sk, node, head) {
 		if (nlk_sk(sk)->pid == pid) {
 			sock_hold(sk);
 			goto found;
@@ -169,27 +202,118 @@ static __inline__ struct sock *netlink_lookup(int protocol, u32 pid)
 	return sk;
 }
 
+static inline struct hlist_head *nl_pid_hash_alloc(size_t size)
+{
+	if (size <= PAGE_SIZE)
+		return kmalloc(size, GFP_ATOMIC);
+	else
+		return (struct hlist_head *)
+			__get_free_pages(GFP_ATOMIC, get_order(size));
+}
+
+static inline void nl_pid_hash_free(struct hlist_head *table, size_t size)
+{
+	if (size <= PAGE_SIZE)
+		kfree(table);
+	else
+		free_pages((unsigned long)table, get_order(size));
+}
+
+static int nl_pid_hash_rehash(struct nl_pid_hash *hash, int grow)
+{
+	unsigned int omask, mask, shift;
+	size_t osize, size;
+	struct hlist_head *otable, *table;
+	int i;
+
+	omask = mask = hash->mask;
+	osize = size = (mask + 1) * sizeof(*table);
+	shift = hash->shift;
+
+	if (grow) {
+		if (++shift > hash->max_shift)
+			return 0;
+		mask = mask * 2 + 1;
+		size *= 2;
+	}
+
+	table = nl_pid_hash_alloc(size);
+	if (!table)
+		return 0;
+
+	memset(table, 0, size);
+	otable = hash->table;
+	hash->table = table;
+	hash->mask = mask;
+	hash->shift = shift;
+	get_random_bytes(&hash->rnd, sizeof(hash->rnd));
+
+	for (i = 0; i <= omask; i++) {
+		struct sock *sk;
+		struct hlist_node *node, *tmp;
+
+		sk_for_each_safe(sk, node, tmp, &otable[i])
+			__sk_add_node(sk, nl_pid_hashfn(hash, nlk_sk(sk)->pid));
+	}
+
+	nl_pid_hash_free(otable, osize);
+	hash->rehash_time = jiffies + 10 * 60 * HZ;
+	return 1;
+}
+
+static inline int nl_pid_hash_dilute(struct nl_pid_hash *hash, int len)
+{
+	int avg = hash->entries >> hash->shift;
+
+	if (unlikely(avg > 1) && nl_pid_hash_rehash(hash, 1))
+		return 1;
+
+	if (unlikely(len > avg) && time_after(jiffies, hash->rehash_time)) {
+		nl_pid_hash_rehash(hash, 0);
+		return 1;
+	}
+
+	return 0;
+}
+
+static struct proto_ops netlink_ops;
+
 static int netlink_insert(struct sock *sk, u32 pid)
 {
+	struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
+	struct hlist_head *head;
 	int err = -EADDRINUSE;
 	struct sock *osk;
 	struct hlist_node *node;
+	int len;
 
 	netlink_table_grab();
-	sk_for_each(osk, node, &nl_table[sk->sk_protocol]) {
+	head = nl_pid_hashfn(hash, pid);
+	len = 0;
+	sk_for_each(osk, node, head) {
 		if (nlk_sk(osk)->pid == pid)
 			break;
+		len++;
 	}
-	if (!node) {
-		err = -EBUSY;
-		if (nlk_sk(sk)->pid == 0) {
-			nlk_sk(sk)->pid = pid;
-			sk_add_node(sk, &nl_table[sk->sk_protocol]);
-			err = 0;
-		}
-	}
+	if (node)
+		goto err;
+
+	err = -EBUSY;
+	if (nlk_sk(sk)->pid)
+		goto err;
+
+	err = -ENOMEM;
+	if (BITS_PER_LONG > 32 && unlikely(hash->entries >= UINT_MAX))
+		goto err;
+
+	if (len && nl_pid_hash_dilute(hash, len))
+		head = nl_pid_hashfn(hash, pid);
+	hash->entries++;
+	nlk_sk(sk)->pid = pid;
+	sk_add_node(sk, head);
+	err = 0;
+
+err:
 	netlink_table_ungrab();
 	return err;
 }
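The hunk above introduces a per-protocol pid hash: a bucket is chosen with jhash_1word(pid, rnd) & mask, and nl_pid_hash_rehash() doubles the table (the mask goes from 2^n - 1 to 2^(n+1) - 1) when chains grow long. The following is a small userspace sketch of that bucket-selection and grow logic only; it is not part of the commit, and a toy hash function stands in for jhash_1word.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for jhash_1word(); only the masking/doubling logic matters. */
static uint32_t toy_hash(uint32_t pid, uint32_t rnd)
{
	return (pid * 2654435761u) ^ rnd;
}

struct pid_hash {
	unsigned int *table;	/* per-bucket entry counts, instead of hlists */
	unsigned int mask;	/* always 2^shift - 1 */
	unsigned int shift;
	uint32_t rnd;
};

static unsigned int bucket(struct pid_hash *h, uint32_t pid)
{
	return toy_hash(pid, h->rnd) & h->mask;
}

/* Grow: double the table, i.e. mask = mask * 2 + 1, as nl_pid_hash_rehash() does.
 * (A real rehash would also move every existing entry to its new bucket.) */
static void grow(struct pid_hash *h)
{
	unsigned int newmask = h->mask * 2 + 1;
	unsigned int *t = calloc(newmask + 1, sizeof(*t));

	free(h->table);
	h->table = t;
	h->mask = newmask;
	h->shift++;
}

int main(void)
{
	struct pid_hash h = { calloc(1, sizeof(unsigned int)), 0, 0, 0x12345678u };

	for (uint32_t pid = 100; pid < 108; pid++) {
		h.table[bucket(&h, pid)]++;
		if (pid % 4 == 3)	/* pretend the average chain got too long */
			grow(&h);
	}
	printf("final mask=%u shift=%u\n", h.mask, h.shift);
	free(h.table);
	return 0;
}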
@@ -197,7 +321,10 @@ static int netlink_insert(struct sock *sk, u32 pid)
 static void netlink_remove(struct sock *sk)
 {
 	netlink_table_grab();
+	nl_table[sk->sk_protocol].hash.entries--;
 	sk_del_node_init(sk);
+	if (nlk_sk(sk)->groups)
+		__sk_del_bind_node(sk);
 	netlink_table_ungrab();
 }
@@ -282,19 +409,25 @@ static int netlink_release(struct socket *sock)
 static int netlink_autobind(struct socket *sock)
 {
 	struct sock *sk = sock->sk;
+	struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
+	struct hlist_head *head;
 	struct sock *osk;
 	struct hlist_node *node;
 	s32 pid = current->pid;
 	int err;
+	static s32 rover = -4097;
 
 retry:
+	cond_resched();
 	netlink_table_grab();
-	sk_for_each(osk, node, &nl_table[sk->sk_protocol]) {
+	head = nl_pid_hashfn(hash, pid);
+	sk_for_each(osk, node, head) {
 		if (nlk_sk(osk)->pid == pid) {
 			/* Bind collision, search negative pid values. */
 			if (pid > 0)
-				pid = -4096;
-			pid--;
+				pid = rover;
+			else if (--pid > 0)
+				pid = -4097;
 			netlink_table_ungrab();
 			goto retry;
 		}
@@ -308,7 +441,7 @@ static int netlink_autobind(struct socket *sock)
 	return 0;
 }
 
-static inline int netlink_capable(struct socket *sock, unsigned flag)
+static inline int netlink_capable(struct socket *sock, unsigned int flag)
 {
 	return (nl_nonroot[sock->sk->sk_protocol] & flag) ||
 	       capable(CAP_NET_ADMIN);
@@ -331,21 +464,19 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr, int addr_len
 	if (nlk->pid) {
 		if (nladdr->nl_pid != nlk->pid)
 			return -EINVAL;
-		nlk->groups = nladdr->nl_groups;
-		return 0;
-	}
-
-	if (nladdr->nl_pid == 0) {
-		err = netlink_autobind(sock);
-		if (err == 0)
-			nlk->groups = nladdr->nl_groups;
+	} else {
+		err = nladdr->nl_pid ?
+			netlink_insert(sk, nladdr->nl_pid) :
+			netlink_autobind(sock);
+		if (err)
+			return err;
 	}
 
-	err = netlink_insert(sk, nladdr->nl_pid);
-	if (err == 0)
-		nlk->groups = nladdr->nl_groups;
-	return err;
+	nlk->groups = nladdr->nl_groups;
+	if (nladdr->nl_groups)
+		sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
+
+	return 0;
 }
 
 static int netlink_connect(struct socket *sock, struct sockaddr *addr,
@@ -590,94 +721,142 @@ static __inline__ int netlink_broadcast_deliver(struct sock *sk, struct sk_buff
 	return -1;
 }
 
-int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
-		      u32 group, int allocation)
-{
-	struct sock *sk;
-	struct hlist_node *node;
-	struct sk_buff *skb2 = NULL;
-	int protocol = ssk->sk_protocol;
-	int failure = 0, delivered = 0;
-	int congested = 0;
-	int val;
-
-	netlink_trim(skb, allocation);
-
-	/* While we sleep in clone, do not allow to change socket list */
-	netlink_lock_table();
-
-	sk_for_each(sk, node, &nl_table[protocol]) {
-		struct netlink_opt *nlk = nlk_sk(sk);
-
-		if (ssk == sk)
-			continue;
-
-		if (nlk->pid == pid || !(nlk->groups & group))
-			continue;
-
-		if (failure) {
-			netlink_overrun(sk);
-			continue;
-		}
-
-		sock_hold(sk);
-		if (skb2 == NULL) {
-			if (atomic_read(&skb->users) != 1) {
-				skb2 = skb_clone(skb, allocation);
-			} else {
-				skb2 = skb;
-				atomic_inc(&skb->users);
-			}
-		}
-		if (skb2 == NULL) {
-			netlink_overrun(sk);
-			/* Clone failed. Notify ALL listeners. */
-			failure = 1;
-		} else if ((val = netlink_broadcast_deliver(sk, skb2)) < 0) {
-			netlink_overrun(sk);
-		} else {
-			congested |= val;
-			delivered = 1;
-			skb2 = NULL;
-		}
-		sock_put(sk);
-	}
-
-	netlink_unlock_table();
-
-	if (skb2)
-		kfree_skb(skb2);
-	kfree_skb(skb);
-
-	if (delivered) {
-		if (congested && (allocation & __GFP_WAIT))
-			yield();
-		return 0;
-	}
-	if (failure)
-		return -ENOBUFS;
-	return -ESRCH;
-}
+struct netlink_broadcast_data {
+	struct sock *exclude_sk;
+	u32 pid;
+	u32 group;
+	int failure;
+	int congested;
+	int delivered;
+	int allocation;
+	struct sk_buff *skb, *skb2;
+};
+
+static inline int do_one_broadcast(struct sock *sk, struct netlink_broadcast_data *p)
+{
+	struct netlink_opt *nlk = nlk_sk(sk);
+	int val;
+
+	if (p->exclude_sk == sk)
+		goto out;
+
+	if (nlk->pid == p->pid || !(nlk->groups & p->group))
+		goto out;
+
+	if (p->failure) {
+		netlink_overrun(sk);
+		goto out;
+	}
+
+	sock_hold(sk);
+	if (p->skb2 == NULL) {
+		if (atomic_read(&p->skb->users) != 1) {
+			p->skb2 = skb_clone(p->skb, p->allocation);
+		} else {
+			p->skb2 = p->skb;
+			atomic_inc(&p->skb->users);
+		}
+	}
+	if (p->skb2 == NULL) {
+		netlink_overrun(sk);
+		/* Clone failed. Notify ALL listeners. */
+		p->failure = 1;
+	} else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
+		netlink_overrun(sk);
+	} else {
+		p->congested |= val;
+		p->delivered = 1;
+		p->skb2 = NULL;
+	}
+	sock_put(sk);
+
+out:
+	return 0;
+}
+
+int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
+		      u32 group, int allocation)
+{
+	struct netlink_broadcast_data info;
+	struct hlist_node *node;
+	struct sock *sk;
+
+	info.exclude_sk = ssk;
+	info.pid = pid;
+	info.group = group;
+	info.failure = 0;
+	info.congested = 0;
+	info.delivered = 0;
+	info.allocation = allocation;
+	info.skb = skb;
+	info.skb2 = NULL;
+
+	netlink_trim(skb, allocation);
+
+	/* While we sleep in clone, do not allow to change socket list */
+	netlink_lock_table();
+
+	sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
+		do_one_broadcast(sk, &info);
+
+	netlink_unlock_table();
+
+	if (info.skb2)
+		kfree_skb(info.skb2);
+	kfree_skb(skb);
+
+	if (info.delivered) {
+		if (info.congested && (allocation & __GFP_WAIT))
+			yield();
+		return 0;
+	}
+	if (info.failure)
+		return -ENOBUFS;
+	return -ESRCH;
+}
+
+struct netlink_set_err_data {
+	struct sock *exclude_sk;
+	u32 pid;
+	u32 group;
+	int code;
+};
+
+static inline int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
+{
+	struct netlink_opt *nlk = nlk_sk(sk);
+
+	if (sk == p->exclude_sk)
+		goto out;
+
+	if (nlk->pid == p->pid || !(nlk->groups & p->group))
+		goto out;
+
+	sk->sk_err = p->code;
+	sk->sk_error_report(sk);
+out:
+	return 0;
+}
 
 void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
 {
+	struct netlink_set_err_data info;
 	struct hlist_node *node;
-	int protocol = ssk->sk_protocol;
 	struct sock *sk;
 
+	info.exclude_sk = ssk;
+	info.pid = pid;
+	info.group = group;
+	info.code = code;
+
 	read_lock(&nl_table_lock);
-	sk_for_each(sk, node, &nl_table[protocol]) {
-		struct netlink_opt *nlk = nlk_sk(sk);
-		if (ssk == sk)
-			continue;
-
-		if (nlk->pid == pid || !(nlk->groups & group))
-			continue;
-
-		sk->sk_err = code;
-		sk->sk_error_report(sk);
-	}
+	sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
+		do_one_set_err(sk, &info);
 	read_unlock(&nl_table_lock);
 }
@@ -853,6 +1032,9 @@ netlink_kernel_create(int unit, void (*input)(struct sock *sk, int len))
 	struct socket *sock;
 	struct sock *sk;
 
+	if (!nl_table)
+		return NULL;
+
 	if (unit < 0 || unit >= MAX_LINKS)
 		return NULL;
@@ -875,9 +1057,9 @@ netlink_kernel_create(int unit, void (*input)(struct sock *sk, int len))
 	return sk;
 }
 
-void netlink_set_nonroot(int protocol, unsigned flags)
+void netlink_set_nonroot(int protocol, unsigned int flags)
 {
-	if ((unsigned)protocol < MAX_LINKS)
+	if ((unsigned int)protocol < MAX_LINKS)
 		nl_nonroot[protocol] = flags;
 }
@@ -1070,22 +1252,33 @@ int netlink_post(int unit, struct sk_buff *skb)
 #endif
 
 #ifdef CONFIG_PROC_FS
+struct nl_seq_iter {
+	int link;
+	int hash_idx;
+};
+
 static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
 {
-	long i;
+	struct nl_seq_iter *iter = seq->private;
+	int i, j;
 	struct sock *s;
 	struct hlist_node *node;
 	loff_t off = 0;
 
 	for (i = 0; i < MAX_LINKS; i++) {
-		sk_for_each(s, node, &nl_table[i]) {
-			if (off == pos) {
-				seq->private = (void *) i;
-				return s;
+		struct nl_pid_hash *hash = &nl_table[i].hash;
+
+		for (j = 0; j <= hash->mask; j++) {
+			sk_for_each(s, node, &hash->table[j]) {
+				if (off == pos) {
+					iter->link = i;
+					iter->hash_idx = j;
+					return s;
+				}
+				++off;
 			}
-			++off;
 		}
 	}
 	return NULL;
 }
@@ -1098,6 +1291,8 @@ static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
 static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
 	struct sock *s;
+	struct nl_seq_iter *iter;
+	int i, j;
 
 	++*pos;
@@ -1105,18 +1300,29 @@ static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 		return netlink_seq_socket_idx(seq, 0);
 
 	s = sk_next(v);
-	if (!s) {
-		long i = (long)seq->private;
-
-		while (++i < MAX_LINKS) {
-			s = sk_head(&nl_table[i]);
-			if (s) {
-				seq->private = (void *) i;
-				break;
-			}
-		}
-	}
-	return s;
+	if (s)
+		return s;
+
+	iter = seq->private;
+	i = iter->link;
+	j = iter->hash_idx + 1;
+
+	do {
+		struct nl_pid_hash *hash = &nl_table[i].hash;
+
+		for (; j <= hash->mask; j++) {
+			s = sk_head(&hash->table[j]);
+			if (s) {
+				iter->link = i;
+				iter->hash_idx = j;
+				return s;
+			}
+		}
+
+		j = 0;
+	} while (++i < MAX_LINKS);
+
+	return NULL;
 }
 
 static void netlink_seq_stop(struct seq_file *seq, void *v)
@@ -1160,7 +1366,24 @@ static struct seq_operations netlink_seq_ops = {
 
 static int netlink_seq_open(struct inode *inode, struct file *file)
 {
-	return seq_open(file, &netlink_seq_ops);
+	struct seq_file *seq;
+	struct nl_seq_iter *iter;
+	int err;
+
+	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
+	if (!iter)
+		return -ENOMEM;
+
+	err = seq_open(file, &netlink_seq_ops);
+	if (err) {
+		kfree(iter);
+		return err;
+	}
+
+	memset(iter, 0, sizeof(*iter));
+	seq = file->private_data;
+	seq->private = iter;
+	return 0;
 }
 
 static struct file_operations netlink_seq_fops = {
@@ -1168,7 +1391,7 @@ static struct file_operations netlink_seq_fops = {
 	.open		= netlink_seq_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
-	.release	= seq_release,
+	.release	= seq_release_private,
 };
 
 #endif
@@ -1210,14 +1433,54 @@ static struct net_proto_family netlink_family_ops = {
 	.owner	= THIS_MODULE,	/* for consistency 8) */
 };
 
+extern void netlink_skb_parms_too_large(void);
+
 static int __init netlink_proto_init(void)
 {
 	struct sk_buff *dummy_skb;
+	int i;
+	unsigned long max;
+	unsigned int order;
 
-	if (sizeof(struct netlink_skb_parms) > sizeof(dummy_skb->cb)) {
-		printk(KERN_CRIT "netlink_init: panic\n");
-		return -1;
+	if (sizeof(struct netlink_skb_parms) > sizeof(dummy_skb->cb))
+		netlink_skb_parms_too_large();
+
+	nl_table = kmalloc(sizeof(*nl_table) * MAX_LINKS, GFP_KERNEL);
+	if (!nl_table) {
+enomem:
+		printk(KERN_CRIT "netlink_init: Cannot allocate nl_table\n");
+		return -ENOMEM;
 	}
+
+	memset(nl_table, 0, sizeof(*nl_table) * MAX_LINKS);
+
+	if (num_physpages >= (128 * 1024))
+		max = num_physpages >> (21 - PAGE_SHIFT);
+	else
+		max = num_physpages >> (23 - PAGE_SHIFT);
+
+	order = get_bitmask_order(max) - 1 + PAGE_SHIFT;
+	max = (1UL << order) / sizeof(struct hlist_head);
+	order = get_bitmask_order(max > UINT_MAX ? UINT_MAX : max) - 1;
+
+	for (i = 0; i < MAX_LINKS; i++) {
+		struct nl_pid_hash *hash = &nl_table[i].hash;
+
+		hash->table = nl_pid_hash_alloc(1 * sizeof(*hash->table));
+		if (!hash->table) {
+			while (i-- > 0)
+				nl_pid_hash_free(nl_table[i].hash.table,
+						 1 * sizeof(*hash->table));
+			kfree(nl_table);
+			goto enomem;
+		}
+		memset(hash->table, 0, 1 * sizeof(*hash->table));
+		hash->max_shift = order;
+		hash->shift = 0;
+		hash->mask = 0;
+		hash->rehash_time = jiffies;
+	}
 
 	sock_register(&netlink_family_ops);
 #ifdef CONFIG_PROC_FS
 	proc_net_fops_create("netlink", 0, &netlink_seq_fops);
@@ -1231,6 +1494,8 @@ static void __exit netlink_proto_exit(void)
 {
 	sock_unregister(PF_NETLINK);
 	proc_net_remove("netlink");
+	kfree(nl_table);
+	nl_table = NULL;
 }
 
 core_initcall(netlink_proto_init);
net/sched/act_api.c
@@ -416,14 +416,37 @@ int tcf_action_init(struct rtattr *rta, struct rtattr *est, struct tc_action *a,
 int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a)
 {
+	struct gnet_dump d;
+	struct tcf_act_hdr *h = a->priv;
+
 #ifdef CONFIG_KMOD
 	/* place holder */
 #endif
 
-	if (NULL == a->ops || NULL == a->ops->get_stats)
-		return 1;
+	if (NULL == h)
+		goto errout;
+
+	if (gnet_stats_start_copy(skb, TCA_ACT_STATS, h->stats_lock, &d) < 0)
+		goto errout;
+
+	if (NULL != a->ops && NULL != a->ops->get_stats)
+		if (a->ops->get_stats(skb, a) < 0)
+			goto errout;
+
+	if (gnet_stats_copy_basic(&d, &h->bstats) < 0 ||
+#ifdef CONFIG_NET_ESTIMATOR
+	    gnet_stats_copy_rate_est(&d, &h->rate_est) < 0 ||
+#endif
+	    gnet_stats_copy_queue(&d, &h->qstats) < 0)
+		goto errout;
 
-	return a->ops->get_stats(skb, a);
+	if (gnet_stats_finish_copy(&d) < 0)
+		goto errout;
+
+	return 0;
+
+errout:
+	return -1;
 }
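The new tcf_action_copy_stats() above (and tcf_police_dump_stats() later in this commit) follows a fixed staging: gnet_stats_start_copy(), then one gnet_stats_copy_*() call per stats block, then gnet_stats_finish_copy(), with any failure bailing to a common errout label. The following is a small userspace analogue of that pattern, not part of the commit; the dump_* names and the output format are hypothetical and only illustrate the start/copy/finish control flow.

#include <stdio.h>

/* Userspace analogue of the gnet_dump staging: start a dump, copy each
 * stats block, then finish; any failing step aborts the whole dump. */
struct dump { FILE *out; int failed; };

static int dump_start(struct dump *d, FILE *out)
{
	d->out = out;
	d->failed = 0;
	return fprintf(out, "stats {\n") < 0 ? -1 : 0;
}

static int dump_copy(struct dump *d, const char *name, unsigned long val)
{
	if (d->failed || fprintf(d->out, "  %s = %lu\n", name, val) < 0) {
		d->failed = 1;
		return -1;
	}
	return 0;
}

static int dump_finish(struct dump *d)
{
	return (d->failed || fprintf(d->out, "}\n") < 0) ? -1 : 0;
}

int main(void)
{
	struct dump d;

	if (dump_start(&d, stdout) < 0 ||
	    dump_copy(&d, "bytes", 1024) < 0 ||	/* like gnet_stats_copy_basic() */
	    dump_copy(&d, "packets", 8) < 0 ||
	    dump_copy(&d, "drops", 0) < 0 ||	/* like gnet_stats_copy_queue() */
	    dump_finish(&d) < 0)
		return 1;			/* like the errout: path above */
	return 0;
}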
net/sched/cls_fw.c
@@ -395,11 +395,9 @@ static int fw_dump(struct tcf_proto *tp, unsigned long fh,
 	}
 #else /* CONFIG_NET_CLS_ACT */
 #ifdef CONFIG_NET_CLS_POLICE
-	if (f->police) {
-		if (qdisc_copy_stats(skb, &f->police->stats,
-				     f->police->stats_lock))
+	if (f->police)
+		if (tcf_police_dump_stats(skb, f->police) < 0)
 			goto rtattr_failure;
-	}
 #endif /* CONFIG_NET_CLS_POLICE */
 #endif /* CONFIG_NET_CLS_ACT */
 
 	return skb->len;
net/sched/cls_route.c
@@ -566,11 +566,9 @@ static int route4_dump(struct tcf_proto *tp, unsigned long fh,
 	rta->rta_len = skb->tail - b;
 #ifdef CONFIG_NET_CLS_POLICE
-	if (f->police) {
-		if (qdisc_copy_stats(skb, &f->police->stats,
-				     f->police->stats_lock))
+	if (f->police)
+		if (tcf_police_dump_stats(skb, f->police) < 0)
 			goto rtattr_failure;
-	}
 #endif
 
 	return skb->len;
net/sched/cls_rsvp.h
@@ -631,11 +631,9 @@ static int rsvp_dump(struct tcf_proto *tp, unsigned long fh,
 	rta->rta_len = skb->tail - b;
 #ifdef CONFIG_NET_CLS_POLICE
-	if (f->police) {
-		if (qdisc_copy_stats(skb, &f->police->stats,
-				     f->police->stats_lock))
+	if (f->police)
+		if (tcf_police_dump_stats(skb, f->police) < 0)
 			goto rtattr_failure;
-	}
 #endif
 
 	return skb->len;
net/sched/cls_u32.c
@@ -775,11 +775,9 @@ static int u32_dump(struct tcf_proto *tp, unsigned long fh,
 	}
 #else
 #ifdef CONFIG_NET_CLS_POLICE
-	if (TC_U32_KEY(n->handle) && n->police) {
-		if (qdisc_copy_stats(skb, &n->police->stats,
-				     n->police->stats_lock))
+	if (TC_U32_KEY(n->handle) && n->police)
+		if (tcf_police_dump_stats(skb, n->police) < 0)
 			goto rtattr_failure;
-	}
 #endif
 #endif
 
 	return skb->len;
net/sched/gact.c
@@ -62,7 +62,7 @@ gact_net_rand(struct tcf_gact *p) {
 int
 gact_determ(struct tcf_gact *p)
 {
-	if (p->stats.packets % p->pval)
+	if (p->bstats.packets % p->pval)
 		return p->action;
 	return p->paction;
 }
@@ -163,10 +163,10 @@ tcf_gact(struct sk_buff **pskb, struct tc_action *a)
 #else
 	action = p->action;
 #endif
-	p->stats.bytes += skb->len;
-	p->stats.packets++;
+	p->bstats.bytes += skb->len;
+	p->bstats.packets++;
 	if (TC_ACT_SHOT == action)
-		p->stats.drops++;
+		p->qstats.drops++;
 	p->tm.lastuse = jiffies;
 	spin_unlock(&p->lock);
@@ -214,17 +214,6 @@ tcf_gact_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
 	return -1;
 }
 
-int
-tcf_gact_stats(struct sk_buff *skb, struct tc_action *a)
-{
-	struct tcf_gact *p;
-	p = PRIV(a, gact);
-	if (NULL != p)
-		return qdisc_copy_stats(skb, &p->stats, p->stats_lock);
-	return 1;
-}
-
 struct tc_action_ops act_gact_ops = {
 	.next		=	NULL,
 	.kind		=	"gact",
@@ -232,7 +221,6 @@ struct tc_action_ops act_gact_ops = {
 	.capab		=	TCA_CAP_NONE,
 	.owner		=	THIS_MODULE,
 	.act		=	tcf_gact,
-	.get_stats	=	tcf_gact_stats,
 	.dump		=	tcf_gact_dump,
 	.cleanup	=	tcf_gact_cleanup,
 	.lookup		=	tcf_hash_search,
net/sched/ipt.c
@@ -218,9 +218,8 @@ tcf_ipt_init(struct rtattr *rta, struct rtattr *est, struct tc_action *a, int ov
 	 */
 	p->tm.install = jiffies;
 #ifdef CONFIG_NET_ESTIMATOR
-	if (est) {
-		qdisc_new_estimator(&p->stats, p->stats_lock, est);
-	}
+	if (est)
+		gen_new_estimator(&p->bstats, &p->rate_est, p->stats_lock, est);
 #endif
 	h = tcf_hash(p->index);
 	write_lock_bh(&ipt_lock);
@@ -258,8 +257,8 @@ tcf_ipt(struct sk_buff **pskb, struct tc_action *a)
 	spin_lock(&p->lock);
 
 	p->tm.lastuse = jiffies;
-	p->stats.bytes += skb->len;
-	p->stats.packets++;
+	p->bstats.bytes += skb->len;
+	p->bstats.packets++;
 
 	if (skb_cloned(skb)) {
 		if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
@@ -278,7 +277,7 @@ tcf_ipt(struct sk_buff **pskb, struct tc_action *a)
 		break;
 	case NF_DROP:
 		result = TC_ACT_SHOT;
-		p->stats.drops++;
+		p->qstats.drops++;
 		break;
 	case IPT_CONTINUE:
 		result = TC_ACT_PIPE;
@@ -346,17 +345,6 @@ tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
 	return -1;
 }
 
-int
-tcf_ipt_stats(struct sk_buff *skb, struct tc_action *a)
-{
-	struct tcf_ipt *p;
-	p = PRIV(a, ipt);
-	if (NULL != p)
-		return qdisc_copy_stats(skb, &p->stats, p->stats_lock);
-	return 1;
-}
-
 struct tc_action_ops act_ipt_ops = {
 	.next		=	NULL,
 	.kind		=	"ipt",
@@ -364,7 +352,6 @@ struct tc_action_ops act_ipt_ops = {
 	.capab		=	TCA_CAP_NONE,
 	.owner		=	THIS_MODULE,
 	.act		=	tcf_ipt,
-	.get_stats	=	tcf_ipt_stats,
 	.dump		=	tcf_ipt_dump,
 	.cleanup	=	tcf_ipt_cleanup,
 	.lookup		=	tcf_hash_search,
net/sched/mirred.c
@@ -195,9 +195,9 @@ tcf_mirred(struct sk_buff **pskb, struct tc_action *a)
 bad_mirred:
 		if (NULL != skb2)
 			kfree_skb(skb2);
-		p->stats.overlimits++;
-		p->stats.bytes += skb->len;
-		p->stats.packets++;
+		p->qstats.overlimits++;
+		p->bstats.bytes += skb->len;
+		p->bstats.packets++;
 		spin_unlock(&p->lock);
 		/* should we be asking for packet to be dropped?
 		 * may make sense for redirect case only
@@ -216,8 +216,8 @@ tcf_mirred(struct sk_buff **pskb, struct tc_action *a)
 		goto bad_mirred;
 	}
 
-	p->stats.bytes += skb2->len;
-	p->stats.packets++;
+	p->bstats.bytes += skb2->len;
+	p->bstats.packets++;
 	if (!(at & AT_EGRESS)) {
 		if (p->ok_push) {
 			skb_push(skb2, skb2->dev->hard_header_len);
@@ -268,18 +268,6 @@ tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
 	return -1;
 }
 
-int
-tcf_mirred_stats(struct sk_buff *skb, struct tc_action *a)
-{
-	struct tcf_mirred *p;
-	p = PRIV(a, mirred);
-	if (NULL != p)
-		return qdisc_copy_stats(skb, &p->stats, p->stats_lock);
-	return 1;
-}
-
 static struct tc_action_ops act_mirred_ops = {
 	.next		=	NULL,
 	.kind		=	"mirred",
@@ -287,7 +275,6 @@ static struct tc_action_ops act_mirred_ops = {
 	.capab		=	TCA_CAP_NONE,
 	.owner		=	THIS_MODULE,
 	.act		=	tcf_mirred,
-	.get_stats	=	tcf_mirred_stats,
 	.dump		=	tcf_mirred_dump,
 	.cleanup	=	tcf_mirred_cleanup,
 	.lookup		=	tcf_hash_search,
net/sched/pedit.c
@@ -183,10 +183,10 @@ tcf_pedit(struct sk_buff **pskb, struct tc_action *a)
 	}
 bad:
-	p->stats.overlimits++;
+	p->qstats.overlimits++;
 done:
-	p->stats.bytes += skb->len;
-	p->stats.packets++;
+	p->bstats.bytes += skb->len;
+	p->bstats.packets++;
 	spin_unlock(&p->lock);
 	return p->action;
 }
@@ -255,17 +255,6 @@ tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
 	return -1;
 }
 
-int
-tcf_pedit_stats(struct sk_buff *skb, struct tc_action *a)
-{
-	struct tcf_pedit *p;
-	p = PRIV(a, pedit);
-	if (NULL != p)
-		return qdisc_copy_stats(skb, &p->stats, p->stats_lock);
-	return 1;
-}
-
 static struct tc_action_ops act_pedit_ops = {
 	.kind		=	"pedit",
@@ -273,7 +262,6 @@ struct tc_action_ops act_pedit_ops = {
 	.capab		=	TCA_CAP_NONE,
 	.owner		=	THIS_MODULE,
 	.act		=	tcf_pedit,
-	.get_stats	=	tcf_pedit_stats,
 	.dump		=	tcf_pedit_dump,
 	.cleanup	=	tcf_pedit_cleanup,
 	.lookup		=	tcf_hash_search,
net/sched/police.c
@@ -149,7 +149,7 @@ void tcf_police_destroy(struct tcf_police *p)
 			*p1p = p->next;
 			write_unlock_bh(&police_lock);
 #ifdef CONFIG_NET_ESTIMATOR
-			qdisc_kill_estimator(&p->stats);
+			gen_kill_estimator(&p->bstats, &p->rate_est);
 #endif
 			if (p->R_tab)
 				qdisc_put_rtab(p->R_tab);
@@ -245,7 +245,7 @@ int tcf_act_police_locate(struct rtattr *rta, struct rtattr *est,struct tc_actio
 	p->index = parm->index ? : tcf_police_new_index();
 #ifdef CONFIG_NET_ESTIMATOR
 	if (est)
-		qdisc_new_estimator(&p->stats, p->stats_lock, est);
+		gen_new_estimator(&p->bstats, &p->rate_est, p->stats_lock, est);
 #endif
 	h = tcf_police_hash(p->index);
 	write_lock_bh(&police_lock);
@@ -275,16 +275,6 @@ int tcf_act_police_cleanup(struct tc_action *a, int bind)
 	return 0;
 }
 
-int tcf_act_police_stats(struct sk_buff *skb, struct tc_action *a)
-{
-	struct tcf_police *p;
-	p = PRIV(a);
-	if (NULL != p)
-		return qdisc_copy_stats(skb, &p->stats, p->stats_lock);
-	return 1;
-}
-
 int tcf_act_police(struct sk_buff **pskb, struct tc_action *a)
 {
 	psched_time_t now;
@@ -302,12 +292,12 @@ int tcf_act_police(struct sk_buff **pskb, struct tc_action *a)
 	spin_lock(&p->lock);
 
-	p->stats.bytes += skb->len;
-	p->stats.packets++;
+	p->bstats.bytes += skb->len;
+	p->bstats.packets++;
 
 #ifdef CONFIG_NET_ESTIMATOR
-	if (p->ewma_rate && p->stats.bps >= p->ewma_rate) {
-		p->stats.overlimits++;
+	if (p->ewma_rate && p->rate_est.bps >= p->ewma_rate) {
+		p->qstats.overlimits++;
 		spin_unlock(&p->lock);
 		return p->action;
 	}
@@ -343,7 +333,7 @@ int tcf_act_police(struct sk_buff **pskb, struct tc_action *a)
 		}
 	}
 
-	p->stats.overlimits++;
+	p->qstats.overlimits++;
 	spin_unlock(&p->lock);
 	return p->action;
 }
@@ -400,7 +390,6 @@ static struct tc_action_ops act_police_ops = {
 	.capab		=	TCA_CAP_NONE,
 	.owner		=	THIS_MODULE,
 	.act		=	tcf_act_police,
-	.get_stats	=	tcf_act_police_stats,
 	.dump		=	tcf_act_police_dump,
 	.cleanup	=	tcf_act_police_cleanup,
 	.lookup		=	tcf_hash_search,
@@ -480,7 +469,7 @@ struct tcf_police * tcf_police_locate(struct rtattr *rta, struct rtattr *est)
 	p->action = parm->action;
 #ifdef CONFIG_NET_ESTIMATOR
 	if (est)
-		qdisc_new_estimator(&p->stats, p->stats_lock, est);
+		gen_new_estimator(&p->bstats, &p->rate_est, p->stats_lock, est);
 #endif
 	h = tcf_police_hash(p->index);
 	write_lock_bh(&police_lock);
@@ -504,12 +493,12 @@ int tcf_police(struct sk_buff *skb, struct tcf_police *p)
 	spin_lock(&p->lock);
 
-	p->stats.bytes += skb->len;
-	p->stats.packets++;
+	p->bstats.bytes += skb->len;
+	p->bstats.packets++;
 
 #ifdef CONFIG_NET_ESTIMATOR
-	if (p->ewma_rate && p->stats.bps >= p->ewma_rate) {
-		p->stats.overlimits++;
+	if (p->ewma_rate && p->rate_est.bps >= p->ewma_rate) {
+		p->qstats.overlimits++;
 		spin_unlock(&p->lock);
 		return p->action;
 	}
@@ -545,7 +534,7 @@ int tcf_police(struct sk_buff *skb, struct tcf_police *p)
 		}
 	}
 
-	p->stats.overlimits++;
+	p->qstats.overlimits++;
 	spin_unlock(&p->lock);
 	return p->action;
 }
@@ -581,9 +570,34 @@ int tcf_police_dump(struct sk_buff *skb, struct tcf_police *p)
 	return -1;
 }
 
+int tcf_police_dump_stats(struct sk_buff *skb, struct tcf_police *p)
+{
+	struct gnet_dump d;
+
+	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
+					 TCA_XSTATS, p->stats_lock, &d) < 0)
+		goto errout;
+
+	if (gnet_stats_copy_basic(&d, &p->bstats) < 0 ||
+#ifdef CONFIG_NET_ESTIMATOR
+	    gnet_stats_copy_rate_est(&d, &p->rate_est) < 0 ||
+#endif
+	    gnet_stats_copy_queue(&d, &p->qstats) < 0)
+		goto errout;
+
+	if (gnet_stats_finish_copy(&d) < 0)
+		goto errout;
+
+	return 0;
+
+errout:
+	return -1;
+}
+
 EXPORT_SYMBOL(tcf_police);
 EXPORT_SYMBOL(tcf_police_destroy);
 EXPORT_SYMBOL(tcf_police_dump);
+EXPORT_SYMBOL(tcf_police_dump_stats);
 EXPORT_SYMBOL(tcf_police_hash);
 EXPORT_SYMBOL(tcf_police_ht);
 EXPORT_SYMBOL(tcf_police_locate);