Commit bf47f4b0
Authored May 12, 2010 by David S. Miller

    Merge branch 'master' of
    git://git.kernel.org/pub/scm/linux/kernel/git/kaber/ipmr-2.6

Parents: 0b9715e6 5b285cac

Showing 7 changed files with 703 additions and 298 deletions (+703 -298)
include/linux/ipv6.h        +1    -0
include/linux/mroute6.h     +12   -23
include/linux/rtnetlink.h   +2    -1
include/net/netns/ipv6.h    +5    -9
net/ipv6/Kconfig            +14   -0
net/ipv6/ip6_output.c       +1    -1
net/ipv6/ip6mr.c            +668  -264
include/linux/ipv6.h
@@ -383,6 +383,7 @@ struct raw6_sock {
     __u32               checksum;   /* perform checksum */
     __u32               offset;     /* checksum offset */
     struct icmp6_filter filter;
+    __u32               ip6mr_table;
     /* ipv6_pinfo has to be the last member of raw6_sock, see inet6_sk_generic */
     struct ipv6_pinfo   inet6;
 };
include/linux/mroute6.h
@@ -24,7 +24,8 @@
 #define MRT6_DEL_MFC    (MRT6_BASE+5)   /* Delete a multicast forwarding entry  */
 #define MRT6_VERSION    (MRT6_BASE+6)   /* Get the kernel multicast version     */
 #define MRT6_ASSERT     (MRT6_BASE+7)   /* Activate PIM assert mode             */
 #define MRT6_PIM        (MRT6_BASE+8)   /* enable PIM code                      */
+#define MRT6_TABLE      (MRT6_BASE+9)   /* Specify mroute table ID              */

 #define SIOCGETMIFCNT_IN6   SIOCPROTOPRIVATE    /* IP protocol privates */
 #define SIOCGETSGCNT_IN6    (SIOCPROTOPRIVATE+1)
@@ -182,10 +183,7 @@ struct mif_device {
 #define VIFF_STATIC 0x8000

 struct mfc6_cache {
-    struct mfc6_cache *next;        /* Next entry on cache line */
-#ifdef CONFIG_NET_NS
-    struct net *mfc6_net;
-#endif
+    struct list_head list;
     struct in6_addr mf6c_mcastgrp;  /* Group the entry belongs to   */
     struct in6_addr mf6c_origin;    /* Source of packet             */
     mifi_t mf6c_parent;             /* Source interface             */
@@ -208,18 +206,6 @@ struct mfc6_cache {
     } mfc_un;
 };

-static inline struct net *mfc6_net(const struct mfc6_cache *mfc)
-{
-    return read_pnet(&mfc->mfc6_net);
-}
-
-static inline void mfc6_net_set(struct mfc6_cache *mfc, struct net *net)
-{
-    write_pnet(&mfc->mfc6_net, hold_net(net));
-}
-
 #define MFC_STATIC  1
 #define MFC_NOTIFY  2
@@ -244,14 +230,17 @@ extern int ip6mr_get_route(struct net *net, struct sk_buff *skb,
                struct rtmsg *rtm, int nowait);

 #ifdef CONFIG_IPV6_MROUTE
-static inline struct sock *mroute6_socket(struct net *net)
-{
-    return net->ipv6.mroute6_sk;
-}
+extern struct sock *mroute6_socket(struct net *net, struct sk_buff *skb);
 extern int ip6mr_sk_done(struct sock *sk);
 #else
-static inline struct sock *mroute6_socket(struct net *net)
+static inline struct sock *mroute6_socket(struct net *net, struct sk_buff *skb)
 {
     return NULL;
 }
 static inline int ip6mr_sk_done(struct sock *sk)
 {
     return 0;
 }
 #endif
 #endif
include/linux/rtnetlink.h
@@ -11,7 +11,8 @@
  * families, values above 128 may be used arbitrarily.
  */
 #define RTNL_FAMILY_IPMR        128
-#define RTNL_FAMILY_MAX         128
+#define RTNL_FAMILY_IP6MR       129
+#define RTNL_FAMILY_MAX         129

 /****
  *      Routing/neighbour discovery messages.
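The new RTNL_FAMILY_IP6MR value is what net/ipv6/ip6mr.c below registers an RTM_GETROUTE dump callback for (ip6mr_rtm_dumproute). As a rough illustration only, not part of this commit, a userspace dump request against that family could look like the sketch below; it assumes the exported netlink/rtnetlink headers and omits reply parsing and error handling.

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

/* Illustrative only: ask the kernel to dump the IPv6 multicast routing
 * cache via the RTNL_FAMILY_IP6MR rtnetlink family added above. */
static int ip6mr_dump_request(void)
{
    struct {
        struct nlmsghdr nlh;
        struct rtmsg    rtm;
    } req;
    int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

    if (fd < 0)
        return -1;

    memset(&req, 0, sizeof(req));
    req.nlh.nlmsg_len   = NLMSG_LENGTH(sizeof(struct rtmsg));
    req.nlh.nlmsg_type  = RTM_GETROUTE;
    req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
    req.rtm.rtm_family  = RTNL_FAMILY_IP6MR;    /* 129, added by this merge */

    if (send(fd, &req, req.nlh.nlmsg_len, 0) < 0) {
        close(fd);
        return -1;
    }
    /* The caller would now recv() the RTM_NEWROUTE replies that the kernel
     * builds in ip6mr_rtm_dumproute()/ip6mr_fill_mroute(). */
    return fd;
}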
include/net/netns/ipv6.h
@@ -59,15 +59,11 @@ struct netns_ipv6 {
     struct sock             *tcp_sk;
     struct sock             *igmp_sk;
 #ifdef CONFIG_IPV6_MROUTE
-    struct sock             *mroute6_sk;
-    struct mfc6_cache       **mfc6_cache_array;
-    struct mif_device       *vif6_table;
-    int                     maxvif;
-    atomic_t                cache_resolve_queue_len;
-    int                     mroute_do_assert;
-    int                     mroute_do_pim;
-#ifdef CONFIG_IPV6_PIMSM_V2
-    int                     mroute_reg_vif_num;
-#endif
+#ifndef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
+    struct mr6_table        *mrt6;
+#else
+    struct list_head        mr6_tables;
+    struct fib_rules_ops    *mr6_rules_ops;
+#endif
 #endif
 };
net/ipv6/Kconfig
@@ -229,6 +229,20 @@ config IPV6_MROUTE
       Experimental support for IPv6 multicast forwarding.
       If unsure, say N.

+config IPV6_MROUTE_MULTIPLE_TABLES
+   bool "IPv6: multicast policy routing"
+   depends on IPV6_MROUTE
+   select FIB_RULES
+   help
+     Normally, a multicast router runs a userspace daemon and decides
+     what to do with a multicast packet based on the source and
+     destination addresses. If you say Y here, the multicast router
+     will also be able to take interfaces and packet marks into
+     account and run multiple instances of userspace daemons
+     simultaneously, each one handling a single table.
+
+     If unsure, say N.
+
 config IPV6_PIMSM_V2
    bool "IPv6: PIM-SM version 2 support (EXPERIMENTAL)"
    depends on IPV6_MROUTE
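To make the new option concrete, here is a hedged userspace sketch (not from this commit) of how a multicast routing daemon might bind itself to a non-default mroute table with MRT6_TABLE. The table id 100 is arbitrary, and the ordering matters: per the setsockopt handler added in ip6mr.c, MRT6_TABLE must be set before MRT6_INIT, since the kernel refuses the change with -EBUSY once the socket is the active mroute socket of a table.

#include <stdint.h>
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/mroute6.h>

int main(void)
{
    /* The MRT6_* options apply to an ICMPv6 raw socket at the IPv6 level. */
    int fd = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
    uint32_t table = 100;   /* arbitrary table id, for illustration only */
    int on = 1;

    if (fd < 0) {
        perror("socket");
        return 1;
    }
    /* Select the table first; only then become its mroute socket. */
    if (setsockopt(fd, IPPROTO_IPV6, MRT6_TABLE, &table, sizeof(table)) < 0)
        perror("MRT6_TABLE");
    if (setsockopt(fd, IPPROTO_IPV6, MRT6_INIT, &on, sizeof(on)) < 0)
        perror("MRT6_INIT");

    /* A real daemon would now add interfaces (MRT6_ADD_MIF) and
     * forwarding entries (MRT6_ADD_MFC) scoped to this table. */
    return 0;
}

Multicast fib rules (handled by the ip6mr_rules_ops registered in ip6mr.c below) then steer packets to whichever table the matching daemon instance owns.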
net/ipv6/ip6_output.c
@@ -108,7 +108,7 @@ static int ip6_finish_output2(struct sk_buff *skb)
     struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

     if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(skb->sk) &&
-        ((mroute6_socket(dev_net(dev)) &&
+        ((mroute6_socket(dev_net(dev), skb) &&
          !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
          ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
                              &ipv6_hdr(skb)->saddr))) {
net/ipv6/ip6mr.c
@@ -42,6 +42,7 @@
 #include <linux/if_arp.h>
 #include <net/checksum.h>
 #include <net/netlink.h>
+#include <net/fib_rules.h>
 #include <net/ipv6.h>
 #include <net/ip6_route.h>
@@ -51,6 +52,34 @@
 #include <linux/netfilter_ipv6.h>
 #include <net/ip6_checksum.h>

+struct mr6_table {
+    struct list_head    list;
+#ifdef CONFIG_NET_NS
+    struct net          *net;
+#endif
+    u32                 id;
+    struct sock         *mroute6_sk;
+    struct timer_list   ipmr_expire_timer;
+    struct list_head    mfc6_unres_queue;
+    struct list_head    mfc6_cache_array[MFC6_LINES];
+    struct mif_device   vif6_table[MAXMIFS];
+    int                 maxvif;
+    atomic_t            cache_resolve_queue_len;
+    int                 mroute_do_assert;
+    int                 mroute_do_pim;
+#ifdef CONFIG_IPV6_PIMSM_V2
+    int                 mroute_reg_vif_num;
+#endif
+};
+
+struct ip6mr_rule {
+    struct fib_rule     common;
+};
+
+struct ip6mr_result {
+    struct mr6_table    *mrt;
+};
+
 /* Big lock, protecting vif table, mrt cache and mroute socket state.
    Note that the changes are semaphored via rtnl_lock.
  */
@@ -61,9 +90,7 @@ static DEFINE_RWLOCK(mrt_lock);
 /*
  *  Multicast router control variables
  */

-#define MIF_EXISTS(_net, _idx) ((_net)->ipv6.vif6_table[_idx].dev != NULL)
-
-static struct mfc6_cache *mfc_unres_queue;      /* Queue of unresolved entries */
+#define MIF_EXISTS(_mrt, _idx) ((_mrt)->vif6_table[_idx].dev != NULL)

 /* Special spinlock for queue of unresolved entries */
 static DEFINE_SPINLOCK(mfc_unres_lock);
@@ -78,20 +105,233 @@ static DEFINE_SPINLOCK(mfc_unres_lock);
 static struct kmem_cache *mrt_cachep __read_mostly;

-static int ip6_mr_forward(struct sk_buff *skb, struct mfc6_cache *cache);
-static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt,
-                              mifi_t mifi, int assert);
-static int ip6mr_fill_mroute(struct sk_buff *skb, struct mfc6_cache *c, struct rtmsg *rtm);
-static void mroute_clean_tables(struct net *net);
-
-static struct timer_list ipmr_expire_timer;
+static struct mr6_table *ip6mr_new_table(struct net *net, u32 id);
+static void ip6mr_free_table(struct mr6_table *mrt);
+
+static int ip6_mr_forward(struct net *net, struct mr6_table *mrt,
+                          struct sk_buff *skb, struct mfc6_cache *cache);
+static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
+                              mifi_t mifi, int assert);
+static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
+                               struct mfc6_cache *c, struct rtmsg *rtm);
+static int ip6mr_rtm_dumproute(struct sk_buff *skb,
+                               struct netlink_callback *cb);
+static void mroute_clean_tables(struct mr6_table *mrt);
+static void ipmr_expire_process(unsigned long arg);
+
+#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
+#define ip6mr_for_each_table(mrt, net) \
+    list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list)
+
+static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
+{
+    struct mr6_table *mrt;
+
+    ip6mr_for_each_table(mrt, net) {
+        if (mrt->id == id)
+            return mrt;
+    }
+    return NULL;
+}
+
+static int ip6mr_fib_lookup(struct net *net, struct flowi *flp,
+                            struct mr6_table **mrt)
+{
+    struct ip6mr_result res;
+    struct fib_lookup_arg arg = { .result = &res, };
+    int err;
+
+    err = fib_rules_lookup(net->ipv6.mr6_rules_ops, flp, 0, &arg);
+    if (err < 0)
+        return err;
+    *mrt = res.mrt;
+    return 0;
+}
+
+static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp,
+                             int flags, struct fib_lookup_arg *arg)
+{
+    struct ip6mr_result *res = arg->result;
+    struct mr6_table *mrt;
+
+    switch (rule->action) {
+    case FR_ACT_TO_TBL:
+        break;
+    case FR_ACT_UNREACHABLE:
+        return -ENETUNREACH;
+    case FR_ACT_PROHIBIT:
+        return -EACCES;
+    case FR_ACT_BLACKHOLE:
+    default:
+        return -EINVAL;
+    }
+
+    mrt = ip6mr_get_table(rule->fr_net, rule->table);
+    if (mrt == NULL)
+        return -EAGAIN;
+    res->mrt = mrt;
+    return 0;
+}
+
+static int ip6mr_rule_match(struct fib_rule *rule, struct flowi *flp, int flags)
+{
+    return 1;
+}
+
+static const struct nla_policy ip6mr_rule_policy[FRA_MAX + 1] = {
+    FRA_GENERIC_POLICY,
+};
+
+static int ip6mr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
+                                struct fib_rule_hdr *frh, struct nlattr **tb)
+{
+    return 0;
+}
+
+static int ip6mr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
+                              struct nlattr **tb)
+{
+    return 1;
+}
+
+static int ip6mr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
+                           struct fib_rule_hdr *frh)
+{
+    frh->dst_len = 0;
+    frh->src_len = 0;
+    frh->tos     = 0;
+    return 0;
+}
+
+static const struct fib_rules_ops __net_initdata ip6mr_rules_ops_template = {
+    .family         = RTNL_FAMILY_IP6MR,
+    .rule_size      = sizeof(struct ip6mr_rule),
+    .addr_size      = sizeof(struct in6_addr),
+    .action         = ip6mr_rule_action,
+    .match          = ip6mr_rule_match,
+    .configure      = ip6mr_rule_configure,
+    .compare        = ip6mr_rule_compare,
+    .default_pref   = fib_default_rule_pref,
+    .fill           = ip6mr_rule_fill,
+    .nlgroup        = RTNLGRP_IPV6_RULE,
+    .policy         = ip6mr_rule_policy,
+    .owner          = THIS_MODULE,
+};
+
+static int __net_init ip6mr_rules_init(struct net *net)
+{
+    struct fib_rules_ops *ops;
+    struct mr6_table *mrt;
+    int err;
+
+    ops = fib_rules_register(&ip6mr_rules_ops_template, net);
+    if (IS_ERR(ops))
+        return PTR_ERR(ops);
+
+    INIT_LIST_HEAD(&net->ipv6.mr6_tables);
+
+    mrt = ip6mr_new_table(net, RT6_TABLE_DFLT);
+    if (mrt == NULL) {
+        err = -ENOMEM;
+        goto err1;
+    }
+
+    err = fib_default_rule_add(ops, 0x7fff, RT6_TABLE_DFLT, 0);
+    if (err < 0)
+        goto err2;
+
+    net->ipv6.mr6_rules_ops = ops;
+    return 0;
+
+err2:
+    kfree(mrt);
+err1:
+    fib_rules_unregister(ops);
+    return err;
+}
+
+static void __net_exit ip6mr_rules_exit(struct net *net)
+{
+    struct mr6_table *mrt, *next;
+
+    list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list)
+        ip6mr_free_table(mrt);
+    fib_rules_unregister(net->ipv6.mr6_rules_ops);
+}
+#else
+#define ip6mr_for_each_table(mrt, net) \
+    for (mrt = net->ipv6.mrt6; mrt; mrt = NULL)
+
+static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
+{
+    return net->ipv6.mrt6;
+}
+
+static int ip6mr_fib_lookup(struct net *net, struct flowi *flp,
+                            struct mr6_table **mrt)
+{
+    *mrt = net->ipv6.mrt6;
+    return 0;
+}
+
+static int __net_init ip6mr_rules_init(struct net *net)
+{
+    net->ipv6.mrt6 = ip6mr_new_table(net, RT6_TABLE_DFLT);
+    return net->ipv6.mrt6 ? 0 : -ENOMEM;
+}
+
+static void __net_exit ip6mr_rules_exit(struct net *net)
+{
+    ip6mr_free_table(net->ipv6.mrt6);
+}
+#endif
+
+static struct mr6_table *ip6mr_new_table(struct net *net, u32 id)
+{
+    struct mr6_table *mrt;
+    unsigned int i;
+
+    mrt = ip6mr_get_table(net, id);
+    if (mrt != NULL)
+        return mrt;
+
+    mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
+    if (mrt == NULL)
+        return NULL;
+    mrt->id = id;
+    write_pnet(&mrt->net, net);
+
+    /* Forwarding cache */
+    for (i = 0; i < MFC6_LINES; i++)
+        INIT_LIST_HEAD(&mrt->mfc6_cache_array[i]);
+
+    INIT_LIST_HEAD(&mrt->mfc6_unres_queue);
+
+    setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
+                (unsigned long)mrt);
+
+#ifdef CONFIG_IPV6_PIMSM_V2
+    mrt->mroute_reg_vif_num = -1;
+#endif
+#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
+    list_add_tail_rcu(&mrt->list, &net->ipv6.mr6_tables);
+#endif
+    return mrt;
+}
+
+static void ip6mr_free_table(struct mr6_table *mrt)
+{
+    del_timer(&mrt->ipmr_expire_timer);
+    mroute_clean_tables(mrt);
+    kfree(mrt);
+}

 #ifdef CONFIG_PROC_FS

 struct ipmr_mfc_iter {
     struct seq_net_private p;
-    struct mfc6_cache **cache;
+    struct mr6_table *mrt;
+    struct list_head *cache;
     int ct;
 };
@@ -99,22 +339,22 @@ struct ipmr_mfc_iter {
 static struct mfc6_cache *ipmr_mfc_seq_idx(struct net *net,
                                            struct ipmr_mfc_iter *it, loff_t pos)
 {
+    struct mr6_table *mrt = it->mrt;
     struct mfc6_cache *mfc;

-    it->cache = net->ipv6.mfc6_cache_array;
     read_lock(&mrt_lock);
-    for (it->ct = 0; it->ct < MFC6_LINES; it->ct++)
-        for (mfc = net->ipv6.mfc6_cache_array[it->ct];
-             mfc; mfc = mfc->next)
+    for (it->ct = 0; it->ct < MFC6_LINES; it->ct++) {
+        it->cache = &mrt->mfc6_cache_array[it->ct];
+        list_for_each_entry(mfc, it->cache, list)
             if (pos-- == 0)
                 return mfc;
+    }
     read_unlock(&mrt_lock);

-    it->cache = &mfc_unres_queue;
+    it->cache = &mrt->mfc6_unres_queue;
     spin_lock_bh(&mfc_unres_lock);
-    for (mfc = mfc_unres_queue; mfc; mfc = mfc->next)
-        if (net_eq(mfc6_net(mfc), net) &&
-            pos-- == 0)
+    list_for_each_entry(mfc, it->cache, list)
+        if (pos-- == 0)
             return mfc;
     spin_unlock_bh(&mfc_unres_lock);
@@ -122,15 +362,13 @@ static struct mfc6_cache *ipmr_mfc_seq_idx(struct net *net,
     return NULL;
 }

-/*
- *  The /proc interfaces to multicast routing /proc/ip6_mr_cache /proc/ip6_mr_vif
- */
-
 struct ipmr_vif_iter {
     struct seq_net_private p;
+    struct mr6_table *mrt;
     int ct;
 };
@@ -138,11 +376,13 @@ static struct mif_device *ip6mr_vif_seq_idx(struct net *net,
                                             struct ipmr_vif_iter *iter,
                                             loff_t pos)
 {
-    for (iter->ct = 0; iter->ct < net->ipv6.maxvif; ++iter->ct) {
-        if (!MIF_EXISTS(net, iter->ct))
+    struct mr6_table *mrt = iter->mrt;
+
+    for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
+        if (!MIF_EXISTS(mrt, iter->ct))
             continue;
         if (pos-- == 0)
-            return &net->ipv6.vif6_table[iter->ct];
+            return &mrt->vif6_table[iter->ct];
     }
     return NULL;
 }
@@ -150,7 +390,15 @@ static struct mif_device *ip6mr_vif_seq_idx(struct net *net,
 static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
     __acquires(mrt_lock)
 {
+    struct ipmr_vif_iter *iter = seq->private;
     struct net *net = seq_file_net(seq);
+    struct mr6_table *mrt;
+
+    mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
+    if (mrt == NULL)
+        return ERR_PTR(-ENOENT);
+
+    iter->mrt = mrt;

     read_lock(&mrt_lock);
     return *pos ? ip6mr_vif_seq_idx(net, seq->private, *pos - 1)
@@ -161,15 +409,16 @@ static void *ip6mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
     struct ipmr_vif_iter *iter = seq->private;
     struct net *net = seq_file_net(seq);
+    struct mr6_table *mrt = iter->mrt;

     ++*pos;
     if (v == SEQ_START_TOKEN)
         return ip6mr_vif_seq_idx(net, iter, 0);

-    while (++iter->ct < net->ipv6.maxvif) {
-        if (!MIF_EXISTS(net, iter->ct))
+    while (++iter->ct < mrt->maxvif) {
+        if (!MIF_EXISTS(mrt, iter->ct))
             continue;
-        return &net->ipv6.vif6_table[iter->ct];
+        return &mrt->vif6_table[iter->ct];
     }
     return NULL;
 }
@@ -182,7 +431,8 @@ static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v)
 static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
 {
-    struct net *net = seq_file_net(seq);
+    struct ipmr_vif_iter *iter = seq->private;
+    struct mr6_table *mrt = iter->mrt;

     if (v == SEQ_START_TOKEN) {
         seq_puts(seq,
@@ -193,7 +443,7 @@ static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
         seq_printf(seq,
                    "%2td %-10s %8ld %7ld %8ld %7ld %05X\n",
-                   vif - net->ipv6.vif6_table,
+                   vif - mrt->vif6_table,
                    name, vif->bytes_in, vif->pkt_in,
                    vif->bytes_out, vif->pkt_out,
                    vif->flags);
@@ -224,8 +474,15 @@ static const struct file_operations ip6mr_vif_fops = {
 static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
 {
+    struct ipmr_mfc_iter *it = seq->private;
     struct net *net = seq_file_net(seq);
+    struct mr6_table *mrt;
+
+    mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
+    if (mrt == NULL)
+        return ERR_PTR(-ENOENT);
+
+    it->mrt = mrt;

     return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
         : SEQ_START_TOKEN;
 }
@@ -235,35 +492,36 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
     struct mfc6_cache *mfc = v;
     struct ipmr_mfc_iter *it = seq->private;
     struct net *net = seq_file_net(seq);
+    struct mr6_table *mrt = it->mrt;

     ++*pos;

     if (v == SEQ_START_TOKEN)
         return ipmr_mfc_seq_idx(net, seq->private, 0);

-    if (mfc->next)
-        return mfc->next;
+    if (mfc->list.next != it->cache)
+        return list_entry(mfc->list.next, struct mfc6_cache, list);

-    if (it->cache == &mfc_unres_queue)
+    if (it->cache == &mrt->mfc6_unres_queue)
         goto end_of_list;

-    BUG_ON(it->cache != net->ipv6.mfc6_cache_array);
+    BUG_ON(it->cache != &mrt->mfc6_cache_array[it->ct]);

     while (++it->ct < MFC6_LINES) {
-        mfc = net->ipv6.mfc6_cache_array[it->ct];
-        if (mfc)
-            return mfc;
+        it->cache = &mrt->mfc6_cache_array[it->ct];
+        if (list_empty(it->cache))
+            continue;
+        return list_first_entry(it->cache, struct mfc6_cache, list);
     }

     /* exhausted cache_array, show unresolved */
     read_unlock(&mrt_lock);
-    it->cache = &mfc_unres_queue;
+    it->cache = &mrt->mfc6_unres_queue;
     it->ct = 0;

     spin_lock_bh(&mfc_unres_lock);
-    mfc = mfc_unres_queue;
-    if (mfc)
-        return mfc;
+    if (!list_empty(it->cache))
+        return list_first_entry(it->cache, struct mfc6_cache, list);

  end_of_list:
     spin_unlock_bh(&mfc_unres_lock);
@@ -275,18 +533,17 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
 {
     struct ipmr_mfc_iter *it = seq->private;
-    struct net *net = seq_file_net(seq);
+    struct mr6_table *mrt = it->mrt;

-    if (it->cache == &mfc_unres_queue)
+    if (it->cache == &mrt->mfc6_unres_queue)
         spin_unlock_bh(&mfc_unres_lock);
-    else if (it->cache == net->ipv6.mfc6_cache_array)
+    else if (it->cache == mrt->mfc6_cache_array)
         read_unlock(&mrt_lock);
 }

 static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
 {
     int n;
-    struct net *net = seq_file_net(seq);

     if (v == SEQ_START_TOKEN) {
         seq_puts(seq,
@@ -296,19 +553,20 @@ static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
     } else {
         const struct mfc6_cache *mfc = v;
         const struct ipmr_mfc_iter *it = seq->private;
+        struct mr6_table *mrt = it->mrt;

         seq_printf(seq, "%pI6 %pI6 %-3hd",
                    &mfc->mf6c_mcastgrp, &mfc->mf6c_origin,
                    mfc->mf6c_parent);

-        if (it->cache != &mfc_unres_queue) {
+        if (it->cache != &mrt->mfc6_unres_queue) {
             seq_printf(seq, " %8lu %8lu %8lu",
                        mfc->mfc_un.res.pkt,
                        mfc->mfc_un.res.bytes,
                        mfc->mfc_un.res.wrong_if);
             for (n = mfc->mfc_un.res.minvif;
                  n < mfc->mfc_un.res.maxvif; n++) {
-                if (MIF_EXISTS(net, n) &&
+                if (MIF_EXISTS(mrt, n) &&
                     mfc->mfc_un.res.ttls[n] < 255)
                     seq_printf(seq,
                                " %2d:%-3d",
@@ -355,7 +613,12 @@ static int pim6_rcv(struct sk_buff *skb)
     struct ipv6hdr *encap;
     struct net_device *reg_dev = NULL;
     struct net *net = dev_net(skb->dev);
-    int reg_vif_num = net->ipv6.mroute_reg_vif_num;
+    struct mr6_table *mrt;
+    struct flowi fl = {
+        .iif    = skb->dev->ifindex,
+        .mark   = skb->mark,
+    };
+    int reg_vif_num;

     if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
         goto drop;
@@ -378,9 +641,13 @@ static int pim6_rcv(struct sk_buff *skb)
         ntohs(encap->payload_len) + sizeof(*pim) > skb->len)
         goto drop;

+    if (ip6mr_fib_lookup(net, &fl, &mrt) < 0)
+        goto drop;
+    reg_vif_num = mrt->mroute_reg_vif_num;
+
     read_lock(&mrt_lock);
     if (reg_vif_num >= 0)
-        reg_dev = net->ipv6.vif6_table[reg_vif_num].dev;
+        reg_dev = mrt->vif6_table[reg_vif_num].dev;
     if (reg_dev)
         dev_hold(reg_dev);
     read_unlock(&mrt_lock);
@@ -417,12 +684,22 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
                                 struct net_device *dev)
 {
     struct net *net = dev_net(dev);
+    struct mr6_table *mrt;
+    struct flowi fl = {
+        .oif    = dev->ifindex,
+        .iif    = skb->skb_iif,
+        .mark   = skb->mark,
+    };
+    int err;
+
+    err = ip6mr_fib_lookup(net, &fl, &mrt);
+    if (err < 0)
+        return err;

     read_lock(&mrt_lock);
     dev->stats.tx_bytes += skb->len;
     dev->stats.tx_packets++;
-    ip6mr_cache_report(net, skb, net->ipv6.mroute_reg_vif_num,
-                       MRT6MSG_WHOLEPKT);
+    ip6mr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, MRT6MSG_WHOLEPKT);
     read_unlock(&mrt_lock);
     kfree_skb(skb);
     return NETDEV_TX_OK;
@@ -442,11 +719,17 @@ static void reg_vif_setup(struct net_device *dev)
     dev->features       |= NETIF_F_NETNS_LOCAL;
 }

-static struct net_device *ip6mr_reg_vif(struct net *net)
+static struct net_device *ip6mr_reg_vif(struct net *net, struct mr6_table *mrt)
 {
     struct net_device *dev;
+    char name[IFNAMSIZ];

-    dev = alloc_netdev(0, "pim6reg", reg_vif_setup);
+    if (mrt->id == RT6_TABLE_DFLT)
+        sprintf(name, "pim6reg");
+    else
+        sprintf(name, "pim6reg%u", mrt->id);
+
+    dev = alloc_netdev(0, name, reg_vif_setup);
     if (dev == NULL)
         return NULL;
@@ -478,15 +761,16 @@ static struct net_device *ip6mr_reg_vif(struct net *net)
  *  Delete a VIF entry
  */

-static int mif6_delete(struct net *net, int vifi, struct list_head *head)
+static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head)
 {
     struct mif_device *v;
     struct net_device *dev;
     struct inet6_dev *in6_dev;
-    if (vifi < 0 || vifi >= net->ipv6.maxvif)
+
+    if (vifi < 0 || vifi >= mrt->maxvif)
         return -EADDRNOTAVAIL;

-    v = &net->ipv6.vif6_table[vifi];
+    v = &mrt->vif6_table[vifi];

     write_lock_bh(&mrt_lock);
     dev = v->dev;
@@ -498,17 +782,17 @@ static int mif6_delete(struct net *net, int vifi, struct list_head *head)
     }

 #ifdef CONFIG_IPV6_PIMSM_V2
-    if (vifi == net->ipv6.mroute_reg_vif_num)
-        net->ipv6.mroute_reg_vif_num = -1;
+    if (vifi == mrt->mroute_reg_vif_num)
+        mrt->mroute_reg_vif_num = -1;
 #endif

-    if (vifi + 1 == net->ipv6.maxvif) {
+    if (vifi + 1 == mrt->maxvif) {
         int tmp;
         for (tmp = vifi - 1; tmp >= 0; tmp--) {
-            if (MIF_EXISTS(net, tmp))
+            if (MIF_EXISTS(mrt, tmp))
                 break;
         }
-        net->ipv6.maxvif = tmp + 1;
+        mrt->maxvif = tmp + 1;
     }

     write_unlock_bh(&mrt_lock);
@@ -528,7 +812,6 @@ static int mif6_delete(struct net *net, int vifi, struct list_head *head)
 static inline void ip6mr_cache_free(struct mfc6_cache *c)
 {
-    release_net(mfc6_net(c));
     kmem_cache_free(mrt_cachep, c);
 }
@@ -536,12 +819,12 @@ static inline void ip6mr_cache_free(struct mfc6_cache *c)
    and reporting error to netlink readers.
  */

-static void ip6mr_destroy_unres(struct mfc6_cache *c)
+static void ip6mr_destroy_unres(struct mr6_table *mrt, struct mfc6_cache *c)
 {
+    struct net *net = read_pnet(&mrt->net);
     struct sk_buff *skb;
-    struct net *net = mfc6_net(c);

-    atomic_dec(&net->ipv6.cache_resolve_queue_len);
+    atomic_dec(&mrt->cache_resolve_queue_len);

     while((skb = skb_dequeue(&c->mfc_un.unres.unresolved)) != NULL) {
         if (ipv6_hdr(skb)->version == 0) {
@@ -559,60 +842,59 @@ static void ip6mr_destroy_unres(struct mfc6_cache *c)
 }

-/* Single timer process for all the unresolved queue. */
+/* Timer process for all the unresolved queue. */

-static void ipmr_do_expire_process(unsigned long dummy)
+static void ipmr_do_expire_process(struct mr6_table *mrt)
 {
     unsigned long now = jiffies;
     unsigned long expires = 10 * HZ;
-    struct mfc6_cache *c, **cp;
+    struct mfc6_cache *c, *next;

-    cp = &mfc_unres_queue;
-
-    while ((c = *cp) != NULL) {
+    list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) {
         if (time_after(c->mfc_un.unres.expires, now)) {
             /* not yet... */
             unsigned long interval = c->mfc_un.unres.expires - now;
             if (interval < expires)
                 expires = interval;
-            cp = &c->next;
             continue;
         }

-        *cp = c->next;
-        ip6mr_destroy_unres(c);
+        list_del(&c->list);
+        ip6mr_destroy_unres(mrt, c);
     }

-    if (mfc_unres_queue != NULL)
-        mod_timer(&ipmr_expire_timer, jiffies + expires);
+    if (!list_empty(&mrt->mfc6_unres_queue))
+        mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
 }

-static void ipmr_expire_process(unsigned long dummy)
+static void ipmr_expire_process(unsigned long arg)
 {
+    struct mr6_table *mrt = (struct mr6_table *)arg;
+
     if (!spin_trylock(&mfc_unres_lock)) {
-        mod_timer(&ipmr_expire_timer, jiffies + 1);
+        mod_timer(&mrt->ipmr_expire_timer, jiffies + 1);
         return;
     }

-    if (mfc_unres_queue != NULL)
-        ipmr_do_expire_process(dummy);
+    if (!list_empty(&mrt->mfc6_unres_queue))
+        ipmr_do_expire_process(mrt);

     spin_unlock(&mfc_unres_lock);
 }

 /* Fill oifs list. It is called under write locked mrt_lock. */

-static void ip6mr_update_thresholds(struct mfc6_cache *cache, unsigned char *ttls)
+static void ip6mr_update_thresholds(struct mr6_table *mrt, struct mfc6_cache *cache,
+                                    unsigned char *ttls)
 {
     int vifi;
-    struct net *net = mfc6_net(cache);

     cache->mfc_un.res.minvif = MAXMIFS;
     cache->mfc_un.res.maxvif = 0;
     memset(cache->mfc_un.res.ttls, 255, MAXMIFS);

-    for (vifi = 0; vifi < net->ipv6.maxvif; vifi++) {
-        if (MIF_EXISTS(net, vifi) &&
+    for (vifi = 0; vifi < mrt->maxvif; vifi++) {
+        if (MIF_EXISTS(mrt, vifi) &&
             ttls[vifi] && ttls[vifi] < 255) {
             cache->mfc_un.res.ttls[vifi] = ttls[vifi];
             if (cache->mfc_un.res.minvif > vifi)
@@ -623,16 +905,17 @@ static void ip6mr_update_thresholds(struct mfc6_cache *cache, unsigned char *ttls)
     }
 }

-static int mif6_add(struct net *net, struct mif6ctl *vifc, int mrtsock)
+static int mif6_add(struct net *net, struct mr6_table *mrt,
+                    struct mif6ctl *vifc, int mrtsock)
 {
     int vifi = vifc->mif6c_mifi;
-    struct mif_device *v = &net->ipv6.vif6_table[vifi];
+    struct mif_device *v = &mrt->vif6_table[vifi];
     struct net_device *dev;
     struct inet6_dev *in6_dev;
     int err;

     /* Is vif busy ? */
-    if (MIF_EXISTS(net, vifi))
+    if (MIF_EXISTS(mrt, vifi))
         return -EADDRINUSE;

     switch (vifc->mif6c_flags) {
@@ -642,9 +925,9 @@ static int mif6_add(struct net *net, struct mif6ctl *vifc, int mrtsock)
          * Special Purpose VIF in PIM
          * All the packets will be sent to the daemon
          */
-        if (net->ipv6.mroute_reg_vif_num >= 0)
+        if (mrt->mroute_reg_vif_num >= 0)
             return -EADDRINUSE;
-        dev = ip6mr_reg_vif(net);
+        dev = ip6mr_reg_vif(net, mrt);
         if (!dev)
             return -ENOBUFS;
         err = dev_set_allmulti(dev, 1);
@@ -694,50 +977,48 @@ static int mif6_add(struct net *net, struct mif6ctl *vifc, int mrtsock)
     v->dev = dev;
 #ifdef CONFIG_IPV6_PIMSM_V2
     if (v->flags & MIFF_REGISTER)
-        net->ipv6.mroute_reg_vif_num = vifi;
+        mrt->mroute_reg_vif_num = vifi;
 #endif
-    if (vifi + 1 > net->ipv6.maxvif)
-        net->ipv6.maxvif = vifi + 1;
+    if (vifi + 1 > mrt->maxvif)
+        mrt->maxvif = vifi + 1;
     write_unlock_bh(&mrt_lock);
     return 0;
 }

-static struct mfc6_cache *ip6mr_cache_find(struct net *net,
+static struct mfc6_cache *ip6mr_cache_find(struct mr6_table *mrt,
                                            struct in6_addr *origin,
                                            struct in6_addr *mcastgrp)
 {
     int line = MFC6_HASH(mcastgrp, origin);
     struct mfc6_cache *c;

-    for (c = net->ipv6.mfc6_cache_array[line]; c; c = c->next) {
+    list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
         if (ipv6_addr_equal(&c->mf6c_origin, origin) &&
             ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp))
-            break;
+            return c;
     }
-    return c;
+    return NULL;
 }

 /*
  *  Allocate a multicast cache entry
  */
-static struct mfc6_cache *ip6mr_cache_alloc(struct net *net)
+static struct mfc6_cache *ip6mr_cache_alloc(void)
 {
     struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
     if (c == NULL)
         return NULL;
     c->mfc_un.res.minvif = MAXMIFS;
-    mfc6_net_set(c, net);
     return c;
 }

-static struct mfc6_cache *ip6mr_cache_alloc_unres(struct net *net)
+static struct mfc6_cache *ip6mr_cache_alloc_unres(void)
 {
     struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
     if (c == NULL)
         return NULL;
     skb_queue_head_init(&c->mfc_un.unres.unresolved);
     c->mfc_un.unres.expires = jiffies + 10 * HZ;
-    mfc6_net_set(c, net);
     return c;
 }
@@ -745,7 +1026,8 @@ static struct mfc6_cache *ip6mr_cache_alloc_unres(struct net *net)
  *  A cache entry has gone into a resolved state from queued
  */

-static void ip6mr_cache_resolve(struct mfc6_cache *uc, struct mfc6_cache *c)
+static void ip6mr_cache_resolve(struct net *net, struct mr6_table *mrt,
+                                struct mfc6_cache *uc, struct mfc6_cache *c)
 {
     struct sk_buff *skb;
@@ -758,7 +1040,7 @@ static void ip6mr_cache_resolve(struct mfc6_cache *uc, struct mfc6_cache *c)
         int err;
         struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));

-        if (ip6mr_fill_mroute(skb, c, NLMSG_DATA(nlh)) > 0) {
+        if (__ip6mr_fill_mroute(mrt, skb, c, NLMSG_DATA(nlh)) > 0) {
             nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh;
         } else {
             nlh->nlmsg_type = NLMSG_ERROR;
@@ -766,9 +1048,9 @@ static void ip6mr_cache_resolve(struct mfc6_cache *uc, struct mfc6_cache *c)
             skb_trim(skb, nlh->nlmsg_len);
             ((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -EMSGSIZE;
         }
-        err = rtnl_unicast(skb, mfc6_net(uc), NETLINK_CB(skb).pid);
+        err = rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
         } else
-            ip6_mr_forward(skb, c);
+            ip6_mr_forward(net, mrt, skb, c);
     }
 }
@@ -779,8 +1061,8 @@ static void ip6mr_cache_resolve(struct mfc6_cache *uc, struct mfc6_cache *c)
  *  Called under mrt_lock.
  */

-static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt, mifi_t mifi,
-                              int assert)
+static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
+                              mifi_t mifi, int assert)
 {
     struct sk_buff *skb;
     struct mrt6msg *msg;
@@ -816,7 +1098,7 @@ static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt, mifi_t mifi,
         msg = (struct mrt6msg *)skb_transport_header(skb);
         msg->im6_mbz = 0;
         msg->im6_msgtype = MRT6MSG_WHOLEPKT;
-        msg->im6_mif = net->ipv6.mroute_reg_vif_num;
+        msg->im6_mif = mrt->mroute_reg_vif_num;
         msg->im6_pad = 0;
         ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr);
         ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr);
@@ -851,7 +1133,7 @@ static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt, mifi_t mifi,
         skb->ip_summed = CHECKSUM_UNNECESSARY;
     }

-    if (net->ipv6.mroute6_sk == NULL) {
+    if (mrt->mroute6_sk == NULL) {
         kfree_skb(skb);
         return -EINVAL;
     }
@@ -859,7 +1141,7 @@ static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt, mifi_t mifi,
     /*
      *  Deliver to user space multicast routing algorithms
      */
-    ret = sock_queue_rcv_skb(net->ipv6.mroute6_sk, skb);
+    ret = sock_queue_rcv_skb(mrt->mroute6_sk, skb);
     if (ret < 0) {
         if (net_ratelimit())
             printk(KERN_WARNING "mroute6: pending queue full, dropping entries.\n");
@@ -874,26 +1156,28 @@ static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt, mifi_t mifi,
  */

 static int
-ip6mr_cache_unresolved(struct net *net, mifi_t mifi, struct sk_buff *skb)
+ip6mr_cache_unresolved(struct mr6_table *mrt, mifi_t mifi, struct sk_buff *skb)
 {
+    bool found = false;
     int err;
     struct mfc6_cache *c;

     spin_lock_bh(&mfc_unres_lock);
-    for (c = mfc_unres_queue; c; c = c->next) {
-        if (net_eq(mfc6_net(c), net) &&
-            ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
-            ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr))
+    list_for_each_entry(c, &mrt->mfc6_unres_queue, list) {
+        if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
+            ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr)) {
+            found = true;
             break;
+        }
     }

-    if (c == NULL) {
+    if (!found) {
         /*
          *  Create a new entry if allowable
          */

-        if (atomic_read(&net->ipv6.cache_resolve_queue_len) >= 10 ||
-            (c = ip6mr_cache_alloc_unres(net)) == NULL) {
+        if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
+            (c = ip6mr_cache_alloc_unres()) == NULL) {
             spin_unlock_bh(&mfc_unres_lock);

             kfree_skb(skb);
@@ -910,7 +1194,7 @@ ip6mr_cache_unresolved(struct net *net, mifi_t mifi, struct sk_buff *skb)
         /*
          *  Reflect first query at pim6sd
          */
-        err = ip6mr_cache_report(net, skb, mifi, MRT6MSG_NOCACHE);
+        err = ip6mr_cache_report(mrt, skb, mifi, MRT6MSG_NOCACHE);
         if (err < 0) {
             /* If the report failed throw the cache entry
                out - Brad Parker
@@ -922,11 +1206,10 @@ ip6mr_cache_unresolved(struct net *net, mifi_t mifi, struct sk_buff *skb)
             return err;
         }

-        atomic_inc(&net->ipv6.cache_resolve_queue_len);
-        c->next = mfc_unres_queue;
-        mfc_unres_queue = c;
+        atomic_inc(&mrt->cache_resolve_queue_len);
+        list_add(&c->list, &mrt->mfc6_unres_queue);

-        ipmr_do_expire_process(1);
+        ipmr_do_expire_process(mrt);
     }

     /*
@@ -948,19 +1231,18 @@ ip6mr_cache_unresolved(struct net *net, mifi_t mifi, struct sk_buff *skb)
  *  MFC6 cache manipulation by user space
  */

-static int ip6mr_mfc_delete(struct net *net, struct mf6cctl *mfc)
+static int ip6mr_mfc_delete(struct mr6_table *mrt, struct mf6cctl *mfc)
 {
     int line;
-    struct mfc6_cache *c, **cp;
+    struct mfc6_cache *c, *next;

     line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);

-    for (cp = &net->ipv6.mfc6_cache_array[line];
-         (c = *cp) != NULL; cp = &c->next) {
+    list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[line], list) {
         if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
             ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr)) {
             write_lock_bh(&mrt_lock);
-            *cp = c->next;
+            list_del(&c->list);
             write_unlock_bh(&mrt_lock);

             ip6mr_cache_free(c);
@@ -975,6 +1257,7 @@ static int ip6mr_device_event(struct notifier_block *this,
 {
     struct net_device *dev = ptr;
     struct net *net = dev_net(dev);
+    struct mr6_table *mrt;
     struct mif_device *v;
     int ct;
     LIST_HEAD(list);
@@ -982,10 +1265,12 @@ static int ip6mr_device_event(struct notifier_block *this,
     if (event != NETDEV_UNREGISTER)
         return NOTIFY_DONE;

-    v = &net->ipv6.vif6_table[0];
-    for (ct = 0; ct < net->ipv6.maxvif; ct++, v++) {
-        if (v->dev == dev)
-            mif6_delete(net, ct, &list);
+    ip6mr_for_each_table(mrt, net) {
+        v = &mrt->vif6_table[0];
+        for (ct = 0; ct < mrt->maxvif; ct++, v++) {
+            if (v->dev == dev)
+                mif6_delete(mrt, ct, &list);
+        }
     }
     unregister_netdevice_many(&list);
@@ -1002,26 +1287,11 @@ static struct notifier_block ip6_mr_notifier = {
 static int __net_init ip6mr_net_init(struct net *net)
 {
-    int err = 0;
+    int err;

-    net->ipv6.vif6_table = kcalloc(MAXMIFS, sizeof(struct mif_device),
-                                   GFP_KERNEL);
-    if (!net->ipv6.vif6_table) {
-        err = -ENOMEM;
-        goto fail;
-    }
-
-    /* Forwarding cache */
-    net->ipv6.mfc6_cache_array = kcalloc(MFC6_LINES,
-                                         sizeof(struct mfc6_cache *),
-                                         GFP_KERNEL);
-    if (!net->ipv6.mfc6_cache_array) {
-        err = -ENOMEM;
-        goto fail_mfc6_cache;
-    }
-
-#ifdef CONFIG_IPV6_PIMSM_V2
-    net->ipv6.mroute_reg_vif_num = -1;
-#endif
+    err = ip6mr_rules_init(net);
+    if (err < 0)
+        goto fail;

 #ifdef CONFIG_PROC_FS
     err = -ENOMEM;
@@ -1030,16 +1300,15 @@ static int __net_init ip6mr_net_init(struct net *net)
     if (!proc_net_fops_create(net, "ip6_mr_cache", 0, &ip6mr_mfc_fops))
         goto proc_cache_fail;
 #endif
+
     return 0;

 #ifdef CONFIG_PROC_FS
 proc_cache_fail:
     proc_net_remove(net, "ip6_mr_vif");
 proc_vif_fail:
-    kfree(net->ipv6.mfc6_cache_array);
+    ip6mr_rules_exit(net);
 #endif
-fail_mfc6_cache:
-    kfree(net->ipv6.vif6_table);
 fail:
     return err;
 }
@@ -1050,9 +1319,7 @@ static void __net_exit ip6mr_net_exit(struct net *net)
     proc_net_remove(net, "ip6_mr_cache");
     proc_net_remove(net, "ip6_mr_vif");
 #endif
-    mroute_clean_tables(net);
-    kfree(net->ipv6.mfc6_cache_array);
-    kfree(net->ipv6.vif6_table);
+    ip6mr_rules_exit(net);
 }

 static struct pernet_operations ip6mr_net_ops = {
@@ -1075,7 +1342,6 @@ int __init ip6_mr_init(void)
     if (err)
         goto reg_pernet_fail;

-    setup_timer(&ipmr_expire_timer, ipmr_expire_process, 0);
     err = register_netdevice_notifier(&ip6_mr_notifier);
     if (err)
         goto reg_notif_fail;
@@ -1086,13 +1352,13 @@ int __init ip6_mr_init(void)
         goto add_proto_fail;
     }
 #endif
+    rtnl_register(RTNL_FAMILY_IP6MR, RTM_GETROUTE, NULL, ip6mr_rtm_dumproute);
     return 0;
+
 #ifdef CONFIG_IPV6_PIMSM_V2
 add_proto_fail:
     unregister_netdevice_notifier(&ip6_mr_notifier);
 #endif
 reg_notif_fail:
-    del_timer(&ipmr_expire_timer);
     unregister_pernet_subsys(&ip6mr_net_ops);
 reg_pernet_fail:
     kmem_cache_destroy(mrt_cachep);
@@ -1102,15 +1368,16 @@ int __init ip6_mr_init(void)
 void ip6_mr_cleanup(void)
 {
     unregister_netdevice_notifier(&ip6_mr_notifier);
-    del_timer(&ipmr_expire_timer);
     unregister_pernet_subsys(&ip6mr_net_ops);
     kmem_cache_destroy(mrt_cachep);
 }

-static int ip6mr_mfc_add(struct net *net, struct mf6cctl *mfc, int mrtsock)
+static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt,
+                         struct mf6cctl *mfc, int mrtsock)
 {
+    bool found = false;
     int line;
-    struct mfc6_cache *uc, *c, **cp;
+    struct mfc6_cache *uc, *c;
     unsigned char ttls[MAXMIFS];
     int i;
@@ -1126,17 +1393,18 @@ static int ip6mr_mfc_add(struct net *net, struct mf6cctl *mfc, int mrtsock)
     line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);

-    for (cp = &net->ipv6.mfc6_cache_array[line];
-         (c = *cp) != NULL; cp = &c->next) {
+    list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
         if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
-            ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr))
+            ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr)) {
+            found = true;
             break;
+        }
     }

-    if (c != NULL) {
+    if (found) {
         write_lock_bh(&mrt_lock);
         c->mf6c_parent = mfc->mf6cc_parent;
-        ip6mr_update_thresholds(c, ttls);
+        ip6mr_update_thresholds(mrt, c, ttls);
         if (!mrtsock)
             c->mfc_flags |= MFC_STATIC;
         write_unlock_bh(&mrt_lock);
@@ -1146,43 +1414,42 @@ static int ip6mr_mfc_add(struct net *net, struct mf6cctl *mfc, int mrtsock)
     if (!ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))
         return -EINVAL;

-    c = ip6mr_cache_alloc(net);
+    c = ip6mr_cache_alloc();
     if (c == NULL)
         return -ENOMEM;

     c->mf6c_origin = mfc->mf6cc_origin.sin6_addr;
     c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr;
     c->mf6c_parent = mfc->mf6cc_parent;
-    ip6mr_update_thresholds(c, ttls);
+    ip6mr_update_thresholds(mrt, c, ttls);
     if (!mrtsock)
         c->mfc_flags |= MFC_STATIC;

     write_lock_bh(&mrt_lock);
-    c->next = net->ipv6.mfc6_cache_array[line];
-    net->ipv6.mfc6_cache_array[line] = c;
+    list_add(&c->list, &mrt->mfc6_cache_array[line]);
     write_unlock_bh(&mrt_lock);

     /*
      *  Check to see if we resolved a queued list. If so we
      *  need to send on the frames and tidy up.
      */
+    found = false;
     spin_lock_bh(&mfc_unres_lock);
-    for (cp = &mfc_unres_queue; (uc = *cp) != NULL;
-         cp = &uc->next) {
-        if (net_eq(mfc6_net(uc), net) &&
-            ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
+    list_for_each_entry(uc, &mrt->mfc6_unres_queue, list) {
+        if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
             ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) {
-            *cp = uc->next;
-            atomic_dec(&net->ipv6.cache_resolve_queue_len);
+            list_del(&uc->list);
+            atomic_dec(&mrt->cache_resolve_queue_len);
+            found = true;
             break;
         }
     }
-    if (mfc_unres_queue == NULL)
-        del_timer(&ipmr_expire_timer);
+    if (list_empty(&mrt->mfc6_unres_queue))
+        del_timer(&mrt->ipmr_expire_timer);
     spin_unlock_bh(&mfc_unres_lock);

-    if (uc) {
-        ip6mr_cache_resolve(uc, c);
+    if (found) {
+        ip6mr_cache_resolve(net, mrt, uc, c);
         ip6mr_cache_free(uc);
     }
     return 0;
@@ -1192,17 +1459,18 @@ static int ip6mr_mfc_add(struct net *net, struct mf6cctl *mfc, int mrtsock)
  *  Close the multicast socket, and clear the vif tables etc
  */

-static void mroute_clean_tables(struct net *net)
+static void mroute_clean_tables(struct mr6_table *mrt)
 {
     int i;
     LIST_HEAD(list);
+    struct mfc6_cache *c, *next;

     /*
      *  Shut down all active vif entries
      */
-    for (i = 0; i < net->ipv6.maxvif; i++) {
-        if (!(net->ipv6.vif6_table[i].flags & VIFF_STATIC))
-            mif6_delete(net, i, &list);
+    for (i = 0; i < mrt->maxvif; i++) {
+        if (!(mrt->vif6_table[i].flags & VIFF_STATIC))
+            mif6_delete(mrt, i, &list);
     }
     unregister_netdevice_many(&list);
@@ -1210,48 +1478,36 @@ static void mroute_clean_tables(struct net *net)
      *  Wipe the cache
      */
     for (i = 0; i < MFC6_LINES; i++) {
-        struct mfc6_cache *c, **cp;
-
-        cp = &net->ipv6.mfc6_cache_array[i];
-        while ((c = *cp) != NULL) {
-            if (c->mfc_flags & MFC_STATIC) {
-                cp = &c->next;
+        list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[i], list) {
+            if (c->mfc_flags & MFC_STATIC)
                 continue;
-            }
             write_lock_bh(&mrt_lock);
-            *cp = c->next;
+            list_del(&c->list);
             write_unlock_bh(&mrt_lock);

             ip6mr_cache_free(c);
         }
     }

-    if (atomic_read(&net->ipv6.cache_resolve_queue_len) != 0) {
-        struct mfc6_cache *c, **cp;
-
+    if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
         spin_lock_bh(&mfc_unres_lock);
-        cp = &mfc_unres_queue;
-        while ((c = *cp) != NULL) {
-            if (!net_eq(mfc6_net(c), net)) {
-                cp = &c->next;
-                continue;
-            }
-            *cp = c->next;
-            ip6mr_destroy_unres(c);
+        list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) {
+            list_del(&c->list);
+            ip6mr_destroy_unres(mrt, c);
         }
         spin_unlock_bh(&mfc_unres_lock);
     }
 }

-static int ip6mr_sk_init(struct sock *sk)
+static int ip6mr_sk_init(struct mr6_table *mrt, struct sock *sk)
 {
     int err = 0;
     struct net *net = sock_net(sk);

     rtnl_lock();
     write_lock_bh(&mrt_lock);
-    if (likely(net->ipv6.mroute6_sk == NULL)) {
-        net->ipv6.mroute6_sk = sk;
+    if (likely(mrt->mroute6_sk == NULL)) {
+        mrt->mroute6_sk = sk;
         net->ipv6.devconf_all->mc_forwarding++;
     }
     else
@@ -1265,24 +1521,43 @@ static int ip6mr_sk_init(struct sock *sk)
 int ip6mr_sk_done(struct sock *sk)
 {
-    int err = 0;
+    int err = -EACCES;
     struct net *net = sock_net(sk);
+    struct mr6_table *mrt;

     rtnl_lock();
-    if (sk == net->ipv6.mroute6_sk) {
-        write_lock_bh(&mrt_lock);
-        net->ipv6.mroute6_sk = NULL;
-        net->ipv6.devconf_all->mc_forwarding--;
-        write_unlock_bh(&mrt_lock);
+    ip6mr_for_each_table(mrt, net) {
+        if (sk == mrt->mroute6_sk) {
+            write_lock_bh(&mrt_lock);
+            mrt->mroute6_sk = NULL;
+            net->ipv6.devconf_all->mc_forwarding--;
+            write_unlock_bh(&mrt_lock);

-        mroute_clean_tables(net);
-    } else
-        err = -EACCES;
+            mroute_clean_tables(mrt);
+            err = 0;
+            break;
+        }
+    }
     rtnl_unlock();

     return err;
 }

+struct sock *mroute6_socket(struct net *net, struct sk_buff *skb)
+{
+    struct mr6_table *mrt;
+    struct flowi fl = {
+        .iif    = skb->skb_iif,
+        .oif    = skb->dev->ifindex,
+        .mark   = skb->mark,
+    };
+
+    if (ip6mr_fib_lookup(net, &fl, &mrt) < 0)
+        return NULL;
+
+    return mrt->mroute6_sk;
+}
+
 /*
  *  Socket options and virtual interface manipulation. The whole
  *  virtual interface system is a complete heap, but unfortunately
@@ -1297,9 +1572,14 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
     struct mf6cctl mfc;
     mifi_t mifi;
     struct net *net = sock_net(sk);
+    struct mr6_table *mrt;
+
+    mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
+    if (mrt == NULL)
+        return -ENOENT;

     if (optname != MRT6_INIT) {
-        if (sk != net->ipv6.mroute6_sk && !capable(CAP_NET_ADMIN))
+        if (sk != mrt->mroute6_sk && !capable(CAP_NET_ADMIN))
             return -EACCES;
     }
@@ -1311,7 +1591,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
         if (optlen < sizeof(int))
             return -EINVAL;

-        return ip6mr_sk_init(sk);
+        return ip6mr_sk_init(mrt, sk);

     case MRT6_DONE:
         return ip6mr_sk_done(sk);
@@ -1324,7 +1604,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
         if (vif.mif6c_mifi >= MAXMIFS)
             return -ENFILE;
         rtnl_lock();
-        ret = mif6_add(net, &vif, sk == net->ipv6.mroute6_sk);
+        ret = mif6_add(net, mrt, &vif, sk == mrt->mroute6_sk);
         rtnl_unlock();
         return ret;
@@ -1334,7 +1614,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
         if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
             return -EFAULT;
         rtnl_lock();
-        ret = mif6_delete(net, mifi, NULL);
+        ret = mif6_delete(mrt, mifi, NULL);
         rtnl_unlock();
         return ret;
@@ -1350,10 +1630,9 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
             return -EFAULT;
         rtnl_lock();
         if (optname == MRT6_DEL_MFC)
-            ret = ip6mr_mfc_delete(net, &mfc);
+            ret = ip6mr_mfc_delete(mrt, &mfc);
         else
-            ret = ip6mr_mfc_add(net, &mfc,
-                                sk == net->ipv6.mroute6_sk);
+            ret = ip6mr_mfc_add(net, mrt, &mfc, sk == mrt->mroute6_sk);
         rtnl_unlock();
         return ret;
@@ -1365,7 +1644,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
         int v;
         if (get_user(v, (int __user *)optval))
             return -EFAULT;
-        net->ipv6.mroute_do_assert = !!v;
+        mrt->mroute_do_assert = !!v;
         return 0;
     }
@@ -1378,14 +1657,35 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
         v = !!v;
         rtnl_lock();
         ret = 0;
-        if (v != net->ipv6.mroute_do_pim) {
-            net->ipv6.mroute_do_pim = v;
-            net->ipv6.mroute_do_assert = v;
+        if (v != mrt->mroute_do_pim) {
+            mrt->mroute_do_pim = v;
+            mrt->mroute_do_assert = v;
         }
         rtnl_unlock();
         return ret;
     }

 #endif
+#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
+    case MRT6_TABLE:
+    {
+        u32 v;
+
+        if (optlen != sizeof(u32))
+            return -EINVAL;
+        if (get_user(v, (u32 __user *)optval))
+            return -EFAULT;
+        if (sk == mrt->mroute6_sk)
+            return -EBUSY;
+
+        rtnl_lock();
+        ret = 0;
+        if (!ip6mr_new_table(net, v))
+            ret = -ENOMEM;
+        raw6_sk(sk)->ip6mr_table = v;
+        rtnl_unlock();
+        return ret;
+    }
+#endif
     /*
      *  Spurious command, or MRT6_VERSION which you cannot
@@ -1406,6 +1706,11 @@ int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
     int olr;
     int val;
     struct net *net = sock_net(sk);
+    struct mr6_table *mrt;
+
+    mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
+    if (mrt == NULL)
+        return -ENOENT;

     switch (optname) {
     case MRT6_VERSION:
@@ -1413,11 +1718,11 @@ int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
         break;
 #ifdef CONFIG_IPV6_PIMSM_V2
     case MRT6_PIM:
-        val = net->ipv6.mroute_do_pim;
+        val = mrt->mroute_do_pim;
         break;
 #endif
     case MRT6_ASSERT:
-        val = net->ipv6.mroute_do_assert;
+        val = mrt->mroute_do_assert;
         break;
     default:
         return -ENOPROTOOPT;
@@ -1448,16 +1753,21 @@ int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
     struct mif_device *vif;
     struct mfc6_cache *c;
     struct net *net = sock_net(sk);
+    struct mr6_table *mrt;
+
+    mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
+    if (mrt == NULL)
+        return -ENOENT;

     switch (cmd) {
     case SIOCGETMIFCNT_IN6:
         if (copy_from_user(&vr, arg, sizeof(vr)))
             return -EFAULT;
-        if (vr.mifi >= net->ipv6.maxvif)
+        if (vr.mifi >= mrt->maxvif)
             return -EINVAL;
         read_lock(&mrt_lock);
-        vif = &net->ipv6.vif6_table[vr.mifi];
-        if (MIF_EXISTS(net, vr.mifi)) {
+        vif = &mrt->vif6_table[vr.mifi];
+        if (MIF_EXISTS(mrt, vr.mifi)) {
             vr.icount = vif->pkt_in;
             vr.ocount = vif->pkt_out;
             vr.ibytes = vif->bytes_in;
@@ -1475,7 +1785,7 @@ int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
             return -EFAULT;

         read_lock(&mrt_lock);
-        c = ip6mr_cache_find(net, &sr.src.sin6_addr, &sr.grp.sin6_addr);
+        c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
         if (c) {
             sr.pktcnt = c->mfc_un.res.pkt;
             sr.bytecnt = c->mfc_un.res.bytes;
@@ -1505,11 +1815,11 @@ static inline int ip6mr_forward2_finish(struct sk_buff *skb)
  *  Processing handlers for ip6mr_forward
  */

-static int ip6mr_forward2(struct sk_buff *skb, struct mfc6_cache *c, int vifi)
+static int ip6mr_forward2(struct net *net, struct mr6_table *mrt,
+                          struct sk_buff *skb, struct mfc6_cache *c, int vifi)
 {
     struct ipv6hdr *ipv6h;
-    struct net *net = mfc6_net(c);
-    struct mif_device *vif = &net->ipv6.vif6_table[vifi];
+    struct mif_device *vif = &mrt->vif6_table[vifi];
     struct net_device *dev;
     struct dst_entry *dst;
     struct flowi fl;
@@ -1523,7 +1833,7 @@ static int ip6mr_forward2(struct sk_buff *skb, struct mfc6_cache *c, int vifi)
         vif->bytes_out += skb->len;
         vif->dev->stats.tx_bytes += skb->len;
         vif->dev->stats.tx_packets++;
-        ip6mr_cache_report(net, skb, vifi, MRT6MSG_WHOLEPKT);
+        ip6mr_cache_report(mrt, skb, vifi, MRT6MSG_WHOLEPKT);
         goto out_free;
     }
 #endif
@@ -1578,22 +1888,22 @@ static int ip6mr_forward2(struct sk_buff *skb, struct mfc6_cache *c, int vifi)
     return 0;
 }

-static int ip6mr_find_vif(struct net_device *dev)
+static int ip6mr_find_vif(struct mr6_table *mrt, struct net_device *dev)
 {
-    struct net *net = dev_net(dev);
     int ct;
-    for (ct = net->ipv6.maxvif - 1; ct >= 0; ct--) {
-        if (net->ipv6.vif6_table[ct].dev == dev)
+
+    for (ct = mrt->maxvif - 1; ct >= 0; ct--) {
+        if (mrt->vif6_table[ct].dev == dev)
             break;
     }
     return ct;
 }

-static int ip6_mr_forward(struct sk_buff *skb, struct mfc6_cache *cache)
+static int ip6_mr_forward(struct net *net, struct mr6_table *mrt,
+                          struct sk_buff *skb, struct mfc6_cache *cache)
 {
     int psend = -1;
     int vif, ct;
-    struct net *net = mfc6_net(cache);

     vif = cache->mf6c_parent;
     cache->mfc_un.res.pkt++;
@@ -1602,30 +1912,30 @@ static int ip6_mr_forward(struct sk_buff *skb, struct mfc6_cache *cache)
     /*
      * Wrong interface: drop packet and (maybe) send PIM assert.
      */
-    if (net->ipv6.vif6_table[vif].dev != skb->dev) {
+    if (mrt->vif6_table[vif].dev != skb->dev) {
         int true_vifi;

         cache->mfc_un.res.wrong_if++;
-        true_vifi = ip6mr_find_vif(skb->dev);
+        true_vifi = ip6mr_find_vif(mrt, skb->dev);

-        if (true_vifi >= 0 && net->ipv6.mroute_do_assert &&
+        if (true_vifi >= 0 && mrt->mroute_do_assert &&
             /* pimsm uses asserts, when switching from RPT to SPT,
                so that we cannot check that packet arrived on an oif.
                It is bad, but otherwise we would need to move pretty
                large chunk of pimd to kernel. Ough... --ANK
              */
-            (net->ipv6.mroute_do_pim ||
+            (mrt->mroute_do_pim ||
              cache->mfc_un.res.ttls[true_vifi] < 255) &&
             time_after(jiffies,
                        cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
             cache->mfc_un.res.last_assert = jiffies;
-            ip6mr_cache_report(net, skb, true_vifi, MRT6MSG_WRONGMIF);
+            ip6mr_cache_report(mrt, skb, true_vifi, MRT6MSG_WRONGMIF);
         }
         goto dont_forward;
     }

-    net->ipv6.vif6_table[vif].pkt_in++;
-    net->ipv6.vif6_table[vif].bytes_in += skb->len;
+    mrt->vif6_table[vif].pkt_in++;
+    mrt->vif6_table[vif].bytes_in += skb->len;

     /*
      *  Forward the frame
@@ -1635,13 +1945,13 @@ static int ip6_mr_forward(struct sk_buff *skb, struct mfc6_cache *cache)
             if (psend != -1) {
                 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
                 if (skb2)
-                    ip6mr_forward2(skb2, cache, psend);
+                    ip6mr_forward2(net, mrt, skb2, cache, psend);
             }
             psend = ct;
         }
     }
     if (psend != -1) {
-        ip6mr_forward2(skb, cache, psend);
+        ip6mr_forward2(net, mrt, skb, cache, psend);
         return 0;
     }
@@ -1659,9 +1969,19 @@ int ip6_mr_input(struct sk_buff *skb)
 {
     struct mfc6_cache *cache;
     struct net *net = dev_net(skb->dev);
+    struct mr6_table *mrt;
+    struct flowi fl = {
+        .iif    = skb->dev->ifindex,
+        .mark   = skb->mark,
+    };
+    int err;
+
+    err = ip6mr_fib_lookup(net, &fl, &mrt);
+    if (err < 0)
+        return err;

     read_lock(&mrt_lock);
-    cache = ip6mr_cache_find(net,
+    cache = ip6mr_cache_find(mrt,
                              &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);

     /*
@@ -1670,9 +1990,9 @@ int ip6_mr_input(struct sk_buff *skb)
     if (cache == NULL) {
         int vif;

-        vif = ip6mr_find_vif(skb->dev);
+        vif = ip6mr_find_vif(mrt, skb->dev);
         if (vif >= 0) {
-            int err = ip6mr_cache_unresolved(net, vif, skb);
+            int err = ip6mr_cache_unresolved(mrt, vif, skb);
             read_unlock(&mrt_lock);

             return err;
@@ -1682,7 +2002,7 @@ int ip6_mr_input(struct sk_buff *skb)
         return -ENODEV;
     }

-    ip6_mr_forward(skb, cache);
+    ip6_mr_forward(net, mrt, skb, cache);

     read_unlock(&mrt_lock);
@@ -1690,12 +2010,11 @@ int ip6_mr_input(struct sk_buff *skb)
 }

 static int
-ip6mr_fill_mroute(struct sk_buff *skb, struct mfc6_cache *c, struct rtmsg *rtm)
+__ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
+                    struct mfc6_cache *c, struct rtmsg *rtm)
 {
     int ct;
     struct rtnexthop *nhp;
-    struct net *net = mfc6_net(c);
     u8 *b = skb_tail_pointer(skb);
     struct rtattr *mp_head;
@@ -1703,19 +2022,19 @@ ip6mr_fill_mroute(struct sk_buff *skb, struct mfc6_cache *c, struct rtmsg *rtm)
     if (c->mf6c_parent > MAXMIFS)
         return -ENOENT;

-    if (MIF_EXISTS(net, c->mf6c_parent))
-        RTA_PUT(skb, RTA_IIF, 4, &net->ipv6.vif6_table[c->mf6c_parent].dev->ifindex);
+    if (MIF_EXISTS(mrt, c->mf6c_parent))
+        RTA_PUT(skb, RTA_IIF, 4, &mrt->vif6_table[c->mf6c_parent].dev->ifindex);

     mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));

     for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
-        if (MIF_EXISTS(net, ct) && c->mfc_un.res.ttls[ct] < 255) {
+        if (MIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
             if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
                 goto rtattr_failure;
             nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
             nhp->rtnh_flags = 0;
             nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
-            nhp->rtnh_ifindex = net->ipv6.vif6_table[ct].dev->ifindex;
+            nhp->rtnh_ifindex = mrt->vif6_table[ct].dev->ifindex;
             nhp->rtnh_len = sizeof(*nhp);
         }
     }
@@ -1733,11 +2052,16 @@ int ip6mr_get_route(struct net *net,
                     struct sk_buff *skb, struct rtmsg *rtm, int nowait)
 {
     int err;
+    struct mr6_table *mrt;
     struct mfc6_cache *cache;
     struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);

+    mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
+    if (mrt == NULL)
+        return -ENOENT;
+
     read_lock(&mrt_lock);
-    cache = ip6mr_cache_find(net, &rt->rt6i_src.addr, &rt->rt6i_dst.addr);
+    cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr);

     if (!cache) {
         struct sk_buff *skb2;
@@ -1751,7 +2075,7 @@ int ip6mr_get_route(struct net *net,
         }

         dev = skb->dev;
-        if (dev == NULL || (vif = ip6mr_find_vif(dev)) < 0) {
+        if (dev == NULL || (vif = ip6mr_find_vif(mrt, dev)) < 0) {
             read_unlock(&mrt_lock);
             return -ENODEV;
         }
@@ -1780,7 +2104,7 @@ int ip6mr_get_route(struct net *net,
         ipv6_addr_copy(&iph->saddr, &rt->rt6i_src.addr);
         ipv6_addr_copy(&iph->daddr, &rt->rt6i_dst.addr);

-        err = ip6mr_cache_unresolved(net, vif, skb2);
+        err = ip6mr_cache_unresolved(mrt, vif, skb2);
         read_unlock(&mrt_lock);

         return err;
@@ -1789,8 +2113,88 @@ int ip6mr_get_route(struct net *net,
     if (!nowait && (rtm->rtm_flags & RTM_F_NOTIFY))
         cache->mfc_flags |= MFC_NOTIFY;

-    err = ip6mr_fill_mroute(skb, cache, rtm);
+    err = __ip6mr_fill_mroute(mrt, skb, cache, rtm);
     read_unlock(&mrt_lock);
     return err;
 }

+static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
+                             u32 pid, u32 seq, struct mfc6_cache *c)
+{
+    struct nlmsghdr *nlh;
+    struct rtmsg *rtm;
+
+    nlh = nlmsg_put(skb, pid, seq, RTM_NEWROUTE, sizeof(*rtm), NLM_F_MULTI);
+    if (nlh == NULL)
+        return -EMSGSIZE;
+
+    rtm = nlmsg_data(nlh);
+    rtm->rtm_family   = RTNL_FAMILY_IPMR;
+    rtm->rtm_dst_len  = 128;
+    rtm->rtm_src_len  = 128;
+    rtm->rtm_tos      = 0;
+    rtm->rtm_table    = mrt->id;
+    NLA_PUT_U32(skb, RTA_TABLE, mrt->id);
+    rtm->rtm_scope    = RT_SCOPE_UNIVERSE;
+    rtm->rtm_protocol = RTPROT_UNSPEC;
+    rtm->rtm_flags    = 0;
+
+    NLA_PUT(skb, RTA_SRC, 16, &c->mf6c_origin);
+    NLA_PUT(skb, RTA_DST, 16, &c->mf6c_mcastgrp);
+
+    if (__ip6mr_fill_mroute(mrt, skb, c, rtm) < 0)
+        goto nla_put_failure;
+
+    return nlmsg_end(skb, nlh);
+
+nla_put_failure:
+    nlmsg_cancel(skb, nlh);
+    return -EMSGSIZE;
+}
+
+static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
+{
+    struct net *net = sock_net(skb->sk);
+    struct mr6_table *mrt;
+    struct mfc6_cache *mfc;
+    unsigned int t = 0, s_t;
+    unsigned int h = 0, s_h;
+    unsigned int e = 0, s_e;
+
+    s_t = cb->args[0];
+    s_h = cb->args[1];
+    s_e = cb->args[2];
+
+    read_lock(&mrt_lock);
+    ip6mr_for_each_table(mrt, net) {
+        if (t < s_t)
+            goto next_table;
+        if (t > s_t)
+            s_h = 0;
+        for (h = s_h; h < MFC6_LINES; h++) {
+            list_for_each_entry(mfc, &mrt->mfc6_cache_array[h], list) {
+                if (e < s_e)
+                    goto next_entry;
+                if (ip6mr_fill_mroute(mrt, skb,
+                                      NETLINK_CB(cb->skb).pid,
+                                      cb->nlh->nlmsg_seq,
+                                      mfc) < 0)
+                    goto done;
+next_entry:
+                e++;
+            }
+            e = s_e = 0;
+        }
+        s_h = 0;
+next_table:
+        t++;
+    }
+done:
+    read_unlock(&mrt_lock);
+
+    cb->args[2] = e;
+    cb->args[1] = h;
+    cb->args[0] = t;
+
+    return skb->len;
+}