Commit 3c5aa0bc authored by David S. Miller

Merge branch 'ipv4-ipv6-mcast-align'

Yuval Mintz says:

====================
ipmr, ip6mr: Align multicast routing for IPv4 & IPv6

Historically ip6mr was based [cut-n-paste] on ipmr, and the two have not
diverged much since. As ipv4 multicast routing is more common than its
ipv6 counterpart, modifications since then have mostly been one-way,
affecting ipmr while leaving ip6mr unchanged.

This series re-factors ipmr and ip6mr to share common structures [and some
functionality], adding 2 new common files -
mroute_base.h and ipmr_base.c.

The series begins by bringing ip6mr up to speed with some of the changes
previously applied to ipmr [#2, #3].
It then becomes possible to re-factor a lot of the common structures -
vif devices [#1], mr_table [#4] and mfc_cache [#6] - and use the common
structures in both ipmr and ip6mr (a minimal sketch of the shared layout
follows the commit metadata below).

The rest of the patches re-factor selected flows used by both ipmr
and ip6mr and eliminate the duplication.

This series will later allow ipmr offloading to be easily extended to
cover ip6mr offloading as well, as almost all structures related to
offloading would be shared between the two protocols.

Changes from previous versions
------------------------------
v2:
  - #6 Corrected reporting logic when hitting an unresolved cache
  - #7 Addressed kernel doc style [Thanks Nikolay]

RFC -> v1:
  - Corrected support for CONFIG_IP{,V6}_MROUTE_MULTIPLE_TABLES
  - Addressed a couple of kbuild test robot issues
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents a25724b0 7b0db857
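For illustration only - not part of this commit - here is a minimal sketch of
the layout pattern the series relies on, and which the mlxsw hunks below adapt
to (mfc4->mfc_parent becomes mfc4->_c.mfc_parent): the protocol-specific cache
entry embeds the common struct mr_mfc as its first member, so the shared
helpers can return it (typed as void *) and protocol code can cast back to its
own entry type. The structs are simplified and the to_mfc_cache() helper name
is hypothetical.

struct mr_mfc {				/* common part shared by ipmr/ip6mr */
	unsigned short mfc_parent;	/* incoming interface (iif) */
	int mfc_flags;
	/* ... rhashtable node, unresolved queue, statistics, refcount, ... */
};

struct mfc_cache {			/* IPv4 entry; common part must come first */
	struct mr_mfc _c;
	__be32 mfc_mcastgrp;
	__be32 mfc_origin;
};

/* Because _c is the first member, a struct mr_mfc * returned by the common
 * code also points at the containing protocol entry. */
static inline struct mfc_cache *to_mfc_cache(struct mr_mfc *c)
{
	return (struct mfc_cache *)c;
}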
@@ -126,8 +126,8 @@ mlxsw_sp_mr_route_ivif_in_evifs(const struct mlxsw_sp_mr_route *mr_route)
switch (mr_route->mr_table->proto) {
case MLXSW_SP_L3_PROTO_IPV4:
ivif = mr_route->mfc4->mfc_parent;
return mr_route->mfc4->mfc_un.res.ttls[ivif] != 255;
ivif = mr_route->mfc4->_c.mfc_parent;
return mr_route->mfc4->_c.mfc_un.res.ttls[ivif] != 255;
case MLXSW_SP_L3_PROTO_IPV6:
/* fall through */
default:
@@ -364,7 +364,7 @@ mlxsw_sp_mr_route4_create(struct mlxsw_sp_mr_table *mr_table,
mr_route->mfc4 = mfc;
mr_route->mr_table = mr_table;
for (i = 0; i < MAXVIFS; i++) {
if (mfc->mfc_un.res.ttls[i] != 255) {
if (mfc->_c.mfc_un.res.ttls[i] != 255) {
err = mlxsw_sp_mr_route_evif_link(mr_route,
&mr_table->vifs[i]);
if (err)
@@ -374,7 +374,8 @@ mlxsw_sp_mr_route4_create(struct mlxsw_sp_mr_table *mr_table,
mr_route->min_mtu = mr_table->vifs[i].dev->mtu;
}
}
mlxsw_sp_mr_route_ivif_link(mr_route, &mr_table->vifs[mfc->mfc_parent]);
mlxsw_sp_mr_route_ivif_link(mr_route,
&mr_table->vifs[mfc->_c.mfc_parent]);
mr_route->route_action = mlxsw_sp_mr_route_action(mr_route);
return mr_route;
@@ -418,9 +419,9 @@ static void mlxsw_sp_mr_mfc_offload_set(struct mlxsw_sp_mr_route *mr_route,
switch (mr_route->mr_table->proto) {
case MLXSW_SP_L3_PROTO_IPV4:
if (offload)
mr_route->mfc4->mfc_flags |= MFC_OFFLOAD;
mr_route->mfc4->_c.mfc_flags |= MFC_OFFLOAD;
else
mr_route->mfc4->mfc_flags &= ~MFC_OFFLOAD;
mr_route->mfc4->_c.mfc_flags &= ~MFC_OFFLOAD;
break;
case MLXSW_SP_L3_PROTO_IPV6:
/* fall through */
@@ -943,10 +944,10 @@ static void mlxsw_sp_mr_route_stats_update(struct mlxsw_sp *mlxsw_sp,
switch (mr_route->mr_table->proto) {
case MLXSW_SP_L3_PROTO_IPV4:
if (mr_route->mfc4->mfc_un.res.pkt != packets)
mr_route->mfc4->mfc_un.res.lastuse = jiffies;
mr_route->mfc4->mfc_un.res.pkt = packets;
mr_route->mfc4->mfc_un.res.bytes = bytes;
if (mr_route->mfc4->_c.mfc_un.res.pkt != packets)
mr_route->mfc4->_c.mfc_un.res.lastuse = jiffies;
mr_route->mfc4->_c.mfc_un.res.pkt = packets;
mr_route->mfc4->_c.mfc_un.res.bytes = bytes;
break;
case MLXSW_SP_L3_PROTO_IPV6:
/* fall through */
@@ -4,11 +4,10 @@
#include <linux/in.h>
#include <linux/pim.h>
#include <linux/rhashtable.h>
#include <net/sock.h>
#include <net/fib_rules.h>
#include <net/fib_notifier.h>
#include <uapi/linux/mroute.h>
#include <linux/mroute_base.h>
#ifdef CONFIG_IP_MROUTE
static inline int ip_mroute_opt(int opt)
@@ -56,18 +55,6 @@ static inline bool ipmr_rule_default(const struct fib_rule *rule)
}
#endif
struct vif_device {
struct net_device *dev; /* Device we are using */
struct netdev_phys_item_id dev_parent_id; /* Device parent ID */
unsigned long bytes_in,bytes_out;
unsigned long pkt_in,pkt_out; /* Statistics */
unsigned long rate_limit; /* Traffic shaping (NI) */
unsigned char threshold; /* TTL threshold */
unsigned short flags; /* Control flags */
__be32 local,remote; /* Addresses(remote for tunnels)*/
int link; /* Physical interface index */
};
struct vif_entry_notifier_info {
struct fib_notifier_info info;
struct net_device *dev;
@@ -78,34 +65,6 @@ struct vif_entry_notifier_info {
#define VIFF_STATIC 0x8000
#define VIF_EXISTS(_mrt, _idx) ((_mrt)->vif_table[_idx].dev != NULL)
struct mr_table {
struct list_head list;
possible_net_t net;
u32 id;
struct sock __rcu *mroute_sk;
struct timer_list ipmr_expire_timer;
struct list_head mfc_unres_queue;
struct vif_device vif_table[MAXVIFS];
struct rhltable mfc_hash;
struct list_head mfc_cache_list;
int maxvif;
atomic_t cache_resolve_queue_len;
bool mroute_do_assert;
bool mroute_do_pim;
int mroute_reg_vif_num;
};
/* mfc_flags:
* MFC_STATIC - the entry was added statically (not by a routing daemon)
* MFC_OFFLOAD - the entry was offloaded to the hardware
*/
enum {
MFC_STATIC = BIT(0),
MFC_OFFLOAD = BIT(1),
};
struct mfc_cache_cmp_arg {
__be32 mfc_mcastgrp;
__be32 mfc_origin;
@@ -113,28 +72,13 @@ struct mfc_cache_cmp_arg {
/**
* struct mfc_cache - multicast routing entries
* @mnode: rhashtable list
* @_c: Common multicast routing information; has to be first [for casting]
* @mfc_mcastgrp: destination multicast group address
* @mfc_origin: source address
* @cmparg: used for rhashtable comparisons
* @mfc_parent: source interface (iif)
* @mfc_flags: entry flags
* @expires: unresolved entry expire time
* @unresolved: unresolved cached skbs
* @last_assert: time of last assert
* @minvif: minimum VIF id
* @maxvif: maximum VIF id
* @bytes: bytes that have passed for this entry
* @pkt: packets that have passed for this entry
* @wrong_if: number of wrong source interface hits
* @lastuse: time of last use of the group (traffic or update)
* @ttls: OIF TTL threshold array
* @refcount: reference count for this entry
* @list: global entry list
* @rcu: used for entry destruction
*/
struct mfc_cache {
struct rhlist_head mnode;
struct mr_mfc _c;
union {
struct {
__be32 mfc_mcastgrp;
@@ -142,28 +86,6 @@ struct mfc_cache {
};
struct mfc_cache_cmp_arg cmparg;
};
vifi_t mfc_parent;
int mfc_flags;
union {
struct {
unsigned long expires;
struct sk_buff_head unresolved;
} unres;
struct {
unsigned long last_assert;
int minvif;
int maxvif;
unsigned long bytes;
unsigned long pkt;
unsigned long wrong_if;
unsigned long lastuse;
unsigned char ttls[MAXVIFS];
refcount_t refcount;
} res;
} mfc_un;
struct list_head list;
struct rcu_head rcu;
};
struct mfc_entry_notifier_info {
@@ -187,12 +109,12 @@ static inline void ipmr_cache_free(struct mfc_cache *mfc_cache)
static inline void ipmr_cache_put(struct mfc_cache *c)
{
if (refcount_dec_and_test(&c->mfc_un.res.refcount))
if (refcount_dec_and_test(&c->_c.mfc_un.res.refcount))
ipmr_cache_free(c);
}
static inline void ipmr_cache_hold(struct mfc_cache *c)
{
refcount_inc(&c->mfc_un.res.refcount);
refcount_inc(&c->_c.mfc_un.res.refcount);
}
#endif
@@ -7,6 +7,7 @@
#include <linux/skbuff.h> /* for struct sk_buff_head */
#include <net/net_namespace.h>
#include <uapi/linux/mroute6.h>
#include <linux/mroute_base.h>
#ifdef CONFIG_IPV6_MROUTE
static inline int ip6_mroute_opt(int opt)
@@ -62,57 +63,24 @@ static inline void ip6_mr_cleanup(void)
}
#endif
struct mif_device {
struct net_device *dev; /* Device we are using */
unsigned long bytes_in,bytes_out;
unsigned long pkt_in,pkt_out; /* Statistics */
unsigned long rate_limit; /* Traffic shaping (NI) */
unsigned char threshold; /* TTL threshold */
unsigned short flags; /* Control flags */
int link; /* Physical interface index */
};
#define VIFF_STATIC 0x8000
struct mfc6_cache {
struct list_head list;
struct in6_addr mf6c_mcastgrp; /* Group the entry belongs to */
struct in6_addr mf6c_origin; /* Source of packet */
mifi_t mf6c_parent; /* Source interface */
int mfc_flags; /* Flags on line */
struct mfc6_cache_cmp_arg {
struct in6_addr mf6c_mcastgrp;
struct in6_addr mf6c_origin;
};
struct mfc6_cache {
struct mr_mfc _c;
union {
struct {
unsigned long expires;
struct sk_buff_head unresolved; /* Unresolved buffers */
} unres;
struct {
unsigned long last_assert;
int minvif;
int maxvif;
unsigned long bytes;
unsigned long pkt;
unsigned long wrong_if;
unsigned long lastuse;
unsigned char ttls[MAXMIFS]; /* TTL thresholds */
} res;
} mfc_un;
struct in6_addr mf6c_mcastgrp;
struct in6_addr mf6c_origin;
};
struct mfc6_cache_cmp_arg cmparg;
};
};
#define MFC_STATIC 1
#define MFC_NOTIFY 2
#define MFC6_LINES 64
#define MFC6_HASH(a, g) (((__force u32)(a)->s6_addr32[0] ^ \
(__force u32)(a)->s6_addr32[1] ^ \
(__force u32)(a)->s6_addr32[2] ^ \
(__force u32)(a)->s6_addr32[3] ^ \
(__force u32)(g)->s6_addr32[0] ^ \
(__force u32)(g)->s6_addr32[1] ^ \
(__force u32)(g)->s6_addr32[2] ^ \
(__force u32)(g)->s6_addr32[3]) % MFC6_LINES)
#define MFC_ASSERT_THRESH (3*HZ) /* Maximal freq. of asserts */
struct rtmsg;
@@ -120,12 +88,12 @@ extern int ip6mr_get_route(struct net *net, struct sk_buff *skb,
struct rtmsg *rtm, u32 portid);
#ifdef CONFIG_IPV6_MROUTE
extern struct sock *mroute6_socket(struct net *net, struct sk_buff *skb);
bool mroute6_is_socket(struct net *net, struct sk_buff *skb);
extern int ip6mr_sk_done(struct sock *sk);
#else
static inline struct sock *mroute6_socket(struct net *net, struct sk_buff *skb)
static inline bool mroute6_is_socket(struct net *net, struct sk_buff *skb)
{
return NULL;
return false;
}
static inline int ip6mr_sk_done(struct sock *sk)
{
#ifndef __LINUX_MROUTE_BASE_H
#define __LINUX_MROUTE_BASE_H
#include <linux/netdevice.h>
#include <linux/rhashtable.h>
#include <linux/spinlock.h>
#include <net/net_namespace.h>
#include <net/sock.h>
/**
* struct vif_device - interface representor for multicast routing
* @dev: network device being used
* @bytes_in: statistic; bytes ingressing
* @bytes_out: statistic; bytes egresing
* @pkt_in: statistic; packets ingressing
* @pkt_out: statistic; packets egressing
* @rate_limit: Traffic shaping (NI)
* @threshold: TTL threshold
* @flags: Control flags
* @link: Physical interface index
* @dev_parent_id: device parent id
* @local: Local address
* @remote: Remote address for tunnels
*/
struct vif_device {
struct net_device *dev;
unsigned long bytes_in, bytes_out;
unsigned long pkt_in, pkt_out;
unsigned long rate_limit;
unsigned char threshold;
unsigned short flags;
int link;
/* Currently only used by ipmr */
struct netdev_phys_item_id dev_parent_id;
__be32 local, remote;
};
#ifndef MAXVIFS
/* This one is nasty; value is defined in uapi using different symbols for
* mroute and morute6 but both map into same 32.
*/
#define MAXVIFS 32
#endif
#define VIF_EXISTS(_mrt, _idx) (!!((_mrt)->vif_table[_idx].dev))
/* mfc_flags:
* MFC_STATIC - the entry was added statically (not by a routing daemon)
* MFC_OFFLOAD - the entry was offloaded to the hardware
*/
enum {
MFC_STATIC = BIT(0),
MFC_OFFLOAD = BIT(1),
};
/**
* struct mr_mfc - common multicast routing entries
* @mnode: rhashtable list
* @mfc_parent: source interface (iif)
* @mfc_flags: entry flags
* @expires: unresolved entry expire time
* @unresolved: unresolved cached skbs
* @last_assert: time of last assert
* @minvif: minimum VIF id
* @maxvif: maximum VIF id
* @bytes: bytes that have passed for this entry
* @pkt: packets that have passed for this entry
* @wrong_if: number of wrong source interface hits
* @lastuse: time of last use of the group (traffic or update)
* @ttls: OIF TTL threshold array
* @refcount: reference count for this entry
* @list: global entry list
* @rcu: used for entry destruction
*/
struct mr_mfc {
struct rhlist_head mnode;
unsigned short mfc_parent;
int mfc_flags;
union {
struct {
unsigned long expires;
struct sk_buff_head unresolved;
} unres;
struct {
unsigned long last_assert;
int minvif;
int maxvif;
unsigned long bytes;
unsigned long pkt;
unsigned long wrong_if;
unsigned long lastuse;
unsigned char ttls[MAXVIFS];
refcount_t refcount;
} res;
} mfc_un;
struct list_head list;
struct rcu_head rcu;
};
struct mr_table;
/**
* struct mr_table_ops - callbacks and info for protocol-specific ops
* @rht_params: parameters for accessing the MFC hash
* @cmparg_any: a hash key to be used for matching on (*,*) routes
*/
struct mr_table_ops {
const struct rhashtable_params *rht_params;
void *cmparg_any;
};
/**
* struct mr_table - a multicast routing table
* @list: entry within a list of multicast routing tables
* @net: net where this table belongs
* @ops: protocol specific operations
* @id: identifier of the table
* @mroute_sk: socket associated with the table
* @ipmr_expire_timer: timer for handling unresolved routes
* @mfc_unres_queue: list of unresolved MFC entries
* @vif_table: array containing all possible vifs
* @mfc_hash: Hash table of all resolved routes for easy lookup
* @mfc_cache_list: list of resovled routes for possible traversal
* @maxvif: Identifier of highest value vif currently in use
* @cache_resolve_queue_len: current size of unresolved queue
* @mroute_do_assert: Whether to inform userspace on wrong ingress
* @mroute_do_pim: Whether to receive IGMP PIMv1
* @mroute_reg_vif_num: PIM-device vif index
*/
struct mr_table {
struct list_head list;
possible_net_t net;
struct mr_table_ops ops;
u32 id;
struct sock __rcu *mroute_sk;
struct timer_list ipmr_expire_timer;
struct list_head mfc_unres_queue;
struct vif_device vif_table[MAXVIFS];
struct rhltable mfc_hash;
struct list_head mfc_cache_list;
int maxvif;
atomic_t cache_resolve_queue_len;
bool mroute_do_assert;
bool mroute_do_pim;
int mroute_reg_vif_num;
};
#ifdef CONFIG_IP_MROUTE_COMMON
void vif_device_init(struct vif_device *v,
struct net_device *dev,
unsigned long rate_limit,
unsigned char threshold,
unsigned short flags,
unsigned short get_iflink_mask);
struct mr_table *
mr_table_alloc(struct net *net, u32 id,
struct mr_table_ops *ops,
void (*expire_func)(struct timer_list *t),
void (*table_set)(struct mr_table *mrt,
struct net *net));
/* These actually return 'struct mr_mfc *', but to avoid need for explicit
* castings they simply return void.
*/
void *mr_mfc_find_parent(struct mr_table *mrt,
void *hasharg, int parent);
void *mr_mfc_find_any_parent(struct mr_table *mrt, int vifi);
void *mr_mfc_find_any(struct mr_table *mrt, int vifi, void *hasharg);
int mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
struct mr_mfc *c, struct rtmsg *rtm);
int mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb,
struct mr_table *(*iter)(struct net *net,
struct mr_table *mrt),
int (*fill)(struct mr_table *mrt,
struct sk_buff *skb,
u32 portid, u32 seq, struct mr_mfc *c,
int cmd, int flags),
spinlock_t *lock);
#else
static inline void vif_device_init(struct vif_device *v,
struct net_device *dev,
unsigned long rate_limit,
unsigned char threshold,
unsigned short flags,
unsigned short get_iflink_mask)
{
}
static inline void *
mr_table_alloc(struct net *net, u32 id,
struct mr_table_ops *ops,
void (*expire_func)(struct timer_list *t),
void (*table_set)(struct mr_table *mrt,
struct net *net))
{
return NULL;
}
static inline void *mr_mfc_find_parent(struct mr_table *mrt,
void *hasharg, int parent)
{
return NULL;
}
static inline void *mr_mfc_find_any_parent(struct mr_table *mrt,
int vifi)
{
return NULL;
}
static inline struct mr_mfc *mr_mfc_find_any(struct mr_table *mrt,
int vifi, void *hasharg)
{
return NULL;
}
static inline int mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
struct mr_mfc *c, struct rtmsg *rtm)
{
return -EINVAL;
}
static inline int
mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb,
struct mr_table *(*iter)(struct net *net,
struct mr_table *mrt),
int (*fill)(struct mr_table *mrt,
struct sk_buff *skb,
u32 portid, u32 seq, struct mr_mfc *c,
int cmd, int flags),
spinlock_t *lock)
{
return -EINVAL;
}
#endif
static inline void *mr_mfc_find(struct mr_table *mrt, void *hasharg)
{
return mr_mfc_find_parent(mrt, hasharg, -1);
}
#ifdef CONFIG_PROC_FS
struct mr_vif_iter {
struct seq_net_private p;
struct mr_table *mrt;
int ct;
};
struct mr_mfc_iter {
struct seq_net_private p;
struct mr_table *mrt;
struct list_head *cache;
/* Lock protecting the mr_table's unresolved queue */
spinlock_t *lock;
};
#ifdef CONFIG_IP_MROUTE_COMMON
void *mr_vif_seq_idx(struct net *net, struct mr_vif_iter *iter, loff_t pos);
void *mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos);
static inline void *mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
{
return *pos ? mr_vif_seq_idx(seq_file_net(seq),
seq->private, *pos - 1)
: SEQ_START_TOKEN;
}
/* These actually return 'struct mr_mfc *', but to avoid need for explicit
* castings they simply return void.
*/
void *mr_mfc_seq_idx(struct net *net,
struct mr_mfc_iter *it, loff_t pos);
void *mr_mfc_seq_next(struct seq_file *seq, void *v,
loff_t *pos);
static inline void *mr_mfc_seq_start(struct seq_file *seq, loff_t *pos,
struct mr_table *mrt, spinlock_t *lock)
{
struct mr_mfc_iter *it = seq->private;
it->mrt = mrt;
it->cache = NULL;
it->lock = lock;
return *pos ? mr_mfc_seq_idx(seq_file_net(seq),
seq->private, *pos - 1)
: SEQ_START_TOKEN;
}
static inline void mr_mfc_seq_stop(struct seq_file *seq, void *v)
{
struct mr_mfc_iter *it = seq->private;
struct mr_table *mrt = it->mrt;
if (it->cache == &mrt->mfc_unres_queue)
spin_unlock_bh(it->lock);
else if (it->cache == &mrt->mfc_cache_list)
rcu_read_unlock();
}
#else
static inline void *mr_vif_seq_idx(struct net *net, struct mr_vif_iter *iter,
loff_t pos)
{
return NULL;
}
static inline void *mr_vif_seq_next(struct seq_file *seq,
void *v, loff_t *pos)
{
return NULL;
}
static inline void *mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
{
return NULL;
}
static inline void *mr_mfc_seq_idx(struct net *net,
struct mr_mfc_iter *it, loff_t pos)
{
return NULL;
}
static inline void *mr_mfc_seq_next(struct seq_file *seq, void *v,
loff_t *pos)
{
return NULL;
}
static inline void *mr_mfc_seq_start(struct seq_file *seq, loff_t *pos,
struct mr_table *mrt, spinlock_t *lock)
{
return NULL;
}
static inline void mr_mfc_seq_stop(struct seq_file *seq, void *v)
{
}
#endif
#endif
#endif
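For illustration only - not part of this commit - a rough sketch of how a
protocol might wire itself into the common table API declared above. All
my_* names are placeholders: the protocol supplies rhashtable parameters
keyed on its own comparison argument, an 'any' key for (*,*) lookups, and
the expiry/table-set callbacks, and mr_table_alloc() does the rest.

static const struct rhashtable_params my_rht_params = {
	.head_offset = offsetof(struct mr_mfc, mnode),
	.key_offset = offsetof(struct mfc_cache, cmparg),
	.key_len = sizeof(struct mfc_cache_cmp_arg),
	.automatic_shrinking = true,
};

static struct mfc_cache_cmp_arg my_cmparg_any = {
	.mfc_mcastgrp = htonl(INADDR_ANY),
	.mfc_origin = htonl(INADDR_ANY),
};

static struct mr_table_ops my_table_ops = {
	.rht_params = &my_rht_params,
	.cmparg_any = &my_cmparg_any,
};

static void my_expire(struct timer_list *t)
{
	/* recover the mr_table with from_timer() and flush unresolved
	 * entries whose 'expires' time has passed */
}

static void my_table_set(struct mr_table *mrt, struct net *net)
{
	/* link the new table into the protocol's per-netns state */
}

static struct mr_table *my_table_create(struct net *net, u32 id)
{
	return mr_table_alloc(net, id, &my_table_ops, my_expire, my_table_set);
}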
@@ -85,7 +85,7 @@ struct netns_ipv6 {
struct sock *mc_autojoin_sk;
#ifdef CONFIG_IPV6_MROUTE
#ifndef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
struct mr6_table *mrt6;
struct mr_table *mrt6;
#else
struct list_head mr6_tables;
struct fib_rules_ops *mr6_rules_ops;
@@ -212,9 +212,14 @@ config NET_IPGRE_BROADCAST
Network), but can be distributed all over the Internet. If you want
to do that, say Y here and to "IP multicast routing" below.
config IP_MROUTE_COMMON
bool
depends on IP_MROUTE || IPV6_MROUTE
config IP_MROUTE
bool "IP: multicast routing"
depends on IP_MULTICAST
select IP_MROUTE_COMMON
help
This is used if you want your machine to act as a router for IP
packets that have several destination addresses. It is needed on the
@@ -20,6 +20,7 @@ obj-$(CONFIG_SYSCTL) += sysctl_net_ipv4.o
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_IP_MULTIPLE_TABLES) += fib_rules.o
obj-$(CONFIG_IP_MROUTE) += ipmr.o
obj-$(CONFIG_IP_MROUTE_COMMON) += ipmr_base.o
obj-$(CONFIG_NET_IPIP) += ipip.o
gre-y := gre_demux.o
obj-$(CONFIG_NET_FOU) += fou.o
[Collapsed diff not shown in this view.]
/* Linux multicast routing support
* Common logic shared by IPv4 [ipmr] and IPv6 [ip6mr] implementation
*/
#include <linux/mroute_base.h>
/* Sets everything common except 'dev', since that is done under locking */
void vif_device_init(struct vif_device *v,
struct net_device *dev,
unsigned long rate_limit,
unsigned char threshold,
unsigned short flags,
unsigned short get_iflink_mask)
{
v->dev = NULL;
v->bytes_in = 0;
v->bytes_out = 0;
v->pkt_in = 0;
v->pkt_out = 0;
v->rate_limit = rate_limit;
v->flags = flags;
v->threshold = threshold;
if (v->flags & get_iflink_mask)
v->link = dev_get_iflink(dev);
else
v->link = dev->ifindex;
}
EXPORT_SYMBOL(vif_device_init);
struct mr_table *
mr_table_alloc(struct net *net, u32 id,
struct mr_table_ops *ops,
void (*expire_func)(struct timer_list *t),
void (*table_set)(struct mr_table *mrt,
struct net *net))
{
struct mr_table *mrt;
mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
if (!mrt)
return NULL;
mrt->id = id;
write_pnet(&mrt->net, net);
mrt->ops = *ops;
rhltable_init(&mrt->mfc_hash, mrt->ops.rht_params);
INIT_LIST_HEAD(&mrt->mfc_cache_list);
INIT_LIST_HEAD(&mrt->mfc_unres_queue);
timer_setup(&mrt->ipmr_expire_timer, expire_func, 0);
mrt->mroute_reg_vif_num = -1;
table_set(mrt, net);
return mrt;
}
EXPORT_SYMBOL(mr_table_alloc);
void *mr_mfc_find_parent(struct mr_table *mrt, void *hasharg, int parent)
{
struct rhlist_head *tmp, *list;
struct mr_mfc *c;
list = rhltable_lookup(&mrt->mfc_hash, hasharg, *mrt->ops.rht_params);
rhl_for_each_entry_rcu(c, tmp, list, mnode)
if (parent == -1 || parent == c->mfc_parent)
return c;
return NULL;
}
EXPORT_SYMBOL(mr_mfc_find_parent);
void *mr_mfc_find_any_parent(struct mr_table *mrt, int vifi)
{
struct rhlist_head *tmp, *list;
struct mr_mfc *c;
list = rhltable_lookup(&mrt->mfc_hash, mrt->ops.cmparg_any,
*mrt->ops.rht_params);
rhl_for_each_entry_rcu(c, tmp, list, mnode)
if (c->mfc_un.res.ttls[vifi] < 255)
return c;
return NULL;
}
EXPORT_SYMBOL(mr_mfc_find_any_parent);
void *mr_mfc_find_any(struct mr_table *mrt, int vifi, void *hasharg)
{
struct rhlist_head *tmp, *list;
struct mr_mfc *c, *proxy;
list = rhltable_lookup(&mrt->mfc_hash, hasharg, *mrt->ops.rht_params);
rhl_for_each_entry_rcu(c, tmp, list, mnode) {
if (c->mfc_un.res.ttls[vifi] < 255)
return c;
/* It's ok if the vifi is part of the static tree */
proxy = mr_mfc_find_any_parent(mrt, c->mfc_parent);
if (proxy && proxy->mfc_un.res.ttls[vifi] < 255)
return c;
}
return mr_mfc_find_any_parent(mrt, vifi);
}
EXPORT_SYMBOL(mr_mfc_find_any);
#ifdef CONFIG_PROC_FS
void *mr_vif_seq_idx(struct net *net, struct mr_vif_iter *iter, loff_t pos)
{
struct mr_table *mrt = iter->mrt;
for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
if (!VIF_EXISTS(mrt, iter->ct))
continue;
if (pos-- == 0)
return &mrt->vif_table[iter->ct];
}
return NULL;
}
EXPORT_SYMBOL(mr_vif_seq_idx);
void *mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct mr_vif_iter *iter = seq->private;
struct net *net = seq_file_net(seq);
struct mr_table *mrt = iter->mrt;
++*pos;
if (v == SEQ_START_TOKEN)
return mr_vif_seq_idx(net, iter, 0);
while (++iter->ct < mrt->maxvif) {
if (!VIF_EXISTS(mrt, iter->ct))
continue;
return &mrt->vif_table[iter->ct];
}
return NULL;
}
EXPORT_SYMBOL(mr_vif_seq_next);
void *mr_mfc_seq_idx(struct net *net,
struct mr_mfc_iter *it, loff_t pos)
{
struct mr_table *mrt = it->mrt;
struct mr_mfc *mfc;
rcu_read_lock();
it->cache = &mrt->mfc_cache_list;
list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list)
if (pos-- == 0)
return mfc;
rcu_read_unlock();
spin_lock_bh(it->lock);
it->cache = &mrt->mfc_unres_queue;
list_for_each_entry(mfc, it->cache, list)
if (pos-- == 0)
return mfc;
spin_unlock_bh(it->lock);
it->cache = NULL;
return NULL;
}
EXPORT_SYMBOL(mr_mfc_seq_idx);
void *mr_mfc_seq_next(struct seq_file *seq, void *v,
loff_t *pos)
{
struct mr_mfc_iter *it = seq->private;
struct net *net = seq_file_net(seq);
struct mr_table *mrt = it->mrt;
struct mr_mfc *c = v;
++*pos;
if (v == SEQ_START_TOKEN)
return mr_mfc_seq_idx(net, seq->private, 0);
if (c->list.next != it->cache)
return list_entry(c->list.next, struct mr_mfc, list);
if (it->cache == &mrt->mfc_unres_queue)
goto end_of_list;
/* exhausted cache_array, show unresolved */
rcu_read_unlock();
it->cache = &mrt->mfc_unres_queue;
spin_lock_bh(it->lock);
if (!list_empty(it->cache))
return list_first_entry(it->cache, struct mr_mfc, list);
end_of_list:
spin_unlock_bh(it->lock);
it->cache = NULL;
return NULL;
}
EXPORT_SYMBOL(mr_mfc_seq_next);
#endif
int mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
struct mr_mfc *c, struct rtmsg *rtm)
{
struct rta_mfc_stats mfcs;
struct nlattr *mp_attr;
struct rtnexthop *nhp;
unsigned long lastuse;
int ct;
/* If cache is unresolved, don't try to parse IIF and OIF */
if (c->mfc_parent >= MAXVIFS) {
rtm->rtm_flags |= RTNH_F_UNRESOLVED;
return -ENOENT;
}
if (VIF_EXISTS(mrt, c->mfc_parent) &&
nla_put_u32(skb, RTA_IIF,
mrt->vif_table[c->mfc_parent].dev->ifindex) < 0)
return -EMSGSIZE;
if (c->mfc_flags & MFC_OFFLOAD)
rtm->rtm_flags |= RTNH_F_OFFLOAD;
mp_attr = nla_nest_start(skb, RTA_MULTIPATH);
if (!mp_attr)
return -EMSGSIZE;
for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
struct vif_device *vif;
nhp = nla_reserve_nohdr(skb, sizeof(*nhp));
if (!nhp) {
nla_nest_cancel(skb, mp_attr);
return -EMSGSIZE;
}
nhp->rtnh_flags = 0;
nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
vif = &mrt->vif_table[ct];
nhp->rtnh_ifindex = vif->dev->ifindex;
nhp->rtnh_len = sizeof(*nhp);
}
}
nla_nest_end(skb, mp_attr);
lastuse = READ_ONCE(c->mfc_un.res.lastuse);
lastuse = time_after_eq(jiffies, lastuse) ? jiffies - lastuse : 0;
mfcs.mfcs_packets = c->mfc_un.res.pkt;
mfcs.mfcs_bytes = c->mfc_un.res.bytes;
mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
if (nla_put_64bit(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs, RTA_PAD) ||
nla_put_u64_64bit(skb, RTA_EXPIRES, jiffies_to_clock_t(lastuse),
RTA_PAD))
return -EMSGSIZE;
rtm->rtm_type = RTN_MULTICAST;
return 1;
}
EXPORT_SYMBOL(mr_fill_mroute);
int mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb,
struct mr_table *(*iter)(struct net *net,
struct mr_table *mrt),
int (*fill)(struct mr_table *mrt,
struct sk_buff *skb,
u32 portid, u32 seq, struct mr_mfc *c,
int cmd, int flags),
spinlock_t *lock)
{
unsigned int t = 0, e = 0, s_t = cb->args[0], s_e = cb->args[1];
struct net *net = sock_net(skb->sk);
struct mr_table *mrt;
struct mr_mfc *mfc;
rcu_read_lock();
for (mrt = iter(net, NULL); mrt; mrt = iter(net, mrt)) {
if (t < s_t)
goto next_table;
list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list) {
if (e < s_e)
goto next_entry;
if (fill(mrt, skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, mfc,
RTM_NEWROUTE, NLM_F_MULTI) < 0)
goto done;
next_entry:
e++;
}
e = 0;
s_e = 0;
spin_lock_bh(lock);
list_for_each_entry(mfc, &mrt->mfc_unres_queue, list) {
if (e < s_e)
goto next_entry2;
if (fill(mrt, skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, mfc,
RTM_NEWROUTE, NLM_F_MULTI) < 0) {
spin_unlock_bh(lock);
goto done;
}
next_entry2:
e++;
}
spin_unlock_bh(lock);
e = 0;
s_e = 0;
next_table:
t++;
}
done:
rcu_read_unlock();
cb->args[1] = e;
cb->args[0] = t;
return skb->len;
}
EXPORT_SYMBOL(mr_rtm_dumproute);
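For illustration only - not part of this commit - a hypothetical sketch of
the call shape of mr_rtm_dumproute() above: the protocol passes a table
iterator, a per-entry netlink fill callback (which would typically end in
mr_fill_mroute()), and the spinlock guarding its unresolved queue. All
my_* names are placeholders.

static struct mr_table *my_table_iter(struct net *net, struct mr_table *mrt)
{
	/* return the first table when mrt is NULL, the next table otherwise,
	 * and NULL once the walk is complete */
	return NULL;
}

static int my_fill_route(struct mr_table *mrt, struct sk_buff *skb,
			 u32 portid, u32 seq, struct mr_mfc *c,
			 int cmd, int flags)
{
	/* open an RTM_NEWROUTE message, fill the protocol addresses from the
	 * entry containing 'c', then use mr_fill_mroute(mrt, skb, c, rtm)
	 * for the iif/nexthop/stats attributes */
	return 0;
}

static DEFINE_SPINLOCK(my_mfc_unres_lock);

static int my_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
{
	return mr_rtm_dumproute(skb, cb, my_table_iter, my_fill_route,
				&my_mfc_unres_lock);
}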
@@ -278,6 +278,7 @@ config IPV6_SUBTREES
config IPV6_MROUTE
bool "IPv6: multicast routing"
depends on IPV6
select IP_MROUTE_COMMON
---help---
Experimental support for IPv6 multicast forwarding.
If unsure, say N.
@@ -71,7 +71,7 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *
struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(sk) &&
((mroute6_socket(net, skb) &&
((mroute6_is_socket(net, skb) &&
!(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
&ipv6_hdr(skb)->saddr))) {
[Collapsed diff not shown in this view.]