Commit 18fb0b46 authored by David S. Miller
parents 39cebdb7 6d9c153a
@@ -352,7 +352,7 @@ static void __exit dsa_loop_exit(void)
mdio_driver_unregister(&dsa_loop_drv);
for (i = 0; i < NUM_FIXED_PHYS; i++)
if (phydevs[i])
if (!IS_ERR(phydevs[i]))
fixed_phy_unregister(phydevs[i]);
}
module_exit(dsa_loop_exit);
......
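The teardown fix above hinges on registration failures reaching dsa_loop_exit() as ERR_PTR()-encoded pointers rather than NULL, so the old "if (phydevs[i])" test let error values through to fixed_phy_unregister(). Below is a minimal sketch of that convention, assuming a hypothetical register_widget()/unregister_widget() pair (not kernel APIs; only IS_ERR()/ERR_PTR() from <linux/err.h> are real):

#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>

#define NUM_WIDGETS 4

/* Hypothetical API mirroring the convention dsa_loop relies on: a valid
 * pointer on success, ERR_PTR(-E...) on failure, never NULL.
 */
struct widget;
struct widget *register_widget(unsigned int idx);
void unregister_widget(struct widget *w);

static struct widget *widgets[NUM_WIDGETS];

static int __init widget_example_init(void)
{
        int i;

        for (i = 0; i < NUM_WIDGETS; i++)
                widgets[i] = register_widget(i);  /* valid pointer or ERR_PTR() */
        return 0;
}

static void __exit widget_example_exit(void)
{
        int i;

        for (i = 0; i < NUM_WIDGETS; i++)
                if (!IS_ERR(widgets[i]))  /* a NULL test would miss ERR_PTR() values */
                        unregister_widget(widgets[i]);
}

module_init(widget_example_init);
module_exit(widget_example_exit);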
#ifndef __NET_FRAG_H__
#define __NET_FRAG_H__
#include <linux/percpu_counter.h>
struct netns_frags {
/* The percpu_counter "mem" need to be cacheline aligned.
* mem.count must not share cacheline with other writers
*/
struct percpu_counter mem ____cacheline_aligned_in_smp;
/* Keep atomic mem on separate cachelines in structs that include it */
atomic_t mem ____cacheline_aligned_in_smp;
/* sysctls */
int timeout;
int high_thresh;
@@ -108,15 +103,10 @@ struct inet_frags {
int inet_frags_init(struct inet_frags *);
void inet_frags_fini(struct inet_frags *);
static inline int inet_frags_init_net(struct netns_frags *nf)
{
return percpu_counter_init(&nf->mem, 0, GFP_KERNEL);
}
static inline void inet_frags_uninit_net(struct netns_frags *nf)
static inline void inet_frags_init_net(struct netns_frags *nf)
{
percpu_counter_destroy(&nf->mem);
atomic_set(&nf->mem, 0);
}
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);
void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
@@ -140,31 +130,24 @@ static inline bool inet_frag_evicting(struct inet_frag_queue *q)
/* Memory Tracking Functions. */
/* The default percpu_counter batch size is not big enough to scale to
* fragmentation mem acct sizes.
* The mem size of a 64K fragment is approx:
* (44 fragments * 2944 truesize) + frag_queue struct(200) = 129736 bytes
*/
static unsigned int frag_percpu_counter_batch = 130000;
static inline int frag_mem_limit(struct netns_frags *nf)
{
return percpu_counter_read(&nf->mem);
return atomic_read(&nf->mem);
}
static inline void sub_frag_mem_limit(struct netns_frags *nf, int i)
{
percpu_counter_add_batch(&nf->mem, -i, frag_percpu_counter_batch);
atomic_sub(i, &nf->mem);
}
static inline void add_frag_mem_limit(struct netns_frags *nf, int i)
{
percpu_counter_add_batch(&nf->mem, i, frag_percpu_counter_batch);
atomic_add(i, &nf->mem);
}
static inline unsigned int sum_frag_mem_limit(struct netns_frags *nf)
static inline int sum_frag_mem_limit(struct netns_frags *nf)
{
return percpu_counter_sum_positive(&nf->mem);
return atomic_read(&nf->mem);
}
/* RFC 3168 support :
......
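The netns_frags hunks above replace the percpu_counter based accounting (whose 130000 batch was sized from the comment's arithmetic: 44 fragments * 2944 bytes of truesize plus a 200-byte frag_queue, about 129736 bytes for a full 64K datagram) with a plain atomic_t, so inet_frags_init_net() can no longer fail and frag_mem_limit() reads are exact. A small sketch of how those helpers are typically used around an skb's truesize; the example_* functions and the eviction decision are illustrative, not part of the patch:

#include <linux/skbuff.h>
#include <net/inet_frag.h>      /* the header excerpted above */

/* Charge one skb's truesize against the per-netns fragment budget and tell
 * the caller whether the high threshold has been crossed.
 */
static bool example_charge_frag(struct netns_frags *nf, struct sk_buff *skb)
{
        add_frag_mem_limit(nf, skb->truesize);

        /* atomic_read() is exact, unlike the old batched percpu read */
        return frag_mem_limit(nf) > nf->high_thresh;
}

/* Release the charge when the fragment is consumed or evicted. */
static void example_uncharge_frag(struct netns_frags *nf, struct sk_buff *skb)
{
        sub_frag_mem_limit(nf, skb->truesize);
}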
@@ -189,10 +189,11 @@ static inline int ip_route_input(struct sk_buff *skb, __be32 dst, __be32 src,
rcu_read_lock();
err = ip_route_input_noref(skb, dst, src, tos, devin);
if (!err)
if (!err) {
skb_dst_force_safe(skb);
if (!skb_dst(skb))
err = -EINVAL;
if (!skb_dst(skb))
err = -EINVAL;
}
rcu_read_unlock();
return err;
......
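In the ip_route_input() change above, a successful ip_route_input_noref() is followed by skb_dst_force_safe(), and the wrapper returns -EINVAL if the skb ends up with no dst attached (the reference on the cached entry could not be taken). A hedged caller-side sketch under those semantics; example_rcv() and its error handling are hypothetical, not taken from the tree:

#include <linux/ip.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/route.h>

/* Hypothetical receive-path fragment: after a 0 return the skb carries a
 * properly referenced dst that may be used beyond the helper's RCU section.
 */
static int example_rcv(struct sk_buff *skb, struct net_device *dev)
{
        const struct iphdr *iph = ip_hdr(skb);
        int err;

        err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev);
        if (err) {              /* includes -EINVAL when no dst could be attached */
                kfree_skb(skb);
                return err;
        }

        /* ... continue processing with a valid skb_dst(skb) ... */
        return 0;
}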
@@ -580,19 +580,14 @@ static int __net_init lowpan_frags_init_net(struct net *net)
{
struct netns_ieee802154_lowpan *ieee802154_lowpan =
net_ieee802154_lowpan(net);
int res;
ieee802154_lowpan->frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
ieee802154_lowpan->frags.low_thresh = IPV6_FRAG_LOW_THRESH;
ieee802154_lowpan->frags.timeout = IPV6_FRAG_TIMEOUT;
res = inet_frags_init_net(&ieee802154_lowpan->frags);
if (res)
return res;
res = lowpan_frags_ns_sysctl_register(net);
if (res)
inet_frags_uninit_net(&ieee802154_lowpan->frags);
return res;
inet_frags_init_net(&ieee802154_lowpan->frags);
return lowpan_frags_ns_sysctl_register(net);
}
static void __net_exit lowpan_frags_exit_net(struct net *net)
......
@@ -234,10 +234,8 @@ void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
cond_resched();
if (read_seqretry(&f->rnd_seqlock, seq) ||
percpu_counter_sum(&nf->mem))
sum_frag_mem_limit(nf))
goto evict_again;
percpu_counter_destroy(&nf->mem);
}
EXPORT_SYMBOL(inet_frags_exit_net);
......
@@ -844,8 +844,6 @@ static void __init ip4_frags_ctl_register(void)
static int __net_init ipv4_frags_init_net(struct net *net)
{
int res;
/* Fragment cache limits.
*
* The fragment memory accounting code, (tries to) account for
@@ -871,13 +869,9 @@ static int __net_init ipv4_frags_init_net(struct net *net)
net->ipv4.frags.max_dist = 64;
res = inet_frags_init_net(&net->ipv4.frags);
if (res)
return res;
res = ip4_frags_ns_ctl_register(net);
if (res)
inet_frags_uninit_net(&net->ipv4.frags);
return res;
inet_frags_init_net(&net->ipv4.frags);
return ip4_frags_ns_ctl_register(net);
}
static void __net_exit ipv4_frags_exit_net(struct net *net)
......
@@ -622,18 +622,12 @@ EXPORT_SYMBOL_GPL(nf_ct_frag6_gather);
static int nf_ct_net_init(struct net *net)
{
int res;
net->nf_frag.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
net->nf_frag.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
net->nf_frag.frags.timeout = IPV6_FRAG_TIMEOUT;
res = inet_frags_init_net(&net->nf_frag.frags);
if (res)
return res;
res = nf_ct_frag6_sysctl_register(net);
if (res)
inet_frags_uninit_net(&net->nf_frag.frags);
return res;
inet_frags_init_net(&net->nf_frag.frags);
return nf_ct_frag6_sysctl_register(net);
}
static void nf_ct_net_exit(struct net *net)
......
@@ -714,19 +714,13 @@ static void ip6_frags_sysctl_unregister(void)
static int __net_init ipv6_frags_init_net(struct net *net)
{
int res;
net->ipv6.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
net->ipv6.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT;
res = inet_frags_init_net(&net->ipv6.frags);
if (res)
return res;
res = ip6_frags_ns_sysctl_register(net);
if (res)
inet_frags_uninit_net(&net->ipv6.frags);
return res;
inet_frags_init_net(&net->ipv6.frags);
return ip6_frags_ns_sysctl_register(net);
}
static void __net_exit ipv6_frags_exit_net(struct net *net)
......
@@ -329,13 +329,21 @@ static int l2tp_session_add_to_tunnel(struct l2tp_tunnel *tunnel,
struct hlist_head *g_head;
struct hlist_head *head;
struct l2tp_net *pn;
int err;
head = l2tp_session_id_hash(tunnel, session->session_id);
write_lock_bh(&tunnel->hlist_lock);
if (!tunnel->acpt_newsess) {
err = -ENODEV;
goto err_tlock;
}
hlist_for_each_entry(session_walk, head, hlist)
if (session_walk->session_id == session->session_id)
goto exist;
if (session_walk->session_id == session->session_id) {
err = -EEXIST;
goto err_tlock;
}
if (tunnel->version == L2TP_HDR_VER_3) {
pn = l2tp_pernet(tunnel->l2tp_net);
@@ -343,12 +351,21 @@ static int l2tp_session_add_to_tunnel(struct l2tp_tunnel *tunnel,
session->session_id);
spin_lock_bh(&pn->l2tp_session_hlist_lock);
hlist_for_each_entry(session_walk, g_head, global_hlist)
if (session_walk->session_id == session->session_id)
goto exist_glob;
if (session_walk->session_id == session->session_id) {
err = -EEXIST;
goto err_tlock_pnlock;
}
l2tp_tunnel_inc_refcount(tunnel);
sock_hold(tunnel->sock);
hlist_add_head_rcu(&session->global_hlist, g_head);
spin_unlock_bh(&pn->l2tp_session_hlist_lock);
} else {
l2tp_tunnel_inc_refcount(tunnel);
sock_hold(tunnel->sock);
}
hlist_add_head(&session->hlist, head);
@@ -356,12 +373,12 @@ static int l2tp_session_add_to_tunnel(struct l2tp_tunnel *tunnel,
return 0;
exist_glob:
err_tlock_pnlock:
spin_unlock_bh(&pn->l2tp_session_hlist_lock);
exist:
err_tlock:
write_unlock_bh(&tunnel->hlist_lock);
return -EEXIST;
return err;
}
/* Lookup a tunnel by id
@@ -1251,7 +1268,6 @@ static void l2tp_tunnel_destruct(struct sock *sk)
/* Remove hooks into tunnel socket */
sk->sk_destruct = tunnel->old_sk_destruct;
sk->sk_user_data = NULL;
tunnel->sock = NULL;
/* Remove the tunnel struct from the tunnel list */
pn = l2tp_pernet(tunnel->l2tp_net);
@@ -1261,6 +1277,8 @@ static void l2tp_tunnel_destruct(struct sock *sk)
atomic_dec(&l2tp_tunnel_count);
l2tp_tunnel_closeall(tunnel);
tunnel->sock = NULL;
l2tp_tunnel_dec_refcount(tunnel);
/* Call the original destructor */
@@ -1285,6 +1303,7 @@ void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
tunnel->name);
write_lock_bh(&tunnel->hlist_lock);
tunnel->acpt_newsess = false;
for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
again:
hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) {
@@ -1581,6 +1600,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
tunnel->magic = L2TP_TUNNEL_MAGIC;
sprintf(&tunnel->name[0], "tunl %u", tunnel_id);
rwlock_init(&tunnel->hlist_lock);
tunnel->acpt_newsess = true;
/* The net we belong to */
tunnel->l2tp_net = net;
@@ -1829,11 +1849,6 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn
return ERR_PTR(err);
}
l2tp_tunnel_inc_refcount(tunnel);
/* Ensure tunnel socket isn't deleted */
sock_hold(tunnel->sock);
/* Ignore management session in session count value */
if (session->session_id != 0)
atomic_inc(&l2tp_session_count);
......
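The l2tp core changes above close a race between session creation and tunnel teardown: l2tp_tunnel_closeall() now clears the new acpt_newsess flag under hlist_lock, and l2tp_session_add_to_tunnel() tests it under the same write lock before linking a session, failing with -ENODEV once teardown has begun (duplicate IDs keep failing with -EEXIST). A condensed sketch of that handshake, assuming the private l2tp structures shown below are in scope; refcounting, the v3 global hash and the per-bucket teardown walk are elided:

/* Insertion side: refuse new sessions once teardown has started. */
static int sketch_session_add(struct l2tp_tunnel *tunnel,
                              struct l2tp_session *session,
                              struct hlist_head *head)
{
        write_lock_bh(&tunnel->hlist_lock);
        if (!tunnel->acpt_newsess) {            /* closeall has already run */
                write_unlock_bh(&tunnel->hlist_lock);
                return -ENODEV;
        }
        hlist_add_head(&session->hlist, head);
        write_unlock_bh(&tunnel->hlist_lock);
        return 0;
}

/* Teardown side: flip the flag under the same lock, then drain sessions. */
static void sketch_closeall(struct l2tp_tunnel *tunnel)
{
        write_lock_bh(&tunnel->hlist_lock);
        tunnel->acpt_newsess = false;           /* blocks sketch_session_add() */
        /* ... walk tunnel->session_hlist[] and remove each session ... */
        write_unlock_bh(&tunnel->hlist_lock);
}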
@@ -162,6 +162,10 @@ struct l2tp_tunnel {
int magic; /* Should be L2TP_TUNNEL_MAGIC */
struct rcu_head rcu;
rwlock_t hlist_lock; /* protect session_hlist */
bool acpt_newsess; /* Indicates whether this
* tunnel accepts new sessions.
* Protected by hlist_lock.
*/
struct hlist_head session_hlist[L2TP_HASH_SIZE];
/* hashed list of sessions,
* hashed by id */
@@ -197,7 +201,9 @@ struct l2tp_tunnel {
};
struct l2tp_nl_cmd_ops {
int (*session_create)(struct net *net, u32 tunnel_id, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg);
int (*session_create)(struct net *net, struct l2tp_tunnel *tunnel,
u32 session_id, u32 peer_session_id,
struct l2tp_session_cfg *cfg);
int (*session_delete)(struct l2tp_session *session);
};
......
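The header change above also hands the l2tp_tunnel pointer straight to the session_create pseudowire callback, so implementations (l2tp_eth and pppol2tp below) drop their own l2tp_tunnel_find() lookup. A hedged sketch of a driver written against the new prototype, again assuming the private l2tp definitions are available; the example_* names are hypothetical:

static int example_session_create(struct net *net, struct l2tp_tunnel *tunnel,
                                  u32 session_id, u32 peer_session_id,
                                  struct l2tp_session_cfg *cfg)
{
        if (!tunnel->sock)      /* tunnel socket must already be set up */
                return -ENOENT;

        /* ... allocate the session and bind it to *tunnel* ... */
        return 0;
}

static const struct l2tp_nl_cmd_ops example_nl_cmd_ops = {
        .session_create = example_session_create,
        .session_delete = NULL,         /* omitted in this sketch */
};

The in-tree drivers register such an ops table for their pseudowire type via l2tp_nl_register_ops().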
@@ -262,24 +262,19 @@ static void l2tp_eth_adjust_mtu(struct l2tp_tunnel *tunnel,
dev->needed_headroom += session->hdr_len;
}
static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
static int l2tp_eth_create(struct net *net, struct l2tp_tunnel *tunnel,
u32 session_id, u32 peer_session_id,
struct l2tp_session_cfg *cfg)
{
unsigned char name_assign_type;
struct net_device *dev;
char name[IFNAMSIZ];
struct l2tp_tunnel *tunnel;
struct l2tp_session *session;
struct l2tp_eth *priv;
struct l2tp_eth_sess *spriv;
int rc;
struct l2tp_eth_net *pn;
tunnel = l2tp_tunnel_find(net, tunnel_id);
if (!tunnel) {
rc = -ENODEV;
goto out;
}
if (cfg->ifname) {
strlcpy(name, cfg->ifname, IFNAMSIZ);
name_assign_type = NET_NAME_USER;
......
@@ -643,10 +643,10 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
break;
}
ret = -EPROTONOSUPPORT;
if (l2tp_nl_cmd_ops[cfg.pw_type]->session_create)
ret = (*l2tp_nl_cmd_ops[cfg.pw_type]->session_create)(net, tunnel_id,
session_id, peer_session_id, &cfg);
ret = l2tp_nl_cmd_ops[cfg.pw_type]->session_create(net, tunnel,
session_id,
peer_session_id,
&cfg);
if (ret >= 0) {
session = l2tp_session_get(net, tunnel, session_id, false);
......
@@ -788,25 +788,20 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
#ifdef CONFIG_L2TP_V3
/* Called when creating sessions via the netlink interface.
*/
static int pppol2tp_session_create(struct net *net, u32 tunnel_id, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
/* Called when creating sessions via the netlink interface. */
static int pppol2tp_session_create(struct net *net, struct l2tp_tunnel *tunnel,
u32 session_id, u32 peer_session_id,
struct l2tp_session_cfg *cfg)
{
int error;
struct l2tp_tunnel *tunnel;
struct l2tp_session *session;
struct pppol2tp_session *ps;
tunnel = l2tp_tunnel_find(net, tunnel_id);
/* Error if we can't find the tunnel */
error = -ENOENT;
if (tunnel == NULL)
goto out;
/* Error if tunnel socket is not prepped */
if (tunnel->sock == NULL)
if (!tunnel->sock) {
error = -ENOENT;
goto out;
}
/* Default MTU values. */
if (cfg->mtu == 0)
......