Commit 8747d82d authored by David S. Miller

Merge branch 'mirred-recurse'

John Hurley says:

====================
Track recursive calls in TC act_mirred

These patches aim to prevent act_mirred causing stack overflow events from
recursively calling packet xmit or receive functions. Such events can
occur with poor TC configuration that causes packets to travel in loops
within the system.

Florian Westphal advises that a recursion crash and packets looping are
separate issues and should be treated as such. David Miller further points
out that pcpu counters cannot track the precise skb context required to
detect loops. Hence these patches are not aimed at detecting packet loops,
rather, preventing stack overflows arising from such loops.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 5cdda5f1 e2ca070f
...@@ -10,7 +10,7 @@ ...@@ -10,7 +10,7 @@
#include <net/net_namespace.h> #include <net/net_namespace.h>
/* TC action not accessible from user space */ /* TC action not accessible from user space */
#define TC_ACT_REINSERT (TC_ACT_VALUE_MAX + 1) #define TC_ACT_CONSUMED (TC_ACT_VALUE_MAX + 1)
/* Basic packet classifier frontend definitions. */ /* Basic packet classifier frontend definitions. */
......
...@@ -279,7 +279,7 @@ struct tcf_result { ...@@ -279,7 +279,7 @@ struct tcf_result {
}; };
const struct tcf_proto *goto_tp; const struct tcf_proto *goto_tp;
/* used by the TC_ACT_REINSERT action */ /* used in the skb_tc_reinsert function */
struct { struct {
bool ingress; bool ingress;
struct gnet_stats_queue *qstats; struct gnet_stats_queue *qstats;
......
...@@ -4689,9 +4689,7 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret, ...@@ -4689,9 +4689,7 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
__skb_push(skb, skb->mac_len); __skb_push(skb, skb->mac_len);
skb_do_redirect(skb); skb_do_redirect(skb);
return NULL; return NULL;
case TC_ACT_REINSERT: case TC_ACT_CONSUMED:
/* this does not scrub the packet, and updates stats on error */
skb_tc_reinsert(skb, &cl_res);
return NULL; return NULL;
default: default:
break; break;
......
...@@ -27,6 +27,9 @@ ...@@ -27,6 +27,9 @@
static LIST_HEAD(mirred_list); static LIST_HEAD(mirred_list);
static DEFINE_SPINLOCK(mirred_list_lock); static DEFINE_SPINLOCK(mirred_list_lock);
#define MIRRED_RECURSION_LIMIT 4
static DEFINE_PER_CPU(unsigned int, mirred_rec_level);
static bool tcf_mirred_is_act_redirect(int action) static bool tcf_mirred_is_act_redirect(int action)
{ {
return action == TCA_EGRESS_REDIR || action == TCA_INGRESS_REDIR; return action == TCA_EGRESS_REDIR || action == TCA_INGRESS_REDIR;
...@@ -210,6 +213,7 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a, ...@@ -210,6 +213,7 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
struct sk_buff *skb2 = skb; struct sk_buff *skb2 = skb;
bool m_mac_header_xmit; bool m_mac_header_xmit;
struct net_device *dev; struct net_device *dev;
unsigned int rec_level;
int retval, err = 0; int retval, err = 0;
bool use_reinsert; bool use_reinsert;
bool want_ingress; bool want_ingress;
...@@ -217,6 +221,14 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a, ...@@ -217,6 +221,14 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
int m_eaction; int m_eaction;
int mac_len; int mac_len;
rec_level = __this_cpu_inc_return(mirred_rec_level);
if (unlikely(rec_level > MIRRED_RECURSION_LIMIT)) {
net_warn_ratelimited("Packet exceeded mirred recursion limit on dev %s\n",
netdev_name(skb->dev));
__this_cpu_dec(mirred_rec_level);
return TC_ACT_SHOT;
}
tcf_lastuse_update(&m->tcf_tm); tcf_lastuse_update(&m->tcf_tm);
bstats_cpu_update(this_cpu_ptr(m->common.cpu_bstats), skb); bstats_cpu_update(this_cpu_ptr(m->common.cpu_bstats), skb);
...@@ -277,7 +289,9 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a, ...@@ -277,7 +289,9 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
if (use_reinsert) { if (use_reinsert) {
res->ingress = want_ingress; res->ingress = want_ingress;
res->qstats = this_cpu_ptr(m->common.cpu_qstats); res->qstats = this_cpu_ptr(m->common.cpu_qstats);
return TC_ACT_REINSERT; skb_tc_reinsert(skb, res);
__this_cpu_dec(mirred_rec_level);
return TC_ACT_CONSUMED;
} }
} }
...@@ -292,6 +306,7 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a, ...@@ -292,6 +306,7 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
if (tcf_mirred_is_act_redirect(m_eaction)) if (tcf_mirred_is_act_redirect(m_eaction))
retval = TC_ACT_SHOT; retval = TC_ACT_SHOT;
} }
__this_cpu_dec(mirred_rec_level);
return retval; return retval;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment