Commit 16085e48 authored by Victor Nogueira, committed by David S. Miller

net/sched: act_mirred: Create function tcf_mirred_to_dev and improve readability

As a preparation for adding block ID to mirred, separate the part of
mirred that redirects/mirrors to a dev into a specific function so that
it can be called by blockcast for each dev.

Also improve readability, e.g. rename use_reinsert to dont_clone and
skb2 to skb_to_send.
Co-developed-by: Jamal Hadi Salim <jhs@mojatatu.com>
Signed-off-by: Jamal Hadi Salim <jhs@mojatatu.com>
Co-developed-by: Pedro Tammela <pctammela@mojatatu.com>
Signed-off-by: Pedro Tammela <pctammela@mojatatu.com>
Signed-off-by: Victor Nogueira <victor@mojatatu.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent a7042cf8
@@ -225,48 +225,26 @@ static int tcf_mirred_forward(bool want_ingress, struct sk_buff *skb)
 	return err;
 }
 
-TC_INDIRECT_SCOPE int tcf_mirred_act(struct sk_buff *skb,
-				     const struct tc_action *a,
-				     struct tcf_result *res)
+static int tcf_mirred_to_dev(struct sk_buff *skb, struct tcf_mirred *m,
+			     struct net_device *dev,
+			     const bool m_mac_header_xmit, int m_eaction,
+			     int retval)
 {
-	struct tcf_mirred *m = to_mirred(a);
-	struct sk_buff *skb2 = skb;
-	bool m_mac_header_xmit;
-	struct net_device *dev;
-	unsigned int nest_level;
-	int retval, err = 0;
-	bool use_reinsert;
+	struct sk_buff *skb_to_send = skb;
 	bool want_ingress;
 	bool is_redirect;
 	bool expects_nh;
 	bool at_ingress;
-	int m_eaction;
+	bool dont_clone;
 	int mac_len;
 	bool at_nh;
+	int err;
 
-	nest_level = __this_cpu_inc_return(mirred_nest_level);
-	if (unlikely(nest_level > MIRRED_NEST_LIMIT)) {
-		net_warn_ratelimited("Packet exceeded mirred recursion limit on dev %s\n",
-				     netdev_name(skb->dev));
-		__this_cpu_dec(mirred_nest_level);
-		return TC_ACT_SHOT;
-	}
-
-	tcf_lastuse_update(&m->tcf_tm);
-	tcf_action_update_bstats(&m->common, skb);
-
-	m_mac_header_xmit = READ_ONCE(m->tcfm_mac_header_xmit);
-	m_eaction = READ_ONCE(m->tcfm_eaction);
-	retval = READ_ONCE(m->tcf_action);
-	dev = rcu_dereference_bh(m->tcfm_dev);
-	if (unlikely(!dev)) {
-		pr_notice_once("tc mirred: target device is gone\n");
-		goto out;
-	}
-
+	is_redirect = tcf_mirred_is_act_redirect(m_eaction);
 	if (unlikely(!(dev->flags & IFF_UP)) || !netif_carrier_ok(dev)) {
 		net_notice_ratelimited("tc mirred to Houston: device %s is down\n",
 				       dev->name);
+		err = -ENODEV;
 		goto out;
 	}
 
@@ -274,61 +252,98 @@ TC_INDIRECT_SCOPE int tcf_mirred_act(struct sk_buff *skb,
 	 * since we can't easily detect the clsact caller, skip clone only for
 	 * ingress - that covers the TC S/W datapath.
 	 */
-	is_redirect = tcf_mirred_is_act_redirect(m_eaction);
 	at_ingress = skb_at_tc_ingress(skb);
-	use_reinsert = at_ingress && is_redirect &&
-		       tcf_mirred_can_reinsert(retval);
-	if (!use_reinsert) {
-		skb2 = skb_clone(skb, GFP_ATOMIC);
-		if (!skb2)
+	dont_clone = skb_at_tc_ingress(skb) && is_redirect &&
+		     tcf_mirred_can_reinsert(retval);
+	if (!dont_clone) {
+		skb_to_send = skb_clone(skb, GFP_ATOMIC);
+		if (!skb_to_send) {
+			err = -ENOMEM;
 			goto out;
+		}
 	}
 
 	want_ingress = tcf_mirred_act_wants_ingress(m_eaction);
 
 	/* All mirred/redirected skbs should clear previous ct info */
-	nf_reset_ct(skb2);
+	nf_reset_ct(skb_to_send);
 	if (want_ingress && !at_ingress) /* drop dst for egress -> ingress */
-		skb_dst_drop(skb2);
+		skb_dst_drop(skb_to_send);
 
 	expects_nh = want_ingress || !m_mac_header_xmit;
 	at_nh = skb->data == skb_network_header(skb);
 	if (at_nh != expects_nh) {
-		mac_len = skb_at_tc_ingress(skb) ? skb->mac_len :
+		mac_len = at_ingress ? skb->mac_len :
 			  skb_network_offset(skb);
 		if (expects_nh) {
 			/* target device/action expect data at nh */
-			skb_pull_rcsum(skb2, mac_len);
+			skb_pull_rcsum(skb_to_send, mac_len);
 		} else {
 			/* target device/action expect data at mac */
-			skb_push_rcsum(skb2, mac_len);
+			skb_push_rcsum(skb_to_send, mac_len);
 		}
 	}
 
-	skb2->skb_iif = skb->dev->ifindex;
-	skb2->dev = dev;
+	skb_to_send->skb_iif = skb->dev->ifindex;
+	skb_to_send->dev = dev;
 
-	/* mirror is always swallowed */
 	if (is_redirect) {
-		skb_set_redirected(skb2, skb2->tc_at_ingress);
+		if (skb == skb_to_send)
+			retval = TC_ACT_CONSUMED;
 
-		/* let's the caller reinsert the packet, if possible */
-		if (use_reinsert) {
-			err = tcf_mirred_forward(want_ingress, skb);
-			if (err)
-				tcf_action_inc_overlimit_qstats(&m->common);
-			__this_cpu_dec(mirred_nest_level);
-			return TC_ACT_CONSUMED;
-		}
+		skb_set_redirected(skb_to_send, skb_to_send->tc_at_ingress);
+
+		err = tcf_mirred_forward(want_ingress, skb_to_send);
+	} else {
+		err = tcf_mirred_forward(want_ingress, skb_to_send);
 	}
 
-	err = tcf_mirred_forward(want_ingress, skb2);
 	if (err) {
 out:
 		tcf_action_inc_overlimit_qstats(&m->common);
-		if (tcf_mirred_is_act_redirect(m_eaction))
+		if (is_redirect)
 			retval = TC_ACT_SHOT;
 	}
+
+	return retval;
+}
+
+TC_INDIRECT_SCOPE int tcf_mirred_act(struct sk_buff *skb,
+				     const struct tc_action *a,
+				     struct tcf_result *res)
+{
+	struct tcf_mirred *m = to_mirred(a);
+	int retval = READ_ONCE(m->tcf_action);
+	unsigned int nest_level;
+	bool m_mac_header_xmit;
+	struct net_device *dev;
+	int m_eaction;
+
+	nest_level = __this_cpu_inc_return(mirred_nest_level);
+	if (unlikely(nest_level > MIRRED_NEST_LIMIT)) {
+		net_warn_ratelimited("Packet exceeded mirred recursion limit on dev %s\n",
+				     netdev_name(skb->dev));
+		retval = TC_ACT_SHOT;
+		goto dec_nest_level;
+	}
+
+	tcf_lastuse_update(&m->tcf_tm);
+	tcf_action_update_bstats(&m->common, skb);
+
+	dev = rcu_dereference_bh(m->tcfm_dev);
+	if (unlikely(!dev)) {
+		pr_notice_once("tc mirred: target device is gone\n");
+		tcf_action_inc_overlimit_qstats(&m->common);
+		goto dec_nest_level;
+	}
+
+	m_mac_header_xmit = READ_ONCE(m->tcfm_mac_header_xmit);
+	m_eaction = READ_ONCE(m->tcfm_eaction);
+
+	retval = tcf_mirred_to_dev(skb, m, dev, m_mac_header_xmit, m_eaction,
+				   retval);
+
+dec_nest_level:
 	__this_cpu_dec(mirred_nest_level);
 
 	return retval;
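The payoff of the refactor, per the commit message, is that a later blockcast patch can invoke tcf_mirred_to_dev() once per device on a TC block. Below is a minimal sketch of what such a caller might look like; the helper name tcf_blockcast_sketch and the pre-resolved devs[] array are illustrative assumptions, not the actual follow-up patch, which would need to walk the block's port list under RCU and handle clone-vs-consume per device.

/* Hypothetical sketch: fan one skb out to every dev on a block by
 * reusing tcf_mirred_to_dev(). Helper name and devs[] array are
 * assumptions for illustration only.
 */
static int tcf_blockcast_sketch(struct sk_buff *skb, struct tcf_mirred *m,
				const bool mac_header_xmit, int m_eaction,
				int retval, struct net_device **devs,
				size_t num_devs)
{
	size_t i;

	/* tcf_mirred_to_dev() clones internally except when it can safely
	 * consume the original skb, so each iteration sends its own copy.
	 * A real implementation must still ensure the original skb is
	 * consumed at most once (e.g. only for the last device) in the
	 * redirect-at-ingress case.
	 */
	for (i = 0; i < num_devs; i++)
		retval = tcf_mirred_to_dev(skb, m, devs[i], mac_header_xmit,
					   m_eaction, retval);

	return retval;
}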