Commit e5cf1baf authored by Paolo Abeni, committed by David S. Miller

act_mirred: use TC_ACT_REINSERT when possible

When mirred is invoked from the ingress path and wants to redirect the
processed packet, it can now return the TC_ACT_REINSERT action, filling
the tcf_result accordingly and avoiding a per-packet skb_clone().
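
For context, the clone can only be skipped because the ingress caller knows
how to consume TC_ACT_REINSERT; that plumbing comes from the parent commit
(cd11b164). Below is a minimal sketch of the caller-side helper, assuming the
skb_tc_reinsert() name and the res->ingress / res->qstats fields from that
companion change; it is not part of this patch and details may differ.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sch_generic.h>    /* struct tcf_result, qstats_overlimit_inc() */

/* Sketch: re-inject a packet that act_mirred handed back with
 * TC_ACT_REINSERT instead of cloning it; res->ingress and res->qstats
 * are the fields this patch fills before returning the verdict.
 */
static inline void skb_tc_reinsert(struct sk_buff *skb, struct tcf_result *res)
{
        struct gnet_stats_queue *stats = res->qstats;
        int ret;

        if (res->ingress)
                ret = netif_receive_skb(skb);   /* redirect back to ingress */
        else
                ret = dev_queue_xmit(skb);      /* redirect to egress */

        /* account the failure against mirred's queue stats, since mirred
         * itself no longer sees the error
         */
        if (ret && stats)
                qstats_overlimit_inc(res->qstats);
}

The helper re-injects the very skb that mirred would otherwise have cloned,
which is where the per-packet skb_clone() saving comes from.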

Overall this gives a ~10% improvement in forwarding performance for the
TC S/W datapath, and TC S/W performance is now comparable to the kernel
openvswitch datapath.

v1 -> v2: use ACT_MIRRED instead of ACT_REDIRECT
v2 -> v3: updated after action rename, fixed typo into the commit
	message
v3 -> v4: updated again after action rename, added more comments to
	the code (JiriP), skip the optimization if the control action
	need to touch the tcf_result (Paolo)
v4 -> v5: fix sparse warning (kbuild bot)
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Acked-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent cd11b164
@@ -25,6 +25,7 @@
 #include <net/net_namespace.h>
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
+#include <net/pkt_cls.h>
 #include <linux/tc_act/tc_mirred.h>
 #include <net/tc_act/tc_mirred.h>
@@ -49,6 +50,18 @@ static bool tcf_mirred_act_wants_ingress(int action)
         }
 }
 
+static bool tcf_mirred_can_reinsert(int action)
+{
+        switch (action) {
+        case TC_ACT_SHOT:
+        case TC_ACT_STOLEN:
+        case TC_ACT_QUEUED:
+        case TC_ACT_TRAP:
+                return true;
+        }
+        return false;
+}
+
 static void tcf_mirred_release(struct tc_action *a)
 {
         struct tcf_mirred *m = to_mirred(a);
@@ -171,10 +184,13 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
                       struct tcf_result *res)
 {
         struct tcf_mirred *m = to_mirred(a);
+        struct sk_buff *skb2 = skb;
         bool m_mac_header_xmit;
         struct net_device *dev;
-        struct sk_buff *skb2;
         int retval, err = 0;
+        bool use_reinsert;
+        bool want_ingress;
+        bool is_redirect;
         int m_eaction;
         int mac_len;
@@ -196,16 +212,25 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
                 goto out;
         }
 
-        skb2 = skb_clone(skb, GFP_ATOMIC);
-        if (!skb2)
-                goto out;
+        /* we could easily avoid the clone only if called by ingress and clsact;
+         * since we can't easily detect the clsact caller, skip clone only for
+         * ingress - that covers the TC S/W datapath.
+         */
+        is_redirect = tcf_mirred_is_act_redirect(m_eaction);
+        use_reinsert = skb_at_tc_ingress(skb) && is_redirect &&
+                       tcf_mirred_can_reinsert(retval);
+        if (!use_reinsert) {
+                skb2 = skb_clone(skb, GFP_ATOMIC);
+                if (!skb2)
+                        goto out;
+        }
 
         /* If action's target direction differs than filter's direction,
          * and devices expect a mac header on xmit, then mac push/pull is
          * needed.
          */
-        if (skb_at_tc_ingress(skb) != tcf_mirred_act_wants_ingress(m_eaction) &&
-            m_mac_header_xmit) {
+        want_ingress = tcf_mirred_act_wants_ingress(m_eaction);
+        if (skb_at_tc_ingress(skb) != want_ingress && m_mac_header_xmit) {
                 if (!skb_at_tc_ingress(skb)) {
                         /* caught at egress, act ingress: pull mac */
                         mac_len = skb_network_header(skb) - skb_mac_header(skb);
@@ -216,15 +241,23 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
                 }
         }
 
-        skb2->skb_iif = skb->dev->ifindex;
-        skb2->dev = dev;
-
         /* mirror is always swallowed */
-        if (tcf_mirred_is_act_redirect(m_eaction)) {
+        if (is_redirect) {
                 skb2->tc_redirected = 1;
                 skb2->tc_from_ingress = skb2->tc_at_ingress;
+
+                /* let's the caller reinsert the packet, if possible */
+                if (use_reinsert) {
+                        res->ingress = want_ingress;
+                        res->qstats = this_cpu_ptr(m->common.cpu_qstats);
+                        return TC_ACT_REINSERT;
+                }
         }
 
-        if (!tcf_mirred_act_wants_ingress(m_eaction))
+        skb2->skb_iif = skb->dev->ifindex;
+        skb2->dev = dev;
+        if (!want_ingress)
                 err = dev_queue_xmit(skb2);
         else
                 err = netif_receive_skb(skb2);
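
For completeness, the ingress hook is then expected to dispatch the new
verdict roughly as in the sketch below. This is a simplified illustration with
a hypothetical wrapper name (handle_tc_verdict), loosely modeled on the
caller-side handling introduced by the parent commit; it is not part of this
diff.

#include <linux/skbuff.h>
#include <net/pkt_cls.h>        /* assumed home of TC_ACT_REINSERT / skb_tc_reinsert() */

/* Hypothetical wrapper showing how a classification result is handled
 * at ingress once act_mirred may return TC_ACT_REINSERT.
 */
static struct sk_buff *handle_tc_verdict(struct sk_buff *skb, int verdict,
                                         struct tcf_result *cl_res)
{
        switch (verdict) {
        case TC_ACT_REINSERT:
                /* skb ownership passes to the re-inject path: no clone,
                 * and the caller stops processing this packet.
                 */
                skb_tc_reinsert(skb, cl_res);
                return NULL;
        default:
                /* all other verdicts keep their existing handling */
                return skb;
        }
}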