Commit 92c075db authored by Daniel Borkmann, committed by David S. Miller

net: sched: fix tc_should_offload for specific clsact classes

When offloading classifiers such as u32 or flower to hardware, and the
qdisc is clsact (TC_H_CLSACT), we need to differentiate between its
classes: only the ingress class can be offloaded, so filters attached
to the egress class must be left in the software path. Add a
.tcf_cl_offload() callback so qdiscs can express this generically;
tested on ixgbe.
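
For illustration, this is the distinction the new callback encodes for
clsact (a sketch using the UAPI constants from
include/uapi/linux/pkt_sched.h; not part of this patch):

	/* clsact exposes two pseudo classes, and only the ingress one
	 * can be handled in hardware:
	 *
	 *   TC_H_MAKE(TC_H_CLSACT, TC_H_MIN_INGRESS) -> ffff:fff2, offloadable
	 *   TC_H_MAKE(TC_H_CLSACT, TC_H_MIN_EGRESS)  -> ffff:fff3, software only
	 */
	static bool clsact_class_offloadable(u32 classid)
	{
		return TC_H_MIN(classid) == TC_H_MIN(TC_H_MIN_INGRESS);
	}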

Fixes: 10cbc684 ("net/sched: cls_flower: Hardware offloaded filters statistics support")
Fixes: 5b33f488 ("net/flower: Introduce hardware offload support")
Fixes: a1b7c5fd ("net: sched: add cls_u32 offload hooks for netdevs")
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: John Fastabend <john.r.fastabend@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent a03e6fe5
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -392,16 +392,20 @@ struct tc_cls_u32_offload {
 	};
 };
 
-static inline bool tc_should_offload(struct net_device *dev, u32 flags)
+static inline bool tc_should_offload(const struct net_device *dev,
+				     const struct tcf_proto *tp, u32 flags)
 {
+	const struct Qdisc *sch = tp->q;
+	const struct Qdisc_class_ops *cops = sch->ops->cl_ops;
+
 	if (!(dev->features & NETIF_F_HW_TC))
 		return false;
 	if (flags & TCA_CLS_FLAGS_SKIP_HW)
 		return false;
 	if (!dev->netdev_ops->ndo_setup_tc)
 		return false;
+	if (cops && cops->tcf_cl_offload)
+		return cops->tcf_cl_offload(tp->classid);
+
 	return true;
 }
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -168,6 +168,7 @@ struct Qdisc_class_ops {
 	/* Filter manipulation */
 	struct tcf_proto __rcu ** (*tcf_chain)(struct Qdisc *, unsigned long);
+	bool			(*tcf_cl_offload)(u32 classid);
 	unsigned long		(*bind_tcf)(struct Qdisc *, unsigned long,
 					    u32 classid);
 	void			(*unbind_tcf)(struct Qdisc *, unsigned long);
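
The callback is optional: a qdisc that leaves .tcf_cl_offload unset keeps
the previous behaviour, since tc_should_offload() then falls through to
return true. As a purely hypothetical sketch (not part of this patch), a
qdisc whose classes should never be offloaded could provide:

	static bool example_cl_offload(u32 classid)
	{
		return false;	/* keep all filters in the software path */
	}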
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -171,7 +171,7 @@ static void fl_hw_destroy_filter(struct tcf_proto *tp, unsigned long cookie)
 	struct tc_cls_flower_offload offload = {0};
 	struct tc_to_netdev tc;
 
-	if (!tc_should_offload(dev, 0))
+	if (!tc_should_offload(dev, tp, 0))
 		return;
 
 	offload.command = TC_CLSFLOWER_DESTROY;
@@ -194,7 +194,7 @@ static void fl_hw_replace_filter(struct tcf_proto *tp,
 	struct tc_cls_flower_offload offload = {0};
 	struct tc_to_netdev tc;
 
-	if (!tc_should_offload(dev, flags))
+	if (!tc_should_offload(dev, tp, flags))
 		return;
 
 	offload.command = TC_CLSFLOWER_REPLACE;
@@ -216,7 +216,7 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
 	struct tc_cls_flower_offload offload = {0};
 	struct tc_to_netdev tc;
 
-	if (!tc_should_offload(dev, 0))
+	if (!tc_should_offload(dev, tp, 0))
 		return;
 
 	offload.command = TC_CLSFLOWER_STATS;
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -440,7 +440,7 @@ static void u32_remove_hw_knode(struct tcf_proto *tp, u32 handle)
 	offload.type = TC_SETUP_CLSU32;
 	offload.cls_u32 = &u32_offload;
 
-	if (tc_should_offload(dev, 0)) {
+	if (tc_should_offload(dev, tp, 0)) {
 		offload.cls_u32->command = TC_CLSU32_DELETE_KNODE;
 		offload.cls_u32->knode.handle = handle;
 		dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
@@ -457,7 +457,7 @@ static int u32_replace_hw_hnode(struct tcf_proto *tp,
 	struct tc_to_netdev offload;
 	int err;
 
-	if (!tc_should_offload(dev, flags))
+	if (!tc_should_offload(dev, tp, flags))
 		return tc_skip_sw(flags) ? -EINVAL : 0;
 
 	offload.type = TC_SETUP_CLSU32;
@@ -485,7 +485,7 @@ static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h)
 	offload.type = TC_SETUP_CLSU32;
 	offload.cls_u32 = &u32_offload;
 
-	if (tc_should_offload(dev, 0)) {
+	if (tc_should_offload(dev, tp, 0)) {
 		offload.cls_u32->command = TC_CLSU32_DELETE_HNODE;
 		offload.cls_u32->hnode.divisor = h->divisor;
 		offload.cls_u32->hnode.handle = h->handle;
@@ -508,7 +508,7 @@ static int u32_replace_hw_knode(struct tcf_proto *tp,
 	offload.type = TC_SETUP_CLSU32;
 	offload.cls_u32 = &u32_offload;
 
-	if (tc_should_offload(dev, flags)) {
+	if (tc_should_offload(dev, tp, flags)) {
 		offload.cls_u32->command = TC_CLSU32_REPLACE_KNODE;
 		offload.cls_u32->knode.handle = n->handle;
 		offload.cls_u32->knode.fshift = n->fshift;
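
One user-visible consequence of the u32 changes above: in
u32_replace_hw_hnode(), a filter requested with skip_sw on a class that
cannot be offloaded (such as clsact's egress class) now fails with
-EINVAL, since tc_should_offload() returns false while tc_skip_sw(flags)
is true, rather than being bogusly offloaded with ingress semantics.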
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -27,6 +27,11 @@ static unsigned long ingress_get(struct Qdisc *sch, u32 classid)
 	return TC_H_MIN(classid) + 1;
 }
 
+static bool ingress_cl_offload(u32 classid)
+{
+	return true;
+}
+
 static unsigned long ingress_bind_filter(struct Qdisc *sch,
 					 unsigned long parent, u32 classid)
 {
@@ -86,6 +91,7 @@ static const struct Qdisc_class_ops ingress_class_ops = {
 	.put		=	ingress_put,
 	.walk		=	ingress_walk,
 	.tcf_chain	=	ingress_find_tcf,
+	.tcf_cl_offload	=	ingress_cl_offload,
 	.bind_tcf	=	ingress_bind_filter,
 	.unbind_tcf	=	ingress_put,
 };
@@ -110,6 +116,11 @@ static unsigned long clsact_get(struct Qdisc *sch, u32 classid)
 	}
 }
 
+static bool clsact_cl_offload(u32 classid)
+{
+	return TC_H_MIN(classid) == TC_H_MIN(TC_H_MIN_INGRESS);
+}
+
 static unsigned long clsact_bind_filter(struct Qdisc *sch,
 					unsigned long parent, u32 classid)
 {
@@ -158,6 +169,7 @@ static const struct Qdisc_class_ops clsact_class_ops = {
 	.put		=	ingress_put,
 	.walk		=	ingress_walk,
 	.tcf_chain	=	clsact_find_tcf,
+	.tcf_cl_offload	=	clsact_cl_offload,
 	.bind_tcf	=	clsact_bind_filter,
 	.unbind_tcf	=	ingress_put,
 };