Commit d853d112 authored by David S. Miller

Merge branch 'net-sched-let-the-offloader-decide-what-to-offload'

Jiri Pirko says:

====================
net: sched: let the offloader decide what to offload

Currently there is a Qdisc_class_ops->tcf_cl_offload callback
that is called to find out whether the classifier would offload a rule or not.
It is only implemented by sch_ingress and sch_clsact, so the qdisc
is the one making the decision. However, the driver knows what it is
able to offload, so move the decision making entirely into the drivers.
Just pass the classid down and provide a set of helpers that allow
identification of the qdisc.

As a side effect, this actually enables offloading of clsact egress
rules in mlxsw.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents c5ebc440 7b06e8ae
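
For orientation before the per-driver hunks: the pattern the drivers converge on looks roughly like the sketch below. is_classid_clsact_ingress()/is_classid_clsact_egress() are the helpers added in include/net/pkt_sched.h by this series; the function name and the use of tc_cls_flower_offload here are illustrative only and not taken from any particular driver.

/* Illustrative only: a driver-side binding decision made from the classid,
 * replacing the removed Qdisc_class_ops->tcf_cl_offload() callback.
 */
static int example_setup_tc_cls_flower(struct net_device *dev,
					struct tc_cls_flower_offload *f)
{
	bool ingress;

	if (is_classid_clsact_ingress(f->common.classid))
		ingress = true;
	else if (is_classid_clsact_egress(f->common.classid))
		ingress = false;
	else
		return -EOPNOTSUPP;	/* attached to a qdisc we cannot offload */

	/* ... program the rule in hardware for the chosen direction ... */
	return 0;
}
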
@@ -2892,7 +2892,7 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
 static int cxgb_setup_tc_cls_u32(struct net_device *dev,
				 struct tc_cls_u32_offload *cls_u32)
 {
-	if (TC_H_MAJ(cls_u32->common.handle) != TC_H_MAJ(TC_H_INGRESS) ||
+	if (!is_classid_clsact_ingress(cls_u32->common.classid) ||
	    cls_u32->common.chain_index)
		return -EOPNOTSUPP;
...
@@ -9230,7 +9230,7 @@ static int ixgbe_setup_tc_cls_u32(struct net_device *dev,
 {
	struct ixgbe_adapter *adapter = netdev_priv(dev);

-	if (TC_H_MAJ(cls_u32->common.handle) != TC_H_MAJ(TC_H_INGRESS) ||
+	if (!is_classid_clsact_ingress(cls_u32->common.classid) ||
	    cls_u32->common.chain_index)
		return -EOPNOTSUPP;
...
@@ -3031,7 +3031,7 @@ static int mlx5e_setup_tc_cls_flower(struct net_device *dev,
 {
	struct mlx5e_priv *priv = netdev_priv(dev);

-	if (TC_H_MAJ(cls_flower->common.handle) != TC_H_MAJ(TC_H_INGRESS) ||
+	if (!is_classid_clsact_ingress(cls_flower->common.classid) ||
	    cls_flower->common.chain_index)
		return -EOPNOTSUPP;
...
@@ -657,7 +657,7 @@ mlx5e_rep_setup_tc_cls_flower(struct net_device *dev,
 {
	struct mlx5e_priv *priv = netdev_priv(dev);

-	if (TC_H_MAJ(cls_flower->common.handle) != TC_H_MAJ(TC_H_INGRESS) ||
+	if (!is_classid_clsact_ingress(cls_flower->common.classid) ||
	    cls_flower->common.chain_index)
		return -EOPNOTSUPP;
...
@@ -1696,7 +1696,14 @@ static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
 static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct tc_cls_matchall_offload *f)
 {
-	bool ingress = TC_H_MAJ(f->common.handle) == TC_H_MAJ(TC_H_INGRESS);
+	bool ingress;
+
+	if (is_classid_clsact_ingress(f->common.classid))
+		ingress = true;
+	else if (is_classid_clsact_egress(f->common.classid))
+		ingress = false;
+	else
+		return -EOPNOTSUPP;

	if (f->common.chain_index)
		return -EOPNOTSUPP;
@@ -1717,7 +1724,14 @@ static int
 mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_port *mlxsw_sp_port,
			     struct tc_cls_flower_offload *f)
 {
-	bool ingress = TC_H_MAJ(f->common.handle) == TC_H_MAJ(TC_H_INGRESS);
+	bool ingress;
+
+	if (is_classid_clsact_ingress(f->common.classid))
+		ingress = true;
+	else if (is_classid_clsact_egress(f->common.classid))
+		ingress = false;
+	else
+		return -EOPNOTSUPP;

	if (f->common.chain_index)
		return -EOPNOTSUPP;
...
@@ -127,7 +127,7 @@ static int nfp_bpf_setup_tc(struct nfp_app *app, struct net_device *netdev,
	struct nfp_net *nn = netdev_priv(netdev);

	if (type != TC_SETUP_CLSBPF || !nfp_net_ebpf_capable(nn) ||
-	    TC_H_MAJ(cls_bpf->common.handle) != TC_H_MAJ(TC_H_INGRESS) ||
+	    !is_classid_clsact_ingress(cls_bpf->common.classid) ||
	    cls_bpf->common.protocol != htons(ETH_P_ALL) ||
	    cls_bpf->common.chain_index)
		return -EOPNOTSUPP;
...
@@ -390,7 +390,7 @@ int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
	struct tc_cls_flower_offload *cls_flower = type_data;

	if (type != TC_SETUP_CLSFLOWER ||
-	    TC_H_MAJ(cls_flower->common.handle) != TC_H_MAJ(TC_H_INGRESS) ||
+	    !is_classid_clsact_ingress(cls_flower->common.classid) ||
	    !eth_proto_is_802_3(cls_flower->common.protocol) ||
	    cls_flower->common.chain_index)
		return -EOPNOTSUPP;
...
@@ -406,20 +406,20 @@ tcf_match_indev(struct sk_buff *skb, int ifindex)
 #endif /* CONFIG_NET_CLS_IND */

 struct tc_cls_common_offload {
-	u32 handle;
	u32 chain_index;
	__be16 protocol;
	u32 prio;
+	u32 classid;
 };

 static inline void
 tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common,
			    const struct tcf_proto *tp)
 {
-	cls_common->handle = tp->q->handle;
	cls_common->chain_index = tp->chain->index;
	cls_common->protocol = tp->protocol;
	cls_common->prio = tp->prio;
+	cls_common->classid = tp->classid;
 }

 struct tc_cls_u32_knode {
@@ -457,19 +457,12 @@ struct tc_cls_u32_offload {
	};
 };

-static inline bool tc_can_offload(const struct net_device *dev,
-				  const struct tcf_proto *tp)
+static inline bool tc_can_offload(const struct net_device *dev)
 {
-	const struct Qdisc *sch = tp->q;
-	const struct Qdisc_class_ops *cops = sch->ops->cl_ops;
-
	if (!(dev->features & NETIF_F_HW_TC))
		return false;
	if (!dev->netdev_ops->ndo_setup_tc)
		return false;
-	if (cops && cops->tcf_cl_offload)
-		return cops->tcf_cl_offload(tp->classid);
-
	return true;
 }
@@ -478,12 +471,11 @@ static inline bool tc_skip_hw(u32 flags)
	return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
 }

-static inline bool tc_should_offload(const struct net_device *dev,
-				     const struct tcf_proto *tp, u32 flags)
+static inline bool tc_should_offload(const struct net_device *dev, u32 flags)
 {
	if (tc_skip_hw(flags))
		return false;
-	return tc_can_offload(dev, tp);
+	return tc_can_offload(dev);
 }

 static inline bool tc_skip_sw(u32 flags)
...
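
Taken together, the two pkt_cls.h hunks above mean a classifier's offload path no longer passes the tcf_proto to the capability checks; the classid travels inside tc_cls_common_offload instead. A minimal sketch of the resulting call pattern follows; the function name and its parameters are hypothetical, only the helpers come from the header above.

/* Illustrative only: how a classifier consults the simplified helpers. */
static int example_cls_hw_replace(struct tcf_proto *tp, struct net_device *dev,
				  u32 flags)
{
	struct tc_cls_common_offload common = {};

	if (!tc_should_offload(dev, flags))	/* device capability + skip_hw */
		return tc_skip_sw(flags) ? -EINVAL : 0;

	tc_cls_common_offload_init(&common, tp);	/* now also copies tp->classid */
	/* ... fill the classifier-specific command and call ndo_setup_tc() ... */
	return 0;
}
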
@@ -5,6 +5,7 @@
 #include <linux/ktime.h>
 #include <linux/if_vlan.h>
 #include <net/sch_generic.h>
+#include <uapi/linux/pkt_sched.h>

 #define DEFAULT_TX_QUEUE_LEN	1000

@@ -132,4 +133,17 @@ static inline unsigned int psched_mtu(const struct net_device *dev)
	return dev->mtu + dev->hard_header_len;
 }

+static inline bool is_classid_clsact_ingress(u32 classid)
+{
+	/* This also returns true for ingress qdisc */
+	return TC_H_MAJ(classid) == TC_H_MAJ(TC_H_CLSACT) &&
+	       TC_H_MIN(classid) != TC_H_MIN(TC_H_MIN_EGRESS);
+}
+
+static inline bool is_classid_clsact_egress(u32 classid)
+{
+	return TC_H_MAJ(classid) == TC_H_MAJ(TC_H_CLSACT) &&
+	       TC_H_MIN(classid) == TC_H_MIN(TC_H_MIN_EGRESS);
+}
+
 #endif
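
The "also returns true for ingress qdisc" comment holds because TC_H_CLSACT is a UAPI alias of TC_H_INGRESS, so classids minted by either qdisc share the same major number and only clsact's egress class carries the TC_H_MIN_EGRESS minor. For context, the relevant definitions in include/uapi/linux/pkt_sched.h look roughly as follows; they are quoted here for reference and are not part of this diff.

/* Context only: handle helpers and ingress/clsact identifiers from
 * include/uapi/linux/pkt_sched.h, as of this kernel series.
 */
#define TC_H_MAJ_MASK	(0xFFFF0000U)
#define TC_H_MIN_MASK	(0x0000FFFFU)
#define TC_H_MAJ(h)	((h) & TC_H_MAJ_MASK)
#define TC_H_MIN(h)	((h) & TC_H_MIN_MASK)

#define TC_H_INGRESS	(0xFFFFFFF1U)
#define TC_H_CLSACT	TC_H_INGRESS

#define TC_H_MIN_INGRESS	0xFFF2U
#define TC_H_MIN_EGRESS		0xFFF3U
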
@@ -156,7 +156,6 @@ struct Qdisc_class_ops {

	/* Filter manipulation */
	struct tcf_block *	(*tcf_block)(struct Qdisc *, unsigned long);
-	bool			(*tcf_cl_offload)(u32 classid);
	unsigned long		(*bind_tcf)(struct Qdisc *, unsigned long,
					    u32 classid);
	void			(*unbind_tcf)(struct Qdisc *, unsigned long);
...
@@ -914,7 +914,14 @@ static void dsa_slave_del_cls_matchall(struct net_device *dev,
 static int dsa_slave_setup_tc_cls_matchall(struct net_device *dev,
					    struct tc_cls_matchall_offload *cls)
 {
-	bool ingress = TC_H_MAJ(cls->common.handle) == TC_H_MAJ(TC_H_INGRESS);
+	bool ingress;
+
+	if (is_classid_clsact_ingress(cls->common.classid))
+		ingress = true;
+	else if (is_classid_clsact_egress(cls->common.classid))
+		ingress = false;
+	else
+		return -EOPNOTSUPP;

	if (cls->common.chain_index)
		return -EOPNOTSUPP;
...
@@ -178,7 +178,7 @@ static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
		   (oldprog && tc_skip_sw(oldprog->gen_flags));

	if (oldprog && oldprog->offloaded) {
-		if (tc_should_offload(dev, tp, prog->gen_flags)) {
+		if (tc_should_offload(dev, prog->gen_flags)) {
			cmd = TC_CLSBPF_REPLACE;
		} else if (!tc_skip_sw(prog->gen_flags)) {
			obj = oldprog;
@@ -187,7 +187,7 @@ static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			return -EINVAL;
		}
	} else {
-		if (!tc_should_offload(dev, tp, prog->gen_flags))
+		if (!tc_should_offload(dev, prog->gen_flags))
			return skip_sw ? -EINVAL : 0;
		cmd = TC_CLSBPF_ADD;
	}
...
@@ -227,7 +227,7 @@ static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f)
	struct tc_cls_flower_offload cls_flower = {};
	struct net_device *dev = f->hw_dev;

-	if (!tc_can_offload(dev, tp))
+	if (!tc_can_offload(dev))
		return;

	tc_cls_common_offload_init(&cls_flower.common, tp);
@@ -246,9 +246,9 @@ static int fl_hw_replace_filter(struct tcf_proto *tp,
	struct tc_cls_flower_offload cls_flower = {};
	int err;

-	if (!tc_can_offload(dev, tp)) {
+	if (!tc_can_offload(dev)) {
		if (tcf_exts_get_dev(dev, &f->exts, &f->hw_dev) ||
-		    (f->hw_dev && !tc_can_offload(f->hw_dev, tp))) {
+		    (f->hw_dev && !tc_can_offload(f->hw_dev))) {
			f->hw_dev = dev;
			return tc_skip_sw(f->flags) ? -EINVAL : 0;
		}
@@ -281,7 +281,7 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
	struct tc_cls_flower_offload cls_flower = {};
	struct net_device *dev = f->hw_dev;

-	if (!tc_can_offload(dev, tp))
+	if (!tc_can_offload(dev))
		return;

	tc_cls_common_offload_init(&cls_flower.common, tp);
...
@@ -92,7 +92,7 @@ static void mall_destroy(struct tcf_proto *tp)
	if (!head)
		return;

-	if (tc_should_offload(dev, tp, head->flags))
+	if (tc_should_offload(dev, head->flags))
		mall_destroy_hw_filter(tp, head, (unsigned long) head);

	call_rcu(&head->rcu, mall_destroy_rcu);
@@ -172,7 +172,7 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
	if (err)
		goto err_set_parms;

-	if (tc_should_offload(dev, tp, flags)) {
+	if (tc_should_offload(dev, flags)) {
		err = mall_replace_hw_filter(tp, new, (unsigned long) new);
		if (err) {
			if (tc_skip_sw(flags))
...
@@ -433,7 +433,7 @@ static void u32_remove_hw_knode(struct tcf_proto *tp, u32 handle)
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_u32_offload cls_u32 = {};

-	if (!tc_should_offload(dev, tp, 0))
+	if (!tc_should_offload(dev, 0))
		return;

	tc_cls_common_offload_init(&cls_u32.common, tp);
@@ -450,7 +450,7 @@ static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
	struct tc_cls_u32_offload cls_u32 = {};
	int err;

-	if (!tc_should_offload(dev, tp, flags))
+	if (!tc_should_offload(dev, flags))
		return tc_skip_sw(flags) ? -EINVAL : 0;

	tc_cls_common_offload_init(&cls_u32.common, tp);
@@ -471,7 +471,7 @@ static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h)
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_u32_offload cls_u32 = {};

-	if (!tc_should_offload(dev, tp, 0))
+	if (!tc_should_offload(dev, 0))
		return;

	tc_cls_common_offload_init(&cls_u32.common, tp);
@@ -490,7 +490,7 @@ static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
	struct tc_cls_u32_offload cls_u32 = {};
	int err;

-	if (!tc_should_offload(dev, tp, flags))
+	if (!tc_should_offload(dev, flags))
		return tc_skip_sw(flags) ? -EINVAL : 0;

	tc_cls_common_offload_init(&cls_u32.common, tp);
...
@@ -32,11 +32,6 @@ static unsigned long ingress_get(struct Qdisc *sch, u32 classid)
	return TC_H_MIN(classid) + 1;
 }

-static bool ingress_cl_offload(u32 classid)
-{
-	return true;
-}
-
 static unsigned long ingress_bind_filter(struct Qdisc *sch,
					  unsigned long parent, u32 classid)
 {
@@ -103,7 +98,6 @@ static const struct Qdisc_class_ops ingress_class_ops = {
	.put		= ingress_put,
	.walk		= ingress_walk,
	.tcf_block	= ingress_tcf_block,
-	.tcf_cl_offload	= ingress_cl_offload,
	.bind_tcf	= ingress_bind_filter,
	.unbind_tcf	= ingress_put,
 };
@@ -134,11 +128,6 @@ static unsigned long clsact_get(struct Qdisc *sch, u32 classid)
	}
 }

-static bool clsact_cl_offload(u32 classid)
-{
-	return TC_H_MIN(classid) == TC_H_MIN(TC_H_MIN_INGRESS);
-}
-
 static unsigned long clsact_bind_filter(struct Qdisc *sch,
					 unsigned long parent, u32 classid)
 {
@@ -198,7 +187,6 @@ static const struct Qdisc_class_ops clsact_class_ops = {
	.put		= ingress_put,
	.walk		= ingress_walk,
	.tcf_block	= clsact_tcf_block,
-	.tcf_cl_offload	= clsact_cl_offload,
	.bind_tcf	= clsact_bind_filter,
	.unbind_tcf	= ingress_put,
 };
...