Commit d853d112 authored by David S. Miller

Merge branch 'net-sched-let-the-offloader-decide-what-to-offload'

Jiri Pirko says:

====================
net: sched: let the offloader decide what to offload

Currently there is a Qdisc_class_ops->tcf_cl_offload callback
that is called to find out whether a classifier rule would be
offloaded or not. This is only implemented by sch_ingress and
sch_clsact, so it is the qdiscs that decide. However, it is the
driver that knows what it is able to offload, so move the
decision making entirely into the drivers. Just pass the classid
down to them and provide a set of helpers that allow
identification of the qdisc.

As a side effect, this actually allows offloading of clsact
egress rules in mlxsw.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents c5ebc440 7b06e8ae
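For orientation, the driver-side pattern this series converges on can be
sketched as a small standalone C program. The TC_H_* macros and the two
helpers are copied from uapi/linux/pkt_sched.h and from the pkt_sched.h
hunk below; setup_tc_demo and its plain -1 error return are hypothetical
stand-ins for a real ndo_setup_tc callback, added here purely for
illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

/* Values as defined in uapi/linux/pkt_sched.h */
#define TC_H_MAJ_MASK		(0xFFFF0000U)
#define TC_H_MIN_MASK		(0x0000FFFFU)
#define TC_H_MAJ(h)		((h) & TC_H_MAJ_MASK)
#define TC_H_MIN(h)		((h) & TC_H_MIN_MASK)
#define TC_H_MAKE(maj, min)	(((maj) & TC_H_MAJ_MASK) | ((min) & TC_H_MIN_MASK))
#define TC_H_INGRESS		(0xFFFFFFF1U)
#define TC_H_CLSACT		TC_H_INGRESS
#define TC_H_MIN_INGRESS	0xFFF2U
#define TC_H_MIN_EGRESS		0xFFF3U

/* The two helpers as the series adds them to include/net/pkt_sched.h */
static inline bool is_classid_clsact_ingress(u32 classid)
{
	/* This also returns true for ingress qdisc */
	return TC_H_MAJ(classid) == TC_H_MAJ(TC_H_CLSACT) &&
	       TC_H_MIN(classid) != TC_H_MIN(TC_H_MIN_EGRESS);
}

static inline bool is_classid_clsact_egress(u32 classid)
{
	return TC_H_MAJ(classid) == TC_H_MAJ(TC_H_CLSACT) &&
	       TC_H_MIN(classid) == TC_H_MIN(TC_H_MIN_EGRESS);
}

/* Hypothetical driver callback: decide ingress vs. egress from the
 * classid alone and refuse anything that is neither, which is the
 * pattern the mlxsw and DSA hunks below implement.
 */
static int setup_tc_demo(u32 classid)
{
	bool ingress;

	if (is_classid_clsact_ingress(classid)) {
		ingress = true;
	} else if (is_classid_clsact_egress(classid)) {
		ingress = false;
	} else {
		printf("classid 0x%08x -> refused\n", (unsigned)classid);
		return -1; /* a real driver would return -EOPNOTSUPP */
	}

	printf("classid 0x%08x -> offload on %s\n",
	       (unsigned)classid, ingress ? "ingress" : "egress");
	return 0;
}

int main(void)
{
	/* clsact attaches filters at ffff:fff2 (ingress) and ffff:fff3 (egress) */
	setup_tc_demo(TC_H_MAKE(TC_H_CLSACT, TC_H_MIN_INGRESS));
	setup_tc_demo(TC_H_MAKE(TC_H_CLSACT, TC_H_MIN_EGRESS));
	setup_tc_demo(TC_H_MAKE(0x10000U, 0x1U)); /* ordinary class -> refused */
	return 0;
}

With the qdisc-side tcf_cl_offload hook gone, this classid check is all a
driver needs in order to support (or refuse) clsact egress rules, which is
what enables the mlxsw egress offload mentioned above.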
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -2892,7 +2892,7 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
 static int cxgb_setup_tc_cls_u32(struct net_device *dev,
				  struct tc_cls_u32_offload *cls_u32)
 {
-	if (TC_H_MAJ(cls_u32->common.handle) != TC_H_MAJ(TC_H_INGRESS) ||
+	if (!is_classid_clsact_ingress(cls_u32->common.classid) ||
	     cls_u32->common.chain_index)
		 return -EOPNOTSUPP;
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -9230,7 +9230,7 @@ static int ixgbe_setup_tc_cls_u32(struct net_device *dev,
 {
	 struct ixgbe_adapter *adapter = netdev_priv(dev);

-	if (TC_H_MAJ(cls_u32->common.handle) != TC_H_MAJ(TC_H_INGRESS) ||
+	if (!is_classid_clsact_ingress(cls_u32->common.classid) ||
	     cls_u32->common.chain_index)
		 return -EOPNOTSUPP;
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -3031,7 +3031,7 @@ static int mlx5e_setup_tc_cls_flower(struct net_device *dev,
 {
	 struct mlx5e_priv *priv = netdev_priv(dev);

-	if (TC_H_MAJ(cls_flower->common.handle) != TC_H_MAJ(TC_H_INGRESS) ||
+	if (!is_classid_clsact_ingress(cls_flower->common.classid) ||
	     cls_flower->common.chain_index)
		 return -EOPNOTSUPP;
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -657,7 +657,7 @@ mlx5e_rep_setup_tc_cls_flower(struct net_device *dev,
 {
	 struct mlx5e_priv *priv = netdev_priv(dev);

-	if (TC_H_MAJ(cls_flower->common.handle) != TC_H_MAJ(TC_H_INGRESS) ||
+	if (!is_classid_clsact_ingress(cls_flower->common.classid) ||
	     cls_flower->common.chain_index)
		 return -EOPNOTSUPP;
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -1696,7 +1696,14 @@ static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
 static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct tc_cls_matchall_offload *f)
 {
-	bool ingress = TC_H_MAJ(f->common.handle) == TC_H_MAJ(TC_H_INGRESS);
+	bool ingress;
+
+	if (is_classid_clsact_ingress(f->common.classid))
+		ingress = true;
+	else if (is_classid_clsact_egress(f->common.classid))
+		ingress = false;
+	else
+		return -EOPNOTSUPP;

	 if (f->common.chain_index)
		 return -EOPNOTSUPP;
@@ -1717,7 +1724,14 @@ static int
 mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct tc_cls_flower_offload *f)
 {
-	bool ingress = TC_H_MAJ(f->common.handle) == TC_H_MAJ(TC_H_INGRESS);
+	bool ingress;
+
+	if (is_classid_clsact_ingress(f->common.classid))
+		ingress = true;
+	else if (is_classid_clsact_egress(f->common.classid))
+		ingress = false;
+	else
+		return -EOPNOTSUPP;

	 if (f->common.chain_index)
		 return -EOPNOTSUPP;
drivers/net/ethernet/netronome/nfp/bpf/main.c
@@ -127,7 +127,7 @@ static int nfp_bpf_setup_tc(struct nfp_app *app, struct net_device *netdev,
	 struct nfp_net *nn = netdev_priv(netdev);

	 if (type != TC_SETUP_CLSBPF || !nfp_net_ebpf_capable(nn) ||
-	    TC_H_MAJ(cls_bpf->common.handle) != TC_H_MAJ(TC_H_INGRESS) ||
+	    !is_classid_clsact_ingress(cls_bpf->common.classid) ||
	     cls_bpf->common.protocol != htons(ETH_P_ALL) ||
	     cls_bpf->common.chain_index)
		 return -EOPNOTSUPP;
drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -390,7 +390,7 @@ int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
	 struct tc_cls_flower_offload *cls_flower = type_data;

	 if (type != TC_SETUP_CLSFLOWER ||
-	    TC_H_MAJ(cls_flower->common.handle) != TC_H_MAJ(TC_H_INGRESS) ||
+	    !is_classid_clsact_ingress(cls_flower->common.classid) ||
	     !eth_proto_is_802_3(cls_flower->common.protocol) ||
	     cls_flower->common.chain_index)
		 return -EOPNOTSUPP;
include/net/pkt_cls.h
@@ -406,20 +406,20 @@ tcf_match_indev(struct sk_buff *skb, int ifindex)
 #endif /* CONFIG_NET_CLS_IND */

 struct tc_cls_common_offload {
-	u32 handle;
	 u32 chain_index;
	 __be16 protocol;
	 u32 prio;
+	u32 classid;
 };

 static inline void
 tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common,
			    const struct tcf_proto *tp)
 {
-	cls_common->handle = tp->q->handle;
	 cls_common->chain_index = tp->chain->index;
	 cls_common->protocol = tp->protocol;
	 cls_common->prio = tp->prio;
+	cls_common->classid = tp->classid;
 }

 struct tc_cls_u32_knode {
@@ -457,19 +457,12 @@ struct tc_cls_u32_offload {
	 };
 };

-static inline bool tc_can_offload(const struct net_device *dev,
-				  const struct tcf_proto *tp)
+static inline bool tc_can_offload(const struct net_device *dev)
 {
-	const struct Qdisc *sch = tp->q;
-	const struct Qdisc_class_ops *cops = sch->ops->cl_ops;
-
	 if (!(dev->features & NETIF_F_HW_TC))
		 return false;
	 if (!dev->netdev_ops->ndo_setup_tc)
		 return false;
-	if (cops && cops->tcf_cl_offload)
-		return cops->tcf_cl_offload(tp->classid);
-
	 return true;
 }
@@ -478,12 +471,11 @@ static inline bool tc_skip_hw(u32 flags)
	 return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
 }

-static inline bool tc_should_offload(const struct net_device *dev,
-				     const struct tcf_proto *tp, u32 flags)
+static inline bool tc_should_offload(const struct net_device *dev, u32 flags)
 {
	 if (tc_skip_hw(flags))
		 return false;

-	return tc_can_offload(dev, tp);
+	return tc_can_offload(dev);
 }

 static inline bool tc_skip_sw(u32 flags)
include/net/pkt_sched.h
@@ -5,6 +5,7 @@
 #include <linux/ktime.h>
 #include <linux/if_vlan.h>
 #include <net/sch_generic.h>
+#include <uapi/linux/pkt_sched.h>

 #define DEFAULT_TX_QUEUE_LEN	1000

@@ -132,4 +133,17 @@ static inline unsigned int psched_mtu(const struct net_device *dev)
	 return dev->mtu + dev->hard_header_len;
 }

+static inline bool is_classid_clsact_ingress(u32 classid)
+{
+	/* This also returns true for ingress qdisc */
+	return TC_H_MAJ(classid) == TC_H_MAJ(TC_H_CLSACT) &&
+	       TC_H_MIN(classid) != TC_H_MIN(TC_H_MIN_EGRESS);
+}
+
+static inline bool is_classid_clsact_egress(u32 classid)
+{
+	return TC_H_MAJ(classid) == TC_H_MAJ(TC_H_CLSACT) &&
+	       TC_H_MIN(classid) == TC_H_MIN(TC_H_MIN_EGRESS);
+}
+
 #endif
include/net/sch_generic.h
@@ -156,7 +156,6 @@ struct Qdisc_class_ops {
	 /* Filter manipulation */
	 struct tcf_block *	(*tcf_block)(struct Qdisc *, unsigned long);
-	bool			(*tcf_cl_offload)(u32 classid);
	 unsigned long		(*bind_tcf)(struct Qdisc *, unsigned long,
					     u32 classid);
	 void			(*unbind_tcf)(struct Qdisc *, unsigned long);
net/dsa/slave.c
@@ -914,7 +914,14 @@ static void dsa_slave_del_cls_matchall(struct net_device *dev,
 static int dsa_slave_setup_tc_cls_matchall(struct net_device *dev,
					    struct tc_cls_matchall_offload *cls)
 {
-	bool ingress = TC_H_MAJ(cls->common.handle) == TC_H_MAJ(TC_H_INGRESS);
+	bool ingress;
+
+	if (is_classid_clsact_ingress(cls->common.classid))
+		ingress = true;
+	else if (is_classid_clsact_egress(cls->common.classid))
+		ingress = false;
+	else
+		return -EOPNOTSUPP;

	 if (cls->common.chain_index)
		 return -EOPNOTSUPP;
net/sched/cls_bpf.c
@@ -178,7 +178,7 @@ static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
		    (oldprog && tc_skip_sw(oldprog->gen_flags));

	 if (oldprog && oldprog->offloaded) {
-		if (tc_should_offload(dev, tp, prog->gen_flags)) {
+		if (tc_should_offload(dev, prog->gen_flags)) {
			 cmd = TC_CLSBPF_REPLACE;
		 } else if (!tc_skip_sw(prog->gen_flags)) {
			 obj = oldprog;
@@ -187,7 +187,7 @@ static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			 return -EINVAL;
		 }
	 } else {
-		if (!tc_should_offload(dev, tp, prog->gen_flags))
+		if (!tc_should_offload(dev, prog->gen_flags))
			 return skip_sw ? -EINVAL : 0;
		 cmd = TC_CLSBPF_ADD;
	 }
net/sched/cls_flower.c
@@ -227,7 +227,7 @@ static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f)
	 struct tc_cls_flower_offload cls_flower = {};
	 struct net_device *dev = f->hw_dev;

-	if (!tc_can_offload(dev, tp))
+	if (!tc_can_offload(dev))
		 return;

	 tc_cls_common_offload_init(&cls_flower.common, tp);
@@ -246,9 +246,9 @@ static int fl_hw_replace_filter(struct tcf_proto *tp,
	 struct tc_cls_flower_offload cls_flower = {};
	 int err;

-	if (!tc_can_offload(dev, tp)) {
+	if (!tc_can_offload(dev)) {
		 if (tcf_exts_get_dev(dev, &f->exts, &f->hw_dev) ||
-		    (f->hw_dev && !tc_can_offload(f->hw_dev, tp))) {
+		    (f->hw_dev && !tc_can_offload(f->hw_dev))) {
			 f->hw_dev = dev;
			 return tc_skip_sw(f->flags) ? -EINVAL : 0;
		 }
@@ -281,7 +281,7 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
	 struct tc_cls_flower_offload cls_flower = {};
	 struct net_device *dev = f->hw_dev;

-	if (!tc_can_offload(dev, tp))
+	if (!tc_can_offload(dev))
		 return;

	 tc_cls_common_offload_init(&cls_flower.common, tp);
net/sched/cls_matchall.c
@@ -92,7 +92,7 @@ static void mall_destroy(struct tcf_proto *tp)
	 if (!head)
		 return;

-	if (tc_should_offload(dev, tp, head->flags))
+	if (tc_should_offload(dev, head->flags))
		 mall_destroy_hw_filter(tp, head, (unsigned long) head);

	 call_rcu(&head->rcu, mall_destroy_rcu);
@@ -172,7 +172,7 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
	 if (err)
		 goto err_set_parms;

-	if (tc_should_offload(dev, tp, flags)) {
+	if (tc_should_offload(dev, flags)) {
		 err = mall_replace_hw_filter(tp, new, (unsigned long) new);
		 if (err) {
			 if (tc_skip_sw(flags))
net/sched/cls_u32.c
@@ -433,7 +433,7 @@ static void u32_remove_hw_knode(struct tcf_proto *tp, u32 handle)
	 struct net_device *dev = tp->q->dev_queue->dev;
	 struct tc_cls_u32_offload cls_u32 = {};

-	if (!tc_should_offload(dev, tp, 0))
+	if (!tc_should_offload(dev, 0))
		 return;

	 tc_cls_common_offload_init(&cls_u32.common, tp);
@@ -450,7 +450,7 @@ static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
	 struct tc_cls_u32_offload cls_u32 = {};
	 int err;

-	if (!tc_should_offload(dev, tp, flags))
+	if (!tc_should_offload(dev, flags))
		 return tc_skip_sw(flags) ? -EINVAL : 0;

	 tc_cls_common_offload_init(&cls_u32.common, tp);
@@ -471,7 +471,7 @@ static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h)
	 struct net_device *dev = tp->q->dev_queue->dev;
	 struct tc_cls_u32_offload cls_u32 = {};

-	if (!tc_should_offload(dev, tp, 0))
+	if (!tc_should_offload(dev, 0))
		 return;

	 tc_cls_common_offload_init(&cls_u32.common, tp);
@@ -490,7 +490,7 @@ static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
	 struct tc_cls_u32_offload cls_u32 = {};
	 int err;

-	if (!tc_should_offload(dev, tp, flags))
+	if (!tc_should_offload(dev, flags))
		 return tc_skip_sw(flags) ? -EINVAL : 0;

	 tc_cls_common_offload_init(&cls_u32.common, tp);
net/sched/sch_ingress.c
@@ -32,11 +32,6 @@ static unsigned long ingress_get(struct Qdisc *sch, u32 classid)
	 return TC_H_MIN(classid) + 1;
 }

-static bool ingress_cl_offload(u32 classid)
-{
-	return true;
-}
-
 static unsigned long ingress_bind_filter(struct Qdisc *sch,
					  unsigned long parent, u32 classid)
 {
@@ -103,7 +98,6 @@ static const struct Qdisc_class_ops ingress_class_ops = {
	 .put		= ingress_put,
	 .walk		= ingress_walk,
	 .tcf_block	= ingress_tcf_block,
-	.tcf_cl_offload	= ingress_cl_offload,
	 .bind_tcf	= ingress_bind_filter,
	 .unbind_tcf	= ingress_put,
 };
@@ -134,11 +128,6 @@ static unsigned long clsact_get(struct Qdisc *sch, u32 classid)
	 }
 }

-static bool clsact_cl_offload(u32 classid)
-{
-	return TC_H_MIN(classid) == TC_H_MIN(TC_H_MIN_INGRESS);
-}
-
 static unsigned long clsact_bind_filter(struct Qdisc *sch,
					 unsigned long parent, u32 classid)
 {
@@ -198,7 +187,6 @@ static const struct Qdisc_class_ops clsact_class_ops = {
	 .put		= ingress_put,
	 .walk		= ingress_walk,
	 .tcf_block	= clsact_tcf_block,
-	.tcf_cl_offload	= clsact_cl_offload,
	 .bind_tcf	= clsact_bind_filter,
	 .unbind_tcf	= ingress_put,
 };