Commit caa72601 authored by Jiri Pirko, committed by David S. Miller

net: sched: keep track of offloaded filters and check tc offload feature

During block bind, we need to check the tc offload feature. If it is
disabled but the block already contains offloaded filters, forbid the
bind. Also forbid registering a callback for a block that already
contains offloaded filters, as playback of previous calls is not
supported yet. To keep track of offloaded filters, a new counter is
introduced, along with a couple of helpers called from the cls_* code.
These helpers set and clear the TCA_CLS_FLAGS_IN_HW flag.
Signed-off-by: Jiri Pirko <jiri@mellanox.com>
Acked-by: Jamal Hadi Salim <jhs@mojatatu.com>
Acked-by: David Ahern <dsahern@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent edf6711c
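
Editor's note: the cls_bpf/cls_flower/cls_matchall/cls_u32 hunks below all apply the same pattern around tc_setup_cb_call(): bump the new counter via tcf_block_offload_inc() only when a driver actually accepted the filter, and drop it via tcf_block_offload_dec() when the hardware filter is torn down. A minimal sketch of that pattern follows; the foo_* names are hypothetical and invented for illustration, only tcf_block_offload_inc()/tcf_block_offload_dec(), tc_setup_cb_call() and TCA_CLS_FLAGS_IN_HW come from this patch and the surrounding tree.

#include <net/sch_generic.h>
#include <net/pkt_cls.h>

/* Sketch: how a classifier keeps block->offloadcnt in sync. */
static int foo_hw_replace_filter(struct tcf_block *block, void *type_data,
				 u32 *flags, bool skip_sw)
{
	int err;

	/* err > 0 means at least one driver callback installed the filter in HW */
	err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSMATCHALL, type_data, skip_sw);
	if (err < 0)
		return err;
	else if (err > 0)
		tcf_block_offload_inc(block, flags);	/* sets TCA_CLS_FLAGS_IN_HW */

	if (skip_sw && !(*flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;	/* skip_sw requested but nothing got offloaded */
	return 0;
}

static void foo_hw_destroy_filter(struct tcf_block *block, void *type_data,
				  u32 *flags)
{
	tc_setup_cb_call(block, NULL, TC_SETUP_CLSMATCHALL, type_data, false);
	tcf_block_offload_dec(block, flags);	/* clears the flag, decrements offloadcnt */
}

In the real classifiers the err < 0 path additionally tears down whatever was already installed before returning; see fl_hw_replace_filter() and mall_replace_hw_filter() in the hunks below.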
include/net/sch_generic.h
@@ -289,8 +289,26 @@ struct tcf_block {
 	struct list_head cb_list;
 	struct list_head owner_list;
 	bool keep_dst;
+	unsigned int offloadcnt; /* Number of offloaded filters */
+	unsigned int nooffloaddevcnt; /* Number of devs unable to do offload */
 };
 
+static inline void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
+{
+	if (*flags & TCA_CLS_FLAGS_IN_HW)
+		return;
+	*flags |= TCA_CLS_FLAGS_IN_HW;
+	block->offloadcnt++;
+}
+
+static inline void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
+{
+	if (!(*flags & TCA_CLS_FLAGS_IN_HW))
+		return;
+	*flags &= ~TCA_CLS_FLAGS_IN_HW;
+	block->offloadcnt--;
+}
+
 static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
 {
 	struct qdisc_skb_cb *qcb;
net/sched/cls_api.c
@@ -265,31 +265,66 @@ void tcf_chain_put(struct tcf_chain *chain)
 }
 EXPORT_SYMBOL(tcf_chain_put);
 
-static void tcf_block_offload_cmd(struct tcf_block *block, struct Qdisc *q,
-				  struct tcf_block_ext_info *ei,
-				  enum tc_block_command command)
+static bool tcf_block_offload_in_use(struct tcf_block *block)
+{
+	return block->offloadcnt;
+}
+
+static int tcf_block_offload_cmd(struct tcf_block *block,
+				 struct net_device *dev,
+				 struct tcf_block_ext_info *ei,
+				 enum tc_block_command command)
 {
-	struct net_device *dev = q->dev_queue->dev;
 	struct tc_block_offload bo = {};
 
-	if (!dev->netdev_ops->ndo_setup_tc)
-		return;
 	bo.command = command;
 	bo.binder_type = ei->binder_type;
 	bo.block = block;
-	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
+	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
 }
 
-static void tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
-				   struct tcf_block_ext_info *ei)
+static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
+				  struct tcf_block_ext_info *ei)
 {
-	tcf_block_offload_cmd(block, q, ei, TC_BLOCK_BIND);
+	struct net_device *dev = q->dev_queue->dev;
+	int err;
+
+	if (!dev->netdev_ops->ndo_setup_tc)
+		goto no_offload_dev_inc;
+
+	/* If tc offload feature is disabled and the block we try to bind
+	 * to already has some offloaded filters, forbid to bind.
+	 */
+	if (!tc_can_offload(dev) && tcf_block_offload_in_use(block))
+		return -EOPNOTSUPP;
+
+	err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_BIND);
+	if (err == -EOPNOTSUPP)
+		goto no_offload_dev_inc;
+	return err;
+
+no_offload_dev_inc:
+	if (tcf_block_offload_in_use(block))
+		return -EOPNOTSUPP;
+	block->nooffloaddevcnt++;
+	return 0;
 }
 
 static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
 				     struct tcf_block_ext_info *ei)
 {
-	tcf_block_offload_cmd(block, q, ei, TC_BLOCK_UNBIND);
+	struct net_device *dev = q->dev_queue->dev;
+	int err;
+
+	if (!dev->netdev_ops->ndo_setup_tc)
+		goto no_offload_dev_dec;
+	err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_UNBIND);
+	if (err == -EOPNOTSUPP)
+		goto no_offload_dev_dec;
+	return;
+
+no_offload_dev_dec:
+	WARN_ON(block->nooffloaddevcnt-- == 0);
 }
 
 static int
@@ -502,10 +537,16 @@ int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
 					   ei, extack);
 	if (err)
 		goto err_chain_head_change_cb_add;
-	tcf_block_offload_bind(block, q, ei);
+
+	err = tcf_block_offload_bind(block, q, ei);
+	if (err)
+		goto err_block_offload_bind;
+
 	*p_block = block;
 	return 0;
 
+err_block_offload_bind:
+	tcf_chain_head_change_cb_del(tcf_block_chain_zero(block), ei);
 err_chain_head_change_cb_add:
 	tcf_block_owner_del(block, q, ei->binder_type);
 err_block_owner_add:
@@ -637,9 +678,16 @@ struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
 {
 	struct tcf_block_cb *block_cb;
 
+	/* At this point, playback of previous block cb calls is not supported,
+	 * so forbid to register to block which already has some offloaded
+	 * filters present.
+	 */
+	if (tcf_block_offload_in_use(block))
+		return ERR_PTR(-EOPNOTSUPP);
+
 	block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL);
 	if (!block_cb)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 	block_cb->cb = cb;
 	block_cb->cb_ident = cb_ident;
 	block_cb->cb_priv = cb_priv;
@@ -655,7 +703,7 @@ int tcf_block_cb_register(struct tcf_block *block,
 	struct tcf_block_cb *block_cb;
 
 	block_cb = __tcf_block_cb_register(block, cb, cb_ident, cb_priv);
-	return block_cb ? 0 : -ENOMEM;
+	return IS_ERR(block_cb) ? PTR_ERR(block_cb) : 0;
 }
 EXPORT_SYMBOL(tcf_block_cb_register);
 
@@ -685,6 +733,10 @@ static int tcf_block_cb_call(struct tcf_block *block, enum tc_setup_type type,
 	int ok_count = 0;
 	int err;
 
+	/* Make sure all netdevs sharing this block are offload-capable. */
+	if (block->nooffloaddevcnt && err_stop)
+		return -EOPNOTSUPP;
+
 	list_for_each_entry(block_cb, &block->cb_list, list) {
 		err = block_cb->cb(type, type_data, block_cb->cb_priv);
 		if (err) {
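
Editor's note: a consequence of the __tcf_block_cb_register() change above is that a driver registering block callbacks from its TC_SETUP_BLOCK handler now gets a real error (e.g. -EOPNOTSUPP when the block already holds offloaded filters) propagated out of tcf_block_cb_register(). A minimal sketch of such a handler is below; the foo_* names are hypothetical, while tcf_block_cb_register()/tcf_block_cb_unregister(), struct tc_block_offload and the TC_BLOCK_* commands come from the existing block-callback infrastructure.

/* Hypothetical driver-side TC_SETUP_BLOCK handler; errors from
 * tcf_block_cb_register(), including the new -EOPNOTSUPP case,
 * are simply returned to the core.
 */
static int foo_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				 void *cb_priv)
{
	/* dispatch on type (TC_SETUP_CLSFLOWER, ...) and program the HW */
	return -EOPNOTSUPP;
}

static int foo_setup_tc_block(struct net_device *dev,
			      struct tc_block_offload *f)
{
	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		return tcf_block_cb_register(f->block, foo_setup_tc_block_cb,
					     dev, dev);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block, foo_setup_tc_block_cb, dev);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}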
net/sched/cls_bpf.c
@@ -167,13 +167,16 @@ static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
 	cls_bpf.exts_integrated = obj->exts_integrated;
 	cls_bpf.gen_flags = obj->gen_flags;
 
+	if (oldprog)
+		tcf_block_offload_dec(block, &oldprog->gen_flags);
+
 	err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSBPF, &cls_bpf, skip_sw);
 	if (prog) {
 		if (err < 0) {
 			cls_bpf_offload_cmd(tp, oldprog, prog);
 			return err;
 		} else if (err > 0) {
-			prog->gen_flags |= TCA_CLS_FLAGS_IN_HW;
+			tcf_block_offload_inc(block, &prog->gen_flags);
 		}
 	}
net/sched/cls_flower.c
@@ -229,6 +229,7 @@ static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f)
 
 	tc_setup_cb_call(block, &f->exts, TC_SETUP_CLSFLOWER,
 			 &cls_flower, false);
+	tcf_block_offload_dec(block, &f->flags);
 }
 
 static int fl_hw_replace_filter(struct tcf_proto *tp,
@@ -256,7 +257,7 @@ static int fl_hw_replace_filter(struct tcf_proto *tp,
 		fl_hw_destroy_filter(tp, f);
 		return err;
 	} else if (err > 0) {
-		f->flags |= TCA_CLS_FLAGS_IN_HW;
+		tcf_block_offload_inc(block, &f->flags);
 	}
 
 	if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
net/sched/cls_matchall.c
@@ -81,6 +81,7 @@ static void mall_destroy_hw_filter(struct tcf_proto *tp,
 	cls_mall.cookie = cookie;
 
 	tc_setup_cb_call(block, NULL, TC_SETUP_CLSMATCHALL, &cls_mall, false);
+	tcf_block_offload_dec(block, &head->flags);
 }
 
 static int mall_replace_hw_filter(struct tcf_proto *tp,
@@ -103,7 +104,7 @@ static int mall_replace_hw_filter(struct tcf_proto *tp,
 		mall_destroy_hw_filter(tp, head, cookie);
 		return err;
 	} else if (err > 0) {
-		head->flags |= TCA_CLS_FLAGS_IN_HW;
+		tcf_block_offload_inc(block, &head->flags);
 	}
 
 	if (skip_sw && !(head->flags & TCA_CLS_FLAGS_IN_HW))
net/sched/cls_u32.c
@@ -529,16 +529,17 @@ static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
 	return 0;
 }
 
-static void u32_remove_hw_knode(struct tcf_proto *tp, u32 handle)
+static void u32_remove_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n)
 {
 	struct tcf_block *block = tp->chain->block;
 	struct tc_cls_u32_offload cls_u32 = {};
 
 	tc_cls_common_offload_init(&cls_u32.common, tp);
 	cls_u32.command = TC_CLSU32_DELETE_KNODE;
-	cls_u32.knode.handle = handle;
+	cls_u32.knode.handle = n->handle;
 
 	tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, false);
+	tcf_block_offload_dec(block, &n->flags);
 }
 
 static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
@@ -567,10 +568,10 @@ static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
 	err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, skip_sw);
 	if (err < 0) {
-		u32_remove_hw_knode(tp, n->handle);
+		u32_remove_hw_knode(tp, n);
 		return err;
 	} else if (err > 0) {
-		n->flags |= TCA_CLS_FLAGS_IN_HW;
+		tcf_block_offload_inc(block, &n->flags);
 	}
 
 	if (skip_sw && !(n->flags & TCA_CLS_FLAGS_IN_HW))
@@ -589,7 +590,7 @@ static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
 			RCU_INIT_POINTER(ht->ht[h],
 					 rtnl_dereference(n->next));
 			tcf_unbind_filter(tp, &n->res);
-			u32_remove_hw_knode(tp, n->handle);
+			u32_remove_hw_knode(tp, n);
 			idr_remove_ext(&ht->handle_idr, n->handle);
 			if (tcf_exts_get_net(&n->exts))
 				call_rcu(&n->rcu, u32_delete_key_freepf_rcu);
@@ -682,7 +683,7 @@ static int u32_delete(struct tcf_proto *tp, void *arg, bool *last)
 		goto out;
 
 	if (TC_U32_KEY(ht->handle)) {
-		u32_remove_hw_knode(tp, ht->handle);
+		u32_remove_hw_knode(tp, (struct tc_u_knode *)ht);
 		ret = u32_delete_key(tp, (struct tc_u_knode *)ht);
 		goto out;
 	}