Commit 245dc512 authored by Jiri Pirko, committed by David S. Miller

net: sched: cls_u32: call block callbacks for offload

Use the newly introduced callback infrastructure and call block
callbacks alongside the existing per-netdev ndo_setup_tc.
Signed-off-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 77460411
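
The diff below leans on tc_setup_cb_call(), added earlier in this series: it walks every callback bound to the tcf_block and aggregates their return values. The following standalone sketch (illustrative names and simplified types, not the kernel's real ones) models that contract, which is why the callers in the hunks below treat a negative return as a hard failure and a positive return as "at least one callback offloaded the rule".

/*
 * Standalone model of the block-callback aggregation behind
 * tc_setup_cb_call(). All names and types here are illustrative; the
 * real implementation lives in net/sched/cls_api.c.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define MAX_CBS 8

typedef int (*setup_cb_t)(int type, void *type_data, void *cb_priv);

struct block_model {
	setup_cb_t cb[MAX_CBS];
	void *cb_priv[MAX_CBS];
	int n;
};

/*
 * Walk every callback bound to the block. With err_stop set (the
 * skip_sw case) the first error aborts the walk and is returned;
 * otherwise individual errors are tolerated and the number of
 * callbacks that accepted the command is returned, so a positive
 * result means "offloaded by at least one callback".
 */
static int setup_cb_call_model(struct block_model *b, int type,
			       void *type_data, bool err_stop)
{
	int i, err, ok_count = 0;

	for (i = 0; i < b->n; i++) {
		err = b->cb[i](type, type_data, b->cb_priv[i]);
		if (err) {
			if (err_stop)
				return err;
		} else {
			ok_count++;
		}
	}
	return ok_count;
}

static int cb_accepts(int type, void *type_data, void *cb_priv)
{
	return 0;		/* pretend the rule was programmed into HW */
}

static int cb_rejects(int type, void *type_data, void *cb_priv)
{
	return -95;		/* pretend -EOPNOTSUPP from a driver */
}

int main(void)
{
	struct block_model b = { .cb = { cb_accepts, cb_rejects }, .n = 2 };

	/* err_stop=false (delete paths, or replace without skip_sw): one offload counted */
	printf("%d\n", setup_cb_call_model(&b, 0, NULL, false));
	/* err_stop=true (skip_sw replace): the failing callback aborts the walk */
	printf("%d\n", setup_cb_call_model(&b, 0, NULL, true));
	return 0;
}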
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -465,39 +465,57 @@ static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)
 static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h)
 {
 	struct net_device *dev = tp->q->dev_queue->dev;
+	struct tcf_block *block = tp->chain->block;
 	struct tc_cls_u32_offload cls_u32 = {};
 
-	if (!tc_should_offload(dev, 0))
-		return;
-
 	tc_cls_common_offload_init(&cls_u32.common, tp);
 	cls_u32.command = TC_CLSU32_DELETE_HNODE;
 	cls_u32.hnode.divisor = h->divisor;
 	cls_u32.hnode.handle = h->handle;
 	cls_u32.hnode.prio = h->prio;
 
-	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSU32, &cls_u32);
+	if (tc_can_offload(dev))
+		dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSU32, &cls_u32);
+	tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, false);
 }
 
 static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
 				u32 flags)
 {
 	struct net_device *dev = tp->q->dev_queue->dev;
+	struct tcf_block *block = tp->chain->block;
 	struct tc_cls_u32_offload cls_u32 = {};
+	bool skip_sw = tc_skip_sw(flags);
+	bool offloaded = false;
 	int err;
 
-	if (!tc_should_offload(dev, flags))
-		return tc_skip_sw(flags) ? -EINVAL : 0;
-
 	tc_cls_common_offload_init(&cls_u32.common, tp);
 	cls_u32.command = TC_CLSU32_NEW_HNODE;
 	cls_u32.hnode.divisor = h->divisor;
 	cls_u32.hnode.handle = h->handle;
 	cls_u32.hnode.prio = h->prio;
 
-	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSU32, &cls_u32);
-	if (tc_skip_sw(flags))
+	if (tc_can_offload(dev)) {
+		err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSU32,
+						    &cls_u32);
+		if (err) {
+			if (skip_sw)
+				return err;
+		} else {
+			offloaded = true;
+		}
+	}
+
+	err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, skip_sw);
+	if (err < 0) {
+		u32_clear_hw_hnode(tp, h);
 		return err;
+	} else if (err > 0) {
+		offloaded = true;
+	}
+
+	if (skip_sw && !offloaded)
+		return -EINVAL;
 
 	return 0;
 }
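
The replace path above has to honour TCA_CLS_FLAGS_SKIP_SW: the rule must land in hardware either through the legacy ndo_setup_tc() call or through at least one block callback, otherwise the insert fails with -EINVAL. The small self-contained model below (hypothetical helper name, plain userspace errno; only the control flow mirrors the diff) restates that decision so the corner cases can be checked against the hunk.

/*
 * Self-contained restatement of the skip_sw decision made in
 * u32_replace_hw_hnode()/u32_replace_hw_knode() above.
 */
#include <assert.h>
#include <errno.h>
#include <stdbool.h>

/*
 * can_ndo:  device exposes the legacy per-netdev offload path (tc_can_offload)
 * ndo_err:  result of ndo_setup_tc(), only meaningful when can_ndo is true
 * cb_res:   result of the block-callback walk (<0 error, >0 offload count)
 * skip_sw:  the user asked for hardware-only (TCA_CLS_FLAGS_SKIP_SW)
 */
static int replace_hw_model(bool can_ndo, int ndo_err, int cb_res, bool skip_sw)
{
	bool offloaded = false;

	if (can_ndo) {
		if (ndo_err) {
			if (skip_sw)
				return ndo_err;	/* hw-only rule, legacy path failed */
		} else {
			offloaded = true;
		}
	}

	if (cb_res < 0)
		return cb_res;			/* callback walk failed; caller rolls back */
	else if (cb_res > 0)
		offloaded = true;

	if (skip_sw && !offloaded)
		return -EINVAL;			/* nobody put the rule into hardware */
	return 0;
}

int main(void)
{
	assert(replace_hw_model(true, 0, 0, true) == 0);	/* ndo path offloaded it */
	assert(replace_hw_model(false, 0, 1, true) == 0);	/* a block callback did */
	assert(replace_hw_model(false, 0, 0, true) == -EINVAL);	/* skip_sw, no offload */
	assert(replace_hw_model(false, 0, 0, false) == 0);	/* software fallback is fine */
	return 0;
}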
@@ -505,28 +523,27 @@ static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
 static void u32_remove_hw_knode(struct tcf_proto *tp, u32 handle)
 {
 	struct net_device *dev = tp->q->dev_queue->dev;
+	struct tcf_block *block = tp->chain->block;
 	struct tc_cls_u32_offload cls_u32 = {};
 
-	if (!tc_should_offload(dev, 0))
-		return;
-
 	tc_cls_common_offload_init(&cls_u32.common, tp);
 	cls_u32.command = TC_CLSU32_DELETE_KNODE;
 	cls_u32.knode.handle = handle;
 
-	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSU32, &cls_u32);
+	if (tc_can_offload(dev))
+		dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSU32, &cls_u32);
+	tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, false);
 }
 
 static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
 				u32 flags)
 {
 	struct net_device *dev = tp->q->dev_queue->dev;
+	struct tcf_block *block = tp->chain->block;
 	struct tc_cls_u32_offload cls_u32 = {};
+	bool skip_sw = tc_skip_sw(flags);
 	int err;
 
-	if (!tc_should_offload(dev, flags))
-		return tc_skip_sw(flags) ? -EINVAL : 0;
-
 	tc_cls_common_offload_init(&cls_u32.common, tp);
 	cls_u32.command = TC_CLSU32_REPLACE_KNODE;
 	cls_u32.knode.handle = n->handle;
@@ -543,13 +560,28 @@ static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
 	if (n->ht_down)
 		cls_u32.knode.link_handle = n->ht_down->handle;
 
-	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSU32, &cls_u32);
-	if (!err)
-		n->flags |= TCA_CLS_FLAGS_IN_HW;
+	if (tc_can_offload(dev)) {
+		err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSU32,
+						    &cls_u32);
+		if (err) {
+			if (skip_sw)
+				return err;
+		} else {
+			n->flags |= TCA_CLS_FLAGS_IN_HW;
+		}
+	}
 
-	if (tc_skip_sw(flags))
+	err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, skip_sw);
+	if (err < 0) {
+		u32_remove_hw_knode(tp, n->handle);
 		return err;
+	} else if (err > 0) {
+		n->flags |= TCA_CLS_FLAGS_IN_HW;
+	}
+
+	if (skip_sw && !(n->flags & TCA_CLS_FLAGS_IN_HW))
+		return -EINVAL;
 
 	return 0;
 }
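
On the receiving end, any driver that has bound a callback to this block now gets the same struct tc_cls_u32_offload that previously arrived only via ndo_setup_tc. The fragment below is a hypothetical driver-side handler written against trimmed local stand-ins for the pkt_cls.h types (hence the _MODEL suffixes), only to show the shape of the dispatch on cls_u32->command; it is not taken from any real driver.

/*
 * Hypothetical driver-side block callback for the u32 classifier.
 * The enum and struct below are simplified local stand-ins, not the
 * kernel's definitions.
 */
#include <stddef.h>
#include <stdio.h>

enum { TC_SETUP_CLSU32_MODEL };

enum tc_clsu32_command_model {
	TC_CLSU32_NEW_HNODE_MODEL,
	TC_CLSU32_DELETE_HNODE_MODEL,
	TC_CLSU32_REPLACE_KNODE_MODEL,
	TC_CLSU32_DELETE_KNODE_MODEL,
};

struct tc_cls_u32_offload_model {
	enum tc_clsu32_command_model command;
	unsigned int hnode_handle;	/* the real struct nests hnode/knode members */
	unsigned int knode_handle;
};

/* Shaped like the series' tc_setup_cb_t: (type, type_data, cb_priv). */
static int example_drv_block_cb(int type, void *type_data, void *cb_priv)
{
	struct tc_cls_u32_offload_model *cls_u32 = type_data;

	(void)cb_priv;			/* driver-private state in real code */
	if (type != TC_SETUP_CLSU32_MODEL)
		return -95;		/* would be -EOPNOTSUPP */

	switch (cls_u32->command) {
	case TC_CLSU32_NEW_HNODE_MODEL:
	case TC_CLSU32_DELETE_HNODE_MODEL:
		printf("hash table %#x\n", cls_u32->hnode_handle);
		return 0;
	case TC_CLSU32_REPLACE_KNODE_MODEL:
	case TC_CLSU32_DELETE_KNODE_MODEL:
		printf("key node %#x\n", cls_u32->knode_handle);
		return 0;
	}
	return -95;
}

int main(void)
{
	struct tc_cls_u32_offload_model cls = {
		.command = TC_CLSU32_REPLACE_KNODE_MODEL,
		.knode_handle = 0x800000,
	};

	return example_drv_block_cb(TC_SETUP_CLSU32_MODEL, &cls, NULL);
}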