Commit 74fc4f82 authored by Eli Cohen, committed by David S. Miller

net: Fix offloading indirect devices dependency on qdisc order creation

Currently, if an ingress qdisc is created on an indirect device before the
driver has registered for callbacks, the driver never gets a chance to
register its filter configuration callbacks for that qdisc.
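
For illustration, the problematic ordering can be hit with standard tc
commands; the device and module names below are made up for the example:

    tc qdisc add dev vxlan0 ingress   # ingress qdisc on a tunnel (indirect)
                                      # device; with this patch it is tracked
                                      # in flow_indir_dev_list
    modprobe foo                      # driver registers via
                                      # flow_indr_dev_register(), which now
                                      # replays the bind for vxlan0
    tc filter add dev vxlan0 ingress flower ...   # filter setup can reach
                                                  # the driver for offload

Before the patch, the bind for vxlan0 was silently missed in this order, so
the driver never saw the block and filters on it could not be offloaded.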

To fix that, modify the code to keep track of all the ingress qdiscs that
call flow_indr_dev_setup_offload(). When a driver calls
flow_indr_dev_register(), walk the list of tracked ingress qdiscs and invoke
the driver callback entry point for each of them, giving the driver a chance
to register its callback.
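
To make the driver side concrete, here is a minimal sketch, not taken from
this patch: flow_indr_dev_register() and the flow_indr_block_bind_cb_t
signature are the real API, while everything named foo_* is hypothetical.

#include <linux/netdevice.h>
#include <net/flow_offload.h>
#include <net/pkt_cls.h>

struct foo_priv;	/* hypothetical driver private data */

/* Callback with the flow_indr_block_bind_cb_t signature.  Before this
 * patch it was only invoked for ingress qdiscs created *after*
 * flow_indr_dev_register(); with the patch, existing_qdiscs_register()
 * also replays it for qdiscs created earlier.
 */
static int foo_indr_setup_cb(struct net_device *netdev, struct Qdisc *sch,
			     void *cb_priv, enum tc_setup_type type,
			     struct flow_block_offload *bo, void *data,
			     void (*cleanup)(struct flow_block_cb *block_cb))
{
	if (type != TC_SETUP_BLOCK)
		return -EOPNOTSUPP;

	/* Allocate a flow_block_cb with flow_indr_block_cb_alloc() and
	 * add it to bo->cb_list here to bind this driver's filter
	 * configuration callback to the block.
	 */
	return 0;
}

int foo_offload_init(struct foo_priv *priv)
{
	/* With this patch, registration also replays binds for ingress
	 * qdiscs that already existed before the driver loaded.
	 */
	return flow_indr_dev_register(foo_indr_setup_cb, priv);
}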
Reviewed-by: Jiri Pirko <jiri@nvidia.com>
Signed-off-by: Eli Cohen <elic@nvidia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent c1c5cb3a
--- a/include/net/flow_offload.h
+++ b/include/net/flow_offload.h
@@ -453,6 +453,7 @@ struct flow_block_offload {
 	struct list_head *driver_block_list;
 	struct netlink_ext_ack *extack;
 	struct Qdisc *sch;
+	struct list_head *cb_list_head;
 };
 
 enum tc_setup_type;
--- a/net/core/flow_offload.c
+++ b/net/core/flow_offload.c
@@ -321,6 +321,7 @@ EXPORT_SYMBOL(flow_block_cb_setup_simple);
 static DEFINE_MUTEX(flow_indr_block_lock);
 static LIST_HEAD(flow_block_indr_list);
 static LIST_HEAD(flow_block_indr_dev_list);
+static LIST_HEAD(flow_indir_dev_list);
 
 struct flow_indr_dev {
 	struct list_head list;
@@ -345,6 +346,33 @@ static struct flow_indr_dev *flow_indr_dev_alloc(flow_indr_block_bind_cb_t *cb,
 	return indr_dev;
 }
 
+struct flow_indir_dev_info {
+	void *data;
+	struct net_device *dev;
+	struct Qdisc *sch;
+	enum tc_setup_type type;
+	void (*cleanup)(struct flow_block_cb *block_cb);
+	struct list_head list;
+	enum flow_block_command command;
+	enum flow_block_binder_type binder_type;
+	struct list_head *cb_list;
+};
+
+static void existing_qdiscs_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
+{
+	struct flow_block_offload bo;
+	struct flow_indir_dev_info *cur;
+
+	list_for_each_entry(cur, &flow_indir_dev_list, list) {
+		memset(&bo, 0, sizeof(bo));
+		bo.command = cur->command;
+		bo.binder_type = cur->binder_type;
+		INIT_LIST_HEAD(&bo.cb_list);
+		cb(cur->dev, cur->sch, cb_priv, cur->type, &bo, cur->data, cur->cleanup);
+		list_splice(&bo.cb_list, cur->cb_list);
+	}
+}
+
 int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
 {
 	struct flow_indr_dev *indr_dev;
@@ -366,6 +394,7 @@ int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
 	}
 
 	list_add(&indr_dev->list, &flow_block_indr_dev_list);
+	existing_qdiscs_register(cb, cb_priv);
 	mutex_unlock(&flow_indr_block_lock);
 
 	return 0;
@@ -462,7 +491,59 @@ struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb,
 }
 EXPORT_SYMBOL(flow_indr_block_cb_alloc);
 
+static struct flow_indir_dev_info *find_indir_dev(void *data)
+{
+	struct flow_indir_dev_info *cur;
+
+	list_for_each_entry(cur, &flow_indir_dev_list, list) {
+		if (cur->data == data)
+			return cur;
+	}
+	return NULL;
+}
+
+static int indir_dev_add(void *data, struct net_device *dev, struct Qdisc *sch,
+			 enum tc_setup_type type, void (*cleanup)(struct flow_block_cb *block_cb),
+			 struct flow_block_offload *bo)
+{
+	struct flow_indir_dev_info *info;
+
+	info = find_indir_dev(data);
+	if (info)
+		return -EEXIST;
+
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	info->data = data;
+	info->dev = dev;
+	info->sch = sch;
+	info->type = type;
+	info->cleanup = cleanup;
+	info->command = bo->command;
+	info->binder_type = bo->binder_type;
+	info->cb_list = bo->cb_list_head;
+
+	list_add(&info->list, &flow_indir_dev_list);
+	return 0;
+}
+
+static int indir_dev_remove(void *data)
+{
+	struct flow_indir_dev_info *info;
+
+	info = find_indir_dev(data);
+	if (!info)
+		return -ENOENT;
+
+	list_del(&info->list);
+	kfree(info);
+
+	return 0;
+}
+
 int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
 				enum tc_setup_type type, void *data,
 				struct flow_block_offload *bo,
 				void (*cleanup)(struct flow_block_cb *block_cb))
@@ -470,6 +551,12 @@ int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
 	struct flow_indr_dev *this;
 
 	mutex_lock(&flow_indr_block_lock);
+
+	if (bo->command == FLOW_BLOCK_BIND)
+		indir_dev_add(data, dev, sch, type, cleanup, bo);
+	else if (bo->command == FLOW_BLOCK_UNBIND)
+		indir_dev_remove(data);
+
 	list_for_each_entry(this, &flow_block_indr_dev_list, list)
 		this->cb(dev, sch, this->cb_priv, type, bo, data, cleanup);
--- a/net/netfilter/nf_flow_table_offload.c
+++ b/net/netfilter/nf_flow_table_offload.c
@@ -1096,6 +1096,7 @@ static void nf_flow_table_block_offload_init(struct flow_block_offload *bo,
 	bo->command = cmd;
 	bo->binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
 	bo->extack = extack;
+	bo->cb_list_head = &flowtable->flow_block.cb_list;
 	INIT_LIST_HEAD(&bo->cb_list);
 }
--- a/net/netfilter/nf_tables_offload.c
+++ b/net/netfilter/nf_tables_offload.c
@@ -353,6 +353,7 @@ static void nft_flow_block_offload_init(struct flow_block_offload *bo,
 	bo->command = cmd;
 	bo->binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
 	bo->extack = extack;
+	bo->cb_list_head = &basechain->flow_block.cb_list;
 	INIT_LIST_HEAD(&bo->cb_list);
 }
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -634,6 +634,7 @@ static void tcf_block_offload_init(struct flow_block_offload *bo,
 	bo->block_shared = shared;
 	bo->extack = extack;
 	bo->sch = sch;
+	bo->cb_list_head = &flow_block->cb_list;
 	INIT_LIST_HEAD(&bo->cb_list);
 }