Commit a1db2178 authored by wenxu's avatar wenxu Committed by David S. Miller

net: flow_offload: fix flow_indr_dev_unregister path

If the representor is removed, then identify the indirect flow_blocks
that need to be removed by the release callback and the port representor
structure. To identify the port representor structure, a new
indr.cb_priv field needs to be introduced. The flow_block also needs to
be removed from the driver list from the cleanup path.

Fixes: 1fac52da ("net: flow_offload: consolidate indirect flow_block infrastructure")
Signed-off-by: wenxu <wenxu@ucloud.cn>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 66f1939a
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -1911,7 +1911,7 @@ static int bnxt_tc_setup_indr_block(struct net_device *netdev, struct bnxt *bp,
 	block_cb = flow_indr_block_cb_alloc(bnxt_tc_setup_indr_block_cb,
 					    cb_priv, cb_priv,
 					    bnxt_tc_setup_indr_rel, f,
-					    netdev, data, cleanup);
+					    netdev, data, bp, cleanup);
 	if (IS_ERR(block_cb)) {
 		list_del(&cb_priv->list);
 		kfree(cb_priv);
@@ -2079,7 +2079,7 @@ void bnxt_shutdown_tc(struct bnxt *bp)
 		return;

 	flow_indr_dev_unregister(bnxt_tc_setup_indr_cb, bp,
-				 bnxt_tc_setup_indr_block_cb);
+				 bnxt_tc_setup_indr_rel);
 	rhashtable_destroy(&tc_info->flow_table);
 	rhashtable_destroy(&tc_info->l2_table);
 	rhashtable_destroy(&tc_info->decap_l2_table);
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -442,7 +442,8 @@ mlx5e_rep_indr_setup_block(struct net_device *netdev,
 	block_cb = flow_indr_block_cb_alloc(setup_cb, indr_priv, indr_priv,
 					    mlx5e_rep_indr_block_unbind,
-					    f, netdev, data, cleanup);
+					    f, netdev, data, rpriv,
+					    cleanup);
 	if (IS_ERR(block_cb)) {
 		list_del(&indr_priv->list);
 		kfree(indr_priv);
@@ -503,7 +504,7 @@ int mlx5e_rep_tc_netdevice_event_register(struct mlx5e_rep_priv *rpriv)
 void mlx5e_rep_tc_netdevice_event_unregister(struct mlx5e_rep_priv *rpriv)
 {
 	flow_indr_dev_unregister(mlx5e_rep_indr_setup_cb, rpriv,
-				 mlx5e_rep_indr_setup_tc_cb);
+				 mlx5e_rep_indr_block_unbind);
 }

 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -861,7 +861,7 @@ static void nfp_flower_clean(struct nfp_app *app)
 	flush_work(&app_priv->cmsg_work);

 	flow_indr_dev_unregister(nfp_flower_indr_setup_tc_cb, app,
-				 nfp_flower_setup_indr_block_cb);
+				 nfp_flower_setup_indr_tc_release);

 	if (app_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)
 		nfp_flower_qos_cleanup(app);
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -462,8 +462,7 @@ int nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
 				enum tc_setup_type type, void *type_data,
 				void *data,
 				void (*cleanup)(struct flow_block_cb *block_cb));
-int nfp_flower_setup_indr_block_cb(enum tc_setup_type type, void *type_data,
-				   void *cb_priv);
+void nfp_flower_setup_indr_tc_release(void *cb_priv);

 void
 __nfp_flower_non_repr_priv_get(struct nfp_flower_non_repr_priv *non_repr_priv);
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -1619,7 +1619,7 @@ nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app,
 	return NULL;
 }

-int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
-				   void *type_data, void *cb_priv)
+static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
+					  void *type_data, void *cb_priv)
 {
 	struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
@@ -1637,7 +1637,7 @@ int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
 	}
 }

-static void nfp_flower_setup_indr_tc_release(void *cb_priv)
+void nfp_flower_setup_indr_tc_release(void *cb_priv)
 {
 	struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
@@ -1680,7 +1680,7 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
 	block_cb = flow_indr_block_cb_alloc(nfp_flower_setup_indr_block_cb,
 					    cb_priv, cb_priv,
 					    nfp_flower_setup_indr_tc_release,
-					    f, netdev, data, cleanup);
+					    f, netdev, data, app, cleanup);
 	if (IS_ERR(block_cb)) {
 		list_del(&cb_priv->list);
 		kfree(cb_priv);
--- a/include/net/flow_offload.h
+++ b/include/net/flow_offload.h
@@ -450,6 +450,7 @@ struct flow_block_indr {
 	struct net_device		*dev;
 	enum flow_block_binder_type	binder_type;
 	void				*data;
+	void				*cb_priv;
 	void				(*cleanup)(struct flow_block_cb *block_cb);
 };

@@ -472,6 +473,7 @@ struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb,
 					       void (*release)(void *cb_priv),
 					       struct flow_block_offload *bo,
 					       struct net_device *dev, void *data,
+					       void *indr_cb_priv,
 					       void (*cleanup)(struct flow_block_cb *block_cb));
 void flow_block_cb_free(struct flow_block_cb *block_cb);

@@ -551,7 +553,7 @@ typedef int flow_indr_block_bind_cb_t(struct net_device *dev, void *cb_priv,
 int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv);
 void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
-			      flow_setup_cb_t *setup_cb);
+			      void (*release)(void *cb_priv));
 int flow_indr_dev_setup_offload(struct net_device *dev,
 				enum tc_setup_type type, void *data,
 				struct flow_block_offload *bo,
--- a/net/core/flow_offload.c
+++ b/net/core/flow_offload.c
@@ -372,14 +372,15 @@ int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
 }
 EXPORT_SYMBOL(flow_indr_dev_register);

-static void __flow_block_indr_cleanup(flow_setup_cb_t *setup_cb, void *cb_priv,
+static void __flow_block_indr_cleanup(void (*release)(void *cb_priv),
+				      void *cb_priv,
 				      struct list_head *cleanup_list)
 {
 	struct flow_block_cb *this, *next;

 	list_for_each_entry_safe(this, next, &flow_block_indr_list, indr.list) {
-		if (this->cb == setup_cb &&
-		    this->cb_priv == cb_priv) {
+		if (this->release == release &&
+		    this->indr.cb_priv == cb_priv) {
 			list_move(&this->indr.list, cleanup_list);
 			return;
 		}
@@ -397,7 +398,7 @@ static void flow_block_indr_notify(struct list_head *cleanup_list)
 }

 void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
-			      flow_setup_cb_t *setup_cb)
+			      void (*release)(void *cb_priv))
 {
 	struct flow_indr_dev *this, *next, *indr_dev = NULL;
 	LIST_HEAD(cleanup_list);
@@ -418,7 +419,7 @@ void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
 		return;
 	}

-	__flow_block_indr_cleanup(setup_cb, cb_priv, &cleanup_list);
+	__flow_block_indr_cleanup(release, cb_priv, &cleanup_list);
 	mutex_unlock(&flow_indr_block_lock);

 	flow_block_indr_notify(&cleanup_list);
@@ -429,10 +430,12 @@ EXPORT_SYMBOL(flow_indr_dev_unregister);
 static void flow_block_indr_init(struct flow_block_cb *flow_block,
 				 struct flow_block_offload *bo,
 				 struct net_device *dev, void *data,
+				 void *cb_priv,
 				 void (*cleanup)(struct flow_block_cb *block_cb))
 {
 	flow_block->indr.binder_type = bo->binder_type;
 	flow_block->indr.data = data;
+	flow_block->indr.cb_priv = cb_priv;
 	flow_block->indr.dev = dev;
 	flow_block->indr.cleanup = cleanup;
 }
@@ -442,6 +445,7 @@ struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb,
 					       void (*release)(void *cb_priv),
 					       struct flow_block_offload *bo,
 					       struct net_device *dev, void *data,
+					       void *indr_cb_priv,
 					       void (*cleanup)(struct flow_block_cb *block_cb))
 {
 	struct flow_block_cb *block_cb;
@@ -450,7 +454,7 @@ struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb,
 	if (IS_ERR(block_cb))
 		goto out;

-	flow_block_indr_init(block_cb, bo, dev, data, cleanup);
+	flow_block_indr_init(block_cb, bo, dev, data, indr_cb_priv, cleanup);
 	list_add(&block_cb->indr.list, &flow_block_indr_list);

 out:
--- a/net/netfilter/nf_flow_table_offload.c
+++ b/net/netfilter/nf_flow_table_offload.c
@@ -950,6 +950,7 @@ static void nf_flow_table_indr_cleanup(struct flow_block_cb *block_cb)
 	nf_flow_table_gc_cleanup(flowtable, dev);
 	down_write(&flowtable->flow_block_lock);
 	list_del(&block_cb->list);
+	list_del(&block_cb->driver_list);
 	flow_block_cb_free(block_cb);
 	up_write(&flowtable->flow_block_lock);
 }
--- a/net/netfilter/nf_tables_offload.c
+++ b/net/netfilter/nf_tables_offload.c
@@ -296,6 +296,7 @@ static void nft_indr_block_cleanup(struct flow_block_cb *block_cb)
 	nft_flow_block_offload_init(&bo, dev_net(dev), FLOW_BLOCK_UNBIND,
 				    basechain, &extack);
 	mutex_lock(&net->nft.commit_mutex);
+	list_del(&block_cb->driver_list);
 	list_move(&block_cb->list, &bo.cb_list);
 	nft_flow_offload_unbind(&bo, basechain);
 	mutex_unlock(&net->nft.commit_mutex);
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -652,6 +652,7 @@ static void tc_block_indr_cleanup(struct flow_block_cb *block_cb)
 			       &block->flow_block, tcf_block_shared(block),
 			       &extack);
 	down_write(&block->cb_lock);
+	list_del(&block_cb->driver_list);
 	list_move(&block_cb->list, &bo.cb_list);
 	up_write(&block->cb_lock);
 	rtnl_lock();
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment