Commit 2acc4551 authored by Saeed Mahameed

net/mlx5e: CT: Return err_ptr from internal functions

Instead of having to deal with converting between int and ERR_PTR for
return values in mlx5_tc_ct_flow_offload(), make the internal helper
functions return a pointer to struct mlx5_flow_handle rather than passing
it back through an output parameter. This also avoids confusing gcc and
triggering false alarms, so the now-redundant ERR_PTR initialization of
the rule variable can be removed.
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Suggested-by: Jason Gunthorpe <jgg@mellanox.com>
Reviewed-by: Roi Dayan <roid@mellanox.com>
parent d12f4521
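
For context, the change follows the common kernel ERR_PTR convention: instead of returning an int error code and handing the result back through an output parameter, a function returns the pointer itself and encodes failures inside it. The snippet below is only a minimal, self-contained userspace sketch of that before/after shape; the simplified ERR_PTR()/IS_ERR()/PTR_ERR() macros and the old_do_offload()/do_offload() helpers are illustrative stand-ins, not the mlx5 driver code.

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Simplified userspace stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR(). */
    #define ERR_PTR(err)  ((void *)(intptr_t)(err))
    #define PTR_ERR(ptr)  ((int)(intptr_t)(ptr))
    #define IS_ERR(ptr)   ((uintptr_t)(ptr) >= (uintptr_t)-4095)

    struct flow_handle { int id; };

    /* Before: int return code, handle passed back through an output parameter. */
    static int old_do_offload(int want_fail, struct flow_handle **out)
    {
            static struct flow_handle h = { .id = 1 };

            if (want_fail)
                    return -ENOMEM;
            *out = &h;
            return 0;
    }

    /* After: the handle itself is returned, with errors encoded via ERR_PTR(). */
    static struct flow_handle *do_offload(int want_fail)
    {
            static struct flow_handle h = { .id = 2 };

            if (want_fail)
                    return ERR_PTR(-ENOMEM);
            return &h;
    }

    int main(void)
    {
            struct flow_handle *legacy = NULL;
            struct flow_handle *rule;
            int err;

            err = old_do_offload(0, &legacy);       /* old shape: check err, then use the out param */
            if (!err)
                    printf("legacy ok, id %d\n", legacy->id);

            rule = do_offload(1);                   /* new shape: one value carries both outcomes */
            if (IS_ERR(rule))
                    printf("offload failed: %d\n", PTR_ERR(rule));  /* prints -12 (ENOMEM) */
            else
                    printf("offload ok, id %d\n", rule->id);

            return 0;
    }

With a single return value there is no output pointer whose initialization the compiler has to reason about, which is the gcc "false alarm" the commit message refers to.
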
@@ -1400,12 +1400,11 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
  * + fte_id match +------------------------>
  * +--------------+
  */
-static int
+static struct mlx5_flow_handle *
 __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
 			  struct mlx5e_tc_flow *flow,
 			  struct mlx5_flow_spec *orig_spec,
-			  struct mlx5_esw_flow_attr *attr,
-			  struct mlx5_flow_handle **flow_rule)
+			  struct mlx5_esw_flow_attr *attr)
 {
 	struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
 	bool nat = attr->ct_attr.ct_action & TCA_CT_ACT_NAT;
@@ -1425,7 +1424,7 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
 	if (!post_ct_spec || !ct_flow) {
 		kfree(post_ct_spec);
 		kfree(ct_flow);
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 	}
 
 	/* Register for CT established events */
@@ -1546,11 +1545,10 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
 	}
 
 	attr->ct_attr.ct_flow = ct_flow;
-	*flow_rule = ct_flow->post_ct_rule;
 	dealloc_mod_hdr_actions(&pre_mod_acts);
 	kfree(post_ct_spec);
 
-	return 0;
+	return rule;
 
 err_insert_orig:
 	mlx5_eswitch_del_offloaded_rule(ct_priv->esw, ct_flow->post_ct_rule,
@@ -1568,16 +1566,15 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
 	kfree(post_ct_spec);
 	kfree(ct_flow);
 	netdev_warn(priv->netdev, "Failed to offload ct flow, err %d\n", err);
-	return err;
+	return ERR_PTR(err);
 }
 
-static int
+static struct mlx5_flow_handle *
 __mlx5_tc_ct_flow_offload_clear(struct mlx5e_priv *priv,
 				struct mlx5e_tc_flow *flow,
 				struct mlx5_flow_spec *orig_spec,
 				struct mlx5_esw_flow_attr *attr,
-				struct mlx5e_tc_mod_hdr_acts *mod_acts,
-				struct mlx5_flow_handle **flow_rule)
+				struct mlx5e_tc_mod_hdr_acts *mod_acts)
 {
 	struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
 	struct mlx5_eswitch *esw = ct_priv->esw;
@@ -1589,7 +1586,7 @@ __mlx5_tc_ct_flow_offload_clear(struct mlx5e_priv *priv,
 
 	ct_flow = kzalloc(sizeof(*ct_flow), GFP_KERNEL);
 	if (!ct_flow)
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 
 	/* Base esw attributes on original rule attribute */
 	pre_ct_attr = &ct_flow->pre_ct_attr;
@@ -1624,16 +1621,14 @@ __mlx5_tc_ct_flow_offload_clear(struct mlx5e_priv *priv,
 
 	attr->ct_attr.ct_flow = ct_flow;
 	ct_flow->pre_ct_rule = rule;
-	*flow_rule = rule;
-
-	return 0;
+	return rule;
 
 err_insert:
 	mlx5_modify_header_dealloc(priv->mdev, mod_hdr);
 err_set_registers:
 	netdev_warn(priv->netdev,
 		    "Failed to offload ct clear flow, err %d\n", err);
-	return err;
+	return ERR_PTR(err);
 }
 
 struct mlx5_flow_handle *
@@ -1645,22 +1640,18 @@ mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
 {
 	bool clear_action = attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR;
 	struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
-	struct mlx5_flow_handle *rule = ERR_PTR(-EINVAL);
-	int err;
+	struct mlx5_flow_handle *rule;
 
 	if (!ct_priv)
 		return ERR_PTR(-EOPNOTSUPP);
 
 	mutex_lock(&ct_priv->control_lock);
 	if (clear_action)
-		err = __mlx5_tc_ct_flow_offload_clear(priv, flow, spec, attr,
-						      mod_hdr_acts, &rule);
+		rule = __mlx5_tc_ct_flow_offload_clear(priv, flow, spec, attr,
						       mod_hdr_acts);
 	else
-		err = __mlx5_tc_ct_flow_offload(priv, flow, spec, attr,
-						&rule);
+		rule = __mlx5_tc_ct_flow_offload(priv, flow, spec, attr);
 	mutex_unlock(&ct_priv->control_lock);
 
-	if (err)
-		return ERR_PTR(err);
 	return rule;
 }
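
For completeness, the external entry point keeps its existing contract: mlx5_tc_ct_flow_offload() returns either a valid flow handle or an ERR_PTR-encoded error, so its users check the result the usual way. The fragment below is only an illustrative sketch of that check in a hypothetical int-returning caller, not code taken from the driver:

    	struct mlx5_flow_handle *rule;

    	rule = mlx5_tc_ct_flow_offload(priv, flow, spec, attr, mod_hdr_acts);
    	if (IS_ERR(rule))
    		return PTR_ERR(rule);	/* hand the encoded errno back as an int */
    	/* ... use rule ... */
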