Commit 44a674d6 authored by Jakub Kicinski

Merge tag 'mlx5-fixes-2021-01-26' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 fixes 2021-01-26

This series introduces some fixes to the mlx5 driver.

* tag 'mlx5-fixes-2021-01-26' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  net/mlx5: CT: Fix incorrect removal of tuple_nat_node from nat rhashtable
  net/mlx5e: Revert parameters on errors when changing MTU and LRO state without reset
  net/mlx5e: Revert parameters on errors when changing trust state without reset
  net/mlx5e: Correctly handle changing the number of queues when the interface is down
  net/mlx5e: Fix CT rule + encap slow path offload and deletion
  net/mlx5e: Disable hw-tc-offload when MLX5_CLS_ACT config is disabled
  net/mlx5: Maintain separate page trees for ECPF and PF functions
  net/mlx5e: Fix IPSEC stats
  net/mlx5e: Reduce tc unsupported key print level
  net/mlx5e: free page before return
  net/mlx5e: E-switch, Fix rate calculation for overflow
  net/mlx5: Fix memory leak on flow table creation error flow
====================

Link: https://lore.kernel.org/r/20210126234345.202096-1-saeedm@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents e41aec79 e2194a17
@@ -273,7 +273,7 @@ int mlx5e_health_rsc_fmsg_dump(struct mlx5e_priv *priv, struct mlx5_rsc_key *key
 	err = devlink_fmsg_binary_pair_nest_start(fmsg, "data");
 	if (err)
-		return err;
+		goto free_page;

 	cmd = mlx5_rsc_dump_cmd_create(mdev, key);
 	if (IS_ERR(cmd)) {
......
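For context on the hunk above ("net/mlx5e: free page before return"): converting an early return into a jump to a common cleanup label is the standard kernel pattern for unwinding resources acquired earlier in the function. A minimal sketch of the pattern, with hypothetical names (struct device_ctx, produce_data, emit_data are illustrations, not the driver's actual code):

static int example_dump(struct device_ctx *ctx)
{
	struct page *page;
	int err;

	page = alloc_page(GFP_KERNEL);	/* resource acquired up front */
	if (!page)
		return -ENOMEM;

	err = produce_data(ctx, page);	/* any failure past this point... */
	if (err)
		goto free_page;		/* ...must not leak the page */

	err = emit_data(ctx, page);

free_page:
	__free_page(page);		/* the single exit path frees it */
	return err;
}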
@@ -167,6 +167,12 @@ static const struct rhashtable_params tuples_nat_ht_params = {
 	.min_size = 16 * 1024,
 };

+static bool
+mlx5_tc_ct_entry_has_nat(struct mlx5_ct_entry *entry)
+{
+	return !!(entry->tuple_nat_node.next);
+}
+
 static int
 mlx5_tc_ct_rule_to_tuple(struct mlx5_ct_tuple *tuple, struct flow_rule *rule)
 {
@@ -911,13 +917,13 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
 err_insert:
 	mlx5_tc_ct_entry_del_rules(ct_priv, entry);
 err_rules:
-	rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
-			       &entry->tuple_nat_node, tuples_nat_ht_params);
+	if (mlx5_tc_ct_entry_has_nat(entry))
+		rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
+				       &entry->tuple_nat_node, tuples_nat_ht_params);
 err_tuple_nat:
-	if (entry->tuple_node.next)
-		rhashtable_remove_fast(&ct_priv->ct_tuples_ht,
-				       &entry->tuple_node,
-				       tuples_ht_params);
+	rhashtable_remove_fast(&ct_priv->ct_tuples_ht,
+			       &entry->tuple_node,
+			       tuples_ht_params);
 err_tuple:
 err_set:
 	kfree(entry);
@@ -932,7 +938,7 @@ mlx5_tc_ct_del_ft_entry(struct mlx5_tc_ct_priv *ct_priv,
 {
 	mlx5_tc_ct_entry_del_rules(ct_priv, entry);
 	mutex_lock(&ct_priv->shared_counter_lock);
-	if (entry->tuple_node.next)
+	if (mlx5_tc_ct_entry_has_nat(entry))
 		rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
 				       &entry->tuple_nat_node,
 				       tuples_nat_ht_params);
......
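The new mlx5_tc_ct_entry_has_nat() helper works because a zero-allocated, never-inserted struct rhash_head still has a NULL .next pointer, while rhashtable terminates its bucket chains with a non-NULL "nulls" marker, so any node that was actually inserted has .next set. The bug was testing tuple_node instead of tuple_nat_node, which removed non-NAT entries from the NAT table. A sketch of the idea with hypothetical names, assuming the containing struct is kzalloc'ed:

#include <linux/rhashtable.h>
#include <linux/slab.h>

struct my_entry {
	struct rhash_head node;	/* .next stays NULL until inserted */
	u32 key;
};

/* true only if @e was actually inserted into a table */
static bool my_entry_is_hashed(struct my_entry *e)
{
	return !!e->node.next;
}

static void my_entry_remove(struct rhashtable *ht, struct my_entry *e,
			    const struct rhashtable_params params)
{
	/* removing a never-inserted node would corrupt the table */
	if (my_entry_is_hashed(e))
		rhashtable_remove_fast(ht, &e->node, params);
}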
@@ -76,7 +76,7 @@ static const struct counter_desc mlx5e_ipsec_sw_stats_desc[] = {
 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_sw)
 {
-	return NUM_IPSEC_SW_COUNTERS;
+	return priv->ipsec ? NUM_IPSEC_SW_COUNTERS : 0;
 }

 static inline MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ipsec_sw) {}
@@ -105,7 +105,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_sw)
 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_hw)
 {
-	return (mlx5_fpga_ipsec_device_caps(priv->mdev)) ? NUM_IPSEC_HW_COUNTERS : 0;
+	return (priv->ipsec && mlx5_fpga_ipsec_device_caps(priv->mdev)) ? NUM_IPSEC_HW_COUNTERS : 0;
 }

 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ipsec_hw)
......
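The two IPsec stats hunks follow one rule: a stats group's num_stats callback must return 0 whenever its backing context is absent, because ethtool sizes its string and counter buffers from that count and the fill callbacks would then dereference the missing context. A tiny sketch of the guard, with hypothetical names (example_priv, feature_ctx, NUM_FEATURE_COUNTERS):

enum { NUM_FEATURE_COUNTERS = 4 };

struct example_priv {
	void *feature_ctx;	/* NULL when the feature is not initialized */
};

static int example_grp_num_stats(struct example_priv *priv)
{
	/* advertise counters only when the context actually exists */
	return priv->feature_ctx ? NUM_FEATURE_COUNTERS : 0;
}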
@@ -1151,6 +1151,7 @@ static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state)
 {
 	struct mlx5e_channels new_channels = {};
 	bool reset_channels = true;
+	bool opened;
 	int err = 0;

 	mutex_lock(&priv->state_lock);
@@ -1159,22 +1160,24 @@ static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state)
 	mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &new_channels.params,
 						   trust_state);

-	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
-		priv->channels.params = new_channels.params;
+	opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+	if (!opened)
 		reset_channels = false;
-	}

 	/* Skip if tx_min_inline is the same */
 	if (new_channels.params.tx_min_inline_mode ==
 	    priv->channels.params.tx_min_inline_mode)
 		reset_channels = false;

-	if (reset_channels)
+	if (reset_channels) {
 		err = mlx5e_safe_switch_channels(priv, &new_channels,
 						 mlx5e_update_trust_state_hw,
 						 &trust_state);
-	else
+	} else {
 		err = mlx5e_update_trust_state_hw(priv, &trust_state);
+		if (!err && !opened)
+			priv->channels.params = new_channels.params;
+	}

 	mutex_unlock(&priv->state_lock);
......
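The trust-state fix above and the queue-count, mqprio, LRO, and MTU fixes in the hunks below all enforce the same invariant: when the channels are closed, new parameters are staged in software first and must be rolled back if the hardware command fails, so software and hardware state cannot diverge. A minimal sketch of the revert-on-error pattern, with hypothetical names (example_priv, example_params, example_apply_to_hw):

static int example_update_params(struct example_priv *priv,
				 const struct example_params *new_params)
{
	struct example_params old_params;
	int err;

	old_params = priv->params;	/* snapshot for rollback */
	priv->params = *new_params;	/* stage the new software state */

	err = example_apply_to_hw(priv);
	if (err)
		priv->params = old_params;	/* hw unchanged: revert sw */

	return err;
}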
@@ -447,12 +447,18 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
 		goto out;
 	}

-	new_channels.params = priv->channels.params;
+	new_channels.params = *cur_params;
 	new_channels.params.num_channels = count;

 	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
-		*cur_params = new_channels.params;
-		mlx5e_num_channels_changed(priv);
+		struct mlx5e_params old_params;
+
+		old_params = *cur_params;
+		*cur_params = new_channels.params;
+		err = mlx5e_num_channels_changed(priv);
+		if (err)
+			*cur_params = old_params;
+
 		goto out;
 	}
......
@@ -3614,7 +3614,14 @@ static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
 	new_channels.params.num_tc = tc ? tc : 1;

 	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+		struct mlx5e_params old_params;
+
+		old_params = priv->channels.params;
 		priv->channels.params = new_channels.params;
+		err = mlx5e_num_channels_changed(priv);
+		if (err)
+			priv->channels.params = old_params;
+
 		goto out;
 	}
@@ -3757,7 +3764,7 @@ static int set_feature_lro(struct net_device *netdev, bool enable)
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 	struct mlx5_core_dev *mdev = priv->mdev;
 	struct mlx5e_channels new_channels = {};
-	struct mlx5e_params *old_params;
+	struct mlx5e_params *cur_params;
 	int err = 0;
 	bool reset;
@@ -3770,8 +3777,8 @@ static int set_feature_lro(struct net_device *netdev, bool enable)
 		goto out;
 	}

-	old_params = &priv->channels.params;
-	if (enable && !MLX5E_GET_PFLAG(old_params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
+	cur_params = &priv->channels.params;
+	if (enable && !MLX5E_GET_PFLAG(cur_params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
 		netdev_warn(netdev, "can't set LRO with legacy RQ\n");
 		err = -EINVAL;
 		goto out;
@@ -3779,18 +3786,23 @@ static int set_feature_lro(struct net_device *netdev, bool enable)
 	reset = test_bit(MLX5E_STATE_OPENED, &priv->state);

-	new_channels.params = *old_params;
+	new_channels.params = *cur_params;
 	new_channels.params.lro_en = enable;

-	if (old_params->rq_wq_type != MLX5_WQ_TYPE_CYCLIC) {
-		if (mlx5e_rx_mpwqe_is_linear_skb(mdev, old_params, NULL) ==
+	if (cur_params->rq_wq_type != MLX5_WQ_TYPE_CYCLIC) {
+		if (mlx5e_rx_mpwqe_is_linear_skb(mdev, cur_params, NULL) ==
 		    mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_channels.params, NULL))
 			reset = false;
 	}

 	if (!reset) {
-		*old_params = new_channels.params;
+		struct mlx5e_params old_params;
+
+		old_params = *cur_params;
+		*cur_params = new_channels.params;
 		err = mlx5e_modify_tirs_lro(priv);
+		if (err)
+			*cur_params = old_params;
 		goto out;
 	}
@@ -4067,9 +4079,16 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
 	}

 	if (!reset) {
+		unsigned int old_mtu = params->sw_mtu;
+
 		params->sw_mtu = new_mtu;
-		if (preactivate)
-			preactivate(priv, NULL);
+		if (preactivate) {
+			err = preactivate(priv, NULL);
+			if (err) {
+				params->sw_mtu = old_mtu;
+				goto out;
+			}
+		}
 		netdev->mtu = params->sw_mtu;
 		goto out;
 	}
@@ -5027,7 +5046,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
 	    FT_CAP(modify_root) &&
 	    FT_CAP(identified_miss_table_mode) &&
 	    FT_CAP(flow_table_modify)) {
-#ifdef CONFIG_MLX5_ESWITCH
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
 		netdev->hw_features |= NETIF_F_HW_TC;
 #endif
 #ifdef CONFIG_MLX5_EN_ARFS
......
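This hunk and the representor hunk below tie NETIF_F_HW_TC to CONFIG_MLX5_CLS_ACT, the option that actually builds the TC-offload code, instead of the broader CONFIG_MLX5_ESWITCH; advertising hw-tc-offload without the classifier code compiled in let users enable a feature that could not work. As a general point about the guard style, IS_ENABLED(CONFIG_FOO) evaluates true for both =y and =m, whereas a plain #ifdef misses the module case (a tristate =m defines CONFIG_FOO_MODULE instead). A sketch:

#include <linux/kconfig.h>
#include <linux/netdevice.h>

static void example_set_tc_offload(struct net_device *netdev)
{
	/* IS_ENABLED() covers CONFIG_MLX5_CLS_ACT=y and =m, keeping the
	 * advertised flag in sync with whether the offload code exists
	 */
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
	netdev->hw_features |= NETIF_F_HW_TC;
#endif
}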
@@ -737,7 +737,9 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
 	netdev->features     |= NETIF_F_NETNS_LOCAL;

+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
 	netdev->hw_features  |= NETIF_F_HW_TC;
+#endif
 	netdev->hw_features  |= NETIF_F_SG;
 	netdev->hw_features  |= NETIF_F_IP_CSUM;
 	netdev->hw_features  |= NETIF_F_IPV6_CSUM;
......
@@ -67,6 +67,7 @@
 #include "lib/geneve.h"
 #include "lib/fs_chains.h"
 #include "diag/en_tc_tracepoint.h"
+#include <asm/div64.h>

 #define nic_chains(priv) ((priv)->fs.tc.chains)
 #define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)
@@ -1162,6 +1163,9 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
 	struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
 	struct mlx5_flow_handle *rule;

+	if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)
+		return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
+
 	if (flow_flag_test(flow, CT)) {
 		mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
@@ -1192,6 +1196,9 @@ mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
 {
 	flow_flag_clear(flow, OFFLOADED);

+	if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)
+		goto offload_rule_0;
+
 	if (flow_flag_test(flow, CT)) {
 		mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
 		return;
@@ -1200,6 +1207,7 @@ mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
 	if (attr->esw_attr->split_count)
 		mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);

+offload_rule_0:
 	mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
 }
@@ -2269,8 +2277,8 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 	      BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) |
 	      BIT(FLOW_DISSECTOR_KEY_MPLS))) {
 		NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
-		netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
-			    dissector->used_keys);
+		netdev_dbg(priv->netdev, "Unsupported key used: 0x%x\n",
+			   dissector->used_keys);
 		return -EOPNOTSUPP;
 	}
@@ -5007,13 +5015,13 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
 	return err;
 }

-static int apply_police_params(struct mlx5e_priv *priv, u32 rate,
+static int apply_police_params(struct mlx5e_priv *priv, u64 rate,
 			       struct netlink_ext_ack *extack)
 {
 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
 	struct mlx5_eswitch *esw;
-	u32 rate_mbps = 0;
 	u16 vport_num;
+	u32 rate_mbps;
 	int err;

 	vport_num = rpriv->rep->vport;
@@ -5030,7 +5038,11 @@ static int apply_police_params(struct mlx5e_priv *priv, u32 rate,
 	 * Moreover, if rate is non zero we choose to configure to a minimum of
 	 * 1 mbit/sec.
 	 */
-	rate_mbps = rate ? max_t(u32, (rate * 8 + 500000) / 1000000, 1) : 0;
+	if (rate) {
+		rate = (rate * BITS_PER_BYTE) + 500000;
+		rate_mbps = max_t(u32, do_div(rate, 1000000), 1);
+	}
+
 	err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps);
 	if (err)
 		NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware");
......
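On the rate hunk above: the police rate arrives in bytes per second, and multiplying it by 8 in a u32 wraps above roughly 536 MB/s (about 4.3 Gbit/s), so high policing rates were silently mangled; widening to u64 and pulling in <asm/div64.h> addresses that, since 64-by-32 division is not native on 32-bit architectures. A sketch of an overflow-safe conversion under the same assumptions (a hypothetical helper, not the patch's exact expression; note that do_div() divides its first argument in place and returns the remainder, so the quotient is what remains in the variable):

#include <linux/kernel.h>
#include <asm/div64.h>

/* convert a non-zero byte/sec rate to whole mbit/sec in 64-bit math;
 * the caller handles rate == 0 separately, as the patch does
 */
static u32 example_rate_to_mbps(u64 rate_bytes_per_sec)
{
	u64 rate_bits = rate_bytes_per_sec * BITS_PER_BYTE + 500000; /* round */

	do_div(rate_bits, 1000000);	/* quotient left in rate_bits */
	return max_t(u32, rate_bits, 1);	/* floor of 1 mbit/sec */
}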
@@ -1141,6 +1141,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
 destroy_ft:
 	root->cmds->destroy_flow_table(root, ft);
 free_ft:
+	rhltable_destroy(&ft->fgs_hash);
 	kfree(ft);
 unlock_root:
 	mutex_unlock(&root->chain_lock);
......
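The leak fixed above is the classic one where an error path frees the containing struct but not a resource initialized inside it: rhltable_init() allocates bucket memory that kfree() on the outer object never touches, so the unwind labels must mirror the init order. A sketch of the pairing, with hypothetical names (example_table, example_obj, example_create_in_hw):

#include <linux/err.h>
#include <linux/rhashtable.h>
#include <linux/slab.h>

struct example_obj {
	u32 key;
	struct rhlist_head node;
};

static const struct rhashtable_params example_params = {
	.key_len     = sizeof(u32),
	.key_offset  = offsetof(struct example_obj, key),
	.head_offset = offsetof(struct example_obj, node),
	.automatic_shrinking = true,
};

struct example_table {
	struct rhltable objs;	/* rhltable_init() allocates bucket memory */
};

static struct example_table *example_table_create(void)
{
	struct example_table *t;
	int err;

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return ERR_PTR(-ENOMEM);

	err = rhltable_init(&t->objs, &example_params);
	if (err)
		goto free_t;

	err = example_create_in_hw(t);	/* hypothetical fw command */
	if (err)
		goto destroy_hash;	/* kfree() alone would leak buckets */

	return t;

destroy_hash:
	rhltable_destroy(&t->objs);
free_t:
	kfree(t);
	return ERR_PTR(err);
}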
@@ -58,7 +58,7 @@ struct fw_page {
 	struct rb_node rb_node;
 	u64 addr;
 	struct page *page;
-	u16 func_id;
+	u32 function;
 	unsigned long bitmask;
 	struct list_head list;
 	unsigned free_count;
@@ -74,12 +74,17 @@ enum {
 	MLX5_NUM_4K_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
 };

-static struct rb_root *page_root_per_func_id(struct mlx5_core_dev *dev, u16 func_id)
+static u32 get_function(u16 func_id, bool ec_function)
+{
+	return func_id & (ec_function << 16);
+}
+
+static struct rb_root *page_root_per_function(struct mlx5_core_dev *dev, u32 function)
 {
 	struct rb_root *root;
 	int err;

-	root = xa_load(&dev->priv.page_root_xa, func_id);
+	root = xa_load(&dev->priv.page_root_xa, function);
 	if (root)
 		return root;
@@ -87,7 +92,7 @@ static struct rb_root *page_root_per_func_id(struct mlx5_core_dev *dev, u16 func
 	if (!root)
 		return ERR_PTR(-ENOMEM);

-	err = xa_insert(&dev->priv.page_root_xa, func_id, root, GFP_KERNEL);
+	err = xa_insert(&dev->priv.page_root_xa, function, root, GFP_KERNEL);
 	if (err) {
 		kfree(root);
 		return ERR_PTR(err);
@@ -98,7 +103,7 @@ static struct rb_root *page_root_per_func_id(struct mlx5_core_dev *dev, u16 func
 	return root;
 }

-static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
+static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u32 function)
 {
 	struct rb_node *parent = NULL;
 	struct rb_root *root;
@@ -107,7 +112,7 @@ static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u
 	struct fw_page *tfp;
 	int i;

-	root = page_root_per_func_id(dev, func_id);
+	root = page_root_per_function(dev, function);
 	if (IS_ERR(root))
 		return PTR_ERR(root);
@@ -130,7 +135,7 @@ static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u
 	nfp->addr = addr;
 	nfp->page = page;
-	nfp->func_id = func_id;
+	nfp->function = function;
 	nfp->free_count = MLX5_NUM_4K_IN_PAGE;
 	for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++)
 		set_bit(i, &nfp->bitmask);
@@ -143,14 +148,14 @@ static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u
 }

 static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr,
-				    u32 func_id)
+				    u32 function)
 {
 	struct fw_page *result = NULL;
 	struct rb_root *root;
 	struct rb_node *tmp;
 	struct fw_page *tfp;

-	root = xa_load(&dev->priv.page_root_xa, func_id);
+	root = xa_load(&dev->priv.page_root_xa, function);
 	if (WARN_ON_ONCE(!root))
 		return NULL;
@@ -194,14 +199,14 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
 	return err;
 }

-static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u16 func_id)
+static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u32 function)
 {
 	struct fw_page *fp = NULL;
 	struct fw_page *iter;
 	unsigned n;

 	list_for_each_entry(iter, &dev->priv.free_list, list) {
-		if (iter->func_id != func_id)
+		if (iter->function != function)
 			continue;
 		fp = iter;
 	}
@@ -231,7 +236,7 @@ static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp,
 {
 	struct rb_root *root;

-	root = xa_load(&dev->priv.page_root_xa, fwp->func_id);
+	root = xa_load(&dev->priv.page_root_xa, fwp->function);
 	if (WARN_ON_ONCE(!root))
 		return;
@@ -244,12 +249,12 @@
 	kfree(fwp);
 }

-static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 func_id)
+static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 function)
 {
 	struct fw_page *fwp;
 	int n;

-	fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK, func_id);
+	fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK, function);
 	if (!fwp) {
 		mlx5_core_warn_rl(dev, "page not found\n");
 		return;
@@ -263,7 +268,7 @@ static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 func_id)
 	list_add(&fwp->list, &dev->priv.free_list);
 }

-static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
+static int alloc_system_page(struct mlx5_core_dev *dev, u32 function)
 {
 	struct device *device = mlx5_core_dma_dev(dev);
 	int nid = dev_to_node(device);
@@ -291,7 +296,7 @@ static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
 		goto map;
 	}

-	err = insert_page(dev, addr, page, func_id);
+	err = insert_page(dev, addr, page, function);
 	if (err) {
 		mlx5_core_err(dev, "failed to track allocated page\n");
 		dma_unmap_page(device, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
@@ -328,6 +333,7 @@ static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id,
 static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
 		      int notify_fail, bool ec_function)
 {
+	u32 function = get_function(func_id, ec_function);
 	u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
 	int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
 	u64 addr;
@@ -345,10 +351,10 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
 	for (i = 0; i < npages; i++) {
 retry:
-		err = alloc_4k(dev, &addr, func_id);
+		err = alloc_4k(dev, &addr, function);
 		if (err) {
 			if (err == -ENOMEM)
-				err = alloc_system_page(dev, func_id);
+				err = alloc_system_page(dev, function);
 			if (err)
 				goto out_4k;
@@ -384,7 +390,7 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
 out_4k:
 	for (i--; i >= 0; i--)
-		free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]), func_id);
+		free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]), function);
 out_free:
 	kvfree(in);
 	if (notify_fail)
@@ -392,14 +398,15 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
 	return err;
 }

-static void release_all_pages(struct mlx5_core_dev *dev, u32 func_id,
+static void release_all_pages(struct mlx5_core_dev *dev, u16 func_id,
 			      bool ec_function)
 {
+	u32 function = get_function(func_id, ec_function);
 	struct rb_root *root;
 	struct rb_node *p;
 	int npages = 0;

-	root = xa_load(&dev->priv.page_root_xa, func_id);
+	root = xa_load(&dev->priv.page_root_xa, function);
 	if (WARN_ON_ONCE(!root))
 		return;
@@ -446,6 +453,7 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
 	struct rb_root *root;
 	struct fw_page *fwp;
 	struct rb_node *p;
+	bool ec_function;
 	u32 func_id;
 	u32 npages;
 	u32 i = 0;
@@ -456,8 +464,9 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
 	/* No hard feelings, we want our pages back! */
 	npages = MLX5_GET(manage_pages_in, in, input_num_entries);
 	func_id = MLX5_GET(manage_pages_in, in, function_id);
+	ec_function = MLX5_GET(manage_pages_in, in, embedded_cpu_function);

-	root = xa_load(&dev->priv.page_root_xa, func_id);
+	root = xa_load(&dev->priv.page_root_xa, get_function(func_id, ec_function));
 	if (WARN_ON_ONCE(!root))
 		return -EEXIST;
@@ -473,9 +482,10 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
 	return 0;
 }

-static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
+static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
 			 int *nclaimed, bool ec_function)
 {
+	u32 function = get_function(func_id, ec_function);
 	int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
 	u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
 	int num_claimed;
@@ -514,7 +524,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
 	}

 	for (i = 0; i < num_claimed; i++)
-		free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]), func_id);
+		free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]), function);

 	if (nclaimed)
 		*nclaimed = num_claimed;
......
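The page-tree hunks above key the xarray of rb-trees by a composite of the 16-bit function id and the embedded-CPU-function bit, so pages granted on behalf of the ECPF can no longer collide with a PF/VF that carries the same function id. A sketch of the keying idea, with hypothetical helper names (the packing here uses the bitwise OR one would expect for combining disjoint bit fields; it is an illustration of the intent, not the patch's exact expression):

#include <linux/xarray.h>

/* pack the 16-bit function id and the ec bit into one lookup key so
 * the ECPF id space cannot alias a PF/VF id
 */
static u32 example_page_key(u16 func_id, bool ec_function)
{
	return (u32)func_id | ((u32)ec_function << 16);
}

static struct rb_root *example_page_root(struct xarray *roots,
					 u16 func_id, bool ec_function)
{
	return xa_load(roots, example_page_key(func_id, ec_function));
}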