Commit 716dcaeb authored by David S. Miller's avatar David S. Miller

Merge tag 'mlx5-updates-2017-01-24' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2017-01-24

The first seven patches from Or Gerlitz in this series further enhance
the mlx5 SRIOV switchdev mode to support offloading IPv6 tunnels using the
TC tunnel key set (encap) and unset (decap) actions.

Or Gerlitz says:
========================
As part of doing this change, few cleanups are done in the IPv4 code,
later we move to use the full tunnel key info provided to the driver as
the key for our internal hashing which is used to identify cases where
the same tunnel is used for encapsulating multiple flows. As done in the
IPv4 case, the control path for offloading IPv6 tunnels uses route/neigh
lookups and construction of the IPv6 tunnel headers on the encap path and
matching on the outer headers in the decap path.

The last patch of the series enlarges the HW FDB size for the switchdev mode,
so it has now room to contain offloaded flows as many as min(max number
of HW flow counters supported, max HW table size supported).
========================

Next to Or's series you can find several patches handling several topics.

From Mohamad, add support for SRIOV VF min rate guarantee by using the
TSAR BW share weights mechanism.

From Or, Two patches to enable Eth VFs to query their min-inline value for
user-space.
For that we move an mlx5 low-level min-inline helper function from the mlx5
ethernet driver into the core driver and then use it in mlx5_ib to expose
the inline mode to rdma applications through libmlx5.

From Kamal Heib, Reduce memory consumption on kdump kernel.

From Shaker Daibes, code reuse in CQE compression control logic
====================
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents a08ef476 5eb0249b
...@@ -53,6 +53,7 @@ ...@@ -53,6 +53,7 @@
#include <linux/in.h> #include <linux/in.h>
#include <linux/etherdevice.h> #include <linux/etherdevice.h>
#include <linux/mlx5/fs.h> #include <linux/mlx5/fs.h>
#include <linux/mlx5/vport.h>
#include "mlx5_ib.h" #include "mlx5_ib.h"
#define DRIVER_NAME "mlx5_ib" #define DRIVER_NAME "mlx5_ib"
...@@ -1202,6 +1203,14 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, ...@@ -1202,6 +1203,14 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
resp.response_length += sizeof(resp.cmds_supp_uhw); resp.response_length += sizeof(resp.cmds_supp_uhw);
} }
if (field_avail(typeof(resp), eth_min_inline, udata->outlen)) {
if (mlx5_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET) {
mlx5_query_min_inline(dev->mdev, &resp.eth_min_inline);
resp.eth_min_inline++;
}
resp.response_length += sizeof(resp.eth_min_inline);
}
/* /*
* We don't want to expose information from the PCI bar that is located * We don't want to expose information from the PCI bar that is located
* after 4096 bytes, so if the arch only supports larger pages, let's * after 4096 bytes, so if the arch only supports larger pages, let's
......
...@@ -101,6 +101,7 @@ ...@@ -101,6 +101,7 @@
#define MLX5E_LOG_INDIR_RQT_SIZE 0x7 #define MLX5E_LOG_INDIR_RQT_SIZE 0x7
#define MLX5E_INDIR_RQT_SIZE BIT(MLX5E_LOG_INDIR_RQT_SIZE) #define MLX5E_INDIR_RQT_SIZE BIT(MLX5E_LOG_INDIR_RQT_SIZE)
#define MLX5E_MIN_NUM_CHANNELS 0x1
#define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE >> 1) #define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE >> 1)
#define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC) #define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
#define MLX5E_TX_CQ_POLL_BUDGET 128 #define MLX5E_TX_CQ_POLL_BUDGET 128
...@@ -786,7 +787,7 @@ void mlx5e_pps_event_handler(struct mlx5e_priv *priv, ...@@ -786,7 +787,7 @@ void mlx5e_pps_event_handler(struct mlx5e_priv *priv,
struct ptp_clock_event *event); struct ptp_clock_event *event);
int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr); int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr);
int mlx5e_hwstamp_get(struct net_device *dev, struct ifreq *ifr); int mlx5e_hwstamp_get(struct net_device *dev, struct ifreq *ifr);
void mlx5e_modify_rx_cqe_compression(struct mlx5e_priv *priv, bool val); void mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val);
int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
u16 vid); u16 vid);
...@@ -847,12 +848,6 @@ static inline u32 mlx5e_get_wqe_mtt_offset(struct mlx5e_rq *rq, u16 wqe_ix) ...@@ -847,12 +848,6 @@ static inline u32 mlx5e_get_wqe_mtt_offset(struct mlx5e_rq *rq, u16 wqe_ix)
return wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8); return wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8);
} }
static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
{
return min_t(int, mdev->priv.eq_table.num_comp_vectors,
MLX5E_MAX_NUM_CHANNELS);
}
extern const struct ethtool_ops mlx5e_ethtool_ops; extern const struct ethtool_ops mlx5e_ethtool_ops;
#ifdef CONFIG_MLX5_CORE_EN_DCB #ifdef CONFIG_MLX5_CORE_EN_DCB
extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops; extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops;
......
...@@ -106,11 +106,12 @@ int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr) ...@@ -106,11 +106,12 @@ int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr)
return -ERANGE; return -ERANGE;
} }
mutex_lock(&priv->state_lock);
/* RX HW timestamp */ /* RX HW timestamp */
switch (config.rx_filter) { switch (config.rx_filter) {
case HWTSTAMP_FILTER_NONE: case HWTSTAMP_FILTER_NONE:
/* Reset CQE compression to Admin default */ /* Reset CQE compression to Admin default */
mlx5e_modify_rx_cqe_compression(priv, priv->params.rx_cqe_compress_def); mlx5e_modify_rx_cqe_compression_locked(priv, priv->params.rx_cqe_compress_def);
break; break;
case HWTSTAMP_FILTER_ALL: case HWTSTAMP_FILTER_ALL:
case HWTSTAMP_FILTER_SOME: case HWTSTAMP_FILTER_SOME:
...@@ -128,14 +129,16 @@ int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr) ...@@ -128,14 +129,16 @@ int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
/* Disable CQE compression */ /* Disable CQE compression */
netdev_warn(dev, "Disabling cqe compression"); netdev_warn(dev, "Disabling cqe compression");
mlx5e_modify_rx_cqe_compression(priv, false); mlx5e_modify_rx_cqe_compression_locked(priv, false);
config.rx_filter = HWTSTAMP_FILTER_ALL; config.rx_filter = HWTSTAMP_FILTER_ALL;
break; break;
default: default:
mutex_unlock(&priv->state_lock);
return -ERANGE; return -ERANGE;
} }
memcpy(&priv->tstamp.hwtstamp_config, &config, sizeof(config)); memcpy(&priv->tstamp.hwtstamp_config, &config, sizeof(config));
mutex_unlock(&priv->state_lock);
return copy_to_user(ifr->ifr_data, &config, return copy_to_user(ifr->ifr_data, &config,
sizeof(config)) ? -EFAULT : 0; sizeof(config)) ? -EFAULT : 0;
......
...@@ -552,7 +552,7 @@ static void mlx5e_get_channels(struct net_device *dev, ...@@ -552,7 +552,7 @@ static void mlx5e_get_channels(struct net_device *dev,
{ {
struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5e_priv *priv = netdev_priv(dev);
ch->max_combined = mlx5e_get_max_num_channels(priv->mdev); ch->max_combined = priv->profile->max_nch(priv->mdev);
ch->combined_count = priv->params.num_channels; ch->combined_count = priv->params.num_channels;
} }
...@@ -560,7 +560,7 @@ static int mlx5e_set_channels(struct net_device *dev, ...@@ -560,7 +560,7 @@ static int mlx5e_set_channels(struct net_device *dev,
struct ethtool_channels *ch) struct ethtool_channels *ch)
{ {
struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5e_priv *priv = netdev_priv(dev);
int ncv = mlx5e_get_max_num_channels(priv->mdev); int ncv = priv->profile->max_nch(priv->mdev);
unsigned int count = ch->combined_count; unsigned int count = ch->combined_count;
bool arfs_enabled; bool arfs_enabled;
bool was_opened; bool was_opened;
...@@ -1476,8 +1476,6 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev, ...@@ -1476,8 +1476,6 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
{ {
struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev; struct mlx5_core_dev *mdev = priv->mdev;
int err = 0;
bool reset;
if (!MLX5_CAP_GEN(mdev, cqe_compression)) if (!MLX5_CAP_GEN(mdev, cqe_compression))
return -ENOTSUPP; return -ENOTSUPP;
...@@ -1487,17 +1485,10 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev, ...@@ -1487,17 +1485,10 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
return -EINVAL; return -EINVAL;
} }
reset = test_bit(MLX5E_STATE_OPENED, &priv->state); mlx5e_modify_rx_cqe_compression_locked(priv, enable);
if (reset)
mlx5e_close_locked(netdev);
MLX5E_SET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS, enable);
priv->params.rx_cqe_compress_def = enable; priv->params.rx_cqe_compress_def = enable;
if (reset) return 0;
err = mlx5e_open_locked(netdev);
return err;
} }
static int mlx5e_handle_pflag(struct net_device *netdev, static int mlx5e_handle_pflag(struct net_device *netdev,
......
...@@ -31,6 +31,7 @@ ...@@ -31,6 +31,7 @@
*/ */
#include <net/tc_act/tc_gact.h> #include <net/tc_act/tc_gact.h>
#include <linux/crash_dump.h>
#include <net/pkt_cls.h> #include <net/pkt_cls.h>
#include <linux/mlx5/fs.h> #include <linux/mlx5/fs.h>
#include <net/vxlan.h> #include <net/vxlan.h>
...@@ -83,7 +84,9 @@ static void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type) ...@@ -83,7 +84,9 @@ static void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type)
priv->params.rq_wq_type = rq_type; priv->params.rq_wq_type = rq_type;
switch (priv->params.rq_wq_type) { switch (priv->params.rq_wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW; priv->params.log_rq_size = is_kdump_kernel() ?
MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW :
MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
priv->params.mpwqe_log_stride_sz = priv->params.mpwqe_log_stride_sz =
MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS) ? MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS) ?
MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS : MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS :
...@@ -92,7 +95,9 @@ static void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type) ...@@ -92,7 +95,9 @@ static void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type)
priv->params.mpwqe_log_stride_sz; priv->params.mpwqe_log_stride_sz;
break; break;
default: /* MLX5_WQ_TYPE_LINKED_LIST */ default: /* MLX5_WQ_TYPE_LINKED_LIST */
priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE; priv->params.log_rq_size = is_kdump_kernel() ?
MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
} }
priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type, priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type,
BIT(priv->params.log_rq_size)); BIT(priv->params.log_rq_size));
...@@ -1508,6 +1513,14 @@ static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate) ...@@ -1508,6 +1513,14 @@ static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
return err; return err;
} }
static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
{
return is_kdump_kernel() ?
MLX5E_MIN_NUM_CHANNELS :
min_t(int, mdev->priv.eq_table.num_comp_vectors,
MLX5E_MAX_NUM_CHANNELS);
}
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
struct mlx5e_channel_param *cparam, struct mlx5e_channel_param *cparam,
struct mlx5e_channel **cp) struct mlx5e_channel **cp)
...@@ -3021,11 +3034,8 @@ static int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, ...@@ -3021,11 +3034,8 @@ static int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev; struct mlx5_core_dev *mdev = priv->mdev;
if (min_tx_rate)
return -EOPNOTSUPP;
return mlx5_eswitch_set_vport_rate(mdev->priv.eswitch, vf + 1, return mlx5_eswitch_set_vport_rate(mdev->priv.eswitch, vf + 1,
max_tx_rate); max_tx_rate, min_tx_rate);
} }
static int mlx5_vport_link2ifla(u8 esw_link) static int mlx5_vport_link2ifla(u8 esw_link)
...@@ -3461,22 +3471,6 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) ...@@ -3461,22 +3471,6 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE; MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;
} }
static void mlx5e_query_min_inline(struct mlx5_core_dev *mdev,
u8 *min_inline_mode)
{
switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) {
case MLX5_CAP_INLINE_MODE_L2:
*min_inline_mode = MLX5_INLINE_MODE_L2;
break;
case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode);
break;
case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
*min_inline_mode = MLX5_INLINE_MODE_NONE;
break;
}
}
u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout) u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
{ {
int i; int i;
...@@ -3510,7 +3504,9 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev, ...@@ -3510,7 +3504,9 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
priv->params.lro_timeout = priv->params.lro_timeout =
mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT); mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
priv->params.log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE; priv->params.log_sq_size = is_kdump_kernel() ?
MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE :
MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
/* set CQE compression */ /* set CQE compression */
priv->params.rx_cqe_compress_def = false; priv->params.rx_cqe_compress_def = false;
...@@ -3536,7 +3532,7 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev, ...@@ -3536,7 +3532,7 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
priv->params.tx_cq_moderation.pkts = priv->params.tx_cq_moderation.pkts =
MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS; MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev); priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev);
mlx5e_query_min_inline(mdev, &priv->params.tx_min_inline_mode); mlx5_query_min_inline(mdev, &priv->params.tx_min_inline_mode);
priv->params.num_tc = 1; priv->params.num_tc = 1;
priv->params.rss_hfunc = ETH_RSS_HASH_XOR; priv->params.rss_hfunc = ETH_RSS_HASH_XOR;
......
...@@ -155,17 +155,15 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq, ...@@ -155,17 +155,15 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
return mlx5e_decompress_cqes_cont(rq, cq, 1, budget_rem) - 1; return mlx5e_decompress_cqes_cont(rq, cq, 1, budget_rem) - 1;
} }
void mlx5e_modify_rx_cqe_compression(struct mlx5e_priv *priv, bool val) void mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val)
{ {
bool was_opened; bool was_opened;
if (!MLX5_CAP_GEN(priv->mdev, cqe_compression)) if (!MLX5_CAP_GEN(priv->mdev, cqe_compression))
return; return;
mutex_lock(&priv->state_lock);
if (MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS) == val) if (MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS) == val)
goto unlock; return;
was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
if (was_opened) if (was_opened)
...@@ -176,8 +174,6 @@ void mlx5e_modify_rx_cqe_compression(struct mlx5e_priv *priv, bool val) ...@@ -176,8 +174,6 @@ void mlx5e_modify_rx_cqe_compression(struct mlx5e_priv *priv, bool val)
if (was_opened) if (was_opened)
mlx5e_open_locked(priv->netdev); mlx5e_open_locked(priv->netdev);
unlock:
mutex_unlock(&priv->state_lock);
} }
#define RQ_PAGE_SIZE(rq) ((1 << rq->buff.page_order) << PAGE_SHIFT) #define RQ_PAGE_SIZE(rq) ((1 << rq->buff.page_order) << PAGE_SHIFT)
......
...@@ -1415,7 +1415,7 @@ static void esw_destroy_tsar(struct mlx5_eswitch *esw) ...@@ -1415,7 +1415,7 @@ static void esw_destroy_tsar(struct mlx5_eswitch *esw)
} }
static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num, static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num,
u32 initial_max_rate) u32 initial_max_rate, u32 initial_bw_share)
{ {
u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0}; u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
struct mlx5_vport *vport = &esw->vports[vport_num]; struct mlx5_vport *vport = &esw->vports[vport_num];
...@@ -1439,6 +1439,7 @@ static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num, ...@@ -1439,6 +1439,7 @@ static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num,
esw->qos.root_tsar_id); esw->qos.root_tsar_id);
MLX5_SET(scheduling_context, &sched_ctx, max_average_bw, MLX5_SET(scheduling_context, &sched_ctx, max_average_bw,
initial_max_rate); initial_max_rate);
MLX5_SET(scheduling_context, &sched_ctx, bw_share, initial_bw_share);
err = mlx5_create_scheduling_element_cmd(dev, err = mlx5_create_scheduling_element_cmd(dev,
SCHEDULING_HIERARCHY_E_SWITCH, SCHEDULING_HIERARCHY_E_SWITCH,
...@@ -1473,7 +1474,7 @@ static void esw_vport_disable_qos(struct mlx5_eswitch *esw, int vport_num) ...@@ -1473,7 +1474,7 @@ static void esw_vport_disable_qos(struct mlx5_eswitch *esw, int vport_num)
} }
static int esw_vport_qos_config(struct mlx5_eswitch *esw, int vport_num, static int esw_vport_qos_config(struct mlx5_eswitch *esw, int vport_num,
u32 max_rate) u32 max_rate, u32 bw_share)
{ {
u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0}; u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
struct mlx5_vport *vport = &esw->vports[vport_num]; struct mlx5_vport *vport = &esw->vports[vport_num];
...@@ -1497,7 +1498,9 @@ static int esw_vport_qos_config(struct mlx5_eswitch *esw, int vport_num, ...@@ -1497,7 +1498,9 @@ static int esw_vport_qos_config(struct mlx5_eswitch *esw, int vport_num,
esw->qos.root_tsar_id); esw->qos.root_tsar_id);
MLX5_SET(scheduling_context, &sched_ctx, max_average_bw, MLX5_SET(scheduling_context, &sched_ctx, max_average_bw,
max_rate); max_rate);
MLX5_SET(scheduling_context, &sched_ctx, bw_share, bw_share);
bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW; bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE;
err = mlx5_modify_scheduling_element_cmd(dev, err = mlx5_modify_scheduling_element_cmd(dev,
SCHEDULING_HIERARCHY_E_SWITCH, SCHEDULING_HIERARCHY_E_SWITCH,
...@@ -1563,7 +1566,8 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num, ...@@ -1563,7 +1566,8 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
esw_apply_vport_conf(esw, vport); esw_apply_vport_conf(esw, vport);
/* Attach vport to the eswitch rate limiter */ /* Attach vport to the eswitch rate limiter */
if (esw_vport_enable_qos(esw, vport_num, vport->info.max_rate)) if (esw_vport_enable_qos(esw, vport_num, vport->info.max_rate,
vport->qos.bw_share))
esw_warn(esw->dev, "Failed to attach vport %d to eswitch rate limiter", vport_num); esw_warn(esw->dev, "Failed to attach vport %d to eswitch rate limiter", vport_num);
/* Sync with current vport context */ /* Sync with current vport context */
...@@ -1952,6 +1956,7 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, ...@@ -1952,6 +1956,7 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
ivi->qos = evport->info.qos; ivi->qos = evport->info.qos;
ivi->spoofchk = evport->info.spoofchk; ivi->spoofchk = evport->info.spoofchk;
ivi->trusted = evport->info.trusted; ivi->trusted = evport->info.trusted;
ivi->min_tx_rate = evport->info.min_rate;
ivi->max_tx_rate = evport->info.max_rate; ivi->max_tx_rate = evport->info.max_rate;
mutex_unlock(&esw->state_lock); mutex_unlock(&esw->state_lock);
...@@ -2046,23 +2051,103 @@ int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw, ...@@ -2046,23 +2051,103 @@ int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
return 0; return 0;
} }
int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw)
int vport, u32 max_rate)
{ {
u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
struct mlx5_vport *evport; struct mlx5_vport *evport;
u32 max_guarantee = 0;
int i;
for (i = 0; i <= esw->total_vports; i++) {
evport = &esw->vports[i];
if (!evport->enabled || evport->info.min_rate < max_guarantee)
continue;
max_guarantee = evport->info.min_rate;
}
return max_t(u32, max_guarantee / fw_max_bw_share, 1);
}
static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
{
u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
struct mlx5_vport *evport;
u32 vport_max_rate;
u32 vport_min_rate;
u32 bw_share;
int err;
int i;
for (i = 0; i <= esw->total_vports; i++) {
evport = &esw->vports[i];
if (!evport->enabled)
continue;
vport_min_rate = evport->info.min_rate;
vport_max_rate = evport->info.max_rate;
bw_share = MLX5_MIN_BW_SHARE;
if (vport_min_rate)
bw_share = MLX5_RATE_TO_BW_SHARE(vport_min_rate,
divider,
fw_max_bw_share);
if (bw_share == evport->qos.bw_share)
continue;
err = esw_vport_qos_config(esw, i, vport_max_rate,
bw_share);
if (!err)
evport->qos.bw_share = bw_share;
else
return err;
}
return 0;
}
int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int vport,
u32 max_rate, u32 min_rate)
{
u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
bool min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) &&
fw_max_bw_share >= MLX5_MIN_BW_SHARE;
bool max_rate_supported = MLX5_CAP_QOS(esw->dev, esw_rate_limit);
struct mlx5_vport *evport;
u32 previous_min_rate;
u32 divider;
int err = 0; int err = 0;
if (!ESW_ALLOWED(esw)) if (!ESW_ALLOWED(esw))
return -EPERM; return -EPERM;
if (!LEGAL_VPORT(esw, vport)) if (!LEGAL_VPORT(esw, vport))
return -EINVAL; return -EINVAL;
if ((min_rate && !min_rate_supported) || (max_rate && !max_rate_supported))
return -EOPNOTSUPP;
mutex_lock(&esw->state_lock); mutex_lock(&esw->state_lock);
evport = &esw->vports[vport]; evport = &esw->vports[vport];
err = esw_vport_qos_config(esw, vport, max_rate);
if (min_rate == evport->info.min_rate)
goto set_max_rate;
previous_min_rate = evport->info.min_rate;
evport->info.min_rate = min_rate;
divider = calculate_vports_min_rate_divider(esw);
err = normalize_vports_min_rate(esw, divider);
if (err) {
evport->info.min_rate = previous_min_rate;
goto unlock;
}
set_max_rate:
if (max_rate == evport->info.max_rate)
goto unlock;
err = esw_vport_qos_config(esw, vport, max_rate, evport->qos.bw_share);
if (!err) if (!err)
evport->info.max_rate = max_rate; evport->info.max_rate = max_rate;
unlock:
mutex_unlock(&esw->state_lock); mutex_unlock(&esw->state_lock);
return err; return err;
} }
......
...@@ -36,6 +36,7 @@ ...@@ -36,6 +36,7 @@
#include <linux/if_ether.h> #include <linux/if_ether.h>
#include <linux/if_link.h> #include <linux/if_link.h>
#include <net/devlink.h> #include <net/devlink.h>
#include <net/ip_tunnels.h>
#include <linux/mlx5/device.h> #include <linux/mlx5/device.h>
#define MLX5_MAX_UC_PER_VPORT(dev) \ #define MLX5_MAX_UC_PER_VPORT(dev) \
...@@ -49,6 +50,11 @@ ...@@ -49,6 +50,11 @@
#define FDB_UPLINK_VPORT 0xffff #define FDB_UPLINK_VPORT 0xffff
#define MLX5_MIN_BW_SHARE 1
#define MLX5_RATE_TO_BW_SHARE(rate, divider, limit) \
min_t(u32, max_t(u32, (rate) / (divider), MLX5_MIN_BW_SHARE), limit)
/* L2 -mac address based- hash helpers */ /* L2 -mac address based- hash helpers */
struct l2addr_node { struct l2addr_node {
struct hlist_node hlist; struct hlist_node hlist;
...@@ -115,6 +121,7 @@ struct mlx5_vport_info { ...@@ -115,6 +121,7 @@ struct mlx5_vport_info {
u8 qos; u8 qos;
u64 node_guid; u64 node_guid;
int link_state; int link_state;
u32 min_rate;
u32 max_rate; u32 max_rate;
bool spoofchk; bool spoofchk;
bool trusted; bool trusted;
...@@ -137,6 +144,7 @@ struct mlx5_vport { ...@@ -137,6 +144,7 @@ struct mlx5_vport {
struct { struct {
bool enabled; bool enabled;
u32 esw_tsar_ix; u32 esw_tsar_ix;
u32 bw_share;
} qos; } qos;
bool enabled; bool enabled;
...@@ -248,8 +256,8 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw, ...@@ -248,8 +256,8 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
int vport, bool spoofchk); int vport, bool spoofchk);
int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw, int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
int vport_num, bool setting); int vport_num, bool setting);
int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int vport,
int vport, u32 max_rate); u32 max_rate, u32 min_rate);
int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
int vport, struct ifla_vf_info *ivi); int vport, struct ifla_vf_info *ivi);
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
...@@ -274,18 +282,12 @@ enum { ...@@ -274,18 +282,12 @@ enum {
#define MLX5_FLOW_CONTEXT_ACTION_VLAN_POP 0x40 #define MLX5_FLOW_CONTEXT_ACTION_VLAN_POP 0x40
#define MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH 0x80 #define MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH 0x80
struct mlx5_encap_info {
__be32 daddr;
__be32 tun_id;
__be16 tp_dst;
};
struct mlx5_encap_entry { struct mlx5_encap_entry {
struct hlist_node encap_hlist; struct hlist_node encap_hlist;
struct list_head flows; struct list_head flows;
u32 encap_id; u32 encap_id;
struct neighbour *n; struct neighbour *n;
struct mlx5_encap_info tun_info; struct ip_tunnel_info tun_info;
unsigned char h_dest[ETH_ALEN]; /* destination eth addr */ unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
struct net_device *out_dev; struct net_device *out_dev;
......
...@@ -402,19 +402,18 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw) ...@@ -402,19 +402,18 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
} }
#define MAX_PF_SQ 256 #define MAX_PF_SQ 256
#define ESW_OFFLOADS_NUM_ENTRIES (1 << 13) /* 8K */
#define ESW_OFFLOADS_NUM_GROUPS 4 #define ESW_OFFLOADS_NUM_GROUPS 4
static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports) static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
{ {
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
int table_size, ix, esw_size, err = 0;
struct mlx5_core_dev *dev = esw->dev; struct mlx5_core_dev *dev = esw->dev;
struct mlx5_flow_namespace *root_ns; struct mlx5_flow_namespace *root_ns;
struct mlx5_flow_table *fdb = NULL; struct mlx5_flow_table *fdb = NULL;
struct mlx5_flow_group *g; struct mlx5_flow_group *g;
u32 *flow_group_in; u32 *flow_group_in;
void *match_criteria; void *match_criteria;
int table_size, ix, err = 0;
u32 flags = 0; u32 flags = 0;
flow_group_in = mlx5_vzalloc(inlen); flow_group_in = mlx5_vzalloc(inlen);
...@@ -427,15 +426,19 @@ static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports) ...@@ -427,15 +426,19 @@ static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
goto ns_err; goto ns_err;
} }
esw_debug(dev, "Create offloads FDB table, log_max_size(%d)\n", esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d)*groups(%d))\n",
MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)); MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
MLX5_CAP_GEN(dev, max_flow_counter), ESW_OFFLOADS_NUM_GROUPS);
esw_size = min_t(int, MLX5_CAP_GEN(dev, max_flow_counter) * ESW_OFFLOADS_NUM_GROUPS,
1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) && if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) &&
MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)) MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
flags |= MLX5_FLOW_TABLE_TUNNEL_EN; flags |= MLX5_FLOW_TABLE_TUNNEL_EN;
fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH, fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
ESW_OFFLOADS_NUM_ENTRIES, esw_size,
ESW_OFFLOADS_NUM_GROUPS, 0, ESW_OFFLOADS_NUM_GROUPS, 0,
flags); flags);
if (IS_ERR(fdb)) { if (IS_ERR(fdb)) {
......
...@@ -473,10 +473,13 @@ int mlx5_encap_alloc(struct mlx5_core_dev *dev, ...@@ -473,10 +473,13 @@ int mlx5_encap_alloc(struct mlx5_core_dev *dev,
int err; int err;
u32 *in; u32 *in;
if (size > MLX5_CAP_ESW(dev, max_encap_header_size)) if (size > max_encap_size) {
mlx5_core_warn(dev, "encap size %zd too big, max supported is %d\n",
size, max_encap_size);
return -EINVAL; return -EINVAL;
}
in = kzalloc(MLX5_ST_SZ_BYTES(alloc_encap_header_in) + max_encap_size, in = kzalloc(MLX5_ST_SZ_BYTES(alloc_encap_header_in) + size,
GFP_KERNEL); GFP_KERNEL);
if (!in) if (!in)
return -ENOMEM; return -ENOMEM;
......
...@@ -127,6 +127,23 @@ int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev, ...@@ -127,6 +127,23 @@ int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
} }
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_min_inline); EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_min_inline);
void mlx5_query_min_inline(struct mlx5_core_dev *mdev,
u8 *min_inline_mode)
{
switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) {
case MLX5_CAP_INLINE_MODE_L2:
*min_inline_mode = MLX5_INLINE_MODE_L2;
break;
case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode);
break;
case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
*min_inline_mode = MLX5_INLINE_MODE_NONE;
break;
}
}
EXPORT_SYMBOL_GPL(mlx5_query_min_inline);
int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev, int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
u16 vport, u8 min_inline) u16 vport, u8 min_inline)
{ {
......
...@@ -547,7 +547,9 @@ struct mlx5_ifc_e_switch_cap_bits { ...@@ -547,7 +547,9 @@ struct mlx5_ifc_e_switch_cap_bits {
struct mlx5_ifc_qos_cap_bits { struct mlx5_ifc_qos_cap_bits {
u8 packet_pacing[0x1]; u8 packet_pacing[0x1];
u8 esw_scheduling[0x1]; u8 esw_scheduling[0x1];
u8 reserved_at_2[0x1e]; u8 esw_bw_share[0x1];
u8 esw_rate_limit[0x1];
u8 reserved_at_4[0x1c];
u8 reserved_at_20[0x20]; u8 reserved_at_20[0x20];
......
...@@ -51,6 +51,7 @@ int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, ...@@ -51,6 +51,7 @@ int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
u16 vport, u8 *addr); u16 vport, u8 *addr);
int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev, int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
u16 vport, u8 *min_inline); u16 vport, u8 *min_inline);
void mlx5_query_min_inline(struct mlx5_core_dev *mdev, u8 *min_inline);
int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev, int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
u16 vport, u8 min_inline); u16 vport, u8 min_inline);
int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev, int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev,
......
...@@ -90,6 +90,17 @@ enum mlx5_user_cmds_supp_uhw { ...@@ -90,6 +90,17 @@ enum mlx5_user_cmds_supp_uhw {
MLX5_USER_CMDS_SUPP_UHW_CREATE_AH = 1 << 1, MLX5_USER_CMDS_SUPP_UHW_CREATE_AH = 1 << 1,
}; };
/* The eth_min_inline response value is set to off-by-one vs the FW
* returned value to allow user-space to deal with older kernels.
*/
enum mlx5_user_inline_mode {
MLX5_USER_INLINE_MODE_NA,
MLX5_USER_INLINE_MODE_NONE,
MLX5_USER_INLINE_MODE_L2,
MLX5_USER_INLINE_MODE_IP,
MLX5_USER_INLINE_MODE_TCP_UDP,
};
struct mlx5_ib_alloc_ucontext_resp { struct mlx5_ib_alloc_ucontext_resp {
__u32 qp_tab_size; __u32 qp_tab_size;
__u32 bf_reg_size; __u32 bf_reg_size;
...@@ -106,7 +117,8 @@ struct mlx5_ib_alloc_ucontext_resp { ...@@ -106,7 +117,8 @@ struct mlx5_ib_alloc_ucontext_resp {
__u32 response_length; __u32 response_length;
__u8 cqe_version; __u8 cqe_version;
__u8 cmds_supp_uhw; __u8 cmds_supp_uhw;
__u16 reserved2; __u8 eth_min_inline;
__u8 reserved2;
__u64 hca_core_clock_offset; __u64 hca_core_clock_offset;
__u32 log_uar_size; __u32 log_uar_size;
__u32 num_uars_per_page; __u32 num_uars_per_page;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment