Commit 488e5b30 authored by David S. Miller

Merge tag 'mlx5-updates-2017-11-04' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2017-11-04

This series includes:

From Huy: dscp to priority mapping for Ethernet packets.

===================================================
The first six patches enable differentiated services code point (dscp)
to priority mapping for Ethernet packets. Once this feature is
enabled, a packet is routed to the corresponding priority based on its
dscp. Users can combine this feature with the priority flow control
(pfc) feature to get priority flow control based on the dscp.

Firmware interface:
Mellanox firmware provides two control knobs for this feature:
  The QPTS register allows changing the trust state between dscp and
  pcp mode. The default is pcp mode. Once in dscp mode, firmware will
  route a packet based on its dscp value if the dscp field exists.

  The QPDPM register allows mapping a specific dscp (0 to 63) to a
  specific priority (0 to 7). By default, all dscps are mapped to
  priority zero.

Software interface:
This feature is controlled via the application priority TLV. IEEE
specification P802.1Qcd/D2.1 defines priority selector id 5 for the
application priority TLV. This APP TLV selector defines a DSCP to
priority map. The APP TLV can be sent by the switch or can be set
locally using software such as lldptool. In the mlx5 driver, we add
support for the dcbnl getapp and setapp callbacks. The mlx5 driver
only handles the selector id 5 application entry (dscp application
priority entry). If the user sends multiple dscp to priority APP TLV
entries for the same dscp, the last one sent takes effect; all
previously sent entries are deleted.
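
For illustration, the APP entry that the dcbnl core hands to the
driver's setapp callback for "map dscp 46 to priority 5" (the same
entry lldptool would generate) is a struct dcb_app of the form:

  struct dcb_app app = {
          .selector = IEEE_8021QAZ_APP_SEL_DSCP, /* selector id 5 */
          .protocol = 46,                        /* dscp, 0 to 63 */
          .priority = 5,                         /* priority, 0 to 7 */
  };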

The firmware trust state (in the QPTS register) is changed based on
the number of dscp to priority application entries. When the first
dscp to priority application entry is added by the user, the trust
state is changed to dscp. When the last dscp to priority application
entry is deleted by the user, the trust state is changed to pcp.
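
Condensed from the setapp/delapp handlers added below (error handling
and APP-table bookkeeping elided):

  /* setapp: the first dscp entry switches the trust state to dscp */
  if (!priv->dcbx.dscp_app_cnt)
          mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_DSCP);
  priv->dcbx.dscp_app_cnt++;

  /* delapp: removing the last dscp entry switches back to pcp */
  if (!--priv->dcbx.dscp_app_cnt)
          mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_PCP);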

When the port is in DSCP trust state, the transmit queue is selected
based on the dscp of the skb.
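
For example, an IPv4 packet marked EF carries dsfield 0xb8, so its
dscp is 0xb8 >> 2 = 46 and the skb is queued on whatever priority
dscp2prio[46] holds, mirroring mlx5e_get_dscp_up() added below:

  dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;   /* 0xb8 >> 2 == 46 */
  up = priv->dcbx_dp.dscp2prio[dscp];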

When the port is in DSCP trust state and the vport inline mode is not
NONE, firmware requires the mlx5 driver to copy the IP header into the
WQE ethernet segment inline header if the skb has one. This is done by
changing the transmit queue sq's min inline mode to L3. Note that the
min inline mode of sqs that belong to other features, such as xdpsq
and icosq, is not modified.
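
Condensed, the adjustment made by
mlx5e_trust_update_tx_min_inline_mode() below is (MLX5_INLINE_MODE_IP
being the L3 inline mode):

  if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP &&
      params->tx_min_inline_mode == MLX5_INLINE_MODE_L2)
          params->tx_min_inline_mode = MLX5_INLINE_MODE_IP;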
===================================================

In addition to the dscp series, some small misc changes are included as well:

From Inbar, Ethtool msglvl support and some debug prints in DCBNL logic
From Or Gerlitz, Enlarge the NIC TC offload table size (see the sizing note after this list)
From Rabie, Initialize destination_flow struct to 0
From Feras, Add inner TTC table to IPoIB flow steering
From Tal, Enable CQE based moderation on TX CQ
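
(Sizing note for Or's change: en_tc.c now derives the TC offload table
size from device caps instead of the old fixed 1024 entries. With a
hypothetical max_flow_counter of 0x8000 and log_max_ft_size of 16,
tc_grp_size = min(0x8000, 1 << 16) = 0x8000 and
tc_tbl_size = min(4 * 0x8000, 1 << 16) = 0x10000 entries.)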
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents bfe26ba9 0088cbbc
@@ -57,6 +57,7 @@
#define MLX5E_HW2SW_MTU(priv, hwmtu) ((hwmtu) - ((priv)->hard_mtu))
#define MLX5E_SW2HW_MTU(priv, swmtu) ((swmtu) + ((priv)->hard_mtu))
#define MLX5E_MAX_DSCP 64
#define MLX5E_MAX_NUM_TC 8
#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x6
@@ -105,6 +106,7 @@
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC 0x10
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE 0x10
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS 0x20
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES 0x80
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW 0x2
@@ -126,6 +128,16 @@
#define MLX5E_NUM_MAIN_GROUPS 9
#define MLX5E_MSG_LEVEL NETIF_MSG_LINK
#define mlx5e_dbg(mlevel, priv, format, ...) \
do { \
if (NETIF_MSG_##mlevel & (priv)->msglevel) \
netdev_warn(priv->netdev, format, \
##__VA_ARGS__); \
} while (0)
static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
{
switch (wq_type) {
@@ -187,12 +199,14 @@ extern const char mlx5e_self_tests[][ETH_GSTRING_LEN];
static const char mlx5e_priv_flags[][ETH_GSTRING_LEN] = {
"rx_cqe_moder",
"tx_cqe_moder",
"rx_cqe_compress",
};
enum mlx5e_priv_flag {
MLX5E_PFLAG_RX_CQE_BASED_MODER = (1 << 0),
MLX5E_PFLAG_RX_CQE_COMPRESS = (1 << 1),
MLX5E_PFLAG_TX_CQE_BASED_MODER = (1 << 1),
MLX5E_PFLAG_RX_CQE_COMPRESS = (1 << 2),
};
#define MLX5E_SET_PFLAG(params, pflag, enable) \
@@ -212,6 +226,7 @@ enum mlx5e_priv_flag {
struct mlx5e_cq_moder {
u16 usec;
u16 pkts;
u8 cq_period_mode;
};
struct mlx5e_params {
@@ -223,7 +238,6 @@ struct mlx5e_params {
u8 log_rq_size;
u16 num_channels;
u8 num_tc;
u8 rx_cq_period_mode;
bool rx_cqe_compress_def;
struct mlx5e_cq_moder rx_cq_moderation;
struct mlx5e_cq_moder tx_cq_moderation;
@@ -260,11 +274,17 @@ enum {
struct mlx5e_dcbx {
enum mlx5_dcbx_oper_mode mode;
struct mlx5e_cee_config cee_cfg; /* pending configuration */
u8 dscp_app_cnt;
/* The only setting that cannot be read from FW */
u8 tc_tsa[IEEE_8021QAZ_MAX_TCS];
u8 cap;
};
struct mlx5e_dcbx_dp {
u8 dscp2prio[MLX5E_MAX_DSCP];
u8 trust_state;
};
#endif
enum {
@@ -742,8 +762,12 @@ struct mlx5e_priv {
/* priv data path fields - start */
struct mlx5e_txqsq *txq2sq[MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC];
int channel_tc2txq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
#ifdef CONFIG_MLX5_CORE_EN_DCB
struct mlx5e_dcbx_dp dcbx_dp;
#endif
/* priv data path fields - end */
u32 msglevel;
unsigned long state;
struct mutex state_lock; /* Protects Interface state */
struct mlx5e_rq drop_rq;
@@ -800,6 +824,8 @@ struct mlx5e_profile {
mlx5e_fp_handle_rx_cqe handle_rx_cqe;
mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe;
} rx_handlers;
void (*netdev_registered_init)(struct mlx5e_priv *priv);
void (*netdev_registered_remove)(struct mlx5e_priv *priv);
int max_tc;
};
@@ -903,6 +929,8 @@ void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
int num_channels);
int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params,
u8 cq_period_mode);
void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params,
u8 cq_period_mode);
void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
@@ -968,6 +996,8 @@ extern const struct ethtool_ops mlx5e_ethtool_ops;
extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops;
int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets);
void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv);
void mlx5e_dcbnl_init_app(struct mlx5e_priv *priv);
void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv);
#endif
#ifndef CONFIG_RFS_ACCEL
@@ -1020,6 +1050,9 @@ void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);
int mlx5e_create_ttc_table(struct mlx5e_priv *priv);
void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv);
int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv);
void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv);
int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc,
u32 underlay_qpn, u32 *tisn);
void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn);
@@ -1069,5 +1102,5 @@ void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
u16 max_channels);
u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev);
#endif /* __MLX5_EN_H__ */
@@ -92,7 +92,7 @@ static enum mlx5e_traffic_types arfs_get_tt(enum arfs_type type)
static int arfs_disable(struct mlx5e_priv *priv)
{
struct mlx5_flow_destination dest;
struct mlx5_flow_destination dest = {};
struct mlx5e_tir *tir = priv->indir_tir;
int err = 0;
int tt;
@@ -126,7 +126,7 @@ int mlx5e_arfs_disable(struct mlx5e_priv *priv)
int mlx5e_arfs_enable(struct mlx5e_priv *priv)
{
struct mlx5_flow_destination dest;
struct mlx5_flow_destination dest = {};
int err = 0;
int tt;
int i;
@@ -175,7 +175,7 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv,
{
struct arfs_table *arfs_t = &priv->fs.arfs.arfs_tables[type];
struct mlx5e_tir *tir = priv->indir_tir;
struct mlx5_flow_destination dest;
struct mlx5_flow_destination dest = {};
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_spec *spec;
enum mlx5e_traffic_types tt;
@@ -466,7 +466,7 @@ static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
struct arfs_tuple *tuple = &arfs_rule->tuple;
struct mlx5_flow_handle *rule = NULL;
struct mlx5_flow_destination dest;
struct mlx5_flow_destination dest = {};
MLX5_DECLARE_FLOW_ACT(flow_act);
struct arfs_table *arfs_table;
struct mlx5_flow_spec *spec;
@@ -557,7 +557,7 @@ static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
static void arfs_modify_rule_rq(struct mlx5e_priv *priv,
struct mlx5_flow_handle *rule, u16 rxq)
{
struct mlx5_flow_destination dst;
struct mlx5_flow_destination dst = {};
int err = 0;
dst.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
@@ -171,3 +171,15 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
return err;
}
u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev)
{
u8 min_inline_mode;
mlx5_query_min_inline(mdev, &min_inline_mode);
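/* Editor's note: when firmware reports MLX5_INLINE_MODE_NONE but the
 * device lacks the wqe_vlan_insert capability, the minimum is raised
 * to L2 so VLAN insertion via the inline header still works.
 */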
if (min_inline_mode == MLX5_INLINE_MODE_NONE &&
!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
min_inline_mode = MLX5_INLINE_MODE_L2;
return min_inline_mode;
}
@@ -46,6 +46,13 @@ enum {
MLX5E_LOWEST_PRIO_GROUP = 0,
};
#define MLX5_DSCP_SUPPORTED(mdev) (MLX5_CAP_GEN(mdev, qcam_reg) && \
MLX5_CAP_QCAM_REG(mdev, qpts) && \
MLX5_CAP_QCAM_REG(mdev, qpdpm))
static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state);
static int mlx5e_set_dscp2prio(struct mlx5e_priv *priv, u8 dscp, u8 prio);
/* If dcbx mode is non-host set the dcbx mode to host.
*/
static int mlx5e_dcbnl_set_dcbx_mode(struct mlx5e_priv *priv,
@@ -234,7 +241,7 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
u8 tc_tx_bw[IEEE_8021QAZ_MAX_TCS];
u8 tc_group[IEEE_8021QAZ_MAX_TCS];
int max_tc = mlx5_max_tc(mdev);
int err;
int err, i;
mlx5e_build_tc_group(ets, tc_group, max_tc);
mlx5e_build_tc_tx_bw(ets, tc_tx_bw, tc_group, max_tc);
@@ -253,6 +260,14 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
return err;
memcpy(priv->dcbx.tc_tsa, ets->tc_tsa, sizeof(ets->tc_tsa));
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
mlx5e_dbg(HW, priv, "%s: prio_%d <=> tc_%d\n",
__func__, i, ets->prio_tc[i]);
mlx5e_dbg(HW, priv, "%s: tc_%d <=> tx_bw_%d%%, group_%d\n",
__func__, i, tc_tx_bw[i], tc_group[i]);
}
return err;
}
@@ -338,6 +353,11 @@ static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
ret = mlx5_set_port_pfc(mdev, pfc->pfc_en, pfc->pfc_en);
mlx5_toggle_port_link(mdev);
if (!ret) {
mlx5e_dbg(HW, priv,
"%s: PFC per priority bit mask: 0x%x\n",
__func__, pfc->pfc_en);
}
return ret;
}
@@ -381,6 +401,113 @@ static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
return 0;
}
static int mlx5e_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct dcb_app temp;
bool is_new;
int err;
if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP)
return -EINVAL;
if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
return -EINVAL;
if (!MLX5_DSCP_SUPPORTED(priv->mdev))
return -EINVAL;
if (app->protocol >= MLX5E_MAX_DSCP)
return -EINVAL;
/* Save the old entry info */
temp.selector = IEEE_8021QAZ_APP_SEL_DSCP;
temp.protocol = app->protocol;
temp.priority = priv->dcbx_dp.dscp2prio[app->protocol];
/* Check if need to switch to dscp trust state */
if (!priv->dcbx.dscp_app_cnt) {
err = mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_DSCP);
if (err)
return err;
}
/* Skip the fw command if new and old mapping are the same */
if (app->priority != priv->dcbx_dp.dscp2prio[app->protocol]) {
err = mlx5e_set_dscp2prio(priv, app->protocol, app->priority);
if (err)
goto fw_err;
}
/* Delete the old entry if exists */
is_new = false;
err = dcb_ieee_delapp(dev, &temp);
if (err)
is_new = true;
/* Add new entry and update counter */
err = dcb_ieee_setapp(dev, app);
if (err)
return err;
if (is_new)
priv->dcbx.dscp_app_cnt++;
return err;
fw_err:
mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_PCP);
return err;
}
static int mlx5e_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app)
{
struct mlx5e_priv *priv = netdev_priv(dev);
int err;
if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP)
return -EINVAL;
if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
return -EINVAL;
if (!MLX5_DSCP_SUPPORTED(priv->mdev))
return -EINVAL;
if (app->protocol >= MLX5E_MAX_DSCP)
return -EINVAL;
/* Skip if no dscp app entry */
if (!priv->dcbx.dscp_app_cnt)
return -ENOENT;
/* Check if the entry matches fw setting */
if (app->priority != priv->dcbx_dp.dscp2prio[app->protocol])
return -ENOENT;
/* Delete the app entry */
err = dcb_ieee_delapp(dev, app);
if (err)
return err;
/* Reset the priority mapping back to zero */
err = mlx5e_set_dscp2prio(priv, app->protocol, 0);
if (err)
goto fw_err;
priv->dcbx.dscp_app_cnt--;
/* Check if need to switch to pcp trust state */
if (!priv->dcbx.dscp_app_cnt)
err = mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_PCP);
return err;
fw_err:
mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_PCP);
return err;
}
static int mlx5e_dcbnl_ieee_getmaxrate(struct net_device *netdev,
struct ieee_maxrate *maxrate)
{
@@ -446,6 +573,11 @@ static int mlx5e_dcbnl_ieee_setmaxrate(struct net_device *netdev,
}
}
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
mlx5e_dbg(HW, priv, "%s: tc_%d <=> max_bw %d Gbps\n",
__func__, i, max_bw_value[i]);
}
return mlx5_modify_port_ets_rate_limit(mdev, max_bw_value, max_bw_unit);
}
@@ -471,6 +603,10 @@ static u8 mlx5e_dcbnl_setall(struct net_device *netdev)
ets.tc_rx_bw[i] = cee_cfg->pg_bw_pct[i];
ets.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
ets.prio_tc[i] = cee_cfg->prio_to_pg_map[i];
mlx5e_dbg(HW, priv,
"%s: Priority group %d: tx_bw %d, rx_bw %d, prio_tc %d\n",
__func__, i, ets.tc_tx_bw[i], ets.tc_rx_bw[i],
ets.prio_tc[i]);
}
err = mlx5e_dbcnl_validate_ets(netdev, &ets);
@@ -740,6 +876,8 @@ const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops = {
.ieee_setmaxrate = mlx5e_dcbnl_ieee_setmaxrate,
.ieee_getpfc = mlx5e_dcbnl_ieee_getpfc,
.ieee_setpfc = mlx5e_dcbnl_ieee_setpfc,
.ieee_setapp = mlx5e_dcbnl_ieee_setapp,
.ieee_delapp = mlx5e_dcbnl_ieee_delapp,
.getdcbx = mlx5e_dcbnl_getdcbx,
.setdcbx = mlx5e_dcbnl_setdcbx,
@@ -801,10 +939,135 @@ static void mlx5e_ets_init(struct mlx5e_priv *priv)
mlx5e_dcbnl_ieee_setets_core(priv, &ets);
}
enum {
INIT,
DELETE,
};
static void mlx5e_dcbnl_dscp_app(struct mlx5e_priv *priv, int action)
{
struct dcb_app temp;
int i;
if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
return;
if (!MLX5_DSCP_SUPPORTED(priv->mdev))
return;
/* No SEL_DSCP entry in non DSCP state */
if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_DSCP)
return;
temp.selector = IEEE_8021QAZ_APP_SEL_DSCP;
for (i = 0; i < MLX5E_MAX_DSCP; i++) {
temp.protocol = i;
temp.priority = priv->dcbx_dp.dscp2prio[i];
if (action == INIT)
dcb_ieee_setapp(priv->netdev, &temp);
else
dcb_ieee_delapp(priv->netdev, &temp);
}
priv->dcbx.dscp_app_cnt = (action == INIT) ? MLX5E_MAX_DSCP : 0;
}
void mlx5e_dcbnl_init_app(struct mlx5e_priv *priv)
{
mlx5e_dcbnl_dscp_app(priv, INIT);
}
void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv)
{
mlx5e_dcbnl_dscp_app(priv, DELETE);
}
static void mlx5e_trust_update_tx_min_inline_mode(struct mlx5e_priv *priv,
struct mlx5e_params *params)
{
params->tx_min_inline_mode = mlx5e_params_calculate_tx_min_inline(priv->mdev);
if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP &&
params->tx_min_inline_mode == MLX5_INLINE_MODE_L2)
params->tx_min_inline_mode = MLX5_INLINE_MODE_IP;
}
static void mlx5e_trust_update_sq_inline_mode(struct mlx5e_priv *priv)
{
struct mlx5e_channels new_channels = {};
mutex_lock(&priv->state_lock);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
goto out;
new_channels.params = priv->channels.params;
mlx5e_trust_update_tx_min_inline_mode(priv, &new_channels.params);
/* Skip if tx_min_inline is the same */
if (new_channels.params.tx_min_inline_mode ==
priv->channels.params.tx_min_inline_mode)
goto out;
if (mlx5e_open_channels(priv, &new_channels))
goto out;
mlx5e_switch_priv_channels(priv, &new_channels, NULL);
out:
mutex_unlock(&priv->state_lock);
}
static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state)
{
int err;
err = mlx5_set_trust_state(priv->mdev, trust_state);
if (err)
return err;
priv->dcbx_dp.trust_state = trust_state;
mlx5e_trust_update_sq_inline_mode(priv);
return err;
}
static int mlx5e_set_dscp2prio(struct mlx5e_priv *priv, u8 dscp, u8 prio)
{
int err;
err = mlx5_set_dscp2prio(priv->mdev, dscp, prio);
if (err)
return err;
priv->dcbx_dp.dscp2prio[dscp] = prio;
return err;
}
static int mlx5e_trust_initialize(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
int err;
if (!MLX5_DSCP_SUPPORTED(mdev))
return 0;
err = mlx5_query_trust_state(priv->mdev, &priv->dcbx_dp.trust_state);
if (err)
return err;
mlx5e_trust_update_tx_min_inline_mode(priv, &priv->channels.params);
err = mlx5_query_dscp2prio(priv->mdev, priv->dcbx_dp.dscp2prio);
if (err)
return err;
return 0;
}
void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv)
{
struct mlx5e_dcbx *dcbx = &priv->dcbx;
mlx5e_trust_initialize(priv);
if (!MLX5_CAP_GEN(priv->mdev, qos))
return;
@@ -1340,6 +1340,16 @@ static int mlx5e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
return mlx5_set_port_wol(mdev, mlx5_wol_mode);
}
static u32 mlx5e_get_msglevel(struct net_device *dev)
{
return ((struct mlx5e_priv *)netdev_priv(dev))->msglevel;
}
static void mlx5e_set_msglevel(struct net_device *dev, u32 val)
{
((struct mlx5e_priv *)netdev_priv(dev))->msglevel = val;
}
static int mlx5e_set_phys_id(struct net_device *dev,
enum ethtool_phys_id_state state)
{
@@ -1444,29 +1454,36 @@ static int mlx5e_get_module_eeprom(struct net_device *netdev,
typedef int (*mlx5e_pflag_handler)(struct net_device *netdev, bool enable);
static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable)
static int set_pflag_cqe_based_moder(struct net_device *netdev, bool enable,
bool is_rx_cq)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_channels new_channels = {};
bool rx_mode_changed;
u8 rx_cq_period_mode;
bool mode_changed;
u8 cq_period_mode, current_cq_period_mode;
int err = 0;
rx_cq_period_mode = enable ?
cq_period_mode = enable ?
MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
rx_mode_changed = rx_cq_period_mode != priv->channels.params.rx_cq_period_mode;
current_cq_period_mode = is_rx_cq ?
priv->channels.params.rx_cq_moderation.cq_period_mode :
priv->channels.params.tx_cq_moderation.cq_period_mode;
mode_changed = cq_period_mode != current_cq_period_mode;
if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE &&
if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE &&
!MLX5_CAP_GEN(mdev, cq_period_start_from_cqe))
return -EOPNOTSUPP;
if (!rx_mode_changed)
if (!mode_changed)
return 0;
new_channels.params = priv->channels.params;
mlx5e_set_rx_cq_mode_params(&new_channels.params, rx_cq_period_mode);
if (is_rx_cq)
mlx5e_set_rx_cq_mode_params(&new_channels.params, cq_period_mode);
else
mlx5e_set_tx_cq_mode_params(&new_channels.params, cq_period_mode);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
priv->channels.params = new_channels.params;
@@ -1481,6 +1498,16 @@ static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable)
return 0;
}
static int set_pflag_tx_cqe_based_moder(struct net_device *netdev, bool enable)
{
return set_pflag_cqe_based_moder(netdev, enable, false);
}
static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable)
{
return set_pflag_cqe_based_moder(netdev, enable, true);
}
int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val)
{
bool curr_val = MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS);
@@ -1568,6 +1595,12 @@ static int mlx5e_set_priv_flags(struct net_device *netdev, u32 pflags)
if (err)
goto out;
err = mlx5e_handle_pflag(netdev, pflags,
MLX5E_PFLAG_TX_CQE_BASED_MODER,
set_pflag_tx_cqe_based_moder);
if (err)
goto out;
err = mlx5e_handle_pflag(netdev, pflags,
MLX5E_PFLAG_RX_CQE_COMPRESS,
set_pflag_rx_cqe_compress);
@@ -1672,4 +1705,7 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.get_priv_flags = mlx5e_get_priv_flags,
.set_priv_flags = mlx5e_set_priv_flags,
.self_test = mlx5e_self_test,
.get_msglevel = mlx5e_get_msglevel,
.set_msglevel = mlx5e_set_msglevel,
};
@@ -162,7 +162,7 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
u16 vid, struct mlx5_flow_spec *spec)
{
struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
struct mlx5_flow_destination dest;
struct mlx5_flow_destination dest = {};
struct mlx5_flow_handle **rule_p;
MLX5_DECLARE_FLOW_ACT(flow_act);
int err = 0;
@@ -738,7 +738,7 @@ mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv)
{
struct mlx5_flow_destination dest;
struct mlx5_flow_destination dest = {};
struct mlx5e_ttc_table *ttc;
struct mlx5_flow_handle **rules;
struct mlx5_flow_table *ft;
@@ -909,7 +909,7 @@ mlx5e_generate_inner_ttc_rule(struct mlx5e_priv *priv,
static int mlx5e_generate_inner_ttc_table_rules(struct mlx5e_priv *priv)
{
struct mlx5_flow_destination dest;
struct mlx5_flow_destination dest = {};
struct mlx5_flow_handle **rules;
struct mlx5e_ttc_table *ttc;
struct mlx5_flow_table *ft;
@@ -1005,7 +1005,7 @@ static int mlx5e_create_inner_ttc_table_groups(struct mlx5e_ttc_table *ttc)
return err;
}
static int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv)
int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv)
{
struct mlx5e_ttc_table *ttc = &priv->fs.inner_ttc;
struct mlx5_flow_table_attr ft_attr = {};
@@ -1041,7 +1041,7 @@ static int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv)
return err;
}
static void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv)
void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv)
{
struct mlx5e_ttc_table *ttc = &priv->fs.inner_ttc;
@@ -1106,7 +1106,7 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
struct mlx5e_l2_rule *ai, int type)
{
struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
struct mlx5_flow_destination dest;
struct mlx5_flow_destination dest = {};
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_spec *spec;
int err = 0;
@@ -681,7 +681,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
}
INIT_WORK(&rq->am.work, mlx5e_rx_am_work);
rq->am.mode = params->rx_cq_period_mode;
rq->am.mode = params->rx_cq_moderation.cq_period_mode;
rq->page_cache.head = 0;
rq->page_cache.tail = 0;
@@ -1974,7 +1974,7 @@ static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
}
mlx5e_build_common_cq_param(priv, param);
param->cq_period_mode = params->rx_cq_period_mode;
param->cq_period_mode = params->rx_cq_moderation.cq_period_mode;
}
static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
@@ -1986,8 +1986,7 @@ static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
MLX5_SET(cqc, cqc, log_cq_size, params->log_sq_size);
mlx5e_build_common_cq_param(priv, param);
param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
param->cq_period_mode = params->tx_cq_moderation.cq_period_mode;
}
static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
@@ -3987,9 +3986,27 @@ static bool hw_lro_heuristic(u32 link_speed, u32 pci_bw)
(pci_bw <= 16000) && (pci_bw < link_speed));
}
void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
{
params->tx_cq_moderation.cq_period_mode = cq_period_mode;
params->tx_cq_moderation.pkts =
MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
params->tx_cq_moderation.usec =
MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
params->tx_cq_moderation.usec =
MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE;
MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER,
params->tx_cq_moderation.cq_period_mode ==
MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
}
void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
{
params->rx_cq_period_mode = cq_period_mode;
params->rx_cq_moderation.cq_period_mode = cq_period_mode;
params->rx_cq_moderation.pkts =
MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
@@ -4002,10 +4019,11 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
if (params->rx_am_enabled)
params->rx_cq_moderation =
mlx5e_am_get_def_profile(params->rx_cq_period_mode);
mlx5e_am_get_def_profile(cq_period_mode);
MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
params->rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
params->rx_cq_moderation.cq_period_mode ==
MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
}
u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
@@ -4065,16 +4083,11 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
params->rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
params->tx_cq_moderation.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
params->tx_cq_moderation.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
mlx5e_set_tx_cq_mode_params(params, cq_period_mode);
/* TX inline */
params->tx_max_inline = mlx5e_get_max_inline_cap(mdev);
mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
if (params->tx_min_inline_mode == MLX5_INLINE_MODE_NONE &&
!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
params->tx_min_inline_mode = MLX5_INLINE_MODE_L2;
params->tx_min_inline_mode = mlx5e_params_calculate_tx_min_inline(mdev);
/* RSS */
params->rss_hfunc = ETH_RSS_HASH_XOR;
@@ -4094,6 +4107,7 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
priv->netdev = netdev;
priv->profile = profile;
priv->ppriv = ppriv;
priv->msglevel = MLX5E_MSG_LEVEL;
priv->hard_mtu = MLX5E_ETH_HARD_MTU;
mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev));
@@ -4374,7 +4388,9 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
if (netdev->reg_state != NETREG_REGISTERED)
return;
#ifdef CONFIG_MLX5_CORE_EN_DCB
mlx5e_dcbnl_init_app(priv);
#endif
/* Device already registered: sync netdev system state */
if (mlx5e_vxlan_allowed(mdev)) {
rtnl_lock();
@@ -4395,6 +4411,11 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
#ifdef CONFIG_MLX5_CORE_EN_DCB
if (priv->netdev->reg_state == NETREG_REGISTERED)
mlx5e_dcbnl_delete_app(priv);
#endif
rtnl_lock();
if (netif_running(priv->netdev))
mlx5e_close(priv->netdev);
@@ -4615,6 +4636,9 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
goto err_detach;
}
#ifdef CONFIG_MLX5_CORE_EN_DCB
mlx5e_dcbnl_init_app(priv);
#endif
return priv;
err_detach:
@@ -4631,6 +4655,9 @@ static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
struct mlx5e_priv *priv = vpriv;
void *ppriv = priv->ppriv;
#ifdef CONFIG_MLX5_CORE_EN_DCB
mlx5e_dcbnl_delete_app(priv);
#endif
unregister_netdev(priv->netdev);
mlx5e_detach(mdev, vpriv);
mlx5e_destroy_netdev(priv);
@@ -63,7 +63,11 @@ profile[MLX5_CQ_PERIOD_NUM_MODES][MLX5E_PARAMS_AM_NUM_PROFILES] = {
static inline struct mlx5e_cq_moder mlx5e_am_get_profile(u8 cq_period_mode, int ix)
{
return profile[cq_period_mode][ix];
struct mlx5e_cq_moder cq_moder;
cq_moder = profile[cq_period_mode][ix];
cq_moder.cq_period_mode = cq_period_mode;
return cq_moder;
}
struct mlx5e_cq_moder mlx5e_am_get_def_profile(u8 rx_cq_period_mode)
@@ -75,7 +79,7 @@ struct mlx5e_cq_moder mlx5e_am_get_def_profile(u8 rx_cq_period_mode)
else /* MLX5_CQ_PERIOD_MODE_START_FROM_EQE */
default_profile_ix = MLX5E_RX_AM_DEF_PROFILE_EQE;
return profile[rx_cq_period_mode][default_profile_ix];
return mlx5e_am_get_profile(rx_cq_period_mode, default_profile_ix);
}
/* Adaptive moderation logic */
@@ -90,8 +90,8 @@ enum {
MLX5_HEADER_TYPE_NVGRE = 0x1,
};
#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE (1 << 16)
struct mod_hdr_key {
int num_actions;
@@ -263,10 +263,21 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
}
if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
int tc_grp_size, tc_tbl_size;
u32 max_flow_counter;
max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
MLX5_CAP_GEN(dev, max_flow_counter_15_0);
tc_grp_size = min_t(int, max_flow_counter, MLX5E_TC_TABLE_MAX_GROUP_SIZE);
tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS,
BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size)));
priv->fs.tc.t =
mlx5_create_auto_grouped_flow_table(priv->fs.ns,
MLX5E_TC_PRIO,
MLX5E_TC_TABLE_NUM_ENTRIES,
tc_tbl_size,
MLX5E_TC_TABLE_NUM_GROUPS,
0, 0);
if (IS_ERR(priv->fs.tc.t)) {
@@ -32,6 +32,7 @@
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <net/dsfield.h>
#include "en.h"
#include "ipoib/ipoib.h"
#include "en_accel/ipsec_rxtx.h"
@@ -86,6 +87,20 @@ static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
}
}
#ifdef CONFIG_MLX5_CORE_EN_DCB
static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb)
{
int dscp_cp = 0;
if (skb->protocol == htons(ETH_P_IP))
dscp_cp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
else if (skb->protocol == htons(ETH_P_IPV6))
dscp_cp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
return priv->dcbx_dp.dscp2prio[dscp_cp];
}
#endif
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
void *accel_priv, select_queue_fallback_t fallback)
{
@@ -97,6 +112,11 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
if (!netdev_get_num_tc(dev))
return channel_ix;
#ifdef CONFIG_MLX5_CORE_EN_DCB
if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP)
up = mlx5e_get_dscp_up(priv, skb);
else
#endif
if (skb_vlan_tag_present(skb))
up = skb->vlan_tci >> VLAN_PRIO_SHIFT;
@@ -157,7 +157,7 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
MLX5_MATCH_OUTER_HEADERS);
struct mlx5_flow_handle *flow_rule = NULL;
struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_destination dest;
struct mlx5_flow_destination dest = {};
struct mlx5_flow_spec *spec;
void *mv_misc = NULL;
void *mc_misc = NULL;
@@ -306,7 +306,7 @@ static struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
{
struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_destination dest;
struct mlx5_flow_destination dest = {};
struct mlx5_flow_handle *flow_rule;
struct mlx5_flow_spec *spec;
void *misc;
@@ -395,7 +395,7 @@ int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_destination dest;
struct mlx5_flow_destination dest = {};
struct mlx5_flow_handle *flow_rule = NULL;
struct mlx5_flow_spec *spec;
int err = 0;
@@ -670,7 +670,7 @@ struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
{
struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_destination dest;
struct mlx5_flow_destination dest = {};
struct mlx5_flow_handle *flow_rule;
struct mlx5_flow_spec *spec;
void *misc;
@@ -912,7 +912,7 @@ static int connect_fwd_rules(struct mlx5_core_dev *dev,
struct mlx5_flow_table *new_next_ft,
struct mlx5_flow_table *old_next_ft)
{
struct mlx5_flow_destination dest;
struct mlx5_flow_destination dest = {};
struct mlx5_flow_rule *iter;
int err = 0;
@@ -1820,7 +1820,7 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft,
int dest_num)
{
struct mlx5_flow_root_namespace *root = find_root(&ft->node);
struct mlx5_flow_destination gen_dest;
struct mlx5_flow_destination gen_dest = {};
struct mlx5_flow_table *next_ft = NULL;
struct mlx5_flow_handle *handle = NULL;
u32 sw_action = flow_act->action;
@@ -106,6 +106,13 @@ static int mlx5_get_mcam_reg(struct mlx5_core_dev *dev)
MLX5_MCAM_REGS_FIRST_128);
}
static int mlx5_get_qcam_reg(struct mlx5_core_dev *dev)
{
return mlx5_query_qcam_reg(dev, dev->caps.qcam,
MLX5_QCAM_FEATURE_ENHANCED_FEATURES,
MLX5_QCAM_REGS_FIRST_128);
}
int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
{
int err;
@@ -182,6 +189,9 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
if (MLX5_CAP_GEN(dev, mcam_reg))
mlx5_get_mcam_reg(dev);
if (MLX5_CAP_GEN(dev, qcam_reg))
mlx5_get_qcam_reg(dev);
return 0;
}
@@ -255,15 +255,24 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
}
err = mlx5e_create_inner_ttc_table(priv);
if (err) {
netdev_err(priv->netdev, "Failed to create inner ttc table, err=%d\n",
err);
goto err_destroy_arfs_tables;
}
err = mlx5e_create_ttc_table(priv);
if (err) {
netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
err);
goto err_destroy_arfs_tables;
goto err_destroy_inner_ttc_table;
}
return 0;
err_destroy_inner_ttc_table:
mlx5e_destroy_inner_ttc_table(priv);
err_destroy_arfs_tables:
mlx5e_arfs_destroy_tables(priv);
@@ -273,6 +282,7 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
{
mlx5e_destroy_ttc_table(priv);
mlx5e_destroy_inner_ttc_table(priv);
mlx5e_arfs_destroy_tables(priv);
}
@@ -122,6 +122,8 @@ int mlx5_query_pcam_reg(struct mlx5_core_dev *dev, u32 *pcam, u8 feature_group,
u8 access_reg_group);
int mlx5_query_mcam_reg(struct mlx5_core_dev *dev, u32 *mcap, u8 feature_group,
u8 access_reg_group);
int mlx5_query_qcam_reg(struct mlx5_core_dev *mdev, u32 *qcam,
u8 feature_group, u8 access_reg_group);
void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev);
void mlx5_lag_remove(struct mlx5_core_dev *dev);
@@ -98,6 +98,18 @@ int mlx5_query_mcam_reg(struct mlx5_core_dev *dev, u32 *mcam, u8 feature_group,
return mlx5_core_access_reg(dev, in, sz, mcam, sz, MLX5_REG_MCAM, 0, 0);
}
int mlx5_query_qcam_reg(struct mlx5_core_dev *mdev, u32 *qcam,
u8 feature_group, u8 access_reg_group)
{
u32 in[MLX5_ST_SZ_DW(qcam_reg)] = {};
int sz = MLX5_ST_SZ_BYTES(qcam_reg);
MLX5_SET(qcam_reg, in, feature_group, feature_group);
MLX5_SET(qcam_reg, in, access_reg_group, access_reg_group);
return mlx5_core_access_reg(mdev, in, sz, qcam, sz, MLX5_REG_QCAM, 0, 0);
}
struct mlx5_reg_pcap {
u8 rsvd0;
u8 port_num;
@@ -959,3 +971,102 @@ int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode)
return mlx5_core_access_reg(mdev, in, sizeof(in), out,
sizeof(out), MLX5_REG_MTPPSE, 0, 1);
}
int mlx5_set_trust_state(struct mlx5_core_dev *mdev, u8 trust_state)
{
u32 out[MLX5_ST_SZ_DW(qpts_reg)] = {};
u32 in[MLX5_ST_SZ_DW(qpts_reg)] = {};
int err;
MLX5_SET(qpts_reg, in, local_port, 1);
MLX5_SET(qpts_reg, in, trust_state, trust_state);
err = mlx5_core_access_reg(mdev, in, sizeof(in), out,
sizeof(out), MLX5_REG_QPTS, 0, 1);
return err;
}
int mlx5_query_trust_state(struct mlx5_core_dev *mdev, u8 *trust_state)
{
u32 out[MLX5_ST_SZ_DW(qpts_reg)] = {};
u32 in[MLX5_ST_SZ_DW(qpts_reg)] = {};
int err;
MLX5_SET(qpts_reg, in, local_port, 1);
err = mlx5_core_access_reg(mdev, in, sizeof(in), out,
sizeof(out), MLX5_REG_QPTS, 0, 0);
if (!err)
*trust_state = MLX5_GET(qpts_reg, out, trust_state);
return err;
}
int mlx5_set_dscp2prio(struct mlx5_core_dev *mdev, u8 dscp, u8 prio)
{
int sz = MLX5_ST_SZ_BYTES(qpdpm_reg);
void *qpdpm_dscp;
void *out;
void *in;
int err;
in = kzalloc(sz, GFP_KERNEL);
out = kzalloc(sz, GFP_KERNEL);
if (!in || !out) {
err = -ENOMEM;
goto out;
}
MLX5_SET(qpdpm_reg, in, local_port, 1);
err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_QPDPM, 0, 0);
if (err)
goto out;
memcpy(in, out, sz);
MLX5_SET(qpdpm_reg, in, local_port, 1);
/* Update the corresponding dscp entry */
qpdpm_dscp = MLX5_ADDR_OF(qpdpm_reg, in, dscp[dscp]);
MLX5_SET16(qpdpm_dscp_reg, qpdpm_dscp, prio, prio);
MLX5_SET16(qpdpm_dscp_reg, qpdpm_dscp, e, 1);
err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_QPDPM, 0, 1);
out:
kfree(in);
kfree(out);
return err;
}
/* dscp2prio[i]: priority that dscp i mapped to */
#define MLX5E_SUPPORTED_DSCP 64
int mlx5_query_dscp2prio(struct mlx5_core_dev *mdev, u8 *dscp2prio)
{
int sz = MLX5_ST_SZ_BYTES(qpdpm_reg);
void *qpdpm_dscp;
void *out;
void *in;
int err;
int i;
in = kzalloc(sz, GFP_KERNEL);
out = kzalloc(sz, GFP_KERNEL);
if (!in || !out) {
err = -ENOMEM;
goto out;
}
MLX5_SET(qpdpm_reg, in, local_port, 1);
err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_QPDPM, 0, 0);
if (err)
goto out;
for (i = 0; i < (MLX5E_SUPPORTED_DSCP); i++) {
qpdpm_dscp = MLX5_ADDR_OF(qpdpm_reg, out, dscp[i]);
dscp2prio[i] = MLX5_GET16(qpdpm_dscp_reg, qpdpm_dscp, prio);
}
out:
kfree(in);
kfree(out);
return err;
}
@@ -49,11 +49,15 @@
#define __mlx5_nullp(typ) ((struct mlx5_ifc_##typ##_bits *)0)
#define __mlx5_bit_sz(typ, fld) sizeof(__mlx5_nullp(typ)->fld)
#define __mlx5_bit_off(typ, fld) (offsetof(struct mlx5_ifc_##typ##_bits, fld))
#define __mlx5_16_off(typ, fld) (__mlx5_bit_off(typ, fld) / 16)
#define __mlx5_dw_off(typ, fld) (__mlx5_bit_off(typ, fld) / 32)
#define __mlx5_64_off(typ, fld) (__mlx5_bit_off(typ, fld) / 64)
#define __mlx5_16_bit_off(typ, fld) (16 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0xf))
#define __mlx5_dw_bit_off(typ, fld) (32 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0x1f))
#define __mlx5_mask(typ, fld) ((u32)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
#define __mlx5_dw_mask(typ, fld) (__mlx5_mask(typ, fld) << __mlx5_dw_bit_off(typ, fld))
#define __mlx5_mask16(typ, fld) ((u16)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
#define __mlx5_16_mask(typ, fld) (__mlx5_mask16(typ, fld) << __mlx5_16_bit_off(typ, fld))
#define __mlx5_st_sz_bits(typ) sizeof(struct mlx5_ifc_##typ##_bits)
#define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)
@@ -116,6 +120,19 @@ __mlx5_mask(typ, fld))
___t; \
})
#define MLX5_GET16(typ, p, fld) ((be16_to_cpu(*((__be16 *)(p) +\
__mlx5_16_off(typ, fld))) >> __mlx5_16_bit_off(typ, fld)) & \
__mlx5_mask16(typ, fld))
#define MLX5_SET16(typ, p, fld, v) do { \
u16 _v = v; \
BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 16); \
*((__be16 *)(p) + __mlx5_16_off(typ, fld)) = \
cpu_to_be16((be16_to_cpu(*((__be16 *)(p) + __mlx5_16_off(typ, fld))) & \
(~__mlx5_16_mask(typ, fld))) | (((_v) & __mlx5_mask16(typ, fld)) \
<< __mlx5_16_bit_off(typ, fld))); \
} while (0)
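/* Editor's note (worked example): in qpdpm_dscp_reg the 1-bit "e"
 * field occupies the top bit of a 16-bit word and the 4-bit "prio"
 * field its low nibble, so MLX5_SET16(qpdpm_dscp_reg, p, prio, 5)
 * rewrites only the low 4 bits of the big-endian u16 at p.
 */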
/* Big endian getters */
#define MLX5_GET64_BE(typ, p, fld) (*((__be64 *)(p) +\
__mlx5_64_off(typ, fld)))
@@ -1000,6 +1017,14 @@ enum mlx5_mcam_feature_groups {
MLX5_MCAM_FEATURE_ENHANCED_FEATURES = 0x0,
};
enum mlx5_qcam_reg_groups {
MLX5_QCAM_REGS_FIRST_128 = 0x0,
};
enum mlx5_qcam_feature_groups {
MLX5_QCAM_FEATURE_ENHANCED_FEATURES = 0x0,
};
/* GET Dev Caps macros */
#define MLX5_CAP_GEN(mdev, cap) \
MLX5_GET(cmd_hca_cap, mdev->caps.hca_cur[MLX5_CAP_GENERAL], cap)
@@ -1108,6 +1133,12 @@ enum mlx5_mcam_feature_groups {
#define MLX5_CAP_MCAM_FEATURE(mdev, fld) \
MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_feature_cap_mask.enhanced_features.fld)
#define MLX5_CAP_QCAM_REG(mdev, fld) \
MLX5_GET(qcam_reg, (mdev)->caps.qcam, qos_access_reg_cap_mask.reg_cap.fld)
#define MLX5_CAP_QCAM_FEATURE(mdev, fld) \
MLX5_GET(qcam_reg, (mdev)->caps.qcam, qos_feature_cap_mask.feature_cap.fld)
#define MLX5_CAP_FPGA(mdev, cap) \
MLX5_GET(fpga_cap, (mdev)->caps.fpga, cap)
@@ -107,8 +107,11 @@ enum {
};
enum {
MLX5_REG_QPTS = 0x4002,
MLX5_REG_QETCR = 0x4005,
MLX5_REG_QTCT = 0x400a,
MLX5_REG_QPDPM = 0x4013,
MLX5_REG_QCAM = 0x4019,
MLX5_REG_DCBX_PARAM = 0x4020,
MLX5_REG_DCBX_APP = 0x4021,
MLX5_REG_FPGA_CAP = 0x4022,
@@ -141,6 +144,11 @@ enum {
MLX5_REG_MCAM = 0x907f,
};
enum mlx5_qpts_trust_state {
MLX5_QPTS_TRUST_PCP = 1,
MLX5_QPTS_TRUST_DSCP = 2,
};
enum mlx5_dcbx_oper_mode {
MLX5E_DCBX_PARAM_VER_OPER_HOST = 0x0,
MLX5E_DCBX_PARAM_VER_OPER_AUTO = 0x3,
@@ -798,6 +806,7 @@ struct mlx5_core_dev {
u32 pcam[MLX5_ST_SZ_DW(pcam_reg)];
u32 mcam[MLX5_ST_SZ_DW(mcam_reg)];
u32 fpga[MLX5_ST_SZ_DW(fpga_cap)];
u32 qcam[MLX5_ST_SZ_DW(qcam_reg)];
} caps;
phys_addr_t iseg_base;
struct mlx5_init_seg __iomem *iseg;
@@ -838,7 +838,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 cc_modify_allowed[0x1];
u8 start_pad[0x1];
u8 cache_line_128byte[0x1];
u8 reserved_at_165[0xb];
u8 reserved_at_165[0xa];
u8 qcam_reg[0x1];
u8 gid_table_size[0x10];
u8 out_of_seq_cnt[0x1];
@@ -7890,6 +7891,43 @@ struct mlx5_ifc_mcam_reg_bits {
u8 reserved_at_1c0[0x80];
};
struct mlx5_ifc_qcam_access_reg_cap_mask {
u8 qcam_access_reg_cap_mask_127_to_20[0x6C];
u8 qpdpm[0x1];
u8 qcam_access_reg_cap_mask_18_to_4[0x0F];
u8 qdpm[0x1];
u8 qpts[0x1];
u8 qcap[0x1];
u8 qcam_access_reg_cap_mask_0[0x1];
};
struct mlx5_ifc_qcam_qos_feature_cap_mask {
u8 qcam_qos_feature_cap_mask_127_to_1[0x7F];
u8 qpts_trust_both[0x1];
};
struct mlx5_ifc_qcam_reg_bits {
u8 reserved_at_0[0x8];
u8 feature_group[0x8];
u8 reserved_at_10[0x8];
u8 access_reg_group[0x8];
u8 reserved_at_20[0x20];
union {
struct mlx5_ifc_qcam_access_reg_cap_mask reg_cap;
u8 reserved_at_0[0x80];
} qos_access_reg_cap_mask;
u8 reserved_at_c0[0x80];
union {
struct mlx5_ifc_qcam_qos_feature_cap_mask feature_cap;
u8 reserved_at_0[0x80];
} qos_feature_cap_mask;
u8 reserved_at_1c0[0x80];
};
struct mlx5_ifc_pcap_reg_bits {
u8 reserved_at_0[0x8];
u8 local_port[0x8];
@@ -8540,6 +8578,26 @@ struct mlx5_ifc_qetc_reg_bits {
struct mlx5_ifc_ets_global_config_reg_bits global_configuration;
};
struct mlx5_ifc_qpdpm_dscp_reg_bits {
u8 e[0x1];
u8 reserved_at_01[0x0b];
u8 prio[0x04];
};
struct mlx5_ifc_qpdpm_reg_bits {
u8 reserved_at_0[0x8];
u8 local_port[0x8];
u8 reserved_at_10[0x10];
struct mlx5_ifc_qpdpm_dscp_reg_bits dscp[64];
};
struct mlx5_ifc_qpts_reg_bits {
u8 reserved_at_0[0x8];
u8 local_port[0x8];
u8 reserved_at_10[0x2d];
u8 trust_state[0x3];
};
struct mlx5_ifc_qtct_reg_bits {
u8 reserved_at_0[0x8];
u8 port_number[0x8];
@@ -179,4 +179,9 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
int mlx5_query_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *out);
int mlx5_set_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *in);
int mlx5_set_trust_state(struct mlx5_core_dev *mdev, u8 trust_state);
int mlx5_query_trust_state(struct mlx5_core_dev *mdev, u8 *trust_state);
int mlx5_set_dscp2prio(struct mlx5_core_dev *mdev, u8 dscp, u8 prio);
int mlx5_query_dscp2prio(struct mlx5_core_dev *mdev, u8 *dscp2prio);
#endif /* __MLX5_PORT_H__ */
@@ -206,6 +206,7 @@ struct cee_pfc {
#define IEEE_8021QAZ_APP_SEL_STREAM 2
#define IEEE_8021QAZ_APP_SEL_DGRAM 3
#define IEEE_8021QAZ_APP_SEL_ANY 4
#define IEEE_8021QAZ_APP_SEL_DSCP 5
/* This structure contains the IEEE 802.1Qaz APP managed object. This
* object is also used for the CEE std as well.