Commit f993604d authored by David S. Miller

Merge branch 'mlx5-next'

Saeed Mahameed says:

====================
QoS and VxLAN offloads support for Mellanox 100G mlx5 driver

This patch series introduces QoS IEEE dcbnl support for
PFC, ETS and max rate.

In addition, we added VxLAN support and introduced a patch
that makes the driver report checksum complete in the RX path
for all IP (tunneled and non-tunneled) traffic that is not HW LRO.

This series is applied on top of the latest mlx5_ifc and NDO fixes
we sent to the net tree:
	net/mlx5e: Use static constant netdevice ndos
	net/mlx5e: Remove select queue ndo initialization
	net/mlx5: Use offset based reserved field names in the IFC header file

The QoS patches depend on the IFC change since they expose new fields in
the driver/firmware API. Both QoS and VxLAN patches depend on the NDO changes,
since they add new ndo entries.

Changes from V1:
	- Fixed the S.O.B. from "Matt" to "Matthew" to match the committer name.
	- Don't populate VxLAN/dcbnl ndos for virtual functions.
	- Addressed John's comment on mlx5_setup_tc to align with the latest API changes.
	- Added a device ETS capability check prior to querying/modifying the ETS configuration.
	- Call mlx5e_dcbnl_ieee_setets_core at the end of mlx5e_create_netdev and don't
fail netdev creation if it fails or ETS is not supported.

The series was applied on top of ("5270c4da Merge branch 'vxlan-cleanups'") plus the
	latest mlx5 IFC and NDO fixes from the net tree.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents e5fbfc1c 89db09eb
......@@ -42,6 +42,7 @@
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <linux/mlx5/port.h>
#include <linux/mlx5/vport.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
......
......@@ -19,3 +19,15 @@ config MLX5_CORE_EN
Ethernet support in Mellanox Technologies ConnectX-4 NIC.
Ethernet and Infiniband support in ConnectX-4 are currently mutually
exclusive.
config MLX5_CORE_EN_DCB
bool "Data Center Bridging (DCB) Support"
default y
depends on MLX5_CORE_EN && DCB
---help---
Say Y here if you want to use Data Center Bridging (DCB) in the
driver.
If set to N, you will not be able to configure QoS and rate-limit attributes.
This option depends on the kernel's DCB support.
If unsure, set to Y.
......@@ -3,6 +3,9 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \
mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o
mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o \
en_main.o en_fs.o en_ethtool.o en_tx.o en_rx.o \
en_txrx.o en_clock.o
en_txrx.o en_clock.o vxlan.o
mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o
/*
* Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
* Copyright (c) 2013-2016, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
......@@ -560,6 +560,18 @@ const char *mlx5_command_str(int command)
case MLX5_CMD_OP_ACCESS_REG:
return "MLX5_CMD_OP_ACCESS_REG";
case MLX5_CMD_OP_SET_WOL_ROL:
return "SET_WOL_ROL";
case MLX5_CMD_OP_QUERY_WOL_ROL:
return "QUERY_WOL_ROL";
case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
return "ADD_VXLAN_UDP_DPORT";
case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
return "DELETE_VXLAN_UDP_DPORT";
default: return "unknown command opcode";
}
}
......
/*
* Copyright (c) 2015, Mellanox Technologies. All rights reserved.
* Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
......@@ -29,6 +29,8 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __MLX5_EN_H__
#define __MLX5_EN_H__
#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
......@@ -38,6 +40,7 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/port.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/transobj.h>
#include "wq.h"
......@@ -69,6 +72,11 @@
#define MLX5E_NUM_MAIN_GROUPS 9
#ifdef CONFIG_MLX5_CORE_EN_DCB
#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
#define MLX5E_MIN_BW_ALLOC 1 /* Min percentage of BW allocation */
#endif
static const char vport_strings[][ETH_GSTRING_LEN] = {
/* vport statistics */
"rx_packets",
......@@ -95,12 +103,15 @@ static const char vport_strings[][ETH_GSTRING_LEN] = {
/* SW counters */
"tso_packets",
"tso_bytes",
"tso_inner_packets",
"tso_inner_bytes",
"lro_packets",
"lro_bytes",
"rx_csum_good",
"rx_csum_none",
"rx_csum_sw",
"tx_csum_offload",
"tx_csum_inner",
"tx_queue_stopped",
"tx_queue_wake",
"tx_queue_dropped",
......@@ -133,18 +144,21 @@ struct mlx5e_vport_stats {
/* SW counters */
u64 tso_packets;
u64 tso_bytes;
u64 tso_inner_packets;
u64 tso_inner_bytes;
u64 lro_packets;
u64 lro_bytes;
u64 rx_csum_good;
u64 rx_csum_none;
u64 rx_csum_sw;
u64 tx_csum_offload;
u64 tx_csum_inner;
u64 tx_queue_stopped;
u64 tx_queue_wake;
u64 tx_queue_dropped;
u64 rx_wqe_err;
#define NUM_VPORT_COUNTERS 32
#define NUM_VPORT_COUNTERS 35
};
static const char pport_strings[][ETH_GSTRING_LEN] = {
......@@ -244,7 +258,10 @@ static const char sq_stats_strings[][ETH_GSTRING_LEN] = {
"packets",
"tso_packets",
"tso_bytes",
"tso_inner_packets",
"tso_inner_bytes",
"csum_offload_none",
"csum_offload_inner",
"stopped",
"wake",
"dropped",
......@@ -255,12 +272,15 @@ struct mlx5e_sq_stats {
u64 packets;
u64 tso_packets;
u64 tso_bytes;
u64 tso_inner_packets;
u64 tso_inner_bytes;
u64 csum_offload_none;
u64 csum_offload_inner;
u64 stopped;
u64 wake;
u64 dropped;
u64 nop;
#define NUM_SQ_STATS 8
#define NUM_SQ_STATS 11
};
struct mlx5e_stats {
......@@ -272,7 +292,6 @@ struct mlx5e_params {
u8 log_sq_size;
u8 log_rq_size;
u16 num_channels;
u8 default_vlan_prio;
u8 num_tc;
u16 rx_cq_moderation_usec;
u16 rx_cq_moderation_pkts;
......@@ -285,6 +304,9 @@ struct mlx5e_params {
u8 rss_hfunc;
u8 toeplitz_hash_key[40];
u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
#ifdef CONFIG_MLX5_CORE_EN_DCB
struct ieee_ets ets;
#endif
};
struct mlx5e_tstamp {
......@@ -491,6 +513,11 @@ struct mlx5e_vlan_db {
bool filter_disabled;
};
struct mlx5e_vxlan_db {
spinlock_t lock; /* protect vxlan table */
struct radix_tree_root tree;
};
struct mlx5e_flow_table {
int num_groups;
struct mlx5_flow_table *t;
......@@ -505,7 +532,6 @@ struct mlx5e_flow_tables {
struct mlx5e_priv {
/* priv data path fields - start */
int default_vlan_prio;
struct mlx5e_sq **txq_to_sq_map;
int channeltc_to_txq_map[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
/* priv data path fields - end */
......@@ -526,6 +552,7 @@ struct mlx5e_priv {
struct mlx5e_flow_tables fts;
struct mlx5e_eth_addr_db eth_addr;
struct mlx5e_vlan_db vlan;
struct mlx5e_vxlan_db vxlan;
struct mlx5e_params params;
spinlock_t async_events_spinlock; /* sync hw events */
......@@ -665,4 +692,11 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
}
extern const struct ethtool_ops mlx5e_ethtool_ops;
#ifdef CONFIG_MLX5_CORE_EN_DCB
extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops;
int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets);
#endif
u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev);
#endif /* __MLX5_EN_H__ */
/*
* Copyright (c) 2016, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/device.h>
#include <linux/netdevice.h>
#include "en.h"
#define MLX5E_MAX_PRIORITY 8
#define MLX5E_100MB (100000)
#define MLX5E_1GB (1000000)
static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
struct ieee_ets *ets)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
if (!MLX5_CAP_GEN(priv->mdev, ets))
return -ENOTSUPP;
memcpy(ets, &priv->params.ets, sizeof(*ets));
return 0;
}
enum {
MLX5E_VENDOR_TC_GROUP_NUM = 7,
MLX5E_ETS_TC_GROUP_NUM = 0,
};
static void mlx5e_build_tc_group(struct ieee_ets *ets, u8 *tc_group, int max_tc)
{
bool any_tc_mapped_to_ets = false;
int strict_group;
int i;
for (i = 0; i <= max_tc; i++)
if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS)
any_tc_mapped_to_ets = true;
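/* All ETS TCs share group 0, so when any TC uses ETS the
 * strict-priority groups are numbered starting from 1 to avoid
 * colliding with the ETS group.
 */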
strict_group = any_tc_mapped_to_ets ? 1 : 0;
for (i = 0; i <= max_tc; i++) {
switch (ets->tc_tsa[i]) {
case IEEE_8021QAZ_TSA_VENDOR:
tc_group[i] = MLX5E_VENDOR_TC_GROUP_NUM;
break;
case IEEE_8021QAZ_TSA_STRICT:
tc_group[i] = strict_group++;
break;
case IEEE_8021QAZ_TSA_ETS:
tc_group[i] = MLX5E_ETS_TC_GROUP_NUM;
break;
}
}
}
static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw,
u8 *tc_group, int max_tc)
{
int i;
for (i = 0; i <= max_tc; i++) {
switch (ets->tc_tsa[i]) {
case IEEE_8021QAZ_TSA_VENDOR:
tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
break;
case IEEE_8021QAZ_TSA_STRICT:
tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
break;
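/* An ETS TC whose configured share is zero still needs a nonzero
 * allocation, hence the fallback to the 1% minimum below.
 */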
case IEEE_8021QAZ_TSA_ETS:
tc_tx_bw[i] = ets->tc_tx_bw[i] ?: MLX5E_MIN_BW_ALLOC;
break;
}
}
}
int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
{
struct mlx5_core_dev *mdev = priv->mdev;
u8 tc_tx_bw[IEEE_8021QAZ_MAX_TCS];
u8 tc_group[IEEE_8021QAZ_MAX_TCS];
int max_tc = mlx5_max_tc(mdev);
int err;
if (!MLX5_CAP_GEN(mdev, ets))
return -ENOTSUPP;
mlx5e_build_tc_group(ets, tc_group, max_tc);
mlx5e_build_tc_tx_bw(ets, tc_tx_bw, tc_group, max_tc);
err = mlx5_set_port_prio_tc(mdev, ets->prio_tc);
if (err)
return err;
err = mlx5_set_port_tc_group(mdev, tc_group);
if (err)
return err;
return mlx5_set_port_tc_bw_alloc(mdev, tc_tx_bw);
}
static int mlx5e_dbcnl_validate_ets(struct ieee_ets *ets)
{
int bw_sum = 0;
int i;
/* Validate Priority */
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
if (ets->prio_tc[i] >= MLX5E_MAX_PRIORITY)
return -EINVAL;
}
/* Validate Bandwidth Sum */
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS)
bw_sum += ets->tc_tx_bw[i];
}
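/* Either no ETS bandwidth is configured at all (sum of 0), or the
 * shares must add up to exactly 100%.
 */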
if (bw_sum != 0 && bw_sum != 100)
return -EINVAL;
return 0;
}
static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev,
struct ieee_ets *ets)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
int err;
err = mlx5e_dbcnl_validate_ets(ets);
if (err)
return err;
err = mlx5e_dcbnl_ieee_setets_core(priv, ets);
if (err)
return err;
memcpy(&priv->params.ets, ets, sizeof(*ets));
priv->params.ets.ets_cap = mlx5_max_tc(priv->mdev) + 1;
return 0;
}
static int mlx5e_dcbnl_ieee_getpfc(struct net_device *dev,
struct ieee_pfc *pfc)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
pfc->pfc_cap = mlx5_max_tc(mdev) + 1;
return mlx5_query_port_pfc(mdev, &pfc->pfc_en, NULL);
}
static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
struct ieee_pfc *pfc)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
enum mlx5_port_status ps;
u8 curr_pfc_en;
int ret;
mlx5_query_port_pfc(mdev, &curr_pfc_en, NULL);
if (pfc->pfc_en == curr_pfc_en)
return 0;
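/* Applying a new PFC configuration requires the port to be down,
 * so bounce the port around the change only if it is currently up.
 */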
mlx5_query_port_admin_status(mdev, &ps);
if (ps == MLX5_PORT_UP)
mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN);
ret = mlx5_set_port_pfc(mdev, pfc->pfc_en, pfc->pfc_en);
if (ps == MLX5_PORT_UP)
mlx5_set_port_admin_status(mdev, MLX5_PORT_UP);
return ret;
}
static u8 mlx5e_dcbnl_getdcbx(struct net_device *dev)
{
return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
}
static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
{
if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
(mode & DCB_CAP_DCBX_VER_CEE) ||
!(mode & DCB_CAP_DCBX_VER_IEEE) ||
!(mode & DCB_CAP_DCBX_HOST))
return 1;
return 0;
}
static int mlx5e_dcbnl_ieee_getmaxrate(struct net_device *netdev,
struct ieee_maxrate *maxrate)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
u8 max_bw_value[IEEE_8021QAZ_MAX_TCS];
u8 max_bw_unit[IEEE_8021QAZ_MAX_TCS];
int err;
int i;
err = mlx5_query_port_ets_rate_limit(mdev, max_bw_value, max_bw_unit);
if (err)
return err;
memset(maxrate->tc_maxrate, 0, sizeof(maxrate->tc_maxrate));
for (i = 0; i <= mlx5_max_tc(mdev); i++) {
switch (max_bw_unit[i]) {
case MLX5_100_MBPS_UNIT:
maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_100MB;
break;
case MLX5_GBPS_UNIT:
maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_1GB;
break;
case MLX5_BW_NO_LIMIT:
break;
default:
WARN(true, "unsupported BW unit");
break;
}
}
return 0;
}
static int mlx5e_dcbnl_ieee_setmaxrate(struct net_device *netdev,
struct ieee_maxrate *maxrate)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
u8 max_bw_value[IEEE_8021QAZ_MAX_TCS];
u8 max_bw_unit[IEEE_8021QAZ_MAX_TCS];
__u64 upper_limit_mbps = roundup(255 * MLX5E_100MB, MLX5E_1GB);
int i;
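/* max_bw_value is an 8-bit register field (see ets_tcn_config_reg),
 * so rates below 255 * 100 Mbps are programmed in 100 Mbps units and
 * anything larger falls back to 1 Gbps units.
 */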
memset(max_bw_value, 0, sizeof(max_bw_value));
memset(max_bw_unit, 0, sizeof(max_bw_unit));
for (i = 0; i <= mlx5_max_tc(mdev); i++) {
if (!maxrate->tc_maxrate[i]) {
max_bw_unit[i] = MLX5_BW_NO_LIMIT;
continue;
}
if (maxrate->tc_maxrate[i] < upper_limit_mbps) {
max_bw_value[i] = div_u64(maxrate->tc_maxrate[i],
MLX5E_100MB);
max_bw_value[i] = max_bw_value[i] ? max_bw_value[i] : 1;
max_bw_unit[i] = MLX5_100_MBPS_UNIT;
} else {
max_bw_value[i] = div_u64(maxrate->tc_maxrate[i],
MLX5E_1GB);
max_bw_unit[i] = MLX5_GBPS_UNIT;
}
}
return mlx5_modify_port_ets_rate_limit(mdev, max_bw_value, max_bw_unit);
}
const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops = {
.ieee_getets = mlx5e_dcbnl_ieee_getets,
.ieee_setets = mlx5e_dcbnl_ieee_setets,
.ieee_getmaxrate = mlx5e_dcbnl_ieee_getmaxrate,
.ieee_setmaxrate = mlx5e_dcbnl_ieee_setmaxrate,
.ieee_getpfc = mlx5e_dcbnl_ieee_getpfc,
.ieee_setpfc = mlx5e_dcbnl_ieee_setpfc,
.getdcbx = mlx5e_dcbnl_getdcbx,
.setdcbx = mlx5e_dcbnl_setdcbx,
};
......@@ -884,6 +884,129 @@ static int mlx5e_get_ts_info(struct net_device *dev,
return 0;
}
static __u32 mlx5e_get_wol_supported(struct mlx5_core_dev *mdev)
{
__u32 ret = 0;
if (MLX5_CAP_GEN(mdev, wol_g))
ret |= WAKE_MAGIC;
if (MLX5_CAP_GEN(mdev, wol_s))
ret |= WAKE_MAGICSECURE;
if (MLX5_CAP_GEN(mdev, wol_a))
ret |= WAKE_ARP;
if (MLX5_CAP_GEN(mdev, wol_b))
ret |= WAKE_BCAST;
if (MLX5_CAP_GEN(mdev, wol_m))
ret |= WAKE_MCAST;
if (MLX5_CAP_GEN(mdev, wol_u))
ret |= WAKE_UCAST;
if (MLX5_CAP_GEN(mdev, wol_p))
ret |= WAKE_PHY;
return ret;
}
static __u32 mlx5e_reformat_wol_mode_mlx5_to_linux(u8 mode)
{
__u32 ret = 0;
if (mode & MLX5_WOL_MAGIC)
ret |= WAKE_MAGIC;
if (mode & MLX5_WOL_SECURED_MAGIC)
ret |= WAKE_MAGICSECURE;
if (mode & MLX5_WOL_ARP)
ret |= WAKE_ARP;
if (mode & MLX5_WOL_BROADCAST)
ret |= WAKE_BCAST;
if (mode & MLX5_WOL_MULTICAST)
ret |= WAKE_MCAST;
if (mode & MLX5_WOL_UNICAST)
ret |= WAKE_UCAST;
if (mode & MLX5_WOL_PHY_ACTIVITY)
ret |= WAKE_PHY;
return ret;
}
static u8 mlx5e_reformat_wol_mode_linux_to_mlx5(__u32 mode)
{
u8 ret = 0;
if (mode & WAKE_MAGIC)
ret |= MLX5_WOL_MAGIC;
if (mode & WAKE_MAGICSECURE)
ret |= MLX5_WOL_SECURED_MAGIC;
if (mode & WAKE_ARP)
ret |= MLX5_WOL_ARP;
if (mode & WAKE_BCAST)
ret |= MLX5_WOL_BROADCAST;
if (mode & WAKE_MCAST)
ret |= MLX5_WOL_MULTICAST;
if (mode & WAKE_UCAST)
ret |= MLX5_WOL_UNICAST;
if (mode & WAKE_PHY)
ret |= MLX5_WOL_PHY_ACTIVITY;
return ret;
}
static void mlx5e_get_wol(struct net_device *netdev,
struct ethtool_wolinfo *wol)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
u8 mlx5_wol_mode;
int err;
memset(wol, 0, sizeof(*wol));
wol->supported = mlx5e_get_wol_supported(mdev);
if (!wol->supported)
return;
err = mlx5_query_port_wol(mdev, &mlx5_wol_mode);
if (err)
return;
wol->wolopts = mlx5e_reformat_wol_mode_mlx5_to_linux(mlx5_wol_mode);
}
static int mlx5e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
__u32 wol_supported = mlx5e_get_wol_supported(mdev);
u32 mlx5_wol_mode;
if (!wol_supported)
return -ENOTSUPP;
if (wol->wolopts & ~wol_supported)
return -EINVAL;
mlx5_wol_mode = mlx5e_reformat_wol_mode_linux_to_mlx5(wol->wolopts);
return mlx5_set_port_wol(mdev, mlx5_wol_mode);
}
const struct ethtool_ops mlx5e_ethtool_ops = {
.get_drvinfo = mlx5e_get_drvinfo,
.get_link = ethtool_op_get_link,
......@@ -908,4 +1031,6 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.get_pauseparam = mlx5e_get_pauseparam,
.set_pauseparam = mlx5e_set_pauseparam,
.get_ts_info = mlx5e_get_ts_info,
.get_wol = mlx5e_get_wol,
.set_wol = mlx5e_set_wol,
};
/*
* Copyright (c) 2015, Mellanox Technologies. All rights reserved.
* Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
......@@ -31,8 +31,10 @@
*/
#include <linux/mlx5/fs.h>
#include <net/vxlan.h>
#include "en.h"
#include "eswitch.h"
#include "vxlan.h"
struct mlx5e_rq_param {
u32 rqc[MLX5_ST_SZ_DW(rqc)];
......@@ -143,9 +145,12 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
/* Collect the SW counters first and then the HW counters, for consistency */
s->tso_packets = 0;
s->tso_bytes = 0;
s->tso_inner_packets = 0;
s->tso_inner_bytes = 0;
s->tx_queue_stopped = 0;
s->tx_queue_wake = 0;
s->tx_queue_dropped = 0;
s->tx_csum_inner = 0;
tx_offload_none = 0;
s->lro_packets = 0;
s->lro_bytes = 0;
......@@ -166,9 +171,12 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
s->tso_packets += sq_stats->tso_packets;
s->tso_bytes += sq_stats->tso_bytes;
s->tso_inner_packets += sq_stats->tso_inner_packets;
s->tso_inner_bytes += sq_stats->tso_inner_bytes;
s->tx_queue_stopped += sq_stats->stopped;
s->tx_queue_wake += sq_stats->wake;
s->tx_queue_dropped += sq_stats->dropped;
s->tx_csum_inner += sq_stats->csum_offload_inner;
tx_offload_none += sq_stats->csum_offload_none;
}
}
......@@ -243,7 +251,7 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
s->tx_broadcast_bytes;
/* Update calculated offload counters */
s->tx_csum_offload = s->tx_packets - tx_offload_none;
s->tx_csum_offload = s->tx_packets - tx_offload_none - s->tx_csum_inner;
s->rx_csum_good = s->rx_packets - s->rx_csum_none -
s->rx_csum_sw;
......@@ -1400,6 +1408,24 @@ static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
return 0;
}
static void mlx5e_netdev_set_tcs(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
int nch = priv->params.num_channels;
int ntc = priv->params.num_tc;
int tc;
netdev_reset_tc(netdev);
if (ntc == 1)
return;
netdev_set_num_tc(netdev, ntc);
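/* Give each TC a contiguous block of nch txqs: TC n maps to
 * queues [n * nch, (n + 1) * nch).
 */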
for (tc = 0; tc < ntc; tc++)
netdev_set_tc_queue(netdev, tc, nch, tc * nch);
}
int mlx5e_open_locked(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
......@@ -1408,6 +1434,8 @@ int mlx5e_open_locked(struct net_device *netdev)
set_bit(MLX5E_STATE_OPENED, &priv->state);
mlx5e_netdev_set_tcs(netdev);
num_txqs = priv->params.num_channels * priv->params.num_tc;
netif_set_real_num_tx_queues(netdev, num_txqs);
netif_set_real_num_rx_queues(netdev, priv->params.num_channels);
......@@ -1602,7 +1630,7 @@ static int mlx5e_create_tis(struct mlx5e_priv *priv, int tc)
memset(in, 0, sizeof(in));
MLX5_SET(tisc, tisc, prio, tc);
MLX5_SET(tisc, tisc, prio, tc << 1);
MLX5_SET(tisc, tisc, transport_domain, priv->tdn);
return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
......@@ -1618,7 +1646,7 @@ static int mlx5e_create_tises(struct mlx5e_priv *priv)
int err;
int tc;
for (tc = 0; tc < priv->params.num_tc; tc++) {
for (tc = 0; tc < MLX5E_MAX_NUM_TC; tc++) {
err = mlx5e_create_tis(priv, tc);
if (err)
goto err_close_tises;
......@@ -1637,7 +1665,7 @@ static void mlx5e_destroy_tises(struct mlx5e_priv *priv)
{
int tc;
for (tc = 0; tc < priv->params.num_tc; tc++)
for (tc = 0; tc < MLX5E_MAX_NUM_TC; tc++)
mlx5e_destroy_tis(priv, tc);
}
......@@ -1824,6 +1852,40 @@ static void mlx5e_destroy_tirs(struct mlx5e_priv *priv)
mlx5e_destroy_tir(priv, i);
}
static int mlx5e_setup_tc(struct net_device *netdev, u8 tc)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
bool was_opened;
int err = 0;
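/* Only 0 (back to a single TC) or the full MLX5E_MAX_NUM_TC is
 * accepted; the netdev and the TISes are pre-sized for the maximum
 * number of TCs.
 */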
if (tc && tc != MLX5E_MAX_NUM_TC)
return -EINVAL;
mutex_lock(&priv->state_lock);
was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
if (was_opened)
mlx5e_close_locked(priv->netdev);
priv->params.num_tc = tc ? tc : 1;
if (was_opened)
err = mlx5e_open_locked(priv->netdev);
mutex_unlock(&priv->state_lock);
return err;
}
static int mlx5e_ndo_setup_tc(struct net_device *dev, u32 handle,
__be16 proto, struct tc_to_netdev *tc)
{
if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO)
return -EINVAL;
return mlx5e_setup_tc(dev, tc->tc);
}
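With these ndos wired up, a standard mqprio configuration (e.g. "tc qdisc add dev <ifname> root mqprio" from iproute2) ends up in mlx5e_setup_tc with the requested number of traffic classes.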
static struct rtnl_link_stats64 *
mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
......@@ -2024,10 +2086,84 @@ static int mlx5e_get_vf_stats(struct net_device *dev,
vf_stats);
}
static void mlx5e_add_vxlan_port(struct net_device *netdev,
sa_family_t sa_family, __be16 port)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
if (!mlx5e_vxlan_allowed(priv->mdev))
return;
mlx5e_vxlan_add_port(priv, be16_to_cpu(port));
}
static void mlx5e_del_vxlan_port(struct net_device *netdev,
sa_family_t sa_family, __be16 port)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
if (!mlx5e_vxlan_allowed(priv->mdev))
return;
mlx5e_vxlan_del_port(priv, be16_to_cpu(port));
}
static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv,
struct sk_buff *skb,
netdev_features_t features)
{
struct udphdr *udph;
u16 proto;
u16 port = 0;
switch (vlan_get_protocol(skb)) {
case htons(ETH_P_IP):
proto = ip_hdr(skb)->protocol;
break;
case htons(ETH_P_IPV6):
proto = ipv6_hdr(skb)->nexthdr;
break;
default:
goto out;
}
if (proto == IPPROTO_UDP) {
udph = udp_hdr(skb);
port = be16_to_cpu(udph->dest);
}
/* Verify if UDP port is being offloaded by HW */
if (port && mlx5e_vxlan_lookup_port(priv, port))
return features;
out:
/* Disable CSUM and GSO if the udp dport is not offloaded by HW */
return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
struct net_device *netdev,
netdev_features_t features)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
features = vlan_features_check(skb, features);
features = vxlan_features_check(skb, features);
/* Validate if the tunneled packet is being offloaded by HW */
if (skb->encapsulation &&
(features & NETIF_F_CSUM_MASK || features & NETIF_F_GSO_MASK))
return mlx5e_vxlan_features_check(priv, skb, features);
return features;
}
static const struct net_device_ops mlx5e_netdev_ops_basic = {
.ndo_open = mlx5e_open,
.ndo_stop = mlx5e_close,
.ndo_start_xmit = mlx5e_xmit,
.ndo_setup_tc = mlx5e_ndo_setup_tc,
.ndo_select_queue = mlx5e_select_queue,
.ndo_get_stats64 = mlx5e_get_stats,
.ndo_set_rx_mode = mlx5e_set_rx_mode,
.ndo_set_mac_address = mlx5e_set_mac,
......@@ -2042,6 +2178,8 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = {
.ndo_open = mlx5e_open,
.ndo_stop = mlx5e_close,
.ndo_start_xmit = mlx5e_xmit,
.ndo_setup_tc = mlx5e_ndo_setup_tc,
.ndo_select_queue = mlx5e_select_queue,
.ndo_get_stats64 = mlx5e_get_stats,
.ndo_set_rx_mode = mlx5e_set_rx_mode,
.ndo_set_mac_address = mlx5e_set_mac,
......@@ -2050,6 +2188,9 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = {
.ndo_set_features = mlx5e_set_features,
.ndo_change_mtu = mlx5e_change_mtu,
.ndo_do_ioctl = mlx5e_ioctl,
.ndo_add_vxlan_port = mlx5e_add_vxlan_port,
.ndo_del_vxlan_port = mlx5e_del_vxlan_port,
.ndo_features_check = mlx5e_features_check,
.ndo_set_vf_mac = mlx5e_set_vf_mac,
.ndo_set_vf_vlan = mlx5e_set_vf_vlan,
.ndo_get_vf_config = mlx5e_get_vf_config,
......@@ -2089,6 +2230,24 @@ u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/;
}
#ifdef CONFIG_MLX5_CORE_EN_DCB
static void mlx5e_ets_init(struct mlx5e_priv *priv)
{
int i;
priv->params.ets.ets_cap = mlx5_max_tc(priv->mdev) + 1;
for (i = 0; i < priv->params.ets.ets_cap; i++) {
priv->params.ets.tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
priv->params.ets.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR;
priv->params.ets.prio_tc[i] = i;
}
/* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */
priv->params.ets.prio_tc[0] = 1;
priv->params.ets.prio_tc[1] = 0;
}
#endif
static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
struct net_device *netdev,
int num_channels)
......@@ -2112,7 +2271,6 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
priv->params.min_rx_wqes =
MLX5E_PARAMS_DEFAULT_MIN_RX_WQES;
priv->params.num_tc = 1;
priv->params.default_vlan_prio = 0;
priv->params.rss_hfunc = ETH_RSS_HASH_XOR;
netdev_rss_key_fill(priv->params.toeplitz_hash_key,
......@@ -2127,7 +2285,10 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
priv->mdev = mdev;
priv->netdev = netdev;
priv->params.num_channels = num_channels;
priv->default_vlan_prio = priv->params.default_vlan_prio;
#ifdef CONFIG_MLX5_CORE_EN_DCB
mlx5e_ets_init(priv);
#endif
spin_lock_init(&priv->async_events_spinlock);
mutex_init(&priv->state_lock);
......@@ -2156,10 +2317,14 @@ static void mlx5e_build_netdev(struct net_device *netdev)
SET_NETDEV_DEV(netdev, &mdev->pdev->dev);
if (MLX5_CAP_GEN(mdev, vport_group_manager))
if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
netdev->netdev_ops = &mlx5e_netdev_ops_sriov;
else
#ifdef CONFIG_MLX5_CORE_EN_DCB
netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
#endif
} else {
netdev->netdev_ops = &mlx5e_netdev_ops_basic;
}
netdev->watchdog_timeo = 15 * HZ;
......@@ -2182,6 +2347,16 @@ static void mlx5e_build_netdev(struct net_device *netdev)
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
if (mlx5e_vxlan_allowed(mdev)) {
netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
netdev->hw_enc_features |= NETIF_F_IP_CSUM;
netdev->hw_enc_features |= NETIF_F_RXCSUM;
netdev->hw_enc_features |= NETIF_F_TSO;
netdev->hw_enc_features |= NETIF_F_TSO6;
netdev->hw_enc_features |= NETIF_F_RXHASH;
netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
}
netdev->features = netdev->hw_features;
if (!priv->params.lro_en)
netdev->features &= ~NETIF_F_LRO;
......@@ -2228,7 +2403,9 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
if (mlx5e_check_required_hca_cap(mdev))
return NULL;
netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), nch, nch);
netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
nch * MLX5E_MAX_NUM_TC,
nch);
if (!netdev) {
mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
return NULL;
......@@ -2303,12 +2480,21 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
mlx5e_init_eth_addr(priv);
mlx5e_vxlan_init(priv);
#ifdef CONFIG_MLX5_CORE_EN_DCB
mlx5e_dcbnl_ieee_setets_core(priv, &priv->params.ets);
#endif
err = register_netdev(netdev);
if (err) {
mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
goto err_destroy_flow_tables;
}
if (mlx5e_vxlan_allowed(mdev))
vxlan_get_rx_port(netdev);
mlx5e_enable_async_events(priv);
schedule_work(&priv->set_rx_mode_work);
......@@ -2361,6 +2547,7 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
mlx5e_disable_async_events(priv);
flush_scheduled_work();
unregister_netdev(netdev);
mlx5e_vxlan_cleanup(priv);
mlx5e_destroy_flow_tables(priv);
mlx5e_destroy_tirs(priv);
mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT);
......
......@@ -167,14 +167,15 @@ static inline bool is_first_ethertype_ip(struct sk_buff *skb)
static inline void mlx5e_handle_csum(struct net_device *netdev,
struct mlx5_cqe64 *cqe,
struct mlx5e_rq *rq,
struct sk_buff *skb)
struct sk_buff *skb,
bool lro)
{
if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
goto csum_none;
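/* LRO sessions are already checksum-verified by the HW; any other
 * IP traffic gets CHECKSUM_COMPLETE built from the CQE checksum.
 */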
if (likely(cqe->hds_ip_ext & CQE_L4_OK)) {
if (lro) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
} else if (is_first_ethertype_ip(skb)) {
} else if (likely(is_first_ethertype_ip(skb))) {
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
rq->stats.csum_sw++;
......@@ -211,7 +212,7 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
mlx5e_fill_hwstamp(tstamp, get_cqe_ts(cqe), skb_hwtstamps(skb));
mlx5e_handle_csum(netdev, cqe, rq, skb);
mlx5e_handle_csum(netdev, cqe, rq, skb, !!lro_num_seg);
skb->protocol = eth_type_trans(skb, netdev);
......
/*
* Copyright (c) 2015, Mellanox Technologies. All rights reserved.
* Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
......@@ -109,12 +109,10 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
{
struct mlx5e_priv *priv = netdev_priv(dev);
int channel_ix = fallback(dev, skb);
int up = skb_vlan_tag_present(skb) ?
skb->vlan_tci >> VLAN_PRIO_SHIFT :
priv->default_vlan_prio;
int tc = netdev_get_prio_tc_map(dev, up);
int up = (netdev_get_num_tc(dev) && skb_vlan_tag_present(skb)) ?
skb->vlan_tci >> VLAN_PRIO_SHIFT : 0;
return priv->channeltc_to_txq_map[channel_ix][tc];
return priv->channeltc_to_txq_map[channel_ix][up];
}
static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
......@@ -187,9 +185,16 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
memset(wqe, 0, sizeof(*wqe));
if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
else
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
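/* For tunneled packets, also offload the inner L3/L4 checksums;
 * the outer L4 (UDP) checksum is not requested from the HW.
 */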
if (skb->encapsulation) {
eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM |
MLX5_ETH_WQE_L4_INNER_CSUM;
sq->stats.csum_offload_inner++;
} else {
eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
}
} else
sq->stats.csum_offload_none++;
if (sq->cc != sq->prev_cc) {
......@@ -198,16 +203,21 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
}
if (skb_is_gso(skb)) {
u32 payload_len;
eseg->mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
opcode = MLX5_OPCODE_LSO;
ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
payload_len = skb->len - ihs;
if (skb->encapsulation) {
ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
sq->stats.tso_inner_packets++;
sq->stats.tso_inner_bytes += skb->len - ihs;
} else {
ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
sq->stats.tso_packets++;
sq->stats.tso_bytes += skb->len - ihs;
}
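/* Wire bytes include the headers the HW replicates for every segment. */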
wi->num_bytes = skb->len +
(skb_shinfo(skb)->gso_segs - 1) * ihs;
sq->stats.tso_packets++;
sq->stats.tso_bytes += payload_len;
} else {
bf = sq->bf_budget &&
!skb->xmit_more &&
......
......@@ -32,6 +32,7 @@
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/port.h>
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
......@@ -363,3 +364,223 @@ int mlx5_query_port_pause(struct mlx5_core_dev *dev,
return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_port_pause);
int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx)
{
u32 in[MLX5_ST_SZ_DW(pfcc_reg)];
u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
memset(in, 0, sizeof(in));
MLX5_SET(pfcc_reg, in, local_port, 1);
MLX5_SET(pfcc_reg, in, pfctx, pfc_en_tx);
MLX5_SET(pfcc_reg, in, pfcrx, pfc_en_rx);
MLX5_SET_TO_ONES(pfcc_reg, in, prio_mask_tx);
MLX5_SET_TO_ONES(pfcc_reg, in, prio_mask_rx);
return mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PFCC, 0, 1);
}
EXPORT_SYMBOL_GPL(mlx5_set_port_pfc);
int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx)
{
u32 in[MLX5_ST_SZ_DW(pfcc_reg)];
u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
int err;
memset(in, 0, sizeof(in));
MLX5_SET(pfcc_reg, in, local_port, 1);
err = mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PFCC, 0, 0);
if (err)
return err;
if (pfc_en_tx)
*pfc_en_tx = MLX5_GET(pfcc_reg, out, pfctx);
if (pfc_en_rx)
*pfc_en_rx = MLX5_GET(pfcc_reg, out, pfcrx);
return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_port_pfc);
int mlx5_max_tc(struct mlx5_core_dev *mdev)
{
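/* A max_tc capability of 0 means the device default of 8 traffic
 * classes; the return value is the highest TC index.
 */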
u8 num_tc = MLX5_CAP_GEN(mdev, max_tc) ? : 8;
return num_tc - 1;
}
int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc)
{
u32 in[MLX5_ST_SZ_DW(qtct_reg)];
u32 out[MLX5_ST_SZ_DW(qtct_reg)];
int err;
int i;
memset(in, 0, sizeof(in));
for (i = 0; i < 8; i++) {
if (prio_tc[i] > mlx5_max_tc(mdev))
return -EINVAL;
MLX5_SET(qtct_reg, in, prio, i);
MLX5_SET(qtct_reg, in, tclass, prio_tc[i]);
err = mlx5_core_access_reg(mdev, in, sizeof(in), out,
sizeof(out), MLX5_REG_QTCT, 0, 1);
if (err)
return err;
}
return 0;
}
EXPORT_SYMBOL_GPL(mlx5_set_port_prio_tc);
static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
int inlen)
{
u32 out[MLX5_ST_SZ_DW(qtct_reg)];
if (!MLX5_CAP_GEN(mdev, ets))
return -ENOTSUPP;
return mlx5_core_access_reg(mdev, in, inlen, out, sizeof(out),
MLX5_REG_QETCR, 0, 1);
}
static int mlx5_query_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *out,
int outlen)
{
u32 in[MLX5_ST_SZ_DW(qtct_reg)];
if (!MLX5_CAP_GEN(mdev, ets))
return -ENOTSUPP;
memset(in, 0, sizeof(in));
return mlx5_core_access_reg(mdev, in, sizeof(in), out, outlen,
MLX5_REG_QETCR, 0, 0);
}
int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group)
{
u32 in[MLX5_ST_SZ_DW(qetc_reg)];
int i;
memset(in, 0, sizeof(in));
for (i = 0; i <= mlx5_max_tc(mdev); i++) {
MLX5_SET(qetc_reg, in, tc_configuration[i].g, 1);
MLX5_SET(qetc_reg, in, tc_configuration[i].group, tc_group[i]);
}
return mlx5_set_port_qetcr_reg(mdev, in, sizeof(in));
}
EXPORT_SYMBOL_GPL(mlx5_set_port_tc_group);
int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw)
{
u32 in[MLX5_ST_SZ_DW(qetc_reg)];
int i;
memset(in, 0, sizeof(in));
for (i = 0; i <= mlx5_max_tc(mdev); i++) {
MLX5_SET(qetc_reg, in, tc_configuration[i].b, 1);
MLX5_SET(qetc_reg, in, tc_configuration[i].bw_allocation, tc_bw[i]);
}
return mlx5_set_port_qetcr_reg(mdev, in, sizeof(in));
}
EXPORT_SYMBOL_GPL(mlx5_set_port_tc_bw_alloc);
int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev,
u8 *max_bw_value,
u8 *max_bw_units)
{
u32 in[MLX5_ST_SZ_DW(qetc_reg)];
void *ets_tcn_conf;
int i;
memset(in, 0, sizeof(in));
MLX5_SET(qetc_reg, in, port_number, 1);
for (i = 0; i <= mlx5_max_tc(mdev); i++) {
ets_tcn_conf = MLX5_ADDR_OF(qetc_reg, in, tc_configuration[i]);
MLX5_SET(ets_tcn_config_reg, ets_tcn_conf, r, 1);
MLX5_SET(ets_tcn_config_reg, ets_tcn_conf, max_bw_units,
max_bw_units[i]);
MLX5_SET(ets_tcn_config_reg, ets_tcn_conf, max_bw_value,
max_bw_value[i]);
}
return mlx5_set_port_qetcr_reg(mdev, in, sizeof(in));
}
EXPORT_SYMBOL_GPL(mlx5_modify_port_ets_rate_limit);
int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev,
u8 *max_bw_value,
u8 *max_bw_units)
{
u32 out[MLX5_ST_SZ_DW(qetc_reg)];
void *ets_tcn_conf;
int err;
int i;
err = mlx5_query_port_qetcr_reg(mdev, out, sizeof(out));
if (err)
return err;
for (i = 0; i <= mlx5_max_tc(mdev); i++) {
ets_tcn_conf = MLX5_ADDR_OF(qetc_reg, out, tc_configuration[i]);
max_bw_value[i] = MLX5_GET(ets_tcn_config_reg, ets_tcn_conf,
max_bw_value);
max_bw_units[i] = MLX5_GET(ets_tcn_config_reg, ets_tcn_conf,
max_bw_units);
}
return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_port_ets_rate_limit);
int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode)
{
u32 in[MLX5_ST_SZ_DW(set_wol_rol_in)];
u32 out[MLX5_ST_SZ_DW(set_wol_rol_out)];
memset(in, 0, sizeof(in));
memset(out, 0, sizeof(out));
MLX5_SET(set_wol_rol_in, in, opcode, MLX5_CMD_OP_SET_WOL_ROL);
MLX5_SET(set_wol_rol_in, in, wol_mode_valid, 1);
MLX5_SET(set_wol_rol_in, in, wol_mode, wol_mode);
return mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_set_port_wol);
int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode)
{
u32 in[MLX5_ST_SZ_DW(query_wol_rol_in)];
u32 out[MLX5_ST_SZ_DW(query_wol_rol_out)];
int err;
memset(in, 0, sizeof(in));
memset(out, 0, sizeof(out));
MLX5_SET(query_wol_rol_in, in, opcode, MLX5_CMD_OP_QUERY_WOL_ROL);
err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
out, sizeof(out));
if (!err)
*wol_mode = MLX5_GET(query_wol_rol_out, out, wol_mode);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_port_wol);
/*
* Copyright (c) 2016, Mellanox Technologies, Ltd. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
#include "vxlan.h"
void mlx5e_vxlan_init(struct mlx5e_priv *priv)
{
struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
spin_lock_init(&vxlan_db->lock);
INIT_RADIX_TREE(&vxlan_db->tree, GFP_ATOMIC);
}
static int mlx5e_vxlan_core_add_port_cmd(struct mlx5_core_dev *mdev, u16 port)
{
struct mlx5_outbox_hdr *hdr;
int err;
u32 in[MLX5_ST_SZ_DW(add_vxlan_udp_dport_in)];
u32 out[MLX5_ST_SZ_DW(add_vxlan_udp_dport_out)];
memset(in, 0, sizeof(in));
memset(out, 0, sizeof(out));
MLX5_SET(add_vxlan_udp_dport_in, in, opcode,
MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT);
MLX5_SET(add_vxlan_udp_dport_in, in, vxlan_udp_port, port);
err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
if (err)
return err;
hdr = (struct mlx5_outbox_hdr *)out;
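/* Any nonzero status in the outbox header is collapsed to -ENOMEM;
 * the syndrome is not decoded here.
 */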
return hdr->status ? -ENOMEM : 0;
}
static int mlx5e_vxlan_core_del_port_cmd(struct mlx5_core_dev *mdev, u16 port)
{
u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)];
u32 out[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_out)];
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
MLX5_SET(delete_vxlan_udp_dport_in, in, opcode,
MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
MLX5_SET(delete_vxlan_udp_dport_in, in, vxlan_udp_port, port);
return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out,
sizeof(out));
}
struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port)
{
struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
struct mlx5e_vxlan *vxlan;
spin_lock(&vxlan_db->lock);
vxlan = radix_tree_lookup(&vxlan_db->tree, port);
spin_unlock(&vxlan_db->lock);
return vxlan;
}
int mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port)
{
struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
struct mlx5e_vxlan *vxlan;
int err;
err = mlx5e_vxlan_core_add_port_cmd(priv->mdev, port);
if (err)
return err;
vxlan = kzalloc(sizeof(*vxlan), GFP_KERNEL);
if (!vxlan) {
err = -ENOMEM;
goto err_delete_port;
}
vxlan->udp_port = port;
spin_lock_irq(&vxlan_db->lock);
err = radix_tree_insert(&vxlan_db->tree, vxlan->udp_port, vxlan);
spin_unlock_irq(&vxlan_db->lock);
if (err)
goto err_free;
return 0;
err_free:
kfree(vxlan);
err_delete_port:
mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
return err;
}
static void __mlx5e_vxlan_core_del_port(struct mlx5e_priv *priv, u16 port)
{
struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
struct mlx5e_vxlan *vxlan;
spin_lock_irq(&vxlan_db->lock);
vxlan = radix_tree_delete(&vxlan_db->tree, port);
spin_unlock_irq(&vxlan_db->lock);
if (!vxlan)
return;
mlx5e_vxlan_core_del_port_cmd(priv->mdev, vxlan->udp_port);
kfree(vxlan);
}
void mlx5e_vxlan_del_port(struct mlx5e_priv *priv, u16 port)
{
if (!mlx5e_vxlan_lookup_port(priv, port))
return;
__mlx5e_vxlan_core_del_port(priv, port);
}
void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv)
{
struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
struct mlx5e_vxlan *vxlan;
unsigned int port = 0;
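/* The lock is dropped around each delete: __mlx5e_vxlan_core_del_port
 * takes it again for the tree removal and then issues a blocking
 * firmware command. The gang lookup resumes from the last port seen.
 */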
spin_lock_irq(&vxlan_db->lock);
while (radix_tree_gang_lookup(&vxlan_db->tree, (void **)&vxlan, port, 1)) {
port = vxlan->udp_port;
spin_unlock_irq(&vxlan_db->lock);
__mlx5e_vxlan_core_del_port(priv, (u16)port);
spin_lock_irq(&vxlan_db->lock);
}
spin_unlock_irq(&vxlan_db->lock);
}
/*
* Copyright (c) 2016, Mellanox Technologies, Ltd. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __MLX5_VXLAN_H__
#define __MLX5_VXLAN_H__
#include <linux/mlx5/driver.h>
#include "en.h"
struct mlx5e_vxlan {
u16 udp_port;
};
static inline bool mlx5e_vxlan_allowed(struct mlx5_core_dev *mdev)
{
return (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) &&
mlx5_core_is_pf(mdev));
}
void mlx5e_vxlan_init(struct mlx5e_priv *priv);
int mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port);
void mlx5e_vxlan_del_port(struct mlx5e_priv *priv, u16 port);
struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port);
void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv);
#endif /* __MLX5_VXLAN_H__ */
......@@ -350,6 +350,12 @@ enum {
MLX5_SET_PORT_PKEY_TABLE = 20,
};
enum {
MLX5_BW_NO_LIMIT = 0,
MLX5_100_MBPS_UNIT = 3,
MLX5_GBPS_UNIT = 4,
};
enum {
MLX5_MAX_PAGE_SHIFT = 31
};
......@@ -1177,6 +1183,17 @@ enum {
MLX5_RQC_RQ_TYPE_MEMORY_RQ_RPM = 0x1,
};
enum mlx5_wol_mode {
MLX5_WOL_DISABLE = 0,
MLX5_WOL_SECURED_MAGIC = 1 << 1,
MLX5_WOL_MAGIC = 1 << 2,
MLX5_WOL_ARP = 1 << 3,
MLX5_WOL_BROADCAST = 1 << 4,
MLX5_WOL_MULTICAST = 1 << 5,
MLX5_WOL_UNICAST = 1 << 6,
MLX5_WOL_PHY_ACTIVITY = 1 << 7,
};
/* MLX5 DEV CAPs */
/* TODO: EAT.ME */
......
......@@ -99,6 +99,8 @@ enum {
};
enum {
MLX5_REG_QETCR = 0x4005,
MLX5_REG_QTCT = 0x400a,
MLX5_REG_PCAP = 0x5001,
MLX5_REG_PMTU = 0x5003,
MLX5_REG_PTYS = 0x5004,
......@@ -794,37 +796,6 @@ int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
int size_in, void *data_out, int size_out,
u16 reg_num, int arg, int write);
int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
int ptys_size, int proto_mask, u8 local_port);
int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev,
u32 *proto_cap, int proto_mask);
int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev,
u32 *proto_admin, int proto_mask);
int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
u8 *link_width_oper, u8 local_port);
int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev,
u8 *proto_oper, int proto_mask,
u8 local_port);
int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
int proto_mask);
int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
enum mlx5_port_status status);
int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
enum mlx5_port_status *status);
int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port);
void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 port);
void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
u8 port);
int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
u8 *vl_hw_cap, u8 local_port);
int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause);
int mlx5_query_port_pause(struct mlx5_core_dev *dev,
u32 *rx_pause, u32 *tx_pause);
int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
......
......@@ -166,6 +166,8 @@ enum {
MLX5_CMD_OP_SET_L2_TABLE_ENTRY = 0x829,
MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY = 0x82a,
MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY = 0x82b,
MLX5_CMD_OP_SET_WOL_ROL = 0x830,
MLX5_CMD_OP_QUERY_WOL_ROL = 0x831,
MLX5_CMD_OP_CREATE_TIR = 0x900,
MLX5_CMD_OP_MODIFY_TIR = 0x901,
MLX5_CMD_OP_DESTROY_TIR = 0x902,
......@@ -729,7 +731,19 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_1bf[0x3];
u8 log_max_msg[0x5];
u8 reserved_at_1c7[0x18];
u8 reserved_at_1c7[0x4];
u8 max_tc[0x4];
u8 reserved_at_1cf[0x6];
u8 rol_s[0x1];
u8 rol_g[0x1];
u8 reserved_at_1d7[0x1];
u8 wol_s[0x1];
u8 wol_g[0x1];
u8 wol_a[0x1];
u8 wol_b[0x1];
u8 wol_m[0x1];
u8 wol_u[0x1];
u8 wol_p[0x1];
u8 stat_rate_support[0x10];
u8 reserved_at_1ef[0xc];
......@@ -6871,6 +6885,54 @@ struct mlx5_ifc_mtt_bits {
u8 rd_en[0x1];
};
struct mlx5_ifc_query_wol_rol_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
u8 syndrome[0x20];
u8 reserved_at_40[0x10];
u8 rol_mode[0x8];
u8 wol_mode[0x8];
u8 reserved_at_60[0x20];
};
struct mlx5_ifc_query_wol_rol_in_bits {
u8 opcode[0x10];
u8 reserved_at_10[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
u8 reserved_at_40[0x40];
};
struct mlx5_ifc_set_wol_rol_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
u8 syndrome[0x20];
u8 reserved_at_40[0x40];
};
struct mlx5_ifc_set_wol_rol_in_bits {
u8 opcode[0x10];
u8 reserved_at_10[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
u8 rol_mode_valid[0x1];
u8 wol_mode_valid[0x1];
u8 reserved_at_42[0xe];
u8 rol_mode[0x8];
u8 wol_mode[0x8];
u8 reserved_at_60[0x20];
};
enum {
MLX5_INITIAL_SEG_NIC_INTERFACE_FULL_DRIVER = 0x0,
MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED = 0x1,
......@@ -7061,4 +7123,49 @@ struct mlx5_ifc_modify_flow_table_in_bits {
u8 reserved_at_100[0x100];
};
struct mlx5_ifc_ets_tcn_config_reg_bits {
u8 g[0x1];
u8 b[0x1];
u8 r[0x1];
u8 reserved_at_3[0x9];
u8 group[0x4];
u8 reserved_at_10[0x9];
u8 bw_allocation[0x7];
u8 reserved_at_20[0xc];
u8 max_bw_units[0x4];
u8 reserved_at_30[0x8];
u8 max_bw_value[0x8];
};
struct mlx5_ifc_ets_global_config_reg_bits {
u8 reserved_at_0[0x2];
u8 r[0x1];
u8 reserved_at_3[0x1d];
u8 reserved_at_20[0xc];
u8 max_bw_units[0x4];
u8 reserved_at_30[0x8];
u8 max_bw_value[0x8];
};
struct mlx5_ifc_qetc_reg_bits {
u8 reserved_at_0[0x8];
u8 port_number[0x8];
u8 reserved_at_10[0x30];
struct mlx5_ifc_ets_tcn_config_reg_bits tc_configuration[0x8];
struct mlx5_ifc_ets_global_config_reg_bits global_configuration;
};
struct mlx5_ifc_qtct_reg_bits {
u8 reserved_at_0[0x8];
u8 port_number[0x8];
u8 reserved_at_10[0xd];
u8 prio[0x3];
u8 reserved_at_20[0x1d];
u8 tclass[0x3];
};
#endif /* MLX5_IFC_H */
/*
* Copyright (c) 2016, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __MLX5_PORT_H__
#define __MLX5_PORT_H__
#include <linux/mlx5/driver.h>
int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
int ptys_size, int proto_mask, u8 local_port);
int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev,
u32 *proto_cap, int proto_mask);
int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev,
u32 *proto_admin, int proto_mask);
int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
u8 *link_width_oper, u8 local_port);
int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev,
u8 *proto_oper, int proto_mask,
u8 local_port);
int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
int proto_mask);
int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
enum mlx5_port_status status);
int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
enum mlx5_port_status *status);
int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port);
void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 port);
void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
u8 port);
int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
u8 *vl_hw_cap, u8 local_port);
int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause);
int mlx5_query_port_pause(struct mlx5_core_dev *dev,
u32 *rx_pause, u32 *tx_pause);
int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx);
int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx,
u8 *pfc_en_rx);
int mlx5_max_tc(struct mlx5_core_dev *mdev);
int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc);
int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group);
int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw);
int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev,
u8 *max_bw_value,
u8 *max_bw_unit);
int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev,
u8 *max_bw_value,
u8 *max_bw_unit);
int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode);
int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode);
#endif /* __MLX5_PORT_H__ */