Commit 63bfc508 authored by David S. Miller

Merge tag 'mlx5-updates-2017-08-17-V2' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2017-08-17

Some updates for the mlx5 ethernet and IPoIB device drivers.

Eran added support for managing the physical link state from the netdevice
upon interface open/close requests.

Feras fixed the driver name shown in ethtool for IPoIB interfaces.
Shalom added support for ethtool get link settings on IPoIB netdevices.

Gal and Eran exposed new diagnostic counters for outbound PCIe stalls and
buffer overflow, and for RX buffer fullness statistics.

Code cleanups from Or Gerlitz.
Variable type cleanups from Gal.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents d6e1e46f 9da5106c
@@ -258,6 +258,7 @@ EXPORT_SYMBOL_GPL(mlx5_db_alloc);
 void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
 {
         u32 db_per_page = PAGE_SIZE / cache_line_size();
+
         mutex_lock(&dev->priv.pgdir_mutex);
         __set_bit(db->index, db->u.pgdir->bitmap);
...
@@ -802,7 +802,6 @@ static void cmd_work_handler(struct work_struct *work)
         bool poll_cmd = ent->polling;
         int alloc_ret;
         sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
         down(sem);
         if (!ent->page_queue) {
...
@@ -176,7 +176,6 @@ static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv)
 int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset)
 {
         switch (sset) {
         case ETH_SS_STATS:
                 return NUM_SW_COUNTERS +
@@ -207,7 +206,7 @@ static int mlx5e_get_sset_count(struct net_device *dev, int sset)
         return mlx5e_ethtool_get_sset_count(priv, sset);
 }

-static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)
+static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, u8 *data)
 {
         int i, j, tc, prio, idx = 0;
         unsigned long pfc_combined;
@@ -242,10 +241,22 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)
                 strcpy(data + (idx++) * ETH_GSTRING_LEN,
                        pport_phy_statistical_stats_desc[i].format);

+        for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS(priv); i++)
+                strcpy(data + (idx++) * ETH_GSTRING_LEN,
+                       pport_eth_ext_stats_desc[i].format);
+
         for (i = 0; i < NUM_PCIE_PERF_COUNTERS(priv); i++)
                 strcpy(data + (idx++) * ETH_GSTRING_LEN,
                        pcie_perf_stats_desc[i].format);

+        for (i = 0; i < NUM_PCIE_PERF_COUNTERS64(priv); i++)
+                strcpy(data + (idx++) * ETH_GSTRING_LEN,
+                       pcie_perf_stats_desc64[i].format);
+
+        for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS(priv); i++)
+                strcpy(data + (idx++) * ETH_GSTRING_LEN,
+                       pcie_perf_stall_stats_desc[i].format);
+
         for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
                 for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
                         sprintf(data + (idx++) * ETH_GSTRING_LEN,
@@ -297,8 +308,7 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)
                         priv->channel_tc2txq[i][tc]);
 }

-void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv,
-                               uint32_t stringset, uint8_t *data)
+void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv, u32 stringset, u8 *data)
 {
         int i;
@@ -320,8 +330,7 @@ void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv,
         }
 }

-static void mlx5e_get_strings(struct net_device *dev,
-                              uint32_t stringset, uint8_t *data)
+static void mlx5e_get_strings(struct net_device *dev, u32 stringset, u8 *data)
 {
         struct mlx5e_priv *priv = netdev_priv(dev);
@@ -373,10 +382,22 @@ void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
                 data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
                                                   pport_phy_statistical_stats_desc, i);

+        for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS(priv); i++)
+                data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.eth_ext_counters,
+                                                  pport_eth_ext_stats_desc, i);
+
         for (i = 0; i < NUM_PCIE_PERF_COUNTERS(priv); i++)
                 data[idx++] = MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
                                                   pcie_perf_stats_desc, i);

+        for (i = 0; i < NUM_PCIE_PERF_COUNTERS64(priv); i++)
+                data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pcie.pcie_perf_counters,
+                                                  pcie_perf_stats_desc64, i);
+
+        for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS(priv); i++)
+                data[idx++] = MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
+                                                  pcie_perf_stall_stats_desc, i);
+
         for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
                 for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
                         data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
@@ -964,24 +985,27 @@ static u8 get_connector_port(u32 eth_proto, u8 connector_type)
         if (connector_type && connector_type < MLX5E_CONNECTOR_TYPE_NUMBER)
                 return ptys2connector_type[connector_type];

-        if (eth_proto & (MLX5E_PROT_MASK(MLX5E_10GBASE_SR)
-                         | MLX5E_PROT_MASK(MLX5E_40GBASE_SR4)
-                         | MLX5E_PROT_MASK(MLX5E_100GBASE_SR4)
-                         | MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII))) {
-                         return PORT_FIBRE;
+        if (eth_proto &
+            (MLX5E_PROT_MASK(MLX5E_10GBASE_SR) |
+             MLX5E_PROT_MASK(MLX5E_40GBASE_SR4) |
+             MLX5E_PROT_MASK(MLX5E_100GBASE_SR4) |
+             MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII))) {
+                return PORT_FIBRE;
         }

-        if (eth_proto & (MLX5E_PROT_MASK(MLX5E_40GBASE_CR4)
-                         | MLX5E_PROT_MASK(MLX5E_10GBASE_CR)
-                         | MLX5E_PROT_MASK(MLX5E_100GBASE_CR4))) {
-                         return PORT_DA;
+        if (eth_proto &
+            (MLX5E_PROT_MASK(MLX5E_40GBASE_CR4) |
+             MLX5E_PROT_MASK(MLX5E_10GBASE_CR) |
+             MLX5E_PROT_MASK(MLX5E_100GBASE_CR4))) {
+                return PORT_DA;
         }

-        if (eth_proto & (MLX5E_PROT_MASK(MLX5E_10GBASE_KX4)
-                         | MLX5E_PROT_MASK(MLX5E_10GBASE_KR)
-                         | MLX5E_PROT_MASK(MLX5E_40GBASE_KR4)
-                         | MLX5E_PROT_MASK(MLX5E_100GBASE_KR4))) {
-                         return PORT_NONE;
+        if (eth_proto &
+            (MLX5E_PROT_MASK(MLX5E_10GBASE_KX4) |
+             MLX5E_PROT_MASK(MLX5E_10GBASE_KR) |
+             MLX5E_PROT_MASK(MLX5E_40GBASE_KR4) |
+             MLX5E_PROT_MASK(MLX5E_100GBASE_KR4))) {
+                return PORT_NONE;
         }

         return PORT_OTHER;
...
@@ -288,6 +288,12 @@ static void mlx5e_update_pport_counters(struct mlx5e_priv *priv, bool full)
                 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
         }

+        if (MLX5_CAP_PCAM_FEATURE(mdev, rx_buffer_fullness_counters)) {
+                out = pstats->eth_ext_counters;
+                MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
+                mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+        }
+
         MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
         for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
                 out = pstats->per_prio_counters[prio];
@@ -2682,6 +2688,8 @@ int mlx5e_open(struct net_device *netdev)
         mutex_lock(&priv->state_lock);
         err = mlx5e_open_locked(netdev);
+        if (!err)
+                mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP);
         mutex_unlock(&priv->state_lock);

         return err;
@@ -2716,6 +2724,7 @@ int mlx5e_close(struct net_device *netdev)
                 return -ENODEV;

         mutex_lock(&priv->state_lock);
+        mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_DOWN);
         err = mlx5e_close_locked(netdev);
         mutex_unlock(&priv->state_lock);
@@ -4187,6 +4196,10 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
         mlx5e_init_l2_addr(priv);

+        /* Marking the link as currently not needed by the Driver */
+        if (!netif_running(netdev))
+                mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN);
+
         /* MTU range: 68 - hw-specific max */
         netdev->min_mtu = ETH_MIN_MTU;
         mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
...
@@ -613,15 +613,18 @@ static int mlx5e_rep_open(struct net_device *dev)
         struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
         int err;

-        err = mlx5e_open(dev);
+        mutex_lock(&priv->state_lock);
+        err = mlx5e_open_locked(dev);
         if (err)
-                return err;
+                goto unlock;

-        err = mlx5_eswitch_set_vport_state(esw, rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_UP);
-        if (!err)
+        if (!mlx5_eswitch_set_vport_state(esw, rep->vport,
+                                          MLX5_ESW_VPORT_ADMIN_STATE_UP))
                 netif_carrier_on(dev);

-        return 0;
+unlock:
+        mutex_unlock(&priv->state_lock);
+        return err;
 }

 static int mlx5e_rep_close(struct net_device *dev)
@@ -630,10 +633,13 @@ static int mlx5e_rep_close(struct net_device *dev)
         struct mlx5e_rep_priv *rpriv = priv->ppriv;
         struct mlx5_eswitch_rep *rep = rpriv->rep;
         struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+        int ret;

+        mutex_lock(&priv->state_lock);
         (void)mlx5_eswitch_set_vport_state(esw, rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
-        return mlx5e_close(dev);
+        ret = mlx5e_close_locked(dev);
+        mutex_unlock(&priv->state_lock);
+        return ret;
 }

 static int mlx5e_rep_get_phys_port_name(struct net_device *dev,
...
@@ -509,8 +509,8 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
         u16 tot_len;
         u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
-        int tcp_ack = ((CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA == l4_hdr_type) ||
-                       (CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA == l4_hdr_type));
+        int tcp_ack = ((l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) ||
+                       (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA));

         skb->mac_len = ETH_HLEN;
         proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);
...
@@ -47,7 +47,7 @@

 struct counter_desc {
         char format[ETH_GSTRING_LEN];
-        int offset; /* Byte offset */
+        size_t offset; /* Byte offset */
 };

 struct mlx5e_sw_stats {
@@ -216,6 +216,12 @@ static const struct counter_desc vport_stats_desc[] = {
         MLX5_GET64(ppcnt_reg, pstats->per_prio_counters[prio], \
                    counter_set.eth_per_prio_grp_data_layout.c##_high)
 #define NUM_PPORT_PRIO 8
+#define PPORT_ETH_EXT_OFF(c) \
+        MLX5_BYTE_OFF(ppcnt_reg, \
+                      counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
+#define PPORT_ETH_EXT_GET(pstats, c) \
+        MLX5_GET64(ppcnt_reg, (pstats)->eth_ext_counters, \
+                   counter_set.eth_extended_cntrs_grp_data_layout.c##_high)

 struct mlx5e_pport_stats {
         __be64 IEEE_802_3_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
@@ -224,6 +230,7 @@ struct mlx5e_pport_stats {
         __be64 per_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)];
         __be64 phy_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
         __be64 phy_statistical_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
+        __be64 eth_ext_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
 };

 static const struct counter_desc pport_802_3_stats_desc[] = {
@@ -290,12 +297,22 @@ static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
         { "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
 };

+static const struct counter_desc pport_eth_ext_stats_desc[] = {
+        { "rx_buffer_passed_thres_phy", PPORT_ETH_EXT_OFF(rx_buffer_almost_full) },
+};
+
 #define PCIE_PERF_OFF(c) \
         MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
 #define PCIE_PERF_GET(pcie_stats, c) \
         MLX5_GET(mpcnt_reg, (pcie_stats)->pcie_perf_counters, \
                  counter_set.pcie_perf_cntrs_grp_data_layout.c)
+#define PCIE_PERF_OFF64(c) \
+        MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)
+#define PCIE_PERF_GET64(pcie_stats, c) \
+        MLX5_GET64(mpcnt_reg, (pcie_stats)->pcie_perf_counters, \
+                   counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)

 struct mlx5e_pcie_stats {
         __be64 pcie_perf_counters[MLX5_ST_SZ_QW(mpcnt_reg)];
 };
@@ -305,6 +322,17 @@ static const struct counter_desc pcie_perf_stats_desc[] = {
         { "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
 };

+static const struct counter_desc pcie_perf_stats_desc64[] = {
+        { "outbound_pci_buffer_overflow", PCIE_PERF_OFF64(tx_overflow_buffer_pkt) },
+};
+
+static const struct counter_desc pcie_perf_stall_stats_desc[] = {
+        { "outbound_pci_stalled_rd", PCIE_PERF_OFF(outbound_stalled_reads) },
+        { "outbound_pci_stalled_wr", PCIE_PERF_OFF(outbound_stalled_writes) },
+        { "outbound_pci_stalled_rd_events", PCIE_PERF_OFF(outbound_stalled_reads_events) },
+        { "outbound_pci_stalled_wr_events", PCIE_PERF_OFF(outbound_stalled_writes_events) },
+};
+
 struct mlx5e_rq_stats {
         u64 packets;
         u64 bytes;
@@ -397,17 +425,29 @@ static const struct counter_desc sq_stats_desc[] = {
 #define NUM_PCIE_PERF_COUNTERS(priv) \
         (ARRAY_SIZE(pcie_perf_stats_desc) * \
          MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
+#define NUM_PCIE_PERF_COUNTERS64(priv) \
+        (ARRAY_SIZE(pcie_perf_stats_desc64) * \
+         MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
+#define NUM_PCIE_PERF_STALL_COUNTERS(priv) \
+        (ARRAY_SIZE(pcie_perf_stall_stats_desc) * \
+         MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
 #define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS \
         ARRAY_SIZE(pport_per_prio_traffic_stats_desc)
 #define NUM_PPORT_PER_PRIO_PFC_COUNTERS \
         ARRAY_SIZE(pport_per_prio_pfc_stats_desc)
+#define NUM_PPORT_ETH_EXT_COUNTERS(priv) \
+        (ARRAY_SIZE(pport_eth_ext_stats_desc) * \
+         MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
 #define NUM_PPORT_COUNTERS(priv)        (NUM_PPORT_802_3_COUNTERS + \
                                          NUM_PPORT_2863_COUNTERS + \
                                          NUM_PPORT_2819_COUNTERS + \
                                          NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv) + \
                                          NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * \
-                                         NUM_PPORT_PRIO)
-#define NUM_PCIE_COUNTERS(priv)         NUM_PCIE_PERF_COUNTERS(priv)
+                                         NUM_PPORT_PRIO + \
+                                         NUM_PPORT_ETH_EXT_COUNTERS(priv))
+#define NUM_PCIE_COUNTERS(priv)         (NUM_PCIE_PERF_COUNTERS(priv) + \
+                                         NUM_PCIE_PERF_COUNTERS64(priv) + \
+                                         NUM_PCIE_PERF_STALL_COUNTERS(priv))
 #define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc)
 #define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc)
...
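The stats code above relies on one recurring pattern: a counter_desc table pairs a printf-style name with a byte offset into a raw, big-endian register dump (PPCNT or MPCNT), and the ethtool handlers walk that table with the MLX5E_READ_CTR*_BE helpers. Below is a minimal, self-contained userspace sketch of that same idea for readers unfamiliar with the pattern; the struct, table, and sample buffer are made up for illustration and are not the kernel macros themselves.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-in for the kernel's counter_desc: a name plus a
 * byte offset into a raw counter block. */
struct counter_desc_ex {
        char name[32];
        size_t offset;
};

/* Pretend this is a PPCNT/MPCNT-style dump copied from firmware (big endian). */
static unsigned char raw_block[64];

static const struct counter_desc_ex descs[] = {
        { "rx_buffer_passed_thres", 0 },
        { "outbound_pci_stalled_rd", 8 },
        { "outbound_pci_stalled_wr", 16 },
};

/* Read a 64-bit big-endian value at the given byte offset. */
static uint64_t read_be64_at(const unsigned char *block, size_t off)
{
        uint64_t v = 0;
        size_t i;

        for (i = 0; i < 8; i++)
                v = (v << 8) | block[off + i];
        return v;
}

int main(void)
{
        size_t i;

        raw_block[8 + 7] = 42;  /* plant a recognizable value at offset 8 */

        for (i = 0; i < sizeof(descs) / sizeof(descs[0]); i++)
                printf("%s: %llu\n", descs[i].name,
                       (unsigned long long)read_be64_at(raw_block, descs[i].offset));
        return 0;
}

Capability bits (MLX5_CAP_PCAM_FEATURE / MLX5_CAP_MCAM_FEATURE in the macros above) simply multiply the table size by 0 or 1, so unsupported counter groups drop out of both the string list and the value list consistently.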
@@ -188,6 +188,7 @@ static enum mlx5_dev_event port_subtype_event(u8 subtype)
 static void eq_update_ci(struct mlx5_eq *eq, int arm)
 {
         __be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
         u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);
+
         __raw_writel((__force u32)cpu_to_be32(val), addr);
         /* We still want ordering, just not swabbing, so add a barrier */
...
@@ -39,10 +39,11 @@ static void mlx5i_get_drvinfo(struct net_device *dev,
         struct mlx5e_priv *priv = mlx5i_epriv(dev);

         mlx5e_ethtool_get_drvinfo(priv, drvinfo);
+        strlcpy(drvinfo->driver, DRIVER_NAME "[ib_ipoib]",
+                sizeof(drvinfo->driver));
 }

-static void mlx5i_get_strings(struct net_device *dev,
-                              uint32_t stringset, uint8_t *data)
+static void mlx5i_get_strings(struct net_device *dev, u32 stringset, u8 *data)
 {
         struct mlx5e_priv *priv = mlx5i_epriv(dev);
@@ -129,17 +130,123 @@ static int mlx5i_flash_device(struct net_device *netdev,
         return mlx5e_ethtool_flash_device(priv, flash);
 }

+enum mlx5_ptys_width {
+        MLX5_PTYS_WIDTH_1X = 1 << 0,
+        MLX5_PTYS_WIDTH_2X = 1 << 1,
+        MLX5_PTYS_WIDTH_4X = 1 << 2,
+        MLX5_PTYS_WIDTH_8X = 1 << 3,
+        MLX5_PTYS_WIDTH_12X = 1 << 4,
+};
+
+static inline int mlx5_ptys_width_enum_to_int(enum mlx5_ptys_width width)
+{
+        switch (width) {
+        case MLX5_PTYS_WIDTH_1X:  return 1;
+        case MLX5_PTYS_WIDTH_2X:  return 2;
+        case MLX5_PTYS_WIDTH_4X:  return 4;
+        case MLX5_PTYS_WIDTH_8X:  return 8;
+        case MLX5_PTYS_WIDTH_12X: return 12;
+        default:                  return -1;
+        }
+}
+
+enum mlx5_ptys_rate {
+        MLX5_PTYS_RATE_SDR = 1 << 0,
+        MLX5_PTYS_RATE_DDR = 1 << 1,
+        MLX5_PTYS_RATE_QDR = 1 << 2,
+        MLX5_PTYS_RATE_FDR10 = 1 << 3,
+        MLX5_PTYS_RATE_FDR = 1 << 4,
+        MLX5_PTYS_RATE_EDR = 1 << 5,
+        MLX5_PTYS_RATE_HDR = 1 << 6,
+};
+
+static inline int mlx5_ptys_rate_enum_to_int(enum mlx5_ptys_rate rate)
+{
+        switch (rate) {
+        case MLX5_PTYS_RATE_SDR:   return 2500;
+        case MLX5_PTYS_RATE_DDR:   return 5000;
+        case MLX5_PTYS_RATE_QDR:
+        case MLX5_PTYS_RATE_FDR10: return 10000;
+        case MLX5_PTYS_RATE_FDR:   return 14000;
+        case MLX5_PTYS_RATE_EDR:   return 25000;
+        case MLX5_PTYS_RATE_HDR:   return 50000;
+        default:                   return -1;
+        }
+}
+
+static int mlx5i_get_port_settings(struct net_device *netdev,
+                                   u16 *ib_link_width_oper, u16 *ib_proto_oper)
+{
+        struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+        struct mlx5_core_dev *mdev = priv->mdev;
+        u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0};
+        int ret;
+
+        ret = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_IB, 1);
+        if (ret)
+                return ret;
+
+        *ib_link_width_oper = MLX5_GET(ptys_reg, out, ib_link_width_oper);
+        *ib_proto_oper = MLX5_GET(ptys_reg, out, ib_proto_oper);
+
+        return 0;
+}
+
+static int mlx5i_get_speed_settings(u16 ib_link_width_oper, u16 ib_proto_oper)
+{
+        int rate, width;
+
+        rate = mlx5_ptys_rate_enum_to_int(ib_proto_oper);
+        if (rate < 0)
+                return -EINVAL;
+        width = mlx5_ptys_width_enum_to_int(ib_link_width_oper);
+        if (width < 0)
+                return -EINVAL;
+
+        return rate * width;
+}
+
+static int mlx5i_get_link_ksettings(struct net_device *netdev,
+                                    struct ethtool_link_ksettings *link_ksettings)
+{
+        u16 ib_link_width_oper;
+        u16 ib_proto_oper;
+        int speed, ret;
+
+        ret = mlx5i_get_port_settings(netdev, &ib_link_width_oper, &ib_proto_oper);
+        if (ret)
+                return ret;
+
+        ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
+        ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
+
+        speed = mlx5i_get_speed_settings(ib_link_width_oper, ib_proto_oper);
+        if (speed < 0)
+                return -EINVAL;
+
+        link_ksettings->base.duplex = DUPLEX_FULL;
+        link_ksettings->base.port = PORT_OTHER;
+        link_ksettings->base.autoneg = AUTONEG_DISABLE;
+        link_ksettings->base.speed = speed;
+
+        return 0;
+}
+
 const struct ethtool_ops mlx5i_ethtool_ops = {
         .get_drvinfo = mlx5i_get_drvinfo,
         .get_strings = mlx5i_get_strings,
         .get_sset_count = mlx5i_get_sset_count,
         .get_ethtool_stats = mlx5i_get_ethtool_stats,
         .get_ringparam = mlx5i_get_ringparam,
         .set_ringparam = mlx5i_set_ringparam,
         .flash_device = mlx5i_flash_device,
         .get_channels = mlx5i_get_channels,
         .set_channels = mlx5i_set_channels,
         .get_coalesce = mlx5i_get_coalesce,
         .set_coalesce = mlx5i_set_coalesce,
         .get_ts_info = mlx5i_get_ts_info,
+        .get_link_ksettings = mlx5i_get_link_ksettings,
+        .get_link = ethtool_op_get_link,
 };
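The IPoIB link speed reported above is simply the per-lane PTYS rate multiplied by the lane width, e.g. an EDR link (25000 Mb/s per lane) at 4X width reports 100000 Mb/s. Below is a standalone userspace sketch of that same arithmetic; the helper names and the sample inputs are hypothetical, only the rate/width encodings mirror the enums in the hunk above.

#include <stdio.h>

/* Map a PTYS IB rate bit to Mb/s per lane (mirrors mlx5_ptys_rate_enum_to_int). */
static int ptys_rate_to_mbps(int rate)
{
        switch (rate) {
        case 1 << 0: return 2500;   /* SDR */
        case 1 << 1: return 5000;   /* DDR */
        case 1 << 2:                /* QDR */
        case 1 << 3: return 10000;  /* FDR10 */
        case 1 << 4: return 14000;  /* FDR */
        case 1 << 5: return 25000;  /* EDR */
        case 1 << 6: return 50000;  /* HDR */
        default:     return -1;
        }
}

/* Map a PTYS IB width bit to a lane count (mirrors mlx5_ptys_width_enum_to_int). */
static int ptys_width_to_lanes(int width)
{
        switch (width) {
        case 1 << 0: return 1;
        case 1 << 1: return 2;
        case 1 << 2: return 4;
        case 1 << 3: return 8;
        case 1 << 4: return 12;
        default:     return -1;
        }
}

int main(void)
{
        int rate = ptys_rate_to_mbps(1 << 5);    /* EDR */
        int lanes = ptys_width_to_lanes(1 << 2); /* 4X  */

        if (rate < 0 || lanes < 0)
                return 1;
        printf("link speed: %d Mb/s\n", rate * lanes); /* prints 100000 */
        return 0;
}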
@@ -836,7 +836,6 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
         return -EOPNOTSUPP;
 }

 static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
 {
         struct pci_dev *pdev = dev->pdev;
...
@@ -71,7 +71,6 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
                 sriov->vfs_ctx[vf].enabled = 1;
                 sriov->enabled_vfs++;
                 mlx5_core_dbg(dev, "successfully enabled VF* %d\n", vf);
         }

         return 0;
...
@@ -1538,7 +1538,17 @@ struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits {
         u8 port_transmit_wait_low[0x20];

-        u8 reserved_at_40[0x780];
+        u8 reserved_at_40[0x100];
+
+        u8 rx_buffer_almost_full_high[0x20];
+        u8 rx_buffer_almost_full_low[0x20];
+
+        u8 rx_buffer_full_high[0x20];
+        u8 rx_buffer_full_low[0x20];
+
+        u8 reserved_at_1c0[0x600];
 };

 struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits {
@@ -1854,7 +1864,19 @@ struct mlx5_ifc_pcie_perf_cntrs_grp_data_layout_bits {
         u8 crc_error_tlp[0x20];

-        u8 reserved_at_140[0x680];
+        u8 tx_overflow_buffer_pkt_high[0x20];
+        u8 tx_overflow_buffer_pkt_low[0x20];
+
+        u8 outbound_stalled_reads[0x20];
+        u8 outbound_stalled_writes[0x20];
+        u8 outbound_stalled_reads_events[0x20];
+        u8 outbound_stalled_writes_events[0x20];
+
+        u8 reserved_at_200[0x5c0];
 };

 struct mlx5_ifc_cmd_inter_comp_event_bits {
@@ -7713,8 +7735,9 @@ struct mlx5_ifc_peir_reg_bits {
 };

 struct mlx5_ifc_pcam_enhanced_features_bits {
-        u8 reserved_at_0[0x7c];
+        u8 reserved_at_0[0x7b];
+        u8 rx_buffer_fullness_counters[0x1];
         u8 ptys_connector_type[0x1];
         u8 reserved_at_7d[0x1];
         u8 ppcnt_discard_group[0x1];
@@ -7744,8 +7767,9 @@ struct mlx5_ifc_pcam_reg_bits {
 };

 struct mlx5_ifc_mcam_enhanced_features_bits {
-        u8 reserved_at_0[0x7d];
+        u8 reserved_at_0[0x7b];
+        u8 pcie_outbound_stalled[0x1];
+        u8 tx_overflow_buffer_pkt[0x1];
         u8 mtpps_enh_out_per_adj[0x1];
         u8 mtpps_fs[0x1];
         u8 pcie_performance_group[0x1];
...