Commit 18281f2d authored by Ido Schimmel, committed by David S. Miller

mlxsw: spectrum: Query cell size from firmware

As explained in the previous patch, the cell size may change in future
devices, so query it from the firmware instead of hard coding it.
Signed-off-by: Ido Schimmel <idosch@mellanox.com>
Signed-off-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent f417f04d
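
For quick reference, below is a minimal, self-contained sketch (not driver code) of the conversion scheme this patch moves to: the bytes-per-cell value is read from the device once, and shared-buffer sizes are then expressed in cells. The struct and function names in the sketch, the mock query helper, and the 96-byte value it returns are illustrative assumptions; in the driver the value comes from the new CELL_SIZE resource (0x2803) and the conversions are done by the mlxsw_sp_bytes_cells()/mlxsw_sp_cells_bytes() helpers added in the diff below.

/*
 * Illustrative sketch only, not mlxsw API: mimics a per-device cell size
 * queried once at init and used for byte <-> cell conversions afterwards.
 */
#include <stdint.h>
#include <stdio.h>

struct sb_info {
	uint32_t cell_size;	/* bytes per shared-buffer cell */
};

/* Stand-in for reading the CELL_SIZE resource from firmware. */
static uint32_t query_cell_size_from_fw(void)
{
	return 96;	/* value a current device would report */
}

/* Round up, mirroring mlxsw_sp_bytes_cells() in the patch. */
static uint32_t bytes_to_cells(const struct sb_info *sb, uint32_t bytes)
{
	return (bytes + sb->cell_size - 1) / sb->cell_size;
}

static uint32_t cells_to_bytes(const struct sb_info *sb, uint32_t cells)
{
	return cells * sb->cell_size;
}

int main(void)
{
	struct sb_info sb = { .cell_size = query_cell_size_from_fw() };
	uint32_t mtu = 1518;
	uint32_t cells = bytes_to_cells(&sb, mtu);

	printf("%u-byte MTU occupies %u cells (%u bytes of buffer)\n",
	       mtu, cells, cells_to_bytes(&sb, cells));
	return 0;
}

Rounding up in bytes_to_cells() matters because a frame that only partially fills its last cell still occupies the whole cell.
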
@@ -50,6 +50,7 @@ enum mlxsw_res_id {
 	MLXSW_RES_ID_MAX_LAG,
 	MLXSW_RES_ID_MAX_LAG_MEMBERS,
 	MLXSW_RES_ID_MAX_BUFFER_SIZE,
+	MLXSW_RES_ID_CELL_SIZE,
 	MLXSW_RES_ID_ACL_MAX_TCAM_REGIONS,
 	MLXSW_RES_ID_ACL_MAX_TCAM_RULES,
 	MLXSW_RES_ID_ACL_MAX_REGIONS,
......@@ -85,6 +86,7 @@ static u16 mlxsw_res_ids[] = {
[MLXSW_RES_ID_MAX_LAG] = 0x2520,
[MLXSW_RES_ID_MAX_LAG_MEMBERS] = 0x2521,
[MLXSW_RES_ID_MAX_BUFFER_SIZE] = 0x2802, /* Bytes */
[MLXSW_RES_ID_CELL_SIZE] = 0x2803, /* Bytes */
[MLXSW_RES_ID_ACL_MAX_TCAM_REGIONS] = 0x2901,
[MLXSW_RES_ID_ACL_MAX_TCAM_RULES] = 0x2902,
[MLXSW_RES_ID_ACL_MAX_REGIONS] = 0x2903,
@@ -359,9 +359,10 @@ static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
 	return false;
 }
 
-static int mlxsw_sp_span_mtu_to_buffsize(int mtu)
+static int mlxsw_sp_span_mtu_to_buffsize(const struct mlxsw_sp *mlxsw_sp,
+					 int mtu)
 {
-	return MLXSW_SP_BYTES_TO_CELLS(mtu * 5 / 2) + 1;
+	return mlxsw_sp_bytes_cells(mlxsw_sp, mtu * 5 / 2) + 1;
 }
 
 static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
......@@ -374,8 +375,9 @@ static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
* updated according to the mtu value
*/
if (mlxsw_sp_span_is_egress_mirror(port)) {
mlxsw_reg_sbib_pack(sbib_pl, port->local_port,
mlxsw_sp_span_mtu_to_buffsize(mtu));
u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, mtu);
mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
if (err) {
netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
@@ -412,8 +414,10 @@ mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
 
 	/* if it is an egress SPAN, bind a shared buffer to it */
 	if (type == MLXSW_SP_SPAN_EGRESS) {
-		mlxsw_reg_sbib_pack(sbib_pl, port->local_port,
-				    mlxsw_sp_span_mtu_to_buffsize(port->dev->mtu));
+		u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp,
+							     port->dev->mtu);
+
+		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
 		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
 		if (err) {
 			netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
@@ -800,28 +804,35 @@ static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
 	return 0;
 }
 
-static u16 mlxsw_sp_pg_buf_threshold_get(int mtu)
+static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp,
+					 int mtu)
 {
-	return 2 * MLXSW_SP_BYTES_TO_CELLS(mtu);
+	return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
 }
 
 #define MLXSW_SP_CELL_FACTOR 2	/* 2 * cell_size / (IPG + cell_size + 1) */
-static u16 mlxsw_sp_pfc_delay_get(int mtu, u16 delay)
+
+static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
+				  u16 delay)
 {
-	delay = MLXSW_SP_BYTES_TO_CELLS(DIV_ROUND_UP(delay, BITS_PER_BYTE));
-	return MLXSW_SP_CELL_FACTOR * delay + MLXSW_SP_BYTES_TO_CELLS(mtu);
+	delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay,
+							    BITS_PER_BYTE));
+	return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp,
+								   mtu);
 }
 
-/* Maximum delay buffer needed in case of PAUSE frames, in cells.
+/* Maximum delay buffer needed in case of PAUSE frames, in bytes.
  * Assumes 100m cable and maximum MTU.
  */
-#define MLXSW_SP_PAUSE_DELAY 612
-static u16 mlxsw_sp_pg_buf_delay_get(int mtu, u16 delay, bool pfc, bool pause)
+#define MLXSW_SP_PAUSE_DELAY 58752
+
+static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
+				     u16 delay, bool pfc, bool pause)
 {
 	if (pfc)
-		return mlxsw_sp_pfc_delay_get(mtu, delay);
+		return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay);
 	else if (pause)
-		return MLXSW_SP_PAUSE_DELAY;
+		return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY);
 	else
 		return 0;
 }
@@ -869,8 +880,9 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
 			continue;
 
 		lossy = !(pfc || pause_en);
-		thres = mlxsw_sp_pg_buf_threshold_get(mtu);
-		delay = mlxsw_sp_pg_buf_delay_get(mtu, delay, pfc, pause_en);
+		thres = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
+		delay = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, pfc,
+						  pause_en);
 		mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres + delay, thres, lossy);
 	}
@@ -1577,6 +1589,7 @@ static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
 struct mlxsw_sp_port_hw_stats {
 	char str[ETH_GSTRING_LEN];
 	u64 (*getter)(const char *payload);
+	bool cells_bytes;
 };
 
 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
@@ -1697,17 +1710,11 @@ static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
 
 #define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)
 
-static u64 mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get(const char *ppcnt_pl)
-{
-	u64 transmit_queue = mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
-
-	return MLXSW_SP_CELLS_TO_BYTES(transmit_queue);
-}
-
 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
 	{
 		.str = "tc_transmit_queue_tc",
-		.getter = mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get,
+		.getter = mlxsw_reg_ppcnt_tc_transmit_queue_get,
+		.cells_bytes = true,
 	},
 	{
 		.str = "tc_no_buffer_discard_uc_tc",
@@ -1819,6 +1826,8 @@ static void __mlxsw_sp_port_get_stats(struct net_device *dev,
 				      enum mlxsw_reg_ppcnt_grp grp, int prio,
 				      u64 *data, int data_index)
 {
+	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 	struct mlxsw_sp_port_hw_stats *hw_stats;
 	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
 	int i, len;
@@ -1828,8 +1837,13 @@ static void __mlxsw_sp_port_get_stats(struct net_device *dev,
 	if (err)
 		return;
 	mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
-	for (i = 0; i < len; i++)
+	for (i = 0; i < len; i++) {
 		data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
+		if (!hw_stats[i].cells_bytes)
+			continue;
+		data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp,
+							    data[data_index + i]);
+	}
 }
 
 static void mlxsw_sp_port_get_stats(struct net_device *dev,
@@ -65,11 +65,6 @@
 
 #define MLXSW_SP_PORT_BASE_SPEED 25000	/* Mb/s */
 
-#define MLXSW_SP_BYTES_PER_CELL 96
-#define MLXSW_SP_BYTES_TO_CELLS(b) DIV_ROUND_UP(b, MLXSW_SP_BYTES_PER_CELL)
-#define MLXSW_SP_CELLS_TO_BYTES(c) (c * MLXSW_SP_BYTES_PER_CELL)
-
 #define MLXSW_SP_KVD_LINEAR_SIZE 65536 /* entries */
 #define MLXSW_SP_KVD_GRANULARITY 128
@@ -147,6 +142,7 @@ struct mlxsw_sp_sb_port {
 struct mlxsw_sp_sb {
 	struct mlxsw_sp_sb_pr prs[2][MLXSW_SP_SB_POOL_COUNT];
 	struct mlxsw_sp_sb_port *ports;
+	u32 cell_size;
 };
 
 #define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE)
@@ -284,6 +280,18 @@ mlxsw_sp_lag_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
 	return &mlxsw_sp->lags[lag_id];
 }
 
+static inline u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp,
+				       u32 cells)
+{
+	return mlxsw_sp->sb.cell_size * cells;
+}
+
+static inline u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp,
+				       u32 bytes)
+{
+	return DIV_ROUND_UP(bytes, mlxsw_sp->sb.cell_size);
+}
+
 struct mlxsw_sp_port_pcpu_stats {
 	u64 rx_packets;
 	u64 rx_bytes;
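
A note on the MLXSW_SP_PAUSE_DELAY change above: the constant is now kept in bytes and converted to cells at runtime via mlxsw_sp_bytes_cells(), and the new value matches the old one under the previously hard-coded 96-byte cell, since 612 cells * 96 bytes/cell = 58752 bytes. On a device reporting a different cell size, the same byte budget therefore maps to a different cell count instead of silently changing the reserved buffer.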