// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2015-2018 Netronome Systems, Inc. */

/*
 * nfp_net_ethtool.c
 * Netronome network device driver: ethtool support
 * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
 *          Jason McMullan <jason.mcmullan@netronome.com>
 *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
 *          Brad Petrus <brad.petrus@netronome.com>
 */

#include <linux/bitfield.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/ethtool.h>
#include <linux/firmware.h>
#include <linux/sfp.h>

#include "nfpcore/nfp.h"
#include "nfpcore/nfp_nsp.h"
#include "nfp_app.h"
#include "nfp_main.h"
#include "nfp_net_ctrl.h"
#include "nfp_net.h"
#include "nfp_port.h"

struct nfp_et_stat {
	char name[ETH_GSTRING_LEN];
	int off;
};

static const struct nfp_et_stat nfp_net_et_stats[] = {
	/* Stats from the device */
	{ "dev_rx_discards",	NFP_NET_CFG_STATS_RX_DISCARDS },
	{ "dev_rx_errors",	NFP_NET_CFG_STATS_RX_ERRORS },
	{ "dev_rx_bytes",	NFP_NET_CFG_STATS_RX_OCTETS },
	{ "dev_rx_uc_bytes",	NFP_NET_CFG_STATS_RX_UC_OCTETS },
	{ "dev_rx_mc_bytes",	NFP_NET_CFG_STATS_RX_MC_OCTETS },
	{ "dev_rx_bc_bytes",	NFP_NET_CFG_STATS_RX_BC_OCTETS },
	{ "dev_rx_pkts",	NFP_NET_CFG_STATS_RX_FRAMES },
	{ "dev_rx_mc_pkts",	NFP_NET_CFG_STATS_RX_MC_FRAMES },
	{ "dev_rx_bc_pkts",	NFP_NET_CFG_STATS_RX_BC_FRAMES },

	{ "dev_tx_discards",	NFP_NET_CFG_STATS_TX_DISCARDS },
	{ "dev_tx_errors",	NFP_NET_CFG_STATS_TX_ERRORS },
	{ "dev_tx_bytes",	NFP_NET_CFG_STATS_TX_OCTETS },
	{ "dev_tx_uc_bytes",	NFP_NET_CFG_STATS_TX_UC_OCTETS },
	{ "dev_tx_mc_bytes",	NFP_NET_CFG_STATS_TX_MC_OCTETS },
	{ "dev_tx_bc_bytes",	NFP_NET_CFG_STATS_TX_BC_OCTETS },
	{ "dev_tx_pkts",	NFP_NET_CFG_STATS_TX_FRAMES },
	{ "dev_tx_mc_pkts",	NFP_NET_CFG_STATS_TX_MC_FRAMES },
	{ "dev_tx_bc_pkts",	NFP_NET_CFG_STATS_TX_BC_FRAMES },

	{ "bpf_pass_pkts",	NFP_NET_CFG_STATS_APP0_FRAMES },
	{ "bpf_pass_bytes",	NFP_NET_CFG_STATS_APP0_BYTES },
	/* see comments in outro functions in nfp_bpf_jit.c to find out
	 * how different BPF modes use app-specific counters
	 */
	{ "bpf_app1_pkts",	NFP_NET_CFG_STATS_APP1_FRAMES },
	{ "bpf_app1_bytes",	NFP_NET_CFG_STATS_APP1_BYTES },
	{ "bpf_app2_pkts",	NFP_NET_CFG_STATS_APP2_FRAMES },
	{ "bpf_app2_bytes",	NFP_NET_CFG_STATS_APP2_BYTES },
	{ "bpf_app3_pkts",	NFP_NET_CFG_STATS_APP3_FRAMES },
	{ "bpf_app3_bytes",	NFP_NET_CFG_STATS_APP3_BYTES },
};

static const struct nfp_et_stat nfp_mac_et_stats[] = {
	{ "rx_octets",			NFP_MAC_STATS_RX_IN_OCTETS, },
	{ "rx_frame_too_long_errors",
			NFP_MAC_STATS_RX_FRAME_TOO_LONG_ERRORS, },
	{ "rx_range_length_errors",	NFP_MAC_STATS_RX_RANGE_LENGTH_ERRORS, },
	{ "rx_vlan_received_ok",	NFP_MAC_STATS_RX_VLAN_RECEIVED_OK, },
	{ "rx_errors",			NFP_MAC_STATS_RX_IN_ERRORS, },
	{ "rx_broadcast_pkts",		NFP_MAC_STATS_RX_IN_BROADCAST_PKTS, },
	{ "rx_drop_events",		NFP_MAC_STATS_RX_DROP_EVENTS, },
	{ "rx_alignment_errors",	NFP_MAC_STATS_RX_ALIGNMENT_ERRORS, },
	{ "rx_pause_mac_ctrl_frames",
			NFP_MAC_STATS_RX_PAUSE_MAC_CTRL_FRAMES, },
	{ "rx_frames_received_ok",	NFP_MAC_STATS_RX_FRAMES_RECEIVED_OK, },
	{ "rx_frame_check_sequence_errors",
			NFP_MAC_STATS_RX_FRAME_CHECK_SEQUENCE_ERRORS, },
	{ "rx_unicast_pkts",		NFP_MAC_STATS_RX_UNICAST_PKTS, },
	{ "rx_multicast_pkts",		NFP_MAC_STATS_RX_MULTICAST_PKTS, },
	{ "rx_pkts",			NFP_MAC_STATS_RX_PKTS, },
	{ "rx_undersize_pkts",		NFP_MAC_STATS_RX_UNDERSIZE_PKTS, },
	{ "rx_pkts_64_octets",		NFP_MAC_STATS_RX_PKTS_64_OCTETS, },
	{ "rx_pkts_65_to_127_octets",
			NFP_MAC_STATS_RX_PKTS_65_TO_127_OCTETS, },
	{ "rx_pkts_128_to_255_octets",
			NFP_MAC_STATS_RX_PKTS_128_TO_255_OCTETS, },
	{ "rx_pkts_256_to_511_octets",
			NFP_MAC_STATS_RX_PKTS_256_TO_511_OCTETS, },
	{ "rx_pkts_512_to_1023_octets",
			NFP_MAC_STATS_RX_PKTS_512_TO_1023_OCTETS, },
	{ "rx_pkts_1024_to_1518_octets",
			NFP_MAC_STATS_RX_PKTS_1024_TO_1518_OCTETS, },
	{ "rx_pkts_1519_to_max_octets",
			NFP_MAC_STATS_RX_PKTS_1519_TO_MAX_OCTETS, },
	{ "rx_jabbers",			NFP_MAC_STATS_RX_JABBERS, },
	{ "rx_fragments",		NFP_MAC_STATS_RX_FRAGMENTS, },
	{ "rx_oversize_pkts",		NFP_MAC_STATS_RX_OVERSIZE_PKTS, },
	{ "rx_pause_frames_class0",	NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS0, },
	{ "rx_pause_frames_class1",	NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS1, },
	{ "rx_pause_frames_class2",	NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS2, },
	{ "rx_pause_frames_class3",	NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS3, },
	{ "rx_pause_frames_class4",	NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS4, },
	{ "rx_pause_frames_class5",	NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS5, },
	{ "rx_pause_frames_class6",	NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS6, },
	{ "rx_pause_frames_class7",	NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS7, },
	{ "rx_mac_ctrl_frames_received",
			NFP_MAC_STATS_RX_MAC_CTRL_FRAMES_RECEIVED, },
	{ "rx_mac_head_drop",		NFP_MAC_STATS_RX_MAC_HEAD_DROP, },
	{ "tx_queue_drop",		NFP_MAC_STATS_TX_QUEUE_DROP, },
	{ "tx_octets",			NFP_MAC_STATS_TX_OUT_OCTETS, },
	{ "tx_vlan_transmitted_ok",	NFP_MAC_STATS_TX_VLAN_TRANSMITTED_OK, },
	{ "tx_errors",			NFP_MAC_STATS_TX_OUT_ERRORS, },
	{ "tx_broadcast_pkts",		NFP_MAC_STATS_TX_BROADCAST_PKTS, },
	{ "tx_pause_mac_ctrl_frames",
			NFP_MAC_STATS_TX_PAUSE_MAC_CTRL_FRAMES, },
	{ "tx_frames_transmitted_ok",
			NFP_MAC_STATS_TX_FRAMES_TRANSMITTED_OK, },
	{ "tx_unicast_pkts",		NFP_MAC_STATS_TX_UNICAST_PKTS, },
	{ "tx_multicast_pkts",		NFP_MAC_STATS_TX_MULTICAST_PKTS, },
	{ "tx_pkts_64_octets",		NFP_MAC_STATS_TX_PKTS_64_OCTETS, },
	{ "tx_pkts_65_to_127_octets",
			NFP_MAC_STATS_TX_PKTS_65_TO_127_OCTETS, },
	{ "tx_pkts_128_to_255_octets",
			NFP_MAC_STATS_TX_PKTS_128_TO_255_OCTETS, },
	{ "tx_pkts_256_to_511_octets",
			NFP_MAC_STATS_TX_PKTS_256_TO_511_OCTETS, },
	{ "tx_pkts_512_to_1023_octets",
			NFP_MAC_STATS_TX_PKTS_512_TO_1023_OCTETS, },
	{ "tx_pkts_1024_to_1518_octets",
			NFP_MAC_STATS_TX_PKTS_1024_TO_1518_OCTETS, },
	{ "tx_pkts_1519_to_max_octets",
			NFP_MAC_STATS_TX_PKTS_1519_TO_MAX_OCTETS, },
	{ "tx_pause_frames_class0",	NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS0, },
	{ "tx_pause_frames_class1",	NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS1, },
	{ "tx_pause_frames_class2",	NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS2, },
	{ "tx_pause_frames_class3",	NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS3, },
	{ "tx_pause_frames_class4",	NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS4, },
	{ "tx_pause_frames_class5",	NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS5, },
	{ "tx_pause_frames_class6",	NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS6, },
	{ "tx_pause_frames_class7",	NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS7, },
};

static const char nfp_tlv_stat_names[][ETH_GSTRING_LEN] = {
	[1]	= "dev_rx_discards",
	[2]	= "dev_rx_errors",
	[3]	= "dev_rx_bytes",
	[4]	= "dev_rx_uc_bytes",
	[5]	= "dev_rx_mc_bytes",
	[6]	= "dev_rx_bc_bytes",
	[7]	= "dev_rx_pkts",
	[8]	= "dev_rx_mc_pkts",
	[9]	= "dev_rx_bc_pkts",

	[10]	= "dev_tx_discards",
	[11]	= "dev_tx_errors",
	[12]	= "dev_tx_bytes",
	[13]	= "dev_tx_uc_bytes",
	[14]	= "dev_tx_mc_bytes",
	[15]	= "dev_tx_bc_bytes",
	[16]	= "dev_tx_pkts",
	[17]	= "dev_tx_mc_pkts",
	[18]	= "dev_tx_bc_pkts",
};

#define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats)
#define NN_ET_SWITCH_STATS_LEN 9
#define NN_RVEC_GATHER_STATS	13
#define NN_RVEC_PER_Q_STATS	3
#define NN_CTRL_PATH_STATS	4

#define SFP_SFF_REV_COMPLIANCE	1

static void nfp_net_get_nspinfo(struct nfp_app *app, char *version)
{
	struct nfp_nsp *nsp;

	if (!app)
		return;

	nsp = nfp_nsp_open(app->cpp);
	if (IS_ERR(nsp))
		return;

	snprintf(version, ETHTOOL_FWVERS_LEN, "%hu.%hu",
		 nfp_nsp_get_abi_ver_major(nsp),
		 nfp_nsp_get_abi_ver_minor(nsp));

	nfp_nsp_close(nsp);
}

static void
nfp_get_drvinfo(struct nfp_app *app, struct pci_dev *pdev,
		const char *vnic_version, struct ethtool_drvinfo *drvinfo)
{
	char nsp_version[ETHTOOL_FWVERS_LEN] = {};

	strlcpy(drvinfo->driver, pdev->driver->name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, nfp_driver_version, sizeof(drvinfo->version));

	nfp_net_get_nspinfo(app, nsp_version);
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%s %s %s %s", vnic_version, nsp_version,
		 nfp_app_mip_name(app), nfp_app_name(app));
}

static void
nfp_net_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
	char vnic_version[ETHTOOL_FWVERS_LEN] = {};
	struct nfp_net *nn = netdev_priv(netdev);

	snprintf(vnic_version, sizeof(vnic_version), "%d.%d.%d.%d",
		 nn->fw_ver.resv, nn->fw_ver.class,
		 nn->fw_ver.major, nn->fw_ver.minor);
	strlcpy(drvinfo->bus_info, pci_name(nn->pdev),
		sizeof(drvinfo->bus_info));

	nfp_get_drvinfo(nn->app, nn->pdev, vnic_version, drvinfo);
}

static void
nfp_app_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
	struct nfp_app *app = nfp_app_from_netdev(netdev);

	strlcpy(drvinfo->bus_info, pci_name(app->pdev),
		sizeof(drvinfo->bus_info));
	nfp_get_drvinfo(app, app->pdev, "*", drvinfo);
}

static void
nfp_net_set_fec_link_mode(struct nfp_eth_table_port *eth_port,
			  struct ethtool_link_ksettings *c)
{
	unsigned int modes;

	ethtool_link_ksettings_add_link_mode(c, supported, FEC_NONE);
	if (!nfp_eth_can_support_fec(eth_port)) {
		ethtool_link_ksettings_add_link_mode(c, advertising, FEC_NONE);
		return;
	}

	modes = nfp_eth_supported_fec_modes(eth_port);
	if (modes & NFP_FEC_BASER) {
		ethtool_link_ksettings_add_link_mode(c, supported, FEC_BASER);
		ethtool_link_ksettings_add_link_mode(c, advertising, FEC_BASER);
	}

	if (modes & NFP_FEC_REED_SOLOMON) {
		ethtool_link_ksettings_add_link_mode(c, supported, FEC_RS);
		ethtool_link_ksettings_add_link_mode(c, advertising, FEC_RS);
	}
}

/**
 * nfp_net_get_link_ksettings - Get Link Speed settings
 * @netdev:	network interface device structure
 * @cmd:	ethtool command
 *
 * Reports speed settings based on info in the BAR provided by the fw.
 */
static int
nfp_net_get_link_ksettings(struct net_device *netdev,
			   struct ethtool_link_ksettings *cmd)
{
	static const u32 ls_to_ethtool[] = {
		[NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED]	= 0,
		[NFP_NET_CFG_STS_LINK_RATE_UNKNOWN]	= SPEED_UNKNOWN,
		[NFP_NET_CFG_STS_LINK_RATE_1G]		= SPEED_1000,
		[NFP_NET_CFG_STS_LINK_RATE_10G]		= SPEED_10000,
		[NFP_NET_CFG_STS_LINK_RATE_25G]		= SPEED_25000,
		[NFP_NET_CFG_STS_LINK_RATE_40G]		= SPEED_40000,
		[NFP_NET_CFG_STS_LINK_RATE_50G]		= SPEED_50000,
		[NFP_NET_CFG_STS_LINK_RATE_100G]	= SPEED_100000,
	};
	struct nfp_eth_table_port *eth_port;
	struct nfp_port *port;
	struct nfp_net *nn;
	u32 sts, ls;

	/* Init to unknowns */
	ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
	cmd->base.port = PORT_OTHER;
	cmd->base.speed = SPEED_UNKNOWN;
	cmd->base.duplex = DUPLEX_UNKNOWN;

	port = nfp_port_from_netdev(netdev);
	eth_port = nfp_port_get_eth_port(port);
	if (eth_port) {
		cmd->base.autoneg = eth_port->aneg != NFP_ANEG_DISABLED ?
			AUTONEG_ENABLE : AUTONEG_DISABLE;
		nfp_net_set_fec_link_mode(eth_port, cmd);
	}

	if (!netif_carrier_ok(netdev))
		return 0;

	/* Use link speed from ETH table if available, otherwise try the BAR */
	if (eth_port) {
		cmd->base.port = eth_port->port_type;
		cmd->base.speed = eth_port->speed;
		cmd->base.duplex = DUPLEX_FULL;
		return 0;
	}

	if (!nfp_netdev_is_nfp_net(netdev))
		return -EOPNOTSUPP;
	nn = netdev_priv(netdev);

	sts = nn_readl(nn, NFP_NET_CFG_STS);

	ls = FIELD_GET(NFP_NET_CFG_STS_LINK_RATE, sts);
	if (ls == NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED)
		return -EOPNOTSUPP;

	if (ls == NFP_NET_CFG_STS_LINK_RATE_UNKNOWN ||
	    ls >= ARRAY_SIZE(ls_to_ethtool))
		return 0;

	cmd->base.speed = ls_to_ethtool[ls];
	cmd->base.duplex = DUPLEX_FULL;

	return 0;
}

static int
nfp_net_set_link_ksettings(struct net_device *netdev,
			   const struct ethtool_link_ksettings *cmd)
{
	struct nfp_eth_table_port *eth_port;
	struct nfp_port *port;
	struct nfp_nsp *nsp;
	int err;

	port = nfp_port_from_netdev(netdev);
	eth_port = __nfp_port_get_eth_port(port);
	if (!eth_port)
		return -EOPNOTSUPP;

	if (netif_running(netdev)) {
		netdev_warn(netdev, "Changing settings not allowed on an active interface. It may cause the port to be disabled until driver reload.\n");
		return -EBUSY;
	}

	nsp = nfp_eth_config_start(port->app->cpp, eth_port->index);
	if (IS_ERR(nsp))
		return PTR_ERR(nsp);

	err = __nfp_eth_set_aneg(nsp, cmd->base.autoneg == AUTONEG_ENABLE ?
				 NFP_ANEG_AUTO : NFP_ANEG_DISABLED);
	if (err)
		goto err_bad_set;
	if (cmd->base.speed != SPEED_UNKNOWN) {
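		/* Note: cmd->base.speed is the aggregate link speed; the NSP is
		 * believed to take a per-lane rate, hence the division by the
		 * lane count below (e.g. a 4-lane 40G port -> 4 x 10000).
		 */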
		u32 speed = cmd->base.speed / eth_port->lanes;

		err = __nfp_eth_set_speed(nsp, speed);
		if (err)
			goto err_bad_set;
	}

	err = nfp_eth_config_commit_end(nsp);
	if (err > 0)
		return 0; /* no change */

	nfp_net_refresh_port_table(port);

	return err;

err_bad_set:
	nfp_eth_config_cleanup_end(nsp);
	return err;
}

static void nfp_net_get_ringparam(struct net_device *netdev,
				  struct ethtool_ringparam *ring)
{
	struct nfp_net *nn = netdev_priv(netdev);

	ring->rx_max_pending = NFP_NET_MAX_RX_DESCS;
	ring->tx_max_pending = NFP_NET_MAX_TX_DESCS;
	ring->rx_pending = nn->dp.rxd_cnt;
	ring->tx_pending = nn->dp.txd_cnt;
}

static int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt)
{
	struct nfp_net_dp *dp;

	dp = nfp_net_clone_dp(nn);
	if (!dp)
		return -ENOMEM;

	dp->rxd_cnt = rxd_cnt;
	dp->txd_cnt = txd_cnt;

	return nfp_net_ring_reconfig(nn, dp, NULL);
}

static int nfp_net_set_ringparam(struct net_device *netdev,
				 struct ethtool_ringparam *ring)
{
	struct nfp_net *nn = netdev_priv(netdev);
	u32 rxd_cnt, txd_cnt;

	/* We don't have separate queues/rings for small/large frames. */
	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
		return -EINVAL;

	/* Round up to supported values */
	rxd_cnt = roundup_pow_of_two(ring->rx_pending);
	txd_cnt = roundup_pow_of_two(ring->tx_pending);

	if (rxd_cnt < NFP_NET_MIN_RX_DESCS || rxd_cnt > NFP_NET_MAX_RX_DESCS ||
	    txd_cnt < NFP_NET_MIN_TX_DESCS || txd_cnt > NFP_NET_MAX_TX_DESCS)
		return -EINVAL;

	if (nn->dp.rxd_cnt == rxd_cnt && nn->dp.txd_cnt == txd_cnt)
		return 0;

	nn_dbg(nn, "Change ring size: RxQ %u->%u, TxQ %u->%u\n",
	       nn->dp.rxd_cnt, rxd_cnt, nn->dp.txd_cnt, txd_cnt);

	return nfp_net_set_ring_size(nn, rxd_cnt, txd_cnt);
}

__printf(2, 3) u8 *nfp_pr_et(u8 *data, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vsnprintf(data, ETH_GSTRING_LEN, fmt, args);
	va_end(args);

	return data + ETH_GSTRING_LEN;
}

static unsigned int nfp_vnic_get_sw_stats_count(struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);

	return NN_RVEC_GATHER_STATS + nn->max_r_vecs * NN_RVEC_PER_Q_STATS +
		NN_CTRL_PATH_STATS;
}

static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int i;

	for (i = 0; i < nn->max_r_vecs; i++) {
		data = nfp_pr_et(data, "rvec_%u_rx_pkts", i);
		data = nfp_pr_et(data, "rvec_%u_tx_pkts", i);
		data = nfp_pr_et(data, "rvec_%u_tx_busy", i);
	}

	data = nfp_pr_et(data, "hw_rx_csum_ok");
	data = nfp_pr_et(data, "hw_rx_csum_inner_ok");
	data = nfp_pr_et(data, "hw_rx_csum_complete");
	data = nfp_pr_et(data, "hw_rx_csum_err");
	data = nfp_pr_et(data, "rx_replace_buf_alloc_fail");
	data = nfp_pr_et(data, "rx_tls_decrypted_packets");
	data = nfp_pr_et(data, "hw_tx_csum");
	data = nfp_pr_et(data, "hw_tx_inner_csum");
	data = nfp_pr_et(data, "tx_gather");
	data = nfp_pr_et(data, "tx_lso");
	data = nfp_pr_et(data, "tx_tls_encrypted_packets");
	data = nfp_pr_et(data, "tx_tls_ooo");
	data = nfp_pr_et(data, "tx_tls_drop_no_sync_data");

	data = nfp_pr_et(data, "hw_tls_no_space");
	data = nfp_pr_et(data, "rx_tls_resync_req_ok");
	data = nfp_pr_et(data, "rx_tls_resync_req_ign");
	data = nfp_pr_et(data, "rx_tls_resync_sent");

	return data;
}

static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
{
	u64 gathered_stats[NN_RVEC_GATHER_STATS] = {};
	struct nfp_net *nn = netdev_priv(netdev);
	u64 tmp[NN_RVEC_GATHER_STATS];
	unsigned int i, j;

	for (i = 0; i < nn->max_r_vecs; i++) {
		unsigned int start;

		do {
			start = u64_stats_fetch_begin(&nn->r_vecs[i].rx_sync);
			data[0] = nn->r_vecs[i].rx_pkts;
			tmp[0] = nn->r_vecs[i].hw_csum_rx_ok;
			tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok;
			tmp[2] = nn->r_vecs[i].hw_csum_rx_complete;
			tmp[3] = nn->r_vecs[i].hw_csum_rx_error;
			tmp[4] = nn->r_vecs[i].rx_replace_buf_alloc_fail;
			tmp[5] = nn->r_vecs[i].hw_tls_rx;
		} while (u64_stats_fetch_retry(&nn->r_vecs[i].rx_sync, start));

		do {
			start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync);
			data[1] = nn->r_vecs[i].tx_pkts;
			data[2] = nn->r_vecs[i].tx_busy;
			tmp[6] = nn->r_vecs[i].hw_csum_tx;
			tmp[7] = nn->r_vecs[i].hw_csum_tx_inner;
			tmp[8] = nn->r_vecs[i].tx_gather;
			tmp[9] = nn->r_vecs[i].tx_lso;
			tmp[10] = nn->r_vecs[i].hw_tls_tx;
			tmp[11] = nn->r_vecs[i].tls_tx_fallback;
			tmp[12] = nn->r_vecs[i].tls_tx_no_fallback;
		} while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start));

		data += NN_RVEC_PER_Q_STATS;

		for (j = 0; j < NN_RVEC_GATHER_STATS; j++)
			gathered_stats[j] += tmp[j];
	}

	for (j = 0; j < NN_RVEC_GATHER_STATS; j++)
		*data++ = gathered_stats[j];

	*data++ = atomic_read(&nn->ktls_no_space);
	*data++ = atomic_read(&nn->ktls_rx_resync_req);
	*data++ = atomic_read(&nn->ktls_rx_resync_ign);
	*data++ = atomic_read(&nn->ktls_rx_resync_sent);

	return data;
}

static unsigned int nfp_vnic_get_hw_stats_count(unsigned int num_vecs)
{
	return NN_ET_GLOBAL_STATS_LEN + num_vecs * 4;
}

static u8 *
nfp_vnic_get_hw_stats_strings(u8 *data, unsigned int num_vecs, bool repr)
{
	int swap_off, i;

	BUILD_BUG_ON(NN_ET_GLOBAL_STATS_LEN < NN_ET_SWITCH_STATS_LEN * 2);
	/* If repr is true first add SWITCH_STATS_LEN and then subtract it
	 * effectively swapping the RX and TX statistics (giving us the RX
	 * and TX from perspective of the switch).
	 */
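	/* For example, with repr set the value read back from the RX_DISCARDS
	 * offset ends up labelled "dev_tx_discards": what the vNIC receives is
	 * what the switch-side port transmits.
	 */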
	swap_off = repr * NN_ET_SWITCH_STATS_LEN;

	for (i = 0; i < NN_ET_SWITCH_STATS_LEN; i++)
		data = nfp_pr_et(data, nfp_net_et_stats[i + swap_off].name);

	for (i = NN_ET_SWITCH_STATS_LEN; i < NN_ET_SWITCH_STATS_LEN * 2; i++)
		data = nfp_pr_et(data, nfp_net_et_stats[i - swap_off].name);

	for (i = NN_ET_SWITCH_STATS_LEN * 2; i < NN_ET_GLOBAL_STATS_LEN; i++)
		data = nfp_pr_et(data, nfp_net_et_stats[i].name);

	for (i = 0; i < num_vecs; i++) {
		data = nfp_pr_et(data, "rxq_%u_pkts", i);
		data = nfp_pr_et(data, "rxq_%u_bytes", i);
		data = nfp_pr_et(data, "txq_%u_pkts", i);
		data = nfp_pr_et(data, "txq_%u_bytes", i);
	}

	return data;
}

static u64 *
nfp_vnic_get_hw_stats(u64 *data, u8 __iomem *mem, unsigned int num_vecs)
{
	unsigned int i;

	for (i = 0; i < NN_ET_GLOBAL_STATS_LEN; i++)
		*data++ = readq(mem + nfp_net_et_stats[i].off);

	for (i = 0; i < num_vecs; i++) {
		*data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i));
		*data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i) + 8);
		*data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i));
		*data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i) + 8);
	}

	return data;
}

static unsigned int nfp_vnic_get_tlv_stats_count(struct nfp_net *nn)
{
	return nn->tlv_caps.vnic_stats_cnt + nn->max_r_vecs * 4;
}

static u8 *nfp_vnic_get_tlv_stats_strings(struct nfp_net *nn, u8 *data)
{
	unsigned int i, id;
	u8 __iomem *mem;
	u64 id_word = 0;

	mem = nn->dp.ctrl_bar + nn->tlv_caps.vnic_stats_off;
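	/* Stat IDs are 16-bit values packed four to a 64-bit word, hence the
	 * readq() every fourth iteration and the byte offset of i * 2.
	 */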
	for (i = 0; i < nn->tlv_caps.vnic_stats_cnt; i++) {
		if (!(i % 4))
			id_word = readq(mem + i * 2);

		id = (u16)id_word;
		id_word >>= 16;

		if (id < ARRAY_SIZE(nfp_tlv_stat_names) &&
		    nfp_tlv_stat_names[id][0]) {
			memcpy(data, nfp_tlv_stat_names[id], ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		} else {
			data = nfp_pr_et(data, "dev_unknown_stat%u", id);
		}
	}

	for (i = 0; i < nn->max_r_vecs; i++) {
		data = nfp_pr_et(data, "rxq_%u_pkts", i);
		data = nfp_pr_et(data, "rxq_%u_bytes", i);
		data = nfp_pr_et(data, "txq_%u_pkts", i);
		data = nfp_pr_et(data, "txq_%u_bytes", i);
	}

	return data;
}

static u64 *nfp_vnic_get_tlv_stats(struct nfp_net *nn, u64 *data)
{
	u8 __iomem *mem;
	unsigned int i;

	mem = nn->dp.ctrl_bar + nn->tlv_caps.vnic_stats_off;
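	/* Skip over the ID table (two bytes per stat, padded up to 8-byte
	 * alignment) to reach the 64-bit counter values that follow it.
	 */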
	mem += roundup(2 * nn->tlv_caps.vnic_stats_cnt, 8);
	for (i = 0; i < nn->tlv_caps.vnic_stats_cnt; i++)
		*data++ = readq(mem + i * 8);

	mem = nn->dp.ctrl_bar;
	for (i = 0; i < nn->max_r_vecs; i++) {
		*data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i));
		*data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i) + 8);
		*data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i));
		*data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i) + 8);
	}

	return data;
}

static unsigned int nfp_mac_get_stats_count(struct net_device *netdev)
{
	struct nfp_port *port;

	port = nfp_port_from_netdev(netdev);
	if (!__nfp_port_get_eth_port(port) || !port->eth_stats)
		return 0;

	return ARRAY_SIZE(nfp_mac_et_stats);
}

static u8 *nfp_mac_get_stats_strings(struct net_device *netdev, u8 *data)
{
	struct nfp_port *port;
	unsigned int i;

	port = nfp_port_from_netdev(netdev);
	if (!__nfp_port_get_eth_port(port) || !port->eth_stats)
		return data;

	for (i = 0; i < ARRAY_SIZE(nfp_mac_et_stats); i++)
		data = nfp_pr_et(data, "mac.%s", nfp_mac_et_stats[i].name);

	return data;
}

static u64 *nfp_mac_get_stats(struct net_device *netdev, u64 *data)
{
	struct nfp_port *port;
	unsigned int i;

	port = nfp_port_from_netdev(netdev);
	if (!__nfp_port_get_eth_port(port) || !port->eth_stats)
		return data;

	for (i = 0; i < ARRAY_SIZE(nfp_mac_et_stats); i++)
		*data++ = readq(port->eth_stats + nfp_mac_et_stats[i].off);

	return data;
}

static void nfp_net_get_strings(struct net_device *netdev,
				u32 stringset, u8 *data)
{
	struct nfp_net *nn = netdev_priv(netdev);

	switch (stringset) {
	case ETH_SS_STATS:
		data = nfp_vnic_get_sw_stats_strings(netdev, data);
		if (!nn->tlv_caps.vnic_stats_off)
			data = nfp_vnic_get_hw_stats_strings(data,
							     nn->max_r_vecs,
							     false);
		else
			data = nfp_vnic_get_tlv_stats_strings(nn, data);
		data = nfp_mac_get_stats_strings(netdev, data);
		data = nfp_app_port_get_stats_strings(nn->port, data);
		break;
	}
}

static void
nfp_net_get_stats(struct net_device *netdev, struct ethtool_stats *stats,
		  u64 *data)
{
	struct nfp_net *nn = netdev_priv(netdev);

	data = nfp_vnic_get_sw_stats(netdev, data);
	if (!nn->tlv_caps.vnic_stats_off)
		data = nfp_vnic_get_hw_stats(data, nn->dp.ctrl_bar,
					     nn->max_r_vecs);
	else
		data = nfp_vnic_get_tlv_stats(nn, data);
	data = nfp_mac_get_stats(netdev, data);
	data = nfp_app_port_get_stats(nn->port, data);
}

static int nfp_net_get_sset_count(struct net_device *netdev, int sset)
{
	struct nfp_net *nn = netdev_priv(netdev);
	unsigned int cnt;

	switch (sset) {
	case ETH_SS_STATS:
		cnt = nfp_vnic_get_sw_stats_count(netdev);
		if (!nn->tlv_caps.vnic_stats_off)
			cnt += nfp_vnic_get_hw_stats_count(nn->max_r_vecs);
		else
			cnt += nfp_vnic_get_tlv_stats_count(nn);
		cnt += nfp_mac_get_stats_count(netdev);
		cnt += nfp_app_port_get_stats_count(nn->port);
		return cnt;
	default:
		return -EOPNOTSUPP;
	}
}

static void nfp_port_get_strings(struct net_device *netdev,
				 u32 stringset, u8 *data)
{
	struct nfp_port *port = nfp_port_from_netdev(netdev);

	switch (stringset) {
	case ETH_SS_STATS:
		if (nfp_port_is_vnic(port))
			data = nfp_vnic_get_hw_stats_strings(data, 0, true);
		else
			data = nfp_mac_get_stats_strings(netdev, data);
		data = nfp_app_port_get_stats_strings(port, data);
		break;
	}
}

static void
nfp_port_get_stats(struct net_device *netdev, struct ethtool_stats *stats,
		   u64 *data)
{
	struct nfp_port *port = nfp_port_from_netdev(netdev);

	if (nfp_port_is_vnic(port))
		data = nfp_vnic_get_hw_stats(data, port->vnic, 0);
	else
		data = nfp_mac_get_stats(netdev, data);
	data = nfp_app_port_get_stats(port, data);
}

static int nfp_port_get_sset_count(struct net_device *netdev, int sset)
{
	struct nfp_port *port = nfp_port_from_netdev(netdev);
	unsigned int count;

	switch (sset) {
	case ETH_SS_STATS:
		if (nfp_port_is_vnic(port))
			count = nfp_vnic_get_hw_stats_count(0);
		else
			count = nfp_mac_get_stats_count(netdev);
		count += nfp_app_port_get_stats_count(port);
		return count;
	default:
		return -EOPNOTSUPP;
	}
}

static int nfp_port_fec_ethtool_to_nsp(u32 fec)
{
	switch (fec) {
	case ETHTOOL_FEC_AUTO:
		return NFP_FEC_AUTO_BIT;
	case ETHTOOL_FEC_OFF:
		return NFP_FEC_DISABLED_BIT;
	case ETHTOOL_FEC_RS:
		return NFP_FEC_REED_SOLOMON_BIT;
	case ETHTOOL_FEC_BASER:
		return NFP_FEC_BASER_BIT;
	default:
		/* NSP only supports a single mode at a time */
		return -EOPNOTSUPP;
	}
}

static u32 nfp_port_fec_nsp_to_ethtool(u32 fec)
{
	u32 result = 0;

	if (fec & NFP_FEC_AUTO)
		result |= ETHTOOL_FEC_AUTO;
	if (fec & NFP_FEC_BASER)
		result |= ETHTOOL_FEC_BASER;
	if (fec & NFP_FEC_REED_SOLOMON)
		result |= ETHTOOL_FEC_RS;
	if (fec & NFP_FEC_DISABLED)
		result |= ETHTOOL_FEC_OFF;

	return result ?: ETHTOOL_FEC_NONE;
}

static int
nfp_port_get_fecparam(struct net_device *netdev,
		      struct ethtool_fecparam *param)
{
	struct nfp_eth_table_port *eth_port;
	struct nfp_port *port;

	param->active_fec = ETHTOOL_FEC_NONE_BIT;
	param->fec = ETHTOOL_FEC_NONE_BIT;

	port = nfp_port_from_netdev(netdev);
	eth_port = nfp_port_get_eth_port(port);
	if (!eth_port)
		return -EOPNOTSUPP;

	if (!nfp_eth_can_support_fec(eth_port))
		return 0;

	param->fec = nfp_port_fec_nsp_to_ethtool(eth_port->fec_modes_supported);
	param->active_fec = nfp_port_fec_nsp_to_ethtool(eth_port->fec);

	return 0;
}

static int
nfp_port_set_fecparam(struct net_device *netdev,
		      struct ethtool_fecparam *param)
{
	struct nfp_eth_table_port *eth_port;
	struct nfp_port *port;
	int err, fec;

	port = nfp_port_from_netdev(netdev);
	eth_port = nfp_port_get_eth_port(port);
	if (!eth_port)
		return -EOPNOTSUPP;

	if (!nfp_eth_can_support_fec(eth_port))
		return -EOPNOTSUPP;

	fec = nfp_port_fec_ethtool_to_nsp(param->fec);
	if (fec < 0)
		return fec;

	err = nfp_eth_set_fec(port->app->cpp, eth_port->index, fec);
	if (!err)
		/* Only refresh if we did something */
		nfp_net_refresh_port_table(port);

	return err < 0 ? err : 0;
}

/* RX network flow classification (RSS, filters, etc)
 */
static u32 ethtool_flow_to_nfp_flag(u32 flow_type)
{
	static const u32 xlate_ethtool_to_nfp[IPV6_FLOW + 1] = {
		[TCP_V4_FLOW]	= NFP_NET_CFG_RSS_IPV4_TCP,
		[TCP_V6_FLOW]	= NFP_NET_CFG_RSS_IPV6_TCP,
		[UDP_V4_FLOW]	= NFP_NET_CFG_RSS_IPV4_UDP,
		[UDP_V6_FLOW]	= NFP_NET_CFG_RSS_IPV6_UDP,
		[IPV4_FLOW]	= NFP_NET_CFG_RSS_IPV4,
		[IPV6_FLOW]	= NFP_NET_CFG_RSS_IPV6,
	};

	if (flow_type >= ARRAY_SIZE(xlate_ethtool_to_nfp))
		return 0;

	return xlate_ethtool_to_nfp[flow_type];
}

static int nfp_net_get_rss_hash_opts(struct nfp_net *nn,
				     struct ethtool_rxnfc *cmd)
{
	u32 nfp_rss_flag;

	cmd->data = 0;

	if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
		return -EOPNOTSUPP;

	nfp_rss_flag = ethtool_flow_to_nfp_flag(cmd->flow_type);
	if (!nfp_rss_flag)
		return -EINVAL;

	cmd->data |= RXH_IP_SRC | RXH_IP_DST;
	if (nn->rss_cfg & nfp_rss_flag)
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;

	return 0;
}

static int nfp_net_get_rxnfc(struct net_device *netdev,
			     struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
	struct nfp_net *nn = netdev_priv(netdev);

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = nn->dp.num_rx_rings;
		return 0;
	case ETHTOOL_GRXFH:
		return nfp_net_get_rss_hash_opts(nn, cmd);
	default:
		return -EOPNOTSUPP;
	}
}

static int nfp_net_set_rss_hash_opt(struct nfp_net *nn,
				    struct ethtool_rxnfc *nfc)
{
	u32 new_rss_cfg = nn->rss_cfg;
	u32 nfp_rss_flag;
	int err;

	if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
		return -EOPNOTSUPP;

	/* RSS only supports IP SA/DA and L4 src/dst ports  */
	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	/* We need at least the IP SA/DA fields for hashing */
	if (!(nfc->data & RXH_IP_SRC) ||
	    !(nfc->data & RXH_IP_DST))
		return -EINVAL;

	nfp_rss_flag = ethtool_flow_to_nfp_flag(nfc->flow_type);
	if (!nfp_rss_flag)
		return -EINVAL;

	switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
	case 0:
		new_rss_cfg &= ~nfp_rss_flag;
		break;
	case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
		new_rss_cfg |= nfp_rss_flag;
		break;
	default:
		return -EINVAL;
	}

	new_rss_cfg |= FIELD_PREP(NFP_NET_CFG_RSS_HFUNC, nn->rss_hfunc);
	new_rss_cfg |= NFP_NET_CFG_RSS_MASK;

	if (new_rss_cfg == nn->rss_cfg)
		return 0;

	writel(new_rss_cfg, nn->dp.ctrl_bar + NFP_NET_CFG_RSS_CTRL);
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RSS);
	if (err)
		return err;

	nn->rss_cfg = new_rss_cfg;

	nn_dbg(nn, "Changed RSS config to 0x%x\n", nn->rss_cfg);
	return 0;
}

static int nfp_net_set_rxnfc(struct net_device *netdev,
			     struct ethtool_rxnfc *cmd)
{
	struct nfp_net *nn = netdev_priv(netdev);

	switch (cmd->cmd) {
	case ETHTOOL_SRXFH:
		return nfp_net_set_rss_hash_opt(nn, cmd);
	default:
		return -EOPNOTSUPP;
	}
}

static u32 nfp_net_get_rxfh_indir_size(struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);

	if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
		return 0;

	return ARRAY_SIZE(nn->rss_itbl);
}

static u32 nfp_net_get_rxfh_key_size(struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);

	if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
		return -EOPNOTSUPP;

	return nfp_net_rss_key_sz(nn);
}

static int nfp_net_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
			    u8 *hfunc)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int i;

	if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
		return -EOPNOTSUPP;

	if (indir)
		for (i = 0; i < ARRAY_SIZE(nn->rss_itbl); i++)
			indir[i] = nn->rss_itbl[i];
	if (key)
		memcpy(key, nn->rss_key, nfp_net_rss_key_sz(nn));
	if (hfunc) {
		*hfunc = nn->rss_hfunc;
		if (*hfunc >= 1 << ETH_RSS_HASH_FUNCS_COUNT)
			*hfunc = ETH_RSS_HASH_UNKNOWN;
	}

	return 0;
}

static int nfp_net_set_rxfh(struct net_device *netdev,
			    const u32 *indir, const u8 *key,
			    const u8 hfunc)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int i;

	if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY) ||
	    !(hfunc == ETH_RSS_HASH_NO_CHANGE || hfunc == nn->rss_hfunc))
		return -EOPNOTSUPP;

	if (!key && !indir)
		return 0;

	if (key) {
		memcpy(nn->rss_key, key, nfp_net_rss_key_sz(nn));
		nfp_net_rss_write_key(nn);
	}
	if (indir) {
		for (i = 0; i < ARRAY_SIZE(nn->rss_itbl); i++)
			nn->rss_itbl[i] = indir[i];

		nfp_net_rss_write_itbl(nn);
	}

	return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RSS);
}

/* Dump BAR registers
 */
static int nfp_net_get_regs_len(struct net_device *netdev)
{
	return NFP_NET_CFG_BAR_SZ;
}

static void nfp_net_get_regs(struct net_device *netdev,
			     struct ethtool_regs *regs, void *p)
{
	struct nfp_net *nn = netdev_priv(netdev);
	u32 *regs_buf = p;
	int i;

	regs->version = nn_readl(nn, NFP_NET_CFG_VERSION);

	for (i = 0; i < NFP_NET_CFG_BAR_SZ / sizeof(u32); i++)
		regs_buf[i] = readl(nn->dp.ctrl_bar + (i * sizeof(u32)));
}

static int nfp_net_get_coalesce(struct net_device *netdev,
				struct ethtool_coalesce *ec)
{
	struct nfp_net *nn = netdev_priv(netdev);

	if (!(nn->cap & NFP_NET_CFG_CTRL_IRQMOD))
		return -EINVAL;

	ec->rx_coalesce_usecs       = nn->rx_coalesce_usecs;
	ec->rx_max_coalesced_frames = nn->rx_coalesce_max_frames;
	ec->tx_coalesce_usecs       = nn->tx_coalesce_usecs;
	ec->tx_max_coalesced_frames = nn->tx_coalesce_max_frames;

	return 0;
}

/* Other debug dumps
 */
static int
nfp_dump_nsp_diag(struct nfp_app *app, struct ethtool_dump *dump, void *buffer)
{
	struct nfp_resource *res;
	int ret;

	if (!app)
		return -EOPNOTSUPP;

	dump->version = 1;
	dump->flag = NFP_DUMP_NSP_DIAG;

	res = nfp_resource_acquire(app->cpp, NFP_RESOURCE_NSP_DIAG);
	if (IS_ERR(res))
		return PTR_ERR(res);

	if (buffer) {
		if (dump->len != nfp_resource_size(res)) {
			ret = -EINVAL;
			goto exit_release;
		}

		ret = nfp_cpp_read(app->cpp, nfp_resource_cpp_id(res),
				   nfp_resource_address(res),
				   buffer, dump->len);
		if (ret != dump->len)
			ret = ret < 0 ? ret : -EIO;
		else
			ret = 0;
	} else {
		dump->len = nfp_resource_size(res);
		ret = 0;
	}
exit_release:
	nfp_resource_release(res);

	return ret;
}

/* Set the dump flag/level. Calculate the dump length for flag > 0 only (new TLV
 * based dumps), since flag 0 (default) calculates the length in
 * nfp_app_get_dump_flag(), and we need to support triggering a level 0 dump
 * without setting the flag first, for backward compatibility.
 */
static int nfp_app_set_dump(struct net_device *netdev, struct ethtool_dump *val)
{
	struct nfp_app *app = nfp_app_from_netdev(netdev);
	s64 len;

	if (!app)
		return -EOPNOTSUPP;

	if (val->flag == NFP_DUMP_NSP_DIAG) {
		app->pf->dump_flag = val->flag;
		return 0;
	}

	if (!app->pf->dumpspec)
		return -EOPNOTSUPP;

	len = nfp_net_dump_calculate_size(app->pf, app->pf->dumpspec,
					  val->flag);
	if (len < 0)
		return len;

	app->pf->dump_flag = val->flag;
	app->pf->dump_len = len;

	return 0;
}

static int
nfp_app_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
{
	struct nfp_app *app = nfp_app_from_netdev(netdev);

	if (!app)
		return -EOPNOTSUPP;

	if (app->pf->dump_flag == NFP_DUMP_NSP_DIAG)
		return nfp_dump_nsp_diag(app, dump, NULL);

	dump->flag = app->pf->dump_flag;
	dump->len = app->pf->dump_len;

	return 0;
}

static int
nfp_app_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
		      void *buffer)
{
	struct nfp_app *app = nfp_app_from_netdev(netdev);

	if (!app)
		return -EOPNOTSUPP;

	if (app->pf->dump_flag == NFP_DUMP_NSP_DIAG)
		return nfp_dump_nsp_diag(app, dump, buffer);

	dump->flag = app->pf->dump_flag;
	dump->len = app->pf->dump_len;

	return nfp_net_dump_populate_buffer(app->pf, app->pf->dumpspec, dump,
					    buffer);
}

static int
nfp_port_get_module_info(struct net_device *netdev,
			 struct ethtool_modinfo *modinfo)
{
	struct nfp_eth_table_port *eth_port;
	struct nfp_port *port;
	unsigned int read_len;
	struct nfp_nsp *nsp;
	int err = 0;
	u8 data;

	port = nfp_port_from_netdev(netdev);
	eth_port = nfp_port_get_eth_port(port);
	if (!eth_port)
		return -EOPNOTSUPP;

	nsp = nfp_nsp_open(port->app->cpp);
	if (IS_ERR(nsp)) {
		err = PTR_ERR(nsp);
		netdev_err(netdev, "Failed to access the NSP: %d\n", err);
		return err;
	}

	if (!nfp_nsp_has_read_module_eeprom(nsp)) {
		netdev_info(netdev, "reading module EEPROM not supported. Please update flash\n");
		err = -EOPNOTSUPP;
		goto exit_close_nsp;
	}

	switch (eth_port->interface) {
	case NFP_INTERFACE_SFP:
	case NFP_INTERFACE_SFP28:
		err = nfp_nsp_read_module_eeprom(nsp, eth_port->eth_index,
						 SFP_SFF8472_COMPLIANCE, &data,
						 1, &read_len);
		if (err < 0)
			goto exit_close_nsp;

		if (!data) {
			modinfo->type = ETH_MODULE_SFF_8079;
			modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
		} else {
			modinfo->type = ETH_MODULE_SFF_8472;
			modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
		}
		break;
	case NFP_INTERFACE_QSFP:
		err = nfp_nsp_read_module_eeprom(nsp, eth_port->eth_index,
						 SFP_SFF_REV_COMPLIANCE, &data,
						 1, &read_len);
		if (err < 0)
			goto exit_close_nsp;

		if (data < 0x3) {
			modinfo->type = ETH_MODULE_SFF_8436;
			modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
		} else {
			modinfo->type = ETH_MODULE_SFF_8636;
			modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
		}
		break;
	case NFP_INTERFACE_QSFP28:
		modinfo->type = ETH_MODULE_SFF_8636;
		modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
		break;
	default:
		netdev_err(netdev, "Unsupported module 0x%x detected\n",
			   eth_port->interface);
		err = -EINVAL;
	}

exit_close_nsp:
	nfp_nsp_close(nsp);
	return err;
}

static int
nfp_port_get_module_eeprom(struct net_device *netdev,
			   struct ethtool_eeprom *eeprom, u8 *data)
{
	struct nfp_eth_table_port *eth_port;
	struct nfp_port *port;
	struct nfp_nsp *nsp;
	int err;

	port = nfp_port_from_netdev(netdev);
	eth_port = __nfp_port_get_eth_port(port);
	if (!eth_port)
		return -EOPNOTSUPP;

	nsp = nfp_nsp_open(port->app->cpp);
	if (IS_ERR(nsp)) {
		err = PTR_ERR(nsp);
		netdev_err(netdev, "Failed to access the NSP: %d\n", err);
		return err;
	}

	if (!nfp_nsp_has_read_module_eeprom(nsp)) {
		netdev_info(netdev, "reading module EEPROM not supported. Please update flash\n");
		err = -EOPNOTSUPP;
		goto exit_close_nsp;
	}

	err = nfp_nsp_read_module_eeprom(nsp, eth_port->eth_index,
					 eeprom->offset, data, eeprom->len,
					 &eeprom->len);
	if (err < 0) {
		if (eeprom->len) {
			netdev_warn(netdev,
				    "Incomplete read from module EEPROM: %d\n",
				     err);
			err = 0;
		} else {
			netdev_err(netdev,
				   "Reading from module EEPROM failed: %d\n",
				   err);
		}
	}

exit_close_nsp:
	nfp_nsp_close(nsp);
	return err;
}

static int nfp_net_set_coalesce(struct net_device *netdev,
				struct ethtool_coalesce *ec)
{
	struct nfp_net *nn = netdev_priv(netdev);
	unsigned int factor;

	if (ec->rx_coalesce_usecs_irq ||
	    ec->rx_max_coalesced_frames_irq ||
	    ec->tx_coalesce_usecs_irq ||
	    ec->tx_max_coalesced_frames_irq ||
	    ec->stats_block_coalesce_usecs ||
	    ec->use_adaptive_rx_coalesce ||
	    ec->use_adaptive_tx_coalesce ||
	    ec->pkt_rate_low ||
	    ec->rx_coalesce_usecs_low ||
	    ec->rx_max_coalesced_frames_low ||
	    ec->tx_coalesce_usecs_low ||
	    ec->tx_max_coalesced_frames_low ||
	    ec->pkt_rate_high ||
	    ec->rx_coalesce_usecs_high ||
	    ec->rx_max_coalesced_frames_high ||
	    ec->tx_coalesce_usecs_high ||
	    ec->tx_max_coalesced_frames_high ||
	    ec->rate_sample_interval)
		return -EOPNOTSUPP;

	/* Compute factor used to convert coalesce '_usecs' parameters to
	 * ME timestamp ticks.  There are 16 ME clock cycles for each timestamp
	 * count.
	 */
	factor = nn->me_freq_mhz / 16;
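	/* Worked example (illustrative numbers only): with a 1200 MHz ME clock
	 * factor = 1200 / 16 = 75, so rx_coalesce_usecs = 50 translates to
	 * 50 * 75 = 3750 ticks, well within the 16-bit limit checked below.
	 */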

	/* Each pair of (usecs, max_frames) fields specifies that interrupts
	 * should be coalesced until
	 *      (usecs > 0 && time_since_first_completion >= usecs) ||
	 *      (max_frames > 0 && completed_frames >= max_frames)
	 *
	 * It is illegal to set both usecs and max_frames to zero as this would
	 * cause interrupts to never be generated.  To disable coalescing, set
	 * usecs = 0 and max_frames = 1.
	 *
	 * Some implementations ignore the value of max_frames and use the
	 * condition time_since_first_completion >= usecs
	 */

	if (!(nn->cap & NFP_NET_CFG_CTRL_IRQMOD))
		return -EINVAL;

	/* ensure valid configuration */
	if (!ec->rx_coalesce_usecs && !ec->rx_max_coalesced_frames)
		return -EINVAL;

	if (!ec->tx_coalesce_usecs && !ec->tx_max_coalesced_frames)
		return -EINVAL;

	if (ec->rx_coalesce_usecs * factor >= ((1 << 16) - 1))
		return -EINVAL;

	if (ec->tx_coalesce_usecs * factor >= ((1 << 16) - 1))
		return -EINVAL;

	if (ec->rx_max_coalesced_frames >= ((1 << 16) - 1))
		return -EINVAL;

	if (ec->tx_max_coalesced_frames >= ((1 << 16) - 1))
		return -EINVAL;

	/* configuration is valid */
	nn->rx_coalesce_usecs      = ec->rx_coalesce_usecs;
	nn->rx_coalesce_max_frames = ec->rx_max_coalesced_frames;
	nn->tx_coalesce_usecs      = ec->tx_coalesce_usecs;
	nn->tx_coalesce_max_frames = ec->tx_max_coalesced_frames;

	/* write configuration to device */
	nfp_net_coalesce_write_cfg(nn);
	return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_IRQMOD);
}

static void nfp_net_get_channels(struct net_device *netdev,
				 struct ethtool_channels *channel)
{
	struct nfp_net *nn = netdev_priv(netdev);
	unsigned int num_tx_rings;

	num_tx_rings = nn->dp.num_tx_rings;
	if (nn->dp.xdp_prog)
		num_tx_rings -= nn->dp.num_rx_rings;

	channel->max_rx = min(nn->max_rx_rings, nn->max_r_vecs);
	channel->max_tx = min(nn->max_tx_rings, nn->max_r_vecs);
	channel->max_combined = min(channel->max_rx, channel->max_tx);
	channel->max_other = NFP_NET_NON_Q_VECTORS;
	channel->combined_count = min(nn->dp.num_rx_rings, num_tx_rings);
	channel->rx_count = nn->dp.num_rx_rings - channel->combined_count;
	channel->tx_count = num_tx_rings - channel->combined_count;
	channel->other_count = NFP_NET_NON_Q_VECTORS;
}

static int nfp_net_set_num_rings(struct nfp_net *nn, unsigned int total_rx,
				 unsigned int total_tx)
{
	struct nfp_net_dp *dp;

	dp = nfp_net_clone_dp(nn);
	if (!dp)
		return -ENOMEM;

	dp->num_rx_rings = total_rx;
	dp->num_tx_rings = total_tx;
	/* nfp_net_check_config() will catch num_tx_rings > nn->max_tx_rings */
	if (dp->xdp_prog)
		dp->num_tx_rings += total_rx;

	return nfp_net_ring_reconfig(nn, dp, NULL);
}

static int nfp_net_set_channels(struct net_device *netdev,
				struct ethtool_channels *channel)
{
	struct nfp_net *nn = netdev_priv(netdev);
	unsigned int total_rx, total_tx;

	/* Reject unsupported */
	if (!channel->combined_count ||
	    channel->other_count != NFP_NET_NON_Q_VECTORS ||
	    (channel->rx_count && channel->tx_count))
		return -EINVAL;

	total_rx = channel->combined_count + channel->rx_count;
	total_tx = channel->combined_count + channel->tx_count;

	if (total_rx > min(nn->max_rx_rings, nn->max_r_vecs) ||
	    total_tx > min(nn->max_tx_rings, nn->max_r_vecs))
		return -EINVAL;

	return nfp_net_set_num_rings(nn, total_rx, total_tx);
}

static const struct ethtool_ops nfp_net_ethtool_ops = {
	.get_drvinfo		= nfp_net_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_ringparam		= nfp_net_get_ringparam,
	.set_ringparam		= nfp_net_set_ringparam,
	.get_strings		= nfp_net_get_strings,
	.get_ethtool_stats	= nfp_net_get_stats,
	.get_sset_count		= nfp_net_get_sset_count,
	.get_rxnfc		= nfp_net_get_rxnfc,
	.set_rxnfc		= nfp_net_set_rxnfc,
	.get_rxfh_indir_size	= nfp_net_get_rxfh_indir_size,
	.get_rxfh_key_size	= nfp_net_get_rxfh_key_size,
	.get_rxfh		= nfp_net_get_rxfh,
	.set_rxfh		= nfp_net_set_rxfh,
	.get_regs_len		= nfp_net_get_regs_len,
	.get_regs		= nfp_net_get_regs,
	.set_dump		= nfp_app_set_dump,
	.get_dump_flag		= nfp_app_get_dump_flag,
	.get_dump_data		= nfp_app_get_dump_data,
	.get_module_info	= nfp_port_get_module_info,
	.get_module_eeprom	= nfp_port_get_module_eeprom,
	.get_coalesce           = nfp_net_get_coalesce,
	.set_coalesce           = nfp_net_set_coalesce,
	.get_channels		= nfp_net_get_channels,
	.set_channels		= nfp_net_set_channels,
	.get_link_ksettings	= nfp_net_get_link_ksettings,
	.set_link_ksettings	= nfp_net_set_link_ksettings,
	.get_fecparam		= nfp_port_get_fecparam,
	.set_fecparam		= nfp_port_set_fecparam,
};

const struct ethtool_ops nfp_port_ethtool_ops = {
	.get_drvinfo		= nfp_app_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_strings		= nfp_port_get_strings,
	.get_ethtool_stats	= nfp_port_get_stats,
	.get_sset_count		= nfp_port_get_sset_count,
	.set_dump		= nfp_app_set_dump,
	.get_dump_flag		= nfp_app_get_dump_flag,
	.get_dump_data		= nfp_app_get_dump_data,
	.get_module_info	= nfp_port_get_module_info,
	.get_module_eeprom	= nfp_port_get_module_eeprom,
	.get_link_ksettings	= nfp_net_get_link_ksettings,
	.set_link_ksettings	= nfp_net_set_link_ksettings,
	.get_fecparam		= nfp_port_get_fecparam,
	.set_fecparam		= nfp_port_set_fecparam,
};

void nfp_net_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &nfp_net_ethtool_ops;
}