Commit 765c9f46 authored by Ben Hutchings; committed by David S. Miller

sfc: Add support for RX flow hash control

Allow ethtool to query the number of RX rings, the fields used in RX
flow hashing and the hash indirection table.

Allow ethtool to update the RX flow hash indirection table.
Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent a5b6ee29
...@@ -1121,6 +1121,7 @@ static void efx_set_channels(struct efx_nic *efx) ...@@ -1121,6 +1121,7 @@ static void efx_set_channels(struct efx_nic *efx)
static int efx_probe_nic(struct efx_nic *efx) static int efx_probe_nic(struct efx_nic *efx)
{ {
size_t i;
int rc; int rc;
netif_dbg(efx, probe, efx->net_dev, "creating NIC\n"); netif_dbg(efx, probe, efx->net_dev, "creating NIC\n");
...@@ -1136,6 +1137,8 @@ static int efx_probe_nic(struct efx_nic *efx) ...@@ -1136,6 +1137,8 @@ static int efx_probe_nic(struct efx_nic *efx)
if (efx->n_channels > 1) if (efx->n_channels > 1)
get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key)); get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
efx->rx_indir_table[i] = i % efx->n_rx_channels;
efx_set_channels(efx); efx_set_channels(efx);
efx->net_dev->real_num_tx_queues = efx->n_tx_channels; efx->net_dev->real_num_tx_queues = efx->n_tx_channels;
......
...@@ -868,6 +868,93 @@ extern int efx_ethtool_reset(struct net_device *net_dev, u32 *flags) ...@@ -868,6 +868,93 @@ extern int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
return efx_reset(efx, method); return efx_reset(efx, method);
} }
/* Report RX flow-hash configuration to ethtool.
 *
 * Handles two queries:
 *  - ETHTOOL_GRXRINGS: number of RX rings available for hashing.
 *  - ETHTOOL_GRXFH: which packet fields are hashed for a given flow type,
 *    subject to the hardware revision supporting that flow type.
 *
 * Returns 0 on success or -EOPNOTSUPP for unhandled commands.
 */
static int
efx_ethtool_get_rxnfc(struct net_device *net_dev,
		      struct ethtool_rxnfc *info, void *rules __always_unused)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = efx->n_rx_channels;
		return 0;

	case ETHTOOL_GRXFH: {
		/* Minimum hardware revision able to hash this flow type;
		 * 0 means any revision qualifies.
		 */
		unsigned min_rev = 0;

		info->data = 0;
		switch (info->flow_type) {
		case TCP_V4_FLOW:
			/* TCP/IPv4: both port and address fields are hashed */
			info->data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 |
				      RXH_IP_SRC | RXH_IP_DST);
			min_rev = EFX_REV_FALCON_B0;
			break;
		case UDP_V4_FLOW:
		case SCTP_V4_FLOW:
		case AH_ESP_V4_FLOW:
		case IPV4_FLOW:
			/* Other IPv4: only the address fields are hashed */
			info->data = RXH_IP_SRC | RXH_IP_DST;
			min_rev = EFX_REV_FALCON_B0;
			break;
		case TCP_V6_FLOW:
			/* TCP/IPv6: both port and address fields are hashed */
			info->data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 |
				      RXH_IP_SRC | RXH_IP_DST);
			min_rev = EFX_REV_SIENA_A0;
			break;
		case UDP_V6_FLOW:
		case SCTP_V6_FLOW:
		case AH_ESP_V6_FLOW:
		case IPV6_FLOW:
			/* Other IPv6: only the address fields are hashed */
			info->data = RXH_IP_SRC | RXH_IP_DST;
			min_rev = EFX_REV_SIENA_A0;
			break;
		default:
			break;
		}

		/* Hardware too old for this flow type: nothing is hashed */
		if (efx_nic_rev(efx) < min_rev)
			info->data = 0;
		return 0;
	}

	default:
		return -EOPNOTSUPP;
	}
}
/* Copy the RSS indirection table out to ethtool.
 *
 * Reports the full table size in @indir->size and fills as many entries
 * as the caller's buffer can hold.  Returns -EOPNOTSUPP on hardware
 * without RSS support (pre-Falcon-B0), otherwise 0.
 */
static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev,
				      struct ethtool_rxfh_indir *indir)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	size_t n_copy;

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
		return -EOPNOTSUPP;

	/* Never copy more entries than the caller provided room for */
	n_copy = indir->size;
	if (n_copy > ARRAY_SIZE(efx->rx_indir_table))
		n_copy = ARRAY_SIZE(efx->rx_indir_table);

	indir->size = ARRAY_SIZE(efx->rx_indir_table);
	memcpy(indir->ring_index, efx->rx_indir_table,
	       n_copy * sizeof(indir->ring_index[0]));
	return 0;
}
/* Replace the RSS indirection table with a caller-supplied one and push
 * it to the hardware.
 *
 * The supplied table must be exactly full-sized and every entry must
 * name an existing RX channel; partial updates are rejected with
 * -EINVAL.  Returns -EOPNOTSUPP on hardware without RSS support
 * (pre-Falcon-B0), otherwise 0.
 */
static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev,
				      const struct ethtool_rxfh_indir *indir)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	size_t entry;

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
		return -EOPNOTSUPP;

	/* Only whole-table updates are supported */
	if (indir->size != ARRAY_SIZE(efx->rx_indir_table))
		return -EINVAL;

	/* Every entry must reference a valid RX channel */
	for (entry = 0; entry < ARRAY_SIZE(efx->rx_indir_table); entry++) {
		if (indir->ring_index[entry] >= efx->n_rx_channels)
			return -EINVAL;
	}

	memcpy(efx->rx_indir_table, indir->ring_index,
	       sizeof(efx->rx_indir_table));
	efx_nic_push_rx_indir_table(efx);
	return 0;
}
const struct ethtool_ops efx_ethtool_ops = { const struct ethtool_ops efx_ethtool_ops = {
.get_settings = efx_ethtool_get_settings, .get_settings = efx_ethtool_get_settings,
.set_settings = efx_ethtool_set_settings, .set_settings = efx_ethtool_set_settings,
...@@ -905,4 +992,7 @@ const struct ethtool_ops efx_ethtool_ops = { ...@@ -905,4 +992,7 @@ const struct ethtool_ops efx_ethtool_ops = {
.get_wol = efx_ethtool_get_wol, .get_wol = efx_ethtool_get_wol,
.set_wol = efx_ethtool_set_wol, .set_wol = efx_ethtool_set_wol,
.reset = efx_ethtool_reset, .reset = efx_ethtool_reset,
.get_rxnfc = efx_ethtool_get_rxnfc,
.get_rxfh_indir = efx_ethtool_get_rxfh_indir,
.set_rxfh_indir = efx_ethtool_set_rxfh_indir,
}; };
...@@ -648,6 +648,7 @@ union efx_multicast_hash { ...@@ -648,6 +648,7 @@ union efx_multicast_hash {
* @n_tx_channels: Number of channels used for TX * @n_tx_channels: Number of channels used for TX
* @rx_buffer_len: RX buffer length * @rx_buffer_len: RX buffer length
* @rx_buffer_order: Order (log2) of number of pages for each RX buffer * @rx_buffer_order: Order (log2) of number of pages for each RX buffer
* @rx_indir_table: Indirection table for RSS
* @int_error_count: Number of internal errors seen recently * @int_error_count: Number of internal errors seen recently
* @int_error_expire: Time at which error count will be expired * @int_error_expire: Time at which error count will be expired
* @irq_status: Interrupt status buffer * @irq_status: Interrupt status buffer
...@@ -736,6 +737,7 @@ struct efx_nic { ...@@ -736,6 +737,7 @@ struct efx_nic {
unsigned int rx_buffer_len; unsigned int rx_buffer_len;
unsigned int rx_buffer_order; unsigned int rx_buffer_order;
u8 rx_hash_key[40]; u8 rx_hash_key[40];
u32 rx_indir_table[128];
unsigned int_error_count; unsigned int_error_count;
unsigned long int_error_expire; unsigned long int_error_expire;
......
...@@ -1484,22 +1484,21 @@ static irqreturn_t efx_msi_interrupt(int irq, void *dev_id) ...@@ -1484,22 +1484,21 @@ static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
/* Setup RSS indirection table. /* Setup RSS indirection table.
* This maps from the hash value of the packet to RXQ * This maps from the hash value of the packet to RXQ
*/ */
static void efx_setup_rss_indir_table(struct efx_nic *efx) void efx_nic_push_rx_indir_table(struct efx_nic *efx)
{ {
int i = 0; size_t i = 0;
unsigned long offset;
efx_dword_t dword; efx_dword_t dword;
if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
return; return;
for (offset = FR_BZ_RX_INDIRECTION_TBL; BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
offset < FR_BZ_RX_INDIRECTION_TBL + 0x800; FR_BZ_RX_INDIRECTION_TBL_ROWS);
offset += 0x10) {
for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE, EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
i % efx->n_rx_channels); efx->rx_indir_table[i]);
efx_writed(efx, &dword, offset); efx_writed_table(efx, &dword, FR_BZ_RX_INDIRECTION_TBL, i);
i++;
} }
} }
...@@ -1634,7 +1633,7 @@ void efx_nic_init_common(struct efx_nic *efx) ...@@ -1634,7 +1633,7 @@ void efx_nic_init_common(struct efx_nic *efx)
EFX_INVERT_OWORD(temp); EFX_INVERT_OWORD(temp);
efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER); efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
efx_setup_rss_indir_table(efx); efx_nic_push_rx_indir_table(efx);
/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
* controlled by the RX FIFO fill level. Set arbitration to one pkt/Q. * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
......
...@@ -207,6 +207,7 @@ extern void falcon_stop_nic_stats(struct efx_nic *efx); ...@@ -207,6 +207,7 @@ extern void falcon_stop_nic_stats(struct efx_nic *efx);
extern void falcon_setup_xaui(struct efx_nic *efx); extern void falcon_setup_xaui(struct efx_nic *efx);
extern int falcon_reset_xaui(struct efx_nic *efx); extern int falcon_reset_xaui(struct efx_nic *efx);
extern void efx_nic_init_common(struct efx_nic *efx); extern void efx_nic_init_common(struct efx_nic *efx);
extern void efx_nic_push_rx_indir_table(struct efx_nic *efx);
int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
unsigned int len); unsigned int len);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment