Commit 8c8278a5 authored by David S. Miller

Merge branch 'sfc-prerequisites-for-EF100-driver-part-3'

Edward Cree says:

====================
sfc: prerequisites for EF100 driver, part 3

Continuing on from [1] and [2], this series assembles the last pieces
 of the common codebase that will be used by the forthcoming EF100
 driver.
Patch #1 also adds a minor feature to EF10 (setting MTU on VFs) since
 EF10 supports the same MCDI extension which that feature will use on
 EF100.
Patches #5 & #7, while they should have no externally-visible effect
 on driver functionality, change how that functionality is implemented
 and how the driver represents TXQ configuration internally; they are
 therefore not mere cleanup/refactoring, as most of these prerequisites
 (from the perspective of the existing sfc driver) have been.

Changes in v2:
* Patch #1: use efx_mcdi_set_mtu() directly, instead of as a fallback,
  in the mtu_only case (Jakub)
* Patch #3: fix symbol collision in non-modular builds by renaming
  interrupt_mode to efx_interrupt_mode (kernel test robot)
* Patch #6: check for failure of netif_set_real_num_[tr]x_queues (Jakub)
* Patch #12: cleaner solution for ethtool drvinfo (Jakub, David)

[1]: https://lore.kernel.org/netdev/20200629.173812.1532344417590172093.davem@davemloft.net/T/
[2]: https://lore.kernel.org/netdev/20200630.130923.402514193016248355.davem@davemloft.net/T/
====================
Reviewed-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parents e44f65fd b6d02dd2
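
As context for patch #1 and the v2 note above: a minimal sketch of the MAC-reconfigure dispatch this series settles on. The helpers named here (efx_mcdi_filter_sync_rx_mode(), efx_has_cap(), efx_mcdi_set_mtu(), efx_mcdi_set_mac()) are taken from the diff below; the wrapper function name itself is illustrative only, not the driver's.

static int mac_reconfigure_sketch(struct efx_nic *efx, bool mtu_only)
{
	/* Resync RX filters before touching the MAC. */
	efx_mcdi_filter_sync_rx_mode(efx);

	/* v2 change: when only the MTU changed and the firmware
	 * advertises SET_MAC_ENHANCED, use efx_mcdi_set_mtu()
	 * directly rather than as a fallback.
	 */
	if (mtu_only && efx_has_cap(efx, SET_MAC_ENHANCED))
		return efx_mcdi_set_mtu(efx);
	return efx_mcdi_set_mac(efx);
}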
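
Likewise for patches #5 & #7: each TX queue now carries both a per-channel label (what the hardware echoes in completion events) and a globally assigned hardware queue instance, instead of deriving a single number as channel * EFX_TXQ_TYPES + type. A sketch of the new numbering, using the fields and iterators from the diff below; the function name is illustrative:

static void assign_txq_instances_sketch(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_channel *channel;
	unsigned int next_queue = 0;

	/* tx_queue->label was set to the queue's index within its
	 * channel at allocation time; only the hardware instance is
	 * handed out here, sequentially, so channels need not all
	 * own EFX_TXQ_TYPES queues.
	 */
	efx_for_each_channel(channel, efx)
		efx_for_each_channel_tx_queue(tx_queue, channel)
			tx_queue->queue = next_queue++;
}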
@@ -552,8 +552,6 @@ static int efx_ef10_probe(struct efx_nic *efx)
 	}
 	nic_data->warm_boot_count = rc;
 
-	efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
-
 	/* In case we're recovering from a crash (kexec), we want to
 	 * cancel any outstanding request by the previous user of this
 	 * function. We send a special message using the least
@@ -600,6 +598,7 @@ static int efx_ef10_probe(struct efx_nic *efx)
 	 * However, until we use TX option descriptors we need two TX queues
 	 * per channel.
 	 */
+	efx->tx_queues_per_channel = 2;
 	efx->max_vis = efx_ef10_mem_map_size(efx) / efx->vi_stride;
 	if (!efx->max_vis) {
 		netif_err(efx, drv, efx->net_dev, "error determining max VIs\n");
@@ -607,7 +606,7 @@ static int efx_ef10_probe(struct efx_nic *efx)
 		goto fail5;
 	}
 	efx->max_channels = min_t(unsigned int, EFX_MAX_CHANNELS,
-				  efx->max_vis / EFX_TXQ_TYPES);
+				  efx->max_vis / efx->tx_queues_per_channel);
 	efx->max_tx_channels = efx->max_channels;
 	if (WARN_ON(efx->max_channels == 0)) {
 		rc = -EIO;
@@ -1120,17 +1119,17 @@ static int efx_ef10_alloc_vis(struct efx_nic *efx,
  */
 static int efx_ef10_dimension_resources(struct efx_nic *efx)
 {
-	struct efx_ef10_nic_data *nic_data = efx->nic_data;
-	unsigned int uc_mem_map_size, wc_mem_map_size;
-	unsigned int min_vis = max(EFX_TXQ_TYPES,
-				   efx_separate_tx_channels ? 2 : 1);
+	unsigned int min_vis = max_t(unsigned int, efx->tx_queues_per_channel,
+				     efx_separate_tx_channels ? 2 : 1);
 	unsigned int channel_vis, pio_write_vi_base, max_vis;
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+	unsigned int uc_mem_map_size, wc_mem_map_size;
 	void __iomem *membase;
 	int rc;
 
 	channel_vis = max(efx->n_channels,
 			  ((efx->n_tx_channels + efx->n_extra_tx_channels) *
-			   EFX_TXQ_TYPES) +
+			   efx->tx_queues_per_channel) +
 			   efx->n_xdp_channels * efx->xdp_tx_per_channel);
 	if (efx->max_vis && efx->max_vis < channel_vis) {
 		netif_dbg(efx, drv, efx->net_dev,
@@ -1219,7 +1218,7 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx)
 		 */
 		efx->max_channels = nic_data->n_allocated_vis;
 		efx->max_tx_channels =
-			nic_data->n_allocated_vis / EFX_TXQ_TYPES;
+			nic_data->n_allocated_vis / efx->tx_queues_per_channel;
 		efx_mcdi_free_vis(efx);
 		return -EAGAIN;
@@ -2243,7 +2242,7 @@ static u32 efx_ef10_tso_versions(struct efx_nic *efx)
 static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
 {
-	bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
+	bool csum_offload = tx_queue->label & EFX_TXQ_TYPE_OFFLOAD;
 	struct efx_channel *channel = tx_queue->channel;
 	struct efx_nic *efx = tx_queue->efx;
 	struct efx_ef10_nic_data *nic_data;
@@ -3116,44 +3115,6 @@ static void efx_ef10_ev_test_generate(struct efx_channel *channel)
 		netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
 }
 
-static int efx_ef10_fini_dmaq(struct efx_nic *efx)
-{
-	struct efx_tx_queue *tx_queue;
-	struct efx_rx_queue *rx_queue;
-	struct efx_channel *channel;
-	int pending;
-
-	/* If the MC has just rebooted, the TX/RX queues will have already been
-	 * torn down, but efx->active_queues needs to be set to zero.
-	 */
-	if (efx->must_realloc_vis) {
-		atomic_set(&efx->active_queues, 0);
-		return 0;
-	}
-
-	/* Do not attempt to write to the NIC during EEH recovery */
-	if (efx->state != STATE_RECOVERY) {
-		efx_for_each_channel(channel, efx) {
-			efx_for_each_channel_rx_queue(rx_queue, channel)
-				efx_mcdi_rx_fini(rx_queue);
-			efx_for_each_channel_tx_queue(tx_queue, channel)
-				efx_mcdi_tx_fini(tx_queue);
-		}
-		wait_event_timeout(efx->flush_wq,
-				   atomic_read(&efx->active_queues) == 0,
-				   msecs_to_jiffies(EFX_MAX_FLUSH_TIME));
-		pending = atomic_read(&efx->active_queues);
-		if (pending) {
-			netif_err(efx, hw, efx->net_dev, "failed to flush %d queues\n",
-				  pending);
-			return -ETIMEDOUT;
-		}
-	}
-
-	return 0;
-}
-
 static void efx_ef10_prepare_flr(struct efx_nic *efx)
 {
 	atomic_set(&efx->active_queues, 0);
@@ -3306,18 +3267,15 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
 	return rc;
 }
 
-static int efx_ef10_mac_reconfigure(struct efx_nic *efx)
+static int efx_ef10_mac_reconfigure(struct efx_nic *efx, bool mtu_only)
 {
-	efx_mcdi_filter_sync_rx_mode(efx);
-
-	return efx_mcdi_set_mac(efx);
-}
-
-static int efx_ef10_mac_reconfigure_vf(struct efx_nic *efx)
-{
+	WARN_ON(!mutex_is_locked(&efx->mac_lock));
+
 	efx_mcdi_filter_sync_rx_mode(efx);
 
-	return 0;
+	if (mtu_only && efx_has_cap(efx, SET_MAC_ENHANCED))
+		return efx_mcdi_set_mtu(efx);
+	return efx_mcdi_set_mac(efx);
 }
 
 static int efx_ef10_start_bist(struct efx_nic *efx, u32 bist_type)
@@ -4028,7 +3986,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
 	.reset = efx_ef10_reset,
 	.probe_port = efx_mcdi_port_probe,
 	.remove_port = efx_mcdi_port_remove,
-	.fini_dmaq = efx_ef10_fini_dmaq,
+	.fini_dmaq = efx_fini_dmaq,
 	.prepare_flr = efx_ef10_prepare_flr,
 	.finish_flr = efx_port_dummy_op_void,
 	.describe_stats = efx_ef10_describe_stats,
@@ -4038,7 +3996,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
 	.stop_stats = efx_port_dummy_op_void,
 	.set_id_led = efx_mcdi_set_id_led,
 	.push_irq_moderation = efx_ef10_push_irq_moderation,
-	.reconfigure_mac = efx_ef10_mac_reconfigure_vf,
+	.reconfigure_mac = efx_ef10_mac_reconfigure,
 	.check_mac_fault = efx_mcdi_mac_check_fault,
 	.reconfigure_port = efx_mcdi_port_reconfigure,
 	.get_wol = efx_ef10_get_wol_vf,
@@ -4111,7 +4069,6 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
 	.can_rx_scatter = true,
 	.always_rx_scatter = true,
 	.min_interrupt_mode = EFX_INT_MODE_MSIX,
-	.max_interrupt_mode = EFX_INT_MODE_MSIX,
 	.timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
 	.offload_features = EF10_OFFLOAD_FEATURES,
 	.mcdi_max_ver = 2,
@@ -4137,7 +4094,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
 	.reset = efx_ef10_reset,
 	.probe_port = efx_mcdi_port_probe,
 	.remove_port = efx_mcdi_port_remove,
-	.fini_dmaq = efx_ef10_fini_dmaq,
+	.fini_dmaq = efx_fini_dmaq,
 	.prepare_flr = efx_ef10_prepare_flr,
 	.finish_flr = efx_port_dummy_op_void,
 	.describe_stats = efx_ef10_describe_stats,
@@ -4248,7 +4205,6 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
 	.always_rx_scatter = true,
 	.option_descriptors = true,
 	.min_interrupt_mode = EFX_INT_MODE_LEGACY,
-	.max_interrupt_mode = EFX_INT_MODE_MSIX,
 	.timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
 	.offload_features = EF10_OFFLOAD_FEATURES,
 	.mcdi_max_ver = 2,
...
@@ -64,6 +64,13 @@ void efx_get_udp_tunnel_type_name(u16 type, char *buf, size_t buflen)
  *
  *************************************************************************/
 
+module_param_named(interrupt_mode, efx_interrupt_mode, uint, 0444);
+MODULE_PARM_DESC(interrupt_mode,
+		 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
+
+module_param(rss_cpus, uint, 0444);
+MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
+
 /*
  * Use separate channels for TX and RX events
  *
@@ -169,10 +176,6 @@ static int efx_init_port(struct efx_nic *efx)
 	efx->port_initialized = true;
 
-	/* Reconfigure the MAC before creating dma queues (required for
-	 * Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */
-	efx_mac_reconfigure(efx);
-
 	/* Ensure the PHY advertises the correct flow control settings */
 	rc = efx->phy_op->reconfigure(efx);
 	if (rc && rc != -EPERM)
@@ -333,9 +336,6 @@ static int efx_probe_nic(struct efx_nic *efx)
 			      sizeof(efx->rss_context.rx_hash_key));
 	efx_set_default_rx_indir_table(efx, &efx->rss_context);
 
-	netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
-	netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
-
 	/* Initialise the interrupt moderation settings */
 	efx->irq_mod_step_us = DIV_ROUND_UP(efx->timer_quantum_ns, 1000);
 	efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true,
...
@@ -23,10 +23,7 @@
  * 1 => MSI
  * 2 => legacy
  */
-static unsigned int interrupt_mode;
-module_param(interrupt_mode, uint, 0444);
-MODULE_PARM_DESC(interrupt_mode,
-		 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
+unsigned int efx_interrupt_mode = EFX_INT_MODE_MSIX;
 
 /* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
  * i.e. the number of CPUs among which we may distribute simultaneous
@@ -35,9 +32,7 @@ MODULE_PARM_DESC(interrupt_mode,
 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
 * The default (0) means to assign an interrupt to each core.
 */
-static unsigned int rss_cpus;
-module_param(rss_cpus, uint, 0444);
-MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
+unsigned int rss_cpus;
 
 static unsigned int irq_adapt_low_thresh = 8000;
 module_param(irq_adapt_low_thresh, uint, 0644);
@@ -529,7 +524,8 @@ efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
 	for (j = 0; j < EFX_TXQ_TYPES; j++) {
 		tx_queue = &channel->tx_queue[j];
 		tx_queue->efx = efx;
-		tx_queue->queue = i * EFX_TXQ_TYPES + j;
+		tx_queue->queue = -1;
+		tx_queue->label = j;
 		tx_queue->channel = channel;
 	}
@@ -557,14 +553,8 @@ int efx_init_channels(struct efx_nic *efx)
 	}
 
 	/* Higher numbered interrupt modes are less capable! */
-	if (WARN_ON_ONCE(efx->type->max_interrupt_mode >
-			 efx->type->min_interrupt_mode)) {
-		return -EIO;
-	}
-	efx->interrupt_mode = max(efx->type->max_interrupt_mode,
-				  interrupt_mode);
 	efx->interrupt_mode = min(efx->type->min_interrupt_mode,
-				  interrupt_mode);
+				  efx_interrupt_mode);
 
 	efx->max_channels = EFX_MAX_CHANNELS;
 	efx->max_tx_channels = EFX_MAX_CHANNELS;
@@ -737,7 +727,7 @@ void efx_remove_channel(struct efx_channel *channel)
 	efx_for_each_channel_rx_queue(rx_queue, channel)
 		efx_remove_rx_queue(rx_queue);
-	efx_for_each_possible_channel_tx_queue(tx_queue, channel)
+	efx_for_each_channel_tx_queue(tx_queue, channel)
 		efx_remove_tx_queue(tx_queue);
 	efx_remove_eventq(channel);
 	channel->type->post_remove(channel);
@@ -864,9 +854,11 @@ int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
 int efx_set_channels(struct efx_nic *efx)
 {
-	struct efx_channel *channel;
 	struct efx_tx_queue *tx_queue;
+	struct efx_channel *channel;
+	unsigned int next_queue = 0;
 	int xdp_queue_number;
+	int rc;
 
 	efx->tx_channel_offset =
 		efx_separate_tx_channels ?
@@ -894,18 +886,38 @@ int efx_set_channels(struct efx_nic *efx)
 		else
 			channel->rx_queue.core_index = -1;
 
-		if (channel->channel >= efx->tx_channel_offset) {
+		if (efx_channel_is_xdp_tx(channel)) {
 			efx_for_each_channel_tx_queue(tx_queue, channel) {
-				tx_queue->queue -= (efx->tx_channel_offset *
-						    EFX_TXQ_TYPES);
-
-				if (efx_channel_is_xdp_tx(channel) &&
-				    xdp_queue_number < efx->xdp_tx_queue_count) {
-					efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
-					xdp_queue_number++;
-				}
+				tx_queue->queue = next_queue++;
+				netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n",
+					  channel->channel, tx_queue->label,
+					  xdp_queue_number, tx_queue->queue);
+				/* We may have a few left-over XDP TX
+				 * queues owing to xdp_tx_queue_count
+				 * not dividing evenly by EFX_TXQ_TYPES.
+				 * We still allocate and probe those
+				 * TXQs, but never use them.
+				 */
+				if (xdp_queue_number < efx->xdp_tx_queue_count)
+					efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
+				xdp_queue_number++;
+			}
+		} else {
+			efx_for_each_channel_tx_queue(tx_queue, channel) {
+				tx_queue->queue = next_queue++;
+				netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is HW %u\n",
+					  channel->channel, tx_queue->label,
+					  tx_queue->queue);
+			}
 		}
 	}
-	return 0;
+
+	rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
+	if (rc)
+		return rc;
+
+	return netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
 }
 
 bool efx_default_channel_want_txqs(struct efx_channel *channel)
@@ -1101,7 +1113,7 @@ void efx_stop_channels(struct efx_nic *efx)
 	efx_for_each_channel(channel, efx) {
 		efx_for_each_channel_rx_queue(rx_queue, channel)
 			efx_fini_rx_queue(rx_queue);
-		efx_for_each_possible_channel_tx_queue(tx_queue, channel)
+		efx_for_each_channel_tx_queue(tx_queue, channel)
 			efx_fini_tx_queue(tx_queue);
 	}
 }
...
@@ -11,6 +11,9 @@
 #ifndef EFX_CHANNELS_H
 #define EFX_CHANNELS_H
 
+extern unsigned int efx_interrupt_mode;
+extern unsigned int rss_cpus;
+
 int efx_probe_interrupts(struct efx_nic *efx);
 void efx_remove_interrupts(struct efx_nic *efx);
 int efx_soft_enable_interrupts(struct efx_nic *efx);
...
@@ -139,11 +139,11 @@ void efx_destroy_reset_workqueue(void)
 /* We assume that efx->type->reconfigure_mac will always try to sync RX
  * filters and therefore needs to read-lock the filter table against freeing
  */
-void efx_mac_reconfigure(struct efx_nic *efx)
+void efx_mac_reconfigure(struct efx_nic *efx, bool mtu_only)
 {
 	if (efx->type->reconfigure_mac) {
 		down_read(&efx->filter_sem);
-		efx->type->reconfigure_mac(efx);
+		efx->type->reconfigure_mac(efx, mtu_only);
 		up_read(&efx->filter_sem);
 	}
 }
@@ -158,7 +158,7 @@ static void efx_mac_work(struct work_struct *data)
 
 	mutex_lock(&efx->mac_lock);
 	if (efx->port_enabled)
-		efx_mac_reconfigure(efx);
+		efx_mac_reconfigure(efx, false);
 	mutex_unlock(&efx->mac_lock);
 }
@@ -190,7 +190,7 @@ int efx_set_mac_address(struct net_device *net_dev, void *data)
 	/* Reconfigure the MAC */
 	mutex_lock(&efx->mac_lock);
-	efx_mac_reconfigure(efx);
+	efx_mac_reconfigure(efx, false);
 	mutex_unlock(&efx->mac_lock);
 
 	return 0;
@@ -304,7 +304,7 @@ int efx_change_mtu(struct net_device *net_dev, int new_mtu)
 	mutex_lock(&efx->mac_lock);
 	net_dev->mtu = new_mtu;
-	efx_mac_reconfigure(efx);
+	efx_mac_reconfigure(efx, true);
 	mutex_unlock(&efx->mac_lock);
 
 	efx_start_all(efx);
@@ -486,7 +486,7 @@ static void efx_start_port(struct efx_nic *efx)
 	efx->port_enabled = true;
 
 	/* Ensure MAC ingress/egress is enabled */
-	efx_mac_reconfigure(efx);
+	efx_mac_reconfigure(efx, false);
 
 	mutex_unlock(&efx->mac_lock);
 }
@@ -1017,6 +1017,7 @@ int efx_init_struct(struct efx_nic *efx,
 	efx->rx_packet_ts_offset =
 		efx->type->rx_ts_offset - efx->type->rx_prefix_size;
 	INIT_LIST_HEAD(&efx->rss_context.list);
+	efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
 	mutex_init(&efx->rss_lock);
 	efx->vport_id = EVB_PORT_ID_ASSIGNED;
 	spin_lock_init(&efx->stats_lock);
@@ -1036,6 +1037,7 @@ int efx_init_struct(struct efx_nic *efx,
 	INIT_WORK(&efx->mac_work, efx_mac_work);
 	init_waitqueue_head(&efx->flush_wq);
 
+	efx->tx_queues_per_channel = 1;
 	efx->rxq_entries = EFX_DEFAULT_DMAQ_SIZE;
 	efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;
...
@@ -95,7 +95,7 @@ static inline void efx_init_mcdi_logging(struct efx_nic *efx) {}
 static inline void efx_fini_mcdi_logging(struct efx_nic *efx) {}
 #endif
 
-void efx_mac_reconfigure(struct efx_nic *efx);
+void efx_mac_reconfigure(struct efx_nic *efx, bool mtu_only);
 int efx_set_mac_address(struct net_device *net_dev, void *data);
 void efx_set_rx_mode(struct net_device *net_dev);
 int efx_set_features(struct net_device *net_dev, netdev_features_t data);
...
@@ -221,6 +221,8 @@ static int efx_ethtool_get_ts_info(struct net_device *net_dev,
 	return 0;
 }
 
+const char *efx_driver_name = KBUILD_MODNAME;
+
 const struct ethtool_ops efx_ethtool_ops = {
 	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
 				     ETHTOOL_COALESCE_USECS_IRQ |
...
@@ -104,7 +104,7 @@ void efx_ethtool_get_drvinfo(struct net_device *net_dev,
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
 
-	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+	strlcpy(info->driver, efx_driver_name, sizeof(info->driver));
 	strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version));
 	efx_mcdi_print_fwver(efx, info->fw_version,
 			     sizeof(info->fw_version));
@@ -241,7 +241,7 @@ int efx_ethtool_set_pauseparam(struct net_device *net_dev,
 	/* Reconfigure the MAC. The PHY *may* generate a link state change event
 	 * if the user just changed the advertised capabilities, but there's no
 	 * harm doing this twice */
-	efx_mac_reconfigure(efx);
+	efx_mac_reconfigure(efx, false);
 
 out:
 	mutex_unlock(&efx->mac_lock);
@@ -287,8 +287,7 @@ static void efx_fill_test(unsigned int test_index, u8 *strings, u64 *data,
 }
 
 #define EFX_CHANNEL_NAME(_channel) "chan%d", _channel->channel
-#define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->queue
-#define EFX_RX_QUEUE_NAME(_rx_queue) "rxq%d", _rx_queue->queue
+#define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->label
 #define EFX_LOOPBACK_NAME(_mode, _counter) \
 	"loopback.%s." _counter, STRING_TABLE_LOOKUP(_mode, efx_loopback_mode)
@@ -316,11 +315,11 @@ static int efx_fill_loopback_test(struct efx_nic *efx,
 	efx_for_each_channel_tx_queue(tx_queue, channel) {
 		efx_fill_test(test_index++, strings, data,
-			      &lb_tests->tx_sent[tx_queue->queue],
+			      &lb_tests->tx_sent[tx_queue->label],
 			      EFX_TX_QUEUE_NAME(tx_queue),
 			      EFX_LOOPBACK_NAME(mode, "tx_sent"));
 		efx_fill_test(test_index++, strings, data,
-			      &lb_tests->tx_done[tx_queue->queue],
+			      &lb_tests->tx_done[tx_queue->label],
 			      EFX_TX_QUEUE_NAME(tx_queue),
 			      EFX_LOOPBACK_NAME(mode, "tx_done"));
 	}
...
@@ -11,6 +11,8 @@
 #ifndef EFX_ETHTOOL_COMMON_H
 #define EFX_ETHTOOL_COMMON_H
 
+extern const char *efx_driver_name;
+
 void efx_ethtool_get_drvinfo(struct net_device *net_dev,
 			     struct ethtool_drvinfo *info);
 u32 efx_ethtool_get_msglevel(struct net_device *net_dev);
...
@@ -379,7 +379,7 @@ int efx_farch_tx_probe(struct efx_tx_queue *tx_queue)
 void efx_farch_tx_init(struct efx_tx_queue *tx_queue)
 {
-	int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
+	int csum = tx_queue->label & EFX_TXQ_TYPE_OFFLOAD;
 	struct efx_nic *efx = tx_queue->efx;
 	efx_oword_t reg;
@@ -395,7 +395,7 @@ void efx_farch_tx_init(struct efx_tx_queue *tx_queue)
 			      FRF_AZ_TX_DESCQ_EVQ_ID,
 			      tx_queue->channel->channel,
 			      FRF_AZ_TX_DESCQ_OWNER_ID, 0,
-			      FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
+			      FRF_AZ_TX_DESCQ_LABEL, tx_queue->label,
 			      FRF_AZ_TX_DESCQ_SIZE,
 			      __ffs(tx_queue->txd.entries),
 			      FRF_AZ_TX_DESCQ_TYPE, 0,
@@ -409,7 +409,7 @@ void efx_farch_tx_init(struct efx_tx_queue *tx_queue)
 		EFX_POPULATE_OWORD_1(reg,
 				     FRF_BZ_TX_PACE,
-				     (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
+				     (tx_queue->label & EFX_TXQ_TYPE_HIGHPRI) ?
 				     FFE_BZ_TX_PACE_OFF :
 				     FFE_BZ_TX_PACE_RESERVED);
 		efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL, tx_queue->queue);
...
@@ -1621,6 +1621,35 @@ int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out)
 	return rc;
 }
 
+/* This function finds types using the new NVRAM_PARTITIONS mcdi. */
+static int efx_new_mcdi_nvram_types(struct efx_nic *efx, u32 *number,
+				    u32 *nvram_types)
+{
+	efx_dword_t *outbuf = kzalloc(MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX_MCDI2,
+				      GFP_KERNEL);
+	size_t outlen;
+	int rc;
+
+	if (!outbuf)
+		return -ENOMEM;
+
+	BUILD_BUG_ON(MC_CMD_NVRAM_PARTITIONS_IN_LEN != 0);
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_PARTITIONS, NULL, 0,
+			  outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX_MCDI2, &outlen);
+	if (rc)
+		goto fail;
+
+	*number = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS);
+
+	memcpy(nvram_types, MCDI_PTR(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID),
+	       *number * sizeof(u32));
+
+fail:
+	kfree(outbuf);
+	return rc;
+}
+
 int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
 			size_t *size_out, size_t *erase_size_out,
 			bool *protected_out)
@@ -1674,6 +1703,39 @@ static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type)
 	}
 }
 
+/* This function tests nvram partitions using the new mcdi partition lookup scheme */
+int efx_new_mcdi_nvram_test_all(struct efx_nic *efx)
+{
+	u32 *nvram_types = kzalloc(MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX_MCDI2,
+				   GFP_KERNEL);
+	unsigned int number;
+	int rc, i;
+
+	if (!nvram_types)
+		return -ENOMEM;
+
+	rc = efx_new_mcdi_nvram_types(efx, &number, nvram_types);
+	if (rc)
+		goto fail;
+
+	/* Require at least one check */
+	rc = -EAGAIN;
+
+	for (i = 0; i < number; i++) {
+		if (nvram_types[i] == NVRAM_PARTITION_TYPE_PARTITION_MAP ||
+		    nvram_types[i] == NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG)
+			continue;
+
+		rc = efx_mcdi_nvram_test(efx, nvram_types[i]);
+		if (rc)
+			goto fail;
+	}
+
+fail:
+	kfree(nvram_types);
+	return rc;
+}
+
 int efx_mcdi_nvram_test_all(struct efx_nic *efx)
 {
 	u32 nvram_types;
...
@@ -345,6 +345,7 @@ int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out);
 int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
 			size_t *size_out, size_t *erase_size_out,
 			bool *protected_out);
+int efx_new_mcdi_nvram_test_all(struct efx_nic *efx);
 int efx_mcdi_nvram_test_all(struct efx_nic *efx);
 int efx_mcdi_handle_assertion(struct efx_nic *efx);
 void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
...
@@ -1459,7 +1459,7 @@ void efx_mcdi_filter_table_restore(struct efx_nic *efx)
 	table->must_restore_filters = false;
 }
 
-void efx_mcdi_filter_table_remove(struct efx_nic *efx)
+void efx_mcdi_filter_table_down(struct efx_nic *efx)
 {
 	struct efx_mcdi_filter_table *table = efx->filter_state;
 	MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
@@ -1467,21 +1467,11 @@ void efx_mcdi_filter_table_remove(struct efx_nic *efx)
 	unsigned int filter_idx;
 	int rc;
 
-	efx_mcdi_filter_cleanup_vlans(efx);
-	efx->filter_state = NULL;
-	/*
-	 * If we were called without locking, then it's not safe to free
-	 * the table as others might be using it.  So we just WARN, leak
-	 * the memory, and potentially get an inconsistent filter table
-	 * state.
-	 * This should never actually happen.
-	 */
-	if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
-		return;
-
 	if (!table)
 		return;
 
+	efx_mcdi_filter_cleanup_vlans(efx);
+
 	for (filter_idx = 0; filter_idx < EFX_MCDI_FILTER_TBL_ROWS; filter_idx++) {
 		spec = efx_mcdi_filter_entry_spec(table, filter_idx);
 		if (!spec)
@@ -1501,6 +1491,27 @@ void efx_mcdi_filter_table_remove(struct efx_nic *efx)
 			   __func__, filter_idx);
 		kfree(spec);
 	}
+}
+
+void efx_mcdi_filter_table_remove(struct efx_nic *efx)
+{
+	struct efx_mcdi_filter_table *table = efx->filter_state;
+
+	efx_mcdi_filter_table_down(efx);
+
+	efx->filter_state = NULL;
+	/*
+	 * If we were called without locking, then it's not safe to free
+	 * the table as others might be using it.  So we just WARN, leak
+	 * the memory, and potentially get an inconsistent filter table
+	 * state.
+	 * This should never actually happen.
+	 */
+	if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
+		return;
+
+	if (!table)
+		return;
 
 	vfree(table->entry);
 	kfree(table);
@@ -2265,3 +2276,24 @@ int efx_mcdi_vf_rx_push_rss_config(struct efx_nic *efx, bool user,
 		return 0;
 	return efx_mcdi_filter_rx_push_shared_rss_config(efx, NULL);
 }
+
+int efx_mcdi_push_default_indir_table(struct efx_nic *efx,
+				      unsigned int rss_spread)
+{
+	int rc = 0;
+
+	if (efx->rss_spread == rss_spread)
+		return 0;
+
+	efx->rss_spread = rss_spread;
+	if (!efx->filter_state)
+		return 0;
+
+	efx_mcdi_rx_free_indir_table(efx);
+	if (rss_spread > 1) {
+		efx_set_default_rx_indir_table(efx, &efx->rss_context);
+		rc = efx->type->rx_push_rss_config(efx, false,
+					efx->rss_context.rx_indir_table, NULL);
+	}
+	return rc;
+}
@@ -93,6 +93,7 @@ struct efx_mcdi_filter_table {
 };
 
 int efx_mcdi_filter_table_probe(struct efx_nic *efx, bool multicast_chaining);
+void efx_mcdi_filter_table_down(struct efx_nic *efx);
 void efx_mcdi_filter_table_remove(struct efx_nic *efx);
 void efx_mcdi_filter_table_restore(struct efx_nic *efx);
@@ -154,6 +155,8 @@ int efx_mcdi_vf_rx_push_rss_config(struct efx_nic *efx, bool user,
 				   __attribute__ ((unused)),
 				   const u8 *key
 				   __attribute__ ((unused)));
+int efx_mcdi_push_default_indir_table(struct efx_nic *efx,
+				      unsigned int rss_spread);
 int efx_mcdi_rx_pull_rss_config(struct efx_nic *efx);
 int efx_mcdi_rx_pull_rss_context_config(struct efx_nic *efx,
 					struct efx_rss_context *ctx);
...
@@ -164,7 +164,7 @@ int efx_mcdi_tx_init(struct efx_tx_queue *tx_queue, bool tso_v2)
 {
 	MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
 						       EFX_BUF_SIZE));
-	bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
+	bool csum_offload = tx_queue->label & EFX_TXQ_TYPE_OFFLOAD;
 	size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE;
 	struct efx_channel *channel = tx_queue->channel;
 	struct efx_nic *efx = tx_queue->efx;
@@ -176,7 +176,7 @@ int efx_mcdi_tx_init(struct efx_tx_queue *tx_queue, bool tso_v2)
 	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1);
 	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel);
-	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->queue);
+	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->label);
 	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue);
 	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0);
 	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, efx->vport_id);
@@ -267,20 +267,22 @@ int efx_mcdi_rx_probe(struct efx_rx_queue *rx_queue)
 void efx_mcdi_rx_init(struct efx_rx_queue *rx_queue)
 {
-	MCDI_DECLARE_BUF(inbuf,
-			 MC_CMD_INIT_RXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
-						EFX_BUF_SIZE));
 	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
 	size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE;
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_RXQ_V4_IN_LEN);
 	struct efx_nic *efx = rx_queue->efx;
+	unsigned int buffer_size;
 	dma_addr_t dma_addr;
-	size_t inlen;
 	int rc;
 	int i;
 	BUILD_BUG_ON(MC_CMD_INIT_RXQ_OUT_LEN != 0);
 
 	rx_queue->scatter_n = 0;
 	rx_queue->scatter_len = 0;
+	if (efx->type->revision == EFX_REV_EF100)
+		buffer_size = efx->rx_page_buf_step;
+	else
+		buffer_size = 0;
 
 	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1);
 	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_TARGET_EVQ, channel->channel);
@@ -292,6 +294,7 @@ void efx_mcdi_rx_init(struct efx_rx_queue *rx_queue)
 			      INIT_RXQ_IN_FLAG_TIMESTAMP, 1);
 	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
 	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, efx->vport_id);
+	MCDI_SET_DWORD(inbuf, INIT_RXQ_V4_IN_BUFFER_SIZE_BYTES, buffer_size);
 
 	dma_addr = rx_queue->rxd.buf.dma_addr;
@@ -303,9 +306,7 @@ void efx_mcdi_rx_init(struct efx_rx_queue *rx_queue)
 		dma_addr += EFX_BUF_SIZE;
 	}
 
-	inlen = MC_CMD_INIT_RXQ_IN_LEN(entries);
-	rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen,
+	rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, sizeof(inbuf),
 			  NULL, 0, NULL);
 	if (rc)
 		netdev_WARN(efx->net_dev, "failed to initialise RXQ %d\n",
@@ -341,6 +342,44 @@ void efx_mcdi_rx_fini(struct efx_rx_queue *rx_queue)
 			       outbuf, outlen, rc);
 }
 
+int efx_fini_dmaq(struct efx_nic *efx)
+{
+	struct efx_tx_queue *tx_queue;
+	struct efx_rx_queue *rx_queue;
+	struct efx_channel *channel;
+	int pending;
+
+	/* If the MC has just rebooted, the TX/RX queues will have already been
+	 * torn down, but efx->active_queues needs to be set to zero.
+	 */
+	if (efx->must_realloc_vis) {
+		atomic_set(&efx->active_queues, 0);
+		return 0;
+	}
+
+	/* Do not attempt to write to the NIC during EEH recovery */
+	if (efx->state != STATE_RECOVERY) {
+		efx_for_each_channel(channel, efx) {
+			efx_for_each_channel_rx_queue(rx_queue, channel)
+				efx_mcdi_rx_fini(rx_queue);
+			efx_for_each_channel_tx_queue(tx_queue, channel)
+				efx_mcdi_tx_fini(tx_queue);
+		}
+		wait_event_timeout(efx->flush_wq,
+				   atomic_read(&efx->active_queues) == 0,
+				   msecs_to_jiffies(EFX_MAX_FLUSH_TIME));
+		pending = atomic_read(&efx->active_queues);
+		if (pending) {
+			netif_err(efx, hw, efx->net_dev, "failed to flush %d queues\n",
+				  pending);
+			return -ETIMEDOUT;
+		}
+	}
+
+	return 0;
+}
+
 int efx_mcdi_window_mode_to_stride(struct efx_nic *efx, u8 vi_window_mode)
 {
 	switch (vi_window_mode) {
...
@@ -26,6 +26,7 @@ int efx_mcdi_rx_probe(struct efx_rx_queue *rx_queue);
 void efx_mcdi_rx_init(struct efx_rx_queue *rx_queue);
 void efx_mcdi_rx_remove(struct efx_rx_queue *rx_queue);
 void efx_mcdi_rx_fini(struct efx_rx_queue *rx_queue);
+int efx_fini_dmaq(struct efx_nic *efx);
 int efx_mcdi_window_mode_to_stride(struct efx_nic *efx, u8 vi_window_mode);
 int efx_get_pf_index(struct efx_nic *efx, unsigned int *pf_index);
...
@@ -189,6 +189,8 @@ struct efx_tx_buffer {
  *
  * @efx: The associated Efx NIC
  * @queue: DMA queue number
+ * @label: Label for TX completion events.
+ *	Is our index within @channel->tx_queue array.
  * @tso_version: Version of TSO in use for this queue.
  * @channel: The associated channel
  * @core_txq: The networking core TX queue structure
@@ -250,7 +252,8 @@ struct efx_tx_buffer {
 struct efx_tx_queue {
 	/* Members which don't change on the fast path */
 	struct efx_nic *efx ____cacheline_aligned_in_smp;
-	unsigned queue;
+	unsigned int queue;
+	unsigned int label;
 	unsigned int tso_version;
 	struct efx_channel *channel;
 	struct netdev_queue *core_txq;
@@ -867,6 +870,7 @@ struct efx_async_filter_insertion {
 * @n_rx_channels: Number of channels used for RX (= number of RX queues)
 * @n_tx_channels: Number of channels used for TX
 * @n_extra_tx_channels: Number of extra channels with TX queues
+ * @tx_queues_per_channel: number of TX queues probed on each channel
 * @n_xdp_channels: Number of channels used for XDP TX
 * @xdp_channel_offset: Offset of zeroth channel used for XPD TX.
 * @xdp_tx_per_channel: Max number of TX queues on an XDP TX channel.
@@ -1031,6 +1035,7 @@ struct efx_nic {
 	unsigned tx_channel_offset;
 	unsigned n_tx_channels;
 	unsigned n_extra_tx_channels;
+	unsigned int tx_queues_per_channel;
 	unsigned int n_xdp_channels;
 	unsigned int xdp_channel_offset;
 	unsigned int xdp_tx_per_channel;
@@ -1317,8 +1322,6 @@ struct efx_udp_tunnel {
 * @option_descriptors: NIC supports TX option descriptors
 * @min_interrupt_mode: Lowest capability interrupt mode supported
 *	from &enum efx_int_mode.
- * @max_interrupt_mode: Highest capability interrupt mode supported
- *	from &enum efx_int_mode.
 * @timer_period_max: Maximum period of interrupt timer (in ticks)
 * @offload_features: net_device feature flags for protocol offload
 *	features implemented in hardware
@@ -1356,7 +1359,7 @@ struct efx_nic_type {
 	void (*push_irq_moderation)(struct efx_channel *channel);
 	int (*reconfigure_port)(struct efx_nic *efx);
 	void (*prepare_enable_fc_tx)(struct efx_nic *efx);
-	int (*reconfigure_mac)(struct efx_nic *efx);
+	int (*reconfigure_mac)(struct efx_nic *efx, bool mtu_only);
 	bool (*check_mac_fault)(struct efx_nic *efx);
 	void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol);
 	int (*set_wol)(struct efx_nic *efx, u32 type);
@@ -1492,7 +1495,6 @@ struct efx_nic_type {
 	bool always_rx_scatter;
 	bool option_descriptors;
 	unsigned int min_interrupt_mode;
-	unsigned int max_interrupt_mode;
 	unsigned int timer_period_max;
 	netdev_features_t offload_features;
 	int mcdi_max_ver;
@@ -1532,7 +1534,7 @@ static inline struct efx_tx_queue *
 efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
 {
 	EFX_WARN_ON_ONCE_PARANOID(index >= efx->n_tx_channels ||
-				  type >= EFX_TXQ_TYPES);
+				  type >= efx->tx_queues_per_channel);
 	return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type];
 }
@@ -1554,18 +1556,18 @@ static inline bool efx_channel_has_tx_queues(struct efx_channel *channel)
 	return true;
 }
 
-static inline struct efx_tx_queue *
-efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type)
+static inline unsigned int efx_channel_num_tx_queues(struct efx_channel *channel)
 {
-	EFX_WARN_ON_ONCE_PARANOID(!efx_channel_has_tx_queues(channel) ||
-				  type >= EFX_TXQ_TYPES);
-	return &channel->tx_queue[type];
+	if (efx_channel_is_xdp_tx(channel))
+		return channel->efx->xdp_tx_per_channel;
+	return channel->efx->tx_queues_per_channel;
 }
 
-static inline bool efx_tx_queue_used(struct efx_tx_queue *tx_queue)
+static inline struct efx_tx_queue *
+efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type)
 {
-	return !(tx_queue->efx->net_dev->num_tc < 2 &&
-		 tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI);
+	EFX_WARN_ON_ONCE_PARANOID(type >= efx_channel_num_tx_queues(channel));
+	return &channel->tx_queue[type];
 }
 
 /* Iterate over all TX queues belonging to a channel */
@@ -1574,18 +1576,8 @@ static inline bool efx_tx_queue_used(struct efx_tx_queue *tx_queue)
 		;						\
 	else							\
 		for (_tx_queue = (_channel)->tx_queue;		\
-		     _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES && \
-		     (efx_tx_queue_used(_tx_queue) ||		\
-		      efx_channel_is_xdp_tx(_channel));		\
-		     _tx_queue++)
-
-/* Iterate over all possible TX queues belonging to a channel */
-#define efx_for_each_possible_channel_tx_queue(_tx_queue, _channel)	\
-	if (!efx_channel_has_tx_queues(_channel))			\
-		;							\
-	else								\
-		for (_tx_queue = (_channel)->tx_queue;			\
-		     _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES;	\
+		     _tx_queue < (_channel)->tx_queue +		\
+				 efx_channel_num_tx_queues(_channel);	\
 		     _tx_queue++)
 
 static inline bool efx_channel_has_rx_queue(struct efx_channel *channel)
...
@@ -21,6 +21,7 @@ enum {
 	 */
 	EFX_REV_SIENA_A0 = 3,
 	EFX_REV_HUNT_A0 = 4,
+	EFX_REV_EF100 = 5,
 };
 
 static inline int efx_nic_rev(struct efx_nic *efx)
@@ -90,7 +91,7 @@ static inline bool efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue)
 /* XXX is this a thing on EF100? */
 static inline struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue)
 {
-	if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
+	if (tx_queue->label & EFX_TXQ_TYPE_OFFLOAD)
 		return tx_queue - EFX_TXQ_TYPE_OFFLOAD;
 	else
 		return tx_queue + EFX_TXQ_TYPE_OFFLOAD;
...
@@ -445,7 +445,7 @@ static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
 		if (rc != NETDEV_TX_OK) {
 			netif_err(efx, drv, efx->net_dev,
 				  "TX queue %d could not transmit packet %d of "
-				  "%d in %s loopback test\n", tx_queue->queue,
+				  "%d in %s loopback test\n", tx_queue->label,
 				  i + 1, state->packet_count,
 				  LOOPBACK_MODE(efx));
@@ -497,7 +497,7 @@ static int efx_end_loopback(struct efx_tx_queue *tx_queue,
 		netif_err(efx, drv, efx->net_dev,
 			  "TX queue %d saw only %d out of an expected %d "
 			  "TX completion events in %s loopback test\n",
-			  tx_queue->queue, tx_done, state->packet_count,
+			  tx_queue->label, tx_done, state->packet_count,
 			  LOOPBACK_MODE(efx));
 		rc = -ETIMEDOUT;
 		/* Allow to fall through so we see the RX errors as well */
@@ -508,15 +508,15 @@ static int efx_end_loopback(struct efx_tx_queue *tx_queue,
 		netif_dbg(efx, drv, efx->net_dev,
 			  "TX queue %d saw only %d out of an expected %d "
 			  "received packets in %s loopback test\n",
-			  tx_queue->queue, rx_good, state->packet_count,
+			  tx_queue->label, rx_good, state->packet_count,
 			  LOOPBACK_MODE(efx));
 		rc = -ETIMEDOUT;
 		/* Fall through */
 	}
 
 	/* Update loopback test structure */
-	lb_tests->tx_sent[tx_queue->queue] += state->packet_count;
-	lb_tests->tx_done[tx_queue->queue] += tx_done;
+	lb_tests->tx_sent[tx_queue->label] += state->packet_count;
+	lb_tests->tx_done[tx_queue->label] += tx_done;
 	lb_tests->rx_good += rx_good;
 	lb_tests->rx_bad += rx_bad;
@@ -542,8 +542,8 @@ efx_test_loopback(struct efx_tx_queue *tx_queue,
 	state->flush = false;
 
 	netif_dbg(efx, drv, efx->net_dev,
-		  "TX queue %d testing %s loopback with %d packets\n",
-		  tx_queue->queue, LOOPBACK_MODE(efx),
+		  "TX queue %d (hw %d) testing %s loopback with %d packets\n",
+		  tx_queue->label, tx_queue->queue, LOOPBACK_MODE(efx),
 		  state->packet_count);
 
 	efx_iterate_state(efx);
@@ -570,7 +570,7 @@ efx_test_loopback(struct efx_tx_queue *tx_queue,
 	netif_dbg(efx, drv, efx->net_dev,
 		  "TX queue %d passed %s loopback test with a burst length "
-		  "of %d packets\n", tx_queue->queue, LOOPBACK_MODE(efx),
+		  "of %d packets\n", tx_queue->label, LOOPBACK_MODE(efx),
 		  state->packet_count);
 
 	return 0;
@@ -660,7 +660,7 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
 			/* Test all enabled types of TX queue */
 			efx_for_each_channel_tx_queue(tx_queue, channel) {
-				state->offload_csum = (tx_queue->queue &
+				state->offload_csum = (tx_queue->label &
 						       EFX_TXQ_TYPE_OFFLOAD);
 				rc = efx_test_loopback(tx_queue,
 						       &tests->loopback[mode]);
...
@@ -279,6 +279,7 @@ static int siena_probe_nic(struct efx_nic *efx)
 	efx->max_channels = EFX_MAX_CHANNELS;
 	efx->max_vis = EFX_MAX_CHANNELS;
 	efx->max_tx_channels = EFX_MAX_CHANNELS;
+	efx->tx_queues_per_channel = 4;
 
 	efx_reado(efx, &reg, FR_AZ_CS_DEBUG);
 	efx->port_num = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1;
@@ -633,7 +634,7 @@ static size_t siena_update_nic_stats(struct efx_nic *efx, u64 *full_stats,
 	return SIENA_STAT_COUNT;
 }
 
-static int siena_mac_reconfigure(struct efx_nic *efx)
+static int siena_mac_reconfigure(struct efx_nic *efx, bool mtu_only __always_unused)
 {
 	MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_MCAST_HASH_IN_LEN);
 	int rc;
@@ -1085,7 +1086,6 @@ const struct efx_nic_type siena_a0_nic_type = {
 	.can_rx_scatter = true,
 	.option_descriptors = false,
 	.min_interrupt_mode = EFX_INT_MODE_LEGACY,
-	.max_interrupt_mode = EFX_INT_MODE_MSIX,
 	.timer_period_max = 1 << FRF_CZ_TC_TIMER_VAL_WIDTH,
 	.offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 			     NETIF_F_RXHASH | NETIF_F_NTUPLE),
...
@@ -551,8 +551,8 @@ void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
 	/* Must be inverse of queue lookup in efx_hard_start_xmit() */
 	tx_queue->core_txq =
 		netdev_get_tx_queue(efx->net_dev,
-				    tx_queue->queue / EFX_TXQ_TYPES +
-				    ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
+				    tx_queue->channel->channel +
+				    ((tx_queue->label & EFX_TXQ_TYPE_HIGHPRI) ?
 				     efx->n_tx_channels : 0));
 }
@@ -561,14 +561,15 @@ int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
 	struct tc_mqprio_qopt *mqprio = type_data;
-	struct efx_channel *channel;
-	struct efx_tx_queue *tx_queue;
 	unsigned tc, num_tc;
-	int rc;
 
 	if (type != TC_SETUP_QDISC_MQPRIO)
 		return -EOPNOTSUPP;
 
+	/* Only Siena supported highpri queues */
+	if (efx_nic_rev(efx) > EFX_REV_SIENA_A0)
+		return -EOPNOTSUPP;
+
 	num_tc = mqprio->num_tc;
 
 	if (num_tc > EFX_MAX_TX_TC)
@@ -584,40 +585,9 @@ int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
 		net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
 	}
 
-	if (num_tc > net_dev->num_tc) {
-		/* Initialise high-priority queues as necessary */
-		efx_for_each_channel(channel, efx) {
-			efx_for_each_possible_channel_tx_queue(tx_queue,
-							       channel) {
-				if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
-					continue;
-				if (!tx_queue->buffer) {
-					rc = efx_probe_tx_queue(tx_queue);
-					if (rc)
-						return rc;
-				}
-				if (!tx_queue->initialised)
-					efx_init_tx_queue(tx_queue);
-				efx_init_tx_queue_core_txq(tx_queue);
-			}
-		}
-	} else {
-		/* Reduce number of classes before number of queues */
-		net_dev->num_tc = num_tc;
-	}
-
-	rc = netif_set_real_num_tx_queues(net_dev,
-					  max_t(int, num_tc, 1) *
-					  efx->n_tx_channels);
-	if (rc)
-		return rc;
-
-	/* Do not destroy high-priority queues when they become
-	 * unused. We would have to flush them first, and it is
-	 * fairly difficult to flush a subset of TX queues. Leave
-	 * it to efx_fini_channels().
-	 */
-
 	net_dev->num_tc = num_tc;
-	return 0;
+
+	return netif_set_real_num_tx_queues(net_dev,
+					    max_t(int, num_tc, 1) *
+					    efx->n_tx_channels);
 }
@@ -298,7 +298,11 @@ struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
 	/* Map the fragment taking account of NIC-dependent DMA limits. */
 	do {
 		buffer = efx_tx_queue_get_insert_buffer(tx_queue);
-		dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len);
+
+		if (nic_type->tx_limit_len)
+			dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len);
+		else
+			dma_len = len;
 
 		buffer->len = dma_len;
 		buffer->dma_addr = dma_addr;
...