Commit abe303db authored by David S. Miller

Merge branch 'for-davem' of git://git.kernel.org/pub/scm/linux/kernel/git/bwh/sfc-next

Ben Hutchings says:

====================
1. More workarounds for TX queue flush failures that can occur during
   interface reconfiguration.
2. Fix spurious failure of a firmware request running during a system
   clock change, e.g. ntpd started at the same time as driver load.
3. Fix inconsistent statistics after a firmware upgrade.
4. Fix a variable (non-)initialisation in offline self-test that can
   make it more disruptive than intended.
5. Fix a race that can (at least) cause an assertion failure.
6. Miscellaneous cleanup.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents a20da984 b9cc977d
@@ -106,8 +106,8 @@ static struct workqueue_struct *reset_workqueue;
 *
 * This is only used in MSI-X interrupt mode
 */
-static unsigned int separate_tx_channels;
-module_param(separate_tx_channels, uint, 0444);
+static bool separate_tx_channels;
+module_param(separate_tx_channels, bool, 0444);
 MODULE_PARM_DESC(separate_tx_channels,
		 "Use separate channels for TX and RX");
...@@ -160,8 +160,8 @@ static unsigned int rss_cpus; ...@@ -160,8 +160,8 @@ static unsigned int rss_cpus;
module_param(rss_cpus, uint, 0444); module_param(rss_cpus, uint, 0444);
MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling"); MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
static int phy_flash_cfg; static bool phy_flash_cfg;
module_param(phy_flash_cfg, int, 0644); module_param(phy_flash_cfg, bool, 0644);
MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially"); MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");
static unsigned irq_adapt_low_thresh = 8000; static unsigned irq_adapt_low_thresh = 8000;
@@ -2279,7 +2279,7 @@ int efx_reset(struct efx_nic *efx, enum reset_type method)
 	netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
		   RESET_TYPE(method));
-	netif_device_detach(efx->net_dev);
+	efx_device_detach_sync(efx);
 	efx_reset_down(efx, method);
 	rc = efx->type->reset(efx, method);
@@ -2758,7 +2758,7 @@ static int efx_pm_freeze(struct device *dev)
 	if (efx->state != STATE_DISABLED) {
 		efx->state = STATE_UNINIT;
-		netif_device_detach(efx->net_dev);
+		efx_device_detach_sync(efx);
 		efx_stop_all(efx);
 		efx_stop_interrupts(efx, false);
...
@@ -163,4 +163,17 @@ extern void efx_link_status_changed(struct efx_nic *efx);
 extern void efx_link_set_advertising(struct efx_nic *efx, u32);
 extern void efx_link_set_wanted_fc(struct efx_nic *efx, u8);

+static inline void efx_device_detach_sync(struct efx_nic *efx)
+{
+	struct net_device *dev = efx->net_dev;
+
+	/* Lock/freeze all TX queues so that we can be sure the
+	 * TX scheduler is stopped when we're done and before
+	 * netif_device_present() becomes false.
+	 */
+	netif_tx_lock(dev);
+	netif_device_detach(dev);
+	netif_tx_unlock(dev);
+}
+
 #endif /* EFX_EFX_H */
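The new helper closes a race with the transmit path: netif_device_detach() only clears a flag, so a CPU that has already passed its netif_device_present() check may still be queueing packets when the call returns. A hedged sketch of how a caller relies on this (example_reconfigure() is hypothetical, not driver code):

	/* Hypothetical reconfiguration path using the helper above: the
	 * TX lock in efx_device_detach_sync() guarantees no CPU is still
	 * between its netif_device_present() check and the end of
	 * ndo_start_xmit() when teardown begins.
	 */
	static void example_reconfigure(struct efx_nic *efx)
	{
		efx_device_detach_sync(efx);	/* TX scheduler now quiescent */
		efx_stop_all(efx);		/* safe: no new packets can queue */
		/* ... reconfigure and re-attach ... */
	}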
@@ -816,6 +816,9 @@ static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
 /* MAC address mask including only MC flag */
 static const u8 mac_addr_mc_mask[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 0 };
+#define IP4_ADDR_FULL_MASK	((__force __be32)~0)
+#define PORT_FULL_MASK		((__force __be16)~0)
+
 static int efx_ethtool_get_class_rule(struct efx_nic *efx,
				      struct ethtool_rx_flow_spec *rule)
 {
@@ -865,12 +868,12 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
			&spec, &proto, &ip_entry->ip4dst, &ip_entry->pdst,
			&ip_entry->ip4src, &ip_entry->psrc);
 		EFX_WARN_ON_PARANOID(rc);
-		ip_mask->ip4src = ~0;
-		ip_mask->psrc = ~0;
+		ip_mask->ip4src = IP4_ADDR_FULL_MASK;
+		ip_mask->psrc = PORT_FULL_MASK;
 	}
 	rule->flow_type = (proto == IPPROTO_TCP) ? TCP_V4_FLOW : UDP_V4_FLOW;
-	ip_mask->ip4dst = ~0;
-	ip_mask->pdst = ~0;
+	ip_mask->ip4dst = IP4_ADDR_FULL_MASK;
+	ip_mask->pdst = PORT_FULL_MASK;
 	return rc;
 }
@@ -971,7 +974,7 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx,
 	/* Check for unsupported extensions */
 	if ((rule->flow_type & FLOW_EXT) &&
-	    (rule->m_ext.vlan_etype | rule->m_ext.data[0] |
+	    (rule->m_ext.vlan_etype || rule->m_ext.data[0] ||
	     rule->m_ext.data[1]))
		return -EINVAL;
@@ -986,16 +989,16 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx,
				   IPPROTO_TCP : IPPROTO_UDP);
 		/* Must match all of destination, */
-		if ((__force u32)~ip_mask->ip4dst |
-		    (__force u16)~ip_mask->pdst)
+		if (!(ip_mask->ip4dst == IP4_ADDR_FULL_MASK &&
+		      ip_mask->pdst == PORT_FULL_MASK))
			return -EINVAL;
 		/* all or none of source, */
-		if ((ip_mask->ip4src | ip_mask->psrc) &&
-		    ((__force u32)~ip_mask->ip4src |
-		     (__force u16)~ip_mask->psrc))
+		if ((ip_mask->ip4src || ip_mask->psrc) &&
+		    !(ip_mask->ip4src == IP4_ADDR_FULL_MASK &&
+		      ip_mask->psrc == PORT_FULL_MASK))
			return -EINVAL;
 		/* and nothing else */
-		if (ip_mask->tos | rule->m_ext.vlan_tci)
+		if (ip_mask->tos || rule->m_ext.vlan_tci)
			return -EINVAL;
 		if (ip_mask->ip4src)
...
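Replacing bitwise tests such as (__force u16)~ip_mask->pdst with explicit full-mask comparisons states the intent directly and sidesteps a classic promotion trap with ~ on sub-int types. A standalone illustration in plain C (invented values, not driver code):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint16_t port_mask = 0xffff;	/* "match all bits" */

		/* ~ promotes to int first: ~0xffff == 0xffff0000, non-zero,
		 * so a bare test of ~port_mask misfires...
		 */
		printf("bare ~port_mask nonzero:  %d\n", ~port_mask != 0);	/* 1 */

		/* ...and only behaves once truncated back to 16 bits: */
		printf("cast ~port_mask nonzero:  %d\n",
		       (uint16_t)~port_mask != 0);				/* 0 */

		/* Comparing against an explicit full mask avoids the trap: */
		printf("full-mask compare:        %d\n", port_mask == 0xffff);	/* 1 */
		return 0;
	}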
@@ -1792,6 +1792,7 @@ const struct efx_nic_type falcon_a1_nic_type = {
 	.remove_port = falcon_remove_port,
 	.handle_global_event = falcon_handle_global_event,
 	.prepare_flush = falcon_prepare_flush,
+	.finish_flush = efx_port_dummy_op_void,
 	.update_stats = falcon_update_nic_stats,
 	.start_stats = falcon_start_nic_stats,
 	.stop_stats = falcon_stop_nic_stats,
@@ -1834,6 +1835,7 @@ const struct efx_nic_type falcon_b0_nic_type = {
 	.remove_port = falcon_remove_port,
 	.handle_global_event = falcon_handle_global_event,
 	.prepare_flush = falcon_prepare_flush,
+	.finish_flush = efx_port_dummy_op_void,
 	.update_stats = falcon_update_nic_stats,
 	.start_stats = falcon_start_nic_stats,
 	.stop_stats = falcon_stop_nic_stats,
...
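Falcon needs no post-flush work, so the new finish_flush hook is filled with efx_port_dummy_op_void rather than left NULL, and call sites can invoke it unconditionally. A minimal sketch of the pattern (simplified; example_finish() is illustrative, efx_port_dummy_op_void exists in the driver):

	/* Optional hooks point at a no-op instead of NULL... */
	void efx_port_dummy_op_void(struct efx_nic *efx)
	{
	}

	/* ...so call sites stay branch-free: */
	static void example_finish(struct efx_nic *efx)
	{
		efx->type->finish_flush(efx);	/* never needs a NULL check */
	}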
@@ -22,22 +22,21 @@
 *
 * Notes on locking strategy:
 *
- * Most CSRs are 128-bit (oword) and therefore cannot be read or
- * written atomically.  Access from the host is buffered by the Bus
- * Interface Unit (BIU).  Whenever the host reads from the lowest
- * address of such a register, or from the address of a different such
- * register, the BIU latches the register's value.  Subsequent reads
- * from higher addresses of the same register will read the latched
- * value.  Whenever the host writes part of such a register, the BIU
- * collects the written value and does not write to the underlying
- * register until all 4 dwords have been written.  A similar buffering
- * scheme applies to host access to the NIC's 64-bit SRAM.
+ * Many CSRs are very wide and cannot be read or written atomically.
+ * Writes from the host are buffered by the Bus Interface Unit (BIU)
+ * up to 128 bits.  Whenever the host writes part of such a register,
+ * the BIU collects the written value and does not write to the
+ * underlying register until all 4 dwords have been written.  A
+ * similar buffering scheme applies to host access to the NIC's 64-bit
+ * SRAM.
 *
- * Access to different CSRs and 64-bit SRAM words must be serialised,
- * since interleaved access can result in lost writes or lost
- * information from read-to-clear fields.  We use efx_nic::biu_lock
- * for this.  (We could use separate locks for read and write, but
- * this is not normally a performance bottleneck.)
+ * Writes to different CSRs and 64-bit SRAM words must be serialised,
+ * since interleaved access can result in lost writes.  We use
+ * efx_nic::biu_lock for this.
+ *
+ * We also serialise reads from 128-bit CSRs and SRAM with the same
+ * spinlock.  This may not be necessary, but it doesn't really matter
+ * as there are no such reads on the fast path.
 *
 * The DMA descriptor pointers (RX_DESC_UPD and TX_DESC_UPD) are
 * 128-bit but are special-cased in the BIU to avoid the need for
@@ -204,20 +203,6 @@ static inline void efx_reado_table(struct efx_nic *efx, efx_oword_t *value,
 	efx_reado(efx, value, reg + index * sizeof(efx_oword_t));
 }

-/* Write a 32-bit CSR forming part of a table, or 32-bit SRAM */
-static inline void efx_writed_table(struct efx_nic *efx, efx_dword_t *value,
-				    unsigned int reg, unsigned int index)
-{
-	efx_writed(efx, value, reg + index * sizeof(efx_oword_t));
-}
-
-/* Read a 32-bit CSR forming part of a table, or 32-bit SRAM */
-static inline void efx_readd_table(struct efx_nic *efx, efx_dword_t *value,
-				   unsigned int reg, unsigned int index)
-{
-	efx_readd(efx, value, reg + index * sizeof(efx_dword_t));
-}

 /* Page-mapped register block size */
 #define EFX_PAGE_BLOCK_SIZE	0x2000
...
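Note why these helpers had to go: efx_writed_table() stepped by sizeof(efx_oword_t) (16 bytes) while efx_readd_table() stepped by sizeof(efx_dword_t) (4 bytes), so the matching names hid two different address calculations. A standalone sketch with an invented base offset:

	#include <stdio.h>

	#define REG_BASE 0x0400u	/* invented table base, for illustration */

	int main(void)
	{
		unsigned int index = 1;

		/* efx_writed_table() used a 16-byte (oword) stride... */
		printf("oword stride: 0x%04x\n", REG_BASE + index * 16);

		/* ...efx_readd_table() a 4-byte (dword) stride. */
		printf("dword stride: 0x%04x\n", REG_BASE + index * 4);

		/* Same index, different registers (0x0410 vs 0x0404), which
		 * is why callers now spell out the step macro per table.
		 */
		return 0;
	}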
@@ -22,7 +22,7 @@
 **************************************************************************
 */
-#define MCDI_RPC_TIMEOUT	10 /*seconds */
+#define MCDI_RPC_TIMEOUT	(10 * HZ)
 #define MCDI_PDU(efx)		\
	(efx_port_num(efx) ? MC_SMEM_P1_PDU_OFST : MC_SMEM_P0_PDU_OFST)
@@ -120,7 +120,7 @@ static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen)
 static int efx_mcdi_poll(struct efx_nic *efx)
 {
 	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
-	unsigned int time, finish;
+	unsigned long time, finish;
 	unsigned int respseq, respcmd, error;
 	unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
 	unsigned int rc, spins;
@@ -136,7 +136,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
	 * and poll once a jiffy (approximately)
	 */
 	spins = TICK_USEC;
-	finish = get_seconds() + MCDI_RPC_TIMEOUT;
+	finish = jiffies + MCDI_RPC_TIMEOUT;
 	while (1) {
 		if (spins != 0) {
@@ -146,7 +146,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
			schedule_timeout_uninterruptible(1);
		}
-		time = get_seconds();
+		time = jiffies;
		rmb();
		efx_readd(efx, &reg, pdu);
@@ -158,7 +158,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
		    EFX_DWORD_FIELD(reg, MCDI_HEADER_RESPONSE))
			break;
-		if (time >= finish)
+		if (time_after(time, finish))
			return -ETIMEDOUT;
	}
@@ -207,7 +207,9 @@ static int efx_mcdi_poll(struct efx_nic *efx)
	return 0;
 }
-/* Test and clear MC-rebooted flag for this port/function */
+/* Test and clear MC-rebooted flag for this port/function; reset
+ * software state as necessary.
+ */
 int efx_mcdi_poll_reboot(struct efx_nic *efx)
 {
 	unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_STATUS(efx);
@@ -223,6 +225,11 @@ int efx_mcdi_poll_reboot(struct efx_nic *efx)
 	if (value == 0)
		return 0;
+	/* MAC statistics have been cleared on the NIC; clear our copy
+	 * so that efx_update_diff_stat() can continue to work.
+	 */
+	memset(&efx->mac_stats, 0, sizeof(efx->mac_stats));
+
 	EFX_ZERO_DWORD(reg);
 	efx_writed(efx, &reg, addr);
@@ -250,7 +257,7 @@ static int efx_mcdi_await_completion(struct efx_nic *efx)
 	if (wait_event_timeout(
		    mcdi->wq,
		    atomic_read(&mcdi->state) == MCDI_STATE_COMPLETED,
-		    msecs_to_jiffies(MCDI_RPC_TIMEOUT * 1000)) == 0)
+		    MCDI_RPC_TIMEOUT) == 0)
		return -ETIMEDOUT;
 	/* Check if efx_mcdi_set_mode() switched us back to polled completions.
@@ -1216,7 +1223,7 @@ int efx_mcdi_flush_rxqs(struct efx_nic *efx)
 	rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, (u8 *)qid,
			  count * sizeof(*qid), NULL, 0, NULL);
-	WARN_ON(rc > 0);
+	WARN_ON(rc < 0);
 	kfree(qid);
...
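This is cover-letter item 2: MCDI_RPC_TIMEOUT is now expressed in jiffies, and the poll loop measures its deadline against the monotonic jiffies counter instead of get_seconds(), which follows the wall clock and can jump when e.g. ntpd steps the time at driver load. A hedged sketch of the pattern, with hw_done() as a hypothetical stand-in for reading the MCDI response header:

	/* Sketch only; hw_done() is invented for illustration. */
	static int poll_with_deadline(void)
	{
		/* jiffies is monotonic, so a wall-clock step cannot shorten
		 * or stretch this deadline the way get_seconds() could.
		 */
		unsigned long finish = jiffies + 10 * HZ;

		while (!hw_done()) {
			if (time_after(jiffies, finish))	/* wrap-safe */
				return -ETIMEDOUT;
			schedule_timeout_uninterruptible(1);	/* ~1 jiffy */
		}
		return 0;
	}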
@@ -200,6 +200,7 @@ struct efx_tx_queue {
 	/* Members shared between paths and sometimes updated */
 	unsigned int empty_read_count ____cacheline_aligned_in_smp;
 #define EFX_EMPTY_COUNT_VALID 0x80000000
+	atomic_t flush_outstanding;
 };
 /**
@@ -907,6 +908,7 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
 * @remove_port: Free resources allocated by probe_port()
 * @handle_global_event: Handle a "global" event (may be %NULL)
 * @prepare_flush: Prepare the hardware for flushing the DMA queues
+ * @finish_flush: Clean up after flushing the DMA queues
 * @update_stats: Update statistics not provided by event handling
 * @start_stats: Start the regular fetching of statistics
 * @stop_stats: Stop the regular fetching of statistics
@@ -954,6 +956,7 @@ struct efx_nic_type {
 	void (*remove_port)(struct efx_nic *efx);
 	bool (*handle_global_event)(struct efx_channel *channel, efx_qword_t *);
 	void (*prepare_flush)(struct efx_nic *efx);
+	void (*finish_flush)(struct efx_nic *efx);
 	void (*update_stats)(struct efx_nic *efx);
 	void (*start_stats)(struct efx_nic *efx);
 	void (*stop_stats)(struct efx_nic *efx);
...
@@ -73,6 +73,8 @@
 	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN,		\
			   (_tx_queue)->queue)
+static void efx_magic_event(struct efx_channel *channel, u32 magic);
+
 /**************************************************************************
 *
 * Solarstorm hardware access
@@ -255,9 +257,6 @@ static int efx_alloc_special_buffer(struct efx_nic *efx,
 	buffer->entries = len / EFX_BUF_SIZE;
 	BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1));
-	/* All zeros is a potentially valid event so memset to 0xff */
-	memset(buffer->addr, 0xff, len);
-
 	/* Select new buffer ID */
 	buffer->index = efx->next_buffer_table;
 	efx->next_buffer_table += buffer->entries;
@@ -494,6 +493,9 @@ static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
 	struct efx_nic *efx = tx_queue->efx;
 	efx_oword_t tx_flush_descq;
+	WARN_ON(atomic_read(&tx_queue->flush_outstanding));
+	atomic_set(&tx_queue->flush_outstanding, 1);
+
 	EFX_POPULATE_OWORD_2(tx_flush_descq,
			     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
@@ -669,6 +671,47 @@ static bool efx_flush_wake(struct efx_nic *efx)
		 && atomic_read(&efx->rxq_flush_pending) > 0));
 }
+static bool efx_check_tx_flush_complete(struct efx_nic *efx)
+{
+	bool i = true;
+	efx_oword_t txd_ptr_tbl;
+	struct efx_channel *channel;
+	struct efx_tx_queue *tx_queue;
+
+	efx_for_each_channel(channel, efx) {
+		efx_for_each_channel_tx_queue(tx_queue, channel) {
+			efx_reado_table(efx, &txd_ptr_tbl,
+					FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
+			if (EFX_OWORD_FIELD(txd_ptr_tbl,
+					    FRF_AZ_TX_DESCQ_FLUSH) ||
+			    EFX_OWORD_FIELD(txd_ptr_tbl,
+					    FRF_AZ_TX_DESCQ_EN)) {
+				netif_dbg(efx, hw, efx->net_dev,
+					  "flush did not complete on TXQ %d\n",
+					  tx_queue->queue);
+				i = false;
+			} else if (atomic_cmpxchg(&tx_queue->flush_outstanding,
+						  1, 0)) {
+				/* The flush is complete, but we didn't
+				 * receive a flush completion event
+				 */
+				netif_dbg(efx, hw, efx->net_dev,
+					  "flush complete on TXQ %d, so drain "
+					  "the queue\n", tx_queue->queue);
+				/* Don't need to increment drain_pending as it
+				 * has already been incremented for the queues
+				 * which did not drain
+				 */
+				efx_magic_event(channel,
+						EFX_CHANNEL_MAGIC_TX_DRAIN(
							tx_queue));
+			}
+		}
+	}
+
+	return i;
+}
+
 /* Flush all the transmit queues, and continue flushing receive queues until
 * they're all flushed. Wait for the DRAIN events to be received so that there
 * are no more RX and TX events left on any channel. */
@@ -680,7 +723,6 @@ int efx_nic_flush_queues(struct efx_nic *efx)
 	struct efx_tx_queue *tx_queue;
 	int rc = 0;
-	efx->fc_disable++;
 	efx->type->prepare_flush(efx);
 	efx_for_each_channel(channel, efx) {
@@ -730,7 +772,8 @@ int efx_nic_flush_queues(struct efx_nic *efx)
			     timeout);
 	}
-	if (atomic_read(&efx->drain_pending)) {
+	if (atomic_read(&efx->drain_pending) &&
+	    !efx_check_tx_flush_complete(efx)) {
		netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
			  "(rx %d+%d)\n", atomic_read(&efx->drain_pending),
			  atomic_read(&efx->rxq_flush_outstanding),
@@ -742,7 +785,7 @@ int efx_nic_flush_queues(struct efx_nic *efx)
		atomic_set(&efx->rxq_flush_outstanding, 0);
	}
-	efx->fc_disable--;
+	efx->type->finish_flush(efx);
 	return rc;
 }
@@ -766,8 +809,13 @@ void efx_nic_eventq_read_ack(struct efx_channel *channel)
 	EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
			     channel->eventq_read_ptr & channel->eventq_mask);
-	efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
-			 channel->channel);
+
+	/* For Falcon A1, EVQ_RPTR_KER is documented as having a step size
+	 * of 4 bytes, but it is really 16 bytes just like later revisions.
+	 */
+	efx_writed(efx, &reg,
+		   efx->type->evq_rptr_tbl_base +
+		   FR_BZ_EVQ_RPTR_STEP * channel->channel);
 }
 /* Use HW to insert a SW defined event */
@@ -1017,9 +1065,10 @@ efx_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
 	if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) {
		tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES,
					    qid % EFX_TXQ_TYPES);
-		efx_magic_event(tx_queue->channel,
-				EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
+		if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
+			efx_magic_event(tx_queue->channel,
+					EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
+		}
	}
 }
@@ -1565,7 +1614,9 @@ void efx_nic_push_rx_indir_table(struct efx_nic *efx)
 	for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
		EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
				     efx->rx_indir_table[i]);
-		efx_writed_table(efx, &dword, FR_BZ_RX_INDIRECTION_TBL, i);
+		efx_writed(efx, &dword,
+			   FR_BZ_RX_INDIRECTION_TBL +
+			   FR_BZ_RX_INDIRECTION_TBL_STEP * i);
	}
 }
@@ -2029,15 +2080,15 @@ void efx_nic_get_regs(struct efx_nic *efx, void *buf)
		for (i = 0; i < table->rows; i++) {
			switch (table->step) {
-			case 4: /* 32-bit register or SRAM */
-				efx_readd_table(efx, buf, table->offset, i);
+			case 4: /* 32-bit SRAM */
+				efx_readd(efx, buf, table->offset + 4 * i);
				break;
			case 8: /* 64-bit SRAM */
				efx_sram_readq(efx,
					       efx->membase + table->offset,
					       buf, i);
				break;
-			case 16: /* 128-bit register */
+			case 16: /* 128-bit-readable register */
				efx_reado_table(efx, buf, table->offset, i);
				break;
			case 32: /* 128-bit register, interleaved */
...
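With both an event-driven path (efx_handle_tx_flush_done) and a register-polling fallback (efx_check_tx_flush_complete) able to observe the same flush finishing, the flush_outstanding flag ensures the drain event fires exactly once. The claim-once idiom, extracted for clarity (names as in the driver, the wrapper function is illustrative):

	static void note_tx_flush_done(struct efx_tx_queue *tx_queue)
	{
		/* atomic_cmpxchg() returns the old value, so of any number
		 * of racing callers exactly one sees 1 and emits the event.
		 */
		if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0))
			efx_magic_event(tx_queue->channel,
					EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
	}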
@@ -344,6 +344,8 @@ static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx)
 /* Global Resources */
 extern int efx_nic_flush_queues(struct efx_nic *efx);
+extern void siena_prepare_flush(struct efx_nic *efx);
+extern void siena_finish_flush(struct efx_nic *efx);
 extern void falcon_start_nic_stats(struct efx_nic *efx);
 extern void falcon_stop_nic_stats(struct efx_nic *efx);
 extern void falcon_setup_xaui(struct efx_nic *efx);
...
@@ -187,7 +187,6 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
 	struct efx_nic *efx = rx_queue->efx;
 	struct efx_rx_buffer *rx_buf;
 	struct page *page;
-	void *page_addr;
 	struct efx_rx_page_state *state;
 	dma_addr_t dma_addr;
 	unsigned index, count;
@@ -207,12 +206,10 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
			__free_pages(page, efx->rx_buffer_order);
			return -EIO;
		}
-		page_addr = page_address(page);
-		state = page_addr;
+		state = page_address(page);
		state->refcnt = 0;
		state->dma_addr = dma_addr;
-		page_addr += sizeof(struct efx_rx_page_state);
		dma_addr += sizeof(struct efx_rx_page_state);
	split:
@@ -230,7 +227,6 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
			/* Use the second half of the page */
			get_page(page);
			dma_addr += (PAGE_SIZE >> 1);
-			page_addr += (PAGE_SIZE >> 1);
			++count;
			goto split;
		}
...
@@ -373,7 +373,7 @@ static void efx_iterate_state(struct efx_nic *efx)
 	/* saddr set later and used as incrementing count */
 	payload->ip.daddr = htonl(INADDR_LOOPBACK);
 	payload->ip.ihl = 5;
-	payload->ip.check = htons(0xdead);
+	payload->ip.check = (__force __sum16) htons(0xdead);
 	payload->ip.tot_len = htons(sizeof(*payload) - sizeof(struct ethhdr));
 	payload->ip.version = IPVERSION;
 	payload->ip.protocol = IPPROTO_UDP;
@@ -722,7 +722,7 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
 	/* Detach the device so the kernel doesn't transmit during the
	 * loopback test and the watchdog timeout doesn't fire.
	 */
-	netif_device_detach(efx->net_dev);
+	efx_device_detach_sync(efx);
 	if (efx->type->test_chip) {
		rc_reset = efx->type->test_chip(efx, tests);
...
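The (__force __sum16) cast is for sparse: __be16 and __sum16 are distinct "bitwise" types, so mixing them is a type error under sparse even though both are a plain u16 to the compiler. A standalone approximation of the mechanism (the macro guards mirror the kernel's types headers; the typedef names here are invented):

	#ifdef __CHECKER__			/* defined when running sparse */
	#define __bitwise	__attribute__((bitwise))
	#define __force		__attribute__((force))
	#else
	#define __bitwise
	#define __force
	#endif

	typedef unsigned short __bitwise be16;
	typedef unsigned short __bitwise sum16;

	int main(void)
	{
		be16 net_order = (__force be16)0xadde;	/* illustrative value */
		sum16 check;

		check = (__force sum16)net_order;	/* OK under sparse */
		/* check = net_order; -- would warn: incompatible bitwise types */
		(void)check;
		return 0;
	}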
@@ -127,6 +127,18 @@ static void siena_remove_port(struct efx_nic *efx)
 	efx_nic_free_buffer(efx, &efx->stats_buffer);
 }
+void siena_prepare_flush(struct efx_nic *efx)
+{
+	if (efx->fc_disable++ == 0)
+		efx_mcdi_set_mac(efx);
+}
+
+void siena_finish_flush(struct efx_nic *efx)
+{
+	if (--efx->fc_disable == 0)
+		efx_mcdi_set_mac(efx);
+}
+
 static const struct efx_nic_register_test siena_register_tests[] = {
 	{ FR_AZ_ADR_REGION,
	  EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
@@ -158,7 +170,7 @@ static const struct efx_nic_register_test siena_register_tests[] = {
 static int siena_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
 {
-	enum reset_type reset_method = reset_method;
+	enum reset_type reset_method = RESET_TYPE_ALL;
	int rc, rc2;
 	efx_reset_down(efx, reset_method);
@@ -659,7 +671,8 @@ const struct efx_nic_type siena_a0_nic_type = {
 	.reset = siena_reset_hw,
 	.probe_port = siena_probe_port,
 	.remove_port = siena_remove_port,
-	.prepare_flush = efx_port_dummy_op_void,
+	.prepare_flush = siena_prepare_flush,
+	.finish_flush = siena_finish_flush,
 	.update_stats = siena_update_nic_stats,
 	.start_stats = siena_start_nic_stats,
 	.stop_stats = siena_stop_nic_stats,
...
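The siena_test_chip() hunk is cover-letter item 4: "enum reset_type reset_method = reset_method;" is the old GCC self-initialisation idiom for silencing uninitialised-use warnings, so the offline self-test could previously hand an indeterminate reset type to efx_reset_down(). A standalone illustration of why the idiom is dangerous:

	#include <stdio.h>

	int main(void)
	{
		int method = method;	/* self-init: no warning, no value */

		/* Reading 'method' here is undefined behaviour; in the
		 * driver it could select an arbitrary, more disruptive
		 * reset type than intended.
		 */
		printf("method = %d\n", method);
		return 0;
	}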
@@ -695,8 +695,7 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
		return VFDI_RC_ENOMEM;
 	rtnl_lock();
-	if (efx->fc_disable++ == 0)
-		efx_mcdi_set_mac(efx);
+	siena_prepare_flush(efx);
 	rtnl_unlock();
 	/* Flush all the initialized queues */
@@ -733,8 +732,7 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
 	}
 	rtnl_lock();
-	if (--efx->fc_disable == 0)
-		efx_mcdi_set_mac(efx);
+	siena_finish_flush(efx);
 	rtnl_unlock();
 	/* Irrespective of success/failure, fini the queues */
@@ -995,7 +993,7 @@ static void efx_sriov_reset_vf(struct efx_vf *vf, struct efx_buffer *buffer)
			     FRF_AZ_EVQ_BUF_BASE_ID, buftbl);
 	efx_writeo_table(efx, &reg, FR_BZ_EVQ_PTR_TBL, abs_evq);
 	EFX_POPULATE_DWORD_1(ptr, FRF_AZ_EVQ_RPTR, 0);
-	efx_writed_table(efx, &ptr, FR_BZ_EVQ_RPTR, abs_evq);
+	efx_writed(efx, &ptr, FR_BZ_EVQ_RPTR + FR_BZ_EVQ_RPTR_STEP * abs_evq);
 	mutex_unlock(&vf->status_lock);
 }
...