Commit 71ad88f6 authored by Martin Habets, committed by Jakub Kicinski

sfc/siena: Rename functions in efx headers to avoid conflicts with sfc

When building with allyesconfig there are many identical
symbol names.
For siena use efx_siena_ as the function and variable prefix
to avoid build errors.

efx_mtd_remove_partition can become static as it is no longer called
from other files.
efx_ticks_to_usecs and efx_xmit_done_single are not used in Siena, so
they are removed.
Several functions are only used inside efx_channels.c for Siena so
they can become static.
Signed-off-by: Martin Habets <habetsm.xilinx@gmail.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 956f2d86
...@@ -43,11 +43,11 @@ ...@@ -43,11 +43,11 @@
* *
*************************************************************************/ *************************************************************************/
module_param_named(interrupt_mode, efx_interrupt_mode, uint, 0444); module_param_named(interrupt_mode, efx_siena_interrupt_mode, uint, 0444);
MODULE_PARM_DESC(interrupt_mode, MODULE_PARM_DESC(interrupt_mode,
"Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)"); "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
module_param(rss_cpus, uint, 0444); module_param_named(rss_cpus, efx_siena_rss_cpus, uint, 0444);
MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling"); MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
/* /*
...@@ -174,7 +174,7 @@ static void efx_fini_port(struct efx_nic *efx) ...@@ -174,7 +174,7 @@ static void efx_fini_port(struct efx_nic *efx)
efx->port_initialized = false; efx->port_initialized = false;
efx->link_state.up = false; efx->link_state.up = false;
efx_link_status_changed(efx); efx_siena_link_status_changed(efx);
} }
static void efx_remove_port(struct efx_nic *efx) static void efx_remove_port(struct efx_nic *efx)
...@@ -284,11 +284,11 @@ static int efx_probe_nic(struct efx_nic *efx) ...@@ -284,11 +284,11 @@ static int efx_probe_nic(struct efx_nic *efx)
/* Determine the number of channels and queues by trying /* Determine the number of channels and queues by trying
* to hook in MSI-X interrupts. * to hook in MSI-X interrupts.
*/ */
rc = efx_probe_interrupts(efx); rc = efx_siena_probe_interrupts(efx);
if (rc) if (rc)
goto fail1; goto fail1;
rc = efx_set_channels(efx); rc = efx_siena_set_channels(efx);
if (rc) if (rc)
goto fail1; goto fail1;
...@@ -299,7 +299,7 @@ static int efx_probe_nic(struct efx_nic *efx) ...@@ -299,7 +299,7 @@ static int efx_probe_nic(struct efx_nic *efx)
if (rc == -EAGAIN) if (rc == -EAGAIN)
/* try again with new max_channels */ /* try again with new max_channels */
efx_remove_interrupts(efx); efx_siena_remove_interrupts(efx);
} while (rc == -EAGAIN); } while (rc == -EAGAIN);
...@@ -310,13 +310,13 @@ static int efx_probe_nic(struct efx_nic *efx) ...@@ -310,13 +310,13 @@ static int efx_probe_nic(struct efx_nic *efx)
/* Initialise the interrupt moderation settings */ /* Initialise the interrupt moderation settings */
efx->irq_mod_step_us = DIV_ROUND_UP(efx->timer_quantum_ns, 1000); efx->irq_mod_step_us = DIV_ROUND_UP(efx->timer_quantum_ns, 1000);
efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true, efx_siena_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec,
true); true, true);
return 0; return 0;
fail2: fail2:
efx_remove_interrupts(efx); efx_siena_remove_interrupts(efx);
fail1: fail1:
efx->type->remove(efx); efx->type->remove(efx);
return rc; return rc;
...@@ -326,7 +326,7 @@ static void efx_remove_nic(struct efx_nic *efx) ...@@ -326,7 +326,7 @@ static void efx_remove_nic(struct efx_nic *efx)
{ {
netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n"); netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n");
efx_remove_interrupts(efx); efx_siena_remove_interrupts(efx);
efx->type->remove(efx); efx->type->remove(efx);
} }
...@@ -373,7 +373,7 @@ static int efx_probe_all(struct efx_nic *efx) ...@@ -373,7 +373,7 @@ static int efx_probe_all(struct efx_nic *efx)
goto fail4; goto fail4;
} }
rc = efx_probe_channels(efx); rc = efx_siena_probe_channels(efx);
if (rc) if (rc)
goto fail5; goto fail5;
...@@ -399,7 +399,7 @@ static void efx_remove_all(struct efx_nic *efx) ...@@ -399,7 +399,7 @@ static void efx_remove_all(struct efx_nic *efx)
efx_xdp_setup_prog(efx, NULL); efx_xdp_setup_prog(efx, NULL);
rtnl_unlock(); rtnl_unlock();
efx_remove_channels(efx); efx_siena_remove_channels(efx);
efx_remove_filters(efx); efx_remove_filters(efx);
#ifdef CONFIG_SFC_SRIOV #ifdef CONFIG_SFC_SRIOV
efx->type->vswitching_remove(efx); efx->type->vswitching_remove(efx);
...@@ -413,7 +413,7 @@ static void efx_remove_all(struct efx_nic *efx) ...@@ -413,7 +413,7 @@ static void efx_remove_all(struct efx_nic *efx)
* Interrupt moderation * Interrupt moderation
* *
**************************************************************************/ **************************************************************************/
unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs) unsigned int efx_siena_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs)
{ {
if (usecs == 0) if (usecs == 0)
return 0; return 0;
...@@ -422,16 +422,8 @@ unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs) ...@@ -422,16 +422,8 @@ unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs)
return usecs * 1000 / efx->timer_quantum_ns; return usecs * 1000 / efx->timer_quantum_ns;
} }
unsigned int efx_ticks_to_usecs(struct efx_nic *efx, unsigned int ticks)
{
/* We must round up when converting ticks to microseconds
* because we round down when converting the other way.
*/
return DIV_ROUND_UP(ticks * efx->timer_quantum_ns, 1000);
}
/* Set interrupt moderation parameters */ /* Set interrupt moderation parameters */
int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs, int efx_siena_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
unsigned int rx_usecs, bool rx_adaptive, unsigned int rx_usecs, bool rx_adaptive,
bool rx_may_override_tx) bool rx_may_override_tx)
{ {
...@@ -466,7 +458,7 @@ int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs, ...@@ -466,7 +458,7 @@ int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
return 0; return 0;
} }
void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs, void efx_siena_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
unsigned int *rx_usecs, bool *rx_adaptive) unsigned int *rx_usecs, bool *rx_adaptive)
{ {
*rx_adaptive = efx->irq_rx_adaptive; *rx_adaptive = efx->irq_rx_adaptive;
...@@ -520,7 +512,7 @@ static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd) ...@@ -520,7 +512,7 @@ static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
*************************************************************************/ *************************************************************************/
/* Context: process, rtnl_lock() held. */ /* Context: process, rtnl_lock() held. */
int efx_net_open(struct net_device *net_dev) static int efx_net_open(struct net_device *net_dev)
{ {
struct efx_nic *efx = netdev_priv(net_dev); struct efx_nic *efx = netdev_priv(net_dev);
int rc; int rc;
...@@ -533,14 +525,14 @@ int efx_net_open(struct net_device *net_dev) ...@@ -533,14 +525,14 @@ int efx_net_open(struct net_device *net_dev)
return rc; return rc;
if (efx->phy_mode & PHY_MODE_SPECIAL) if (efx->phy_mode & PHY_MODE_SPECIAL)
return -EBUSY; return -EBUSY;
if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL)) if (efx_mcdi_poll_reboot(efx) && efx_siena_reset(efx, RESET_TYPE_ALL))
return -EIO; return -EIO;
/* Notify the kernel of the link state polled during driver load, /* Notify the kernel of the link state polled during driver load,
* before the monitor starts running */ * before the monitor starts running */
efx_link_status_changed(efx); efx_siena_link_status_changed(efx);
efx_start_all(efx); efx_siena_start_all(efx);
if (efx->state == STATE_DISABLED || efx->reset_pending) if (efx->state == STATE_DISABLED || efx->reset_pending)
netif_device_detach(efx->net_dev); netif_device_detach(efx->net_dev);
efx_selftest_async_start(efx); efx_selftest_async_start(efx);
...@@ -551,7 +543,7 @@ int efx_net_open(struct net_device *net_dev) ...@@ -551,7 +543,7 @@ int efx_net_open(struct net_device *net_dev)
* Note that the kernel will ignore our return code; this method * Note that the kernel will ignore our return code; this method
* should really be a void. * should really be a void.
*/ */
int efx_net_stop(struct net_device *net_dev) static int efx_net_stop(struct net_device *net_dev)
{ {
struct efx_nic *efx = netdev_priv(net_dev); struct efx_nic *efx = netdev_priv(net_dev);
...@@ -559,7 +551,7 @@ int efx_net_stop(struct net_device *net_dev) ...@@ -559,7 +551,7 @@ int efx_net_stop(struct net_device *net_dev)
raw_smp_processor_id()); raw_smp_processor_id());
/* Stop the device and flush all the channels */ /* Stop the device and flush all the channels */
efx_stop_all(efx); efx_siena_stop_all(efx);
return 0; return 0;
} }
...@@ -587,16 +579,16 @@ static int efx_vlan_rx_kill_vid(struct net_device *net_dev, __be16 proto, u16 vi ...@@ -587,16 +579,16 @@ static int efx_vlan_rx_kill_vid(struct net_device *net_dev, __be16 proto, u16 vi
static const struct net_device_ops efx_netdev_ops = { static const struct net_device_ops efx_netdev_ops = {
.ndo_open = efx_net_open, .ndo_open = efx_net_open,
.ndo_stop = efx_net_stop, .ndo_stop = efx_net_stop,
.ndo_get_stats64 = efx_net_stats, .ndo_get_stats64 = efx_siena_net_stats,
.ndo_tx_timeout = efx_watchdog, .ndo_tx_timeout = efx_siena_watchdog,
.ndo_start_xmit = efx_hard_start_xmit, .ndo_start_xmit = efx_siena_hard_start_xmit,
.ndo_validate_addr = eth_validate_addr, .ndo_validate_addr = eth_validate_addr,
.ndo_eth_ioctl = efx_ioctl, .ndo_eth_ioctl = efx_ioctl,
.ndo_change_mtu = efx_change_mtu, .ndo_change_mtu = efx_siena_change_mtu,
.ndo_set_mac_address = efx_set_mac_address, .ndo_set_mac_address = efx_siena_set_mac_address,
.ndo_set_rx_mode = efx_set_rx_mode, .ndo_set_rx_mode = efx_siena_set_rx_mode,
.ndo_set_features = efx_set_features, .ndo_set_features = efx_siena_set_features,
.ndo_features_check = efx_features_check, .ndo_features_check = efx_siena_features_check,
.ndo_vlan_rx_add_vid = efx_vlan_rx_add_vid, .ndo_vlan_rx_add_vid = efx_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = efx_vlan_rx_kill_vid, .ndo_vlan_rx_kill_vid = efx_vlan_rx_kill_vid,
#ifdef CONFIG_SFC_SRIOV #ifdef CONFIG_SFC_SRIOV
...@@ -606,9 +598,9 @@ static const struct net_device_ops efx_netdev_ops = { ...@@ -606,9 +598,9 @@ static const struct net_device_ops efx_netdev_ops = {
.ndo_get_vf_config = efx_sriov_get_vf_config, .ndo_get_vf_config = efx_sriov_get_vf_config,
.ndo_set_vf_link_state = efx_sriov_set_vf_link_state, .ndo_set_vf_link_state = efx_sriov_set_vf_link_state,
#endif #endif
.ndo_get_phys_port_id = efx_get_phys_port_id, .ndo_get_phys_port_id = efx_siena_get_phys_port_id,
.ndo_get_phys_port_name = efx_get_phys_port_name, .ndo_get_phys_port_name = efx_siena_get_phys_port_name,
.ndo_setup_tc = efx_setup_tc, .ndo_setup_tc = efx_siena_setup_tc,
#ifdef CONFIG_RFS_ACCEL #ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = efx_filter_rfs, .ndo_rx_flow_steer = efx_filter_rfs,
#endif #endif
...@@ -626,10 +618,10 @@ static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog) ...@@ -626,10 +618,10 @@ static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog)
return -EINVAL; return -EINVAL;
} }
if (prog && efx->net_dev->mtu > efx_xdp_max_mtu(efx)) { if (prog && efx->net_dev->mtu > efx_siena_xdp_max_mtu(efx)) {
netif_err(efx, drv, efx->net_dev, netif_err(efx, drv, efx->net_dev,
"Unable to configure XDP with MTU of %d (max: %d)\n", "Unable to configure XDP with MTU of %d (max: %d)\n",
efx->net_dev->mtu, efx_xdp_max_mtu(efx)); efx->net_dev->mtu, efx_siena_xdp_max_mtu(efx));
return -EINVAL; return -EINVAL;
} }
...@@ -663,14 +655,14 @@ static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs, ...@@ -663,14 +655,14 @@ static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs,
if (!netif_running(dev)) if (!netif_running(dev))
return -EINVAL; return -EINVAL;
return efx_xdp_tx_buffers(efx, n, xdpfs, flags & XDP_XMIT_FLUSH); return efx_siena_xdp_tx_buffers(efx, n, xdpfs, flags & XDP_XMIT_FLUSH);
} }
static void efx_update_name(struct efx_nic *efx) static void efx_update_name(struct efx_nic *efx)
{ {
strcpy(efx->name, efx->net_dev->name); strcpy(efx->name, efx->net_dev->name);
efx_mtd_rename(efx); efx_siena_mtd_rename(efx);
efx_set_channel_names(efx); efx_siena_set_channel_names(efx);
} }
static int efx_netdev_event(struct notifier_block *this, static int efx_netdev_event(struct notifier_block *this,
...@@ -708,7 +700,7 @@ static int efx_register_netdev(struct efx_nic *efx) ...@@ -708,7 +700,7 @@ static int efx_register_netdev(struct efx_nic *efx)
net_dev->netdev_ops = &efx_netdev_ops; net_dev->netdev_ops = &efx_netdev_ops;
if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
net_dev->priv_flags |= IFF_UNICAST_FLT; net_dev->priv_flags |= IFF_UNICAST_FLT;
net_dev->ethtool_ops = &efx_ethtool_ops; net_dev->ethtool_ops = &efx_siena_ethtool_ops;
netif_set_tso_max_segs(net_dev, EFX_TSO_MAX_SEGS); netif_set_tso_max_segs(net_dev, EFX_TSO_MAX_SEGS);
net_dev->min_mtu = EFX_MIN_MTU; net_dev->min_mtu = EFX_MIN_MTU;
net_dev->max_mtu = EFX_MAX_MTU; net_dev->max_mtu = EFX_MAX_MTU;
...@@ -742,7 +734,7 @@ static int efx_register_netdev(struct efx_nic *efx) ...@@ -742,7 +734,7 @@ static int efx_register_netdev(struct efx_nic *efx)
efx_for_each_channel(channel, efx) { efx_for_each_channel(channel, efx) {
struct efx_tx_queue *tx_queue; struct efx_tx_queue *tx_queue;
efx_for_each_channel_tx_queue(tx_queue, channel) efx_for_each_channel_tx_queue(tx_queue, channel)
efx_init_tx_queue_core_txq(tx_queue); efx_siena_init_tx_queue_core_txq(tx_queue);
} }
efx_associate(efx); efx_associate(efx);
...@@ -756,7 +748,7 @@ static int efx_register_netdev(struct efx_nic *efx) ...@@ -756,7 +748,7 @@ static int efx_register_netdev(struct efx_nic *efx)
goto fail_registered; goto fail_registered;
} }
efx_init_mcdi_logging(efx); efx_siena_init_mcdi_logging(efx);
return 0; return 0;
...@@ -780,7 +772,7 @@ static void efx_unregister_netdev(struct efx_nic *efx) ...@@ -780,7 +772,7 @@ static void efx_unregister_netdev(struct efx_nic *efx)
if (efx_dev_registered(efx)) { if (efx_dev_registered(efx)) {
strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name)); strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
efx_fini_mcdi_logging(efx); efx_siena_fini_mcdi_logging(efx);
device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type); device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
unregister_netdev(efx->net_dev); unregister_netdev(efx->net_dev);
} }
...@@ -807,7 +799,7 @@ static const struct pci_device_id efx_pci_table[] = { ...@@ -807,7 +799,7 @@ static const struct pci_device_id efx_pci_table[] = {
* *
**************************************************************************/ **************************************************************************/
void efx_update_sw_stats(struct efx_nic *efx, u64 *stats) void efx_siena_update_sw_stats(struct efx_nic *efx, u64 *stats)
{ {
u64 n_rx_nodesc_trunc = 0; u64 n_rx_nodesc_trunc = 0;
struct efx_channel *channel; struct efx_channel *channel;
...@@ -833,14 +825,14 @@ static void efx_pci_remove_main(struct efx_nic *efx) ...@@ -833,14 +825,14 @@ static void efx_pci_remove_main(struct efx_nic *efx)
* are not READY. * are not READY.
*/ */
BUG_ON(efx->state == STATE_READY); BUG_ON(efx->state == STATE_READY);
efx_flush_reset_workqueue(efx); efx_siena_flush_reset_workqueue(efx);
efx_disable_interrupts(efx); efx_siena_disable_interrupts(efx);
efx_clear_interrupt_affinity(efx); efx_siena_clear_interrupt_affinity(efx);
efx_nic_fini_interrupt(efx); efx_nic_fini_interrupt(efx);
efx_fini_port(efx); efx_fini_port(efx);
efx->type->fini(efx); efx->type->fini(efx);
efx_fini_napi(efx); efx_siena_fini_napi(efx);
efx_remove_all(efx); efx_remove_all(efx);
} }
...@@ -860,7 +852,7 @@ static void efx_pci_remove(struct pci_dev *pci_dev) ...@@ -860,7 +852,7 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
rtnl_lock(); rtnl_lock();
efx_dissociate(efx); efx_dissociate(efx);
dev_close(efx->net_dev); dev_close(efx->net_dev);
efx_disable_interrupts(efx); efx_siena_disable_interrupts(efx);
efx->state = STATE_UNINIT; efx->state = STATE_UNINIT;
rtnl_unlock(); rtnl_unlock();
...@@ -869,14 +861,14 @@ static void efx_pci_remove(struct pci_dev *pci_dev) ...@@ -869,14 +861,14 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
efx_unregister_netdev(efx); efx_unregister_netdev(efx);
efx_mtd_remove(efx); efx_siena_mtd_remove(efx);
efx_pci_remove_main(efx); efx_pci_remove_main(efx);
efx_fini_io(efx); efx_siena_fini_io(efx);
netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n"); netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");
efx_fini_struct(efx); efx_siena_fini_struct(efx);
free_netdev(efx->net_dev); free_netdev(efx->net_dev);
pci_disable_pcie_error_reporting(pci_dev); pci_disable_pcie_error_reporting(pci_dev);
...@@ -929,7 +921,7 @@ static int efx_pci_probe_main(struct efx_nic *efx) ...@@ -929,7 +921,7 @@ static int efx_pci_probe_main(struct efx_nic *efx)
if (rc) if (rc)
goto fail1; goto fail1;
efx_init_napi(efx); efx_siena_init_napi(efx);
down_write(&efx->filter_sem); down_write(&efx->filter_sem);
rc = efx->type->init(efx); rc = efx->type->init(efx);
...@@ -950,22 +942,22 @@ static int efx_pci_probe_main(struct efx_nic *efx) ...@@ -950,22 +942,22 @@ static int efx_pci_probe_main(struct efx_nic *efx)
if (rc) if (rc)
goto fail5; goto fail5;
efx_set_interrupt_affinity(efx); efx_siena_set_interrupt_affinity(efx);
rc = efx_enable_interrupts(efx); rc = efx_siena_enable_interrupts(efx);
if (rc) if (rc)
goto fail6; goto fail6;
return 0; return 0;
fail6: fail6:
efx_clear_interrupt_affinity(efx); efx_siena_clear_interrupt_affinity(efx);
efx_nic_fini_interrupt(efx); efx_nic_fini_interrupt(efx);
fail5: fail5:
efx_fini_port(efx); efx_fini_port(efx);
fail4: fail4:
efx->type->fini(efx); efx->type->fini(efx);
fail3: fail3:
efx_fini_napi(efx); efx_siena_fini_napi(efx);
efx_remove_all(efx); efx_remove_all(efx);
fail1: fail1:
return rc; return rc;
...@@ -1046,7 +1038,7 @@ static int efx_pci_probe(struct pci_dev *pci_dev, ...@@ -1046,7 +1038,7 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
pci_set_drvdata(pci_dev, efx); pci_set_drvdata(pci_dev, efx);
SET_NETDEV_DEV(net_dev, &pci_dev->dev); SET_NETDEV_DEV(net_dev, &pci_dev->dev);
rc = efx_init_struct(efx, pci_dev, net_dev); rc = efx_siena_init_struct(efx, pci_dev, net_dev);
if (rc) if (rc)
goto fail1; goto fail1;
...@@ -1056,7 +1048,8 @@ static int efx_pci_probe(struct pci_dev *pci_dev, ...@@ -1056,7 +1048,8 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
efx_probe_vpd_strings(efx); efx_probe_vpd_strings(efx);
/* Set up basic I/O (BAR mappings etc) */ /* Set up basic I/O (BAR mappings etc) */
rc = efx_init_io(efx, efx->type->mem_bar(efx), efx->type->max_dma_mask, rc = efx_siena_init_io(efx, efx->type->mem_bar(efx),
efx->type->max_dma_mask,
efx->type->mem_map_size(efx)); efx->type->mem_map_size(efx));
if (rc) if (rc)
goto fail2; goto fail2;
...@@ -1101,9 +1094,9 @@ static int efx_pci_probe(struct pci_dev *pci_dev, ...@@ -1101,9 +1094,9 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
return 0; return 0;
fail3: fail3:
efx_fini_io(efx); efx_siena_fini_io(efx);
fail2: fail2:
efx_fini_struct(efx); efx_siena_fini_struct(efx);
fail1: fail1:
WARN_ON(rc > 0); WARN_ON(rc > 0);
netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc); netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc);
...@@ -1142,8 +1135,8 @@ static int efx_pm_freeze(struct device *dev) ...@@ -1142,8 +1135,8 @@ static int efx_pm_freeze(struct device *dev)
efx_device_detach_sync(efx); efx_device_detach_sync(efx);
efx_stop_all(efx); efx_siena_stop_all(efx);
efx_disable_interrupts(efx); efx_siena_disable_interrupts(efx);
} }
rtnl_unlock(); rtnl_unlock();
...@@ -1159,7 +1152,7 @@ static int efx_pm_thaw(struct device *dev) ...@@ -1159,7 +1152,7 @@ static int efx_pm_thaw(struct device *dev)
rtnl_lock(); rtnl_lock();
if (efx->state != STATE_DISABLED) { if (efx->state != STATE_DISABLED) {
rc = efx_enable_interrupts(efx); rc = efx_siena_enable_interrupts(efx);
if (rc) if (rc)
goto fail; goto fail;
...@@ -1167,7 +1160,7 @@ static int efx_pm_thaw(struct device *dev) ...@@ -1167,7 +1160,7 @@ static int efx_pm_thaw(struct device *dev)
efx_mcdi_port_reconfigure(efx); efx_mcdi_port_reconfigure(efx);
mutex_unlock(&efx->mac_lock); mutex_unlock(&efx->mac_lock);
efx_start_all(efx); efx_siena_start_all(efx);
efx_device_attach_if_not_resetting(efx); efx_device_attach_if_not_resetting(efx);
...@@ -1179,7 +1172,7 @@ static int efx_pm_thaw(struct device *dev) ...@@ -1179,7 +1172,7 @@ static int efx_pm_thaw(struct device *dev)
rtnl_unlock(); rtnl_unlock();
/* Reschedule any quenched resets scheduled during efx_pm_freeze() */ /* Reschedule any quenched resets scheduled during efx_pm_freeze() */
efx_queue_reset_work(efx); efx_siena_queue_reset_work(efx);
return 0; return 0;
...@@ -1255,7 +1248,7 @@ static struct pci_driver efx_pci_driver = { ...@@ -1255,7 +1248,7 @@ static struct pci_driver efx_pci_driver = {
.probe = efx_pci_probe, .probe = efx_pci_probe,
.remove = efx_pci_remove, .remove = efx_pci_remove,
.driver.pm = &efx_pm_ops, .driver.pm = &efx_pm_ops,
.err_handler = &efx_err_handlers, .err_handler = &efx_siena_err_handlers,
#ifdef CONFIG_SFC_SRIOV #ifdef CONFIG_SFC_SRIOV
.sriov_configure = efx_pci_sriov_configure, .sriov_configure = efx_pci_sriov_configure,
#endif #endif
...@@ -1277,7 +1270,7 @@ static int __init efx_init_module(void) ...@@ -1277,7 +1270,7 @@ static int __init efx_init_module(void)
if (rc) if (rc)
goto err_notifier; goto err_notifier;
rc = efx_create_reset_workqueue(); rc = efx_siena_create_reset_workqueue();
if (rc) if (rc)
goto err_reset; goto err_reset;
...@@ -1288,7 +1281,7 @@ static int __init efx_init_module(void) ...@@ -1288,7 +1281,7 @@ static int __init efx_init_module(void)
return 0; return 0;
err_pci: err_pci:
efx_destroy_reset_workqueue(); efx_siena_destroy_reset_workqueue();
err_reset: err_reset:
unregister_netdevice_notifier(&efx_netdev_notifier); unregister_netdevice_notifier(&efx_netdev_notifier);
err_notifier: err_notifier:
...@@ -1300,7 +1293,7 @@ static void __exit efx_exit_module(void) ...@@ -1300,7 +1293,7 @@ static void __exit efx_exit_module(void)
printk(KERN_INFO "Solarflare NET driver unloading\n"); printk(KERN_INFO "Solarflare NET driver unloading\n");
pci_unregister_driver(&efx_pci_driver); pci_unregister_driver(&efx_pci_driver);
efx_destroy_reset_workqueue(); efx_siena_destroy_reset_workqueue();
unregister_netdevice_notifier(&efx_netdev_notifier); unregister_netdevice_notifier(&efx_netdev_notifier);
} }
......
...@@ -12,36 +12,28 @@ ...@@ -12,36 +12,28 @@
#include "net_driver.h" #include "net_driver.h"
#include "filter.h" #include "filter.h"
int efx_net_open(struct net_device *net_dev);
int efx_net_stop(struct net_device *net_dev);
/* TX */ /* TX */
void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue); void efx_siena_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb, netdev_tx_t efx_siena_hard_start_xmit(struct sk_buff *skb,
struct net_device *net_dev); struct net_device *net_dev);
netdev_tx_t __efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb); netdev_tx_t __efx_siena_enqueue_skb(struct efx_tx_queue *tx_queue,
struct sk_buff *skb);
static inline netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) static inline netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{ {
return INDIRECT_CALL_1(tx_queue->efx->type->tx_enqueue, return INDIRECT_CALL_1(tx_queue->efx->type->tx_enqueue,
__efx_enqueue_skb, tx_queue, skb); __efx_siena_enqueue_skb, tx_queue, skb);
} }
void efx_xmit_done_single(struct efx_tx_queue *tx_queue); int efx_siena_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
void *type_data); void *type_data);
extern unsigned int efx_piobuf_size;
/* RX */ /* RX */
void __efx_rx_packet(struct efx_channel *channel); void __efx_siena_rx_packet(struct efx_channel *channel);
void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, void efx_siena_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
unsigned int n_frags, unsigned int len, u16 flags); unsigned int n_frags, unsigned int len, u16 flags);
static inline void efx_rx_flush_packet(struct efx_channel *channel) static inline void efx_rx_flush_packet(struct efx_channel *channel)
{ {
if (channel->rx_pkt_n_frags) if (channel->rx_pkt_n_frags)
__efx_rx_packet(channel); __efx_siena_rx_packet(channel);
}
static inline bool efx_rx_buf_hash_valid(struct efx_nic *efx, const u8 *prefix)
{
return true;
} }
/* Maximum number of TCP segments we support for soft-TSO */ /* Maximum number of TCP segments we support for soft-TSO */
...@@ -156,34 +148,33 @@ static inline bool efx_rss_active(struct efx_rss_context *ctx) ...@@ -156,34 +148,33 @@ static inline bool efx_rss_active(struct efx_rss_context *ctx)
} }
/* Ethtool support */ /* Ethtool support */
extern const struct ethtool_ops efx_ethtool_ops; extern const struct ethtool_ops efx_siena_ethtool_ops;
/* Global */ /* Global */
unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs); unsigned int efx_siena_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs);
unsigned int efx_ticks_to_usecs(struct efx_nic *efx, unsigned int ticks); int efx_siena_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
unsigned int rx_usecs, bool rx_adaptive, unsigned int rx_usecs, bool rx_adaptive,
bool rx_may_override_tx); bool rx_may_override_tx);
void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs, void efx_siena_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
unsigned int *rx_usecs, bool *rx_adaptive); unsigned int *rx_usecs, bool *rx_adaptive);
/* Update the generic software stats in the passed stats array */ /* Update the generic software stats in the passed stats array */
void efx_update_sw_stats(struct efx_nic *efx, u64 *stats); void efx_siena_update_sw_stats(struct efx_nic *efx, u64 *stats);
/* MTD */ /* MTD */
#ifdef CONFIG_SFC_MTD #ifdef CONFIG_SFC_MTD
int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts, int efx_siena_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
size_t n_parts, size_t sizeof_part); size_t n_parts, size_t sizeof_part);
static inline int efx_mtd_probe(struct efx_nic *efx) static inline int efx_mtd_probe(struct efx_nic *efx)
{ {
return efx->type->mtd_probe(efx); return efx->type->mtd_probe(efx);
} }
void efx_mtd_rename(struct efx_nic *efx); void efx_siena_mtd_rename(struct efx_nic *efx);
void efx_mtd_remove(struct efx_nic *efx); void efx_siena_mtd_remove(struct efx_nic *efx);
#else #else
static inline int efx_mtd_probe(struct efx_nic *efx) { return 0; } static inline int efx_mtd_probe(struct efx_nic *efx) { return 0; }
static inline void efx_mtd_rename(struct efx_nic *efx) {} static inline void efx_siena_mtd_rename(struct efx_nic *efx) {}
static inline void efx_mtd_remove(struct efx_nic *efx) {} static inline void efx_siena_mtd_remove(struct efx_nic *efx) {}
#endif #endif
#ifdef CONFIG_SFC_SRIOV #ifdef CONFIG_SFC_SRIOV
...@@ -221,7 +212,7 @@ static inline bool efx_rwsem_assert_write_locked(struct rw_semaphore *sem) ...@@ -221,7 +212,7 @@ static inline bool efx_rwsem_assert_write_locked(struct rw_semaphore *sem)
return true; return true;
} }
int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs, int efx_siena_xdp_tx_buffers(struct efx_nic *efx, int n,
bool flush); struct xdp_frame **xdpfs, bool flush);
#endif /* EFX_EFX_H */ #endif /* EFX_EFX_H */
...@@ -25,7 +25,7 @@ ...@@ -25,7 +25,7 @@
* 1 => MSI * 1 => MSI
* 2 => legacy * 2 => legacy
*/ */
unsigned int efx_interrupt_mode = EFX_INT_MODE_MSIX; unsigned int efx_siena_interrupt_mode = EFX_INT_MODE_MSIX;
/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS), /* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
* i.e. the number of CPUs among which we may distribute simultaneous * i.e. the number of CPUs among which we may distribute simultaneous
...@@ -34,7 +34,7 @@ unsigned int efx_interrupt_mode = EFX_INT_MODE_MSIX; ...@@ -34,7 +34,7 @@ unsigned int efx_interrupt_mode = EFX_INT_MODE_MSIX;
* Cards without MSI-X will only target one CPU via legacy or MSI interrupt. * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
* The default (0) means to assign an interrupt to each core. * The default (0) means to assign an interrupt to each core.
*/ */
unsigned int rss_cpus; unsigned int efx_siena_rss_cpus;
static unsigned int irq_adapt_low_thresh = 8000; static unsigned int irq_adapt_low_thresh = 8000;
module_param(irq_adapt_low_thresh, uint, 0644); module_param(irq_adapt_low_thresh, uint, 0644);
...@@ -89,8 +89,8 @@ static unsigned int efx_wanted_parallelism(struct efx_nic *efx) ...@@ -89,8 +89,8 @@ static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
{ {
unsigned int count; unsigned int count;
if (rss_cpus) { if (efx_siena_rss_cpus) {
count = rss_cpus; count = efx_siena_rss_cpus;
} else { } else {
count = count_online_cores(efx, true); count = count_online_cores(efx, true);
...@@ -100,7 +100,8 @@ static unsigned int efx_wanted_parallelism(struct efx_nic *efx) ...@@ -100,7 +100,8 @@ static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
} }
if (count > EFX_MAX_RX_QUEUES) { if (count > EFX_MAX_RX_QUEUES) {
netif_cond_dbg(efx, probe, efx->net_dev, !rss_cpus, warn, netif_cond_dbg(efx, probe, efx->net_dev, !efx_siena_rss_cpus,
warn,
"Reducing number of rx queues from %u to %u.\n", "Reducing number of rx queues from %u to %u.\n",
count, EFX_MAX_RX_QUEUES); count, EFX_MAX_RX_QUEUES);
count = EFX_MAX_RX_QUEUES; count = EFX_MAX_RX_QUEUES;
...@@ -249,7 +250,7 @@ static int efx_allocate_msix_channels(struct efx_nic *efx, ...@@ -249,7 +250,7 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
/* Probe the number and type of interrupts we are able to obtain, and /* Probe the number and type of interrupts we are able to obtain, and
* the resulting numbers of channels and RX queues. * the resulting numbers of channels and RX queues.
*/ */
int efx_probe_interrupts(struct efx_nic *efx) int efx_siena_probe_interrupts(struct efx_nic *efx)
{ {
unsigned int extra_channels = 0; unsigned int extra_channels = 0;
unsigned int rss_spread; unsigned int rss_spread;
...@@ -361,7 +362,7 @@ int efx_probe_interrupts(struct efx_nic *efx) ...@@ -361,7 +362,7 @@ int efx_probe_interrupts(struct efx_nic *efx)
} }
#if defined(CONFIG_SMP) #if defined(CONFIG_SMP)
void efx_set_interrupt_affinity(struct efx_nic *efx) void efx_siena_set_interrupt_affinity(struct efx_nic *efx)
{ {
const struct cpumask *numa_mask = cpumask_of_pcibus(efx->pci_dev->bus); const struct cpumask *numa_mask = cpumask_of_pcibus(efx->pci_dev->bus);
struct efx_channel *channel; struct efx_channel *channel;
...@@ -380,7 +381,7 @@ void efx_set_interrupt_affinity(struct efx_nic *efx) ...@@ -380,7 +381,7 @@ void efx_set_interrupt_affinity(struct efx_nic *efx)
} }
} }
void efx_clear_interrupt_affinity(struct efx_nic *efx) void efx_siena_clear_interrupt_affinity(struct efx_nic *efx)
{ {
struct efx_channel *channel; struct efx_channel *channel;
...@@ -389,17 +390,17 @@ void efx_clear_interrupt_affinity(struct efx_nic *efx) ...@@ -389,17 +390,17 @@ void efx_clear_interrupt_affinity(struct efx_nic *efx)
} }
#else #else
void void
efx_set_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused))) efx_siena_set_interrupt_affinity(struct efx_nic *efx __always_unused)
{ {
} }
void void
efx_clear_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused))) efx_siena_clear_interrupt_affinity(struct efx_nic *efx __always_unused)
{ {
} }
#endif /* CONFIG_SMP */ #endif /* CONFIG_SMP */
void efx_remove_interrupts(struct efx_nic *efx) void efx_siena_remove_interrupts(struct efx_nic *efx)
{ {
struct efx_channel *channel; struct efx_channel *channel;
...@@ -422,7 +423,7 @@ void efx_remove_interrupts(struct efx_nic *efx) ...@@ -422,7 +423,7 @@ void efx_remove_interrupts(struct efx_nic *efx)
* is reset, the memory buffer will be reused; this guards against * is reset, the memory buffer will be reused; this guards against
* errors during channel reset and also simplifies interrupt handling. * errors during channel reset and also simplifies interrupt handling.
*/ */
int efx_probe_eventq(struct efx_channel *channel) static int efx_probe_eventq(struct efx_channel *channel)
{ {
struct efx_nic *efx = channel->efx; struct efx_nic *efx = channel->efx;
unsigned long entries; unsigned long entries;
...@@ -441,7 +442,7 @@ int efx_probe_eventq(struct efx_channel *channel) ...@@ -441,7 +442,7 @@ int efx_probe_eventq(struct efx_channel *channel)
} }
/* Prepare channel's event queue */ /* Prepare channel's event queue */
int efx_init_eventq(struct efx_channel *channel) static int efx_init_eventq(struct efx_channel *channel)
{ {
struct efx_nic *efx = channel->efx; struct efx_nic *efx = channel->efx;
int rc; int rc;
...@@ -461,7 +462,7 @@ int efx_init_eventq(struct efx_channel *channel) ...@@ -461,7 +462,7 @@ int efx_init_eventq(struct efx_channel *channel)
} }
/* Enable event queue processing and NAPI */ /* Enable event queue processing and NAPI */
void efx_start_eventq(struct efx_channel *channel) void efx_siena_start_eventq(struct efx_channel *channel)
{ {
netif_dbg(channel->efx, ifup, channel->efx->net_dev, netif_dbg(channel->efx, ifup, channel->efx->net_dev,
"chan %d start event queue\n", channel->channel); "chan %d start event queue\n", channel->channel);
...@@ -475,7 +476,7 @@ void efx_start_eventq(struct efx_channel *channel) ...@@ -475,7 +476,7 @@ void efx_start_eventq(struct efx_channel *channel)
} }
/* Disable event queue processing and NAPI */ /* Disable event queue processing and NAPI */
void efx_stop_eventq(struct efx_channel *channel) void efx_siena_stop_eventq(struct efx_channel *channel)
{ {
if (!channel->enabled) if (!channel->enabled)
return; return;
...@@ -484,7 +485,7 @@ void efx_stop_eventq(struct efx_channel *channel) ...@@ -484,7 +485,7 @@ void efx_stop_eventq(struct efx_channel *channel)
channel->enabled = false; channel->enabled = false;
} }
void efx_fini_eventq(struct efx_channel *channel) static void efx_fini_eventq(struct efx_channel *channel)
{ {
if (!channel->eventq_init) if (!channel->eventq_init)
return; return;
...@@ -496,7 +497,7 @@ void efx_fini_eventq(struct efx_channel *channel) ...@@ -496,7 +497,7 @@ void efx_fini_eventq(struct efx_channel *channel)
channel->eventq_init = false; channel->eventq_init = false;
} }
void efx_remove_eventq(struct efx_channel *channel) static void efx_remove_eventq(struct efx_channel *channel)
{ {
netif_dbg(channel->efx, drv, channel->efx->net_dev, netif_dbg(channel->efx, drv, channel->efx->net_dev,
"chan %d remove event queue\n", channel->channel); "chan %d remove event queue\n", channel->channel);
...@@ -562,7 +563,7 @@ static struct efx_channel *efx_alloc_channel(struct efx_nic *efx, int i) ...@@ -562,7 +563,7 @@ static struct efx_channel *efx_alloc_channel(struct efx_nic *efx, int i)
return channel; return channel;
} }
int efx_init_channels(struct efx_nic *efx) int efx_siena_init_channels(struct efx_nic *efx)
{ {
unsigned int i; unsigned int i;
...@@ -576,7 +577,7 @@ int efx_init_channels(struct efx_nic *efx) ...@@ -576,7 +577,7 @@ int efx_init_channels(struct efx_nic *efx)
/* Higher numbered interrupt modes are less capable! */ /* Higher numbered interrupt modes are less capable! */
efx->interrupt_mode = min(efx->type->min_interrupt_mode, efx->interrupt_mode = min(efx->type->min_interrupt_mode,
efx_interrupt_mode); efx_siena_interrupt_mode);
efx->max_channels = EFX_MAX_CHANNELS; efx->max_channels = EFX_MAX_CHANNELS;
efx->max_tx_channels = EFX_MAX_CHANNELS; efx->max_tx_channels = EFX_MAX_CHANNELS;
...@@ -584,7 +585,7 @@ int efx_init_channels(struct efx_nic *efx) ...@@ -584,7 +585,7 @@ int efx_init_channels(struct efx_nic *efx)
return 0; return 0;
} }
void efx_fini_channels(struct efx_nic *efx) void efx_siena_fini_channels(struct efx_nic *efx)
{ {
unsigned int i; unsigned int i;
...@@ -672,7 +673,7 @@ static int efx_probe_channel(struct efx_channel *channel) ...@@ -672,7 +673,7 @@ static int efx_probe_channel(struct efx_channel *channel)
return 0; return 0;
fail: fail:
efx_remove_channel(channel); efx_siena_remove_channel(channel);
return rc; return rc;
} }
...@@ -700,7 +701,7 @@ static void efx_get_channel_name(struct efx_channel *channel, char *buf, ...@@ -700,7 +701,7 @@ static void efx_get_channel_name(struct efx_channel *channel, char *buf,
snprintf(buf, len, "%s%s-%d", efx->name, type, number); snprintf(buf, len, "%s%s-%d", efx->name, type, number);
} }
void efx_set_channel_names(struct efx_nic *efx) void efx_siena_set_channel_names(struct efx_nic *efx)
{ {
struct efx_channel *channel; struct efx_channel *channel;
...@@ -710,7 +711,7 @@ void efx_set_channel_names(struct efx_nic *efx) ...@@ -710,7 +711,7 @@ void efx_set_channel_names(struct efx_nic *efx)
sizeof(efx->msi_context[0].name)); sizeof(efx->msi_context[0].name));
} }
int efx_probe_channels(struct efx_nic *efx) int efx_siena_probe_channels(struct efx_nic *efx)
{ {
struct efx_channel *channel; struct efx_channel *channel;
int rc; int rc;
...@@ -732,16 +733,16 @@ int efx_probe_channels(struct efx_nic *efx) ...@@ -732,16 +733,16 @@ int efx_probe_channels(struct efx_nic *efx)
goto fail; goto fail;
} }
} }
efx_set_channel_names(efx); efx_siena_set_channel_names(efx);
return 0; return 0;
fail: fail:
efx_remove_channels(efx); efx_siena_remove_channels(efx);
return rc; return rc;
} }
void efx_remove_channel(struct efx_channel *channel) void efx_siena_remove_channel(struct efx_channel *channel)
{ {
struct efx_tx_queue *tx_queue; struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue; struct efx_rx_queue *rx_queue;
...@@ -757,12 +758,12 @@ void efx_remove_channel(struct efx_channel *channel) ...@@ -757,12 +758,12 @@ void efx_remove_channel(struct efx_channel *channel)
channel->type->post_remove(channel); channel->type->post_remove(channel);
} }
void efx_remove_channels(struct efx_nic *efx) void efx_siena_remove_channels(struct efx_nic *efx)
{ {
struct efx_channel *channel; struct efx_channel *channel;
efx_for_each_channel(channel, efx) efx_for_each_channel(channel, efx)
efx_remove_channel(channel); efx_siena_remove_channel(channel);
kfree(efx->xdp_tx_queues); kfree(efx->xdp_tx_queues);
} }
...@@ -846,7 +847,13 @@ static void efx_set_xdp_channels(struct efx_nic *efx) ...@@ -846,7 +847,13 @@ static void efx_set_xdp_channels(struct efx_nic *efx)
} }
} }
int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries) static int efx_soft_enable_interrupts(struct efx_nic *efx);
static void efx_soft_disable_interrupts(struct efx_nic *efx);
static void efx_init_napi_channel(struct efx_channel *channel);
static void efx_fini_napi_channel(struct efx_channel *channel);
int efx_siena_realloc_channels(struct efx_nic *efx, u32 rxq_entries,
u32 txq_entries)
{ {
struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel; struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
unsigned int i, next_buffer_table = 0; unsigned int i, next_buffer_table = 0;
...@@ -880,7 +887,7 @@ int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries) ...@@ -880,7 +887,7 @@ int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
} }
efx_device_detach_sync(efx); efx_device_detach_sync(efx);
efx_stop_all(efx); efx_siena_stop_all(efx);
efx_soft_disable_interrupts(efx); efx_soft_disable_interrupts(efx);
/* Clone channels (where possible) */ /* Clone channels (where possible) */
...@@ -924,7 +931,7 @@ int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries) ...@@ -924,7 +931,7 @@ int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
channel = other_channel[i]; channel = other_channel[i];
if (channel && channel->type->copy) { if (channel && channel->type->copy) {
efx_fini_napi_channel(channel); efx_fini_napi_channel(channel);
efx_remove_channel(channel); efx_siena_remove_channel(channel);
kfree(channel); kfree(channel);
} }
} }
...@@ -934,9 +941,9 @@ int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries) ...@@ -934,9 +941,9 @@ int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
rc = rc ? rc : rc2; rc = rc ? rc : rc2;
netif_err(efx, drv, efx->net_dev, netif_err(efx, drv, efx->net_dev,
"unable to restart interrupts on channel reallocation\n"); "unable to restart interrupts on channel reallocation\n");
efx_schedule_reset(efx, RESET_TYPE_DISABLE); efx_siena_schedule_reset(efx, RESET_TYPE_DISABLE);
} else { } else {
efx_start_all(efx); efx_siena_start_all(efx);
efx_device_attach_if_not_resetting(efx); efx_device_attach_if_not_resetting(efx);
} }
return rc; return rc;
...@@ -950,7 +957,7 @@ int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries) ...@@ -950,7 +957,7 @@ int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
goto out; goto out;
} }
int efx_set_channels(struct efx_nic *efx) int efx_siena_set_channels(struct efx_nic *efx)
{ {
struct efx_channel *channel; struct efx_channel *channel;
int rc; int rc;
...@@ -995,7 +1002,7 @@ static bool efx_default_channel_want_txqs(struct efx_channel *channel) ...@@ -995,7 +1002,7 @@ static bool efx_default_channel_want_txqs(struct efx_channel *channel)
* START/STOP * START/STOP
*************/ *************/
int efx_soft_enable_interrupts(struct efx_nic *efx) static int efx_soft_enable_interrupts(struct efx_nic *efx)
{ {
struct efx_channel *channel, *end_channel; struct efx_channel *channel, *end_channel;
int rc; int rc;
...@@ -1011,7 +1018,7 @@ int efx_soft_enable_interrupts(struct efx_nic *efx) ...@@ -1011,7 +1018,7 @@ int efx_soft_enable_interrupts(struct efx_nic *efx)
if (rc) if (rc)
goto fail; goto fail;
} }
efx_start_eventq(channel); efx_siena_start_eventq(channel);
} }
efx_mcdi_mode_event(efx); efx_mcdi_mode_event(efx);
...@@ -1022,7 +1029,7 @@ int efx_soft_enable_interrupts(struct efx_nic *efx) ...@@ -1022,7 +1029,7 @@ int efx_soft_enable_interrupts(struct efx_nic *efx)
efx_for_each_channel(channel, efx) { efx_for_each_channel(channel, efx) {
if (channel == end_channel) if (channel == end_channel)
break; break;
efx_stop_eventq(channel); efx_siena_stop_eventq(channel);
if (!channel->type->keep_eventq) if (!channel->type->keep_eventq)
efx_fini_eventq(channel); efx_fini_eventq(channel);
} }
...@@ -1030,7 +1037,7 @@ int efx_soft_enable_interrupts(struct efx_nic *efx) ...@@ -1030,7 +1037,7 @@ int efx_soft_enable_interrupts(struct efx_nic *efx)
return rc; return rc;
} }
void efx_soft_disable_interrupts(struct efx_nic *efx) static void efx_soft_disable_interrupts(struct efx_nic *efx)
{ {
struct efx_channel *channel; struct efx_channel *channel;
...@@ -1049,7 +1056,7 @@ void efx_soft_disable_interrupts(struct efx_nic *efx) ...@@ -1049,7 +1056,7 @@ void efx_soft_disable_interrupts(struct efx_nic *efx)
if (channel->irq) if (channel->irq)
synchronize_irq(channel->irq); synchronize_irq(channel->irq);
efx_stop_eventq(channel); efx_siena_stop_eventq(channel);
if (!channel->type->keep_eventq) if (!channel->type->keep_eventq)
efx_fini_eventq(channel); efx_fini_eventq(channel);
} }
...@@ -1058,7 +1065,7 @@ void efx_soft_disable_interrupts(struct efx_nic *efx) ...@@ -1058,7 +1065,7 @@ void efx_soft_disable_interrupts(struct efx_nic *efx)
efx_mcdi_flush_async(efx); efx_mcdi_flush_async(efx);
} }
int efx_enable_interrupts(struct efx_nic *efx) int efx_siena_enable_interrupts(struct efx_nic *efx)
{ {
struct efx_channel *channel, *end_channel; struct efx_channel *channel, *end_channel;
int rc; int rc;
...@@ -1101,7 +1108,7 @@ int efx_enable_interrupts(struct efx_nic *efx) ...@@ -1101,7 +1108,7 @@ int efx_enable_interrupts(struct efx_nic *efx)
return rc; return rc;
} }
void efx_disable_interrupts(struct efx_nic *efx) void efx_siena_disable_interrupts(struct efx_nic *efx)
{ {
struct efx_channel *channel; struct efx_channel *channel;
...@@ -1115,7 +1122,7 @@ void efx_disable_interrupts(struct efx_nic *efx) ...@@ -1115,7 +1122,7 @@ void efx_disable_interrupts(struct efx_nic *efx)
efx->type->irq_disable_non_ev(efx); efx->type->irq_disable_non_ev(efx);
} }
void efx_start_channels(struct efx_nic *efx) void efx_siena_start_channels(struct efx_nic *efx)
{ {
struct efx_tx_queue *tx_queue; struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue; struct efx_rx_queue *rx_queue;
...@@ -1130,16 +1137,16 @@ void efx_start_channels(struct efx_nic *efx) ...@@ -1130,16 +1137,16 @@ void efx_start_channels(struct efx_nic *efx)
efx_for_each_channel_rx_queue(rx_queue, channel) { efx_for_each_channel_rx_queue(rx_queue, channel) {
efx_init_rx_queue(rx_queue); efx_init_rx_queue(rx_queue);
atomic_inc(&efx->active_queues); atomic_inc(&efx->active_queues);
efx_stop_eventq(channel); efx_siena_stop_eventq(channel);
efx_fast_push_rx_descriptors(rx_queue, false); efx_fast_push_rx_descriptors(rx_queue, false);
efx_start_eventq(channel); efx_siena_start_eventq(channel);
} }
WARN_ON(channel->rx_pkt_n_frags); WARN_ON(channel->rx_pkt_n_frags);
} }
} }
void efx_stop_channels(struct efx_nic *efx) void efx_siena_stop_channels(struct efx_nic *efx)
{ {
struct efx_tx_queue *tx_queue; struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue; struct efx_rx_queue *rx_queue;
...@@ -1160,8 +1167,8 @@ void efx_stop_channels(struct efx_nic *efx) ...@@ -1160,8 +1167,8 @@ void efx_stop_channels(struct efx_nic *efx)
* temporarily. * temporarily.
*/ */
if (efx_channel_has_rx_queue(channel)) { if (efx_channel_has_rx_queue(channel)) {
efx_stop_eventq(channel); efx_siena_stop_eventq(channel);
efx_start_eventq(channel); efx_siena_start_eventq(channel);
} }
} }
...@@ -1311,7 +1318,7 @@ static int efx_poll(struct napi_struct *napi, int budget) ...@@ -1311,7 +1318,7 @@ static int efx_poll(struct napi_struct *napi, int budget)
return spent; return spent;
} }
void efx_init_napi_channel(struct efx_channel *channel) static void efx_init_napi_channel(struct efx_channel *channel)
{ {
struct efx_nic *efx = channel->efx; struct efx_nic *efx = channel->efx;
...@@ -1320,7 +1327,7 @@ void efx_init_napi_channel(struct efx_channel *channel) ...@@ -1320,7 +1327,7 @@ void efx_init_napi_channel(struct efx_channel *channel)
napi_weight); napi_weight);
} }
void efx_init_napi(struct efx_nic *efx) void efx_siena_init_napi(struct efx_nic *efx)
{ {
struct efx_channel *channel; struct efx_channel *channel;
...@@ -1328,7 +1335,7 @@ void efx_init_napi(struct efx_nic *efx) ...@@ -1328,7 +1335,7 @@ void efx_init_napi(struct efx_nic *efx)
efx_init_napi_channel(channel); efx_init_napi_channel(channel);
} }
void efx_fini_napi_channel(struct efx_channel *channel) static void efx_fini_napi_channel(struct efx_channel *channel)
{ {
if (channel->napi_dev) if (channel->napi_dev)
netif_napi_del(&channel->napi_str); netif_napi_del(&channel->napi_str);
...@@ -1336,7 +1343,7 @@ void efx_fini_napi_channel(struct efx_channel *channel) ...@@ -1336,7 +1343,7 @@ void efx_fini_napi_channel(struct efx_channel *channel)
channel->napi_dev = NULL; channel->napi_dev = NULL;
} }
void efx_fini_napi(struct efx_nic *efx) void efx_siena_fini_napi(struct efx_nic *efx)
{ {
struct efx_channel *channel; struct efx_channel *channel;
...@@ -1353,13 +1360,13 @@ static int efx_channel_dummy_op_int(struct efx_channel *channel) ...@@ -1353,13 +1360,13 @@ static int efx_channel_dummy_op_int(struct efx_channel *channel)
return 0; return 0;
} }
void efx_channel_dummy_op_void(struct efx_channel *channel) void efx_siena_channel_dummy_op_void(struct efx_channel *channel)
{ {
} }
static const struct efx_channel_type efx_default_channel_type = { static const struct efx_channel_type efx_default_channel_type = {
.pre_probe = efx_channel_dummy_op_int, .pre_probe = efx_channel_dummy_op_int,
.post_remove = efx_channel_dummy_op_void, .post_remove = efx_siena_channel_dummy_op_void,
.get_name = efx_get_channel_name, .get_name = efx_get_channel_name,
.copy = efx_copy_channel, .copy = efx_copy_channel,
.want_txqs = efx_default_channel_want_txqs, .want_txqs = efx_default_channel_want_txqs,
......
...@@ -11,42 +11,35 @@ ...@@ -11,42 +11,35 @@
#ifndef EFX_CHANNELS_H #ifndef EFX_CHANNELS_H
#define EFX_CHANNELS_H #define EFX_CHANNELS_H
extern unsigned int efx_interrupt_mode; extern unsigned int efx_siena_interrupt_mode;
extern unsigned int rss_cpus; extern unsigned int efx_siena_rss_cpus;
int efx_probe_interrupts(struct efx_nic *efx); int efx_siena_probe_interrupts(struct efx_nic *efx);
void efx_remove_interrupts(struct efx_nic *efx); void efx_siena_remove_interrupts(struct efx_nic *efx);
int efx_soft_enable_interrupts(struct efx_nic *efx); int efx_siena_enable_interrupts(struct efx_nic *efx);
void efx_soft_disable_interrupts(struct efx_nic *efx); void efx_siena_disable_interrupts(struct efx_nic *efx);
int efx_enable_interrupts(struct efx_nic *efx);
void efx_disable_interrupts(struct efx_nic *efx); void efx_siena_set_interrupt_affinity(struct efx_nic *efx);
void efx_siena_clear_interrupt_affinity(struct efx_nic *efx);
void efx_set_interrupt_affinity(struct efx_nic *efx);
void efx_clear_interrupt_affinity(struct efx_nic *efx); void efx_siena_start_eventq(struct efx_channel *channel);
void efx_siena_stop_eventq(struct efx_channel *channel);
int efx_probe_eventq(struct efx_channel *channel);
int efx_init_eventq(struct efx_channel *channel); int efx_siena_realloc_channels(struct efx_nic *efx, u32 rxq_entries,
void efx_start_eventq(struct efx_channel *channel); u32 txq_entries);
void efx_stop_eventq(struct efx_channel *channel); void efx_siena_set_channel_names(struct efx_nic *efx);
void efx_fini_eventq(struct efx_channel *channel); int efx_siena_init_channels(struct efx_nic *efx);
void efx_remove_eventq(struct efx_channel *channel); int efx_siena_probe_channels(struct efx_nic *efx);
int efx_siena_set_channels(struct efx_nic *efx);
int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries); void efx_siena_remove_channel(struct efx_channel *channel);
void efx_set_channel_names(struct efx_nic *efx); void efx_siena_remove_channels(struct efx_nic *efx);
int efx_init_channels(struct efx_nic *efx); void efx_siena_fini_channels(struct efx_nic *efx);
int efx_probe_channels(struct efx_nic *efx); void efx_siena_start_channels(struct efx_nic *efx);
int efx_set_channels(struct efx_nic *efx); void efx_siena_stop_channels(struct efx_nic *efx);
void efx_remove_channel(struct efx_channel *channel);
void efx_remove_channels(struct efx_nic *efx); void efx_siena_init_napi(struct efx_nic *efx);
void efx_fini_channels(struct efx_nic *efx); void efx_siena_fini_napi(struct efx_nic *efx);
void efx_start_channels(struct efx_nic *efx);
void efx_stop_channels(struct efx_nic *efx); void efx_siena_channel_dummy_op_void(struct efx_channel *channel);
void efx_init_napi_channel(struct efx_channel *channel);
void efx_init_napi(struct efx_nic *efx);
void efx_fini_napi_channel(struct efx_channel *channel);
void efx_fini_napi(struct efx_nic *efx);
void efx_channel_dummy_op_void(struct efx_channel *channel);
#endif #endif
...@@ -110,7 +110,7 @@ const char *const efx_loopback_mode_names[] = { ...@@ -110,7 +110,7 @@ const char *const efx_loopback_mode_names[] = {
*/ */
static struct workqueue_struct *reset_workqueue; static struct workqueue_struct *reset_workqueue;
int efx_create_reset_workqueue(void) int efx_siena_create_reset_workqueue(void)
{ {
reset_workqueue = create_singlethread_workqueue("sfc_reset"); reset_workqueue = create_singlethread_workqueue("sfc_reset");
if (!reset_workqueue) { if (!reset_workqueue) {
...@@ -121,17 +121,17 @@ int efx_create_reset_workqueue(void) ...@@ -121,17 +121,17 @@ int efx_create_reset_workqueue(void)
return 0; return 0;
} }
void efx_queue_reset_work(struct efx_nic *efx) void efx_siena_queue_reset_work(struct efx_nic *efx)
{ {
queue_work(reset_workqueue, &efx->reset_work); queue_work(reset_workqueue, &efx->reset_work);
} }
void efx_flush_reset_workqueue(struct efx_nic *efx) void efx_siena_flush_reset_workqueue(struct efx_nic *efx)
{ {
cancel_work_sync(&efx->reset_work); cancel_work_sync(&efx->reset_work);
} }
void efx_destroy_reset_workqueue(void) void efx_siena_destroy_reset_workqueue(void)
{ {
if (reset_workqueue) { if (reset_workqueue) {
destroy_workqueue(reset_workqueue); destroy_workqueue(reset_workqueue);
...@@ -142,7 +142,7 @@ void efx_destroy_reset_workqueue(void) ...@@ -142,7 +142,7 @@ void efx_destroy_reset_workqueue(void)
/* We assume that efx->type->reconfigure_mac will always try to sync RX /* We assume that efx->type->reconfigure_mac will always try to sync RX
* filters and therefore needs to read-lock the filter table against freeing * filters and therefore needs to read-lock the filter table against freeing
*/ */
void efx_mac_reconfigure(struct efx_nic *efx, bool mtu_only) void efx_siena_mac_reconfigure(struct efx_nic *efx, bool mtu_only)
{ {
if (efx->type->reconfigure_mac) { if (efx->type->reconfigure_mac) {
down_read(&efx->filter_sem); down_read(&efx->filter_sem);
...@@ -161,11 +161,11 @@ static void efx_mac_work(struct work_struct *data) ...@@ -161,11 +161,11 @@ static void efx_mac_work(struct work_struct *data)
mutex_lock(&efx->mac_lock); mutex_lock(&efx->mac_lock);
if (efx->port_enabled) if (efx->port_enabled)
efx_mac_reconfigure(efx, false); efx_siena_mac_reconfigure(efx, false);
mutex_unlock(&efx->mac_lock); mutex_unlock(&efx->mac_lock);
} }
int efx_set_mac_address(struct net_device *net_dev, void *data) int efx_siena_set_mac_address(struct net_device *net_dev, void *data)
{ {
struct efx_nic *efx = netdev_priv(net_dev); struct efx_nic *efx = netdev_priv(net_dev);
struct sockaddr *addr = data; struct sockaddr *addr = data;
...@@ -193,14 +193,14 @@ int efx_set_mac_address(struct net_device *net_dev, void *data) ...@@ -193,14 +193,14 @@ int efx_set_mac_address(struct net_device *net_dev, void *data)
/* Reconfigure the MAC */ /* Reconfigure the MAC */
mutex_lock(&efx->mac_lock); mutex_lock(&efx->mac_lock);
efx_mac_reconfigure(efx, false); efx_siena_mac_reconfigure(efx, false);
mutex_unlock(&efx->mac_lock); mutex_unlock(&efx->mac_lock);
return 0; return 0;
} }
/* Context: netif_addr_lock held, BHs disabled. */ /* Context: netif_addr_lock held, BHs disabled. */
void efx_set_rx_mode(struct net_device *net_dev) void efx_siena_set_rx_mode(struct net_device *net_dev)
{ {
struct efx_nic *efx = netdev_priv(net_dev); struct efx_nic *efx = netdev_priv(net_dev);
...@@ -209,7 +209,7 @@ void efx_set_rx_mode(struct net_device *net_dev) ...@@ -209,7 +209,7 @@ void efx_set_rx_mode(struct net_device *net_dev)
/* Otherwise efx_start_port() will do this */ /* Otherwise efx_start_port() will do this */
} }
int efx_set_features(struct net_device *net_dev, netdev_features_t data) int efx_siena_set_features(struct net_device *net_dev, netdev_features_t data)
{ {
struct efx_nic *efx = netdev_priv(net_dev); struct efx_nic *efx = netdev_priv(net_dev);
int rc; int rc;
...@@ -226,10 +226,10 @@ int efx_set_features(struct net_device *net_dev, netdev_features_t data) ...@@ -226,10 +226,10 @@ int efx_set_features(struct net_device *net_dev, netdev_features_t data)
*/ */
if ((net_dev->features ^ data) & (NETIF_F_HW_VLAN_CTAG_FILTER | if ((net_dev->features ^ data) & (NETIF_F_HW_VLAN_CTAG_FILTER |
NETIF_F_RXFCS)) { NETIF_F_RXFCS)) {
/* efx_set_rx_mode() will schedule MAC work to update filters /* efx_siena_set_rx_mode() will schedule MAC work to update filters
* when a new features are finally set in net_dev. * when a new features are finally set in net_dev.
*/ */
efx_set_rx_mode(net_dev); efx_siena_set_rx_mode(net_dev);
} }
return 0; return 0;
...@@ -239,7 +239,7 @@ int efx_set_features(struct net_device *net_dev, netdev_features_t data) ...@@ -239,7 +239,7 @@ int efx_set_features(struct net_device *net_dev, netdev_features_t data)
* netif_carrier_on/off) of the link status, and also maintains the * netif_carrier_on/off) of the link status, and also maintains the
* link status's stop on the port's TX queue. * link status's stop on the port's TX queue.
*/ */
void efx_link_status_changed(struct efx_nic *efx) void efx_siena_link_status_changed(struct efx_nic *efx)
{ {
struct efx_link_state *link_state = &efx->link_state; struct efx_link_state *link_state = &efx->link_state;
...@@ -270,7 +270,7 @@ void efx_link_status_changed(struct efx_nic *efx) ...@@ -270,7 +270,7 @@ void efx_link_status_changed(struct efx_nic *efx)
netif_info(efx, link, efx->net_dev, "link down\n"); netif_info(efx, link, efx->net_dev, "link down\n");
} }
unsigned int efx_xdp_max_mtu(struct efx_nic *efx) unsigned int efx_siena_xdp_max_mtu(struct efx_nic *efx)
{ {
/* The maximum MTU that we can fit in a single page, allowing for /* The maximum MTU that we can fit in a single page, allowing for
* framing, overhead and XDP headroom + tailroom. * framing, overhead and XDP headroom + tailroom.
...@@ -283,7 +283,7 @@ unsigned int efx_xdp_max_mtu(struct efx_nic *efx) ...@@ -283,7 +283,7 @@ unsigned int efx_xdp_max_mtu(struct efx_nic *efx)
} }
/* Context: process, rtnl_lock() held. */ /* Context: process, rtnl_lock() held. */
int efx_change_mtu(struct net_device *net_dev, int new_mtu) int efx_siena_change_mtu(struct net_device *net_dev, int new_mtu)
{ {
struct efx_nic *efx = netdev_priv(net_dev); struct efx_nic *efx = netdev_priv(net_dev);
int rc; int rc;
...@@ -293,24 +293,24 @@ int efx_change_mtu(struct net_device *net_dev, int new_mtu) ...@@ -293,24 +293,24 @@ int efx_change_mtu(struct net_device *net_dev, int new_mtu)
return rc; return rc;
if (rtnl_dereference(efx->xdp_prog) && if (rtnl_dereference(efx->xdp_prog) &&
new_mtu > efx_xdp_max_mtu(efx)) { new_mtu > efx_siena_xdp_max_mtu(efx)) {
netif_err(efx, drv, efx->net_dev, netif_err(efx, drv, efx->net_dev,
"Requested MTU of %d too big for XDP (max: %d)\n", "Requested MTU of %d too big for XDP (max: %d)\n",
new_mtu, efx_xdp_max_mtu(efx)); new_mtu, efx_siena_xdp_max_mtu(efx));
return -EINVAL; return -EINVAL;
} }
netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu); netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
efx_device_detach_sync(efx); efx_device_detach_sync(efx);
efx_stop_all(efx); efx_siena_stop_all(efx);
mutex_lock(&efx->mac_lock); mutex_lock(&efx->mac_lock);
net_dev->mtu = new_mtu; net_dev->mtu = new_mtu;
efx_mac_reconfigure(efx, true); efx_siena_mac_reconfigure(efx, true);
mutex_unlock(&efx->mac_lock); mutex_unlock(&efx->mac_lock);
efx_start_all(efx); efx_siena_start_all(efx);
efx_device_attach_if_not_resetting(efx); efx_device_attach_if_not_resetting(efx);
return 0; return 0;
} }
...@@ -342,10 +342,10 @@ static void efx_monitor(struct work_struct *data) ...@@ -342,10 +342,10 @@ static void efx_monitor(struct work_struct *data)
mutex_unlock(&efx->mac_lock); mutex_unlock(&efx->mac_lock);
} }
efx_start_monitor(efx); efx_siena_start_monitor(efx);
} }
void efx_start_monitor(struct efx_nic *efx) void efx_siena_start_monitor(struct efx_nic *efx)
{ {
if (efx->type->monitor) if (efx->type->monitor)
queue_delayed_work(efx->workqueue, &efx->monitor_work, queue_delayed_work(efx->workqueue, &efx->monitor_work,
...@@ -432,7 +432,7 @@ static void efx_start_datapath(struct efx_nic *efx) ...@@ -432,7 +432,7 @@ static void efx_start_datapath(struct efx_nic *efx)
efx->txq_wake_thresh = efx->txq_stop_thresh / 2; efx->txq_wake_thresh = efx->txq_stop_thresh / 2;
/* Initialise the channels */ /* Initialise the channels */
efx_start_channels(efx); efx_siena_start_channels(efx);
efx_ptp_start_datapath(efx); efx_ptp_start_datapath(efx);
...@@ -447,7 +447,7 @@ static void efx_stop_datapath(struct efx_nic *efx) ...@@ -447,7 +447,7 @@ static void efx_stop_datapath(struct efx_nic *efx)
efx_ptp_stop_datapath(efx); efx_ptp_stop_datapath(efx);
efx_stop_channels(efx); efx_siena_stop_channels(efx);
} }
/************************************************************************** /**************************************************************************
...@@ -459,13 +459,13 @@ static void efx_stop_datapath(struct efx_nic *efx) ...@@ -459,13 +459,13 @@ static void efx_stop_datapath(struct efx_nic *efx)
/* Equivalent to efx_link_set_advertising with all-zeroes, except does not /* Equivalent to efx_link_set_advertising with all-zeroes, except does not
* force the Autoneg bit on. * force the Autoneg bit on.
*/ */
void efx_link_clear_advertising(struct efx_nic *efx) void efx_siena_link_clear_advertising(struct efx_nic *efx)
{ {
bitmap_zero(efx->link_advertising, __ETHTOOL_LINK_MODE_MASK_NBITS); bitmap_zero(efx->link_advertising, __ETHTOOL_LINK_MODE_MASK_NBITS);
efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX); efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
} }
void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc) void efx_siena_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc)
{ {
efx->wanted_fc = wanted_fc; efx->wanted_fc = wanted_fc;
if (efx->link_advertising[0]) { if (efx->link_advertising[0]) {
...@@ -489,7 +489,7 @@ static void efx_start_port(struct efx_nic *efx) ...@@ -489,7 +489,7 @@ static void efx_start_port(struct efx_nic *efx)
efx->port_enabled = true; efx->port_enabled = true;
/* Ensure MAC ingress/egress is enabled */ /* Ensure MAC ingress/egress is enabled */
efx_mac_reconfigure(efx, false); efx_siena_mac_reconfigure(efx, false);
mutex_unlock(&efx->mac_lock); mutex_unlock(&efx->mac_lock);
} }
...@@ -525,7 +525,7 @@ static void efx_stop_port(struct efx_nic *efx) ...@@ -525,7 +525,7 @@ static void efx_stop_port(struct efx_nic *efx)
* is safe to call multiple times, so long as the NIC is not disabled. * is safe to call multiple times, so long as the NIC is not disabled.
* Requires the RTNL lock. * Requires the RTNL lock.
*/ */
void efx_start_all(struct efx_nic *efx) void efx_siena_start_all(struct efx_nic *efx)
{ {
EFX_ASSERT_RESET_SERIALISED(efx); EFX_ASSERT_RESET_SERIALISED(efx);
BUG_ON(efx->state == STATE_DISABLED); BUG_ON(efx->state == STATE_DISABLED);
...@@ -541,14 +541,14 @@ void efx_start_all(struct efx_nic *efx) ...@@ -541,14 +541,14 @@ void efx_start_all(struct efx_nic *efx)
efx_start_datapath(efx); efx_start_datapath(efx);
/* Start the hardware monitor if there is one */ /* Start the hardware monitor if there is one */
efx_start_monitor(efx); efx_siena_start_monitor(efx);
/* Link state detection is normally event-driven; we have /* Link state detection is normally event-driven; we have
* to poll now because we could have missed a change * to poll now because we could have missed a change
*/ */
mutex_lock(&efx->mac_lock); mutex_lock(&efx->mac_lock);
if (efx_mcdi_phy_poll(efx)) if (efx_mcdi_phy_poll(efx))
efx_link_status_changed(efx); efx_siena_link_status_changed(efx);
mutex_unlock(&efx->mac_lock); mutex_unlock(&efx->mac_lock);
if (efx->type->start_stats) { if (efx->type->start_stats) {
...@@ -565,7 +565,7 @@ void efx_start_all(struct efx_nic *efx) ...@@ -565,7 +565,7 @@ void efx_start_all(struct efx_nic *efx)
* times with the NIC in almost any state, but interrupts should be * times with the NIC in almost any state, but interrupts should be
* enabled. Requires the RTNL lock. * enabled. Requires the RTNL lock.
*/ */
void efx_stop_all(struct efx_nic *efx) void efx_siena_stop_all(struct efx_nic *efx)
{ {
EFX_ASSERT_RESET_SERIALISED(efx); EFX_ASSERT_RESET_SERIALISED(efx);
...@@ -598,7 +598,8 @@ void efx_stop_all(struct efx_nic *efx) ...@@ -598,7 +598,8 @@ void efx_stop_all(struct efx_nic *efx)
} }
/* Context: process, dev_base_lock or RTNL held, non-blocking. */ /* Context: process, dev_base_lock or RTNL held, non-blocking. */
void efx_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats) void efx_siena_net_stats(struct net_device *net_dev,
struct rtnl_link_stats64 *stats)
{ {
struct efx_nic *efx = netdev_priv(net_dev); struct efx_nic *efx = netdev_priv(net_dev);
...@@ -614,7 +615,7 @@ void efx_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats) ...@@ -614,7 +615,7 @@ void efx_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats)
* *
* Callers must hold the mac_lock * Callers must hold the mac_lock
*/ */
int __efx_reconfigure_port(struct efx_nic *efx) int __efx_siena_reconfigure_port(struct efx_nic *efx)
{ {
enum efx_phy_mode phy_mode; enum efx_phy_mode phy_mode;
int rc = 0; int rc = 0;
...@@ -640,14 +641,14 @@ int __efx_reconfigure_port(struct efx_nic *efx) ...@@ -640,14 +641,14 @@ int __efx_reconfigure_port(struct efx_nic *efx)
/* Reinitialise the MAC to pick up new PHY settings, even if the port is /* Reinitialise the MAC to pick up new PHY settings, even if the port is
* disabled. * disabled.
*/ */
int efx_reconfigure_port(struct efx_nic *efx) int efx_siena_reconfigure_port(struct efx_nic *efx)
{ {
int rc; int rc;
EFX_ASSERT_RESET_SERIALISED(efx); EFX_ASSERT_RESET_SERIALISED(efx);
mutex_lock(&efx->mac_lock); mutex_lock(&efx->mac_lock);
rc = __efx_reconfigure_port(efx); rc = __efx_siena_reconfigure_port(efx);
mutex_unlock(&efx->mac_lock); mutex_unlock(&efx->mac_lock);
return rc; return rc;
...@@ -682,7 +683,7 @@ static void efx_wait_for_bist_end(struct efx_nic *efx) ...@@ -682,7 +683,7 @@ static void efx_wait_for_bist_end(struct efx_nic *efx)
* Returns 0 if the recovery mechanisms are unsuccessful. * Returns 0 if the recovery mechanisms are unsuccessful.
* Returns a non-zero value otherwise. * Returns a non-zero value otherwise.
*/ */
int efx_try_recovery(struct efx_nic *efx) int efx_siena_try_recovery(struct efx_nic *efx)
{ {
#ifdef CONFIG_EEH #ifdef CONFIG_EEH
/* A PCI error can occur and not be seen by EEH because nothing /* A PCI error can occur and not be seen by EEH because nothing
...@@ -704,15 +705,15 @@ int efx_try_recovery(struct efx_nic *efx) ...@@ -704,15 +705,15 @@ int efx_try_recovery(struct efx_nic *efx)
/* Tears down the entire software state and most of the hardware state /* Tears down the entire software state and most of the hardware state
* before reset. * before reset.
*/ */
void efx_reset_down(struct efx_nic *efx, enum reset_type method) void efx_siena_reset_down(struct efx_nic *efx, enum reset_type method)
{ {
EFX_ASSERT_RESET_SERIALISED(efx); EFX_ASSERT_RESET_SERIALISED(efx);
if (method == RESET_TYPE_MCDI_TIMEOUT) if (method == RESET_TYPE_MCDI_TIMEOUT)
efx->type->prepare_flr(efx); efx->type->prepare_flr(efx);
efx_stop_all(efx); efx_siena_stop_all(efx);
efx_disable_interrupts(efx); efx_siena_disable_interrupts(efx);
mutex_lock(&efx->mac_lock); mutex_lock(&efx->mac_lock);
down_write(&efx->filter_sem); down_write(&efx->filter_sem);
...@@ -721,7 +722,7 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method) ...@@ -721,7 +722,7 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
} }
/* Context: netif_tx_lock held, BHs disabled. */ /* Context: netif_tx_lock held, BHs disabled. */
void efx_watchdog(struct net_device *net_dev, unsigned int txqueue) void efx_siena_watchdog(struct net_device *net_dev, unsigned int txqueue)
{ {
struct efx_nic *efx = netdev_priv(net_dev); struct efx_nic *efx = netdev_priv(net_dev);
...@@ -729,16 +730,16 @@ void efx_watchdog(struct net_device *net_dev, unsigned int txqueue) ...@@ -729,16 +730,16 @@ void efx_watchdog(struct net_device *net_dev, unsigned int txqueue)
"TX stuck with port_enabled=%d: resetting channels\n", "TX stuck with port_enabled=%d: resetting channels\n",
efx->port_enabled); efx->port_enabled);
efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG); efx_siena_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
} }
/* This function will always ensure that the locks acquired in /* This function will always ensure that the locks acquired in
* efx_reset_down() are released. A failure return code indicates * efx_siena_reset_down() are released. A failure return code indicates
* that we were unable to reinitialise the hardware, and the * that we were unable to reinitialise the hardware, and the
* driver should be disabled. If ok is false, then the rx and tx * driver should be disabled. If ok is false, then the rx and tx
* engines are not restarted, pending a RESET_DISABLE. * engines are not restarted, pending a RESET_DISABLE.
*/ */
int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok) int efx_siena_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
{ {
int rc; int rc;
...@@ -765,7 +766,7 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok) ...@@ -765,7 +766,7 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
"could not restore PHY settings\n"); "could not restore PHY settings\n");
} }
rc = efx_enable_interrupts(efx); rc = efx_siena_enable_interrupts(efx);
if (rc) if (rc)
goto fail; goto fail;
...@@ -787,7 +788,7 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok) ...@@ -787,7 +788,7 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
mutex_unlock(&efx->mac_lock); mutex_unlock(&efx->mac_lock);
efx_start_all(efx); efx_siena_start_all(efx);
if (efx->type->udp_tnl_push_ports) if (efx->type->udp_tnl_push_ports)
efx->type->udp_tnl_push_ports(efx); efx->type->udp_tnl_push_ports(efx);
...@@ -809,7 +810,7 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok) ...@@ -809,7 +810,7 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
* *
* Caller must hold the rtnl_lock. * Caller must hold the rtnl_lock.
*/ */
int efx_reset(struct efx_nic *efx, enum reset_type method) int efx_siena_reset(struct efx_nic *efx, enum reset_type method)
{ {
int rc, rc2 = 0; int rc, rc2 = 0;
bool disabled; bool disabled;
...@@ -818,11 +819,11 @@ int efx_reset(struct efx_nic *efx, enum reset_type method) ...@@ -818,11 +819,11 @@ int efx_reset(struct efx_nic *efx, enum reset_type method)
RESET_TYPE(method)); RESET_TYPE(method));
efx_device_detach_sync(efx); efx_device_detach_sync(efx);
/* efx_reset_down() grabs locks that prevent recovery on EF100. /* efx_siena_reset_down() grabs locks that prevent recovery on EF100.
* EF100 reset is handled in the efx_nic_type callback below. * EF100 reset is handled in the efx_nic_type callback below.
*/ */
if (efx_nic_rev(efx) != EFX_REV_EF100) if (efx_nic_rev(efx) != EFX_REV_EF100)
efx_reset_down(efx, method); efx_siena_reset_down(efx, method);
rc = efx->type->reset(efx, method); rc = efx->type->reset(efx, method);
if (rc) { if (rc) {
...@@ -851,7 +852,7 @@ int efx_reset(struct efx_nic *efx, enum reset_type method) ...@@ -851,7 +852,7 @@ int efx_reset(struct efx_nic *efx, enum reset_type method)
method == RESET_TYPE_DISABLE || method == RESET_TYPE_DISABLE ||
method == RESET_TYPE_RECOVER_OR_DISABLE; method == RESET_TYPE_RECOVER_OR_DISABLE;
if (efx_nic_rev(efx) != EFX_REV_EF100) if (efx_nic_rev(efx) != EFX_REV_EF100)
rc2 = efx_reset_up(efx, method, !disabled); rc2 = efx_siena_reset_up(efx, method, !disabled);
if (rc2) { if (rc2) {
disabled = true; disabled = true;
if (!rc) if (!rc)
...@@ -886,7 +887,7 @@ static void efx_reset_work(struct work_struct *data) ...@@ -886,7 +887,7 @@ static void efx_reset_work(struct work_struct *data)
if ((method == RESET_TYPE_RECOVER_OR_DISABLE || if ((method == RESET_TYPE_RECOVER_OR_DISABLE ||
method == RESET_TYPE_RECOVER_OR_ALL) && method == RESET_TYPE_RECOVER_OR_ALL) &&
efx_try_recovery(efx)) efx_siena_try_recovery(efx))
return; return;
if (!pending) if (!pending)
...@@ -894,17 +895,17 @@ static void efx_reset_work(struct work_struct *data) ...@@ -894,17 +895,17 @@ static void efx_reset_work(struct work_struct *data)
rtnl_lock(); rtnl_lock();
/* We checked the state in efx_schedule_reset() but it may /* We checked the state in efx_siena_schedule_reset() but it may
* have changed by now. Now that we have the RTNL lock, * have changed by now. Now that we have the RTNL lock,
* it cannot change again. * it cannot change again.
*/ */
if (efx->state == STATE_READY) if (efx->state == STATE_READY)
(void)efx_reset(efx, method); (void)efx_siena_reset(efx, method);
rtnl_unlock(); rtnl_unlock();
} }
void efx_schedule_reset(struct efx_nic *efx, enum reset_type type) void efx_siena_schedule_reset(struct efx_nic *efx, enum reset_type type)
{ {
enum reset_type method; enum reset_type method;
...@@ -951,7 +952,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type) ...@@ -951,7 +952,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
*/ */
efx_mcdi_mode_poll(efx); efx_mcdi_mode_poll(efx);
efx_queue_reset_work(efx); efx_siena_queue_reset_work(efx);
} }
/************************************************************************** /**************************************************************************
...@@ -963,11 +964,12 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type) ...@@ -963,11 +964,12 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
* before use * before use
* *
**************************************************************************/ **************************************************************************/
int efx_port_dummy_op_int(struct efx_nic *efx) int efx_siena_port_dummy_op_int(struct efx_nic *efx)
{ {
return 0; return 0;
} }
void efx_port_dummy_op_void(struct efx_nic *efx) {}
void efx_siena_port_dummy_op_void(struct efx_nic *efx) {}
/************************************************************************** /**************************************************************************
* *
...@@ -978,7 +980,7 @@ void efx_port_dummy_op_void(struct efx_nic *efx) {} ...@@ -978,7 +980,7 @@ void efx_port_dummy_op_void(struct efx_nic *efx) {}
/* This zeroes out and then fills in the invariants in a struct /* This zeroes out and then fills in the invariants in a struct
* efx_nic (including all sub-structures). * efx_nic (including all sub-structures).
*/ */
int efx_init_struct(struct efx_nic *efx, int efx_siena_init_struct(struct efx_nic *efx,
struct pci_dev *pci_dev, struct net_device *net_dev) struct pci_dev *pci_dev, struct net_device *net_dev)
{ {
int rc = -ENOMEM; int rc = -ENOMEM;
...@@ -1033,7 +1035,7 @@ int efx_init_struct(struct efx_nic *efx, ...@@ -1033,7 +1035,7 @@ int efx_init_struct(struct efx_nic *efx,
efx->mem_bar = UINT_MAX; efx->mem_bar = UINT_MAX;
rc = efx_init_channels(efx); rc = efx_siena_init_channels(efx);
if (rc) if (rc)
goto fail; goto fail;
...@@ -1049,17 +1051,17 @@ int efx_init_struct(struct efx_nic *efx, ...@@ -1049,17 +1051,17 @@ int efx_init_struct(struct efx_nic *efx,
return 0; return 0;
fail: fail:
efx_fini_struct(efx); efx_siena_fini_struct(efx);
return rc; return rc;
} }
void efx_fini_struct(struct efx_nic *efx) void efx_siena_fini_struct(struct efx_nic *efx)
{ {
#ifdef CONFIG_RFS_ACCEL #ifdef CONFIG_RFS_ACCEL
kfree(efx->rps_hash_table); kfree(efx->rps_hash_table);
#endif #endif
efx_fini_channels(efx); efx_siena_fini_channels(efx);
kfree(efx->vpd_sn); kfree(efx->vpd_sn);
...@@ -1070,7 +1072,7 @@ void efx_fini_struct(struct efx_nic *efx) ...@@ -1070,7 +1072,7 @@ void efx_fini_struct(struct efx_nic *efx)
} }
/* This configures the PCI device to enable I/O and DMA. */ /* This configures the PCI device to enable I/O and DMA. */
int efx_init_io(struct efx_nic *efx, int bar, dma_addr_t dma_mask, int efx_siena_init_io(struct efx_nic *efx, int bar, dma_addr_t dma_mask,
unsigned int mem_map_size) unsigned int mem_map_size)
{ {
struct pci_dev *pci_dev = efx->pci_dev; struct pci_dev *pci_dev = efx->pci_dev;
...@@ -1140,7 +1142,7 @@ int efx_init_io(struct efx_nic *efx, int bar, dma_addr_t dma_mask, ...@@ -1140,7 +1142,7 @@ int efx_init_io(struct efx_nic *efx, int bar, dma_addr_t dma_mask,
return rc; return rc;
} }
void efx_fini_io(struct efx_nic *efx) void efx_siena_fini_io(struct efx_nic *efx)
{ {
netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n"); netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");
...@@ -1185,7 +1187,7 @@ static ssize_t mcdi_logging_store(struct device *dev, ...@@ -1185,7 +1187,7 @@ static ssize_t mcdi_logging_store(struct device *dev,
static DEVICE_ATTR_RW(mcdi_logging); static DEVICE_ATTR_RW(mcdi_logging);
void efx_init_mcdi_logging(struct efx_nic *efx) void efx_siena_init_mcdi_logging(struct efx_nic *efx)
{ {
int rc = device_create_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging); int rc = device_create_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
...@@ -1195,7 +1197,7 @@ void efx_init_mcdi_logging(struct efx_nic *efx) ...@@ -1195,7 +1197,7 @@ void efx_init_mcdi_logging(struct efx_nic *efx)
} }
} }
void efx_fini_mcdi_logging(struct efx_nic *efx) void efx_siena_fini_mcdi_logging(struct efx_nic *efx)
{ {
device_remove_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging); device_remove_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
} }
...@@ -1222,8 +1224,8 @@ static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev, ...@@ -1222,8 +1224,8 @@ static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev,
efx_device_detach_sync(efx); efx_device_detach_sync(efx);
efx_stop_all(efx); efx_siena_stop_all(efx);
efx_disable_interrupts(efx); efx_siena_disable_interrupts(efx);
status = PCI_ERS_RESULT_NEED_RESET; status = PCI_ERS_RESULT_NEED_RESET;
} else { } else {
...@@ -1266,10 +1268,10 @@ static void efx_io_resume(struct pci_dev *pdev) ...@@ -1266,10 +1268,10 @@ static void efx_io_resume(struct pci_dev *pdev)
if (efx->state == STATE_DISABLED) if (efx->state == STATE_DISABLED)
goto out; goto out;
rc = efx_reset(efx, RESET_TYPE_ALL); rc = efx_siena_reset(efx, RESET_TYPE_ALL);
if (rc) { if (rc) {
netif_err(efx, hw, efx->net_dev, netif_err(efx, hw, efx->net_dev,
"efx_reset failed after PCI error (%d)\n", rc); "efx_siena_reset failed after PCI error (%d)\n", rc);
} else { } else {
efx->state = STATE_READY; efx->state = STATE_READY;
netif_dbg(efx, hw, efx->net_dev, netif_dbg(efx, hw, efx->net_dev,
...@@ -1286,7 +1288,7 @@ static void efx_io_resume(struct pci_dev *pdev) ...@@ -1286,7 +1288,7 @@ static void efx_io_resume(struct pci_dev *pdev)
* with our request for slot reset the mmio_enabled callback will never be * with our request for slot reset the mmio_enabled callback will never be
* called, and the link_reset callback is not used by AER or EEH mechanisms. * called, and the link_reset callback is not used by AER or EEH mechanisms.
*/ */
const struct pci_error_handlers efx_err_handlers = { const struct pci_error_handlers efx_siena_err_handlers = {
.error_detected = efx_io_error_detected, .error_detected = efx_io_error_detected,
.slot_reset = efx_io_slot_reset, .slot_reset = efx_io_slot_reset,
.resume = efx_io_resume, .resume = efx_io_resume,
...@@ -1354,7 +1356,8 @@ static bool efx_can_encap_offloads(struct efx_nic *efx, struct sk_buff *skb) ...@@ -1354,7 +1356,8 @@ static bool efx_can_encap_offloads(struct efx_nic *efx, struct sk_buff *skb)
} }
} }
netdev_features_t efx_features_check(struct sk_buff *skb, struct net_device *dev, netdev_features_t efx_siena_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features) netdev_features_t features)
{ {
struct efx_nic *efx = netdev_priv(dev); struct efx_nic *efx = netdev_priv(dev);
...@@ -1375,7 +1378,7 @@ netdev_features_t efx_features_check(struct sk_buff *skb, struct net_device *dev ...@@ -1375,7 +1378,7 @@ netdev_features_t efx_features_check(struct sk_buff *skb, struct net_device *dev
return features; return features;
} }
int efx_get_phys_port_id(struct net_device *net_dev, int efx_siena_get_phys_port_id(struct net_device *net_dev,
struct netdev_phys_item_id *ppid) struct netdev_phys_item_id *ppid)
{ {
struct efx_nic *efx = netdev_priv(net_dev); struct efx_nic *efx = netdev_priv(net_dev);
...@@ -1386,7 +1389,8 @@ int efx_get_phys_port_id(struct net_device *net_dev, ...@@ -1386,7 +1389,8 @@ int efx_get_phys_port_id(struct net_device *net_dev,
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
int efx_get_phys_port_name(struct net_device *net_dev, char *name, size_t len) int efx_siena_get_phys_port_name(struct net_device *net_dev,
char *name, size_t len)
{ {
struct efx_nic *efx = netdev_priv(net_dev); struct efx_nic *efx = netdev_priv(net_dev);
......
...@@ -11,12 +11,12 @@ ...@@ -11,12 +11,12 @@
#ifndef EFX_COMMON_H #ifndef EFX_COMMON_H
#define EFX_COMMON_H #define EFX_COMMON_H
int efx_init_io(struct efx_nic *efx, int bar, dma_addr_t dma_mask, int efx_siena_init_io(struct efx_nic *efx, int bar, dma_addr_t dma_mask,
unsigned int mem_map_size); unsigned int mem_map_size);
void efx_fini_io(struct efx_nic *efx); void efx_siena_fini_io(struct efx_nic *efx);
int efx_init_struct(struct efx_nic *efx, struct pci_dev *pci_dev, int efx_siena_init_struct(struct efx_nic *efx, struct pci_dev *pci_dev,
struct net_device *net_dev); struct net_device *net_dev);
void efx_fini_struct(struct efx_nic *efx); void efx_siena_fini_struct(struct efx_nic *efx);
#define EFX_MAX_DMAQ_SIZE 4096UL #define EFX_MAX_DMAQ_SIZE 4096UL
#define EFX_DEFAULT_DMAQ_SIZE 1024UL #define EFX_DEFAULT_DMAQ_SIZE 1024UL
...@@ -25,23 +25,24 @@ void efx_fini_struct(struct efx_nic *efx); ...@@ -25,23 +25,24 @@ void efx_fini_struct(struct efx_nic *efx);
#define EFX_MAX_EVQ_SIZE 16384UL #define EFX_MAX_EVQ_SIZE 16384UL
#define EFX_MIN_EVQ_SIZE 512UL #define EFX_MIN_EVQ_SIZE 512UL
void efx_link_clear_advertising(struct efx_nic *efx); void efx_siena_link_clear_advertising(struct efx_nic *efx);
void efx_link_set_wanted_fc(struct efx_nic *efx, u8); void efx_siena_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc);
void efx_start_all(struct efx_nic *efx); void efx_siena_start_all(struct efx_nic *efx);
void efx_stop_all(struct efx_nic *efx); void efx_siena_stop_all(struct efx_nic *efx);
void efx_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats); void efx_siena_net_stats(struct net_device *net_dev,
struct rtnl_link_stats64 *stats);
int efx_create_reset_workqueue(void); int efx_siena_create_reset_workqueue(void);
void efx_queue_reset_work(struct efx_nic *efx); void efx_siena_queue_reset_work(struct efx_nic *efx);
void efx_flush_reset_workqueue(struct efx_nic *efx); void efx_siena_flush_reset_workqueue(struct efx_nic *efx);
void efx_destroy_reset_workqueue(void); void efx_siena_destroy_reset_workqueue(void);
void efx_start_monitor(struct efx_nic *efx); void efx_siena_start_monitor(struct efx_nic *efx);
int __efx_reconfigure_port(struct efx_nic *efx); int __efx_siena_reconfigure_port(struct efx_nic *efx);
int efx_reconfigure_port(struct efx_nic *efx); int efx_siena_reconfigure_port(struct efx_nic *efx);
#define EFX_ASSERT_RESET_SERIALISED(efx) \ #define EFX_ASSERT_RESET_SERIALISED(efx) \
do { \ do { \
...@@ -51,16 +52,16 @@ int efx_reconfigure_port(struct efx_nic *efx); ...@@ -51,16 +52,16 @@ int efx_reconfigure_port(struct efx_nic *efx);
ASSERT_RTNL(); \ ASSERT_RTNL(); \
} while (0) } while (0)
int efx_try_recovery(struct efx_nic *efx); int efx_siena_try_recovery(struct efx_nic *efx);
void efx_reset_down(struct efx_nic *efx, enum reset_type method); void efx_siena_reset_down(struct efx_nic *efx, enum reset_type method);
void efx_watchdog(struct net_device *net_dev, unsigned int txqueue); void efx_siena_watchdog(struct net_device *net_dev, unsigned int txqueue);
int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok); int efx_siena_reset_up(struct efx_nic *efx, enum reset_type method, bool ok);
int efx_reset(struct efx_nic *efx, enum reset_type method); int efx_siena_reset(struct efx_nic *efx, enum reset_type method);
void efx_schedule_reset(struct efx_nic *efx, enum reset_type type); void efx_siena_schedule_reset(struct efx_nic *efx, enum reset_type type);
/* Dummy PHY ops for PHY drivers */ /* Dummy PHY ops for PHY drivers */
int efx_port_dummy_op_int(struct efx_nic *efx); int efx_siena_port_dummy_op_int(struct efx_nic *efx);
void efx_port_dummy_op_void(struct efx_nic *efx); void efx_siena_port_dummy_op_void(struct efx_nic *efx);
static inline int efx_check_disabled(struct efx_nic *efx) static inline int efx_check_disabled(struct efx_nic *efx)
{ {
...@@ -88,29 +89,30 @@ static inline void efx_schedule_channel_irq(struct efx_channel *channel) ...@@ -88,29 +89,30 @@ static inline void efx_schedule_channel_irq(struct efx_channel *channel)
} }
#ifdef CONFIG_SFC_MCDI_LOGGING #ifdef CONFIG_SFC_MCDI_LOGGING
void efx_init_mcdi_logging(struct efx_nic *efx); void efx_siena_init_mcdi_logging(struct efx_nic *efx);
void efx_fini_mcdi_logging(struct efx_nic *efx); void efx_siena_fini_mcdi_logging(struct efx_nic *efx);
#else #else
static inline void efx_init_mcdi_logging(struct efx_nic *efx) {} static inline void efx_siena_init_mcdi_logging(struct efx_nic *efx) {}
static inline void efx_fini_mcdi_logging(struct efx_nic *efx) {} static inline void efx_siena_fini_mcdi_logging(struct efx_nic *efx) {}
#endif #endif
void efx_mac_reconfigure(struct efx_nic *efx, bool mtu_only); void efx_siena_mac_reconfigure(struct efx_nic *efx, bool mtu_only);
int efx_set_mac_address(struct net_device *net_dev, void *data); int efx_siena_set_mac_address(struct net_device *net_dev, void *data);
void efx_set_rx_mode(struct net_device *net_dev); void efx_siena_set_rx_mode(struct net_device *net_dev);
int efx_set_features(struct net_device *net_dev, netdev_features_t data); int efx_siena_set_features(struct net_device *net_dev, netdev_features_t data);
void efx_link_status_changed(struct efx_nic *efx); void efx_siena_link_status_changed(struct efx_nic *efx);
unsigned int efx_xdp_max_mtu(struct efx_nic *efx); unsigned int efx_siena_xdp_max_mtu(struct efx_nic *efx);
int efx_change_mtu(struct net_device *net_dev, int new_mtu); int efx_siena_change_mtu(struct net_device *net_dev, int new_mtu);
extern const struct pci_error_handlers efx_err_handlers; extern const struct pci_error_handlers efx_siena_err_handlers;
netdev_features_t efx_features_check(struct sk_buff *skb, struct net_device *dev, netdev_features_t efx_siena_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features); netdev_features_t features);
int efx_get_phys_port_id(struct net_device *net_dev, int efx_siena_get_phys_port_id(struct net_device *net_dev,
struct netdev_phys_item_id *ppid); struct netdev_phys_item_id *ppid);
int efx_get_phys_port_name(struct net_device *net_dev, int efx_siena_get_phys_port_name(struct net_device *net_dev,
char *name, size_t len); char *name, size_t len);
#endif #endif
...@@ -127,7 +127,7 @@ enum efx_loopback_mode { ...@@ -127,7 +127,7 @@ enum efx_loopback_mode {
* *
* %RESET_TYPE_INVSIBLE, %RESET_TYPE_ALL, %RESET_TYPE_WORLD and * %RESET_TYPE_INVSIBLE, %RESET_TYPE_ALL, %RESET_TYPE_WORLD and
* %RESET_TYPE_DISABLE specify the method/scope of the reset. The * %RESET_TYPE_DISABLE specify the method/scope of the reset. The
* other valuesspecify reasons, which efx_schedule_reset() will choose * other valuesspecify reasons, which efx_siena_schedule_reset() will choose
* a method for. * a method for.
* *
* Reset methods are numbered in order of increasing scope. * Reset methods are numbered in order of increasing scope.
......
...@@ -105,7 +105,7 @@ static int efx_ethtool_get_coalesce(struct net_device *net_dev, ...@@ -105,7 +105,7 @@ static int efx_ethtool_get_coalesce(struct net_device *net_dev,
unsigned int tx_usecs, rx_usecs; unsigned int tx_usecs, rx_usecs;
bool rx_adaptive; bool rx_adaptive;
efx_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &rx_adaptive); efx_siena_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &rx_adaptive);
coalesce->tx_coalesce_usecs = tx_usecs; coalesce->tx_coalesce_usecs = tx_usecs;
coalesce->tx_coalesce_usecs_irq = tx_usecs; coalesce->tx_coalesce_usecs_irq = tx_usecs;
...@@ -127,7 +127,7 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev, ...@@ -127,7 +127,7 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
bool adaptive, rx_may_override_tx; bool adaptive, rx_may_override_tx;
int rc; int rc;
efx_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &adaptive); efx_siena_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &adaptive);
if (coalesce->rx_coalesce_usecs != rx_usecs) if (coalesce->rx_coalesce_usecs != rx_usecs)
rx_usecs = coalesce->rx_coalesce_usecs; rx_usecs = coalesce->rx_coalesce_usecs;
...@@ -146,7 +146,7 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev, ...@@ -146,7 +146,7 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
else else
tx_usecs = coalesce->tx_coalesce_usecs_irq; tx_usecs = coalesce->tx_coalesce_usecs_irq;
rc = efx_init_irq_moderation(efx, tx_usecs, rx_usecs, adaptive, rc = efx_siena_init_irq_moderation(efx, tx_usecs, rx_usecs, adaptive,
rx_may_override_tx); rx_may_override_tx);
if (rc != 0) if (rc != 0)
return rc; return rc;
...@@ -198,7 +198,7 @@ efx_ethtool_set_ringparam(struct net_device *net_dev, ...@@ -198,7 +198,7 @@ efx_ethtool_set_ringparam(struct net_device *net_dev,
"increasing TX queue size to minimum of %u\n", "increasing TX queue size to minimum of %u\n",
txq_entries); txq_entries);
return efx_realloc_channels(efx, ring->rx_pending, txq_entries); return efx_siena_realloc_channels(efx, ring->rx_pending, txq_entries);
} }
static void efx_ethtool_get_wol(struct net_device *net_dev, static void efx_ethtool_get_wol(struct net_device *net_dev,
...@@ -239,7 +239,7 @@ static int efx_ethtool_get_ts_info(struct net_device *net_dev, ...@@ -239,7 +239,7 @@ static int efx_ethtool_get_ts_info(struct net_device *net_dev,
return 0; return 0;
} }
const struct ethtool_ops efx_ethtool_ops = { const struct ethtool_ops efx_siena_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS | .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USECS_IRQ | ETHTOOL_COALESCE_USECS_IRQ |
ETHTOOL_COALESCE_USE_ADAPTIVE_RX, ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
......
...@@ -218,7 +218,7 @@ int efx_ethtool_set_pauseparam(struct net_device *net_dev, ...@@ -218,7 +218,7 @@ int efx_ethtool_set_pauseparam(struct net_device *net_dev,
old_adv = efx->link_advertising[0]; old_adv = efx->link_advertising[0];
old_fc = efx->wanted_fc; old_fc = efx->wanted_fc;
efx_link_set_wanted_fc(efx, wanted_fc); efx_siena_link_set_wanted_fc(efx, wanted_fc);
if (efx->link_advertising[0] != old_adv || if (efx->link_advertising[0] != old_adv ||
(efx->wanted_fc ^ old_fc) & EFX_FC_AUTO) { (efx->wanted_fc ^ old_fc) & EFX_FC_AUTO) {
rc = efx_mcdi_port_reconfigure(efx); rc = efx_mcdi_port_reconfigure(efx);
...@@ -233,7 +233,7 @@ int efx_ethtool_set_pauseparam(struct net_device *net_dev, ...@@ -233,7 +233,7 @@ int efx_ethtool_set_pauseparam(struct net_device *net_dev,
/* Reconfigure the MAC. The PHY *may* generate a link state change event /* Reconfigure the MAC. The PHY *may* generate a link state change event
* if the user just changed the advertised capabilities, but there's no * if the user just changed the advertised capabilities, but there's no
* harm doing this twice */ * harm doing this twice */
efx_mac_reconfigure(efx, false); efx_siena_mac_reconfigure(efx, false);
out: out:
mutex_unlock(&efx->mac_lock); mutex_unlock(&efx->mac_lock);
...@@ -1307,7 +1307,7 @@ int efx_ethtool_reset(struct net_device *net_dev, u32 *flags) ...@@ -1307,7 +1307,7 @@ int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
if (rc < 0) if (rc < 0)
return rc; return rc;
return efx_reset(efx, rc); return efx_siena_reset(efx, rc);
} }
int efx_ethtool_get_module_eeprom(struct net_device *net_dev, int efx_ethtool_get_module_eeprom(struct net_device *net_dev,
......
...@@ -747,12 +747,13 @@ int efx_farch_fini_dmaq(struct efx_nic *efx) ...@@ -747,12 +747,13 @@ int efx_farch_fini_dmaq(struct efx_nic *efx)
* completion events. This means that efx->rxq_flush_outstanding remained at 4 * completion events. This means that efx->rxq_flush_outstanding remained at 4
* after the FLR; also, efx->active_queues was non-zero (as no flush completion * after the FLR; also, efx->active_queues was non-zero (as no flush completion
* events were received, and we didn't go through efx_check_tx_flush_complete()) * events were received, and we didn't go through efx_check_tx_flush_complete())
* If we don't fix this up, on the next call to efx_realloc_channels() we won't * If we don't fix this up, on the next call to efx_siena_realloc_channels() we
* flush any RX queues because efx->rxq_flush_outstanding is at the limit of 4 * won't flush any RX queues because efx->rxq_flush_outstanding is at the limit
* for batched flush requests; and the efx->active_queues gets messed up because * of 4 for batched flush requests; and the efx->active_queues gets messed up
* we keep incrementing for the newly initialised queues, but it never went to * because we keep incrementing for the newly initialised queues, but it never
* zero previously. Then we get a timeout every time we try to restart the * went to zero previously. Then we get a timeout every time we try to restart
* queues, as it doesn't go back to zero when we should be flushing the queues. * the queues, as it doesn't go back to zero when we should be flushing the
* queues.
*/ */
void efx_farch_finish_flr(struct efx_nic *efx) void efx_farch_finish_flr(struct efx_nic *efx)
{ {
...@@ -838,7 +839,7 @@ efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) ...@@ -838,7 +839,7 @@ efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
tx_queue = channel->tx_queue + tx_queue = channel->tx_queue +
(tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL); (tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
efx_xmit_done(tx_queue, tx_ev_desc_ptr); efx_siena_xmit_done(tx_queue, tx_ev_desc_ptr);
} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) { } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
/* Rewrite the FIFO write pointer */ /* Rewrite the FIFO write pointer */
tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
...@@ -849,7 +850,7 @@ efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) ...@@ -849,7 +850,7 @@ efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
efx_farch_notify_tx_desc(tx_queue); efx_farch_notify_tx_desc(tx_queue);
netif_tx_unlock(efx->net_dev); netif_tx_unlock(efx->net_dev);
} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR)) { } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR)) {
efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR); efx_siena_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
} else { } else {
netif_err(efx, tx_err, efx->net_dev, netif_err(efx, tx_err, efx->net_dev,
"channel %d unexpected TX event " "channel %d unexpected TX event "
...@@ -956,7 +957,7 @@ efx_farch_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index) ...@@ -956,7 +957,7 @@ efx_farch_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
"dropped %d events (index=%d expected=%d)\n", "dropped %d events (index=%d expected=%d)\n",
dropped, index, expected); dropped, index, expected);
efx_schedule_reset(efx, RESET_TYPE_DISABLE); efx_siena_schedule_reset(efx, RESET_TYPE_DISABLE);
return false; return false;
} }
...@@ -1001,7 +1002,7 @@ efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event) ...@@ -1001,7 +1002,7 @@ efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
/* Discard all pending fragments */ /* Discard all pending fragments */
if (rx_queue->scatter_n) { if (rx_queue->scatter_n) {
efx_rx_packet( efx_siena_rx_packet(
rx_queue, rx_queue,
rx_queue->removed_count & rx_queue->ptr_mask, rx_queue->removed_count & rx_queue->ptr_mask,
rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD); rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD);
...@@ -1015,7 +1016,7 @@ efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event) ...@@ -1015,7 +1016,7 @@ efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
/* Discard new fragment if not SOP */ /* Discard new fragment if not SOP */
if (!rx_ev_sop) { if (!rx_ev_sop) {
efx_rx_packet( efx_siena_rx_packet(
rx_queue, rx_queue,
rx_queue->removed_count & rx_queue->ptr_mask, rx_queue->removed_count & rx_queue->ptr_mask,
1, 0, EFX_RX_PKT_DISCARD); 1, 0, EFX_RX_PKT_DISCARD);
...@@ -1067,7 +1068,7 @@ efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event) ...@@ -1067,7 +1068,7 @@ efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
channel->irq_mod_score += 2; channel->irq_mod_score += 2;
/* Handle received packet */ /* Handle received packet */
efx_rx_packet(rx_queue, efx_siena_rx_packet(rx_queue,
rx_queue->removed_count & rx_queue->ptr_mask, rx_queue->removed_count & rx_queue->ptr_mask,
rx_queue->scatter_n, rx_ev_byte_cnt, flags); rx_queue->scatter_n, rx_ev_byte_cnt, flags);
rx_queue->removed_count += rx_queue->scatter_n; rx_queue->removed_count += rx_queue->scatter_n;
...@@ -1222,7 +1223,7 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) ...@@ -1222,7 +1223,7 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
"channel %d seen DRIVER RX_RESET event. " "channel %d seen DRIVER RX_RESET event. "
"Resetting.\n", channel->channel); "Resetting.\n", channel->channel);
atomic_inc(&efx->rx_reset); atomic_inc(&efx->rx_reset);
efx_schedule_reset(efx, RESET_TYPE_DISABLE); efx_siena_schedule_reset(efx, RESET_TYPE_DISABLE);
break; break;
case FSE_BZ_RX_DSC_ERROR_EV: case FSE_BZ_RX_DSC_ERROR_EV:
if (ev_sub_data < EFX_VI_BASE) { if (ev_sub_data < EFX_VI_BASE) {
...@@ -1230,7 +1231,7 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) ...@@ -1230,7 +1231,7 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
"RX DMA Q %d reports descriptor fetch error." "RX DMA Q %d reports descriptor fetch error."
" RX Q %d is disabled.\n", ev_sub_data, " RX Q %d is disabled.\n", ev_sub_data,
ev_sub_data); ev_sub_data);
efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR); efx_siena_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
} }
#ifdef CONFIG_SFC_SRIOV #ifdef CONFIG_SFC_SRIOV
else else
...@@ -1243,7 +1244,7 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) ...@@ -1243,7 +1244,7 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
"TX DMA Q %d reports descriptor fetch error." "TX DMA Q %d reports descriptor fetch error."
" TX Q %d is disabled.\n", ev_sub_data, " TX Q %d is disabled.\n", ev_sub_data,
ev_sub_data); ev_sub_data);
efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR); efx_siena_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
} }
#ifdef CONFIG_SFC_SRIOV #ifdef CONFIG_SFC_SRIOV
else else
...@@ -1496,12 +1497,12 @@ irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx) ...@@ -1496,12 +1497,12 @@ irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx)
if (++efx->int_error_count < EFX_MAX_INT_ERRORS) { if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
netif_err(efx, hw, efx->net_dev, netif_err(efx, hw, efx->net_dev,
"SYSTEM ERROR - reset scheduled\n"); "SYSTEM ERROR - reset scheduled\n");
efx_schedule_reset(efx, RESET_TYPE_INT_ERROR); efx_siena_schedule_reset(efx, RESET_TYPE_INT_ERROR);
} else { } else {
netif_err(efx, hw, efx->net_dev, netif_err(efx, hw, efx->net_dev,
"SYSTEM ERROR - max number of errors seen." "SYSTEM ERROR - max number of errors seen."
"NIC will be disabled\n"); "NIC will be disabled\n");
efx_schedule_reset(efx, RESET_TYPE_DISABLE); efx_siena_schedule_reset(efx, RESET_TYPE_DISABLE);
} }
return IRQ_HANDLED; return IRQ_HANDLED;
...@@ -1529,7 +1530,7 @@ irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id) ...@@ -1529,7 +1530,7 @@ irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id)
* code. Disable them earlier. * code. Disable them earlier.
* If an EEH error occurred, the read will have returned all ones. * If an EEH error occurred, the read will have returned all ones.
*/ */
if (EFX_DWORD_IS_ALL_ONES(reg) && efx_try_recovery(efx) && if (EFX_DWORD_IS_ALL_ONES(reg) && efx_siena_try_recovery(efx) &&
!efx->eeh_disabled_legacy_irq) { !efx->eeh_disabled_legacy_irq) {
disable_irq_nosync(efx->legacy_irq); disable_irq_nosync(efx->legacy_irq);
efx->eeh_disabled_legacy_irq = true; efx->eeh_disabled_legacy_irq = true;
......
...@@ -725,7 +725,7 @@ static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned int cmd, ...@@ -725,7 +725,7 @@ static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned int cmd,
cmd, -rc); cmd, -rc);
if (efx->type->mcdi_reboot_detected) if (efx->type->mcdi_reboot_detected)
efx->type->mcdi_reboot_detected(efx); efx->type->mcdi_reboot_detected(efx);
efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); efx_siena_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
} else if (proxy_handle && (rc == -EPROTO) && } else if (proxy_handle && (rc == -EPROTO) &&
efx_mcdi_get_proxy_handle(efx, hdr_len, data_len, efx_mcdi_get_proxy_handle(efx, hdr_len, data_len,
proxy_handle)) { proxy_handle)) {
...@@ -849,7 +849,7 @@ static int _efx_mcdi_rpc(struct efx_nic *efx, unsigned int cmd, ...@@ -849,7 +849,7 @@ static int _efx_mcdi_rpc(struct efx_nic *efx, unsigned int cmd,
cmd, rc); cmd, rc);
if (rc == -EINTR || rc == -EIO) if (rc == -EINTR || rc == -EIO)
efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); efx_siena_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
efx_mcdi_release(mcdi); efx_mcdi_release(mcdi);
} }
} }
...@@ -1254,7 +1254,7 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc) ...@@ -1254,7 +1254,7 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
mcdi->new_epoch = true; mcdi->new_epoch = true;
/* Nobody was waiting for an MCDI request, so trigger a reset */ /* Nobody was waiting for an MCDI request, so trigger a reset */
efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); efx_siena_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
} }
spin_unlock(&mcdi->iface_lock); spin_unlock(&mcdi->iface_lock);
...@@ -1282,7 +1282,7 @@ static void efx_mcdi_ev_bist(struct efx_nic *efx) ...@@ -1282,7 +1282,7 @@ static void efx_mcdi_ev_bist(struct efx_nic *efx)
} }
} }
mcdi->new_epoch = true; mcdi->new_epoch = true;
efx_schedule_reset(efx, RESET_TYPE_MC_BIST); efx_siena_schedule_reset(efx, RESET_TYPE_MC_BIST);
spin_unlock(&mcdi->iface_lock); spin_unlock(&mcdi->iface_lock);
} }
...@@ -1296,7 +1296,7 @@ static void efx_mcdi_abandon(struct efx_nic *efx) ...@@ -1296,7 +1296,7 @@ static void efx_mcdi_abandon(struct efx_nic *efx)
if (xchg(&mcdi->mode, MCDI_MODE_FAIL) == MCDI_MODE_FAIL) if (xchg(&mcdi->mode, MCDI_MODE_FAIL) == MCDI_MODE_FAIL)
return; /* it had already been done */ return; /* it had already been done */
netif_dbg(efx, hw, efx->net_dev, "MCDI is timing out; trying to recover\n"); netif_dbg(efx, hw, efx->net_dev, "MCDI is timing out; trying to recover\n");
efx_schedule_reset(efx, RESET_TYPE_MCDI_TIMEOUT); efx_siena_schedule_reset(efx, RESET_TYPE_MCDI_TIMEOUT);
} }
static void efx_handle_drain_event(struct efx_nic *efx) static void efx_handle_drain_event(struct efx_nic *efx)
...@@ -1387,7 +1387,7 @@ void efx_mcdi_process_event(struct efx_channel *channel, ...@@ -1387,7 +1387,7 @@ void efx_mcdi_process_event(struct efx_channel *channel,
"%s DMA error (event: "EFX_QWORD_FMT")\n", "%s DMA error (event: "EFX_QWORD_FMT")\n",
code == MCDI_EVENT_CODE_TX_ERR ? "TX" : "RX", code == MCDI_EVENT_CODE_TX_ERR ? "TX" : "RX",
EFX_QWORD_VAL(*event)); EFX_QWORD_VAL(*event));
efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR); efx_siena_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
break; break;
case MCDI_EVENT_CODE_PROXY_RESPONSE: case MCDI_EVENT_CODE_PROXY_RESPONSE:
efx_mcdi_ev_proxy_response(efx, efx_mcdi_ev_proxy_response(efx,
......
...@@ -518,7 +518,7 @@ int efx_mcdi_phy_probe(struct efx_nic *efx) ...@@ -518,7 +518,7 @@ int efx_mcdi_phy_probe(struct efx_nic *efx)
efx->wanted_fc = EFX_FC_RX | EFX_FC_TX; efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
if (phy_data->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN)) if (phy_data->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
efx->wanted_fc |= EFX_FC_AUTO; efx->wanted_fc |= EFX_FC_AUTO;
efx_link_set_wanted_fc(efx, efx->wanted_fc); efx_siena_link_set_wanted_fc(efx, efx->wanted_fc);
return 0; return 0;
...@@ -605,7 +605,7 @@ int efx_mcdi_phy_set_link_ksettings(struct efx_nic *efx, const struct ethtool_li ...@@ -605,7 +605,7 @@ int efx_mcdi_phy_set_link_ksettings(struct efx_nic *efx, const struct ethtool_li
efx_link_set_advertising(efx, cmd->link_modes.advertising); efx_link_set_advertising(efx, cmd->link_modes.advertising);
phy_cfg->forced_cap = 0; phy_cfg->forced_cap = 0;
} else { } else {
efx_link_clear_advertising(efx); efx_siena_link_clear_advertising(efx);
phy_cfg->forced_cap = caps; phy_cfg->forced_cap = caps;
} }
return 0; return 0;
...@@ -1297,5 +1297,5 @@ void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev) ...@@ -1297,5 +1297,5 @@ void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev)
efx_mcdi_phy_check_fcntl(efx, lpa); efx_mcdi_phy_check_fcntl(efx, lpa);
efx_link_status_changed(efx); efx_siena_link_status_changed(efx);
} }
...@@ -37,7 +37,7 @@ static void efx_mtd_sync(struct mtd_info *mtd) ...@@ -37,7 +37,7 @@ static void efx_mtd_sync(struct mtd_info *mtd)
part->name, part->dev_type_name, rc); part->name, part->dev_type_name, rc);
} }
static void efx_mtd_remove_partition(struct efx_mtd_partition *part) static void efx_siena_mtd_remove_partition(struct efx_mtd_partition *part)
{ {
int rc; int rc;
...@@ -51,7 +51,7 @@ static void efx_mtd_remove_partition(struct efx_mtd_partition *part) ...@@ -51,7 +51,7 @@ static void efx_mtd_remove_partition(struct efx_mtd_partition *part)
list_del(&part->node); list_del(&part->node);
} }
int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts, int efx_siena_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
size_t n_parts, size_t sizeof_part) size_t n_parts, size_t sizeof_part)
{ {
struct efx_mtd_partition *part; struct efx_mtd_partition *part;
...@@ -79,7 +79,7 @@ int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts, ...@@ -79,7 +79,7 @@ int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
if (mtd_device_register(&part->mtd, NULL, 0)) if (mtd_device_register(&part->mtd, NULL, 0))
goto fail; goto fail;
/* Add to list in order - efx_mtd_remove() depends on this */ /* Add to list in order - efx_siena_mtd_remove() depends on this */
list_add_tail(&part->node, &efx->mtd_list); list_add_tail(&part->node, &efx->mtd_list);
} }
...@@ -89,13 +89,13 @@ int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts, ...@@ -89,13 +89,13 @@ int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
while (i--) { while (i--) {
part = (struct efx_mtd_partition *)((char *)parts + part = (struct efx_mtd_partition *)((char *)parts +
i * sizeof_part); i * sizeof_part);
efx_mtd_remove_partition(part); efx_siena_mtd_remove_partition(part);
} }
/* Failure is unlikely here, but probably means we're out of memory */ /* Failure is unlikely here, but probably means we're out of memory */
return -ENOMEM; return -ENOMEM;
} }
void efx_mtd_remove(struct efx_nic *efx) void efx_siena_mtd_remove(struct efx_nic *efx)
{ {
struct efx_mtd_partition *parts, *part, *next; struct efx_mtd_partition *parts, *part, *next;
...@@ -108,12 +108,12 @@ void efx_mtd_remove(struct efx_nic *efx) ...@@ -108,12 +108,12 @@ void efx_mtd_remove(struct efx_nic *efx)
node); node);
list_for_each_entry_safe(part, next, &efx->mtd_list, node) list_for_each_entry_safe(part, next, &efx->mtd_list, node)
efx_mtd_remove_partition(part); efx_siena_mtd_remove_partition(part);
kfree(parts); kfree(parts);
} }
void efx_mtd_rename(struct efx_nic *efx) void efx_siena_mtd_rename(struct efx_nic *efx)
{ {
struct efx_mtd_partition *part; struct efx_mtd_partition *part;
......
...@@ -207,7 +207,6 @@ struct efx_tx_buffer { ...@@ -207,7 +207,6 @@ struct efx_tx_buffer {
* @txd: The hardware descriptor ring * @txd: The hardware descriptor ring
* @ptr_mask: The size of the ring minus 1. * @ptr_mask: The size of the ring minus 1.
* @piobuf: PIO buffer region for this TX queue (shared with its partner). * @piobuf: PIO buffer region for this TX queue (shared with its partner).
* Size of the region is efx_piobuf_size.
* @piobuf_offset: Buffer offset to be specified in PIO descriptors * @piobuf_offset: Buffer offset to be specified in PIO descriptors
* @initialised: Has hardware queue been initialised? * @initialised: Has hardware queue been initialised?
* @timestamping: Is timestamping enabled for this channel? * @timestamping: Is timestamping enabled for this channel?
...@@ -478,9 +477,9 @@ enum efx_sync_events_state { ...@@ -478,9 +477,9 @@ enum efx_sync_events_state {
* @n_rx_xdp_tx: Count of RX packets retransmitted due to XDP * @n_rx_xdp_tx: Count of RX packets retransmitted due to XDP
* @n_rx_xdp_redirect: Count of RX packets redirected to a different NIC by XDP * @n_rx_xdp_redirect: Count of RX packets redirected to a different NIC by XDP
* @rx_pkt_n_frags: Number of fragments in next packet to be delivered by * @rx_pkt_n_frags: Number of fragments in next packet to be delivered by
* __efx_rx_packet(), or zero if there is none * __efx_siena_rx_packet(), or zero if there is none
* @rx_pkt_index: Ring index of first buffer for next packet to be delivered * @rx_pkt_index: Ring index of first buffer for next packet to be delivered
* by __efx_rx_packet(), if @rx_pkt_n_frags != 0 * by __efx_siena_rx_packet(), if @rx_pkt_n_frags != 0
* @rx_list: list of SKBs from current RX, awaiting processing * @rx_list: list of SKBs from current RX, awaiting processing
* @rx_queue: RX queue for this channel * @rx_queue: RX queue for this channel
* @tx_queue: TX queues for this channel * @tx_queue: TX queues for this channel
...@@ -869,12 +868,12 @@ enum efx_xdp_tx_queues_mode { ...@@ -869,12 +868,12 @@ enum efx_xdp_tx_queues_mode {
* @nic_data: Hardware dependent state * @nic_data: Hardware dependent state
* @mcdi: Management-Controller-to-Driver Interface state * @mcdi: Management-Controller-to-Driver Interface state
* @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode, * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode,
* efx_monitor() and efx_reconfigure_port() * efx_monitor() and efx_siena_reconfigure_port()
* @port_enabled: Port enabled indicator. * @port_enabled: Port enabled indicator.
* Serialises efx_stop_all(), efx_start_all(), efx_monitor() and * Serialises efx_siena_stop_all(), efx_siena_start_all(),
* efx_mac_work() with kernel interfaces. Safe to read under any * efx_monitor() and efx_mac_work() with kernel interfaces.
* one of the rtnl_lock, mac_lock, or netif_tx_lock, but all three must * Safe to read under any one of the rtnl_lock, mac_lock, or netif_tx_lock,
* be held to modify it. * but all three must be held to modify it.
* @port_initialized: Port initialized? * @port_initialized: Port initialized?
* @net_dev: Operating system network device. Consider holding the rtnl lock * @net_dev: Operating system network device. Consider holding the rtnl lock
* @fixed_features: Features which cannot be turned off * @fixed_features: Features which cannot be turned off
...@@ -1255,7 +1254,7 @@ struct efx_udp_tunnel { ...@@ -1255,7 +1254,7 @@ struct efx_udp_tunnel {
* This must check whether the specified table entry is used by RFS * This must check whether the specified table entry is used by RFS
* and that rps_may_expire_flow() returns true for it. * and that rps_may_expire_flow() returns true for it.
* @mtd_probe: Probe and add MTD partitions associated with this net device, * @mtd_probe: Probe and add MTD partitions associated with this net device,
* using efx_mtd_add() * using efx_siena_mtd_add()
* @mtd_rename: Set an MTD partition name using the net device name * @mtd_rename: Set an MTD partition name using the net device name
* @mtd_read: Read from an MTD partition * @mtd_read: Read from an MTD partition
* @mtd_erase: Erase part of an MTD partition * @mtd_erase: Erase part of an MTD partition
......
...@@ -118,7 +118,7 @@ static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel, ...@@ -118,7 +118,7 @@ static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
return skb; return skb;
} }
void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, void efx_siena_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
unsigned int n_frags, unsigned int len, u16 flags) unsigned int n_frags, unsigned int len, u16 flags)
{ {
struct efx_nic *efx = rx_queue->efx; struct efx_nic *efx = rx_queue->efx;
...@@ -310,7 +310,7 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel, ...@@ -310,7 +310,7 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
case XDP_TX: case XDP_TX:
/* Buffer ownership passes to tx on success. */ /* Buffer ownership passes to tx on success. */
xdpf = xdp_convert_buff_to_frame(&xdp); xdpf = xdp_convert_buff_to_frame(&xdp);
err = efx_xdp_tx_buffers(efx, 1, &xdpf, true); err = efx_siena_xdp_tx_buffers(efx, 1, &xdpf, true);
if (unlikely(err != 1)) { if (unlikely(err != 1)) {
efx_free_rx_buffers(rx_queue, rx_buf, 1); efx_free_rx_buffers(rx_queue, rx_buf, 1);
if (net_ratelimit()) if (net_ratelimit())
...@@ -357,7 +357,7 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel, ...@@ -357,7 +357,7 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
} }
/* Handle a received packet. Second half: Touches packet payload. */ /* Handle a received packet. Second half: Touches packet payload. */
void __efx_rx_packet(struct efx_channel *channel) void __efx_siena_rx_packet(struct efx_channel *channel)
{ {
struct efx_nic *efx = channel->efx; struct efx_nic *efx = channel->efx;
struct efx_rx_buffer *rx_buf = struct efx_rx_buffer *rx_buf =
...@@ -391,7 +391,8 @@ void __efx_rx_packet(struct efx_channel *channel) ...@@ -391,7 +391,8 @@ void __efx_rx_packet(struct efx_channel *channel)
rx_buf->flags &= ~EFX_RX_PKT_CSUMMED; rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;
if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb) if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb)
efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh, 0); efx_siena_rx_packet_gro(channel, rx_buf,
channel->rx_pkt_n_frags, eh, 0);
else else
efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags); efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
out: out:
......
...@@ -504,7 +504,8 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic) ...@@ -504,7 +504,8 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
* regardless of checksum state and skbs with a good checksum. * regardless of checksum state and skbs with a good checksum.
*/ */
void void
efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf, efx_siena_rx_packet_gro(struct efx_channel *channel,
struct efx_rx_buffer *rx_buf,
unsigned int n_frags, u8 *eh, __wsum csum) unsigned int n_frags, u8 *eh, __wsum csum)
{ {
struct napi_struct *napi = &channel->napi_str; struct napi_struct *napi = &channel->napi_str;
...@@ -520,8 +521,7 @@ efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf, ...@@ -520,8 +521,7 @@ efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
return; return;
} }
if (efx->net_dev->features & NETIF_F_RXHASH && if (efx->net_dev->features & NETIF_F_RXHASH)
efx_rx_buf_hash_valid(efx, eh))
skb_set_hash(skb, efx_rx_buf_hash(efx, eh), skb_set_hash(skb, efx_rx_buf_hash(efx, eh),
PKT_HASH_TYPE_L3); PKT_HASH_TYPE_L3);
if (csum) { if (csum) {
......
...@@ -81,7 +81,8 @@ void efx_rx_config_page_split(struct efx_nic *efx); ...@@ -81,7 +81,8 @@ void efx_rx_config_page_split(struct efx_nic *efx);
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic); void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic);
void void
efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf, efx_siena_rx_packet_gro(struct efx_channel *channel,
struct efx_rx_buffer *rx_buf,
unsigned int n_frags, u8 *eh, __wsum csum); unsigned int n_frags, u8 *eh, __wsum csum);
struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx); struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx);
......
...@@ -58,14 +58,14 @@ static const char payload_msg[] = ...@@ -58,14 +58,14 @@ static const char payload_msg[] =
"Hello world! This is an Efx loopback test in progress!"; "Hello world! This is an Efx loopback test in progress!";
/* Interrupt mode names */ /* Interrupt mode names */
static const unsigned int efx_interrupt_mode_max = EFX_INT_MODE_MAX; static const unsigned int efx_siena_interrupt_mode_max = EFX_INT_MODE_MAX;
static const char *const efx_interrupt_mode_names[] = { static const char *const efx_siena_interrupt_mode_names[] = {
[EFX_INT_MODE_MSIX] = "MSI-X", [EFX_INT_MODE_MSIX] = "MSI-X",
[EFX_INT_MODE_MSI] = "MSI", [EFX_INT_MODE_MSI] = "MSI",
[EFX_INT_MODE_LEGACY] = "legacy", [EFX_INT_MODE_LEGACY] = "legacy",
}; };
#define INT_MODE(efx) \ #define INT_MODE(efx) \
STRING_TABLE_LOOKUP(efx->interrupt_mode, efx_interrupt_mode) STRING_TABLE_LOOKUP(efx->interrupt_mode, efx_siena_interrupt_mode)
/** /**
* struct efx_loopback_state - persistent state during a loopback selftest * struct efx_loopback_state - persistent state during a loopback selftest
...@@ -197,7 +197,7 @@ static int efx_test_eventq_irq(struct efx_nic *efx, ...@@ -197,7 +197,7 @@ static int efx_test_eventq_irq(struct efx_nic *efx,
schedule_timeout_uninterruptible(wait); schedule_timeout_uninterruptible(wait);
efx_for_each_channel(channel, efx) { efx_for_each_channel(channel, efx) {
efx_stop_eventq(channel); efx_siena_stop_eventq(channel);
if (channel->eventq_read_ptr != if (channel->eventq_read_ptr !=
read_ptr[channel->channel]) { read_ptr[channel->channel]) {
set_bit(channel->channel, &napi_ran); set_bit(channel->channel, &napi_ran);
...@@ -209,7 +209,7 @@ static int efx_test_eventq_irq(struct efx_nic *efx, ...@@ -209,7 +209,7 @@ static int efx_test_eventq_irq(struct efx_nic *efx,
if (efx_nic_event_test_irq_cpu(channel) >= 0) if (efx_nic_event_test_irq_cpu(channel) >= 0)
clear_bit(channel->channel, &int_pend); clear_bit(channel->channel, &int_pend);
} }
efx_start_eventq(channel); efx_siena_start_eventq(channel);
} }
wait *= 2; wait *= 2;
...@@ -637,7 +637,7 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests, ...@@ -637,7 +637,7 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
state->flush = true; state->flush = true;
mutex_lock(&efx->mac_lock); mutex_lock(&efx->mac_lock);
efx->loopback_mode = mode; efx->loopback_mode = mode;
rc = __efx_reconfigure_port(efx); rc = __efx_siena_reconfigure_port(efx);
mutex_unlock(&efx->mac_lock); mutex_unlock(&efx->mac_lock);
if (rc) { if (rc) {
netif_err(efx, drv, efx->net_dev, netif_err(efx, drv, efx->net_dev,
...@@ -731,7 +731,7 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests, ...@@ -731,7 +731,7 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
if (rc_reset) { if (rc_reset) {
netif_err(efx, hw, efx->net_dev, netif_err(efx, hw, efx->net_dev,
"Unable to recover from chip test\n"); "Unable to recover from chip test\n");
efx_schedule_reset(efx, RESET_TYPE_DISABLE); efx_siena_schedule_reset(efx, RESET_TYPE_DISABLE);
return rc_reset; return rc_reset;
} }
...@@ -744,7 +744,7 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests, ...@@ -744,7 +744,7 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
mutex_lock(&efx->mac_lock); mutex_lock(&efx->mac_lock);
efx->phy_mode &= ~PHY_MODE_LOW_POWER; efx->phy_mode &= ~PHY_MODE_LOW_POWER;
efx->loopback_mode = LOOPBACK_NONE; efx->loopback_mode = LOOPBACK_NONE;
__efx_reconfigure_port(efx); __efx_siena_reconfigure_port(efx);
mutex_unlock(&efx->mac_lock); mutex_unlock(&efx->mac_lock);
rc = efx_test_phy(efx, tests, flags); rc = efx_test_phy(efx, tests, flags);
...@@ -759,7 +759,7 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests, ...@@ -759,7 +759,7 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
mutex_lock(&efx->mac_lock); mutex_lock(&efx->mac_lock);
efx->phy_mode = phy_mode; efx->phy_mode = phy_mode;
efx->loopback_mode = loopback_mode; efx->loopback_mode = loopback_mode;
__efx_reconfigure_port(efx); __efx_siena_reconfigure_port(efx);
mutex_unlock(&efx->mac_lock); mutex_unlock(&efx->mac_lock);
efx_device_attach_if_not_resetting(efx); efx_device_attach_if_not_resetting(efx);
......
...@@ -40,7 +40,7 @@ static void siena_push_irq_moderation(struct efx_channel *channel) ...@@ -40,7 +40,7 @@ static void siena_push_irq_moderation(struct efx_channel *channel)
if (channel->irq_moderation_us) { if (channel->irq_moderation_us) {
unsigned int ticks; unsigned int ticks;
ticks = efx_usecs_to_ticks(efx, channel->irq_moderation_us); ticks = efx_siena_usecs_to_ticks(efx, channel->irq_moderation_us);
EFX_POPULATE_DWORD_2(timer_cmd, EFX_POPULATE_DWORD_2(timer_cmd,
FRF_CZ_TC_TIMER_MODE, FRF_CZ_TC_TIMER_MODE,
FFE_CZ_TIMER_MODE_INT_HLDOFF, FFE_CZ_TIMER_MODE_INT_HLDOFF,
...@@ -102,7 +102,7 @@ static int siena_test_chip(struct efx_nic *efx, struct efx_self_tests *tests) ...@@ -102,7 +102,7 @@ static int siena_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
enum reset_type reset_method = RESET_TYPE_ALL; enum reset_type reset_method = RESET_TYPE_ALL;
int rc, rc2; int rc, rc2;
efx_reset_down(efx, reset_method); efx_siena_reset_down(efx, reset_method);
/* Reset the chip immediately so that it is completely /* Reset the chip immediately so that it is completely
* quiescent regardless of what any VF driver does. * quiescent regardless of what any VF driver does.
...@@ -118,7 +118,7 @@ static int siena_test_chip(struct efx_nic *efx, struct efx_self_tests *tests) ...@@ -118,7 +118,7 @@ static int siena_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
rc = efx_mcdi_reset(efx, reset_method); rc = efx_mcdi_reset(efx, reset_method);
out: out:
rc2 = efx_reset_up(efx, reset_method, rc == 0); rc2 = efx_siena_reset_up(efx, reset_method, rc == 0);
return rc ? rc : rc2; return rc ? rc : rc2;
} }
...@@ -583,7 +583,7 @@ static int siena_try_update_nic_stats(struct efx_nic *efx) ...@@ -583,7 +583,7 @@ static int siena_try_update_nic_stats(struct efx_nic *efx)
efx_update_diff_stat(&stats[SIENA_STAT_rx_good_bytes], efx_update_diff_stat(&stats[SIENA_STAT_rx_good_bytes],
stats[SIENA_STAT_rx_bytes] - stats[SIENA_STAT_rx_bytes] -
stats[SIENA_STAT_rx_bad_bytes]); stats[SIENA_STAT_rx_bad_bytes]);
efx_update_sw_stats(efx, stats); efx_siena_update_sw_stats(efx, stats);
return 0; return 0;
} }
...@@ -943,7 +943,7 @@ static int siena_mtd_probe(struct efx_nic *efx) ...@@ -943,7 +943,7 @@ static int siena_mtd_probe(struct efx_nic *efx)
if (rc) if (rc)
goto fail; goto fail;
rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts)); rc = efx_siena_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
fail: fail:
if (rc) if (rc)
kfree(parts); kfree(parts);
...@@ -980,7 +980,7 @@ const struct efx_nic_type siena_a0_nic_type = { ...@@ -980,7 +980,7 @@ const struct efx_nic_type siena_a0_nic_type = {
.remove = siena_remove_nic, .remove = siena_remove_nic,
.init = siena_init_nic, .init = siena_init_nic,
.dimension_resources = siena_dimension_resources, .dimension_resources = siena_dimension_resources,
.fini = efx_port_dummy_op_void, .fini = efx_siena_port_dummy_op_void,
#ifdef CONFIG_EEH #ifdef CONFIG_EEH
.monitor = siena_monitor, .monitor = siena_monitor,
#else #else
...@@ -994,7 +994,7 @@ const struct efx_nic_type siena_a0_nic_type = { ...@@ -994,7 +994,7 @@ const struct efx_nic_type siena_a0_nic_type = {
.fini_dmaq = efx_farch_fini_dmaq, .fini_dmaq = efx_farch_fini_dmaq,
.prepare_flush = efx_siena_prepare_flush, .prepare_flush = efx_siena_prepare_flush,
.finish_flush = siena_finish_flush, .finish_flush = siena_finish_flush,
.prepare_flr = efx_port_dummy_op_void, .prepare_flr = efx_siena_port_dummy_op_void,
.finish_flr = efx_farch_finish_flr, .finish_flr = efx_farch_finish_flr,
.describe_stats = siena_describe_nic_stats, .describe_stats = siena_describe_nic_stats,
.update_stats = siena_update_nic_stats, .update_stats = siena_update_nic_stats,
...@@ -1024,7 +1024,7 @@ const struct efx_nic_type siena_a0_nic_type = { ...@@ -1024,7 +1024,7 @@ const struct efx_nic_type siena_a0_nic_type = {
.tx_remove = efx_farch_tx_remove, .tx_remove = efx_farch_tx_remove,
.tx_write = efx_farch_tx_write, .tx_write = efx_farch_tx_write,
.tx_limit_len = efx_farch_tx_limit_len, .tx_limit_len = efx_farch_tx_limit_len,
.tx_enqueue = __efx_enqueue_skb, .tx_enqueue = __efx_siena_enqueue_skb,
.rx_push_rss_config = siena_rx_push_rss_config, .rx_push_rss_config = siena_rx_push_rss_config,
.rx_pull_rss_config = siena_rx_pull_rss_config, .rx_pull_rss_config = siena_rx_pull_rss_config,
.rx_probe = efx_farch_rx_probe, .rx_probe = efx_farch_rx_probe,
...@@ -1032,7 +1032,7 @@ const struct efx_nic_type siena_a0_nic_type = { ...@@ -1032,7 +1032,7 @@ const struct efx_nic_type siena_a0_nic_type = {
.rx_remove = efx_farch_rx_remove, .rx_remove = efx_farch_rx_remove,
.rx_write = efx_farch_rx_write, .rx_write = efx_farch_rx_write,
.rx_defer_refill = efx_farch_rx_defer_refill, .rx_defer_refill = efx_farch_rx_defer_refill,
.rx_packet = __efx_rx_packet, .rx_packet = __efx_siena_rx_packet,
.ev_probe = efx_farch_ev_probe, .ev_probe = efx_farch_ev_probe,
.ev_init = efx_farch_ev_init, .ev_init = efx_farch_ev_init,
.ev_fini = efx_farch_ev_fini, .ev_fini = efx_farch_ev_fini,
...@@ -1075,9 +1075,9 @@ const struct efx_nic_type siena_a0_nic_type = { ...@@ -1075,9 +1075,9 @@ const struct efx_nic_type siena_a0_nic_type = {
.sriov_set_vf_vlan = efx_siena_sriov_set_vf_vlan, .sriov_set_vf_vlan = efx_siena_sriov_set_vf_vlan,
.sriov_set_vf_spoofchk = efx_siena_sriov_set_vf_spoofchk, .sriov_set_vf_spoofchk = efx_siena_sriov_set_vf_spoofchk,
.sriov_get_vf_config = efx_siena_sriov_get_vf_config, .sriov_get_vf_config = efx_siena_sriov_get_vf_config,
.vswitching_probe = efx_port_dummy_op_int, .vswitching_probe = efx_siena_port_dummy_op_int,
.vswitching_restore = efx_port_dummy_op_int, .vswitching_restore = efx_siena_port_dummy_op_int,
.vswitching_remove = efx_port_dummy_op_void, .vswitching_remove = efx_siena_port_dummy_op_void,
.set_mac_address = efx_siena_sriov_mac_address_changed, .set_mac_address = efx_siena_sriov_mac_address_changed,
#endif #endif
......
...@@ -1043,7 +1043,7 @@ efx_siena_sriov_get_channel_name(struct efx_channel *channel, ...@@ -1043,7 +1043,7 @@ efx_siena_sriov_get_channel_name(struct efx_channel *channel,
static const struct efx_channel_type efx_siena_sriov_channel_type = { static const struct efx_channel_type efx_siena_sriov_channel_type = {
.handle_no_channel = efx_siena_sriov_handle_no_channel, .handle_no_channel = efx_siena_sriov_handle_no_channel,
.pre_probe = efx_siena_sriov_probe_channel, .pre_probe = efx_siena_sriov_probe_channel,
.post_remove = efx_channel_dummy_op_void, .post_remove = efx_siena_channel_dummy_op_void,
.get_name = efx_siena_sriov_get_channel_name, .get_name = efx_siena_sriov_get_channel_name,
/* no copy operation; channel must not be reallocated */ /* no copy operation; channel must not be reallocated */
.keep_eventq = true, .keep_eventq = true,
......
...@@ -138,13 +138,14 @@ static void efx_tx_send_pending(struct efx_channel *channel) ...@@ -138,13 +138,14 @@ static void efx_tx_send_pending(struct efx_channel *channel)
* If any DMA mapping fails, any mapped fragments will be unmapped, * If any DMA mapping fails, any mapped fragments will be unmapped,
* the queue's insert pointer will be restored to its original value. * the queue's insert pointer will be restored to its original value.
* *
* This function is split out from efx_hard_start_xmit to allow the * This function is split out from efx_siena_hard_start_xmit to allow the
* loopback test to direct packets via specific TX queues. * loopback test to direct packets via specific TX queues.
* *
* Returns NETDEV_TX_OK. * Returns NETDEV_TX_OK.
* You must hold netif_tx_lock() to call this function. * You must hold netif_tx_lock() to call this function.
*/ */
netdev_tx_t __efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) netdev_tx_t __efx_siena_enqueue_skb(struct efx_tx_queue *tx_queue,
struct sk_buff *skb)
{ {
unsigned int old_insert_count = tx_queue->insert_count; unsigned int old_insert_count = tx_queue->insert_count;
bool xmit_more = netdev_xmit_more(); bool xmit_more = netdev_xmit_more();
...@@ -219,7 +220,7 @@ netdev_tx_t __efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb ...@@ -219,7 +220,7 @@ netdev_tx_t __efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb
* Runs in NAPI context, either in our poll (for XDP TX) or a different NIC * Runs in NAPI context, either in our poll (for XDP TX) or a different NIC
* (for XDP redirect). * (for XDP redirect).
*/ */
int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs, int efx_siena_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
bool flush) bool flush)
{ {
struct efx_tx_buffer *tx_buffer; struct efx_tx_buffer *tx_buffer;
...@@ -310,7 +311,7 @@ int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs, ...@@ -310,7 +311,7 @@ int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
* Context: non-blocking. * Context: non-blocking.
* Should always return NETDEV_TX_OK and consume the skb. * Should always return NETDEV_TX_OK and consume the skb.
*/ */
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb, netdev_tx_t efx_siena_hard_start_xmit(struct sk_buff *skb,
struct net_device *net_dev) struct net_device *net_dev)
{ {
struct efx_nic *efx = netdev_priv(net_dev); struct efx_nic *efx = netdev_priv(net_dev);
...@@ -354,52 +355,14 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb, ...@@ -354,52 +355,14 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK; return NETDEV_TX_OK;
} }
return __efx_enqueue_skb(tx_queue, skb); return __efx_siena_enqueue_skb(tx_queue, skb);
}
void efx_xmit_done_single(struct efx_tx_queue *tx_queue)
{
unsigned int pkts_compl = 0, bytes_compl = 0;
unsigned int read_ptr;
bool finished = false;
read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
while (!finished) {
struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
if (!efx_tx_buffer_in_use(buffer)) {
struct efx_nic *efx = tx_queue->efx;
netif_err(efx, hw, efx->net_dev,
"TX queue %d spurious single TX completion\n",
tx_queue->queue);
efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
return;
}
/* Need to check the flag before dequeueing. */
if (buffer->flags & EFX_TX_BUF_SKB)
finished = true;
efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
++tx_queue->read_count;
read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
}
tx_queue->pkts_compl += pkts_compl;
tx_queue->bytes_compl += bytes_compl;
EFX_WARN_ON_PARANOID(pkts_compl != 1);
efx_xmit_done_check_empty(tx_queue);
} }
void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue) void efx_siena_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
{ {
struct efx_nic *efx = tx_queue->efx; struct efx_nic *efx = tx_queue->efx;
/* Must be inverse of queue lookup in efx_hard_start_xmit() */ /* Must be inverse of queue lookup in efx_siena_hard_start_xmit() */
tx_queue->core_txq = tx_queue->core_txq =
netdev_get_tx_queue(efx->net_dev, netdev_get_tx_queue(efx->net_dev,
tx_queue->channel->channel + tx_queue->channel->channel +
...@@ -407,7 +370,7 @@ void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue) ...@@ -407,7 +370,7 @@ void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
efx->n_tx_channels : 0)); efx->n_tx_channels : 0));
} }
int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type, int efx_siena_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
void *type_data) void *type_data)
{ {
struct efx_nic *efx = netdev_priv(net_dev); struct efx_nic *efx = netdev_priv(net_dev);
......
...@@ -214,7 +214,7 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue, ...@@ -214,7 +214,7 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
netif_err(efx, tx_err, efx->net_dev, netif_err(efx, tx_err, efx->net_dev,
"TX queue %d spurious TX completion id %d\n", "TX queue %d spurious TX completion id %d\n",
tx_queue->queue, read_ptr); tx_queue->queue, read_ptr);
efx_schedule_reset(efx, RESET_TYPE_TX_SKIP); efx_siena_schedule_reset(efx, RESET_TYPE_TX_SKIP);
return; return;
} }
...@@ -225,7 +225,7 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue, ...@@ -225,7 +225,7 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
} }
} }
void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue) void efx_siena_xmit_done_check_empty(struct efx_tx_queue *tx_queue)
{ {
if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) { if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
tx_queue->old_write_count = READ_ONCE(tx_queue->write_count); tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
...@@ -238,7 +238,7 @@ void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue) ...@@ -238,7 +238,7 @@ void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue)
} }
} }
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) void efx_siena_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{ {
unsigned int fill_level, pkts_compl = 0, bytes_compl = 0; unsigned int fill_level, pkts_compl = 0, bytes_compl = 0;
struct efx_nic *efx = tx_queue->efx; struct efx_nic *efx = tx_queue->efx;
...@@ -265,7 +265,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) ...@@ -265,7 +265,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
netif_tx_wake_queue(tx_queue->core_txq); netif_tx_wake_queue(tx_queue->core_txq);
} }
efx_xmit_done_check_empty(tx_queue); efx_siena_xmit_done_check_empty(tx_queue);
} }
/* Remove buffers put into a tx_queue for the current packet. /* Remove buffers put into a tx_queue for the current packet.
......
...@@ -26,8 +26,8 @@ static inline bool efx_tx_buffer_in_use(struct efx_tx_buffer *buffer) ...@@ -26,8 +26,8 @@ static inline bool efx_tx_buffer_in_use(struct efx_tx_buffer *buffer)
return buffer->len || (buffer->flags & EFX_TX_BUF_OPTION); return buffer->len || (buffer->flags & EFX_TX_BUF_OPTION);
} }
void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue); void efx_siena_xmit_done_check_empty(struct efx_tx_queue *tx_queue);
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); void efx_siena_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
void efx_enqueue_unwind(struct efx_tx_queue *tx_queue, void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
unsigned int insert_count); unsigned int insert_count);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment