Commit 2d0f0ca2 authored by David S. Miller

Merge branch '1GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
1GbE Intel Wired LAN Driver Updates 2018-10-17

This series adds support for the new igc driver.

The igc driver is the new client driver for the Intel I225
Ethernet Controller, which supports speeds up to 2.5GbE.  The reason for
creating a new client driver, instead of adding support for the new
device to e1000e, is that the silicon behaves more like the devices
supported by the igb driver.  It also did not make sense to add a client
part to the igb driver, which supports only 1GbE server parts.

This initial set of patches is designed for basic support (i.e.
establishing link and passing traffic).  Follow-on patch series will add
more advanced support such as VLAN, Wake-on-LAN, etc.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 99e9acd8 208983f0
@@ -287,4 +287,20 @@ config FM10K
To compile this driver as a module, choose M here. The module
will be called fm10k. MSI-X interrupt support is required
config IGC
tristate "Intel(R) Ethernet Controller I225-LM/I225-V support"
default n
depends on PCI
---help---
This driver supports Intel(R) Ethernet Controller I225-LM/I225-V
family of adapters.
For more information on how to identify your adapter, go
to the Adapter & Driver ID Guide that can be located at:
<http://support.intel.com>
To compile this driver as a module, choose M here. The module
will be called igc.
endif # NET_VENDOR_INTEL
@@ -7,6 +7,7 @@ obj-$(CONFIG_E100) += e100.o
obj-$(CONFIG_E1000) += e1000/
obj-$(CONFIG_E1000E) += e1000e/
obj-$(CONFIG_IGB) += igb/
obj-$(CONFIG_IGC) += igc/
obj-$(CONFIG_IGBVF) += igbvf/
obj-$(CONFIG_IXGBE) += ixgbe/
obj-$(CONFIG_IXGBEVF) += ixgbevf/
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2018 Intel Corporation
#
# Intel(R) I225-LM/I225-V 2.5G Ethernet Controller
#
obj-$(CONFIG_IGC) += igc.o
igc-objs := igc_main.o igc_mac.o igc_i225.o igc_base.o igc_nvm.o igc_phy.o
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018 Intel Corporation */
#ifndef _IGC_H_
#define _IGC_H_
#include <linux/kobject.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/ethtool.h>
#include <linux/sctp.h>
#define IGC_ERR(args...) pr_err("igc: " args)
#define PFX "igc: "
#include <linux/timecounter.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#include "igc_hw.h"
/* main */
extern char igc_driver_name[];
extern char igc_driver_version[];
/* Interrupt defines */
#define IGC_START_ITR 648 /* ~6000 ints/sec */
#define IGC_FLAG_HAS_MSI BIT(0)
#define IGC_FLAG_QUEUE_PAIRS BIT(4)
#define IGC_FLAG_NEED_LINK_UPDATE BIT(9)
#define IGC_FLAG_MEDIA_RESET BIT(10)
#define IGC_FLAG_MAS_ENABLE BIT(12)
#define IGC_FLAG_HAS_MSIX BIT(13)
#define IGC_FLAG_VLAN_PROMISC BIT(15)
#define IGC_4K_ITR 980
#define IGC_20K_ITR 196
#define IGC_70K_ITR 56
#define IGC_DEFAULT_ITR 3 /* dynamic */
#define IGC_MAX_ITR_USECS 10000
#define IGC_MIN_ITR_USECS 10
#define NON_Q_VECTORS 1
#define MAX_MSIX_ENTRIES 10
/* TX/RX descriptor defines */
#define IGC_DEFAULT_TXD 256
#define IGC_DEFAULT_TX_WORK 128
#define IGC_MIN_TXD 80
#define IGC_MAX_TXD 4096
#define IGC_DEFAULT_RXD 256
#define IGC_MIN_RXD 80
#define IGC_MAX_RXD 4096
/* Transmit and receive queues */
#define IGC_MAX_RX_QUEUES 4
#define IGC_MAX_TX_QUEUES 4
#define MAX_Q_VECTORS 8
#define MAX_STD_JUMBO_FRAME_SIZE 9216
/* Supported Rx Buffer Sizes */
#define IGC_RXBUFFER_256 256
#define IGC_RXBUFFER_2048 2048
#define IGC_RXBUFFER_3072 3072
#define IGC_RX_HDR_LEN IGC_RXBUFFER_256
/* RX and TX descriptor control thresholds.
* PTHRESH - MAC will consider prefetch if it has fewer than this number of
* descriptors available in its onboard memory.
* Setting this to 0 disables RX descriptor prefetch.
* HTHRESH - MAC will only prefetch if there are at least this many descriptors
* available in host memory.
* If PTHRESH is 0, this should also be 0.
* WTHRESH - RX descriptor writeback threshold - MAC will delay writing back
* descriptors until either it has this many to write back, or the
* ITR timer expires.
*/
#define IGC_RX_PTHRESH 8
#define IGC_RX_HTHRESH 8
#define IGC_TX_PTHRESH 8
#define IGC_TX_HTHRESH 1
#define IGC_RX_WTHRESH 4
#define IGC_TX_WTHRESH 16
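These constants are consumed when the per-queue control registers are
programmed. A minimal sketch, assuming igc packs them into RXDCTL the same
way the igb driver does (the shift positions below are the usual layout for
this register family, not confirmed by this patch):

	u32 rxdctl = 0;

	rxdctl |= IGC_RX_PTHRESH;		/* prefetch threshold, bits 4:0 */
	rxdctl |= IGC_RX_HTHRESH << 8;		/* host threshold, bits 12:8 */
	rxdctl |= IGC_RX_WTHRESH << 16;		/* writeback threshold, bits 20:16 */
	rxdctl |= IGC_RXDCTL_QUEUE_ENABLE;	/* from igc_base.h, further below */
	wr32(IGC_RXDCTL(ring->reg_idx), rxdctl);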
#define IGC_RX_DMA_ATTR \
(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
#define IGC_TS_HDR_LEN 16
#define IGC_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#if (PAGE_SIZE < 8192)
#define IGC_MAX_FRAME_BUILD_SKB \
(SKB_WITH_OVERHEAD(IGC_RXBUFFER_2048) - IGC_SKB_PAD - IGC_TS_HDR_LEN)
#else
#define IGC_MAX_FRAME_BUILD_SKB (IGC_RXBUFFER_2048 - IGC_TS_HDR_LEN)
#endif
/* How many Rx Buffers do we bundle into one write to the hardware? */
#define IGC_RX_BUFFER_WRITE 16 /* Must be power of 2 */
/* igc_test_staterr - tests bits within Rx descriptor status and error fields */
static inline __le32 igc_test_staterr(union igc_adv_rx_desc *rx_desc,
const u32 stat_err_bits)
{
return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
}
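Because the constant mask is byte-swapped once at compile time, callers can
test writeback bits without swapping the descriptor on every check. Two
illustrative uses built only from definitions elsewhere in this series (not
code from this patch):

	/* was this the last descriptor of the frame? */
	if (igc_test_staterr(rx_desc, IGC_RXD_STAT_EOP))
		...

	/* drop frames the MAC flagged with CRC/sequence/Rx errors */
	if (unlikely(igc_test_staterr(rx_desc, IGC_RXDEXT_ERR_FRAME_ERR_MASK)))
		...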
enum igc_state_t {
__IGC_TESTING,
__IGC_RESETTING,
__IGC_DOWN,
__IGC_PTP_TX_IN_PROGRESS,
};
enum igc_tx_flags {
/* cmd_type flags */
IGC_TX_FLAGS_VLAN = 0x01,
IGC_TX_FLAGS_TSO = 0x02,
IGC_TX_FLAGS_TSTAMP = 0x04,
/* olinfo flags */
IGC_TX_FLAGS_IPV4 = 0x10,
IGC_TX_FLAGS_CSUM = 0x20,
};
enum igc_boards {
board_base,
};
/* The largest size we can write to the descriptor is 65535. In order to
* maintain a power of two alignment we have to limit ourselves to 32K.
*/
#define IGC_MAX_TXD_PWR 15
#define IGC_MAX_DATA_PER_TXD BIT(IGC_MAX_TXD_PWR)
/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGC_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
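A quick sanity check of the arithmetic: a worst-case 64 KiB fragment costs
two descriptors under the 32 KiB cap, and DESC_NEEDED roughly covers the skb
head, a context descriptor, and slack on top of MAX_SKB_FRAGS:

	/* TXD_USE_COUNT(65536) == DIV_ROUND_UP(65536, 32768) == 2
	 * TXD_USE_COUNT(60000) == DIV_ROUND_UP(60000, 32768) == 2
	 * TXD_USE_COUNT(1500)  == 1
	 */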
/* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer
*/
struct igc_tx_buffer {
union igc_adv_tx_desc *next_to_watch;
unsigned long time_stamp;
struct sk_buff *skb;
unsigned int bytecount;
u16 gso_segs;
__be16 protocol;
DEFINE_DMA_UNMAP_ADDR(dma);
DEFINE_DMA_UNMAP_LEN(len);
u32 tx_flags;
};
struct igc_rx_buffer {
dma_addr_t dma;
struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
__u32 page_offset;
#else
__u16 page_offset;
#endif
__u16 pagecnt_bias;
};
struct igc_tx_queue_stats {
u64 packets;
u64 bytes;
u64 restart_queue;
u64 restart_queue2;
};
struct igc_rx_queue_stats {
u64 packets;
u64 bytes;
u64 drops;
u64 csum_err;
u64 alloc_failed;
};
struct igc_rx_packet_stats {
u64 ipv4_packets; /* IPv4 headers processed */
u64 ipv4e_packets; /* IPv4E headers with extensions processed */
u64 ipv6_packets; /* IPv6 headers processed */
u64 ipv6e_packets; /* IPv6E headers with extensions processed */
u64 tcp_packets; /* TCP headers processed */
u64 udp_packets; /* UDP headers processed */
u64 sctp_packets; /* SCTP headers processed */
u64 nfs_packets; /* NFS headers processed */
u64 other_packets;
};
struct igc_ring_container {
struct igc_ring *ring; /* pointer to linked list of rings */
unsigned int total_bytes; /* total bytes processed this int */
unsigned int total_packets; /* total packets processed this int */
u16 work_limit; /* total work allowed per interrupt */
u8 count; /* total number of rings in vector */
u8 itr; /* current ITR setting for ring */
};
struct igc_ring {
struct igc_q_vector *q_vector; /* backlink to q_vector */
struct net_device *netdev; /* back pointer to net_device */
struct device *dev; /* device for dma mapping */
union { /* array of buffer info structs */
struct igc_tx_buffer *tx_buffer_info;
struct igc_rx_buffer *rx_buffer_info;
};
void *desc; /* descriptor ring memory */
unsigned long flags; /* ring specific flags */
void __iomem *tail; /* pointer to ring tail register */
dma_addr_t dma; /* phys address of the ring */
unsigned int size; /* length of desc. ring in bytes */
u16 count; /* number of desc. in the ring */
u8 queue_index; /* logical index of the ring*/
u8 reg_idx; /* physical index of the ring */
/* everything past this point are written often */
u16 next_to_clean;
u16 next_to_use;
u16 next_to_alloc;
union {
/* TX */
struct {
struct igc_tx_queue_stats tx_stats;
struct u64_stats_sync tx_syncp;
struct u64_stats_sync tx_syncp2;
};
/* RX */
struct {
struct igc_rx_queue_stats rx_stats;
struct igc_rx_packet_stats pkt_stats;
struct u64_stats_sync rx_syncp;
struct sk_buff *skb;
};
};
} ____cacheline_internodealigned_in_smp;
struct igc_q_vector {
struct igc_adapter *adapter; /* backlink */
void __iomem *itr_register;
u32 eims_value; /* EIMS mask value */
u16 itr_val;
u8 set_itr;
struct igc_ring_container rx, tx;
struct napi_struct napi;
struct rcu_head rcu; /* to avoid race with update stats on free */
char name[IFNAMSIZ + 9];
struct net_device poll_dev;
/* for dynamic allocation of rings associated with this q_vector */
struct igc_ring ring[0] ____cacheline_internodealigned_in_smp;
};
struct igc_mac_addr {
u8 addr[ETH_ALEN];
u8 queue;
u8 state; /* bitmask */
};
#define IGC_MAC_STATE_DEFAULT 0x1
#define IGC_MAC_STATE_MODIFIED 0x2
#define IGC_MAC_STATE_IN_USE 0x4
/* Board specific private data structure */
struct igc_adapter {
struct net_device *netdev;
unsigned long state;
unsigned int flags;
unsigned int num_q_vectors;
struct msix_entry *msix_entries;
/* TX */
u16 tx_work_limit;
u32 tx_timeout_count;
int num_tx_queues;
struct igc_ring *tx_ring[IGC_MAX_TX_QUEUES];
/* RX */
int num_rx_queues;
struct igc_ring *rx_ring[IGC_MAX_RX_QUEUES];
struct timer_list watchdog_timer;
struct timer_list dma_err_timer;
struct timer_list phy_info_timer;
u16 link_speed;
u16 link_duplex;
u8 port_num;
u8 __iomem *io_addr;
/* Interrupt Throttle Rate */
u32 rx_itr_setting;
u32 tx_itr_setting;
struct work_struct reset_task;
struct work_struct watchdog_task;
struct work_struct dma_err_task;
bool fc_autoneg;
u8 tx_timeout_factor;
int msg_enable;
u32 max_frame_size;
u32 min_frame_size;
/* OS defined structs */
struct pci_dev *pdev;
/* lock for statistics */
spinlock_t stats64_lock;
struct rtnl_link_stats64 stats64;
/* structs defined in igc_hw.h */
struct igc_hw hw;
struct igc_hw_stats stats;
struct igc_q_vector *q_vector[MAX_Q_VECTORS];
u32 eims_enable_mask;
u32 eims_other;
u16 tx_ring_count;
u16 rx_ring_count;
u32 *shadow_vfta;
u32 rss_queues;
/* lock for RX network flow classification filter */
spinlock_t nfc_lock;
struct igc_mac_addr *mac_table;
unsigned long link_check_timeout;
struct igc_info ei;
};
/* igc_desc_unused - calculate if we have unused descriptors */
static inline u16 igc_desc_unused(const struct igc_ring *ring)
{
u16 ntc = ring->next_to_clean;
u16 ntu = ring->next_to_use;
return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
}
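Worked example of the wrap-around math, covering both branches of the ternary:

	/* count = 256, ntc = 10,  ntu = 250:  256 + 10 - 250 - 1 = 15 unused
	 * count = 256, ntc = 250, ntu = 10:     0 + 250 - 10 - 1 = 239 unused
	 * The trailing "- 1" keeps next_to_use from ever catching up to
	 * next_to_clean, so a full ring never looks identical to an empty one.
	 */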
static inline s32 igc_get_phy_info(struct igc_hw *hw)
{
if (hw->phy.ops.get_phy_info)
return hw->phy.ops.get_phy_info(hw);
return 0;
}
static inline s32 igc_reset_phy(struct igc_hw *hw)
{
if (hw->phy.ops.reset)
return hw->phy.ops.reset(hw);
return 0;
}
static inline struct netdev_queue *txring_txq(const struct igc_ring *tx_ring)
{
return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
}
enum igc_ring_flags_t {
IGC_RING_FLAG_RX_3K_BUFFER,
IGC_RING_FLAG_RX_BUILD_SKB_ENABLED,
IGC_RING_FLAG_RX_SCTP_CSUM,
IGC_RING_FLAG_RX_LB_VLAN_BSWAP,
IGC_RING_FLAG_TX_CTX_IDX,
IGC_RING_FLAG_TX_DETECT_HANG
};
#define ring_uses_large_buffer(ring) \
test_bit(IGC_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)
#define ring_uses_build_skb(ring) \
test_bit(IGC_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
static inline unsigned int igc_rx_bufsz(struct igc_ring *ring)
{
#if (PAGE_SIZE < 8192)
if (ring_uses_large_buffer(ring))
return IGC_RXBUFFER_3072;
if (ring_uses_build_skb(ring))
return IGC_MAX_FRAME_BUILD_SKB + IGC_TS_HDR_LEN;
#endif
return IGC_RXBUFFER_2048;
}
static inline unsigned int igc_rx_pg_order(struct igc_ring *ring)
{
#if (PAGE_SIZE < 8192)
if (ring_uses_large_buffer(ring))
return 1;
#endif
return 0;
}
static inline s32 igc_read_phy_reg(struct igc_hw *hw, u32 offset, u16 *data)
{
if (hw->phy.ops.read_reg)
return hw->phy.ops.read_reg(hw, offset, data);
return 0;
}
#define igc_rx_pg_size(_ring) (PAGE_SIZE << igc_rx_pg_order(_ring))
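Putting the two helpers together on a 4 KiB-page system (values follow
directly from the definitions above):

	/* flags = 0 (default):            igc_rx_pg_order() == 0, page size 4096
	 * IGC_RING_FLAG_RX_3K_BUFFER set: igc_rx_bufsz() == IGC_RXBUFFER_3072,
	 *                                 igc_rx_pg_order() == 1, page size 8192
	 */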
#define IGC_TXD_DCMD (IGC_ADVTXD_DCMD_EOP | IGC_ADVTXD_DCMD_RS)
#define IGC_RX_DESC(R, i) \
(&(((union igc_adv_rx_desc *)((R)->desc))[i]))
#define IGC_TX_DESC(R, i) \
(&(((union igc_adv_tx_desc *)((R)->desc))[i]))
#define IGC_TX_CTXTDESC(R, i) \
(&(((struct igc_adv_tx_context_desc *)((R)->desc))[i]))
#endif /* _IGC_H_ */
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Intel Corporation */
#include <linux/delay.h>
#include "igc_hw.h"
#include "igc_i225.h"
#include "igc_mac.h"
#include "igc_base.h"
#include "igc.h"
/**
* igc_set_pcie_completion_timeout - set pci-e completion timeout
* @hw: pointer to the HW structure
*/
static s32 igc_set_pcie_completion_timeout(struct igc_hw *hw)
{
u32 gcr = rd32(IGC_GCR);
u16 pcie_devctl2;
s32 ret_val = 0;
/* only take action if timeout value is defaulted to 0 */
if (gcr & IGC_GCR_CMPL_TMOUT_MASK)
goto out;
/* if capabilities version is type 1 we can write the
* timeout of 10ms to 200ms through the GCR register
*/
if (!(gcr & IGC_GCR_CAP_VER2)) {
gcr |= IGC_GCR_CMPL_TMOUT_10ms;
goto out;
}
/* for version 2 capabilities we need to write the config space
* directly in order to set the completion timeout value for
* 16ms to 55ms
*/
ret_val = igc_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
&pcie_devctl2);
if (ret_val)
goto out;
pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;
ret_val = igc_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
&pcie_devctl2);
out:
/* disable completion timeout resend */
gcr &= ~IGC_GCR_CMPL_TMOUT_RESEND;
wr32(IGC_GCR, gcr);
return ret_val;
}
/**
* igc_check_for_link_base - Check for link
* @hw: pointer to the HW structure
*
* The I225 parts handled here are copper-only, so this wrapper uses the
* generic copper interface for determining link.
*/
static s32 igc_check_for_link_base(struct igc_hw *hw)
{
s32 ret_val = 0;
ret_val = igc_check_for_copper_link(hw);
return ret_val;
}
/**
* igc_reset_hw_base - Reset hardware
* @hw: pointer to the HW structure
*
* This resets the hardware into a known state. This is a
* function pointer entry point called by the api module.
*/
static s32 igc_reset_hw_base(struct igc_hw *hw)
{
s32 ret_val;
u32 ctrl;
/* Prevent the PCI-E bus from sticking if there is no TLP connection
* on the last TLP read/write transaction when MAC is reset.
*/
ret_val = igc_disable_pcie_master(hw);
if (ret_val)
hw_dbg("PCI-E Master disable polling has failed.\n");
/* set the completion timeout for interface */
ret_val = igc_set_pcie_completion_timeout(hw);
if (ret_val)
hw_dbg("PCI-E Set completion timeout has failed.\n");
hw_dbg("Masking off all interrupts\n");
wr32(IGC_IMC, 0xffffffff);
wr32(IGC_RCTL, 0);
wr32(IGC_TCTL, IGC_TCTL_PSP);
wrfl();
usleep_range(10000, 20000);
ctrl = rd32(IGC_CTRL);
hw_dbg("Issuing a global reset to MAC\n");
wr32(IGC_CTRL, ctrl | IGC_CTRL_RST);
ret_val = igc_get_auto_rd_done(hw);
if (ret_val) {
/* When auto config read does not complete, do not
* return with an error. This can happen in situations
* where there is no eeprom and prevents getting link.
*/
hw_dbg("Auto Read Done did not complete\n");
}
/* Clear any pending interrupt events. */
wr32(IGC_IMC, 0xffffffff);
rd32(IGC_ICR);
return ret_val;
}
/**
* igc_get_phy_id_base - Retrieve PHY addr and id
* @hw: pointer to the HW structure
*
* Retrieves the PHY address and ID for both PHYs that do and do not use
* the sgmii interface.
*/
static s32 igc_get_phy_id_base(struct igc_hw *hw)
{
s32 ret_val = 0;
ret_val = igc_get_phy_id(hw);
return ret_val;
}
/**
* igc_init_nvm_params_base - Init NVM func ptrs.
* @hw: pointer to the HW structure
*/
static s32 igc_init_nvm_params_base(struct igc_hw *hw)
{
struct igc_nvm_info *nvm = &hw->nvm;
u32 eecd = rd32(IGC_EECD);
u16 size;
size = (u16)((eecd & IGC_EECD_SIZE_EX_MASK) >>
IGC_EECD_SIZE_EX_SHIFT);
/* Added to a constant, "size" becomes the left-shift value
* for setting word_size.
*/
size += NVM_WORD_SIZE_BASE_SHIFT;
/* Just in case size is out of range, cap it to the largest
* EEPROM size supported
*/
if (size > 15)
size = 15;
nvm->word_size = BIT(size);
nvm->opcode_bits = 8;
nvm->delay_usec = 1;
nvm->page_size = eecd & IGC_EECD_ADDR_BITS ? 32 : 8;
nvm->address_bits = eecd & IGC_EECD_ADDR_BITS ?
16 : 8;
if (nvm->word_size == BIT(15))
nvm->page_size = 128;
return 0;
}
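For example, a size field of 6 in EECD decodes to a 4096-word EEPROM:

	/* size = 6 + NVM_WORD_SIZE_BASE_SHIFT (6) = 12
	 * word_size = BIT(12) = 4096 sixteen-bit words, i.e. an 8 KiB EEPROM
	 */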
/**
* igc_setup_copper_link_base - Configure copper link settings
* @hw: pointer to the HW structure
*
* Configures the link for auto-neg or forced speed and duplex. Then we check
* for link, once link is established calls to configure collision distance
* and flow control are called.
*/
static s32 igc_setup_copper_link_base(struct igc_hw *hw)
{
s32 ret_val = 0;
u32 ctrl;
ctrl = rd32(IGC_CTRL);
ctrl |= IGC_CTRL_SLU;
ctrl &= ~(IGC_CTRL_FRCSPD | IGC_CTRL_FRCDPX);
wr32(IGC_CTRL, ctrl);
ret_val = igc_setup_copper_link(hw);
return ret_val;
}
/**
* igc_init_mac_params_base - Init MAC func ptrs.
* @hw: pointer to the HW structure
*/
static s32 igc_init_mac_params_base(struct igc_hw *hw)
{
struct igc_dev_spec_base *dev_spec = &hw->dev_spec._base;
struct igc_mac_info *mac = &hw->mac;
/* Set mta register count */
mac->mta_reg_count = 128;
mac->rar_entry_count = IGC_RAR_ENTRIES;
/* reset */
mac->ops.reset_hw = igc_reset_hw_base;
mac->ops.acquire_swfw_sync = igc_acquire_swfw_sync_i225;
mac->ops.release_swfw_sync = igc_release_swfw_sync_i225;
/* Allow a single clear of the SW semaphore on I225 */
if (mac->type == igc_i225)
dev_spec->clear_semaphore_once = true;
/* physical interface link setup */
mac->ops.setup_physical_interface = igc_setup_copper_link_base;
return 0;
}
/**
* igc_init_phy_params_base - Init PHY func ptrs.
* @hw: pointer to the HW structure
*/
static s32 igc_init_phy_params_base(struct igc_hw *hw)
{
struct igc_phy_info *phy = &hw->phy;
s32 ret_val = 0;
u32 ctrl_ext;
if (hw->phy.media_type != igc_media_type_copper) {
phy->type = igc_phy_none;
goto out;
}
phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT_2500;
phy->reset_delay_us = 100;
ctrl_ext = rd32(IGC_CTRL_EXT);
/* set lan id */
hw->bus.func = (rd32(IGC_STATUS) & IGC_STATUS_FUNC_MASK) >>
IGC_STATUS_FUNC_SHIFT;
/* Make sure the PHY is in a good state. Several people have reported
* firmware leaving the PHY's page select register set to something
* other than the default of zero, which causes the PHY ID read to
* access something other than the intended register.
*/
ret_val = hw->phy.ops.reset(hw);
if (ret_val) {
hw_dbg("Error resetting the PHY.\n");
goto out;
}
ret_val = igc_get_phy_id_base(hw);
if (ret_val)
return ret_val;
igc_check_for_link_base(hw);
/* Verify phy id and set remaining function pointers */
switch (phy->id) {
case I225_I_PHY_ID:
phy->type = igc_phy_i225;
break;
default:
ret_val = -IGC_ERR_PHY;
goto out;
}
out:
return ret_val;
}
static s32 igc_get_invariants_base(struct igc_hw *hw)
{
struct igc_mac_info *mac = &hw->mac;
u32 link_mode = 0;
u32 ctrl_ext = 0;
s32 ret_val = 0;
switch (hw->device_id) {
case IGC_DEV_ID_I225_LM:
case IGC_DEV_ID_I225_V:
mac->type = igc_i225;
break;
default:
return -IGC_ERR_MAC_INIT;
}
hw->phy.media_type = igc_media_type_copper;
ctrl_ext = rd32(IGC_CTRL_EXT);
link_mode = ctrl_ext & IGC_CTRL_EXT_LINK_MODE_MASK;
/* mac initialization and operations */
ret_val = igc_init_mac_params_base(hw);
if (ret_val)
goto out;
/* NVM initialization */
ret_val = igc_init_nvm_params_base(hw);
switch (hw->mac.type) {
case igc_i225:
ret_val = igc_init_nvm_params_i225(hw);
break;
default:
break;
}
/* setup PHY parameters */
ret_val = igc_init_phy_params_base(hw);
if (ret_val)
goto out;
out:
return ret_val;
}
/**
* igc_acquire_phy_base - Acquire rights to access PHY
* @hw: pointer to the HW structure
*
* Acquire access rights to the correct PHY. This is a
* function pointer entry point called by the api module.
*/
static s32 igc_acquire_phy_base(struct igc_hw *hw)
{
u16 mask = IGC_SWFW_PHY0_SM;
return hw->mac.ops.acquire_swfw_sync(hw, mask);
}
/**
* igc_release_phy_base - Release rights to access PHY
* @hw: pointer to the HW structure
*
* A wrapper to release access rights to the correct PHY. This is a
* function pointer entry point called by the api module.
*/
static void igc_release_phy_base(struct igc_hw *hw)
{
u16 mask = IGC_SWFW_PHY0_SM;
hw->mac.ops.release_swfw_sync(hw, mask);
}
/**
* igc_get_link_up_info_base - Get link speed/duplex info
* @hw: pointer to the HW structure
* @speed: stores the current speed
* @duplex: stores the current duplex
*
* This is a wrapper function that uses the generic copper function to
* retrieve the current link speed and duplex information.
*/
static s32 igc_get_link_up_info_base(struct igc_hw *hw, u16 *speed,
u16 *duplex)
{
s32 ret_val;
ret_val = igc_get_speed_and_duplex_copper(hw, speed, duplex);
return ret_val;
}
/**
* igc_init_hw_base - Initialize hardware
* @hw: pointer to the HW structure
*
* This inits the hardware readying it for operation.
*/
static s32 igc_init_hw_base(struct igc_hw *hw)
{
struct igc_mac_info *mac = &hw->mac;
u16 i, rar_count = mac->rar_entry_count;
s32 ret_val = 0;
/* Setup the receive address */
igc_init_rx_addrs(hw, rar_count);
/* Zero out the Multicast HASH table */
hw_dbg("Zeroing the MTA\n");
for (i = 0; i < mac->mta_reg_count; i++)
array_wr32(IGC_MTA, i, 0);
/* Zero out the Unicast HASH table */
hw_dbg("Zeroing the UTA\n");
for (i = 0; i < mac->uta_reg_count; i++)
array_wr32(IGC_UTA, i, 0);
/* Setup link and flow control */
ret_val = igc_setup_link(hw);
/* Clear all of the statistics registers (clear on read). It is
* important that we do this after we have tried to establish link
* because the symbol error count will increment wildly if there
* is no link.
*/
igc_clear_hw_cntrs_base(hw);
return ret_val;
}
/**
* igc_read_mac_addr_base - Read device MAC address
* @hw: pointer to the HW structure
*/
static s32 igc_read_mac_addr_base(struct igc_hw *hw)
{
s32 ret_val = 0;
ret_val = igc_read_mac_addr(hw);
return ret_val;
}
/**
* igc_power_down_phy_copper_base - Remove link during PHY power down
* @hw: pointer to the HW structure
*
* In the case of a PHY power down to save power, or to turn off the link
* during a driver unload when Wake-on-LAN is not enabled, remove the link.
*/
void igc_power_down_phy_copper_base(struct igc_hw *hw)
{
/* If the management interface is not enabled, then power down */
if (!(igc_enable_mng_pass_thru(hw) || igc_check_reset_block(hw)))
igc_power_down_phy_copper(hw);
}
/**
* igc_rx_fifo_flush_base - Clean rx fifo after Rx enable
* @hw: pointer to the HW structure
*
* After Rx enable, if manageability is enabled then there is likely some
* bad data at the start of the fifo and possibly in the DMA fifo. This
* function clears the fifos and flushes any packets that came in as rx was
* being enabled.
*/
void igc_rx_fifo_flush_base(struct igc_hw *hw)
{
u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
int i, ms_wait;
/* disable IPv6 options as per hardware errata */
rfctl = rd32(IGC_RFCTL);
rfctl |= IGC_RFCTL_IPV6_EX_DIS;
wr32(IGC_RFCTL, rfctl);
if (!(rd32(IGC_MANC) & IGC_MANC_RCV_TCO_EN))
return;
/* Disable all Rx queues */
for (i = 0; i < 4; i++) {
rxdctl[i] = rd32(IGC_RXDCTL(i));
wr32(IGC_RXDCTL(i),
rxdctl[i] & ~IGC_RXDCTL_QUEUE_ENABLE);
}
/* Poll all queues to verify they have shut down */
for (ms_wait = 0; ms_wait < 10; ms_wait++) {
usleep_range(1000, 2000);
rx_enabled = 0;
for (i = 0; i < 4; i++)
rx_enabled |= rd32(IGC_RXDCTL(i));
if (!(rx_enabled & IGC_RXDCTL_QUEUE_ENABLE))
break;
}
if (ms_wait == 10)
pr_debug("Queue disable timed out after 10ms\n");
/* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
* incoming packets are rejected. Set enable and wait 2ms so that
* any packet that was coming in as RCTL.EN was set is flushed
*/
wr32(IGC_RFCTL, rfctl & ~IGC_RFCTL_LEF);
rlpml = rd32(IGC_RLPML);
wr32(IGC_RLPML, 0);
rctl = rd32(IGC_RCTL);
temp_rctl = rctl & ~(IGC_RCTL_EN | IGC_RCTL_SBP);
temp_rctl |= IGC_RCTL_LPE;
wr32(IGC_RCTL, temp_rctl);
wr32(IGC_RCTL, temp_rctl | IGC_RCTL_EN);
wrfl();
usleep_range(2000, 3000);
/* Enable Rx queues that were previously enabled and restore our
* previous state
*/
for (i = 0; i < 4; i++)
wr32(IGC_RXDCTL(i), rxdctl[i]);
wr32(IGC_RCTL, rctl);
wrfl();
wr32(IGC_RLPML, rlpml);
wr32(IGC_RFCTL, rfctl);
/* Flush receive errors generated by workaround */
rd32(IGC_ROC);
rd32(IGC_RNBC);
rd32(IGC_MPC);
}
static struct igc_mac_operations igc_mac_ops_base = {
.init_hw = igc_init_hw_base,
.check_for_link = igc_check_for_link_base,
.rar_set = igc_rar_set,
.read_mac_addr = igc_read_mac_addr_base,
.get_speed_and_duplex = igc_get_link_up_info_base,
};
static const struct igc_phy_operations igc_phy_ops_base = {
.acquire = igc_acquire_phy_base,
.release = igc_release_phy_base,
.reset = igc_phy_hw_reset,
.read_reg = igc_read_phy_reg_gpy,
.write_reg = igc_write_phy_reg_gpy,
};
const struct igc_info igc_base_info = {
.get_invariants = igc_get_invariants_base,
.mac_ops = &igc_mac_ops_base,
.phy_ops = &igc_phy_ops_base,
};
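This ops/info table is the hook the PCI probe path uses. Mirroring how igb
consumes its equivalent tables, igc_main.c can be expected to index a board
table by the igc_boards enum and call through get_invariants and mac_ops
(a sketch of the expected wiring, not code from this file):

	static const struct igc_info *igc_info_tbl[] = {
		[board_base] = &igc_base_info,
	};

	/* at probe time, roughly: */
	const struct igc_info *ei = igc_info_tbl[board_base];

	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
	ei->get_invariants(hw);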
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018 Intel Corporation */
#ifndef _IGC_BASE_H
#define _IGC_BASE_H
/* forward declaration */
void igc_rx_fifo_flush_base(struct igc_hw *hw);
void igc_power_down_phy_copper_base(struct igc_hw *hw);
/* Transmit Descriptor - Advanced */
union igc_adv_tx_desc {
struct {
__le64 buffer_addr; /* Address of descriptor's data buf */
__le32 cmd_type_len;
__le32 olinfo_status;
} read;
struct {
__le64 rsvd; /* Reserved */
__le32 nxtseq_seed;
__le32 status;
} wb;
};
/* Adv Transmit Descriptor Config Masks */
#define IGC_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp packet */
#define IGC_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */
#define IGC_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
#define IGC_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */
#define IGC_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
#define IGC_ADVTXD_DCMD_RS 0x08000000 /* Report Status */
#define IGC_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */
#define IGC_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */
#define IGC_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
#define IGC_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
#define IGC_RAR_ENTRIES 16
struct igc_adv_data_desc {
__le64 buffer_addr; /* Address of the descriptor's data buffer */
union {
u32 data;
struct {
u32 datalen:16; /* Data buffer length */
u32 rsvd:4;
u32 dtyp:4; /* Descriptor type */
u32 dcmd:8; /* Descriptor command */
} config;
} lower;
union {
u32 data;
struct {
u32 status:4; /* Descriptor status */
u32 idx:4;
u32 popts:6; /* Packet Options */
u32 paylen:18; /* Payload length */
} options;
} upper;
};
/* Receive Descriptor - Advanced */
union igc_adv_rx_desc {
struct {
__le64 pkt_addr; /* Packet buffer address */
__le64 hdr_addr; /* Header buffer address */
} read;
struct {
struct {
union {
__le32 data;
struct {
__le16 pkt_info; /*RSS type, Pkt type*/
/* Split Header, header buffer len */
__le16 hdr_info;
} hs_rss;
} lo_dword;
union {
__le32 rss; /* RSS Hash */
struct {
__le16 ip_id; /* IP id */
__le16 csum; /* Packet Checksum */
} csum_ip;
} hi_dword;
} lower;
struct {
__le32 status_error; /* ext status/error */
__le16 length; /* Packet length */
__le16 vlan; /* VLAN tag */
} upper;
} wb; /* writeback */
};
/* Additional Transmit Descriptor Control definitions */
#define IGC_TXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Tx Queue */
/* Additional Receive Descriptor Control definitions */
#define IGC_RXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Rx Queue */
/* SRRCTL bit definitions */
#define IGC_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */
#define IGC_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */
#define IGC_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
#endif /* _IGC_BASE_H */
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018 Intel Corporation */
#ifndef _IGC_DEFINES_H_
#define _IGC_DEFINES_H_
#define IGC_CTRL_EXT_DRV_LOAD 0x10000000 /* Drv loaded bit for FW */
/* PCI Bus Info */
#define PCIE_DEVICE_CONTROL2 0x28
#define PCIE_DEVICE_CONTROL2_16ms 0x0005
/* Physical Func Reset Done Indication */
#define IGC_CTRL_EXT_LINK_MODE_MASK 0x00C00000
/* Loop limit on how long we wait for auto-negotiation to complete */
#define COPPER_LINK_UP_LIMIT 10
#define PHY_AUTO_NEG_LIMIT 45
#define PHY_FORCE_LIMIT 20
/* Number of 100 microseconds we wait for PCI Express master disable */
#define MASTER_DISABLE_TIMEOUT 800
/*Blocks new Master requests */
#define IGC_CTRL_GIO_MASTER_DISABLE 0x00000004
/* Status of Master requests. */
#define IGC_STATUS_GIO_MASTER_ENABLE 0x00080000
/* PCI Express Control */
#define IGC_GCR_CMPL_TMOUT_MASK 0x0000F000
#define IGC_GCR_CMPL_TMOUT_10ms 0x00001000
#define IGC_GCR_CMPL_TMOUT_RESEND 0x00010000
#define IGC_GCR_CAP_VER2 0x00040000
/* Receive Address
* Number of high/low register pairs in the RAR. The RAR (Receive Address
* Registers) holds the directed and multicast addresses that we monitor.
* Technically, we have 16 spots. However, we reserve one of these spots
* (RAR[15]) for our directed address used by controllers with
* manageability enabled, allowing us room for 15 multicast addresses.
*/
#define IGC_RAH_AV 0x80000000 /* Receive descriptor valid */
#define IGC_RAH_POOL_1 0x00040000
#define IGC_RAL_MAC_ADDR_LEN 4
#define IGC_RAH_MAC_ADDR_LEN 2
/* Error Codes */
#define IGC_SUCCESS 0
#define IGC_ERR_NVM 1
#define IGC_ERR_PHY 2
#define IGC_ERR_CONFIG 3
#define IGC_ERR_PARAM 4
#define IGC_ERR_MAC_INIT 5
#define IGC_ERR_RESET 9
#define IGC_ERR_MASTER_REQUESTS_PENDING 10
#define IGC_ERR_BLK_PHY_RESET 12
#define IGC_ERR_SWFW_SYNC 13
/* Device Control */
#define IGC_CTRL_RST 0x04000000 /* Global reset */
#define IGC_CTRL_PHY_RST 0x80000000 /* PHY Reset */
#define IGC_CTRL_SLU 0x00000040 /* Set link up (Force Link) */
#define IGC_CTRL_FRCSPD 0x00000800 /* Force Speed */
#define IGC_CTRL_FRCDPX 0x00001000 /* Force Duplex */
#define IGC_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
#define IGC_CTRL_TFCE 0x10000000 /* Transmit flow control enable */
#define IGC_CONNSW_AUTOSENSE_EN 0x1
/* PBA constants */
#define IGC_PBA_34K 0x0022
/* SW Semaphore Register */
#define IGC_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
#define IGC_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
/* SWFW_SYNC Definitions */
#define IGC_SWFW_EEP_SM 0x1
#define IGC_SWFW_PHY0_SM 0x2
/* Autoneg Advertisement Register */
#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */
#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */
#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */
#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */
#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */
#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */
/* Link Partner Ability Register (Base Page) */
#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */
#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */
/* 1000BASE-T Control Register */
#define CR_1000T_ASYM_PAUSE 0x0080 /* Advertise asymmetric pause bit */
#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */
#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */
/* 1000BASE-T Status Register */
#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */
/* PHY GPY 211 registers */
#define STANDARD_AN_REG_MASK 0x0007 /* MMD */
#define ANEG_MULTIGBT_AN_CTRL 0x0020 /* MULTI GBT AN Control Register */
#define MMD_DEVADDR_SHIFT 16 /* Shift MMD to higher bits */
#define CR_2500T_FD_CAPS 0x0080 /* Advertise 2500T FD capability */
/* NVM Control */
/* Number of milliseconds for NVM auto read done after MAC reset. */
#define AUTO_READ_DONE_TIMEOUT 10
#define IGC_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */
#define IGC_EECD_REQ 0x00000040 /* NVM Access Request */
#define IGC_EECD_GNT 0x00000080 /* NVM Access Grant */
/* NVM Addressing bits based on type 0=small, 1=large */
#define IGC_EECD_ADDR_BITS 0x00000400
#define IGC_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */
#define IGC_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */
#define IGC_EECD_SIZE_EX_SHIFT 11
#define IGC_EECD_FLUPD_I225 0x00800000 /* Update FLASH */
#define IGC_EECD_FLUDONE_I225 0x04000000 /* Update FLASH done*/
#define IGC_EECD_FLASH_DETECTED_I225 0x00080000 /* FLASH detected */
#define IGC_FLUDONE_ATTEMPTS 20000
#define IGC_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */
/* Offset to data in NVM read/write registers */
#define IGC_NVM_RW_REG_DATA 16
#define IGC_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */
#define IGC_NVM_RW_REG_START 1 /* Start operation */
#define IGC_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
#define IGC_NVM_POLL_READ 0 /* Flag for polling for read complete */
/* NVM Word Offsets */
#define NVM_CHECKSUM_REG 0x003F
/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
#define NVM_SUM 0xBABA
#define NVM_PBA_OFFSET_0 8
#define NVM_PBA_OFFSET_1 9
#define NVM_RESERVED_WORD 0xFFFF
#define NVM_PBA_PTR_GUARD 0xFAFA
#define NVM_WORD_SIZE_BASE_SHIFT 6
/* Collision related configuration parameters */
#define IGC_COLLISION_THRESHOLD 15
#define IGC_CT_SHIFT 4
#define IGC_COLLISION_DISTANCE 63
#define IGC_COLD_SHIFT 12
/* Device Status */
#define IGC_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */
#define IGC_STATUS_LU 0x00000002 /* Link up.0=no,1=link */
#define IGC_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */
#define IGC_STATUS_FUNC_SHIFT 2
#define IGC_STATUS_FUNC_1 0x00000004 /* Function 1 */
#define IGC_STATUS_TXOFF 0x00000010 /* transmission paused */
#define IGC_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
#define IGC_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
#define IGC_STATUS_SPEED_2500 0x00400000 /* Speed 2.5Gb/s */
#define SPEED_10 10
#define SPEED_100 100
#define SPEED_1000 1000
#define SPEED_2500 2500
#define HALF_DUPLEX 1
#define FULL_DUPLEX 2
/* 1Gbps and 2.5Gbps half duplex are not supported, nor spec-compliant. */
#define ADVERTISE_10_HALF 0x0001
#define ADVERTISE_10_FULL 0x0002
#define ADVERTISE_100_HALF 0x0004
#define ADVERTISE_100_FULL 0x0008
#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */
#define ADVERTISE_1000_FULL 0x0020
#define ADVERTISE_2500_HALF 0x0040 /* Not used, just FYI */
#define ADVERTISE_2500_FULL 0x0080
#define IGC_ALL_SPEED_DUPLEX_2500 ( \
ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \
ADVERTISE_100_FULL | ADVERTISE_1000_FULL | ADVERTISE_2500_FULL)
#define AUTONEG_ADVERTISE_SPEED_DEFAULT_2500 IGC_ALL_SPEED_DUPLEX_2500
/* Interrupt Cause Read */
#define IGC_ICR_TXDW BIT(0) /* Transmit desc written back */
#define IGC_ICR_TXQE BIT(1) /* Transmit Queue empty */
#define IGC_ICR_LSC BIT(2) /* Link Status Change */
#define IGC_ICR_RXSEQ BIT(3) /* Rx sequence error */
#define IGC_ICR_RXDMT0 BIT(4) /* Rx desc min. threshold (0) */
#define IGC_ICR_RXO BIT(6) /* Rx overrun */
#define IGC_ICR_RXT0 BIT(7) /* Rx timer intr (ring 0) */
#define IGC_ICR_DRSTA BIT(30) /* Device Reset Asserted */
/* If this bit asserted, the driver should claim the interrupt */
#define IGC_ICR_INT_ASSERTED BIT(31)
#define IGC_ICS_RXT0 IGC_ICR_RXT0 /* Rx timer intr */
#define IMS_ENABLE_MASK ( \
IGC_IMS_RXT0 | \
IGC_IMS_TXDW | \
IGC_IMS_RXDMT0 | \
IGC_IMS_RXSEQ | \
IGC_IMS_LSC)
/* Interrupt Mask Set */
#define IGC_IMS_TXDW IGC_ICR_TXDW /* Tx desc written back */
#define IGC_IMS_RXSEQ IGC_ICR_RXSEQ /* Rx sequence error */
#define IGC_IMS_LSC IGC_ICR_LSC /* Link Status Change */
#define IGC_IMS_DOUTSYNC IGC_ICR_DOUTSYNC /* NIC DMA out of sync */
#define IGC_IMS_DRSTA IGC_ICR_DRSTA /* Device Reset Asserted */
#define IGC_IMS_RXT0 IGC_ICR_RXT0 /* Rx timer intr */
#define IGC_IMS_RXDMT0 IGC_ICR_RXDMT0 /* Rx desc min. threshold */
#define IGC_QVECTOR_MASK 0x7FFC /* Q-vector mask */
#define IGC_ITR_VAL_MASK 0x04 /* ITR value mask */
/* Interrupt Cause Set */
#define IGC_ICS_LSC IGC_ICR_LSC /* Link Status Change */
#define IGC_ICS_RXDMT0 IGC_ICR_RXDMT0 /* rx desc min. threshold */
#define IGC_ICS_DRSTA IGC_ICR_DRSTA /* Device Reset Aserted */
#define IGC_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */
#define IGC_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */
#define IGC_IVAR_VALID 0x80
#define IGC_GPIE_NSICR 0x00000001
#define IGC_GPIE_MSIX_MODE 0x00000010
#define IGC_GPIE_EIAME 0x40000000
#define IGC_GPIE_PBA 0x80000000
/* Transmit Descriptor bit definitions */
#define IGC_TXD_DTYP_D 0x00100000 /* Data Descriptor */
#define IGC_TXD_DTYP_C 0x00000000 /* Context Descriptor */
#define IGC_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
#define IGC_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
#define IGC_TXD_CMD_EOP 0x01000000 /* End of Packet */
#define IGC_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
#define IGC_TXD_CMD_IC 0x04000000 /* Insert Checksum */
#define IGC_TXD_CMD_RS 0x08000000 /* Report Status */
#define IGC_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */
#define IGC_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */
#define IGC_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
#define IGC_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */
#define IGC_TXD_STAT_DD 0x00000001 /* Descriptor Done */
#define IGC_TXD_STAT_EC 0x00000002 /* Excess Collisions */
#define IGC_TXD_STAT_LC 0x00000004 /* Late Collisions */
#define IGC_TXD_STAT_TU 0x00000008 /* Transmit underrun */
#define IGC_TXD_CMD_TCP 0x01000000 /* TCP packet */
#define IGC_TXD_CMD_IP 0x02000000 /* IP packet */
#define IGC_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */
#define IGC_TXD_STAT_TC 0x00000004 /* Tx Underrun */
#define IGC_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */
/* Transmit Control */
#define IGC_TCTL_EN 0x00000002 /* enable Tx */
#define IGC_TCTL_PSP 0x00000008 /* pad short packets */
#define IGC_TCTL_CT 0x00000ff0 /* collision threshold */
#define IGC_TCTL_COLD 0x003ff000 /* collision distance */
#define IGC_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
#define IGC_TCTL_MULR 0x10000000 /* Multiple request support */
/* Flow Control Constants */
#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001
#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100
#define FLOW_CONTROL_TYPE 0x8808
/* Enable XON frame transmission */
#define IGC_FCRTL_XONE 0x80000000
/* Management Control */
#define IGC_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */
#define IGC_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */
/* Receive Control */
#define IGC_RCTL_RST 0x00000001 /* Software reset */
#define IGC_RCTL_EN 0x00000002 /* enable */
#define IGC_RCTL_SBP 0x00000004 /* store bad packet */
#define IGC_RCTL_UPE 0x00000008 /* unicast promisc enable */
#define IGC_RCTL_MPE 0x00000010 /* multicast promisc enable */
#define IGC_RCTL_LPE 0x00000020 /* long packet enable */
#define IGC_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */
#define IGC_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */
#define IGC_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min thresh size */
#define IGC_RCTL_BAM 0x00008000 /* broadcast enable */
/* Receive Descriptor bit definitions */
#define IGC_RXD_STAT_EOP 0x02 /* End of Packet */
#define IGC_RXDEXT_STATERR_CE 0x01000000
#define IGC_RXDEXT_STATERR_SE 0x02000000
#define IGC_RXDEXT_STATERR_SEQ 0x04000000
#define IGC_RXDEXT_STATERR_CXE 0x10000000
#define IGC_RXDEXT_STATERR_TCPE 0x20000000
#define IGC_RXDEXT_STATERR_IPE 0x40000000
#define IGC_RXDEXT_STATERR_RXE 0x80000000
/* Same mask, but for extended and packet split descriptors */
#define IGC_RXDEXT_ERR_FRAME_ERR_MASK ( \
IGC_RXDEXT_STATERR_CE | \
IGC_RXDEXT_STATERR_SE | \
IGC_RXDEXT_STATERR_SEQ | \
IGC_RXDEXT_STATERR_CXE | \
IGC_RXDEXT_STATERR_RXE)
/* Header split receive */
#define IGC_RFCTL_IPV6_EX_DIS 0x00010000
#define IGC_RFCTL_LEF 0x00040000
#define IGC_RCTL_SZ_256 0x00030000 /* Rx buffer size 256 */
#define IGC_RCTL_MO_SHIFT 12 /* multicast offset shift */
#define IGC_RCTL_CFIEN 0x00080000 /* canonical form enable */
#define IGC_RCTL_DPF 0x00400000 /* discard pause frames */
#define IGC_RCTL_PMCF 0x00800000 /* pass MAC control frames */
#define IGC_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
#define I225_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */
#define I225_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */
/* GPY211 - I225 defines */
#define GPY_MMD_MASK 0xFFFF0000
#define GPY_MMD_SHIFT 16
#define GPY_REG_MASK 0x0000FFFF
#define IGC_MMDAC_FUNC_DATA 0x4000 /* Data, no post increment */
/* MAC definitions */
#define IGC_FACTPS_MNGCG 0x20000000
#define IGC_FWSM_MODE_MASK 0xE
#define IGC_FWSM_MODE_SHIFT 1
/* Management Control */
#define IGC_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */
#define IGC_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */
/* PHY */
#define PHY_REVISION_MASK 0xFFFFFFF0
#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */
#define IGC_GEN_POLL_TIMEOUT 1920
/* PHY Control Register */
#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
#define MII_CR_POWER_DOWN 0x0800 /* Power down */
#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
#define MII_CR_SPEED_1000 0x0040
#define MII_CR_SPEED_100 0x2000
#define MII_CR_SPEED_10 0x0000
/* PHY Status Register */
#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
/* PHY 1000 MII Register/Bit Definitions */
/* PHY Registers defined by IEEE */
#define PHY_CONTROL 0x00 /* Control Register */
#define PHY_STATUS 0x01 /* Status Register */
#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */
#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */
#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */
#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */
#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */
#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
/* Bit definitions for valid PHY IDs. I = Integrated E = External */
#define I225_I_PHY_ID 0x67C9DC00
/* MDI Control */
#define IGC_MDIC_DATA_MASK 0x0000FFFF
#define IGC_MDIC_REG_MASK 0x001F0000
#define IGC_MDIC_REG_SHIFT 16
#define IGC_MDIC_PHY_MASK 0x03E00000
#define IGC_MDIC_PHY_SHIFT 21
#define IGC_MDIC_OP_WRITE 0x04000000
#define IGC_MDIC_OP_READ 0x08000000
#define IGC_MDIC_READY 0x10000000
#define IGC_MDIC_INT_EN 0x20000000
#define IGC_MDIC_ERROR 0x40000000
#define IGC_MDIC_DEST 0x80000000
#define IGC_N0_QUEUE -1
#endif /* _IGC_DEFINES_H_ */
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018 Intel Corporation */
#ifndef _IGC_HW_H_
#define _IGC_HW_H_
#include <linux/types.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include "igc_regs.h"
#include "igc_defines.h"
#include "igc_mac.h"
#include "igc_phy.h"
#include "igc_nvm.h"
#include "igc_i225.h"
#include "igc_base.h"
#define IGC_DEV_ID_I225_LM 0x15F2
#define IGC_DEV_ID_I225_V 0x15F3
#define IGC_FUNC_0 0
/* Function pointers for the MAC. */
struct igc_mac_operations {
s32 (*check_for_link)(struct igc_hw *hw);
s32 (*reset_hw)(struct igc_hw *hw);
s32 (*init_hw)(struct igc_hw *hw);
s32 (*setup_physical_interface)(struct igc_hw *hw);
void (*rar_set)(struct igc_hw *hw, u8 *address, u32 index);
s32 (*read_mac_addr)(struct igc_hw *hw);
s32 (*get_speed_and_duplex)(struct igc_hw *hw, u16 *speed,
u16 *duplex);
s32 (*acquire_swfw_sync)(struct igc_hw *hw, u16 mask);
void (*release_swfw_sync)(struct igc_hw *hw, u16 mask);
};
enum igc_mac_type {
igc_undefined = 0,
igc_i225,
igc_num_macs /* List is 1-based, so subtract 1 for true count. */
};
enum igc_phy_type {
igc_phy_unknown = 0,
igc_phy_none,
igc_phy_i225,
};
enum igc_media_type {
igc_media_type_unknown = 0,
igc_media_type_copper = 1,
igc_num_media_types
};
enum igc_nvm_type {
igc_nvm_unknown = 0,
igc_nvm_flash_hw,
igc_nvm_invm,
};
struct igc_info {
s32 (*get_invariants)(struct igc_hw *hw);
struct igc_mac_operations *mac_ops;
const struct igc_phy_operations *phy_ops;
struct igc_nvm_operations *nvm_ops;
};
extern const struct igc_info igc_base_info;
struct igc_mac_info {
struct igc_mac_operations ops;
u8 addr[ETH_ALEN];
u8 perm_addr[ETH_ALEN];
enum igc_mac_type type;
u32 collision_delta;
u32 ledctl_default;
u32 ledctl_mode1;
u32 ledctl_mode2;
u32 mc_filter_type;
u32 tx_packet_delta;
u32 txcw;
u16 mta_reg_count;
u16 uta_reg_count;
u16 rar_entry_count;
u8 forced_speed_duplex;
bool adaptive_ifs;
bool has_fwsm;
bool asf_firmware_present;
bool arc_subsystem_valid;
bool autoneg;
bool autoneg_failed;
bool get_link_status;
};
struct igc_nvm_operations {
s32 (*acquire)(struct igc_hw *hw);
s32 (*read)(struct igc_hw *hw, u16 offset, u16 i, u16 *data);
void (*release)(struct igc_hw *hw);
s32 (*write)(struct igc_hw *hw, u16 offset, u16 i, u16 *data);
s32 (*update)(struct igc_hw *hw);
s32 (*validate)(struct igc_hw *hw);
s32 (*valid_led_default)(struct igc_hw *hw, u16 *data);
};
struct igc_phy_operations {
s32 (*acquire)(struct igc_hw *hw);
s32 (*check_polarity)(struct igc_hw *hw);
s32 (*check_reset_block)(struct igc_hw *hw);
s32 (*force_speed_duplex)(struct igc_hw *hw);
s32 (*get_cfg_done)(struct igc_hw *hw);
s32 (*get_cable_length)(struct igc_hw *hw);
s32 (*get_phy_info)(struct igc_hw *hw);
s32 (*read_reg)(struct igc_hw *hw, u32 address, u16 *data);
void (*release)(struct igc_hw *hw);
s32 (*reset)(struct igc_hw *hw);
s32 (*write_reg)(struct igc_hw *hw, u32 address, u16 data);
};
struct igc_nvm_info {
struct igc_nvm_operations ops;
enum igc_nvm_type type;
u32 flash_bank_size;
u32 flash_base_addr;
u16 word_size;
u16 delay_usec;
u16 address_bits;
u16 opcode_bits;
u16 page_size;
};
struct igc_phy_info {
struct igc_phy_operations ops;
enum igc_phy_type type;
u32 addr;
u32 id;
u32 reset_delay_us; /* in usec */
u32 revision;
enum igc_media_type media_type;
u16 autoneg_advertised;
u16 autoneg_mask;
u16 cable_length;
u16 max_cable_length;
u16 min_cable_length;
u16 pair_length[4];
u8 mdix;
bool disable_polarity_correction;
bool is_mdix;
bool polarity_correction;
bool reset_disable;
bool speed_downgraded;
bool autoneg_wait_to_complete;
};
struct igc_bus_info {
u16 func;
u16 pci_cmd_word;
};
enum igc_fc_mode {
igc_fc_none = 0,
igc_fc_rx_pause,
igc_fc_tx_pause,
igc_fc_full,
igc_fc_default = 0xFF
};
struct igc_fc_info {
u32 high_water; /* Flow control high-water mark */
u32 low_water; /* Flow control low-water mark */
u16 pause_time; /* Flow control pause timer */
bool send_xon; /* Flow control send XON */
bool strict_ieee; /* Strict IEEE mode */
enum igc_fc_mode current_mode; /* Type of flow control */
enum igc_fc_mode requested_mode;
};
struct igc_dev_spec_base {
bool global_device_reset;
bool eee_disable;
bool clear_semaphore_once;
bool module_plugged;
u8 media_port;
bool mas_capable;
};
struct igc_hw {
void *back;
u8 __iomem *hw_addr;
unsigned long io_base;
struct igc_mac_info mac;
struct igc_fc_info fc;
struct igc_nvm_info nvm;
struct igc_phy_info phy;
struct igc_bus_info bus;
union {
struct igc_dev_spec_base _base;
} dev_spec;
u16 device_id;
u16 subsystem_vendor_id;
u16 subsystem_device_id;
u16 vendor_id;
u8 revision_id;
};
/* Statistics counters collected by the MAC */
struct igc_hw_stats {
u64 crcerrs;
u64 algnerrc;
u64 symerrs;
u64 rxerrc;
u64 mpc;
u64 scc;
u64 ecol;
u64 mcc;
u64 latecol;
u64 colc;
u64 dc;
u64 tncrs;
u64 sec;
u64 cexterr;
u64 rlec;
u64 xonrxc;
u64 xontxc;
u64 xoffrxc;
u64 xofftxc;
u64 fcruc;
u64 prc64;
u64 prc127;
u64 prc255;
u64 prc511;
u64 prc1023;
u64 prc1522;
u64 gprc;
u64 bprc;
u64 mprc;
u64 gptc;
u64 gorc;
u64 gotc;
u64 rnbc;
u64 ruc;
u64 rfc;
u64 roc;
u64 rjc;
u64 mgprc;
u64 mgpdc;
u64 mgptc;
u64 tor;
u64 tot;
u64 tpr;
u64 tpt;
u64 ptc64;
u64 ptc127;
u64 ptc255;
u64 ptc511;
u64 ptc1023;
u64 ptc1522;
u64 mptc;
u64 bptc;
u64 tsctc;
u64 tsctfc;
u64 iac;
u64 icrxptc;
u64 icrxatc;
u64 ictxptc;
u64 ictxatc;
u64 ictxqec;
u64 ictxqmtc;
u64 icrxdmtc;
u64 icrxoc;
u64 cbtmpc;
u64 htdpmc;
u64 cbrdpc;
u64 cbrmpc;
u64 rpthc;
u64 hgptc;
u64 htcbdpc;
u64 hgorc;
u64 hgotc;
u64 lenerrs;
u64 scvpc;
u64 hrmpc;
u64 doosync;
u64 o2bgptc;
u64 o2bspc;
u64 b2ospc;
u64 b2ogprc;
};
struct net_device *igc_get_hw_dev(struct igc_hw *hw);
#define hw_dbg(format, arg...) \
netdev_dbg(igc_get_hw_dev(hw), format, ##arg)
s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value);
s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value);
void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value);
void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value);
#endif /* _IGC_HW_H_ */
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Intel Corporation */
#include <linux/delay.h>
#include "igc_hw.h"
/**
* igc_acquire_nvm_i225 - Acquire exclusive access to EEPROM
* @hw: pointer to the HW structure
*
* Acquire the necessary semaphores for exclusive access to the EEPROM.
* Set the EEPROM access request bit and wait for EEPROM access grant bit.
* Return successful if access grant bit set, else clear the request for
* EEPROM access and return -IGC_ERR_NVM (-1).
*/
static s32 igc_acquire_nvm_i225(struct igc_hw *hw)
{
return igc_acquire_swfw_sync_i225(hw, IGC_SWFW_EEP_SM);
}
/**
* igc_release_nvm_i225 - Release exclusive access to EEPROM
* @hw: pointer to the HW structure
*
* Stop any current commands to the EEPROM and clear the EEPROM request bit,
* then release the semaphores acquired.
*/
static void igc_release_nvm_i225(struct igc_hw *hw)
{
igc_release_swfw_sync_i225(hw, IGC_SWFW_EEP_SM);
}
/**
* igc_get_hw_semaphore_i225 - Acquire hardware semaphore
* @hw: pointer to the HW structure
*
* Acquire the HW semaphore to access the PHY or NVM
*/
static s32 igc_get_hw_semaphore_i225(struct igc_hw *hw)
{
s32 timeout = hw->nvm.word_size + 1;
s32 i = 0;
u32 swsm;
/* Get the SW semaphore */
while (i < timeout) {
swsm = rd32(IGC_SWSM);
if (!(swsm & IGC_SWSM_SMBI))
break;
usleep_range(500, 600);
i++;
}
if (i == timeout) {
/* In rare circumstances, the SW semaphore may already be held
* unintentionally. Clear the semaphore once before giving up.
*/
if (hw->dev_spec._base.clear_semaphore_once) {
hw->dev_spec._base.clear_semaphore_once = false;
igc_put_hw_semaphore(hw);
for (i = 0; i < timeout; i++) {
swsm = rd32(IGC_SWSM);
if (!(swsm & IGC_SWSM_SMBI))
break;
usleep_range(500, 600);
}
}
/* If we do not have the semaphore here, we have to give up. */
if (i == timeout) {
hw_dbg("Driver can't access device - SMBI bit is set.\n");
return -IGC_ERR_NVM;
}
}
/* Get the FW semaphore. */
for (i = 0; i < timeout; i++) {
swsm = rd32(IGC_SWSM);
wr32(IGC_SWSM, swsm | IGC_SWSM_SWESMBI);
/* Semaphore acquired if bit latched */
if (rd32(IGC_SWSM) & IGC_SWSM_SWESMBI)
break;
usleep_range(500, 600);
}
if (i == timeout) {
/* Release semaphores */
igc_put_hw_semaphore(hw);
hw_dbg("Driver can't access the NVM\n");
return -IGC_ERR_NVM;
}
return 0;
}
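igc_put_hw_semaphore() is defined in igc_mac.c, outside this hunk. For
reference, the igb counterpart simply clears both SWSM bits, and the igc
version presumably looks like this sketch:

	static void igc_put_hw_semaphore(struct igc_hw *hw)
	{
		u32 swsm = rd32(IGC_SWSM);

		swsm &= ~(IGC_SWSM_SMBI | IGC_SWSM_SWESMBI);
		wr32(IGC_SWSM, swsm);
	}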
/**
* igc_acquire_swfw_sync_i225 - Acquire SW/FW semaphore
* @hw: pointer to the HW structure
* @mask: specifies which semaphore to acquire
*
* Acquire the SW/FW semaphore to access the PHY or NVM. The mask
* will also specify which port we're acquiring the lock for.
*/
s32 igc_acquire_swfw_sync_i225(struct igc_hw *hw, u16 mask)
{
s32 i = 0, timeout = 200;
u32 fwmask = mask << 16;
u32 swmask = mask;
s32 ret_val = 0;
u32 swfw_sync;
while (i < timeout) {
if (igc_get_hw_semaphore_i225(hw)) {
ret_val = -IGC_ERR_SWFW_SYNC;
goto out;
}
swfw_sync = rd32(IGC_SW_FW_SYNC);
if (!(swfw_sync & (fwmask | swmask)))
break;
/* Firmware currently using resource (fwmask) */
igc_put_hw_semaphore(hw);
mdelay(5);
i++;
}
if (i == timeout) {
hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
ret_val = -IGC_ERR_SWFW_SYNC;
goto out;
}
swfw_sync |= swmask;
wr32(IGC_SW_FW_SYNC, swfw_sync);
igc_put_hw_semaphore(hw);
out:
return ret_val;
}
/**
* igc_release_swfw_sync_i225 - Release SW/FW semaphore
* @hw: pointer to the HW structure
* @mask: specifies which semaphore to release
*
* Release the SW/FW semaphore used to access the PHY or NVM. The mask
* will also specify which port we're releasing the lock for.
*/
void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask)
{
u32 swfw_sync;
while (igc_get_hw_semaphore_i225(hw))
; /* Empty */
swfw_sync = rd32(IGC_SW_FW_SYNC);
swfw_sync &= ~mask;
wr32(IGC_SW_FW_SYNC, swfw_sync);
igc_put_hw_semaphore(hw);
}
/**
* igc_read_nvm_srrd_i225 - Reads Shadow Ram using EERD register
* @hw: pointer to the HW structure
* @offset: offset of word in the Shadow Ram to read
* @words: number of words to read
* @data: word read from the Shadow Ram
*
* Reads a 16 bit word from the Shadow Ram using the EERD register.
* Uses necessary synchronization semaphores.
*/
static s32 igc_read_nvm_srrd_i225(struct igc_hw *hw, u16 offset, u16 words,
u16 *data)
{
s32 status = 0;
u16 i, count;
/* We cannot hold synchronization semaphores for too long,
* because of the forceful takeover procedure. However, it is more efficient
* to read in bursts than synchronizing access for each word.
*/
for (i = 0; i < words; i += IGC_EERD_EEWR_MAX_COUNT) {
count = (words - i) / IGC_EERD_EEWR_MAX_COUNT > 0 ?
IGC_EERD_EEWR_MAX_COUNT : (words - i);
status = hw->nvm.ops.acquire(hw);
if (status)
break;
status = igc_read_nvm_eerd(hw, offset, count, data + i);
hw->nvm.ops.release(hw);
if (status)
break;
}
return status;
}
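Example of how the burst loop carves up a request (numbers only, using
IGC_EERD_EEWR_MAX_COUNT = 512 from igc_defines.h):

	/* words = 600:  pass 1 reads 512 words, pass 2 reads the remaining 88;
	 * the NVM semaphore is released and re-acquired between the two bursts
	 * so firmware can take the resource over if it needs to.
	 */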
/**
* igc_write_nvm_srwr - Write to Shadow Ram using EEWR
* @hw: pointer to the HW structure
* @offset: offset within the Shadow Ram to be written to
* @words: number of words to write
* @data: 16 bit word(s) to be written to the Shadow Ram
*
* Writes data to Shadow Ram at offset using EEWR register.
*
* If igc_update_nvm_checksum is not called after this function, the
* Shadow Ram will most likely contain an invalid checksum.
*/
static s32 igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words,
u16 *data)
{
struct igc_nvm_info *nvm = &hw->nvm;
u32 attempts = 100000;
u32 i, k, eewr = 0;
s32 ret_val = 0;
/* A check for invalid values: offset too large, too many words,
* too many words for the offset, and not enough words.
*/
if (offset >= nvm->word_size || (words > (nvm->word_size - offset)) ||
words == 0) {
hw_dbg("nvm parameter(s) out of bounds\n");
ret_val = -IGC_ERR_NVM;
goto out;
}
for (i = 0; i < words; i++) {
eewr = ((offset + i) << IGC_NVM_RW_ADDR_SHIFT) |
(data[i] << IGC_NVM_RW_REG_DATA) |
IGC_NVM_RW_REG_START;
wr32(IGC_SRWR, eewr);
for (k = 0; k < attempts; k++) {
if (IGC_NVM_RW_REG_DONE &
rd32(IGC_SRWR)) {
ret_val = 0;
break;
}
udelay(5);
}
if (ret_val) {
hw_dbg("Shadow RAM write EEWR timed out\n");
break;
}
}
out:
return ret_val;
}
/**
* igc_write_nvm_srwr_i225 - Write to Shadow RAM using EEWR
* @hw: pointer to the HW structure
* @offset: offset within the Shadow RAM to be written to
* @words: number of words to write
* @data: 16 bit word(s) to be written to the Shadow RAM
*
* Writes data to Shadow RAM at offset using EEWR register.
*
* If igc_update_nvm_checksum is not called after this function, the
* data will not be committed to FLASH and also Shadow RAM will most likely
* contain an invalid checksum.
*
* If error code is returned, data and Shadow RAM may be inconsistent - buffer
* partially written.
*/
static s32 igc_write_nvm_srwr_i225(struct igc_hw *hw, u16 offset, u16 words,
u16 *data)
{
s32 status = 0;
u16 i, count;
/* We cannot hold synchronization semaphores for too long,
* because of the forceful takeover procedure. However, it is more efficient
* to write in bursts than synchronizing access for each word.
*/
for (i = 0; i < words; i += IGC_EERD_EEWR_MAX_COUNT) {
count = (words - i) / IGC_EERD_EEWR_MAX_COUNT > 0 ?
IGC_EERD_EEWR_MAX_COUNT : (words - i);
status = hw->nvm.ops.acquire(hw);
if (status)
break;
status = igc_write_nvm_srwr(hw, offset, count, data + i);
hw->nvm.ops.release(hw);
if (status)
break;
}
return status;
}
/**
* igc_validate_nvm_checksum_i225 - Validate EEPROM checksum
* @hw: pointer to the HW structure
*
* Calculates the EEPROM checksum by reading/adding each word of the EEPROM
* and then verifies that the sum of the EEPROM is equal to 0xBABA.
*/
static s32 igc_validate_nvm_checksum_i225(struct igc_hw *hw)
{
s32 (*read_op_ptr)(struct igc_hw *hw, u16 offset, u16 count,
u16 *data);
s32 status = 0;
status = hw->nvm.ops.acquire(hw);
if (status)
goto out;
/* Replace the read function with semaphore grabbing with
* the one that skips this for a while.
* We have semaphore taken already here.
*/
read_op_ptr = hw->nvm.ops.read;
hw->nvm.ops.read = igc_read_nvm_eerd;
status = igc_validate_nvm_checksum(hw);
/* Revert original read operation. */
hw->nvm.ops.read = read_op_ptr;
hw->nvm.ops.release(hw);
out:
return status;
}
/**
* igc_pool_flash_update_done_i225 - Poll FLUDONE status
* @hw: pointer to the HW structure
*/
static s32 igc_pool_flash_update_done_i225(struct igc_hw *hw)
{
s32 ret_val = -IGC_ERR_NVM;
u32 i, reg;
for (i = 0; i < IGC_FLUDONE_ATTEMPTS; i++) {
reg = rd32(IGC_EECD);
if (reg & IGC_EECD_FLUDONE_I225) {
ret_val = 0;
break;
}
udelay(5);
}
return ret_val;
}
/**
* igc_update_flash_i225 - Commit EEPROM to the flash
* @hw: pointer to the HW structure
*/
static s32 igc_update_flash_i225(struct igc_hw *hw)
{
s32 ret_val = 0;
u32 flup;
ret_val = igc_pool_flash_update_done_i225(hw);
if (ret_val == -IGC_ERR_NVM) {
hw_dbg("Flash update time out\n");
goto out;
}
flup = rd32(IGC_EECD) | IGC_EECD_FLUPD_I225;
wr32(IGC_EECD, flup);
ret_val = igc_pool_flash_update_done_i225(hw);
if (ret_val)
hw_dbg("Flash update time out\n");
else
hw_dbg("Flash update complete\n");
out:
return ret_val;
}
/**
* igc_update_nvm_checksum_i225 - Update EEPROM checksum
* @hw: pointer to the HW structure
*
* Updates the EEPROM checksum by reading/adding each word of the EEPROM
* up to the checksum. Then calculates the EEPROM checksum and writes the
* value to the EEPROM. Next, the EEPROM data is committed to the flash.
*/
static s32 igc_update_nvm_checksum_i225(struct igc_hw *hw)
{
u16 checksum = 0;
s32 ret_val = 0;
u16 i, nvm_data;
/* Read the first word from the EEPROM. If this times out or fails, do
* not continue or we could be in for a very long wait while every
* EEPROM read fails
*/
ret_val = igc_read_nvm_eerd(hw, 0, 1, &nvm_data);
if (ret_val) {
hw_dbg("EEPROM read failed\n");
goto out;
}
ret_val = hw->nvm.ops.acquire(hw);
if (ret_val)
goto out;
/* Do not use hw->nvm.ops.write, hw->nvm.ops.read
* because we do not want to take the synchronization
* semaphores twice here.
*/
for (i = 0; i < NVM_CHECKSUM_REG; i++) {
ret_val = igc_read_nvm_eerd(hw, i, 1, &nvm_data);
if (ret_val) {
hw->nvm.ops.release(hw);
hw_dbg("NVM Read Error while updating checksum.\n");
goto out;
}
checksum += nvm_data;
}
checksum = (u16)NVM_SUM - checksum;
ret_val = igc_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
&checksum);
if (ret_val) {
hw->nvm.ops.release(hw);
hw_dbg("NVM Write Error while updating checksum.\n");
goto out;
}
hw->nvm.ops.release(hw);
ret_val = igc_update_flash_i225(hw);
out:
return ret_val;
}
/**
* igc_get_flash_presence_i225 - Check if flash device is detected
* @hw: pointer to the HW structure
*/
bool igc_get_flash_presence_i225(struct igc_hw *hw)
{
bool ret_val = false;
u32 eec = 0;
eec = rd32(IGC_EECD);
if (eec & IGC_EECD_FLASH_DETECTED_I225)
ret_val = true;
return ret_val;
}
/**
* igc_init_nvm_params_i225 - Init NVM func ptrs.
* @hw: pointer to the HW structure
*/
s32 igc_init_nvm_params_i225(struct igc_hw *hw)
{
struct igc_nvm_info *nvm = &hw->nvm;
nvm->ops.acquire = igc_acquire_nvm_i225;
nvm->ops.release = igc_release_nvm_i225;
/* NVM Function Pointers */
if (igc_get_flash_presence_i225(hw)) {
hw->nvm.type = igc_nvm_flash_hw;
nvm->ops.read = igc_read_nvm_srrd_i225;
nvm->ops.write = igc_write_nvm_srwr_i225;
nvm->ops.validate = igc_validate_nvm_checksum_i225;
nvm->ops.update = igc_update_nvm_checksum_i225;
} else {
hw->nvm.type = igc_nvm_invm;
nvm->ops.read = igc_read_nvm_eerd;
nvm->ops.write = NULL;
nvm->ops.validate = NULL;
nvm->ops.update = NULL;
}
return 0;
}
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018 Intel Corporation */
#ifndef _IGC_I225_H_
#define _IGC_I225_H_
s32 igc_acquire_swfw_sync_i225(struct igc_hw *hw, u16 mask);
void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask);
s32 igc_init_nvm_params_i225(struct igc_hw *hw);
bool igc_get_flash_presence_i225(struct igc_hw *hw);
#endif
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Intel Corporation */
#include <linux/pci.h>
#include <linux/delay.h>
#include "igc_mac.h"
#include "igc_hw.h"
/* forward declaration */
static s32 igc_set_default_fc(struct igc_hw *hw);
static s32 igc_set_fc_watermarks(struct igc_hw *hw);
/**
* igc_disable_pcie_master - Disables PCI-express master access
* @hw: pointer to the HW structure
*
* Returns 0 if successful, else returns -IGC_ERR_MASTER_REQUESTS_PENDING
* (-10) if the master disable bit has not caused the master requests to
* be disabled.
*
* Disables PCI-Express master access and verifies there are no pending
* requests.
*/
s32 igc_disable_pcie_master(struct igc_hw *hw)
{
s32 timeout = MASTER_DISABLE_TIMEOUT;
s32 ret_val = 0;
u32 ctrl;
ctrl = rd32(IGC_CTRL);
ctrl |= IGC_CTRL_GIO_MASTER_DISABLE;
wr32(IGC_CTRL, ctrl);
while (timeout) {
if (!(rd32(IGC_STATUS) &
IGC_STATUS_GIO_MASTER_ENABLE))
break;
usleep_range(2000, 3000);
timeout--;
}
if (!timeout) {
hw_dbg("Master requests are pending.\n");
ret_val = -IGC_ERR_MASTER_REQUESTS_PENDING;
goto out;
}
out:
return ret_val;
}
/**
* igc_init_rx_addrs - Initialize receive addresses
* @hw: pointer to the HW structure
* @rar_count: number of receive address registers to set up
*
* Sets up the receive address registers by setting the base receive address
* register to the device's MAC address and clearing all the other receive
* address registers to 0.
*/
void igc_init_rx_addrs(struct igc_hw *hw, u16 rar_count)
{
u8 mac_addr[ETH_ALEN] = {0};
u32 i;
/* Setup the receive address */
hw_dbg("Programming MAC Address into RAR[0]\n");
hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
/* Zero out the other (rar_entry_count - 1) receive addresses */
hw_dbg("Clearing RAR[1-%u]\n", rar_count - 1);
for (i = 1; i < rar_count; i++)
hw->mac.ops.rar_set(hw, mac_addr, i);
}
/**
* igc_setup_link - Setup flow control and link settings
* @hw: pointer to the HW structure
*
* Determines which flow control settings to use, then configures flow
* control. Calls the appropriate media-specific link configuration
* function. Assuming the adapter has a valid link partner, a valid link
* should be established. Assumes the hardware has previously been reset
* and the transmitter and receiver are not enabled.
*/
s32 igc_setup_link(struct igc_hw *hw)
{
s32 ret_val = 0;
/* In the case of the phy reset being blocked, we already have a link.
* We do not need to set it up again.
*/
if (igc_check_reset_block(hw))
goto out;
/* If requested flow control is set to default, set flow control
* based on the EEPROM flow control settings.
*/
if (hw->fc.requested_mode == igc_fc_default) {
ret_val = igc_set_default_fc(hw);
if (ret_val)
goto out;
}
/* We want to save off the original Flow Control configuration just
* in case we get disconnected and then reconnected into a different
* hub or switch with different Flow Control capabilities.
*/
hw->fc.current_mode = hw->fc.requested_mode;
hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode);
/* Call the necessary media_type subroutine to configure the link. */
ret_val = hw->mac.ops.setup_physical_interface(hw);
if (ret_val)
goto out;
/* Initialize the flow control address, type, and PAUSE timer
* registers to their default values. This is done even if flow
* control is disabled, because it does not hurt anything to
* initialize these registers.
*/
hw_dbg("Initializing the Flow Control address, type and timer regs\n");
wr32(IGC_FCT, FLOW_CONTROL_TYPE);
wr32(IGC_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
wr32(IGC_FCAL, FLOW_CONTROL_ADDRESS_LOW);
wr32(IGC_FCTTV, hw->fc.pause_time);
ret_val = igc_set_fc_watermarks(hw);
out:
return ret_val;
}
/**
* igc_set_default_fc - Set flow control default values
* @hw: pointer to the HW structure
*
* Read the EEPROM for the default values for flow control and store the
* values.
*/
static s32 igc_set_default_fc(struct igc_hw *hw)
{
hw->fc.requested_mode = igc_fc_full;
return 0;
}
/**
* igc_force_mac_fc - Force the MAC's flow control settings
* @hw: pointer to the HW structure
*
* Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the
* device control register to reflect the adapter settings. TFCE and RFCE
* need to be explicitly set by software when a copper PHY is used because
* autonegotiation is managed by the PHY rather than the MAC. Software must
* also configure these bits when link is forced on a fiber connection.
*/
s32 igc_force_mac_fc(struct igc_hw *hw)
{
s32 ret_val = 0;
u32 ctrl;
ctrl = rd32(IGC_CTRL);
/* Because we didn't get link via the internal auto-negotiation
* mechanism (we either forced link or we got link via PHY
* auto-neg), we have to manually enable/disable transmit and
* receive flow control.
*
* The "Case" statement below enables/disables flow control
* according to the "hw->fc.current_mode" parameter.
*
* The possible values of the "fc" parameter are:
* 0: Flow control is completely disabled
* 1: Rx flow control is enabled (we can receive pause
* frames but not send pause frames).
* 2: Tx flow control is enabled (we can send pause frames
* but we do not receive pause frames).
* 3: Both Rx and Tx flow control (symmetric) are enabled.
* other: No other values should be possible at this point.
*/
hw_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode);
switch (hw->fc.current_mode) {
case igc_fc_none:
ctrl &= (~(IGC_CTRL_TFCE | IGC_CTRL_RFCE));
break;
case igc_fc_rx_pause:
ctrl &= (~IGC_CTRL_TFCE);
ctrl |= IGC_CTRL_RFCE;
break;
case igc_fc_tx_pause:
ctrl &= (~IGC_CTRL_RFCE);
ctrl |= IGC_CTRL_TFCE;
break;
case igc_fc_full:
ctrl |= (IGC_CTRL_TFCE | IGC_CTRL_RFCE);
break;
default:
hw_dbg("Flow control param set incorrectly\n");
ret_val = -IGC_ERR_CONFIG;
goto out;
}
wr32(IGC_CTRL, ctrl);
out:
return ret_val;
}
/**
* igc_set_fc_watermarks - Set flow control high/low watermarks
* @hw: pointer to the HW structure
*
* Sets the flow control high/low threshold (watermark) registers. If
* flow control XON frame transmission is enabled, then set XON frame
* transmission as well.
*/
static s32 igc_set_fc_watermarks(struct igc_hw *hw)
{
u32 fcrtl = 0, fcrth = 0;
/* Set the flow control receive threshold registers. Normally,
* these registers will be set to a default threshold that may be
* adjusted later by the driver's runtime code. However, if the
* ability to transmit pause frames is not enabled, then these
* registers will be set to 0.
*/
if (hw->fc.current_mode & igc_fc_tx_pause) {
/* We need to set up the Receive Threshold high and low water
* marks as well as (optionally) enabling the transmission of
* XON frames.
*/
fcrtl = hw->fc.low_water;
if (hw->fc.send_xon)
fcrtl |= IGC_FCRTL_XONE;
fcrth = hw->fc.high_water;
}
wr32(IGC_FCRTL, fcrtl);
wr32(IGC_FCRTH, fcrth);
return 0;
}
/**
* igc_clear_hw_cntrs_base - Clear base hardware counters
* @hw: pointer to the HW structure
*
* Clears the base hardware counters by reading the counter registers.
*/
void igc_clear_hw_cntrs_base(struct igc_hw *hw)
{
rd32(IGC_CRCERRS);
rd32(IGC_SYMERRS);
rd32(IGC_MPC);
rd32(IGC_SCC);
rd32(IGC_ECOL);
rd32(IGC_MCC);
rd32(IGC_LATECOL);
rd32(IGC_COLC);
rd32(IGC_DC);
rd32(IGC_SEC);
rd32(IGC_RLEC);
rd32(IGC_XONRXC);
rd32(IGC_XONTXC);
rd32(IGC_XOFFRXC);
rd32(IGC_XOFFTXC);
rd32(IGC_FCRUC);
rd32(IGC_GPRC);
rd32(IGC_BPRC);
rd32(IGC_MPRC);
rd32(IGC_GPTC);
rd32(IGC_GORCL);
rd32(IGC_GORCH);
rd32(IGC_GOTCL);
rd32(IGC_GOTCH);
rd32(IGC_RNBC);
rd32(IGC_RUC);
rd32(IGC_RFC);
rd32(IGC_ROC);
rd32(IGC_RJC);
rd32(IGC_TORL);
rd32(IGC_TORH);
rd32(IGC_TOTL);
rd32(IGC_TOTH);
rd32(IGC_TPR);
rd32(IGC_TPT);
rd32(IGC_MPTC);
rd32(IGC_BPTC);
rd32(IGC_PRC64);
rd32(IGC_PRC127);
rd32(IGC_PRC255);
rd32(IGC_PRC511);
rd32(IGC_PRC1023);
rd32(IGC_PRC1522);
rd32(IGC_PTC64);
rd32(IGC_PTC127);
rd32(IGC_PTC255);
rd32(IGC_PTC511);
rd32(IGC_PTC1023);
rd32(IGC_PTC1522);
rd32(IGC_ALGNERRC);
rd32(IGC_RXERRC);
rd32(IGC_TNCRS);
rd32(IGC_CEXTERR);
rd32(IGC_TSCTC);
rd32(IGC_TSCTFC);
rd32(IGC_MGTPRC);
rd32(IGC_MGTPDC);
rd32(IGC_MGTPTC);
rd32(IGC_IAC);
rd32(IGC_ICRXOC);
rd32(IGC_ICRXPTC);
rd32(IGC_ICRXATC);
rd32(IGC_ICTXPTC);
rd32(IGC_ICTXATC);
rd32(IGC_ICTXQEC);
rd32(IGC_ICTXQMTC);
rd32(IGC_ICRXDMTC);
rd32(IGC_CBTMPC);
rd32(IGC_HTDPMC);
rd32(IGC_CBRMPC);
rd32(IGC_RPTHC);
rd32(IGC_HGPTC);
rd32(IGC_HTCBDPC);
rd32(IGC_HGORCL);
rd32(IGC_HGORCH);
rd32(IGC_HGOTCL);
rd32(IGC_HGOTCH);
rd32(IGC_LENERRS);
}
/**
* igc_rar_set - Set receive address register
* @hw: pointer to the HW structure
* @addr: pointer to the receive address
* @index: receive address array register
*
* Sets the receive address array register at index to the address passed
* in by addr.
*/
void igc_rar_set(struct igc_hw *hw, u8 *addr, u32 index)
{
u32 rar_low, rar_high;
/* HW expects these in little endian so we reverse the byte order
* from network order (big endian) to little endian
*/
rar_low = ((u32)addr[0] |
((u32)addr[1] << 8) |
((u32)addr[2] << 16) | ((u32)addr[3] << 24));
rar_high = ((u32)addr[4] | ((u32)addr[5] << 8));
/* If MAC address zero, no need to set the AV bit */
if (rar_low || rar_high)
rar_high |= IGC_RAH_AV;
/* Some bridges will combine consecutive 32-bit writes into
* a single burst write, which will malfunction on some parts.
* The flushes avoid this.
*/
wr32(IGC_RAL(index), rar_low);
wrfl();
wr32(IGC_RAH(index), rar_high);
wrfl();
}
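/* Worked example (hypothetical address): for 00:1B:21:AB:CD:EF the
* packing above yields
*
*	rar_low  = 0xAB211B00;
*	rar_high = 0x0000EFCD | IGC_RAH_AV;
*
* i.e. addr[0] lands in the least significant byte of RAL and the
* address-valid bit is set because the address is non-zero.
*/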
/**
* igc_check_for_copper_link - Check for link (Copper)
* @hw: pointer to the HW structure
*
* Checks to see if the link status of the hardware has changed. If a
* change in link status has been detected, then we read the PHY registers
* to get the current speed/duplex if link exists.
*/
s32 igc_check_for_copper_link(struct igc_hw *hw)
{
struct igc_mac_info *mac = &hw->mac;
s32 ret_val;
bool link;
/* We only want to go out to the PHY registers to see if Auto-Neg
* has completed and/or if our link status has changed. The
* get_link_status flag is set upon receiving a Link Status
* Change or Rx Sequence Error interrupt.
*/
if (!mac->get_link_status) {
ret_val = 0;
goto out;
}
/* First we want to see if the MII Status Register reports
* link. If so, then we want to get the current speed/duplex
* of the PHY.
*/
ret_val = igc_phy_has_link(hw, 1, 0, &link);
if (ret_val)
goto out;
if (!link)
goto out; /* No link detected */
mac->get_link_status = false;
/* Check if there was a downshift; this must be checked
* immediately after link-up
*/
igc_check_downshift(hw);
/* If we are forcing speed/duplex, then we simply return since
* we have already determined whether we have link or not.
*/
if (!mac->autoneg) {
ret_val = -IGC_ERR_CONFIG;
goto out;
}
/* Auto-Neg is enabled. Auto Speed Detection takes care
* of MAC speed/duplex configuration. So we only need to
* configure Collision Distance in the MAC.
*/
igc_config_collision_dist(hw);
/* Configure Flow Control now that Auto-Neg has completed.
* First, we need to restore the desired flow control
* settings because we may have had to re-autoneg with a
* different link partner.
*/
ret_val = igc_config_fc_after_link_up(hw);
if (ret_val)
hw_dbg("Error configuring flow control\n");
out:
return ret_val;
}
/**
* igc_config_collision_dist - Configure collision distance
* @hw: pointer to the HW structure
*
* Configures the collision distance to the default value and is used
* during link setup. Currently no func pointer exists and all
* implementations are handled in the generic version of this function.
*/
void igc_config_collision_dist(struct igc_hw *hw)
{
u32 tctl;
tctl = rd32(IGC_TCTL);
tctl &= ~IGC_TCTL_COLD;
tctl |= IGC_COLLISION_DISTANCE << IGC_COLD_SHIFT;
wr32(IGC_TCTL, tctl);
wrfl();
}
/**
* igc_config_fc_after_link_up - Configures flow control after link
* @hw: pointer to the HW structure
*
* Checks the status of auto-negotiation after link up to ensure that the
* speed and duplex were not forced. If the link needed to be forced, then
* flow control needs to be forced also. If auto-negotiation is enabled
* and did not fail, then we configure flow control based on our link
* partner.
*/
s32 igc_config_fc_after_link_up(struct igc_hw *hw)
{
u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
struct igc_mac_info *mac = &hw->mac;
u16 speed, duplex;
s32 ret_val = 0;
/* Check for the case where we have fiber media and auto-neg failed
* so we had to force link. In this case, we need to force the
* configuration of the MAC to match the "fc" parameter.
*/
if (mac->autoneg_failed) {
if (hw->phy.media_type == igc_media_type_copper)
ret_val = igc_force_mac_fc(hw);
}
if (ret_val) {
hw_dbg("Error forcing flow control settings\n");
goto out;
}
/* Check for the case where we have copper media and auto-neg is
* enabled. In this case, we need to check and see if Auto-Neg
* has completed, and if so, how the PHY and link partner has
* flow control configured.
*/
if (hw->phy.media_type == igc_media_type_copper && mac->autoneg) {
/* Read the MII Status Register and check to see if AutoNeg
* has completed. We read this twice because this reg has
* some "sticky" (latched) bits.
*/
ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
&mii_status_reg);
if (ret_val)
goto out;
ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
&mii_status_reg);
if (ret_val)
goto out;
if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
hw_dbg("Copper PHY and Auto Neg has not completed.\n");
goto out;
}
/* The AutoNeg process has completed, so we now need to
* read both the Auto Negotiation Advertisement
* Register (Address 4) and the Auto_Negotiation Base
* Page Ability Register (Address 5) to determine how
* flow control was negotiated.
*/
ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV,
&mii_nway_adv_reg);
if (ret_val)
goto out;
ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY,
&mii_nway_lp_ability_reg);
if (ret_val)
goto out;
/* Two bits in the Auto Negotiation Advertisement Register
* (Address 4) and two bits in the Auto Negotiation Base
* Page Ability Register (Address 5) determine flow control
* for both the PHY and the link partner. The following
* table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
* 1999, describes these PAUSE resolution bits and how flow
* control is determined based upon these settings.
* NOTE: DC = Don't Care
*
* LOCAL DEVICE | LINK PARTNER
* PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
*-------|---------|-------|---------|--------------------
* 0 | 0 | DC | DC | igc_fc_none
* 0 | 1 | 0 | DC | igc_fc_none
* 0 | 1 | 1 | 0 | igc_fc_none
* 0 | 1 | 1 | 1 | igc_fc_tx_pause
* 1 | 0 | 0 | DC | igc_fc_none
* 1 | DC | 1 | DC | igc_fc_full
* 1 | 1 | 0 | 0 | igc_fc_none
* 1 | 1 | 0 | 1 | igc_fc_rx_pause
*
* Are both PAUSE bits set to 1? If so, this implies
* Symmetric Flow Control is enabled at both ends. The
* ASM_DIR bits are irrelevant per the spec.
*
* For Symmetric Flow Control:
*
* LOCAL DEVICE | LINK PARTNER
* PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
*-------|---------|-------|---------|--------------------
* 1 | DC | 1 | DC | igc_fc_full
*
*/
if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
/* Now we need to check if the user selected RX ONLY
* of pause frames. In this case, we had to advertise
* FULL flow control because we could not advertise RX
* ONLY. Hence, we must now check to see if we need to
* turn OFF the TRANSMISSION of PAUSE frames.
*/
if (hw->fc.requested_mode == igc_fc_full) {
hw->fc.current_mode = igc_fc_full;
hw_dbg("Flow Control = FULL.\n");
} else {
hw->fc.current_mode = igc_fc_rx_pause;
hw_dbg("Flow Control = RX PAUSE frames only.\n");
}
}
/* For receiving PAUSE frames ONLY.
*
* LOCAL DEVICE | LINK PARTNER
* PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
*-------|---------|-------|---------|--------------------
* 0 | 1 | 1 | 1 | igc_fc_tx_pause
*/
else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
(mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
(mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
hw->fc.current_mode = igc_fc_tx_pause;
hw_dbg("Flow Control = TX PAUSE frames only.\n");
}
/* For transmitting PAUSE frames ONLY.
*
* LOCAL DEVICE | LINK PARTNER
* PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
*-------|---------|-------|---------|--------------------
* 1 | 1 | 0 | 1 | igc_fc_rx_pause
*/
else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
(mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
!(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
(mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
hw->fc.current_mode = igc_fc_rx_pause;
hw_dbg("Flow Control = RX PAUSE frames only.\n");
}
/* Per the IEEE spec, at this point flow control should be
* disabled. However, we want to consider that we could
* be connected to a legacy switch that doesn't advertise
* desired flow control, but can be forced on the link
* partner. So if we advertised no flow control, that is
* what we will resolve to. If we advertised some kind of
* receive capability (Rx Pause Only or Full Flow Control)
* and the link partner advertised none, we will configure
* ourselves to enable Rx Flow Control only. We can do
* this safely for two reasons: If the link partner really
* didn't want flow control enabled, and we enable Rx, no
* harm done since we won't be receiving any PAUSE frames
* anyway. If the intent on the link partner was to have
* flow control enabled, then by us enabling RX only, we
* can at least receive pause frames and process them.
* This is a good idea because in most cases, since we are
* predominantly a server NIC, more times than not we will
* be asked to delay transmission of packets than asking
* our link partner to pause transmission of frames.
*/
else if ((hw->fc.requested_mode == igc_fc_none) ||
(hw->fc.requested_mode == igc_fc_tx_pause) ||
(hw->fc.strict_ieee)) {
hw->fc.current_mode = igc_fc_none;
hw_dbg("Flow Control = NONE.\n");
} else {
hw->fc.current_mode = igc_fc_rx_pause;
hw_dbg("Flow Control = RX PAUSE frames only.\n");
}
/* Now we need to do one last check... If we auto-
* negotiated to HALF DUPLEX, flow control should not be
* enabled per IEEE 802.3 spec.
*/
ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex);
if (ret_val) {
hw_dbg("Error getting link speed and duplex\n");
goto out;
}
if (duplex == HALF_DUPLEX)
hw->fc.current_mode = igc_fc_none;
/* Now we call a subroutine to actually force the MAC
* controller to use the correct flow control settings.
*/
ret_val = igc_force_mac_fc(hw);
if (ret_val) {
hw_dbg("Error forcing flow control settings\n");
goto out;
}
}
out:
return ret_val;
}
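/* Compact restatement of the resolution above (illustrative only, not
* driver code): with adv the local PHY_AUTONEG_ADV value and lpa the
* partner's PHY_LP_ABILITY value, the IEEE 802.3 Annex 28B outcome is:
*
*	static int resolve_pause(u16 adv, u16 lpa)
*	{
*		if ((adv & NWAY_AR_PAUSE) && (lpa & NWAY_LPAR_PAUSE))
*			return igc_fc_full;
*		if (!(adv & NWAY_AR_PAUSE) && (adv & NWAY_AR_ASM_DIR) &&
*		    (lpa & NWAY_LPAR_PAUSE) && (lpa & NWAY_LPAR_ASM_DIR))
*			return igc_fc_tx_pause;
*		if ((adv & NWAY_AR_PAUSE) && (adv & NWAY_AR_ASM_DIR) &&
*		    !(lpa & NWAY_LPAR_PAUSE) && (lpa & NWAY_LPAR_ASM_DIR))
*			return igc_fc_rx_pause;
*		return igc_fc_none;
*	}
*
* igc_config_fc_after_link_up() additionally downgrades igc_fc_full to
* igc_fc_rx_pause when only Rx pause was requested, falls back to
* igc_fc_rx_pause for legacy partners unless hw->fc.strict_ieee is set,
* and disables flow control entirely on a half-duplex link.
*/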
/**
* igc_get_auto_rd_done - Check for auto read completion
* @hw: pointer to the HW structure
*
* Check EEPROM for Auto Read done bit.
*/
s32 igc_get_auto_rd_done(struct igc_hw *hw)
{
s32 ret_val = 0;
s32 i = 0;
while (i < AUTO_READ_DONE_TIMEOUT) {
if (rd32(IGC_EECD) & IGC_EECD_AUTO_RD)
break;
usleep_range(1000, 2000);
i++;
}
if (i == AUTO_READ_DONE_TIMEOUT) {
hw_dbg("Auto read by HW from NVM has not completed.\n");
ret_val = -IGC_ERR_RESET;
goto out;
}
out:
return ret_val;
}
/**
* igc_get_speed_and_duplex_copper - Retrieve current speed/duplex
* @hw: pointer to the HW structure
* @speed: stores the current speed
* @duplex: stores the current duplex
*
* Read the status register for the current speed/duplex and store the current
* speed and duplex for copper connections.
*/
s32 igc_get_speed_and_duplex_copper(struct igc_hw *hw, u16 *speed,
u16 *duplex)
{
u32 status;
status = rd32(IGC_STATUS);
if (status & IGC_STATUS_SPEED_1000) {
/* For I225, STATUS will indicate 1G speed in both 1 Gbps
* and 2.5 Gbps link modes. An additional bit is used
* to differentiate between 1 Gbps and 2.5 Gbps.
*/
if (hw->mac.type == igc_i225 &&
(status & IGC_STATUS_SPEED_2500)) {
*speed = SPEED_2500;
hw_dbg("2500 Mbs, ");
} else {
*speed = SPEED_1000;
hw_dbg("1000 Mbs, ");
}
} else if (status & IGC_STATUS_SPEED_100) {
*speed = SPEED_100;
hw_dbg("100 Mbs, ");
} else {
*speed = SPEED_10;
hw_dbg("10 Mbs, ");
}
if (status & IGC_STATUS_FD) {
*duplex = FULL_DUPLEX;
hw_dbg("Full Duplex\n");
} else {
*duplex = HALF_DUPLEX;
hw_dbg("Half Duplex\n");
}
return 0;
}
/**
* igc_put_hw_semaphore - Release hardware semaphore
* @hw: pointer to the HW structure
*
* Release hardware semaphore used to access the PHY or NVM
*/
void igc_put_hw_semaphore(struct igc_hw *hw)
{
u32 swsm;
swsm = rd32(IGC_SWSM);
swsm &= ~(IGC_SWSM_SMBI | IGC_SWSM_SWESMBI);
wr32(IGC_SWSM, swsm);
}
/**
* igc_enable_mng_pass_thru - Enable processing of ARPs
* @hw: pointer to the HW structure
*
* Verifies that the hardware needs to leave the interface enabled so that
* frames can be directed to and from the management interface.
*/
bool igc_enable_mng_pass_thru(struct igc_hw *hw)
{
bool ret_val = false;
u32 fwsm, factps;
u32 manc;
if (!hw->mac.asf_firmware_present)
goto out;
manc = rd32(IGC_MANC);
if (!(manc & IGC_MANC_RCV_TCO_EN))
goto out;
if (hw->mac.arc_subsystem_valid) {
fwsm = rd32(IGC_FWSM);
factps = rd32(IGC_FACTPS);
if (!(factps & IGC_FACTPS_MNGCG) &&
((fwsm & IGC_FWSM_MODE_MASK) ==
(igc_mng_mode_pt << IGC_FWSM_MODE_SHIFT))) {
ret_val = true;
goto out;
}
} else {
if ((manc & IGC_MANC_SMBUS_EN) &&
!(manc & IGC_MANC_ASF_EN)) {
ret_val = true;
goto out;
}
}
out:
return ret_val;
}
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018 Intel Corporation */
#ifndef _IGC_MAC_H_
#define _IGC_MAC_H_
#include "igc_hw.h"
#include "igc_phy.h"
#include "igc_defines.h"
#ifndef IGC_REMOVED
#define IGC_REMOVED(a) (0)
#endif /* IGC_REMOVED */
/* forward declaration */
s32 igc_disable_pcie_master(struct igc_hw *hw);
s32 igc_check_for_copper_link(struct igc_hw *hw);
s32 igc_config_fc_after_link_up(struct igc_hw *hw);
s32 igc_force_mac_fc(struct igc_hw *hw);
void igc_init_rx_addrs(struct igc_hw *hw, u16 rar_count);
s32 igc_setup_link(struct igc_hw *hw);
void igc_clear_hw_cntrs_base(struct igc_hw *hw);
s32 igc_get_auto_rd_done(struct igc_hw *hw);
void igc_put_hw_semaphore(struct igc_hw *hw);
void igc_rar_set(struct igc_hw *hw, u8 *addr, u32 index);
void igc_config_collision_dist(struct igc_hw *hw);
s32 igc_get_speed_and_duplex_copper(struct igc_hw *hw, u16 *speed,
u16 *duplex);
bool igc_enable_mng_pass_thru(struct igc_hw *hw);
enum igc_mng_mode {
igc_mng_mode_none = 0,
igc_mng_mode_asf,
igc_mng_mode_pt,
igc_mng_mode_ipmi,
igc_mng_mode_host_if_only
};
#endif
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Intel Corporation */
#include "igc_mac.h"
#include "igc_nvm.h"
/**
* igc_poll_eerd_eewr_done - Poll for EEPROM read/write completion
* @hw: pointer to the HW structure
* @ee_reg: EEPROM flag for polling
*
* Polls the EEPROM status bit for either read or write completion based
* upon the value of 'ee_reg'.
*/
static s32 igc_poll_eerd_eewr_done(struct igc_hw *hw, int ee_reg)
{
s32 ret_val = -IGC_ERR_NVM;
u32 attempts = 100000;
u32 i, reg = 0;
for (i = 0; i < attempts; i++) {
if (ee_reg == IGC_NVM_POLL_READ)
reg = rd32(IGC_EERD);
else
reg = rd32(IGC_EEWR);
if (reg & IGC_NVM_RW_REG_DONE) {
ret_val = 0;
break;
}
udelay(5);
}
return ret_val;
}
/**
* igc_acquire_nvm - Generic request for access to EEPROM
* @hw: pointer to the HW structure
*
* Set the EEPROM access request bit and wait for EEPROM access grant bit.
* Return successful if access grant bit set, else clear the request for
* EEPROM access and return -IGC_ERR_NVM (-1).
*/
s32 igc_acquire_nvm(struct igc_hw *hw)
{
s32 timeout = IGC_NVM_GRANT_ATTEMPTS;
u32 eecd = rd32(IGC_EECD);
s32 ret_val = 0;
wr32(IGC_EECD, eecd | IGC_EECD_REQ);
eecd = rd32(IGC_EECD);
while (timeout) {
if (eecd & IGC_EECD_GNT)
break;
udelay(5);
eecd = rd32(IGC_EECD);
timeout--;
}
if (!timeout) {
eecd &= ~IGC_EECD_REQ;
wr32(IGC_EECD, eecd);
hw_dbg("Could not acquire NVM grant\n");
ret_val = -IGC_ERR_NVM;
}
return ret_val;
}
/**
* igc_release_nvm - Release exclusive access to EEPROM
* @hw: pointer to the HW structure
*
* Stop any current commands to the EEPROM and clear the EEPROM request bit.
*/
void igc_release_nvm(struct igc_hw *hw)
{
u32 eecd;
eecd = rd32(IGC_EECD);
eecd &= ~IGC_EECD_REQ;
wr32(IGC_EECD, eecd);
}
/**
* igc_read_nvm_eerd - Reads EEPROM using EERD register
* @hw: pointer to the HW structure
* @offset: offset of word in the EEPROM to read
* @words: number of words to read
* @data: buffer for the word(s) read from the EEPROM
*
* Reads 16-bit word(s) from the EEPROM using the EERD register.
*/
s32 igc_read_nvm_eerd(struct igc_hw *hw, u16 offset, u16 words, u16 *data)
{
struct igc_nvm_info *nvm = &hw->nvm;
u32 i, eerd = 0;
s32 ret_val = 0;
/* A check for invalid values: offset too large, too many words,
* and not enough words.
*/
if (offset >= nvm->word_size || (words > (nvm->word_size - offset)) ||
words == 0) {
hw_dbg("nvm parameter(s) out of bounds\n");
ret_val = -IGC_ERR_NVM;
goto out;
}
for (i = 0; i < words; i++) {
eerd = ((offset + i) << IGC_NVM_RW_ADDR_SHIFT) +
IGC_NVM_RW_REG_START;
wr32(IGC_EERD, eerd);
ret_val = igc_poll_eerd_eewr_done(hw, IGC_NVM_POLL_READ);
if (ret_val)
break;
data[i] = (rd32(IGC_EERD) >> IGC_NVM_RW_REG_DATA);
}
out:
return ret_val;
}
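/* Worked example (hypothetical offset): reading word 0x3F starts the
* transaction with
*
*	eerd = (0x3F << IGC_NVM_RW_ADDR_SHIFT) + IGC_NVM_RW_REG_START;
*	wr32(IGC_EERD, eerd);
*
* and once IGC_NVM_RW_REG_DONE asserts, the 16-bit result sits in the
* upper half of IGC_EERD, hence the IGC_NVM_RW_REG_DATA shift on read.
*/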
/**
* igc_read_mac_addr - Read device MAC address
* @hw: pointer to the HW structure
*/
s32 igc_read_mac_addr(struct igc_hw *hw)
{
u32 rar_high;
u32 rar_low;
u16 i;
rar_high = rd32(IGC_RAH(0));
rar_low = rd32(IGC_RAL(0));
for (i = 0; i < IGC_RAL_MAC_ADDR_LEN; i++)
hw->mac.perm_addr[i] = (u8)(rar_low >> (i * 8));
for (i = 0; i < IGC_RAH_MAC_ADDR_LEN; i++)
hw->mac.perm_addr[i + 4] = (u8)(rar_high >> (i * 8));
for (i = 0; i < ETH_ALEN; i++)
hw->mac.addr[i] = hw->mac.perm_addr[i];
return 0;
}
/**
* igc_validate_nvm_checksum - Validate EEPROM checksum
* @hw: pointer to the HW structure
*
* Calculates the EEPROM checksum by reading/adding each word of the EEPROM
* and then verifies that the sum of the EEPROM is equal to 0xBABA.
*/
s32 igc_validate_nvm_checksum(struct igc_hw *hw)
{
u16 checksum = 0;
u16 i, nvm_data;
s32 ret_val = 0;
for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
if (ret_val) {
hw_dbg("NVM Read Error\n");
goto out;
}
checksum += nvm_data;
}
if (checksum != (u16)NVM_SUM) {
hw_dbg("NVM Checksum Invalid\n");
ret_val = -IGC_ERR_NVM;
goto out;
}
out:
return ret_val;
}
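/* The invariant being checked, stated as arithmetic: the words at
* offsets 0x0 through NVM_CHECKSUM_REG must sum (mod 2^16) to NVM_SUM,
* 0xBABA. The checksum word itself is therefore stored as
*
*	checksum = (u16)NVM_SUM - sum(words 0x0..NVM_CHECKSUM_REG - 1)
*
* which is exactly what igc_update_nvm_checksum() below writes.
*/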
/**
* igc_update_nvm_checksum - Update EEPROM checksum
* @hw: pointer to the HW structure
*
* Updates the EEPROM checksum by reading/adding each word of the EEPROM
* up to the checksum. Then calculates the EEPROM checksum and writes the
* value to the EEPROM.
*/
s32 igc_update_nvm_checksum(struct igc_hw *hw)
{
u16 checksum = 0;
u16 i, nvm_data;
s32 ret_val;
for (i = 0; i < NVM_CHECKSUM_REG; i++) {
ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
if (ret_val) {
hw_dbg("NVM Read Error while updating checksum.\n");
goto out;
}
checksum += nvm_data;
}
checksum = (u16)NVM_SUM - checksum;
ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum);
if (ret_val)
hw_dbg("NVM Write Error while updating checksum.\n");
out:
return ret_val;
}
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018 Intel Corporation */
#ifndef _IGC_NVM_H_
#define _IGC_NVM_H_
s32 igc_acquire_nvm(struct igc_hw *hw);
void igc_release_nvm(struct igc_hw *hw);
s32 igc_read_mac_addr(struct igc_hw *hw);
s32 igc_read_nvm_eerd(struct igc_hw *hw, u16 offset, u16 words, u16 *data);
s32 igc_validate_nvm_checksum(struct igc_hw *hw);
s32 igc_update_nvm_checksum(struct igc_hw *hw);
#endif
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Intel Corporation */
#include "igc_phy.h"
/* forward declaration */
static s32 igc_phy_setup_autoneg(struct igc_hw *hw);
static s32 igc_wait_autoneg(struct igc_hw *hw);
/**
* igc_check_reset_block - Check if PHY reset is blocked
* @hw: pointer to the HW structure
*
* Read the PHY management control register and check whether a PHY reset
* is blocked. If a reset is not blocked return 0, otherwise
* return IGC_ERR_BLK_PHY_RESET (12).
*/
s32 igc_check_reset_block(struct igc_hw *hw)
{
u32 manc;
manc = rd32(IGC_MANC);
return (manc & IGC_MANC_BLK_PHY_RST_ON_IDE) ?
IGC_ERR_BLK_PHY_RESET : 0;
}
/**
* igc_get_phy_id - Retrieve the PHY ID and revision
* @hw: pointer to the HW structure
*
* Reads the PHY registers and stores the PHY ID and possibly the PHY
* revision in the hardware structure.
*/
s32 igc_get_phy_id(struct igc_hw *hw)
{
struct igc_phy_info *phy = &hw->phy;
s32 ret_val = 0;
u16 phy_id;
ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id);
if (ret_val)
goto out;
phy->id = (u32)(phy_id << 16);
usleep_range(200, 500);
ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id);
if (ret_val)
goto out;
phy->id |= (u32)(phy_id & PHY_REVISION_MASK);
phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);
out:
return ret_val;
}
/**
* igc_phy_has_link - Polls PHY for link
* @hw: pointer to the HW structure
* @iterations: number of times to poll for link
* @usec_interval: delay between polling attempts
* @success: pointer to whether polling was successful or not
*
* Polls the PHY status register for link, 'iterations' number of times.
*/
s32 igc_phy_has_link(struct igc_hw *hw, u32 iterations,
u32 usec_interval, bool *success)
{
u16 i, phy_status;
s32 ret_val = 0;
for (i = 0; i < iterations; i++) {
/* Some PHYs require the PHY_STATUS register to be read
* twice due to the link bit being sticky. No harm doing
* it across the board.
*/
ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
if (ret_val && usec_interval > 0) {
/* If the first read fails, another entity may have
* ownership of the resources, wait and try again to
* see if they have relinquished the resources yet.
*/
if (usec_interval >= 1000)
mdelay(usec_interval / 1000);
else
udelay(usec_interval);
}
ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
if (ret_val)
break;
if (phy_status & MII_SR_LINK_STATUS)
break;
if (usec_interval >= 1000)
mdelay(usec_interval / 1000);
else
udelay(usec_interval);
}
*success = (i < iterations) ? true : false;
return ret_val;
}
/**
* igc_power_up_phy_copper - Restore copper link in case of PHY power down
* @hw: pointer to the HW structure
*
* In the case of a PHY power down to save power, or to turn off link during a
* driver unload, restore the link to previous settings.
*/
void igc_power_up_phy_copper(struct igc_hw *hw)
{
u16 mii_reg = 0;
/* The PHY will retain its settings across a power down/up cycle */
hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
mii_reg &= ~MII_CR_POWER_DOWN;
hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
}
/**
* igc_power_down_phy_copper - Power down copper PHY
* @hw: pointer to the HW structure
*
* Power down PHY to save power when interface is down and wake on lan
* is not enabled.
*/
void igc_power_down_phy_copper(struct igc_hw *hw)
{
u16 mii_reg = 0;
/* The PHY will retain its settings across a power down/up cycle */
hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
mii_reg |= MII_CR_POWER_DOWN;
/* Temporary workaround - should be removed when the PHY
* implements the IEEE registers properly
*/
/* hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);*/
usleep_range(1000, 2000);
}
/**
* igc_check_downshift - Checks whether a downshift in speed occurred
* @hw: pointer to the HW structure
*
* Success returns 0, Failure returns 1
*
* A downshift is detected by querying the PHY link health.
*/
s32 igc_check_downshift(struct igc_hw *hw)
{
struct igc_phy_info *phy = &hw->phy;
u16 phy_data, offset, mask;
s32 ret_val;
switch (phy->type) {
case igc_phy_i225:
default:
/* speed downshift not supported */
phy->speed_downgraded = false;
ret_val = 0;
goto out;
}
ret_val = phy->ops.read_reg(hw, offset, &phy_data);
if (!ret_val)
phy->speed_downgraded = (phy_data & mask) ? true : false;
out:
return ret_val;
}
/**
* igc_phy_hw_reset - PHY hardware reset
* @hw: pointer to the HW structure
*
* Verify the reset block is not blocking us from resetting. Acquire
* semaphore (if necessary) and read/set/write the device control reset
* bit in the PHY. Wait the appropriate delay time for the device to
* reset and release the semaphore (if necessary).
*/
s32 igc_phy_hw_reset(struct igc_hw *hw)
{
struct igc_phy_info *phy = &hw->phy;
s32 ret_val;
u32 ctrl;
ret_val = igc_check_reset_block(hw);
if (ret_val) {
ret_val = 0;
goto out;
}
ret_val = phy->ops.acquire(hw);
if (ret_val)
goto out;
ctrl = rd32(IGC_CTRL);
wr32(IGC_CTRL, ctrl | IGC_CTRL_PHY_RST);
wrfl();
udelay(phy->reset_delay_us);
wr32(IGC_CTRL, ctrl);
wrfl();
usleep_range(1500, 2000);
phy->ops.release(hw);
out:
return ret_val;
}
/**
* igc_copper_link_autoneg - Setup/Enable autoneg for copper link
* @hw: pointer to the HW structure
*
* Performs initial bounds checking on autoneg advertisement parameter, then
* configure to advertise the full capability. Setup the PHY to autoneg
* and restart the negotiation process with the link partner. If
* autoneg_wait_to_complete, then wait for autoneg to complete before exiting.
*/
static s32 igc_copper_link_autoneg(struct igc_hw *hw)
{
struct igc_phy_info *phy = &hw->phy;
u16 phy_ctrl;
s32 ret_val;
/* Perform some bounds checking on the autoneg advertisement
* parameter.
*/
phy->autoneg_advertised &= phy->autoneg_mask;
/* If autoneg_advertised is zero, we assume it was not defaulted
* by the calling code so we set to advertise full capability.
*/
if (phy->autoneg_advertised == 0)
phy->autoneg_advertised = phy->autoneg_mask;
hw_dbg("Reconfiguring auto-neg advertisement params\n");
ret_val = igc_phy_setup_autoneg(hw);
if (ret_val) {
hw_dbg("Error Setting up Auto-Negotiation\n");
goto out;
}
hw_dbg("Restarting Auto-Neg\n");
/* Restart auto-negotiation by setting the Auto Neg Enable bit and
* the Auto Neg Restart bit in the PHY control register.
*/
ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
if (ret_val)
goto out;
phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl);
if (ret_val)
goto out;
/* Does the user want to wait for Auto-Neg to complete here, or
* check at a later time (for example, callback routine).
*/
if (phy->autoneg_wait_to_complete) {
ret_val = igc_wait_autoneg(hw);
if (ret_val) {
hw_dbg("Error while waiting for autoneg to complete\n");
goto out;
}
}
hw->mac.get_link_status = true;
out:
return ret_val;
}
/**
* igc_wait_autoneg - Wait for auto-neg completion
* @hw: pointer to the HW structure
*
* Waits for auto-negotiation to complete or for the auto-negotiation time
* limit to expire, whichever happens first.
*/
static s32 igc_wait_autoneg(struct igc_hw *hw)
{
u16 i, phy_status;
s32 ret_val = 0;
/* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */
for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) {
ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
if (ret_val)
break;
ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
if (ret_val)
break;
if (phy_status & MII_SR_AUTONEG_COMPLETE)
break;
msleep(100);
}
/* Expiration of PHY_AUTO_NEG_LIMIT doesn't guarantee that
* auto-negotiation has completed.
*/
return ret_val;
}
/**
* igc_phy_setup_autoneg - Configure PHY for auto-negotiation
* @hw: pointer to the HW structure
*
* Reads the MII auto-neg advertisement register and/or the 1000T control
* register and if the PHY is already setup for auto-negotiation, then
* return successful. Otherwise, setup advertisement and flow control to
* the appropriate values for the desired auto-negotiation.
*/
static s32 igc_phy_setup_autoneg(struct igc_hw *hw)
{
struct igc_phy_info *phy = &hw->phy;
u16 aneg_multigbt_an_ctrl = 0;
u16 mii_1000t_ctrl_reg = 0;
u16 mii_autoneg_adv_reg;
s32 ret_val;
phy->autoneg_advertised &= phy->autoneg_mask;
/* Read the MII Auto-Neg Advertisement Register (Address 4). */
ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
if (ret_val)
return ret_val;
if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
/* Read the MII 1000Base-T Control Register (Address 9). */
ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL,
&mii_1000t_ctrl_reg);
if (ret_val)
return ret_val;
}
if ((phy->autoneg_mask & ADVERTISE_2500_FULL) &&
hw->phy.id == I225_I_PHY_ID) {
/* Read the MULTI GBT AN Control Register - reg 7.32 */
ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK <<
MMD_DEVADDR_SHIFT) |
ANEG_MULTIGBT_AN_CTRL,
&aneg_multigbt_an_ctrl);
if (ret_val)
return ret_val;
}
/* Need to parse both autoneg_advertised and fc and set up
* the appropriate PHY registers. First we will parse for
* autoneg_advertised software override. Since we can advertise
* a plethora of combinations, we need to check each bit
* individually.
*/
/* First we clear all the 10/100 mb speed bits in the Auto-Neg
* Advertisement Register (Address 4) and the 1000 mb speed bits in
* the 1000Base-T Control Register (Address 9).
*/
mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS |
NWAY_AR_100TX_HD_CAPS |
NWAY_AR_10T_FD_CAPS |
NWAY_AR_10T_HD_CAPS);
mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS);
hw_dbg("autoneg_advertised %x\n", phy->autoneg_advertised);
/* Do we want to advertise 10 Mb Half Duplex? */
if (phy->autoneg_advertised & ADVERTISE_10_HALF) {
hw_dbg("Advertise 10mb Half duplex\n");
mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
}
/* Do we want to advertise 10 Mb Full Duplex? */
if (phy->autoneg_advertised & ADVERTISE_10_FULL) {
hw_dbg("Advertise 10mb Full duplex\n");
mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
}
/* Do we want to advertise 100 Mb Half Duplex? */
if (phy->autoneg_advertised & ADVERTISE_100_HALF) {
hw_dbg("Advertise 100mb Half duplex\n");
mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
}
/* Do we want to advertise 100 Mb Full Duplex? */
if (phy->autoneg_advertised & ADVERTISE_100_FULL) {
hw_dbg("Advertise 100mb Full duplex\n");
mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
}
/* We do not allow the Phy to advertise 1000 Mb Half Duplex */
if (phy->autoneg_advertised & ADVERTISE_1000_HALF)
hw_dbg("Advertise 1000mb Half duplex request denied!\n");
/* Do we want to advertise 1000 Mb Full Duplex? */
if (phy->autoneg_advertised & ADVERTISE_1000_FULL) {
hw_dbg("Advertise 1000mb Full duplex\n");
mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
}
/* We do not allow the Phy to advertise 2500 Mb Half Duplex */
if (phy->autoneg_advertised & ADVERTISE_2500_HALF)
hw_dbg("Advertise 2500mb Half duplex request denied!\n");
/* Do we want to advertise 2500 Mb Full Duplex? */
if (phy->autoneg_advertised & ADVERTISE_2500_FULL) {
hw_dbg("Advertise 2500mb Full duplex\n");
aneg_multigbt_an_ctrl |= CR_2500T_FD_CAPS;
} else {
aneg_multigbt_an_ctrl &= ~CR_2500T_FD_CAPS;
}
/* Check for a software override of the flow control settings, and
* setup the PHY advertisement registers accordingly. If
* auto-negotiation is enabled, then software will have to set the
* "PAUSE" bits to the correct value in the Auto-Negotiation
* Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-
* negotiation.
*
* The possible values of the "fc" parameter are:
* 0: Flow control is completely disabled
* 1: Rx flow control is enabled (we can receive pause frames
* but not send pause frames).
* 2: Tx flow control is enabled (we can send pause frames
* but we do not support receiving pause frames).
* 3: Both Rx and Tx flow control (symmetric) are enabled.
* other: No software override. The flow control configuration
* in the EEPROM is used.
*/
switch (hw->fc.current_mode) {
case igc_fc_none:
/* Flow control (Rx & Tx) is completely disabled by a
* software over-ride.
*/
mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
break;
case igc_fc_rx_pause:
/* Rx Flow control is enabled, and Tx Flow control is
* disabled, by a software over-ride.
*
* Since there really isn't a way to advertise that we are
* capable of Rx Pause ONLY, we will advertise that we
* support both symmetric and asymmetric Rx PAUSE. Later
* (in igc_config_fc_after_link_up) we will disable the
* hw's ability to send PAUSE frames.
*/
mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
break;
case igc_fc_tx_pause:
/* Tx Flow control is enabled, and Rx Flow control is
* disabled, by a software over-ride.
*/
mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
break;
case igc_fc_full:
/* Flow control (both Rx and Tx) is enabled by a software
* over-ride.
*/
mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
break;
default:
hw_dbg("Flow control param set incorrectly\n");
return -IGC_ERR_CONFIG;
}
ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
if (ret_val)
return ret_val;
hw_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
if (phy->autoneg_mask & ADVERTISE_1000_FULL)
ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL,
mii_1000t_ctrl_reg);
if ((phy->autoneg_mask & ADVERTISE_2500_FULL) &&
hw->phy.id == I225_I_PHY_ID)
ret_val = phy->ops.write_reg(hw,
(STANDARD_AN_REG_MASK <<
MMD_DEVADDR_SHIFT) |
ANEG_MULTIGBT_AN_CTRL,
aneg_multigbt_an_ctrl);
return ret_val;
}
/**
* igc_setup_copper_link - Configure copper link settings
* @hw: pointer to the HW structure
*
* Calls the appropriate function to configure the link for auto-neg or forced
* speed and duplex. Then we check for link; once link is established, the
* collision distance and flow control are configured. If link is
* not established, we return -IGC_ERR_PHY (-2).
*/
s32 igc_setup_copper_link(struct igc_hw *hw)
{
s32 ret_val = 0;
bool link;
if (hw->mac.autoneg) {
/* Setup autoneg and flow control advertisement and perform
* autonegotiation.
*/
ret_val = igc_copper_link_autoneg(hw);
if (ret_val)
goto out;
} else {
/* PHY will be set to 10H, 10F, 100H or 100F
* depending on user settings.
*/
hw_dbg("Forcing Speed and Duplex\n");
ret_val = hw->phy.ops.force_speed_duplex(hw);
if (ret_val) {
hw_dbg("Error Forcing Speed and Duplex\n");
goto out;
}
}
/* Check link status. Wait up to 100 microseconds for link to become
* valid.
*/
ret_val = igc_phy_has_link(hw, COPPER_LINK_UP_LIMIT, 10, &link);
if (ret_val)
goto out;
if (link) {
hw_dbg("Valid link established!!!\n");
igc_config_collision_dist(hw);
ret_val = igc_config_fc_after_link_up(hw);
} else {
hw_dbg("Unable to establish link!!!\n");
}
out:
return ret_val;
}
/**
* igc_read_phy_reg_mdic - Read MDI control register
* @hw: pointer to the HW structure
* @offset: register offset to be read
* @data: pointer to the read data
*
* Reads the MDI control register in the PHY at offset and stores the
* information read to data.
*/
static s32 igc_read_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 *data)
{
struct igc_phy_info *phy = &hw->phy;
u32 i, mdic = 0;
s32 ret_val = 0;
if (offset > MAX_PHY_REG_ADDRESS) {
hw_dbg("PHY Address %d is out of range\n", offset);
ret_val = -IGC_ERR_PARAM;
goto out;
}
/* Set up Op-code, Phy Address, and register offset in the MDI
* Control register. The MAC will take care of interfacing with the
* PHY to retrieve the desired data.
*/
mdic = ((offset << IGC_MDIC_REG_SHIFT) |
(phy->addr << IGC_MDIC_PHY_SHIFT) |
(IGC_MDIC_OP_READ));
wr32(IGC_MDIC, mdic);
/* Poll the ready bit to see if the MDI read completed
* Increasing the time out as testing showed failures with
* the lower time out
*/
for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) {
usleep_range(500, 1000);
mdic = rd32(IGC_MDIC);
if (mdic & IGC_MDIC_READY)
break;
}
if (!(mdic & IGC_MDIC_READY)) {
hw_dbg("MDI Read did not complete\n");
ret_val = -IGC_ERR_PHY;
goto out;
}
if (mdic & IGC_MDIC_ERROR) {
hw_dbg("MDI Error\n");
ret_val = -IGC_ERR_PHY;
goto out;
}
*data = (u16)mdic;
out:
return ret_val;
}
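/* Worked example (hypothetical PHY address 1): a read of PHY_STATUS
* (MII register 1) is kicked off with
*
*	mdic = (1 << IGC_MDIC_REG_SHIFT) |
*	       (1 << IGC_MDIC_PHY_SHIFT) |
*	       IGC_MDIC_OP_READ;
*	wr32(IGC_MDIC, mdic);
*
* and once IGC_MDIC_READY asserts with IGC_MDIC_ERROR clear, the
* result is the low 16 bits of IGC_MDIC.
*/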
/**
* igc_write_phy_reg_mdic - Write MDI control register
* @hw: pointer to the HW structure
* @offset: register offset to write to
* @data: data to write to register at offset
*
* Writes data to MDI control register in the PHY at offset.
*/
static s32 igc_write_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 data)
{
struct igc_phy_info *phy = &hw->phy;
u32 i, mdic = 0;
s32 ret_val = 0;
if (offset > MAX_PHY_REG_ADDRESS) {
hw_dbg("PHY Address %d is out of range\n", offset);
ret_val = -IGC_ERR_PARAM;
goto out;
}
/* Set up Op-code, Phy Address, and register offset in the MDI
* Control register. The MAC will take care of interfacing with the
* PHY to write the desired data.
*/
mdic = (((u32)data) |
(offset << IGC_MDIC_REG_SHIFT) |
(phy->addr << IGC_MDIC_PHY_SHIFT) |
(IGC_MDIC_OP_WRITE));
wr32(IGC_MDIC, mdic);
/* Poll the ready bit to see if the MDI write completed.
* Increasing the timeout as testing showed failures with
* the lower timeout.
*/
for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) {
usleep_range(500, 1000);
mdic = rd32(IGC_MDIC);
if (mdic & IGC_MDIC_READY)
break;
}
if (!(mdic & IGC_MDIC_READY)) {
hw_dbg("MDI Write did not complete\n");
ret_val = -IGC_ERR_PHY;
goto out;
}
if (mdic & IGC_MDIC_ERROR) {
hw_dbg("MDI Error\n");
ret_val = -IGC_ERR_PHY;
goto out;
}
out:
return ret_val;
}
/**
* __igc_access_xmdio_reg - Read/write XMDIO register
* @hw: pointer to the HW structure
* @address: XMDIO address to program
* @dev_addr: device address to program
* @data: pointer to value to read/write from/to the XMDIO address
* @read: boolean flag to indicate read or write
*/
static s32 __igc_access_xmdio_reg(struct igc_hw *hw, u16 address,
u8 dev_addr, u16 *data, bool read)
{
s32 ret_val;
ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, dev_addr);
if (ret_val)
return ret_val;
ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAAD, address);
if (ret_val)
return ret_val;
ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, IGC_MMDAC_FUNC_DATA |
dev_addr);
if (ret_val)
return ret_val;
if (read)
ret_val = hw->phy.ops.read_reg(hw, IGC_MMDAAD, data);
else
ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAAD, *data);
if (ret_val)
return ret_val;
/* Set the MMD access control register back to 0 */
ret_val = hw->phy.ops.write_reg(hw, IGC_MMDAC, 0);
if (ret_val)
return ret_val;
return ret_val;
}
/**
* igc_read_xmdio_reg - Read XMDIO register
* @hw: pointer to the HW structure
* @addr: XMDIO address to program
* @dev_addr: device address to program
* @data: value to be read from the XMDIO address
*/
static s32 igc_read_xmdio_reg(struct igc_hw *hw, u16 addr,
u8 dev_addr, u16 *data)
{
return __igc_access_xmdio_reg(hw, addr, dev_addr, data, true);
}
/**
* igc_write_xmdio_reg - Write XMDIO register
* @hw: pointer to the HW structure
* @addr: XMDIO address to program
* @dev_addr: device address to program
* @data: value to be written to the XMDIO address
*/
static s32 igc_write_xmdio_reg(struct igc_hw *hw, u16 addr,
u8 dev_addr, u16 data)
{
return __igc_access_xmdio_reg(hw, addr, dev_addr, &data, false);
}
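/* Usage sketch, mirroring the Clause 45 access done through the gpy
* helpers in igc_phy_setup_autoneg() above: reading the MULTI GBT AN
* control register from MMD 7 (the AN device address):
*
*	u16 val;
*	s32 err = igc_read_xmdio_reg(hw, ANEG_MULTIGBT_AN_CTRL,
*				     STANDARD_AN_REG_MASK, &val);
*
* The two IGC_MMDAC writes in __igc_access_xmdio_reg() first select
* the MMD and then switch it to data mode; IGC_MMDAAD carries the
* register address and then the data.
*/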
/**
* igc_write_phy_reg_gpy - Write GPY PHY register
* @hw: pointer to the HW structure
* @offset: register offset to write to
* @data: data to write at register offset
*
* Acquires semaphore, if necessary, then writes the data to PHY register
* at the offset. Release any acquired semaphores before exiting.
*/
s32 igc_write_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 data)
{
u8 dev_addr = (offset & GPY_MMD_MASK) >> GPY_MMD_SHIFT;
s32 ret_val;
offset = offset & GPY_REG_MASK;
if (!dev_addr) {
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
return ret_val;
ret_val = igc_write_phy_reg_mdic(hw, offset, data);
if (ret_val)
return ret_val;
hw->phy.ops.release(hw);
} else {
ret_val = igc_write_xmdio_reg(hw, (u16)offset, dev_addr,
data);
}
return ret_val;
}
/**
* igc_read_phy_reg_gpy - Read GPY PHY register
* @hw: pointer to the HW structure
* @offset: lower half is register offset to read,
* upper half is MMD to use.
* @data: data to read at register offset
*
* Acquires semaphore, if necessary, then reads the data in the PHY register
* at the offset. Release any acquired semaphores before exiting.
*/
s32 igc_read_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 *data)
{
u8 dev_addr = (offset & GPY_MMD_MASK) >> GPY_MMD_SHIFT;
s32 ret_val;
offset = offset & GPY_REG_MASK;
if (!dev_addr) {
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
return ret_val;
ret_val = igc_read_phy_reg_mdic(hw, offset, data);
if (ret_val)
return ret_val;
hw->phy.ops.release(hw);
} else {
ret_val = igc_read_xmdio_reg(hw, (u16)offset, dev_addr,
data);
}
return ret_val;
}
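/* Offset encoding used by the two gpy accessors (illustrative): the
* MMD device address rides in the upper half of 'offset', the register
* number in the lower half, e.g.
*
*	offset = (STANDARD_AN_REG_MASK << GPY_MMD_SHIFT) | reg;
*
* A zero MMD field means a plain Clause 22 access through IGC_MDIC;
* any other value is routed through the XMDIO path above.
*/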
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018 Intel Corporation */
#ifndef _IGC_PHY_H_
#define _IGC_PHY_H_
#include "igc_mac.h"
s32 igc_check_reset_block(struct igc_hw *hw);
s32 igc_phy_hw_reset(struct igc_hw *hw);
s32 igc_get_phy_id(struct igc_hw *hw);
s32 igc_phy_has_link(struct igc_hw *hw, u32 iterations,
u32 usec_interval, bool *success);
s32 igc_check_downshift(struct igc_hw *hw);
s32 igc_setup_copper_link(struct igc_hw *hw);
void igc_power_up_phy_copper(struct igc_hw *hw);
void igc_power_down_phy_copper(struct igc_hw *hw);
s32 igc_write_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 data);
s32 igc_read_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 *data);
#endif
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018 Intel Corporation */
#ifndef _IGC_REGS_H_
#define _IGC_REGS_H_
/* General Register Descriptions */
#define IGC_CTRL 0x00000 /* Device Control - RW */
#define IGC_STATUS 0x00008 /* Device Status - RO */
#define IGC_EECD 0x00010 /* EEPROM/Flash Control - RW */
#define IGC_CTRL_EXT 0x00018 /* Extended Device Control - RW */
#define IGC_MDIC 0x00020 /* MDI Control - RW */
#define IGC_MDICNFG 0x00E04 /* MDC/MDIO Configuration - RW */
#define IGC_CONNSW 0x00034 /* Copper/Fiber switch control - RW */
/* Internal Packet Buffer Size Registers */
#define IGC_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */
#define IGC_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */
/* NVM Register Descriptions */
#define IGC_EERD 0x12014 /* EEprom mode read - RW */
#define IGC_EEWR 0x12018 /* EEprom mode write - RW */
/* Flow Control Register Descriptions */
#define IGC_FCAL 0x00028 /* FC Address Low - RW */
#define IGC_FCAH 0x0002C /* FC Address High - RW */
#define IGC_FCT 0x00030 /* FC Type - RW */
#define IGC_FCTTV 0x00170 /* FC Transmit Timer - RW */
#define IGC_FCRTL 0x02160 /* FC Receive Threshold Low - RW */
#define IGC_FCRTH 0x02168 /* FC Receive Threshold High - RW */
#define IGC_FCRTV 0x02460 /* FC Refresh Timer Value - RW */
#define IGC_FCSTS 0x02464 /* FC Status - RO */
/* PCIe Register Description */
#define IGC_GCR 0x05B00 /* PCIe control- RW */
/* Semaphore registers */
#define IGC_SW_FW_SYNC 0x05B5C /* SW-FW Synchronization - RW */
#define IGC_SWSM 0x05B50 /* SW Semaphore */
#define IGC_FWSM 0x05B54 /* FW Semaphore */
/* Function Active and Power State to MNG */
#define IGC_FACTPS 0x05B30
/* Interrupt Register Description */
#define IGC_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */
#define IGC_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */
#define IGC_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */
#define IGC_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */
#define IGC_EIAM 0x01530 /* Ext. Interrupt Auto Mask - RW */
#define IGC_ICR 0x01500 /* Intr Cause Read - RC/W1C */
#define IGC_ICS 0x01504 /* Intr Cause Set - WO */
#define IGC_IMS 0x01508 /* Intr Mask Set/Read - RW */
#define IGC_IMC 0x0150C /* Intr Mask Clear - WO */
#define IGC_IAM 0x01510 /* Intr Ack Auto Mask- RW */
/* Intr Throttle - RW */
#define IGC_EITR(_n) (0x01680 + (0x4 * (_n)))
/* Interrupt Vector Allocation - RW */
#define IGC_IVAR0 0x01700
#define IGC_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */
#define IGC_GPIE 0x01514 /* General Purpose Intr Enable - RW */
/* Interrupt Cause */
#define IGC_ICRXPTC 0x04104 /* Rx Packet Timer Expire Count */
#define IGC_ICRXATC 0x04108 /* Rx Absolute Timer Expire Count */
#define IGC_ICTXPTC 0x0410C /* Tx Packet Timer Expire Count */
#define IGC_ICTXATC 0x04110 /* Tx Absolute Timer Expire Count */
#define IGC_ICTXQEC 0x04118 /* Tx Queue Empty Count */
#define IGC_ICTXQMTC 0x0411C /* Tx Queue Min Threshold Count */
#define IGC_ICRXDMTC 0x04120 /* Rx Descriptor Min Threshold Count */
#define IGC_ICRXOC 0x04124 /* Receiver Overrun Count */
#define IGC_CBTMPC 0x0402C /* Circuit Breaker TX Packet Count */
#define IGC_HTDPMC 0x0403C /* Host Transmit Discarded Packets */
#define IGC_CBRMPC 0x040FC /* Circuit Breaker RX Packet Count */
#define IGC_RPTHC 0x04104 /* Rx Packets To Host */
#define IGC_HGPTC 0x04118 /* Host Good Packets TX Count */
#define IGC_HTCBDPC 0x04124 /* Host TX Circ.Breaker Drop Count */
/* MSI-X Table Register Descriptions */
#define IGC_PBACL 0x05B68 /* MSIx PBA Clear - R/W 1 to clear */
/* Receive Register Descriptions */
#define IGC_RCTL 0x00100 /* Rx Control - RW */
#define IGC_SRRCTL(_n) (0x0C00C + ((_n) * 0x40))
#define IGC_PSRTYPE(_i) (0x05480 + ((_i) * 4))
#define IGC_RDBAL(_n) (0x0C000 + ((_n) * 0x40))
#define IGC_RDBAH(_n) (0x0C004 + ((_n) * 0x40))
#define IGC_RDLEN(_n) (0x0C008 + ((_n) * 0x40))
#define IGC_RDH(_n) (0x0C010 + ((_n) * 0x40))
#define IGC_RDT(_n) (0x0C018 + ((_n) * 0x40))
#define IGC_RXDCTL(_n) (0x0C028 + ((_n) * 0x40))
#define IGC_RQDPC(_n) (0x0C030 + ((_n) * 0x40))
#define IGC_RXCSUM 0x05000 /* Rx Checksum Control - RW */
#define IGC_RLPML 0x05004 /* Rx Long Packet Max Length */
#define IGC_RFCTL 0x05008 /* Receive Filter Control*/
#define IGC_MTA 0x05200 /* Multicast Table Array - RW Array */
#define IGC_UTA 0x0A000 /* Unicast Table Array - RW */
#define IGC_RAL(_n) (0x05400 + ((_n) * 0x08))
#define IGC_RAH(_n) (0x05404 + ((_n) * 0x08))
/* Transmit Register Descriptions */
#define IGC_TCTL 0x00400 /* Tx Control - RW */
#define IGC_TIPG 0x00410 /* Tx Inter-packet gap - RW */
#define IGC_TDBAL(_n) (0x0E000 + ((_n) * 0x40))
#define IGC_TDBAH(_n) (0x0E004 + ((_n) * 0x40))
#define IGC_TDLEN(_n) (0x0E008 + ((_n) * 0x40))
#define IGC_TDH(_n) (0x0E010 + ((_n) * 0x40))
#define IGC_TDT(_n) (0x0E018 + ((_n) * 0x40))
#define IGC_TXDCTL(_n) (0x0E028 + ((_n) * 0x40))
/* MMD Register Descriptions */
#define IGC_MMDAC 13 /* MMD Access Control */
#define IGC_MMDAAD 14 /* MMD Access Address/Data */
/* Good transmitted packets counter registers */
#define IGC_PQGPTC(_n) (0x010014 + (0x100 * (_n)))
/* Statistics Register Descriptions */
#define IGC_CRCERRS 0x04000 /* CRC Error Count - R/clr */
#define IGC_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */
#define IGC_SYMERRS 0x04008 /* Symbol Error Count - R/clr */
#define IGC_RXERRC 0x0400C /* Receive Error Count - R/clr */
#define IGC_MPC 0x04010 /* Missed Packet Count - R/clr */
#define IGC_SCC 0x04014 /* Single Collision Count - R/clr */
#define IGC_ECOL 0x04018 /* Excessive Collision Count - R/clr */
#define IGC_MCC 0x0401C /* Multiple Collision Count - R/clr */
#define IGC_LATECOL 0x04020 /* Late Collision Count - R/clr */
#define IGC_COLC 0x04028 /* Collision Count - R/clr */
#define IGC_DC 0x04030 /* Defer Count - R/clr */
#define IGC_TNCRS 0x04034 /* Tx-No CRS - R/clr */
#define IGC_SEC 0x04038 /* Sequence Error Count - R/clr */
#define IGC_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */
#define IGC_RLEC 0x04040 /* Receive Length Error Count - R/clr */
#define IGC_XONRXC 0x04048 /* XON Rx Count - R/clr */
#define IGC_XONTXC 0x0404C /* XON Tx Count - R/clr */
#define IGC_XOFFRXC 0x04050 /* XOFF Rx Count - R/clr */
#define IGC_XOFFTXC 0x04054 /* XOFF Tx Count - R/clr */
#define IGC_FCRUC 0x04058 /* Flow Control Rx Unsupported Count - R/clr */
#define IGC_PRC64 0x0405C /* Packets Rx (64 bytes) - R/clr */
#define IGC_PRC127 0x04060 /* Packets Rx (65-127 bytes) - R/clr */
#define IGC_PRC255 0x04064 /* Packets Rx (128-255 bytes) - R/clr */
#define IGC_PRC511 0x04068 /* Packets Rx (256-511 bytes) - R/clr */
#define IGC_PRC1023 0x0406C /* Packets Rx (512-1023 bytes) - R/clr */
#define IGC_PRC1522 0x04070 /* Packets Rx (1024-1522 bytes) - R/clr */
#define IGC_GPRC 0x04074 /* Good Packets Rx Count - R/clr */
#define IGC_BPRC 0x04078 /* Broadcast Packets Rx Count - R/clr */
#define IGC_MPRC 0x0407C /* Multicast Packets Rx Count - R/clr */
#define IGC_GPTC 0x04080 /* Good Packets Tx Count - R/clr */
#define IGC_GORCL 0x04088 /* Good Octets Rx Count Low - R/clr */
#define IGC_GORCH 0x0408C /* Good Octets Rx Count High - R/clr */
#define IGC_GOTCL 0x04090 /* Good Octets Tx Count Low - R/clr */
#define IGC_GOTCH 0x04094 /* Good Octets Tx Count High - R/clr */
#define IGC_RNBC 0x040A0 /* Rx No Buffers Count - R/clr */
#define IGC_RUC 0x040A4 /* Rx Undersize Count - R/clr */
#define IGC_RFC 0x040A8 /* Rx Fragment Count - R/clr */
#define IGC_ROC 0x040AC /* Rx Oversize Count - R/clr */
#define IGC_RJC 0x040B0 /* Rx Jabber Count - R/clr */
#define IGC_MGTPRC 0x040B4 /* Management Packets Rx Count - R/clr */
#define IGC_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */
#define IGC_MGTPTC 0x040BC /* Management Packets Tx Count - R/clr */
#define IGC_TORL 0x040C0 /* Total Octets Rx Low - R/clr */
#define IGC_TORH 0x040C4 /* Total Octets Rx High - R/clr */
#define IGC_TOTL 0x040C8 /* Total Octets Tx Low - R/clr */
#define IGC_TOTH 0x040CC /* Total Octets Tx High - R/clr */
#define IGC_TPR 0x040D0 /* Total Packets Rx - R/clr */
#define IGC_TPT 0x040D4 /* Total Packets Tx - R/clr */
#define IGC_PTC64 0x040D8 /* Packets Tx (64 bytes) - R/clr */
#define IGC_PTC127 0x040DC /* Packets Tx (65-127 bytes) - R/clr */
#define IGC_PTC255 0x040E0 /* Packets Tx (128-255 bytes) - R/clr */
#define IGC_PTC511 0x040E4 /* Packets Tx (256-511 bytes) - R/clr */
#define IGC_PTC1023 0x040E8 /* Packets Tx (512-1023 bytes) - R/clr */
#define IGC_PTC1522 0x040EC /* Packets Tx (1024-1522 bytes) - R/clr */
#define IGC_MPTC 0x040F0 /* Multicast Packets Tx Count - R/clr */
#define IGC_BPTC 0x040F4 /* Broadcast Packets Tx Count - R/clr */
#define IGC_TSCTC 0x040F8 /* TCP Segmentation Context Tx - R/clr */
#define IGC_TSCTFC 0x040FC /* TCP Segmentation Context Tx Fail - R/clr */
#define IGC_IAC 0x04100 /* Interrupt Assertion Count */
#define IGC_RXDMTC 0x04120 /* Rx Descriptor Minimum Threshold Count */
#define IGC_HGORCL 0x04128 /* Host Good Octets Received Count Low */
#define IGC_HGORCH 0x0412C /* Host Good Octets Received Count High */
#define IGC_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */
#define IGC_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */
#define IGC_LENERRS 0x04138 /* Length Errors Count */
#define IGC_SCVPC 0x04228 /* SerDes/SGMII Code Violation Pkt Count */
#define IGC_HRMPC 0x0A018 /* Header Redirection Missed Packet Count */
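/* Editorial usage sketch, not part of this patch: these counters are
 * clear-on-read.  64-bit octet counters are split into low/high halves
 * (e.g. GORCL/GORCH); the usual pattern accumulates the low half into a
 * running software total and then reads the high half as well so both
 * halves are cleared:
 *
 *	stats->gorc += rd32(IGC_GORCL);
 *	rd32(IGC_GORCH);
 */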
/* Management registers */
#define IGC_MANC 0x05820 /* Management Control - RW */
/* Shadow Ram Write Register - RW */
#define IGC_SRWR 0x12018
/* forward declaration */
struct igc_hw;
u32 igc_rd32(struct igc_hw *hw, u32 reg);
/* write operations, indexed using DWORDS */
#define wr32(reg, val) \
do { \
u8 __iomem *hw_addr = READ_ONCE((hw)->hw_addr); \
if (!IGC_REMOVED(hw_addr)) \
writel((val), &hw_addr[(reg)]); \
} while (0)
#define rd32(reg) (igc_rd32(hw, reg))
#define wrfl() ((void)rd32(IGC_STATUS))
#define array_wr32(reg, offset, value) \
wr32((reg) + ((offset) << 2), (value))
#define array_rd32(reg, offset) (igc_rd32(hw, (reg) + ((offset) << 2)))
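/* Editorial usage sketch, not part of this patch: the array accessors
 * index in 32-bit words, so array_wr32(IGC_MTA, i, 0) writes offset
 * 0x05200 + 4 * i.  Clearing the whole multicast table array would look
 * like:
 *
 *	for (i = 0; i < mta_reg_count; i++)
 *		array_wr32(IGC_MTA, i, 0);
 *
 * where mta_reg_count comes from the MAC configuration elsewhere in the
 * driver.  Note that wr32()/rd32() expand to expressions referencing a
 * local variable named hw, which must point at the struct igc_hw being
 * accessed.
 */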
#endif